repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars)
---|---|---|---|---|---|---|---|---|
jkandasa/integration_tests | scripts/azure_cleanup.py | Python | gpl-2.0 | 3,453 | 0.003186 |
import argparse
import sys
import traceback as tb
from datetime import datetime
from cfme.utils.path import log_path
from cfme.utils.providers import list_provider_keys, get_mgmt
def parse_cmd_line():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument('--nic-template',
help='NIC Name template to be removed', default="test", type=str)
parser.add_argument('--pip-template',
help='PIP Name template to be removed', default="test", type=str)
parser.add_argument('--days-old',
help='--days-old argument to find stack items older than X days ',
default="7", type=int)
parser.add_argument("--output", dest="output", help
|
="target file name, default "
"'cleanup_azure.log' in "
|
"utils.path.log_path",
default=log_path.join('cleanup_azure.log').strpath)
args = parser.parse_args()
return args
def azure_cleanup(nic_template, pip_template, days_old, output):
with open(output, 'w') as report:
report.write('azure_cleanup.py, NICs, PIPs and Stack Cleanup')
report.write("\nDate: {}\n".format(datetime.now()))
try:
for provider_key in list_provider_keys('azure'):
provider_mgmt = get_mgmt(provider_key)
nic_list = provider_mgmt.list_free_nics(nic_template)
report.write("----- Provider: {} -----\n".format(provider_key))
if nic_list:
report.write("Removing Nics with the name \'{}\':\n".format(nic_template))
report.write("\n".join(str(k) for k in nic_list))
report.write("\n")
provider_mgmt.remove_nics_by_search(nic_template)
else:
report.write("No \'{}\' NICs were found\n".format(nic_template))
pip_list = provider_mgmt.list_free_pip(pip_template)
if pip_list:
report.write("Removing Public IPs with the name \'{}\':\n".
format(pip_template))
report.write("\n".join(str(k) for k in pip_list))
report.write("\n")
provider_mgmt.remove_pips_by_search(pip_template)
else:
report.write("No \'{}\' Public IPs were found\n".format(pip_template))
stack_list = provider_mgmt.list_stack(days_old=days_old)
if stack_list:
report.write(
"Removing empty Stacks:\n")
for stack in stack_list:
if provider_mgmt.is_stack_empty(stack):
provider_mgmt.delete_stack(stack)
report.write("Stack {} is empty - Removed\n".format(stack))
else:
report.write("No stacks older than \'{}\' days were found\n".format(
days_old))
return 0
except Exception:
report.write("Something bad happened during Azure cleanup\n")
report.write(tb.format_exc())
return 1
if __name__ == "__main__":
args = parse_cmd_line()
sys.exit(azure_cleanup(args.nic_template, args.pip_template, args.days_old, args.output))
|
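Each record in this table follows the column schema at the top: repository metadata, then the file text split into prefix, middle, and suffix cells (a fill-in-the-middle layout). As a minimal sketch of how a row maps back to a source file, the snippet below simply concatenates the three text fields; the example row, its values, and the dict-style access are assumptions for illustration, not part of the corpus.

```python
# Minimal sketch: rebuild one file from a prefix/middle/suffix row.
# The example row below is hypothetical and only mirrors the column
# layout documented above; it is not taken from the corpus itself.

def reassemble(row: dict) -> str:
    """Concatenate the three text fields back into the full file text."""
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "repo_name": "example/repo",      # hypothetical metadata values
    "path": "pkg/module.py",
    "language": "Python",
    "license": "mit",
    "size": 34,
    "score": 0.0,
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n",
}

print(reassemble(example_row))        # prints the reconstructed source
```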
fiduswriter/fiduswriter | fiduswriter/bibliography/migrations/0003_alter_entry_options.py | Python | agpl-3.0 | 357 | 0 |
# Generated by Django 3.2.4 on 2021-07-05 13:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("bibliography", "0002_move_json_data"),
]
operations = [
migrations.AlterModelOptions(
name="entry",
options={"verbose_name_plural": "Entries"},
),
]
|
secondfoundation/Second-Foundation-Src | src/haruspex/python/echelon/investopedia_generator.py | Python | lgpl-2.1 | 2,630 | 0.031179 |
#Parsing program to sort through Investopedia
import urllib2
import re
#This is the code to parse the List of Terms
def get_glossary(res_num):
html_lowered = res_num.lower();
begin = html_lowered.find('<!-- .alphabet -->')
end = html_lowered.find('<!-- .idx-1 -->')
if begin == -1 or end == -1:
return None
else:
return res_num[begin+len('<!-- .alphabet -->'):end].strip()
#This is the code to parse the Title
def get_title(res_num):
html_lowered = res_num.lower();
begin = html_lowered.find('<title>')
end = html_lowered.find('</title>')
if begin == -1 or end == -1:
return None
else:
return res_num[begin+len('<title>'):end].strip()
#We start with the numbers section of Investopedia
url = "http://www.investopedia.com/terms/1/"
res_num=""
for line in urllib2.urlopen(url):
res_num+=line
title_num = get_title(res_num)
glossary_num = get_glossary(res_num)
##Find all hyperlinks in list then eliminate duplicates
glossary_parsed_num = re.findall(r'href=[\'"]?([^\'" >]+)', glossary_num)
glossary_parsed_num = list(set(glossary_parsed_num))
parent_url = 'http://www.investopedia.com'
tail = ' Definition | Investopedia'
short_tail = ' | Investopedia'
print title_num
gp_list = []
for x in glossary_parsed_num:
gpn = parent_url + x
res_num=""
for line in urllib2.urlopen(gpn):
res_num+=line
gpn_title = get_title(res_num)
gpn_penult = gpn_title.replace(tail,'')
gpn_final = gpn_penult.replace(short_tail,'')
gp_list.append(gpn_final)
#The alphabet section of Investopedia terms begins here
alfa = [chr(i) for i in xrange(ord('a'), ord('z')+1)]
for i, v in enumerate(alfa):
u = 'http://www.investopedia.com/terms/'
w = '/'
invest_alfa_url = u + v + w
# get url info
res_alfa=""
for line in urllib2.urlopen(invest_alfa_url):
res_alfa+=line
glossary_alfa = get_glossary(res_alfa)
title_alfa = get_title(res_alfa)
glossary_parsed_alfa = re.findall(r'href=[\'"]?([^\'" >]+)', glossary_alfa)
glossary_parsed_alfa = list(set(glossary_parsed_alfa))
print title_alfa
for x in glossary_parsed_alfa:
gpa = parent_url + x
res_num=""
for line in urllib2.urlopen(gpa):
res_num+=line
gpa_title = get_title(res_num)
gpa_penult = gpa_title.replace(tail,'')
gpa_final = gpa_penult.replace(short_tail,'')
gp_list.append(gpa_final)
#Write the new list to the file
with open('dict.dat','w') as f:
for item in gp_list:
f.write('%s\n' % item)
#Read back file to check the stock was added correctly
with open('dict.dat') as f:
gp_list = f.readlines()
gp_list = map(lambda s: s.strip(), gp_list)
gp_list = list(set(gp_list))
print gp_list
print ''
|
nimbis/django-cms | menus/base.py | Python | bsd-3-clause | 1,763 | 0.001134 |
# -*- coding: utf-8 -*-
from django.utils.encoding import smart_str
class Menu(object):
namespace = None
def __init__(self, renderer):
self.renderer = renderer
if not self.namespace:
self.namespace = self.__class__.__name__
def get_nodes(self, request):
"""
should return a list of NavigationNode instances
"""
raise NotImplementedError
class Modifier(object):
def __init__(self, renderer):
self.renderer = renderer
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
pass
class NavigationNode(object):
def __init__(self, title, url, id, parent_id=None, parent_namespace=None,
attr=None, visible=True):
self.children = [] # do not touch
self.parent = None # do not touch, code depends on this
self.namespace = None # TODO: Assert why we need this and above
self.title = title
self.url = url
self.id = id
self.parent_id = parent_id
self.parent_namespace = parent_namespace
self.visible = visible
self.attr = attr or {} # To avoid declaring a dict in defaults...
def __repr__(self):
return "<Navigation Node: %s>" % smart_str(self.title)
def get_menu_title(self):
return self.title
def get_absolute_url(self):
return self.url
def get_attribute(self, name):
return self.attr.get(name, None)
def get_descendants(self):
return sum(([node] + node.get_descendants() for node in self.children), [])
def get_ancestors(self):
if getattr(self, 'parent', None):
return [self.parent] + self.parent.get_ancestors()
else:
return []
|
dlenwell/refstack-client | tests/unit/tests.py | Python | apache-2.0 | 909 | 0.0011 |
#
# Copyright (c) 2014 Piston Cloud Computing, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import unittest
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
pass
def test_nothing(self):
# make sure the shuffled sequence does not lose any elements
pass
if __name__ == '__main__':
unittest.main()
|
DIVERSIFY-project/SMART-GH | sensor_processing/constants.py | Python | apache-2.0 | 359 | 0.013928 |
"""
This contains all the constants needed for the daemons to run
"""
LOGGING_CONSTANTS = {
'LOGFILE' : 'summer.log',
'MAX_LOG_SIZE' : 1048576, # 1 MEG
'BACKUP_COUNT' : 5
}
def getLoggingConstants(constant):
"""
Returns various constants needed by the logging module
"""
return LOGGING_CONSTANTS.get(constant, False)
|
tmerrick1/spack | var/spack/repos/builtin/packages/py-psyclone/package.py | Python | lgpl-2.1 | 2,637 | 0.000758 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class PyPsyclone(PythonPackage):
"""Code generation for the PSyKAl framework from the GungHo project,
as used by the LFRic model at the UK Met Office."""
homepage = "https://github.com/stfc/PSyclone"
url = "https://github.com/stfc/PSyclone/archive/1.5.1.tar.gz"
giturl = "https://github.com/stfc/PSyclone.git"
version('1.5.1', git=giturl,
commit='eba7a097175b02f75dec70616cf267b7b3170d78')
version('develop', git=giturl, branch='master')
depends_on('py-setuptools', type='build')
depends_on('py-pyparsing', type=('build', 'run'))
# Test cases fail without compatible versions of py-fparser:
depends_on('py-fparser@0.0.5', type=('build', 'run'), when='@1.5.1')
depends_on('py-fparser', type=('build', 'run'), when='@1.5.2:')
# Dependencies only required for tests:
depends_on('py-numpy', type='test')
depends_on('py-nose', type='test')
depends_on('py-pytest', type='test')
@run_after('install')
@on_package_attributes(run_tests=True)
def check_build(self):
# Limit py.test to search inside the build tree:
touch('pytest.ini')
with working_dir('src'):
Executable('py.test')()
def setup_environment(self, spack_env, run_env):
# Allow testing with installed executables:
spack_env.prepend_path('PATH', self.prefix.bin)
|
rhots/automation | heroes-sidebar-master/reddit.py | Python | isc | 1,603 | 0.029944 |
import praw
import requests
from env import env
from twitch import twitch
class reddit:
def __init__(self):
self.r = praw.Reddit(user_agent='Heroes of the Storm Sidebar by /u/Hermes13')
self.env = env()
self.access_information = None
def setup(self):
# self.r.set_oauth_app_info( client_id=self.env.redditClientID,
# client_secret=self.env.redditSecretID,
# redirect_uri=self.env.redditRedirectURI)
# url = self.r.get_authorize_url('uniqueKey', 'identity modconfig modcontributors wikiread', True)
# import webbrowser
# webbrowser.open(url)
pass
def connect(self):
self.r.set_oauth_app_info( client_id=self.env.redditClientID,
client_secret=self.env.redditSecretID,
redirect_uri=self.env.redditRedirectURI)
# self.access_information = self.r.get_access_information(self.env.redditAuthCode)
# print self.access_information
# self.r.set_access_credentials(**self.access_information)
self.r.refresh_access_information(self.env.redditRefreshToken)
authenticated_user = self.r.get_me()
def updateSidebar(self, matches, streams, freeRotation):
sidebar = self.r.get_wiki_page('heroesofthestorm', 'sidebar')
sidebarWiki = sidebar.content_md
if matches:
sidebarWiki = sidebarWiki.replace("%%EVENTS%%", matches)
if streams:
sidebarWiki = sidebarWiki.replace("%%STREAMS%%", streams)
if freeRotation:
sidebarWiki = sidebarWiki.replace("%%FREEROTATION%%", freeRotation)
self.r.update_settings(self.r.get_subreddit('heroesofthestorm'), description=sidebarWiki)
return sidebarWiki.encode('ascii','ignore')
|
hansbrenna/NetCDF_postprocessor | plotter3.py | Python | gpl-3.0 | 4,021 | 0.034071 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 15:32:58 2015
@author: hanbre
"""
from __future__ import print_function
import sys
import numpy as np
import pandas as pd
import xray
import datetime
import netCDF4
from mpl_toolkits.basemap import Basemap
import matplotlib
from matplotlib.pylab import *
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
import seaborn as sns
from IPython import embed
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
def read_data(id_in):
data = xray.open_dataset(id_in)
return data
def plotter(vm,x,y):
#fig=figure()
print('plotter')
xx,yy=np.meshgrid(x,y)
if shape(xx)!=shape(vm):
vm=vm.transpose()
gases = ['O3','HCL','CL','CLY','']
if var in gases:
CF = contourf(x,y,vm,linspace(np.amin(vm.values),np.amax(vm.values),10),cmap=matplotlib.cm.jet)
CS=contour(x, y, vm,linspace(np.amin(vm.values),np.amax(vm.values),10),colors='k')
elif var == 'T':
CF = contourf(x,y,vm,linspace(np.amin(vm.values),400,10),cmap=matplotlib.cm.jet)
CS=contour(x, y, vm,linspace(np.amin(vm.values),400,10),colors='k')
else:
norm = MidpointNormalize(midpoint=0)
CF=contourf(x,y,vm,np.linspace(np.amin(vm.values),np.amax(vm.values),1000),norm=norm,cmap='seismic')
CS=contour(x, y, vm,10,colors='k')
xlabel(x.units);ylabel(y.units)
clb = colorbar(CF); clb.set_label('('+v.units+')')
#title=('{0} at {1}={2} and {3}={4}'.format(var,getattr(v,pvar1)[p1],getattr(v,pvar1)[p1].values,getattr(v,pvar2)[p2],getattr(v,pvar2)[p2].values))
#close(fig)
return
def meaner(v,mvars):
vm = v.mean(dim=mvars)
return vm
def pointextr(v,pvar1,p1,pvar2,p2,pvars):
vm = v[pvars]
return vm
if __name__=='__main__':
i=0
#case_id = id_in.split('/')
with open(sys.argv[1], 'r') as file_in:
header=next(file_in)
for line in file_in:
i+=1
l=line.strip('\n').split(' ')
id_in=l[0]
ds=read_data(id_in)
typ = l[1]
print(typ)
var = l[2]
xvar = l[3]; yvar = l[4]
v=getattr(ds,var)
x=getattr(ds,xvar)
y=getattr(ds,yvar)
if typ == 'm':
print('here')
mvar1 = l[5]; mvar2 = l[6]
if size(v.dims)==4:
mvars = [mvar1,mvar2]
else:
mvars = [mvar1]
vm=meaner(v,mvars)
savestring = '{0}{1}{2}{3}{4}{5}{6}.png'.format(id_in,typ,var,xvar,yvar,mvar1,mvar2)
print(savestring)
elif typ == 'p':
print('there')
pvar1=l[5]; p1=int(l[7])
pvar2=l[6]; p2=int(l[8])
pvars = {pvar1: p1, pvar2: p2}
vm=pointextr(v,pvar1,p1,pvar2,p2,pvars)
savestring = '{0}{1}{2}{3}{4}{5}{6}{7}{8}.png'.format(id_in,typ,var,xvar,yvar,pvar1,pvar2,p1,p2)
print(savestring)
xis = axes([0.09, 0.1, 0.85, 0.82], axisbg = 'white')
fig = figure(num = i, figsize=(10.,5.), dpi=None, facecolor='w', edgecolor='k')
plotter(vm,x,y)
if yvar == 'lev':
print('log=True')
xis.set_yscale("log")
savefig(savestring,dpi=100, facecolor='w', edgecolor='w', orientation='portrait')
print('again')
close(fig)
del(ds)
|
naturali/tensorflow | tensorflow/models/rnn/ptb/ptb_word_lm.py | Python | apache-2.0 | 10,545 | 0.010906 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et. al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn.ptb import reader
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None, "data_path")
flags.DEFINE_bool("use_fp16", False,
"Train using 16-bit floats instead of 32bit floats")
FLAGS = flags.FLAGS
def data_type():
return tf.float16 if FLAGS.use_fp16 else tf.float32
class PTBModel(object):
"""The PTB model."""
def __init__(self, is_training, config):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)
if is_training and config.keep_prob < 1:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(
lstm_cell, output_keep_prob=config.keep_prob)
cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers, state_is_tuple=True)
self._initial_state = cell.zero_state(batch_size, data_type())
with tf.device("/cpu:0"):
embedding = tf.get_variable(
"embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(embedding, self._input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of tensorflow.models.rnn.rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# inputs = [tf.squeeze(input_, [1])
# for input_ in tf.split(1, num_steps, inputs)]
# outputs, state = tf.nn.rnn(cell, inputs, initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.concat(1, outputs), [-1, size])
softmax_w = tf.get_variable(
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.nn.seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(self._targets, [-1])],
[tf.ones([batch_size * num_steps], dtype=data_type())])
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars))
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
class TestConfig(object):
"""Tiny config, for testing."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 2
hidden_size = 2
max_epoch = 1
max_max_epoch = 1
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
def run_epoch(session, model, data, eval_op, verbose=False):
"""Runs the model on the given data."""
epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size,
model.num_steps)):
fetches = [model.cost, model.final_state, eval_op]
feed_dict = {}
feed_dict[model.input_data] = x
feed_dict[model.targets] = y
for i, (c, h) in enumerate(model.initial_state):
fee
|
showmen15/testEEE | src/amberdriver/drive_to_point/drive_to_point_controller.py | Python | mit | 7,564 | 0.003173 |
import logging
import logging.config
import sys
import threading
import os
from amberclient.collision_avoidance.collision_avoidance_proxy import CollisionAvoidanceProxy
from amberclient.common.amber_client import AmberClient
from amberclient.location.location import LocationProxy
from amberclient.roboclaw.roboclaw import RoboclawProxy
from amberdriver.common.message_handler import MessageHandler
from amberdriver.drive_to_point import drive_to_point_pb2
from amberdriver.drive_to_point.drive_to_point import DriveToPoint
from amberdriver.tools import config
__author__ = 'paoolo'
pwd = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig('%s/drive_to_point.ini' % pwd)
config.add_config_ini('%s/drive_to_point.ini' % pwd)
LOGGER_NAME = 'DriveToPointController'
USE_COLLISION_AVOIDANCE = config.DRIVE_TO_POINT_USE_COLLISION_AVOIDANCE == 'True'
class DriveToPointController(MessageHandler):
def __init__(self, pipe_in, pipe_out, driver):
MessageHandler.__init__(self, pipe_in, pipe_out)
self.__drive_to_point = driver
self.__logger = logging.getLogger(LOGGER_NAME)
def handle_data_message(self, header, message):
if message.HasExtension(drive_to_point_pb2.setTargets):
self.__handle_set_targets(header, message)
elif message.HasExtension(drive_to_point_pb2.getNextTarget):
self.__handle_get_next_target(header, message)
elif message.HasExtension(drive_to_point_pb2.getNextTargets):
self.__handle_get_next_targets(header, message)
elif message.HasExtension(drive_to_point_pb2.getVisitedTarget):
self.__handle_get_visited_target(header, message)
elif message.HasExtension(drive_to_point_pb2.getVisitedTargets):
self.__handle_get_visited_targets(header, message)
elif message.HasExtension(drive_to_point_pb2.getConfiguration):
self.__handle_get_configuration(header, message)
else:
self.__logger.warning('No request in message')
def __handle_set_targets(self, header, message):
self.__logger.debug('Set targets')
targets = message.Extensions[drive_to_point_pb2.targets]
targets = zip(targets.longitudes, targets.latitudes, targets.radiuses)
self.__drive_to_point.set_targets(targets)
@MessageHandler.handle_and_response
def __handle_get_next_target(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get next target')
next_target, current_location = self.__drive_to_point.get_next_target_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend([next_target[0]])
targets.latitudes.extend([next_target[1]])
targets.radiuses.extend([next_target[2]])
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getNextTarget] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_next_targets(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get next targets')
next_targets, current_location = self.__drive_to_point.get_next_targets_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend(map(lambda next_target: next_target[0], next_targets))
targets.latitudes.extend(map(lambda next_target: next_target[1], next_targets))
targets.radiuses.extend(map(lambda next_target: next_target[2], next_targets))
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getNextTargets] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_visited_target(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get visited target')
visited_target, current_location = self.__drive_to_point.get_visited_target_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend([visited_target[0]])
targets.latitudes.extend([visited_target[1]])
targets.radiuses.extend([visited_target[2]])
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getVisitedTarget] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_visited_targets(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get visited targets')
visited_targets, current_location = self.__drive_to_point.get_visited_targets_and_location()
targets = response_message.Extensions[drive_to_point_pb2.targets]
targets.longitudes.extend(map(lambda target: target[0], visited_targets))
targets.latitudes.extend(map(lambda target: target[1], visited_targets))
targets.radiuses.extend(map(lambda target: target[2], visited_targets))
location = response_message.Extensions[drive_to_point_pb2.location]
location.x, location.y, location.p, location.alfa, location.timeStamp = current_location
response_message.Extensions[drive_to_point_pb2.getVisitedTargets] = True
return response_header, response_message
@MessageHandler.handle_and_response
def __handle_get_configuration(self, received_header, received_message, response_header, response_message):
self.__logger.debug('Get configuration')
configuration = response_message.Extensions[drive_to_point_pb2.configuration]
configuration.maxSpeed = self.__drive_to_point.MAX_SPEED
response_message.Extensions[drive_to_point_pb2.getConfiguration] = True
return response_header, response_message
def handle_subscribe_message(self, header, message):
self.__logger.debug('Subscribe action, nothing to do...')
def handle_unsubscribe_message(self, header, message):
self.__logger.debug('Unsubscribe action, nothing to do...')
def handle_client_died_message(self, client_id):
self.__logger.info('Client %d died, stop!', client_id)
self.__drive_to_point.set_targets([])
if __name__ == '__main__':
client_for_location = AmberClient('127.0.0.1', name="location")
client_for_driver = AmberClient('127.0.0.1', name="driver")
location_proxy = LocationProxy(client_for_location, 0)
if USE_COLLISION_AVOIDANCE:
driver_proxy = CollisionAvoidanceProxy(client_for_driver, 0)
else:
driver_proxy = RoboclawProxy(client_for_driver, 0)
drive_to_point = DriveToPoint(driver_proxy, location_proxy)
driving_thread = threading.Thread(target=drive_to_point.driving_loop, name="driving-thread")
driving_thread.start()
location_thread = threading.Thread(target=drive_to_point.location_loop, name="location-thread")
location_thread.start()
controller = DriveToPointController(sys.stdin, sys.stdout, drive_to_point)
controller()
|
kennedyshead/home-assistant | homeassistant/components/zeroconf/models.py | Python | apache-2.0 | 1,697 | 0.001179 |
"""Models for Zeroconf."""
import asyncio
from typing import Any
from zeroconf import DNSPointer, DNSRecord, ServiceBrowser, Zeroconf
from zeroconf.asyncio import AsyncZeroconf
class HaZeroconf(Zeroconf):
"""Zeroconf t
|
hat cannot be closed."""
def close(self) -> None:
"""Fake method to avoid integrations closing it."""
ha_close = Zeroconf.close
class HaAsyncZeroconf(AsyncZeroconf):
"""Home Assistant version of AsyncZeroconf."""
def __init__( # pylint: disable=super-init-not-called
self, *args: Any, **kwargs: Any
) -> None:
"""Wrap AsyncZeroconf."""
self.zeroconf = HaZeroconf(*args, **kwargs)
self.loop = asyncio.get_running_loop()
async def async_close(self) -> None:
"""Fake method to avoid integrations closing it."""
class HaServiceBrowser(ServiceBrowser):
"""ServiceBrowser that only consumes DNSPointer records."""
def update_record(self, zc: Zeroconf, now: float, record: DNSRecord) -> None:
"""Pre-Filter update_record to DNSPointers for the configured type."""
#
# Each ServerBrowser currently runs in its own thread which
# processes every A or AAAA record update per instance.
#
# As the list of zeroconf names we watch for grows, each additional
# ServiceBrowser would process all the A and AAAA updates on the network.
#
# To avoid overwhelming the system we pre-filter here and only process
# DNSPointers for the configured record name (type)
#
if record.name not in self.types or not isinstance(record, DNSPointer):
return
super().update_record(zc, now, record)
|
adamgreenhall/openreviewquarterly | builder/config.py | Python | mit | 879 | 0.025028 |
issues=[
dict(name='Habit',number=5,season='Winter 2012',
description='commit to a change, experience it, and record'),
dict(name='Interview', number=4, season='Autumn 2011',
description="this is your opportunity to inhabit another's mind"),
dict(name= 'Digital Presence', number= 3, season= 'Summer 2011',
description='what does your digital self look like?'),
dict(name= 'Adventure', number=2, season= 'Spring 2011',
description='take an adventure and write about it.'),
dict(name= 'Unplugging', number=1, season= 'Winter 2011',
description='what are you looking forward to leaving?')
]
siteroot='/Users/adam/open review quarterly/source/'
infodir='/Users/adam/open review quarterly/info'
skip_issues_before=5
illustration_tag='=== Illustration ==='
illustration_tag_sized="=== Illustration width: 50% ==="
|
davidko/evolspyse | core/behaviours/ooda.py | Python | lgpl-2.1 | 1,291 | 0.000775 |
"""Spyse OODA behaviour module"""
import time
# http://www.mindsim.com/MindSim/Corporate/OODA.html
# http://www.d-n-i.net/second_level/boyd_military.htm
# http://www.belisarius.com/modern_business_strategy/boyd/essence/eowl_frameset.htm
# http://www.valuebasedmanagement.net/methods_boyd_ooda_loop.html
# http://www.fastcompany.com/magazine/59/pilot.html
#
# The OODA loop (Observe, Orient, Decide, and Act) is an
# information strategy concept for information warfare
# developed by Colonel John Boyd (1927-1997). Although the
# OODA model was clearly created for military purposes,
# elements of the same theory can also be applied to business
# strategy. Boyd developed the theory based on his earlier
# experience as a fighter pilot and work on energy maneuverability.
# He initially used it to explain victory in air-to-air combat,
# but in the last years of his career he expanded his OODA loop
# theory into a grand strategy that would defeat an enemy
# strategically by psychological paralysis.
from spyse.core.behaviours.fsm import FSMBehaviour
class Observation(object):
pass
class Orientation(object):
pass
class Decision(object):
pass
class Action(object):
pass
class OODABehaviour(FSMBehaviour):
pass
|
hgsoft/hgsoft-addons | custom_survey_multi_emails_and_portal/models/custom_survey.py | Python | gpl-3.0 | 454 | 0.015419 |
# -*- coding: utf-8 -*-
from odoo import fields, models
class CustomSurvey(models.Model):
_inherit = 'survey.survey'
auth_required = fields.Boolean('Login required', help="Users with a public link will be requested to login before taking part to the survey",
oldname="authenticate", default=True)
users_can_go_back = fields.Boolean('Users can go back', help="If checked, users can go back to previous pages.", default=True)
|
CAIDA/bgpstream | pybgpstream/docs/conf.py | Python | gpl-2.0 | 8,420 | 0.005819 |
# -*- coding: utf-8 -*-
#
# pybgpstream documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 19 11:07:23 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pybgpstream'
copyright = u'2015, The Regents of the University of California'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pybgpstreamdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pybgpstream.tex', u'pybgpstream Documentation',
u'Alistair King', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pybgpstream', u'pybgpstream Documentation',
[u'Alistair King'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pybgpstream', u'pybgpstream Documentation',
u'Alistair King', 'pybgpstream', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index
|
bonattt/name-masher | tests/masher_test.py | Python | mit | 289 | 0.00346 |
import unittest
class ATest(unittest.TestCase):
def setUp(self):
print("setup")
pass
def test_a(self):
self.assertTrue(True)
def tearDown(self):
print("tear down")
if __name__ == "__main__":
print("masher_test.py")
unittest.main()
|
tflovorn/scSuperSolver | src/RunInterface.py | Python | mit | 4,820 | 0.002905 |
# Copyright (c) 2010, 2011 Timothy Lovorn
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from FileDict import FileDict
from ControllerQueue import ControllerQueue
from Grapher import Grapher
DEFAULT_MAX_PROCESSES = 2
class RunInterface(object):
def __init__(self, path):
self.path = path
def doRun(self, configFiles, maxProcesses=DEFAULT_MAX_PROCESSES):
"""Run a controller for each config in configFiles."""
queue = ControllerQueue(configFiles, maxProcesses)
queue.runAll()
def graphOneSeries(self, configFiles, xVar, yVar, outputName):
grapher = Grapher()
fig, axes = grapher.simple2D(configFiles, "config", xVar, "state", yVar)
grapher.setAxisLabels(axes, xVar, yVar)
figurePath = os.path.join(self.path, outputName)
grapher.saveFigure(fig, figurePath)
return fig, axes
def graphSeriesDict(self, seriesDict, seriesLabel, xVar, yVar, outputName,
legend_title=None):
grapher = Grapher()
fig, axes = grapher.plotSeriesDict(seriesDict, seriesLabel,
"config", xVar, "state", yVar, legend_title=legend_title)
grapher.setAxisLabels(axes, xVar, yVar)
figurePath = os.path.join(self.path, outputName)
grapher.saveFigure(fig, figurePath)
return fig, axes
def makeRun(self, baseConfig, runData):
"""Make a new run of config files from the base config and run
|
Data.
runData is a list of tuples which contain a label and a dict.
Labels are used to name generated configs and their specified output
files. The dicts are key-value pairs for data to modify in the
base config. Return a list of the names of config files generated.
"""
configNames = []
baseConfigFullPath = os.path.join(self.path, baseConfig)
for label, labelData in runData:
newConfig = FileDict(baseConfigFullPath)
newConfigFullPath = os.path.join(self.path, label + "_config")
labelData.update({"outputLogName" : label + "_out.fd",
"errorLogName" : label + "_error",
"debugLogName" : label + "_debug"})
for key, value in labelData.items():
newConfig.setGlobal(str(key), str(value))
configNames.append(newConfigFullPath)
newConfig.writeToFile(newConfigFullPath)
return configNames
def oneDimRun(self, baseConfig, label, varName, minimum, maximum, step):
"""One-dimensional run with varName from [minimum, maximum).
Returns names of config files.
"""
index = 0
varValue = minimum
runData = []
while varValue < maximum:
runData.append((label + "_" + str(index) + "_",
{varName : varValue}))
varValue += step
index += 1
return self.makeRun(baseConfig, runData)
def multiDimRun(self, baseConfig, label, varDataList):
configs = []
for index, data in enumerate(varDataList):
varName, minimum, maximum, step = data
if len(configs) == 0:
label += "_%s_" % varName
configs = self.oneDimRun(baseConfig, label,
varName, minimum, maximum, step)
else:
newConfigs = []
label += "_%s_" % varName
for index, some_config in enumerate(configs):
newConfigs.extend(self.oneDimRun(some_config,
label + "_" + str(index) + "_",
varName, minimum, maximum, step))
configs = newConfigs
return configs
|
hsoft/moneyguru | core/gui/import_window.py | Python | gpl-3.0 | 15,326 | 0.002088 |
# Copyright 2019 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import datetime
from collections import defaultdict
from core.util import dedupe, first as getfirst
from core.trans import tr
from ..model.date import DateFormat
from .base import GUIObject
from .import_table import ImportTable
from .selectable_list import LinkedSelectableList
DAY = 'day'
MONTH = 'month'
YEAR = 'year'
class SwapType:
DayMonth = 0
MonthYear = 1
DayYear = 2
DescriptionPayee = 3
InvertAmount = 4
def last_two_digits(year):
return year - ((year // 100) * 100)
def swapped_date(date, first, second):
attrs = {DAY: date.day, MONTH: date.month, YEAR: last_two_digits(date.year)}
newattrs = {first: attrs[second], second: attrs[first]}
if YEAR in newattrs:
newattrs[YEAR] += 2000
return date.replace(**newattrs)
def swap_format_elements(format, first, second):
# format is a DateFormat
swapped = format.copy()
elems = swapped.elements
TYPE2CHAR = {DAY: 'd', MONTH: 'M', YEAR: 'y'}
first_char = TYPE2CHAR[first]
second_char = TYPE2CHAR[second]
first_index = [i for i, x in enumerate(elems) if x.startswith(first_char)][0]
second_index = [i for i, x in enumerate(elems) if x.startswith(second_char)][0]
elems[first_index], elems[second_index] = elems[second_index], elems[first_index]
return swapped
class AccountPane:
def __init__(self, iwin, account, target_account, parsing_date_format):
self.iwin = iwin
self.account = account
self._selected_target = target_account
self.name = account.name
entries = iwin.loader.accounts.entries_for_account(account)
self.count = len(entries)
self.matches = [] # [[ref, imported]]
self.parsing_date_format = parsing_date_format
self.max_day = 31
self.max_month = 12
self.max_year = 99 # 2 digits
self._match_entries()
self._swap_possibilities = set()
self._compute_swap_possibilities()
def _compute_swap_possibilities(self):
entries = list(self.iwin.loader.accounts.entries_for_account(self.account))
if not entries:
return
self._swap_possibilities = set([(DAY, MONTH), (MONTH, YEAR), (DAY, YEAR)])
for first, second in self._swap_possibilities.copy():
for entry in entries:
try:
swapped_date(entry.date, first, second)
except ValueError:
self._swap_possibilities.remove((first, second))
break
def _match_entries(self):
to_import = list(self.iwin.loader.accounts.entries_for_account(self.account))
reference2entry = {}
for entry in (e for e in to_import if e.reference):
reference2entry[entry.reference] = entry
self.matches = []
if self.selected_target is not None:
entries = self.iwin.document.accounts.entries_for_account(self.selected_target)
for entry in entries:
if entry.reference in reference2entry:
other = reference2entry[entry.reference]
if entry.reconciled:
self.iwin.import_table.dont_import.add(other)
to_import.remove(other)
del reference2entry[entry.reference]
else:
other = None
if other is not None or not entry.reconciled:
self.matches.append([entry, other])
self.matches += [[None, entry] for entry in to_import]
self._sort_matches()
def _sort_matches(self):
self.matches.sort(key=lambda t: t[0].date if t[0] is not None else t[1].date)
def bind(self, existing, imported):
[match1] = [m for m in self.matches if m[0] is existing]
[match2] = [m for m in self.matches if m[1] is imported]
assert match1[1] is None
assert match2[0] is None
match1[1] = match2[1]
self.matches.remove(match2)
def can_swap_date_fields(self, first, second): # 'day', 'month', 'year'
return (first, second) in self._swap_possibilities or (second, first) in self._swap_possibilities
def match_entries_by_date_and_amount(self, threshold):
delta = datetime.timedelta(days=threshold)
unmatched = (
to_import for ref, to_import in self.matches if ref is None)
unmatched_refs = (
ref for ref, to_import in self.matches if to_import is None)
amount2refs = defaultdict(list)
for entry in unmatched_refs:
amount2refs[entry.amount].append(entry)
for entry in unmatched:
if entry.amount not in amount2refs:
continue
potentials = amount2refs[entry.amount]
for ref in potentials:
if abs(ref.date - entry.date) <= delta:
self.bind(ref, entry)
potentials.remove(ref)
self._sort_matches()
def unbind(self, existing, imported):
[match] = [m for m in self.matches if m[0] is existing and m[1] is imported]
match[1] = None
self.matches.append([None, imported])
self._sort_matches()
@property
def selected_target(self):
return self._selected_target
@selected_target.setter
def selected_target(self, value):
self._selected_target = value
self._match_entries()
# This is a modal window that is designed to be re-instantiated on each import
# run. It is shown modally by the UI as soon as its created on the UI side.
class ImportWindow(GUIObject):
# --- View interface
# close()
# close_selected_tab()
# set_swap_button_enabled(enabled: bool)
# update_selected_pane()
# show()
#
def __init__(self, mainwindow, target_account=None):
super().__init__()
if not hasattr(mainwindow, 'loader'):
raise ValueError("Nothing to import!")
self.mainwindow = mainwindow
self.document = mainwindow.document
self.app = self.document.app
self._selected_pane_index = 0
self._selected_target_index = 0
def setfunc(index):
self.view.set_swap_button_enabled(self.can_perform_swap())
self.swap_type_list = LinkedSelectableList(items=[
"<placeholder> Day <--> Month",
"<placeholder> Month <--> Year",
"<placeholder> Day <--> Year",
tr("Description <--> Payee"),
tr("Invert Amounts"),
], setfunc=setfunc)
self.swap_type_list.selected_index = SwapType.DayMonth
self.panes = []
self.import_table = ImportTable(self)
self.loader = self.mainwindow.loader
self.target_accounts = [
a for a in self.document.accounts if a.is_balance_sheet_account()]
self.target_accounts.sort(key=lambda a: a.name.lower())
accounts = []
for account in self.loader.accounts:
if account.is_balance_sheet_account():
entries = self.loader.accounts.entries_for_account(account)
if len(entries):
new_name = self.document.accounts.new_name(account.name)
if new_name != account.name:
self.loader.accounts.rename_account(account, new_name)
accounts.append(account)
parsing_date_format = DateFormat.from_sysformat(self.loader.parsing_date_format)
for account in accounts:
target = target_account
if target is None and account.reference:
target = getfirst(
t for t in self.target_accounts if t.reference == account.reference
)
self.panes.append(
AccountPane(self, account, target, parsing_date_format))
# --- Private
def _can_swap_date_fields(self, first, second): # 'day', 'month', 'year'
pane = self.selected_pa
|
axant/tgapp-mailtemplates | tests/test_controller_auth.py | Python | mit | 626 | 0.007987 |
from .base import configure_app, create_app
import re
find_urls = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
class MailTemplatesAuthControllerTests(object):
def setup(self):
self.app = create_app(self.app_config, True)
class TestMailTemplatesAuthControllerSQLA(MailTemplatesAuthControllerTests):
@classmethod
def setupClass(cls):
cls.app_config = configure_app('sqlalchemy')
class TestMailTemplatesAuthControllerMing(MailTemplatesAuthControllerTests):
@classmethod
def setupClass(cls):
cls.app_config = configure_app('ming')
|
strets123/rdkit | rdkit/ML/Data/Quantize.py | Python | bsd-3-clause | 10,583 | 0.02882 |
# $Id$
#
# Copyright (C) 2001-2008 Greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" Automatic search for quantization bounds
This uses the expected informational gain to determine where quantization bounds should
lie.
**Notes**:
- bounds are less than, so if the bounds are [1.,2.],
[0.9,1.,1.1,2.,2.2] -> [0,1,1,2,2]
"""
from __future__ import print_function
import numpy
from rdkit.ML.InfoTheory import entropy
from rdkit.six.moves import zip, map, range
try:
from rdkit.ML.Data import cQuantize
except:
hascQuantize = 0
else:
hascQuantize = 1
_float_tol = 1e-8
def feq(v1,v2,tol=_float_tol):
""" floating point equality with a tolerance factor
**Arguments**
- v1: a float
- v2: a float
- tol: the tolerance for comparison
**Returns**
0 or 1
"""
return abs(v1-v2) < tol
def FindVarQuantBound(vals,results,nPossibleRes):
""" Uses FindVarMultQuantBounds, only here for historic reasons
"""
bounds,gain = FindVarMultQuantBounds(vals,1,results,nPossibleRes)
return (bounds[0],gain)
def _GenVarTable(vals,cuts,starts,results,nPossibleRes):
""" Primarily intended for internal use
constructs a variable table for the data passed in
The table for a given variable records the number of times each possible value
of that variable appears for each possible result of the function.
**Arguments**
- vals: a 1D Numeric array with the values of the variables
- cuts: a list with the indices of the quantization bounds
(indices are into _starts_ )
- starts: a list of potential starting points for quantization bounds
- results: a 1D Numeric array of integer result codes
- nPossibleRes: an integer with the number of possible result codes
**Returns**
the varTable, a 2D Numeric array which is nVarValues x nPossibleRes
**Notes**
- _vals_ should be sorted!
"""
nVals = len(cuts)+1
varTable = numpy.zeros((nVals,nPossibleRes),'i')
idx = 0
for i in range(nVals-1):
cut = cuts[i]
while idx < starts[cut]:
varTable[i,results[idx]] += 1
idx += 1
while idx < len(vals):
varTable[-1,results[idx]] += 1
idx += 1
return varTable
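# Illustrative sketch (not part of the original module): a perfectly separable
# toy case for _GenVarTable.  A single cut at starts[0] == 2 puts the first two
# sorted values in bin 0 and the rest in bin 1, giving a diagonal variable table.
def _demoGenVarTable():
  vals = [0.0, 0.0, 1.0, 1.0]  # already sorted
  results = [0, 0, 1, 1]
  # expected result: [[2 0], [0 2]]
  return _GenVarTable(vals, cuts=[0], starts=[2], results=results, nPossibleRes=2)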
def _PyRecurseOnBounds(vals,cuts,which,starts,results,nPossibleRes,varTable=None):
""" Primarily intended for internal use
Recursively finds the best quantization boundaries
**Arguments**
- vals: a 1D Numeric array with the values of the variables,
this should be sorted
- cuts: a list with the indices of the quantization bounds
(indices are into _starts_ )
- which: an integer indicating which bound is being adjusted here
(and index into _cuts_ )
- starts: a list of potential starting points for quantization bounds
- results: a 1D Numeric array of integer result codes
- nPossibleRes: an integer with the number of possible result codes
**Returns**
- a 2-tuple containing:
1) the best information gain found so far
2) a list of the quantization bound indices ( _cuts_ for the best case)
**Notes**
- this is not even remotely efficient, which is why a C replacement
was written
"""
nBounds = len(cuts)
maxGain = -1e6
bestCuts = None
highestCutHere = len(starts) - nBounds + which
if varTable is None:
varTable = _GenVarTable(vals,cuts,starts,results,nPossibleRes)
while cuts[which] <= highestCutHere:
varTable = _GenVarTable(vals,cuts,starts,results,nPossibleRes)
gainHere = entropy.InfoGain(varTable)
if gainHere > maxGain:
maxGain = gainHere
bestCuts = cuts[:]
# recurse on the next vars if needed
if which < nBounds-1:
gainHere,cutsHere=_RecurseOnBounds(vals,cuts[:],which+1,starts,results,nPossibleRes,
varTable = varTable)
if gainHere > maxGain:
maxGain = gainHere
bestCuts = cutsHere
# update this cut
cuts[which] += 1
for i in range(which+1,nBounds):
if cuts[i] == cuts[i-1]:
cuts[i] += 1
return maxGain,bestCuts
def _NewPyRecurseOnBounds(vals,cuts,which,starts,results,nPossibleRes,varTable=None):
""" Primarily intended for internal use
Recursively finds the best quantization boundaries
**Arguments**
- vals: a 1D Numeric array with the values of the variables,
this should be sorted
- cuts: a list with the indices of the quantization bounds
(indices are into _starts_ )
- which: an integer indicating which bound is being adjusted here
(and index into _cuts_ )
- starts: a list of potential starting points for quantization bounds
- results: a 1D Numeric array of integer result codes
- nPossibleRes: an integer with the number of possible result codes
**Returns**
- a 2-tuple containing:
1) the best information gain found so far
2) a list of the quantization bound indices ( _cuts_ for the best case)
**Notes**
- this is not even remotely efficient, which is why a C replacement
was written
"""
nBounds = len(cuts)
maxGain = -1e6
bestCuts = None
highestCutHere = len(starts) - nBounds + which
if varTable is None:
varTable = _GenVarTable(vals,cuts,starts,results,nPossibleRes)
while cuts[which] <= highestCutHere:
gainHere = entropy.InfoGain(varTable)
if gainHere > maxGain:
maxGain = gainHere
bestCuts = cuts[:]
# recurse on the next vars if needed
if which < nBounds-1:
gainHere,cutsHere=_RecurseOnBounds(vals,cuts[:],which+1,starts,results,nPossibleRes,
varTable = None)
if gainHere > maxGain:
maxGain = gainHere
bestCuts = cutsHere
# update this cut
oldCut = cuts[which]
cuts[which] += 1
bot = starts[oldCut]
if oldCut+1 < len(starts):
top = starts[oldCut+1]
else:
top = starts[-1]
for i in range(bot,top):
v = results[i]
varTable[which,v] += 1
varTable[which+1,v] -= 1
for i in range(which+1,nBounds):
if cuts[i] == cuts[i-1]:
cuts[i] += 1
return maxGain,bestCuts
# --------------------------------
#
# find all possible dividing points
#
# There are a couple requirements for a dividing point:
# 1) the dependent variable (descriptor) must change across it,
# 2) the result score must change across it
#
# So, in the list [(0,0),(1,0),(1,1),(2,1)]:
# we should divide before (1,0) and (2,1)
#
# --------------------------------
def _NewPyFindStartPoints(sortVals,sortResults,nData):
startNext = []
tol = 1e-8
blockAct=sortResults[0]
lastBlockAct=None
lastDiv=None
i = 1
while i<nData:
# move to the end of this block:
while i<nData and sortVals[i]-sortVals[i-1]<=tol:
if sortResults[i] != blockAct:
# this block is heterogeneous
blockAct=-1
i+=1
if lastBlockAct is None:
# first time through:
lastBlockAct = blockAct
lastDiv = i
else:
if blockAct==-1 or lastBlockAct==-1 or blockAct!=lastBlockAct:
startNext.append(lastDiv)
lastDiv = i
lastBlockAct = blockAct
else:
lastDiv=i
if i<nData:
blockAct=sortResults[i]
i+=1
# catch the case that the last point also sets a bin:
if blockAct != lastBlockAct :
startNext.append(lastDiv)
return startNext
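# Illustrative sketch (not part of the original module): the example from the
# comment block above, [(0,0),(1,0),(1,1),(2,1)], expressed as a call.  Dividing
# points are expected before indices 1 and 3.
def _demoStartPoints():
  sortVals = [0, 1, 1, 2]
  sortResults = [0, 0, 1, 1]
  return _NewPyFindStartPoints(sortVals, sortResults, len(sortVals))  # -> [1, 3]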
def FindVarMultQuantBounds(vals,nBounds,results,nPossibleRes):
""" finds multiple quantization bounds for a single variable
**Arguments**
- vals: sequence of variable values (assumed to be floats)
- nBounds: the number of quantization bounds to find
- results: a list of result codes (should be integers)
- nPossibleRes: an integer with the number of possible values of the
result variable
**Returns**
- a 2-tuple containing:
1) a list of the quantization bounds (floats)
2) the information gain associated with this quantization
"""
assert len(vals) == len(results), 'vals/results length mismatch'
nData = len(vals)
if nData == 0:
return [],-1e8
|
zenanhu/pluto
|
hydra/server1.py
|
Python
|
apache-2.0
| 1,675 | 0.001194 |
import errno
import signal
import os
import socket
import time
SERVER_ADDRESS = (HOST, PORT) = '', 8888
REQUEST_QUEUE_SIZE = 1024
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1,
os.WNOHANG,
)
except OSError:
return
if pid == 0:
return
def handle_request(client_connection):
request = client_connection.recv(1024)
print 'Child PID: {pid}. Parent PID: {ppid}'.format(pid=os.getpid(), ppid=os.getppid())
print request.decode()
http_response = '''\
HTTP/1.1 200 OK
Hello World!
'''
client_connection.sendall(http_response)
time.sleep(3)
def serve_forever():
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind(SERVER_ADDRESS)
listen_socket.listen(REQUEST_QUEUE_SIZE)
print 'Serving HTTP on port {port} ...'.format(port=PORT)
print 'Parent PID (PPID): {pid}\n'.format(pid=os.getpid())
signal.signal(signal.SIGCHLD, grim_reaper)
while True:
try:
client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
if code == errno.EINTR:
cont
|
inue
else:
raise
pid = os.fork()
if pid == 0:
listen_socket.close()
handle_request(client_connection)
client_connection.close()
os._exit(0)
else:
client_connection.close()
if __na
|
me__ == '__main__':
serve_forever()
|
beardbig/bandsharing
|
tests/bandsharing_tests.py
|
Python
|
gpl-2.0
| 1,065 | 0.030047 |
#!/usr/bin/env python
#:::::::::::::::::::::::::::::::::::::::::::::::::::::
#Author: Damiano Barboni <damianobarboni@gmail.com>
#Version: 0.1
#Description: Script used to test bandsharing.py
#Changelog: Wed Jun 11 12:07:33 CEST 2014
# First test version
#
#:::::::::::::::::::::::::::::::::::::::::::::::::::::
import os
import sys
import shutil
import unittest
current_path = os.path.realpath( __file__ ).split( os.path.basename(__file__) )[0]
bandsharing_path = os.path.abspath(os.path.join( current_path, os.pardir))
sys.path.insert(1, bandsharing_path)
class TestBandSharing( unittest.TestCase ):
def setUp(self):
pas
|
s
def test_bs( self ):
pass
def test_csv( self ):
pass
def makeSuite():
suite = unittest.TestSuite()
suite.addTest( TestBandSharing( 'test_bs' ) )
suite.addTest( TestBandSharing( 'test_cs
|
v' ) )
return suite
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=3).run(makeSuite())
|
doge-search/webdoge
|
liqian/WISC/research/research/spiders/WISCSpider.py
|
Python
|
unlicense
| 678 | 0.022124 |
import scrapy
import re
from research.items import ResearchItem
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class CaltechSpider(scrapy.Spider):
name = "WISC"
allowed_domains = ["cs.wisc.edu"]
start_urls = ["https://www.cs.wisc.edu/research/groups"]
def parse(self, response):
item = ResearchItem()
for sel
|
in response.xpath('//table[@class="views-table cols-2"]'):
item['
|
groupname'] = sel.xpath('caption/text()').extract()[0]
item['proflist'] = []
for selp in sel.xpath('.//div[@class="views-field views-field-name-1"]/span/a'):
tmpname = selp.xpath('text()').extract()
print str(tmpname)
item['proflist'].append(tmpname)
yield item
|
AlexYang1949/FuturesMeasure
|
restful/restfulApi.py
|
Python
|
mit
| 837 | 0.012063 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request
app = Flask(__name__)
from charge.chargeManager import Char
|
geManager
from data.dataProvider import DataProvider
@ap
|
p.route('/')
def hello_world():
return jsonify(testPreMa(['棉花'],20))
@app.route('/result')
def get_result():
name = request.args.get('name').encode('utf-8')
print name
return jsonify(testPreMa([name], 20))
def testPreMa(nameArray,period):
for name in nameArray:
print 'preMa----------------%s--%d周期-------------------' % (name, period)
dp = DataProvider(name=name)
p_list = dp.getData(['date', 'close'])
cm = ChargeManager(p_list, period, nodeStat=False)
cm.startCharge('preMa')
return cm.resultJson()
if __name__ == '__main__':
app.run(host='localhost')
|
zurfyx/simple
|
simple/projects/views.py
|
Python
|
mit
| 17,984 | 0.001335 |
from annoying.functions import get_object_or_None
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Q
from django.http.response import JsonResponse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.utils.decorators import method_decorator
from django.utils.functional import SimpleLazyObject
from django.views.decorators.csrf import csrf_protect
from django.views.generic import ListView
from django.views.generic.base import RedirectView, TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from core.mixins import CustomLoginRequiredMixin, HeadOfDepartmentMixin
from django.core.mail import EmailMessage
from core.utils import WordFilter
from projects import constants
from projects.abstract_models import ProjectAttachment
from projects.forms import ProjectNewForm, ProjectEditForm, ProjectContributeForm, ProjectQuestionForm, ProjectAnswerForm
from projects.mixins import ApprovedProjectRequiredMixin, ProjectAddQuestionMixin
from users.models import User
from mixins import ProjectEditMixin
from .models import Project, ProjectRole, ProjectRating, ProjectTechnicalRequest, ProjectFavorite
class ProjectList(ListView):
"""
Display list of projects.
If a project is unapproved, it will only be returned to HeadOfDepartment.
"""
model = Project
template_name = 'projects/list.html'
context_object_name = 'projects'
ordering = ['-created']
class UserProjectList(ProjectList):
"""
Displays a list of user projects, by filtering the project current list.
"""
# TODO If any project is unapproved, it will only be visible to
# HeadOfDepartment
template_name = 'projects/user-list.html'
def get_queryset(self):
return self.model.objects.filter(user=self.kwargs['user'])
def get_context_data(self, **kwargs):
context = super(UserProjectList, self).get_context_data(**kwargs)
if type(self.kwargs['user']) != SimpleLazyObject:
context['user'] = get_object_or_404(User, id=self.kwargs['user'])
return context
class ProjectDetail(DetailView):
"""
Display project details.
    If a project is unapproved, it will only be returned to HeadOfDepartment;
    a 404 response is raised otherwise.
"""
# TODO If unapproved, only visible to HeadOfDepartment.
model = Project
template_name = 'projects/detail.html'
context_object_name = 'project'
def get_object(self, queryset=None):
obj = get_object_or_404(self.model, pk=self.kwargs['pk'])
obj.increase_visits()
obj.save()
return obj
def get_context_data(self, **kwargs):
context = super(ProjectDetail, self).get_context_data(**kwargs)
context['user_project_rating'] = \
get_object_or_None(ProjectRating,
project=self.kwargs['pk'],
user=self.request.user) \
if self.request.user.is_authenticated() else None
context['user_project_role'] = \
get_object_or_None(ProjectRole,
project=self.kwargs['pk'],
user=self.request.user)
return context
class SearchProject(ListView):
"""
Display project search.
    If a user inputs a project's title, matching projects are shown; otherwise no results are returned.
"""
model = Project
context_object_name = 'projects'
template_name = 'projects/list.html'
def get_queryset(self):
filter = self.kwargs['title']
search = self.model.objects.filter(title__icontains=filter)
return search
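# Illustrative sketch (not part of the original module): the search view above
# boils down to a case-insensitive substring filter on the title field.  The
# helper name is hypothetical.
def _search_projects_by_title(fragment):
    return Project.objects.filter(title__icontains=fragment)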
class ProjectNewView(CustomLoginRequiredMixin, CreateView):
template_name = 'projects/new.html'
form_class = ProjectNewForm
success_url = 'projects:pending-approval'
def _create_owner_role(self, project, user):
"""
Owner is always a scientist of the project
"""
project_role = ProjectRole(project=project, user=user,
role=constants.ProjectRoles.SCIENTIST,
approved_role=True)
project_role.save()
def form_valid(self, form):
project = form.save(commit=False)
project.user = self.request.user
form.instance.title = WordFilter().clean(form.instance.title)
form.instance.body = WordFilter().clean(form.instance.body)
project.save()
# save attachments
|
for each in form.cleaned_data['attachments']:
ProjectAttachment.objects.create(project=project, object=each)
# create owner role
self._create_owner_role(project, project.user)
|
messages.success(self.request, project.title)
return HttpResponseRedirect(reverse(self.success_url))
class ProjectPendingApproval(TemplateView):
"""
Basic view to display that a project is "now pending to be approved".
"""
template_name = 'projects/pending-approval.html'
class ProjectApproveList(HeadOfDepartmentMixin, ListView):
"""
Displays a list of projects pending to be approved. Only accessible through
a HeadOfDepartment account.
"""
model = Project
template_name = 'projects/approve-list.html'
context_object_name = 'projects'
def get_queryset(self):
return self.model.objects.filter(awaiting_approval=True)
class ProjectApproveDeny(HeadOfDepartmentMixin, RedirectView):
"""
Generic redirect view to handle the action approve / deny, which can only
be accessed through a HeadOfDepartment account.
"""
pattern_name = 'projects:approve-list'
approve = False
def get_redirect_url(self, **kwargs):
project = get_object_or_404(Project, pk=kwargs['pk'])
project.awaiting_approval = False
project.approved = self.approve
project.save()
return reverse(self.pattern_name)
class ProjectApproveView(ProjectApproveDeny):
approve = True
class ProjectDenyView(ProjectApproveDeny):
approve = False
class ProjectContributeView(CustomLoginRequiredMixin,
ApprovedProjectRequiredMixin, CreateView):
"""
Form through which any logged in user can request to participate in an
approved project.
    If the project has already received a petition from the user that is
    still pending review, the new petition is ignored.
"""
template_name = 'projects/contribute.html'
form_class = ProjectContributeForm
success_url = 'projects:contribute'
def get_initial(self):
return {
'project': self.kwargs['pk'],
}
def form_valid(self, form):
if self._previous_role_petition():
url_contribute = reverse('projects:contribute',
kwargs={'pk': self.kwargs['pk']})
return HttpResponseRedirect(url_contribute)
project_role = form.save(commit=False)
project_role.user = self.request.user
project_role.save()
url_project = reverse(self.success_url,
kwargs={'pk': self.kwargs['pk']})
return HttpResponseRedirect(url_project)
def _previous_role_petition(self):
"""
No duplicate role petitions per project can be recorded.
Will return to the contribute view if so.
"""
petition = get_object_or_None(ProjectRole, user=self.request.user,
project=self.kwargs['pk'])
return petition is not None
def get_context_data(self, **kwargs):
context = super(ProjectContributeView, self).get_context_data(**kwargs)
project = Project.objects.get(pk=self.kwargs['pk'])
project_role = get_object_or_None(project.projectrole_set,
user=self.request.user)
context['project'] = project
context['project_role'] = project_role
return context
class ProjectApproveContributionList(CustomLogi
|
almossawi/firefox-code-quality
|
scripts/codequality.py
|
Python
|
mpl-2.0
| 4,809 | 0.02121 |
import numpy as np
from scipy import sparse
import statistics
def metrics(data_file):
output_str = ''
dsms = []
# the first thing we do is load the csv file (file #,from,to)
# into a DSM; we do this by converting the triples to a sparse matrix
# dsm is the first-order DSM of dependencies
dsm_initial = loadDsm(data_file)
# calculate the visibility matrices for all path lengths
dsms = raiseAllPowers(dsm_initial, -1)
# data file name
output_str = output_str + data_file + ','
# the final visibility matrix
dsm_visibility = dsms[len(dsms) - 1]
# number of files
output_str = output_str + str(len(dsm_initial)) + ','
# now, get the fan-in and fan-out data and calculate the density and propagation cost
[fi,fo] = getFiFo(dsm_initial)
[vfi,vfo] = getFiFo(dsm_visibility)
# get median values of vfi/vfo to put file counts into four buckets
arr_vfo = np.array(vfo).flatten()
arr_vfi = np.array(vfi).flatten()
arr_fo = np.array(fo).flatten()
arr_fi = np.array(fi).flatten()
density = (np.count_nonzero(dsm_initial) / len(dsm_initial) / len(dsm_initial)) * 100
output_str = output_str + str(density / 100) + ','
propagation_cost = sum(arr_vfo) / (len(dsm_initial) * len(dsm_initial)) * 100
output_str = output_str + str(propagation_cost / 100) + ','
vfo_median = statistics.median(arr_vfo)
vfi_median = statistics.median(list(filter(lambda x: x != 0, arr_vfi)))
vfo_mean = statistics.mean(np.array(arr_vfo).flatten())
vfi_mean = statistics.mean(list(filter(lambda x: x != 0, arr_vfi)))
vfi_mode = statistics.mode(arr_vfi)
vfo_mode = statistics.mode(arr_vfo)
fo_median = statistics.median(arr_fo)
fi_median = statistics.median(arr_fi)
control_size = 0 # high vfo, low vfi
core_size = 0 # high vfo, high vfi
peripheral_size = 0 # low vfo, low vfi
shared_size = 0 # low vfo, high vfi
for i, val in enumerate(vfi):
# base the cutoff points on the visibility matrix rather than first-order matrix
# otherwise, we'd use fi, fo, fi_median and fo_median
if vfi[i] >= vfi_median and vfo[i] >= vfo_median:
core_size += 1
elif vfi[i] < vfi_median and vfo[i] < vfo_median:
peripheral_size += 1
elif vfi[i] <= vfi_median and vfo[i] > vfo_median:
control_size += 1
elif vfi[i] > vfi_median and vfo[i] <= vfo_median:
shared_size += 1
print('vfo mean: ', vfo_mean)
print('vfi mean: ', vfi_mean)
print('vfo median: ', vfo_median)
print('vfi median: ', vfi_median)
print('vfi mode: ', vfi_mode)
print('fo median: ', fo_median)
print('fi median: ', fi_median)
print('core: ', core_size)
print('peripheral: ', peripheral_size)
print('shared: ', shared_size)
print('control: ', control_size)
    print('vfo mode: ', vfo_mode)
output_str = output_str + str(core_size) + ','
output_str = output_str + str(peripheral_size) + ','
output_str = output_str + str(shared_size) + ','
output_str = output_str + str(control_size) + ','
output_str = output_str + str(vfo_median) + ','
output_str = output_str + str(vfi_median) + ','
output_str = output_str + str(fo_median) + ','
output_str = output_str + str(fi_median)
return output_str
def raiseAllPowers(initial_matrix, max_paths):
initial_matrix = sparse.csr_matrix(initial_matrix
|
)
initial_matrix.data.fill(1)
done = 0
current_path_length = 0
matrices = []
if max_paths == -1:
max_paths = 1000
matrices.append(initial_matrix)
while done == 0 and current_path_length < max_paths:
print('
|
Calculating DSM for path length = ', current_path_length + 1)
# square the current matrix
matrix_squared = matrices[current_path_length] * matrices[current_path_length]
# sum the matrix with the previous one
matrix_squared = matrix_squared + matrices[current_path_length]
        # binarize the matrix (set every nonzero entry to 1) so that the iteration converges
matrix_squared.data.fill(1)
# nnz elements
print(len(matrix_squared.nonzero()[0]), len(matrices[current_path_length].nonzero()[0]))
# when we've achieved the transitive closure of our matrix, we're done
if len(matrix_squared.nonzero()[0]) == len(matrices[current_path_length].nonzero()[0]):
done = 1
continue
else:
matrices.append(matrix_squared)
current_path_length += 1
return matrices
def getFiFo(dsmProp):
FI = dsmProp.sum(axis=0) # sum over columns
FO = dsmProp.sum(axis=1) # sum over rows
FI = FI.transpose()
return [FI, FO]
# credit https://gist.github.com/kevinavery/9613505
def loadDsm(filename):
DATA = np.loadtxt(filename, delimiter=',')
dims = DATA.shape[1] - 1
shape = [np.max(DATA[:,i]) for i in range(dims)]
M = np.zeros(shape=shape)
for row in DATA:
index = tuple(row[:-1] - 1)
M.itemset(index, row[-1])
return M
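# Illustrative sketch (not part of the original script): propagation cost for a
# tiny three-file chain where file 1 depends on 2 and 2 depends on 3.  The
# visibility matrix adds the indirect 1 -> 3 dependency, so the cost is
# 3 / (3 * 3), i.e. roughly 33.3 percent, mirroring the formula in metrics().
def _demo_propagation_cost():
    dsm = np.array([[0, 1, 0],
                    [0, 0, 1],
                    [0, 0, 0]])
    visibility = raiseAllPowers(dsm, -1)[-1]
    _, vfo = getFiFo(visibility)
    return sum(np.array(vfo).flatten()) / (len(dsm) * len(dsm)) * 100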
|
kitsunde/jack-bower
|
bower/tests/test_settings.py
|
Python
|
mit
| 783 | 0 |
import os
DEBUG = True
SITE_ID = 1
APP_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ''))
DATABASES = {
'default': {
|
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
STATICFILES_DIRS = (
os.path.join(APP_ROOT, 'static'),
)
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'djan
|
go.contrib.sitemaps',
'django.contrib.sites',
'bower',
'bower.tests.test_app',
]
SECRET_KEY = 'foobar'
TEST_RUNNER = 'discover_runner.DiscoverRunner'
|
aerofs/zendesk-help-center-backer
|
zendesk/create_new_post_shell.py
|
Python
|
bsd-3-clause
| 2,438 | 0.006563 |
"""
Python script to create a new article in a given section id.
"""
import os
import sys
from zdesk import Zendesk
from scripts import file_constants
from colorama import init
from colorama import Fore
init()
def _create_shell(section_id):
# Get subdomain.
try:
subdomain = os.environ["ZENDESK_SUBDOMAIN"]
url = file_constants.get_url_from_subdomain(subdomain)
except KeyError:
print(Fore.RED + "Please set the environment variable ZENDESK_SUBDOMAIN" + Fore.RESET)
sys.exit(1)
# Get username.
try:
username = os.environ["ZENDESK_USR"]
except KeyError:
print(Fore.RED + "Please set the environment variable ZENDESK_USR" + Fore.RESET)
|
sys.exit(1)
# Get password.
try:
password = os.environ["ZENDESK_PWD"]
except KeyError:
print(Fore.RED +
|
"Please set the environment variable ZENDESK_PWD" + Fore.RESET)
sys.exit(1)
zendesk = Zendesk(url, username, password)
# Add a temporary title and leave it in draft mode.
new_article = {"article": {"title": "Temporary Title", "draft": True}}
response = zendesk.help_center_section_article_create(id = section_id, data = new_article)
# Report success.
print('Successfully created the article.')
# Create the article shell locally.
article_id = response['article']['id']
_empty_article(str(article_id))
def _empty_article(article_id):
article = "posts/" + article_id + "/index.html"
title = "posts/" + article_id + "/title.html"
if article_id.isdigit() and not os.path.isfile(article):
# Makes the folder for the article and pictures to be placed in.
os.makedirs('posts/' + article_id)
# Create the article and title shell.
open(article, 'a').close()
open(title, 'a').close()
# Provides the user with the location of the html file that was created.
print "The article is located at " + article
print "Enter the article's title at " + title
elif os.path.isfile(article):
print (Fore.RED + "Error: This article ID already exists: " + article_id + Fore.RESET)
sys.exit(1)
else:
print (Fore.RED + "Error: This article ID is invalid: " + article_id + Fore.RESET)
sys.exit(1)
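# Illustrative sketch (not part of the original script): the on-disk layout that
# _empty_article above creates for a new article shell, as a hypothetical helper.
def _article_paths(article_id):
    return ("posts/" + article_id + "/index.html",
            "posts/" + article_id + "/title.html")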
def main():
if len(sys.argv) != 2:
print('Usage: python %s <section_id>' % sys.argv[0])
else:
_create_shell(sys.argv[1])
|
alasdairtran/mclearn
|
projects/jakub/test_appx_gp.py
|
Python
|
bsd-3-clause
| 3,431 | 0.002332 |
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn.gaussian_process
import sklearn.kernel_approximation
import splitter
from appx_gaussian_processes import appx_gp
TRAINING_NUM = 1500
TESTING_NUM = 50000
ALPHA = .003
LENGTH_SCALE = 1
GAMMA = .5 / (LENGTH_SCALE ** 2)
COMPONENTS = 100
def interval_in_box_from_line(box, line):
x_min, x_max, y_min, y_max = box
m, b = line
x_min_y = m * x_min + b
x_max_y = m * x_max + b
y_min_x = (y_min - b) / m
y_max_x = (y_max - b) / m
endpoints = set()
if y_min <= x_min_y <= y_max:
endpoints.add((x_min, x_min_y))
if y_min <= x_max_y <= y_max:
endpoints.add((x_max, x_max_y))
if x_min <= y_min_x <= x_max:
endpoints.add((y_min_x, y_min))
if x_min <= y_max_x <= x_max:
endpoints.add((y_max_x, y_max))
return endpoints
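# Illustrative sketch (not part of the original script): clipping the line y = x
# (m=1, b=0) against the box [0, 2] x [0, 2] returns the two corners where the
# line crosses the boundary.
def _demo_interval_in_box():
    # expected: {(0, 0), (2, 2)}
    return interval_in_box_from_line(box=(0, 2, 0, 2), line=(1, 0))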
def approximate_kernel(train_X, test_X):
sampler = sklearn.kernel_approximation.RBFSampler(gamma=GAMMA, n_components=COMPONENTS)
sampler.fit(train_X)
appx_train_X = sampler.transform(train_X)
appx_test_X = sampler.transform(test_X)
return appx_train_X, appx_test_X
def main(path_in):
print('Loading data...')
data = splitter.load(path_in)
(train_X, train_y), (test_X, test_y) = splitter.split(data, TRAINING_NUM,
TESTING_NUM)
try:
gp_sigmas = np.loadtxt('gp_preds.txt')
assert gp_sigmas.shape == (TESTING_NUM,)
except (FileNotFoundError, AssertionError):
print('Fitting GP...')
kernel = sklearn.gaussian_process.kernels.RBF(
length_scale=LENGTH_SCALE)
gp = sklearn.gaussian_process.GaussianProcessRegressor(
kernel=kernel,
alpha=ALPHA,
copy_X_train=False)
gp.fit(train_X, train_y)
print('Predicting GP...')
_, gp_sigmas = gp.predict(test_X, return_std=True)
np.savetxt('gp_preds.txt', gp_sigmas)
print('Approximating kernel...')
appx_train_X, appx_test_X = approximate_kernel(train_X, test_X)
print('Fitting approximate GP...')
agp = appx_gp.AppxGaussianProcessRegressor(alpha=ALPHA)
agp.fit(appx_train_X, train_y)
print('Predicting approximate GP...')
_, agp_sigmas = agp.predict(appx_test_X, return_std=True)
print('Finding best fit...')
best_fit = np.polyfit(gp_sigmas, agp_sigmas, 1)
best_fit_box = (min(gp_sigmas), max(gp_sigmas),
min(agp_sigmas), max(agp_sigmas))
best_fit_endpoints = interval_in_box_from_line(best_fit_box, best_fit)
best_fit_xs, best_fit_ys = zip(*best_fit_endpoints)
print('Plotting...')
f = plt.figure()
ax = f.add_subplot(111)
sc = plt.scatter(gp_sigmas, agp_sigmas, s=.2, c=list(test_y))
plt.plot(best_fit_xs, best_fit_ys, color='red', label='Linear fit')
|
plt.title(r'$\gamma = {:.4},$ #components$= {}$'.format(GAMMA,
COMPONENTS))
plt.xlabel('GP uncertainty')
plt.ylabel('Approximate GP uncertainty')
|
plt.text(.975, .1, '$y = {:.4}x {:+.4}$'.format(*best_fit),
horizontalalignment='right',
verticalalignment='bottom',
transform = ax.transAxes)
colorbar = plt.colorbar(sc)
colorbar.set_label('Redshift')
plt.legend(loc='lower right')
plt.show()
if __name__ == '__main__':
main(sys.argv[1])
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/IPython/html/widgets/tests/test_interaction.py
|
Python
|
bsd-3-clause
| 13,235 | 0.013827 |
"""Test interact and interactive."""
#-----------------------------------------------------------------------------
# Copyright (C) 2014 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
from collections import OrderedDict
import nose.tools as nt
import IPython.testing.tools as tt
# from IPython.core.getipython import get_ipython
from IPython.html import widgets
from IPython.html.widgets import interact, interactive, Widget, interaction
from IPython.utils.py3compat import annotate
#-----------------------------------------------------------------------------
# Utility stuff
#-----------------------------------------------------------------------------
class DummyComm(object):
comm_id = 'a-b-c-d'
def send(self, *args, **kwargs):
pass
def close(self, *args, **kwargs):
pass
_widget_attrs = {}
displayed = []
def setup():
_widget_attrs['comm'] = Widget.comm
Widget.comm = DummyComm()
_widget_attrs['_ipython_display_'] = Widget._ipython_display_
def raise_not_implemented(*args, **kwargs):
raise NotImplementedError()
Widget._ipython_display_ = raise_not_implemented
def teardown():
for attr, value in _widget_attrs.items():
setattr(Widget, attr, value)
def f(**kwargs):
pass
def clear_display():
global displayed
displayed = []
def record_display(*args):
displayed.extend(args)
#-----------------------------------------------------------------------------
# Actual tests
#-----------------------------------------------------------------------------
def check_widget(w, **d):
"""Check a single widget against a dict"""
for attr, expected in d.items():
if attr == 'cls':
nt.assert_is(w.__class__, expected)
else:
value = getattr(w, attr)
nt.assert_equal(value, expected,
"%s.%s = %r != %r" % (w.__class__.__name__, attr, value, expected)
)
def check_widgets(container, **to_check):
"""Check that widgets are created as expected"""
# build a widget dictionary, so it matches
widgets = {}
for w in container.children:
widgets[w.description] = w
for key, d in to_check.items():
nt.assert_in(key, widgets)
check_widget(widgets[key], **d)
def test_single_value_string():
a = u'hello'
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.TextWidget,
description='a',
value=a,
)
def test_single_value_bool():
for a in (True, False):
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.CheckboxWidget,
description='a',
value=a,
)
def test_single_value_dict():
for d in [
dict(a=5),
dict(a=5, b='b', c=dict),
]:
c = interactive(f, d=d)
w = c.children[0]
check_widget(w,
cls=widgets.DropdownWidget,
description='d',
values=d,
value=next(iter(d.values())),
)
def test_single_value_float():
for a in (2.25, 1.0, -3.5):
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.FloatSliderWidget,
description='a',
value=a,
min= -a if a > 0 else 3*a,
max= 3*a if a > 0 else -a,
step=0.1,
readout=True,
)
def test_single_value_int():
for a in (1, 5, -3):
c = interactive(f, a=a)
nt.assert_equal(len(c.children), 1)
w = c.children[0]
check_widget(w,
cls=widgets.IntSliderWidget,
description='a',
value=a,
min= -a if a > 0 else 3*a,
max= 3*a if a > 0 else -a,
step=1,
readout=True,
)
def test_list_tuple_2_int():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,1))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,-1))
for min, max in [ (0,1), (1,10), (1,2), (-5,5), (-20,-19) ]:
c = interactive(f, tup=(min, max), lis=[min, max])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.IntSliderWidget,
min=min,
max=max,
step=1,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_3_int():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2,0))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2,-1))
for min, max, step in [ (0,2,1), (1,10,2), (1,100,2), (-5,5,4), (-100,-20,4) ]:
c = interactive(f, tup=(min, max, step), lis=[min, max, step])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.IntSliderWidget,
min=min,
max=max,
step=step,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_2_float():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1.0,1.0))
with nt.assert_raises(ValueError):
c = interactive(f, tup=
|
(0.5,-0.5))
for min, max in [ (0.5, 1.5), (1.1,10.2), (1,2.2), (-5.,5), (-20,-19.) ]:
c = interactive(f, tup=(min, max), lis=[min, max])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.FloatSliderWidget,
min=min,
max=max,
step=.1,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_3_float():
with nt.assert_raises(ValueError):
c = interacti
|
ve(f, tup=(1,2,0.0))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(-1,-2,1.))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2.,-1.))
for min, max, step in [ (0.,2,1), (1,10.,2), (1,100,2.), (-5.,5.,4), (-100,-20.,4.) ]:
c = interactive(f, tup=(min, max, step), lis=[min, max, step])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.FloatSliderWidget,
min=min,
max=max,
step=step,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_str():
values = ['hello', 'there', 'guy']
first = values[0]
dvalues = OrderedDict((v,v) for v in values)
c = interactive(f, tup=tuple(values), lis=list(values))
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.DropdownWidget,
value=first,
values=dvalues
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_invalid():
for bad in [
(),
(5, 'hi'),
('hi', 5),
({},),
(None,),
]:
with nt.assert_raises(ValueError):
print(bad) # because there is no custom message in assert_raises
c = interactive(f, tup=bad)
def test_defaults():
@annotate(n=10)
def f(n, f=4.5, g=1):
pass
c = interactive(f)
check_widgets(c,
n=dict(
cls=widgets.IntSliderWidget,
value=10,
),
f=dict(
cls=widgets.FloatSliderWidget,
value=4.5,
),
g=dict(
cls=widgets.IntSliderWidget,
value=1,
),
)
def test_default_values():
@annotate(n=10, f=(0, 10.), g=5, h={'a': 1, 'b': 2}, j=['hi', 'there'])
def f(n, f=4.5, g=1, h=2, j='there'):
pass
c = interactive(f)
check_widgets(c,
n=dict(
cls=widgets.IntSliderWidget,
value=10,
),
f=dict(
cls=widgets.FloatSliderWidget,
value=4.5,
),
g=dict(
cls=widgets.IntSliderWidget,
va
|
mlperf/training_results_v0.5
|
v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/staging/models/rough/nmt_gpu/nmt.py
|
Python
|
apache-2.0
| 46,513 | 0.007095 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import time
import tensorflow as tf
from mlperf_compliance import mlperf_log
import estimator
from utils import evaluation_utils
from utils import iterator_utils
from utils import misc_utils as utils
from utils import vocab_utils
from variable_mgr import constants
utils.check_tensorflow_version()
FLAGS = None
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument(
"--num_units", type=int, default=1024, help="Network size.")
parser.add_argument(
"--num_layers", type=int, default=4, help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument(
"--encoder_type",
type=str,
default="gnmt",
help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""")
parser.add_argument(
"--residual",
type="bool",
nargs="?",
const=True,
default=True,
help="Whether to add residual connections.")
parser.add_argument("--time_major", type="bool", nargs="?", const=True,
defaul
|
t=True,
help="Whether to use time-major mode for dynamic RNN.")
parser.add_argument("--
|
num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# attention mechanisms
parser.add_argument(
"--attention",
type=str,
default="normed_bahdanau",
help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""")
parser.add_argument(
"--attention_architecture",
type=str,
default="gnmt_v2",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""")
parser.add_argument(
"--output_attention", type="bool", nargs="?", const=True,
default=True,
help="""\
Only used in standard attention_architecture. Whether use attention as
the cell output at each timestep.
.\
""")
parser.add_argument(
"--pass_hidden_state", type="bool", nargs="?", const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""")
# optimizer
parser.add_argument(
"--optimizer", type=str, default="adam", help="sgd | adam")
parser.add_argument(
"--learning_rate",
type=float,
default=5e-4,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument("--warmup_steps", type=int, default=0,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_scheme", type=str, default="", help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong5: after 1/2 num train steps, we start halving the learning rate
for 5 times before finishing.\
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""")
parser.add_argument(
"--num_train_steps", type=int, default=100000, help="Num steps to train.")
parser.add_argument(
"--max_train_epochs", type=int, default=8, help="Max number of epochs.")
parser.add_argument("--num_examples_per_epoch", type=int, default=4068191,
help="Number of examples in one epoch")
parser.add_argument(
"--target_bleu", type=float, default=22.0, help="Target bleu.")
parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
const=True,
default=True,
help=("Whether try colocating gradients with "
"corresponding op"))
parser.add_argument("--label_smoothing", type=float, default=0.1,
help=("If nonzero, smooth the labels towards "
"1/num_classes."))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument(
"--src", type=str, default="en", help="Source suffix, e.g., en.")
parser.add_argument(
"--tgt", type=str, default="de", help="Target suffix, e.g., de.")
parser.add_argument(
"--data_dir", type=str, default="",
help="Training/eval data directory.")
parser.add_argument(
"--train_prefix",
type=str,
default="train.tok.clean.bpe.32000",
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--dev_prefix",
type=str,
default="newstest2014.tok.bpe.32000",
help="Dev prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--test_prefix",
type=str,
default="newstest2014.tok.bpe.32000",
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--output_dir", type=str, default="",
help="Store log/model files.")
# Vocab
parser.add_argument(
"--vocab_prefix",
type=str,
default="vocab.bpe.32000",
help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument(
"--embed_prefix",
type=str,
default=None,
help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
The embedding files should be Glove formatted txt files.\
""")
parser.add_argument("--sos", type=str, default="<s>",
help="Start-of-sentence symbol.")
parser.add_argument("--eos", type=str, default="</s>",
help="End-of-sentence symbol.")
parser.add_argument(
"--share_vocab",
type="bool",
nargs="?",
const=True,
default=True,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
Whether check special sos, eos, unk tokens exist in the
vocab files.\
""")
# Se
|
jocelynj/weboob
|
weboob/applications/webcontentedit/webcontentedit.py
|
Python
|
gpl-3.0
| 3,817 | 0.00131 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is d
|
istributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# python2.5 compatibility
from _
|
_future__ import with_statement
import os
import sys
import tempfile
from weboob.core.bcall import CallErrors
from weboob.capabilities.content import ICapContent
from weboob.tools.application.repl import ReplApplication
__all__ = ['WebContentEdit']
class WebContentEdit(ReplApplication):
APPNAME = 'webcontentedit'
VERSION = '0.4'
COPYRIGHT = 'Copyright(C) 2010 Romain Bignon'
CAPS = ICapContent
def do_edit(self, line):
"""
edit ID
        Edit content with $EDITOR, then push it to the website.
"""
contents = []
for id in line.split():
_id, backend_name = self.parse_id(id)
backend_names = (backend_name,) if backend_name is not None else self.enabled_backends
contents += [content for backend, content in self.do('get_content', _id, backends=backend_names) if content]
if len(contents) == 0:
print >>sys.stderr, 'No contents found'
return 1
paths = {}
for content in contents:
tmpdir = os.path.join(tempfile.gettempdir(), "weboob")
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
fd, path = tempfile.mkstemp(prefix='%s_' % content.id.replace(os.path.sep, '_'), dir=tmpdir)
with os.fdopen(fd, 'w') as f:
data = content.content
if isinstance(data, unicode):
data = data.encode('utf-8')
f.write(data)
paths[path] = content
params = ''
if os.environ['EDITOR'] == 'vim':
params = '-p'
os.system("$EDITOR %s %s" % (params, ' '.join(paths.iterkeys())))
for path, content in paths.iteritems():
with open(path, 'r') as f:
data = f.read()
try:
data = data.decode('utf-8')
except UnicodeError:
pass
if content.content != data:
content.content = data
else:
contents.remove(content)
if len(contents) == 0:
print 'No changes. Abort.'
return
print 'Contents changed:\n%s' % ('\n'.join([' * %s' % content.id for content in contents]))
message = self.ask('Enter a commit message', default='')
if not self.ask('Do you want to push?', default=True):
return
errors = CallErrors([])
for content in contents:
path = [path for path, c in paths.iteritems() if c == content][0]
sys.stdout.write('Pushing %s...' % content.id)
sys.stdout.flush()
try:
self.do('push_content', content, message, backends=[content.backend]).wait()
except CallErrors, e:
errors.errors += e.errors
sys.stdout.write(' error (content saved in %s)\n' % path)
else:
sys.stdout.write(' done\n')
os.unlink(path)
if len(errors.errors) > 0:
raise errors
|
thejeshgn/quest
|
quest/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 349 | 0.005731 |
# -*- coding: utf-
|
8 -*-
f
|
rom south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
}
complete_apps = ['quest']
|
jmoreman/eTrack
|
etrack/urls.py
|
Python
|
mit
| 355 | 0 |
import os
from django.conf.urls import url, include
urlp
|
atterns = [
url(r'^misc/', include('misc.urls')),
url(r'^qualification/', include('qualification.urls')),
]
if os.environ.get('DJANGO_SETTINGS_MODULE')
|
== 'etrack.settings.dev':
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
sjdv1982/seamless
|
tests/lowlevel/codepen-seamless-client.py
|
Python
|
mit
| 643 | 0.001555 |
# run with https://codepen.io/sjdv1982/pen/MzNvJv
# copy-paste seamless-client.js
from seamless.highlevel import Context
ctx = Context()
ctx.cell1 = "test!"
ctx.cell1.share()
ctx.translate()
ctx.compute()
from seamless import shareserver
print(sha
|
reserver.namespaces["ctx"].shares)
print(shareserver.namespaces["ctx"].shares["cell1"].bound)
print(shareserver.namespaces["ctx"].shares["cell1"].bound.cell)
ctx.cell1.celltype = "plain"
ctx.translate(force=True)
ctx.compute()
print(shareserver.namespaces["ct
|
x"].shares)
print(shareserver.namespaces["ctx"].shares["cell1"].bound)
print(shareserver.namespaces["ctx"].shares["cell1"].bound.cell)
|
mcjug2015/mfserver2
|
django_app/forms.py
|
Python
|
gpl-3.0
| 444 | 0 |
''' forms, mostly used for simple tastypie valida
|
tion '''
from django.contrib.gis import forms
class MeetingForm(forms.Form):
''' form for meetings '''
day_of_week = forms.IntegerField(min_value=1, max_value=7)
start_time = forms.TimeField()
end_time = forms.TimeField()
name = forms.CharField(max_length=100)
description = forms.CharField(max_length=255, required=False)
address = forms.CharField(max_length=300
|
)
|
blink1073/imageio
|
imageio/core/format.py
|
Python
|
bsd-2-clause
| 21,404 | 0.006634 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
"""
.. note::
imageio is under construction, some details with regard to the
Reader and Writer classes may change.
These are the main classes of imageio. They expose an interface for
advanced users and plugin developers. A brief overview:
* imageio.FormatManager - for keeping track of registered formats.
* imageio.Format - representation of a file format reader/writer
* imageio.Format.Reader - object used during the reading of a file.
* imageio.Format.Writer - object used during saving a file.
* imageio.Request - used to store the filename and other info.
Plugins need to implement a Format class and register
a format object using ``imageio.formats.add_format()``.
"""
from __future__ import absolute_import, print_function, division
# todo: do we even use the known extensions?
# Some notes:
#
# The classes in this module use the Request object to pass filename and
# related info around. This request object is instantiated in
# imageio.get_reader and imageio.get_writer.
from __future__ import with_statement
import os
import numpy as np
from . import Image, asarray
from . import string_types, text_type, binary_type # noqa
class Format:
""" Represents an implementation to read/write a particular file format
A format instance is responsible for 1) providing information about
a format; 2) determining whether a certain file can be read/written
with this format; 3) providing a reader/writer class.
Generally, imageio will select the right format and use that to
read/write an image. A format can also be explicitly chosen in all
read/write functions. Use ``print(format)``, or ``help(format_name)``
to see its documentation.
To implement a specific format, one should create a subclass of
Format and the Format.Reader and Format.Writer classes. see
:doc:`plugins` for details.
Parameters
----------
name : str
A short name of this format. Users can select a format using its name.
description : str
A one-line description of the format.
extensions : str | list | None
List of filename extensions that this format supports. If a
string is passed it should be space or comma separated. The
extensions are used in the documentation and to allow users to
select a format by file extension. It is not used to determine
what format to use for reading/saving a file.
modes : str
A string containing the modes that this format can handle ('iIvV').
This attribute is used in the documentation and to select the
formats when reading/saving a file.
"""
def __init__(self, name, description, extensions=None, modes=None):
# Store name and description
self._name = name.upper()
self._description = description
# Store extensions, do some effort to normalize them.
# They are stored as a list of lowercase strings without leading dots.
if extensions is None:
extensions = []
elif isinstance(extensions, string_types):
extensions = extensions.replace(',', ' ').split(' ')
#
if isinstance(extensions, (tuple, list)):
self._extensions = [e.strip('.').lower() for e in extensions if e]
else:
raise ValueError('Invalid value for extensions given.')
# Store mode
self._modes = modes or ''
if not isinstance(self._modes, string_types):
raise ValueError('Invalid value for modes given.')
for m in self._modes:
if m not in 'iIvV?':
raise ValueError('Invalid value for mode given.')
def __repr__(self):
# Short description
return '<Format %s - %s>' % (self.name, self.description)
def __str__(self):
return self.doc
@property
def doc(self):
""" The documentation for this format (name + description + docstring).
"""
        # Our docstring is assumed to be indented by four spaces. The
# first line needs special attention.
return '%s - %s\n\n %s\n' % (self.name, self.description,
self.__doc__.strip())
@property
def name(self):
""" The name of this format.
"""
return self._name
@property
def description(self):
""" A short description of this format.
"""
return self._description
@property
def extensions(self):
""" A list of file extensions supported by this plugin.
These are all lowercase without a leading dot.
"""
return self._extensions
@property
def modes(self):
""" A string specifying the modes that this format can handle.
"""
return self._modes
def get_reader(self, request):
""" get_reader(request)
Return a reader object that can be used to read data and info
from the given file. Users are encouraged to use
imageio.get_reader() instead.
"""
select_mode = request.mode[1] if request.mode[1] in 'iIvV' else ''
if select_mode not in self.modes:
raise RuntimeError('Format %s cannot read in mode %r' %
(self.name, select_mode))
|
return self.Reader(self, request)
def get_writer(self, request):
""" get_writer(request)
Return a writer object that can be used to write data and info
to the given file. Users are encouraged to use
imageio.get_writer() instead.
"""
select_mode = request.mode[1] if request.mode[1] in 'iIvV' else ''
if select_mode not in s
|
elf.modes:
raise RuntimeError('Format %s cannot write in mode %r' %
(self.name, select_mode))
return self.Writer(self, request)
def can_read(self, request):
""" can_read(request)
Get whether this format can read data from the specified uri.
"""
return self._can_read(request)
def can_write(self, request):
""" can_write(request)
        Get whether this format can write data to the specified uri.
"""
return self._can_write(request)
def _can_read(self, request): # pragma: no cover
return None # Plugins must implement this
def _can_write(self, request): # pragma: no cover
return None # Plugins must implement this
# -----
class _BaseReaderWriter(object):
""" Base class for the Reader and Writer class to implement common
functionality. It implements a similar approach for opening/closing
and context management as Python's file objects.
"""
def __init__(self, format, request):
self.__closed = False
self._BaseReaderWriter_last_index = -1
self._format = format
self._request = request
# Open the reader/writer
self._open(**self.request.kwargs.copy())
@property
def format(self):
""" The :class:`.Format` object corresponding to the current
read/write operation.
"""
return self._format
@property
def request(self):
""" The :class:`.Request` object corresponding to the
current read/write operation.
"""
return self._request
def __enter__(self):
self._checkClosed()
return self
def __exit__(self, type, value, traceback):
if value is None:
# Otherwise error in close hide the real error.
self.close()
def __del__(self):
try:
self.close()
except Exception: # pragma: no cover
            pass  # Suppress noise when called du
|
akrherz/pyIEM
|
util/make_ramps.py
|
Python
|
mit
| 848 | 0 |
"""Serialization of geometries for use in pyIEM.plot mapping
We use a pickled protocol=2, which is compat binary.
"""
from pandas import read_sql
from pyiem.util import get_dbconnstr
PATH = "../src/pyiem/data/ramps/"
# Be annoying
print("Be sure to run this against Mesonet dat
|
abase and not laptop!")
def do(ramp):
"""states."""
df = read_sql(
"SELECT l.coloridx, l.value, l.r, l.g, l.b from iemrasters_lookup l "
"JOIN iemrasters r ON (l.iemraster_id = r.id) WHERE r.name = %s and "
"value is not null "
"ORDER by coloridx ASC",
get_dbconnstr("mesosite"),
params=(ramp,),
index_col="coloridx",
)
df.to_csv(f"{PATH}{ramp}.txt")
def main():
"""Go Main"""
for table in ["compo
|
site_n0r", "composite_n0q"]:
do(table)
if __name__ == "__main__":
main()
|
SalesforceFoundation/CumulusCI
|
cumulusci/cli/tests/test_run_task.py
|
Python
|
bsd-3-clause
| 4,367 | 0.000458 |
"""Tests for the RunTaskCommand class"""
from cumulusci.cli.runtime import CliRuntime
from cumulusci.cli.cci import RunTaskCommand
import click
import pytest
from unittest.mock import Mock, patch
from cumulusci.cli import cci
from cumulusci.core.exceptions import CumulusCIUsageError
from cumulusci.cli.tests.utils import run_click_command, DummyTask
color_opts = {"options": {"color": {}}}
multiple_opts = {"options": {"foo": {}, "bar": {}, "baz": {}}}
test_tasks = {
"dummy-task": {"class_path": "cumulusci.cli.tes
|
ts.utils.DummyTask"},
|
"dummy-derived-task": {
"class_path": "cumulusci.cli.tests.test_run_task.DummyDerivedTask"
},
}
@pytest.fixture
def runtime():
runtime = CliRuntime(load_keychain=False)
runtime.project_config.config["tasks"] = {**test_tasks}
runtime.keychain = Mock()
runtime.keychain.get_default_org.return_value = (None, None)
with patch("cumulusci.cli.cci.RUNTIME", runtime):
yield runtime
def test_task_run(runtime):
DummyTask._run_task = Mock()
multi_cmd = cci.RunTaskCommand()
cmd = multi_cmd.get_command(Mock, "dummy-task")
run_click_command(cmd, "dummy-task", color="blue", runtime=runtime)
DummyTask._run_task.assert_called_once()
def test_task_run__no_project(runtime):
runtime.project_config = None
runtime.project_config_error = Exception("Broken")
with pytest.raises(Exception, match="Broken"):
cci.RunTaskCommand().get_command(Mock, "dummy-task")
def test_task_run__debug_before(runtime):
DummyTask._run_task = Mock()
multi_cmd = cci.RunTaskCommand()
set_trace = Mock(side_effect=SetTrace)
with patch("pdb.set_trace", set_trace):
with pytest.raises(SetTrace):
cmd = multi_cmd.get_command(Mock(), "dummy-task")
run_click_command(
cmd,
"dummy_task",
color="blue",
debug_before=True,
debug_after=False,
runtime=runtime,
)
def test_task_run__debug_after(runtime):
DummyTask._run_task = Mock()
multi_cmd = cci.RunTaskCommand()
set_trace = Mock(side_effect=SetTrace)
with patch("pdb.set_trace", set_trace):
with pytest.raises(SetTrace):
cmd = multi_cmd.get_command(Mock(), "dummy-task")
run_click_command(
cmd,
"dummy-task",
color="blue",
debug_before=False,
debug_after=True,
runtime=runtime,
)
def test_task_run__list_commands(runtime):
multi_cmd = cci.RunTaskCommand()
commands = multi_cmd.list_commands(Mock())
assert commands == ["dummy-derived-task", "dummy-task"]
def test_format_help(runtime):
with patch("cumulusci.cli.cci.click.echo") as echo:
runtime.universal_config = Mock()
RunTaskCommand().format_help(Mock(), Mock())
assert 4 == echo.call_count
assert 0 == len(runtime.universal_config.method_calls)
def test_get_default_command_options():
opts = RunTaskCommand()._get_default_command_options(is_salesforce_task=False)
assert len(opts) == 4
opts = RunTaskCommand()._get_default_command_options(is_salesforce_task=True)
assert len(opts) == 5
assert any([o.name == "org" for o in opts])
def test_collect_task_options():
new_options = {"debug-before": None}
old_options = (("color", "green"),)
opts = RunTaskCommand()._collect_task_options(
new_options, old_options, "dummy-task", color_opts["options"]
)
assert opts == {"color": "green"}
def test_collect_task_options__duplicate():
new_options = {"color": "aqua"}
old_options = (("color", "green"),)
with pytest.raises(CumulusCIUsageError):
RunTaskCommand()._collect_task_options(
new_options, old_options, "dummy-task", color_opts["options"]
)
def test_collect_task_options__not_in_task():
new_options = {}
old_options = (("color", "green"),)
with pytest.raises(CumulusCIUsageError):
RunTaskCommand()._collect_task_options(
new_options, old_options, "dummy-task", {"not-color": {}}
)
class SetTrace(Exception):
pass
class DummyDerivedTask(DummyTask):
def _run_task(self):
click.echo(f"<{self.__class__}>\n\tcolor: {self.options['color']}")
|
ashmastaflash/cloudpassage_slim
|
cloudpassage_slim/halo_session.py
|
Python
|
bsd-2-clause
| 3,951 | 0 |
import base64
import httplib
import json
import os
import re
import ssl
import urllib
from urlparse import
|
urlunsplit
from exceptions import CloudPassageAuthentication
class HaloSession(object):
"""All Halo API session management happens in this object.
Args:
key(str): Halo API key
|
secret(str): Halo API secret
Kwargs:
api_host(str): Hostname for Halo API. Defaults to
``api.cloudpassage.com``
cert_file(str): Full path to CA file.
integration_string(str): This identifies a specific integration to the
Halo API.
"""
def __init__(self, halo_key, halo_secret, **kwargs):
self.key = halo_key
self.secret = halo_secret
self.api_host = "api.cloudpassage.com"
self.sdk_version = self.get_sdk_version()
self.sdk_version_string = "Halo-Python-SDK-slim/%s" % self.sdk_version
self.integration_string = ''
self.cert_file = None
if "api_host" in kwargs:
self.api_host = kwargs["api_host"]
if "cert_file" in kwargs:
self.cert_file = kwargs["cert_file"]
if "integration_string" in kwargs:
self.integration_string = kwargs["integration_string"]
self.user_agent = self.build_ua_string(self.sdk_version_string,
self.integration_string)
self.threads = 10
self.api_token = None
def authenticate(self):
"""Obtain and set an oauth API token."""
headers = self.build_auth_headers(self.key, self.secret)
headers["User-Agent"] = self.user_agent
params = urllib.urlencode({'grant_type': 'client_credentials'})
if self.cert_file is None:
connection = httplib.HTTPSConnection(self.api_host)
else:
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ctx.load_verify_locations(self.cert_file)
connection = httplib.HTTPSConnection(self.api_host,
context=ctx)
connection.request("POST", '/oauth/access_token', params, headers)
response = connection.getresponse()
code = response.status
body = response.read().decode()
if code == 401: # Bad API key...
raise CloudPassageAuthentication(json.dumps(body))
self.api_token = json.loads(body)['access_token']
return True
@classmethod
def build_auth_headers(cls, key, secret):
"""Create an auth string for Halo oauth."""
credfmt = "{key}:{secret}".format(key=key, secret=secret)
creds = base64.b64encode(credfmt)
auth_string = "Basic {creds}".format(creds=creds)
auth_header = {"Authorization": auth_string}
return auth_header
def build_header(self):
"""This builds the header, required for all API interaction."""
if self.api_token is None:
self.authenticate()
authstring = "Bearer " + self.api_token
header = {"Authorization": authstring,
"Content-Type": "application/json",
"User-Agent": self.user_agent}
return header
@classmethod
def build_ua_string(cls, sdk_version_str, integration_string):
ua = "{sdk} {integration}".format(sdk=sdk_version_str,
integration=integration_string)
return ua
def build_url(self, endpoint):
"""Build a URL from parts."""
url = urlunsplit(("https", self.api_host, endpoint, "", ""))
return url
@classmethod
def get_sdk_version(cls):
here_dir = os.path.abspath(os.path.dirname(__file__))
init_file_path = os.path.join(here_dir, "__init__.py")
raw_init_file = open(init_file_path).read()
rx_compiled = re.compile(r"\s*__version__\s*=\s*\"(\S+)\"")
version = rx_compiled.search(raw_init_file).group(1)
return version
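# --- illustrative usage sketch, not part of the original module ---
# Shows the typical flow with hypothetical credentials: build the session,
# authenticate to obtain an oauth token, then build the headers that every
# subsequent API request needs. Defined but not called here.
def _usage_example():
    session = HaloSession("my-halo-key", "my-halo-secret",
                          integration_string="example-integration/1.0")
    session.authenticate()            # populates session.api_token
    return session.build_header()     # Bearer token, content type, user agent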
|
beiko-lab/gengis
|
bin/Lib/site-packages/numpy/oldnumeric/mlab.py
|
Python
|
gpl-3.0
| 3,566 | 0.009534 |
# This module is for compatibility only. All functions are defined elsewhere.
__all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle',
'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort',
'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud',
'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc',
'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean']
import numpy.oldnumeric.linear_algebra as LinearAlgebra
import numpy.oldnumeric.random_array as RandomArray
from numpy import tril, trapz as _Ntrapz, hanning, rot90, triu, diff, \
angle, roots, ptp as _Nptp, kaiser, cumprod as _Ncumprod, \
diag, msort, prod as _Nprod, std as _Nstd, hamming, flipud, \
amax as _Nmax, amin as _Nmin, blackman, bartlett, \
squeeze, sinc, median, fliplr, mean as _Nmean, transpose
from numpy.linalg import eig, svd
from numpy.random import rand, randn
import numpy as np
from typeconv import convtypecode
def eye(N, M=None, k=0, typecode=None, dtype=None):
""" eye returns a N-by-M 2-d array where the k-th diagonal is all ones,
and everything else is zeros.
"""
dtype = convtypecode(typecode, dtype)
if M is None: M = N
m = np.equal(np.subtract.outer(np.arange(N), np.arange(M)),-k)
if m.dtype != dtype:
        return m.astype(dtype)
    return m  # already the requested dtype
def tri(N, M=None, k=0, typecode=None, dtype=None):
""" returns a N-by-M array where all the diagonals starting from
lower left corner up to the k-th are all ones.
"""
dtype = convtypecode(typecode, dtype)
if M is None: M = N
m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)),-k)
if m.dtype != dtype:
        return m.astype(dtype)
    return m  # already the requested dtype
def trapz(y, x=None, axis=-1):
return _Ntrapz(y, x, axis=axis)
def ptp(x, axis=0):
return _Nptp(x, axis)
def cumprod(x, axis=0):
return _Ncumprod(x, axis)
def max(x, axis=0):
return _Nmax(x, axis)
def min(x, axis=0):
return _Nmin(x, axis)
def prod(x, axis=0):
return _Nprod(x, axis)
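# std() below rescales numpy's population standard deviation by sqrt(N/(N-1)),
# i.e. it applies the Bessel correction to return the sample standard deviation.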
def std(x, axis=0):
N = asarray(x).shape[axis]
return _Nstd(x, axis)*sqrt(N/(N-1.))
def mean(x, axis=0):
return _Nmean(x, axis)
# This is exactly the same cov function as in MLab
def cov(m, y=None, rowvar=0, bias=0):
if y is None:
|
y = m
else:
y = y
if rowvar:
m = transpose(m)
y = transpose(y)
if (m.shape[0] == 1):
m = transpose(m)
if (y.shape[0] == 1):
y = transpose(y)
N = m.shape[0]
if (y.shape[0] != N):
raise ValueError("x
|
and y must have the same number of observations")
m = m - _Nmean(m,axis=0)
y = y - _Nmean(y,axis=0)
if bias:
fact = N*1.0
else:
fact = N-1.0
return squeeze(dot(transpose(m), conjugate(y)) / fact)
from numpy import sqrt, multiply
def corrcoef(x, y=None):
c = cov(x, y)
d = diag(c)
return c/sqrt(multiply.outer(d,d))
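# --- illustrative sketch, not part of the original module ---
# Demonstrates the bias argument of cov(): with the default bias=0 the divisor
# is N-1 (sample variance), with bias=1 it is N (population variance).
# Defined but not called here.
def _cov_example():
    data = np.array([[1.0], [3.0]])   # N=2 observations of a single variable
    sample_var = cov(data)            # ((1-2)**2 + (3-2)**2) / (2-1) -> 2.0
    pop_var = cov(data, bias=1)       # same sum divided by N         -> 1.0
    return sample_var, pop_var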
from compat import *
from functions import *
from precision import *
from ufuncs import *
from misc import *
import compat
import precision
import functions
import misc
import ufuncs
import numpy
__version__ = numpy.__version__
del numpy
__all__ += ['__version__']
__all__ += compat.__all__
__all__ += precision.__all__
__all__ += functions.__all__
__all__ += ufuncs.__all__
__all__ += misc.__all__
del compat
del functions
del precision
del ufuncs
del misc
|
gpersistence/tstop
|
python/persistence/PAMAPSegments.py
|
Python
|
gpl-3.0
| 4,400 | 0.009091 |
#TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import sys
import json
import csv
import itertools
import argparse
from Datatypes.Configuration import get_filename
from Datatypes.Segments import Segments, Segment
import os
class PAMAPSegments(Segments):
"""
Segments generated from the Physical Activity Monitoring Data Set from
https://archive.ics.uci.edu/ml/datasets/PAMAP2+Physical+Activity+Monitoring
Labels are derived from the second value in each line of the file,
followed by a heart rate and readings from three inertial
measurement units (that sporadically have dropped values). The
values in config.data_index correspond to the index of the data
values used for segments and windows. In the event of a dropped
value, indicated by NaN in the file, we carry over the previous
data value.
"""
def __init__(self, config) :
config.label_index = 1
super(self.__class__, self).__init__(config)
if isinstance(self.config.data_file, list) :
self.config.data_file = self.config.data_file[0]
if not isinstance(self.config.data_index, list) :
self.config.data_index = [self.config.data_index]
with open(self.config.data_file, 'r') as data_file :
data_reader = csv.reader(data_file, delimiter=' ')
full_data = [line for line in data_reader]
# carry over previous values for any NaNs
prev_line = full_data[0]
line_range = range(len(prev_line))
for line in full_data[1:] :
for (l0, l1, i) in zip(prev_line, line, line_range) :
if l1 == "NaN" :
line[i] = l0
prev_line = line
label_set = set([d[self.config.label_index] for d in full_data])
if self.config.window_size == -1 :
self.config.window_size = self.config.segment_size
self.segments = []
for segment_start in range(0, len(full_data) - self.config.segment_size + 1, self.config.segment_stride) :
segment_end = segment_start + self.config.segment_size
windows = []
# if the data_index has more than one entry, interleave the results.
# e.g. if data_index is [1,2] it's [(x_0, label), (y_0, label), (x_1, label), (y_1, label)...]
f
|
or window_start in range(segment_start, segment_end - self.config.window_size + 1, self.config.window_stride):
window_end = window_start + self.config.window_size
windows.append(list(itertools.chain(*itertools.izip(*[[float(d[i]) for d in full_data[window_start:window_end]] \
for i in self.config.data_index]))))
labels = [d[self.config.label_index] for d in full_data[segmen
|
t_start:segment_end]]
label_dict = dict([(str(l), len([d for d in labels if d == l])) for l in list(set(labels))])
segment = Segment(windows=windows,
segment_start=segment_start,
segment_size=self.config.segment_size,
window_stride=self.config.window_stride,
window_size=self.config.window_size,
labels=label_dict,
filename=self.config.data_file,
data_index = self.config.data_index,
label_index = self.config.label_index)
self.segments.append(segment)
@staticmethod
def get_segment_filename(config, gz=True):
fields = ['data_file', 'data_index', 'segment_size', 'segment_stride', 'window_size', 'window_stride']
return get_filename(config, fields, 'PAMAPSegments', gz)
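# --- illustrative sketch, not part of the original module ---
# Shows the per-window interleaving described in the class docstring: with
# data_index [1, 2], column values are woven together per timestep as
# [x_0, y_0, x_1, y_1, ...]. Uses made-up rows; defined but not called here.
def _interleave_example():
    full_data = [["t0", "10", "100"], ["t1", "20", "200"], ["t2", "30", "300"]]
    data_index = [1, 2]
    columns = [[float(d[i]) for d in full_data] for i in data_index]
    window = list(itertools.chain(*zip(*columns)))
    return window   # [10.0, 100.0, 20.0, 200.0, 30.0, 300.0]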
|
siimeon/Kipa
|
web/urls.py
|
Python
|
gpl-3.0
| 485 | 0.010309 |
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf impor
|
t settings
admin.autodiscover()
urlpatterns = patterns('',
(r'^kipa/', include('tupa.urls')),
(r'^admin/', include(admin.site.urls)),
)
if se
|
ttings.DEBUG :
urlpatterns += patterns('',
(r'^kipamedia/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_DOC_ROOT}),)
handler500 = 'tupa.views.raportti_500'
|
johnson1228/pymatgen
|
pymatgen/io/feff/__init__.py
|
Python
|
mit
| 311 | 0 |
# coding: utf-8
# Copyright (c) Pymatgen Dev
|
elopment Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
This package provides the modules to perform FEFF IO.
FEFF: http://feffproject.org/feffproject-feff.html
"""
from .inputs import *
fro
|
m .outputs import *
|
texastribune/wjordpress
|
wjordpress/admin.py
|
Python
|
apache-2.0
| 1,830 | 0.003825 |
from django.contrib import admin
from django.core.urlresolvers import NoReverseMatch
from . import models
class WPSiteAdmin(admin.ModelAdmin):
list_display = ('name', 'url', 'hook')
readonly_fields = ('name', 'description')
def save_model(self, request, obj, form, change):
# TODO do this sync async (give celery another shot?)
obj.save()
obj.fetch_all()
# CUSTOM METHODS #
def hook(self, obj):
"""
This is where an admin can find what url to point the webhook to.
Doing it as an absolute url lets us cheat and make the browser figure
out the host for us.
Requires HookPress: http://wordpress.org/plugins/hookpress/
"""
try:
return (u'<a href="{}" title="Add a save_post hook with the ID">'
'Webhook</a>'.format(obj.hook_url))
except NoReverseMatch:
return ''
hook.allow_tags = True
admin.site.register(models.WPSite, WPSiteAdmin)
class WPUserAdmin(admin.ModelAdmin):
readonly_fields = ('synced_at', )
admin.site.register(models.WPUser, WPUserAdmin)
class WPCategoryAdmin(admin.ModelAdmin):
readonly_fields = ('synced_at', )
admin.site.register(models.WPCategory, WPCategoryAdmin)
class WPTagAdm
|
in(admin.ModelAdmin):
readonly_fields = ('synced_at', )
a
|
dmin.site.register(models.WPTag, WPTagAdmin)
class WPPostAdmin(admin.ModelAdmin):
list_display = ('title', 'date', 'type', 'status', )
list_filter = ('type', 'status', )
readonly_fields = ('synced_at', )
admin.site.register(models.WPPost, WPPostAdmin)
class WPLogAdmin(admin.ModelAdmin):
list_display = ('timestamp', 'wp', 'action', )
list_filter = ('wp', 'action', )
readonly_fields = ('wp', 'timestamp', 'action', 'body', )
admin.site.register(models.WPLog, WPLogAdmin)
|
sitexa/foobnix
|
foobnix/util/agent.py
|
Python
|
gpl-3.0
| 1,349 | 0.007429 |
#-*- coding: utf-8 -*-
'''
Created on 24 Dec 2010
@author: ivan
'''
import ra
|
ndom
all_agents = """
Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3
Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)
Mozilla/5.0 (Windows; U; Windows NT 5.2;
|
en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)
Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1
Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)
Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)
"""
def get_ranmom_agent():
agents = None
for i in xrange(10):
agents = all_agents.replace(str(i), str(random.randint(0, 10)))
return agents.splitlines()[random.randint(1, 10)]
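# Illustrative note, not part of the original module: because the loop above
# reassigns `agents` from `all_agents` on every pass, only the final pass takes
# effect (occurrences of the digit 9 are replaced by a random number); the
# function then returns one of the user-agent lines above, picked at random
# from indices 1-10.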
|
ryanmiao/libvirt-test-API
|
repos/network/start.py
|
Python
|
gpl-2.0
| 1,786 | 0.003919 |
#!/usr/bin/evn python
# Start a network
import time
import os
import re
import sys
import commands
import libvirt
from libvirt import libvirtError
from src import sharedmod
required_params = ('networkname',)
optional_params = {}
def start(params):
"""activate a defined network"""
global logger
logger = params['logger']
params.pop('logger')
networkname = params['networkname']
logger.info("the name of virtual network to be activated is %s" % \
networkname)
conn = sharedmod.libvirtobj['conn']
net_defined_list = conn.listDefinedNetworks()
if networkname not in net_defined_list:
logger.error("virtual network %s doesn't exist \
or is active already." % networkname)
return 1
else:
netobj = conn.networkLookupByName(networkname)
netxmldesc = netobj.XMLDesc(0)
logger.debug("the xml description of the virtual network is %s" % \
netxmldesc)
|
try:
logger.info("begin to activate virtual network %s" % networkname)
netobj.create()
except libvirtError, e:
logger.error("API error message: %s, error code is %s" \
% (e.message, e.get_error_code()))
logger.error("fail to destroy domain")
return 1
net_activated_list = conn.lis
|
tNetworks()
if networkname not in net_activated_list:
logger.error("virtual network %s failed to be activated." % networkname)
return 1
else:
shell_cmd = "virsh net-list --all"
(status, text) = commands.getstatusoutput(shell_cmd)
logger.debug("the output of 'virsh net-list --all' is %s" % text)
logger.info("activate the virtual network successfully.")
time.sleep(3)
return 0
|
remiotore/Python-Tools
|
DirBuster.py
|
Python
|
mit
| 1,875 | 0.011733 |
#!/usr/bin/python
import os, sys
from termcolor import colored
try:
import requests
except:
print "[!]Requests module not found. Try to (re)install it.\n[!]pip install requests"
oks = []
print("EASY DIRBUSTER!")
def parser():
#URL
flag = False
while ( flag == False ):
url = raw_input("Insert an URL (with HTTP:// or HTTPS://)\n\tURL: ")
if (url.startswith("http://")):
flag = True
elif (url.startswith("https://")):
flag = True
else:
pass
#PATH
flag = False
while ( flag == False ):
path = raw_input("Insert path to File (ex: /root/wordlists/list.txt)\n\tPATH: ")
if (os.path.isfile(path)):
flag = True
else:
pass
return url, path
def requester(url, fpath):
if (requests.get(url).status_code != 200):
|
return 0
else:
with open(fpath) as f:
for path in f:
temp1 = url + "/" + str(path).replace("\n","")
temp2 = url + "/" + str(path).replace("\n","") + "/"
for temp in temp1,temp2:
r = requests.get(temp)
if (r.status_code == 200):
print colored(("[!] " + str(r.status_code) + " OK!! -> "
|
+ temp), 'green')
oks.append(str(temp))
elif (r.status_code == 403):
print colored(("[!] " + str(r.status_code) + " Forb -> " + temp), 'yellow')
else:
print colored(("[!] " + str(r.status_code) + " NotF -> " + temp), 'red')
return 1
url, path = parser()
#print url + path
if (requester(url, path) == 0):
print "Error. URL not available."
else:
print "200 OK Requested sites: "
for ok in oks:
print "\t" + ok
print "Finished Successfully!"
|
Roma2Lug-Projects/BEST_App
|
src/config.py
|
Python
|
apache-2.0
| 8,273 | 0.003747 |
'''
Created on 10/mar/2014
@author: sectumsempra
'''
import sys
'''
from variab_conc import errbox
from PyQt4 import QtGui, QtCore, Qt
from PyQt4.QtCore import pyqtSignal, SLOT
from PyQt4.QtCore import pyqtSlot, SIGNAL
from PyQt4.QtGui import QPushButton, QTextEdit, QTableWidgetItem
'''
from PyQt4 import QtGui, QtCore
import signal
import time
var_str = 'string'
nom_str = 'ss'
numvarinp = 'ala'
numconcinp = 'asda'
var_lis = []
nom_lis = []
pes_lis = []
punt_fin_dis = []
punt_fin_ord = []
punt_ordinati = []
class_fin = []
tab_pesi = None
class confgui(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
global numvarinp, numconcinp
bwidget = QtGui.QWidget(self)
master_columner = QtGui.QVBoxLayout()
grid = QtGui.QGridLayout()
#toplabel = QtGui.QLabel('Setup')
button1 =QtGui.QPushButton('Start!', bwidget)
vbox00 = QtGui.QVBoxLayout()
numconc = QtGui.QLabel('Concorrenti', bwidget)
vbox00.addWidget(numconc)
self.setWindowTitle("BESTAPP Config")
numconcinp = QtGui.QTextEdit()
vbox00.addWidget(numconcinp)
vbox01 = QtGui.QVBoxLayout()
numvar = QtGui.QLabel('Variabili', bwidget)
numvarinp = QtGui.QTextEdit()
vbox01.addWidget(numvar)
vbox01.addWidget(numvarinp)
grid.addLayout(vbox00, 0, 0)
grid.addLayout(vbox01, 0, 1)
#master_columner.addWidget(toplabel)
master_columner.addLayout(grid)
master_columner.addWidget(button1)
bwidget.setLayout(master_columner)
self.setCentralWidget(bwidget)
button1.clicked.connect(self.settings)
numvarinp.textChanged.connect(self.var_to_str)
numconcinp.textChanged.connect(self.nom_to_str)
self.resize(600, 400)
self.setStyleSheet("font: 16pt \"DejaVu Serif\";\n ")
def switchwind(self):
global varconfig
self.hide()
varconfig = valorideipesi()
print("initdone")
varconfig.showMaximized()
print("alldonw")
def settings(self):
self.varstr_to_list()
self.nomstr_to_list()
self.switchwind()
def varstr_to_list(self):
global var_lis
global var_str
var_lis = []
f = open('lista_var.txt', 'w+')
a = str(var_str)
f.write(a)
f.seek(0, 0)
for line in f:
linea = line.rstrip('\n')
var_lis.append(linea)
f.close()
def nomstr_to_list(self):
global nom_lis
global nom_str
nom_lis = []
f = open ('lista_nomi.txt', 'w+')
a = str(nom_str)
f.write(a)
f.seek(0, 0)
for line in f:
linea = line.rstrip('\n')
nom_lis.append(linea)
print(nom_lis)
f.close()
def nom_to_str(self):
global nom_str
nom_str = numconcinp.toPlainText()
def var_to_str(self):
global var_str
var_str = numvarinp.toPlainText()
class maingui(QtGui.QMainWindow):
#global nom_lis
#global var_lis
b = []
col = []
def __init__(self):
global nom_lis, var_lis
QtGui.QMainWindow.__init__(self)
bWidget = QtGui.QWidget(self)
self.len_nom_lis = len(nom_lis)
self.len_var_lis = len(var_lis)
self.tabellone = QtGui.QTableWidget()
self.tabellone.setColumnCount(self.len_var_lis)
self.tabellone.setRowCount(self.len_nom_lis)
self.button_save = QtGui.QPushButton("save", bWidget)
mainlay = QtGui.QVBoxLayout()
mainlay.addWidget(self.tabellone)
mainlay.addWidget(self.button_save)
bWidget.setLayout(mainlay)
self.setCentralWidget(bWidget)
self.setWindowTitle("BESTAPP Config")
self.grid_mk(nom_lis, var_lis)
def grid_mk(self, rw_names, col_names):
rw_num = len(rw_names)
col_num = len(col_names)
"""
for a in range(col_num):
for b in range(rw_num):
aleph = QtGui.QTableWidgetItem(0)
aleph.setText(str("0"))
self.tabellone.setItem(b, a, aleph)
"""
self.tabellone.setHorizontalHeaderLabels(col_names)
self.tabellone.setVerticalHeaderLabels(rw_names)
self.tabellone.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
self.tabellone.verticalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
        #rw_hei = int(700 / rw_num)
        #col_wid = int(1024 / col_num)
for i in range(0, col_num):
self.tabellone.setColumnWidth(i, 150)
for j in range(0, rw_num):
self.tabellone.setRowHeight(j, 50)
self.button_save.clicked.connect(self.readScores)
#print(rw_hei, col_wid)
#print("finished grid")
self.setStyleSheet("font: 16pt \"DejaVu Serif\";\n ")
#return None
def readScores(self):
global nom_lis
global var_lis
righe = len(nom_lis)
colonne = len(var_lis)
n = 0
f = open('lista_punteggi.txt','w+')
for rig in range(righe):
punt = []
for col in range(colonne):
pnt = str(self.tabellone.item(rig, col).text())
punt.append(pnt)
risultati = "|".join(punt)
f.write(risultati + "\n")
f.close()
self.close()
class valorideipesi(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
global var_lis, tab_pesi
num_r = len(var_lis)
sWidget = QtGui.QWidget()
tab_pesi = QtGui.QTableWidget()
tab_pesi.setColumnCount(1)
tab_pesi.setRowCount(num_r)
tab_pesi.setVerticalHeaderLabels(var_lis)
        #tab_pesi.setColumnWidth(0, 300)
        #for i in range(0, num_r):
# tab_pesi.setRowHeight(i, 80)
ok = QtGui.QPushButton("OK", sWidget)
vlay = QtGui.Q
|
VBoxLayout()
vlay.addWidget(tab_pesi)
vlay.addWidget(ok)
sWidget.setLayout(vlay)
self.setCentralWidget(sWidget)
self.resize(400, 400)
ok.clicked.connect(self.switchwind1)
self.setStyleSheet("font: 16pt \"DejaVu Serif\";\n ")
self.setWindowTitle("BESTAPP Config")
tab_pesi.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
tab_pesi.verticalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
def swi
|
tchwind1(self):
global mainwind
self.saveconstants()
self.hide()
mainwind = maingui()
mainwind.showMaximized()
print("connected")
def saveconstants(self):
global var_lis, pes_lis, tab_pesi
top = len(var_lis)
for i in range(0, top):
pes_lis.append(str(tab_pesi.item(i, 0).text())+"\n")
f = open('lista_pes.txt', 'w+')
f.writelines(pes_lis)
f.close()
print (pes_lis)
def classifica():
global pes_lis, punt_ordinati, punt_fin_dis, punt_fin_ord, class_fin, nom_lis
a = len(pes_lis)
divisore = 0
for z in pes_lis:
divisore += int(z)
top = len(punt_ordinati)
for lis_n in range (0, top):
lis = punt_ordinati[lis_n]
parziali = []
for i in range(1, a):
s1 = lis[i]
s1i = int(s1)
i2 = i - 1
s2 = pes_lis[i2]
s2i = int(s2)
par = s1i * s2i
parziali.append(par)
dividendo = 0
for x in parziali:
dividendo += int(x)
punteggio = float(int(dividendo) / int(divisore))
punt_fin_dis.append(punteggio)
punt_fin_ord = punt_fin_dis
punt_fin_ord.sort()
max = len(punt_fin_dis)
for v1 in punt_fin_ord:
for n in range(0, max):
if v1 == punt_fin_dis[n]:
elem = nom_lis[n]
class_fin.append(elem)
else: pass
print(class_fin)
print(punt_fin_ord)
class myItem(QtGui.QTableWidgetItem):
def __init__(self):
QtGui.QTableWidgetItem.__init__(self)
self.setText("0")
if __name__ == "__main__":
app = QtGui
|
paultag/moxie
|
migrations/env.py
|
Python
|
mit
| 2,125 | 0.002353 |
from __future__ import with_statement
import os
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
from moxie.models import Base
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
dbcfg = config.get_section(config.config_ini_section)
if 'DATABASE_URL' in os.environ:
dbcfg['sqlalchemy.url'] = os.environ['DATABASE_URL']
engine = engine_from_config(
dbcfg,
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
|
target_metadata=target_metadata
|
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
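# Illustrative note, not part of the original script: this env.py differs from
# the stock Alembic template in one place only -- for online migrations the
# DATABASE_URL environment variable, when set, overrides sqlalchemy.url from
# the .ini file before the engine is created.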
|
milasudril/anja
|
versioninfo.py
|
Python
|
gpl-3.0
| 2,158 | 0.056534 |
#@ {
#@ "targets":
#@ [{
#@ "name":"versioninfo.txt"
#@ ,"status_check":"dynamic"
#@ ,"dependencies":[{"ref":"maike","rel":"tool"}]
#@ }]
#@ }
import sys
import subprocess
import shutil
import os
def modified_time(filename):
try:
return (os.path.getmtime(filename),True)
except (KeyboardInterrupt, SystemExit):
raise
except:
return (0,False)
def newer(file_a,file_b):
mod_a=modified_time(file_a)
mod_b=modified_time(file_b)
if mod_a[1]==False and mod_b[1]==False:
raise OSError('Error:
|
None of the files %s, and %s are accessible.'%(file_a,file_b))
if not mod_a[1]:
return False
if not mod_b[1]:
return True
return mod_a[0] > mod_b[0]
def newer_than_all(file_a, files):
for file in files:
if newer(file,file_a):
return False
return True
def git_changes():
with subprocess.Popen(('git', 'status','--porcelain'),stdout=subprocess.PIPE) \
as git:
result=[];
for k in filter(None,git.stdout.read().decode()
|
.split('\n')):
result.append( k[3:].split(' ')[0] )
return result
def get_revision():
if shutil.which('git')==None:
with open('versioninfo-in.txt') as versionfile:
return versionfile.read().strip()
else:
with subprocess.Popen(('git', 'describe','--tags','--dirty','--always') \
,stdout=subprocess.PIPE) as git:
result=git.stdout.read().decode().strip()
git.wait()
status=git.returncode
if status:
with open('versioninfo-in.txt') as versionfile:
return versionfile.read().strip()
else:
return result
def write_error(*args, **kwargs):
print(*args,file=sys.stderr,**kwargs)
def get_rev_old():
with os.fdopen(os.open(target_dir + '/versioninfo.txt',os.O_RDONLY|os.O_CREAT),'r+') \
as verfile:
return verfile.read().strip()
try:
target_dir=sys.argv[1]
in_dir=sys.argv[2]
revision=get_revision()
rev_old=get_rev_old()
if rev_old!=revision:
with open(target_dir + '/versioninfo.txt','w') as verfile:
verfile.write(revision)
with open('versioninfo-in.txt','w') as verfile:
verfile.write(revision)
sys.exit(0)
except Exception:
write_error('%s:%d: error: %s\n'%(sys.argv[0],sys.exc_info()[2].tb_lineno,sys.exc_info()[1]))
sys.exit(-1)
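# Illustrative note, not part of the original script: `git describe --tags
# --dirty --always` produces strings such as "v1.2-14-g2414721-dirty", or just
# an abbreviated commit hash when no tag is reachable; if git is unavailable or
# the command fails, the value cached in versioninfo-in.txt is reused.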
|
ManoSeimas/manoseimas.lt
|
manoseimas/lobbyists/json_urls.py
|
Python
|
agpl-3.0
| 304 | 0 |
from django.conf.urls import patterns
|
, url
from manoseimas.lobbyists import views
urlpatterns = patterns(
'',
url(r'^lobbyists/?$', views.lobbyists_json, name='lobbyists_json'),
url(r'^law_projects/(?P<lobbyist_slug>[^/]+)/?$',
views.law_projects_json, name='law_projects_json'),
|
)
|
skitzycat/beedraw
|
beenetwork.py
|
Python
|
gpl-2.0
| 14,234 | 0.039834 |
# Beedraw/Hive network capable client and server allowing collaboration on a single image
# Copyright (C) 2009 Thomas Becker
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import PyQt4.QtCore as qtcore
import PyQt4.QtNetwork as qtnet
import socket
try:
from PyQt4.QtXml import QXmlStreamReader
except:
from PyQt4.QtCore import QXmlStreamReader
import SocketServer
from animation import XmlToQueueEventsConverter
from sketchlog import SketchLogWriter
from beetypes import *
from beeutil import *
class PyServerEventHandler(SocketServer.BaseRequestHandler,qtcore.QObject):
def __init__(self,request,client_address,server,master,parentthread,id):
qtcore.QObject.__init__(self)
self.master=master
self.parentthread=parentthread
self.clientid=id
self.server=server
SocketServer.BaseRequestHandler.__init__(self,request,client_address,server)
def handle(self):
newsock=BeeSocket(BeeSocketTypes.python,self.request,True)
# start the listener, that will authenticate client and finish setup
newlistener=HiveClientListener(self,newsock,self.master,self.clientid)
newlistener.run()
class customPyServer(SocketServer.ThreadingMixIn,SocketServer.TCPServer,qtcore.QObject):
def __init__(self,hostport,master,parentthread):
qtcore.QObject.__init__(self)
SocketServer.TCPServer.__init__(self,hostport,PyServerEventHandler)
self.master=master
self.parentthread=parentthread
self.idlock=qtcore.QReadWriteLock()
self.nextid=0
def getNextId(self):
lock=qtcore.QWriteLocker(self.idlock)
self.nextid+=1
return self.nextid
def finish_request(self,request,client_address):
PyServerEventHandler(request,client_address,self,self.master,self.parentthread,self.getNextId())
# dont' close the request after we're done in here
def close_request(self,request):
pass
class BeeTCPServer(qtcore.QObject):
""" Socket interface to allow changing between different tcp server implementations to see if Qt sockets or standard python sockets are better on each platform."""
def __init__(self,type,port,parentthread,master):
if type==BeeSocketTypes.qt:
qtcore.QObject.__init__(self,parentthread)
self.type=type
self.parentthread=parentthread
self.master=master
self.port=port
self.idlock=qtcore.QReadWriteLock()
self.nextid=0
def getNextId(self):
lock=qtcore.QWriteLocker(self.idlock)
self.nextid+=1
return self.nextid
def start(self):
if self.type==BeeSocketTypes.qt:
self.server=qtnet.QTcpServer(self.parentthread)
qtcore.QObject.connect(self.server, qtcore.SIGNAL("newConnection()"), self.newConnectionQt)
if self.server.listen(qtnet.QHostAddress("0.0.0.0"),self.port):
event=HiveServerStatusEvent(HiveServerStatusTypes.running)
else:
event=HiveServerStatusEvent(HiveServerStatusTypes.starterror,"%s" % self.server.errorString())
BeeApp().app.postEvent(self.master,event)
elif self.type==BeeSocketTypes.python:
try:
self.server=customPyServer(("localhost",self.port),self.master,self.parentthread)
except:
self.server=None
event=HiveServerStatusEvent(HiveServerStatusTypes.starterror)
BeeApp().app.postEvent(self.master,event)
print_debug("WARNING: failed to create server")
if self.server:
event=HiveServerStatusEvent(HiveServerStatusTypes.running)
BeeApp().app.postEvent(self.master,event)
self.server.serve_forever()
def stop(self):
if self.server:
if self.type==BeeSocketTypes.qt:
self.server.close()
elif self.type==BeeSocketTypes.python:
self.server.shutdown()
self.server.socket.close()
def newConnectionQt(self):
print_debug("found new connection")
while self.server.hasPendingConnections():
newsock=BeeSocket(BeeSocketTypes.qt,self.server.nextPendingConnection())
# start the listener, that will authenticate client and finish setup
newlistener=HiveClientListener(self.parentthread,newsock,self.master,self.getNextId())
# push responsibility to new thread
newsock.socket.setParent(None)
newsock.socket.moveToThread(newlistener)
newlistener.start()
class BeeSocket:
""" Socket interface to allow changing between different socket implementations (Qt socket or standard python sockets). Qt sockets seem to be buggy under linux, while python sockets aren't as well implemented under windows. Also helps provide blocking interface to Qt sockets which are normally non-blocking. """
def __init__(self,type,socket,connected=False):
self.type=type
self.socket=socket
self.errorStr=""
self.connected=connected
self.pyconnectlock=qtcore.QReadWriteLock()
# set blocking to never time out
if self.type==BeeSocketTypes.python:
self.socket.settimeout(None)
def setPyConnectedState(self,state,lock=None):
if not lock:
lock=qtcore.QWriteLocker(self.pyconnectlock)
self.connected=state
def waitForConnected(self):
if self.type==BeeSocketTypes.qt:
connected=self.socket.waitForConnected()
return connected
elif self.type==BeeSocketTypes.python:
return self.isConnected()
def errorString(self):
if self.type==BeeSocketTypes.qt:
return self.socket.errorString()
elif self.type==BeeSocketTypes.python:
return self.errorStr
def disconnect(self):
if self.type==BeeSocketTypes.qt:
if not self.isConnected():
return
self.socket.disconnectFromHost()
elif self.type==BeeSocketTypes.python:
lock=qtcore.QWriteLocker(self.pyconnectlock)
if not self.isConnected(lock):
return
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
self.setPyConnectedState(False,lock)
def abort(self):
if self.type==BeeSocketTypes.qt:
self.socket.abort()
def connect(self,host,port):
if self.type==BeeSocketTypes.qt:
self.socket.connectToHost(host,port)
return self.socket.waitForConnected()
elif self.type==BeeSocketTypes.python:
try:
self.socket.connect((host,port))
self.setPyConnectedState(True)
except socket.error, errmsg:
print_debug("error while connecting: %s" % errmsg)
self.setPyConnectedState(False)
except:
self.errorStr="unknown connection error"
self.setPyConnectedState(False)
return self.isConnected()
def read(self,size):
retstring=""
if self.type==BeeSocketTypes.qt:
# only wait if t
|
here isn't data already available
if not self.socket.bytesAvailabl
|
e():
status=self.socket.waitForReadyRead(-1)
data=self.socket.read(size)
if data:
retstring="%s" % qtcore.QString(data)
elif self.type==BeeSocketTypes.python:
try:
retstring=self.socket.recv(size)
except socket.error, errmsg:
print_debug("exception while trying to read data: %s" % errmsg)
self.setPyConnectedState(False)
retstring=""
except:
print_debug("unknown error while trying to read data")
self.setPyConnectedState(False)
retstring=""
return retstring
def isConnected(self,lock=None):
if self.type==BeeSocketTypes.qt:
if self.socket.state()==qtnet.QAbstractSocket.UnconnectedState:
return False
else:
return True
elif self.type==BeeSocketTypes.python:
if not lock:
lock=qtcore.QReadLocker(self.pyconnectlock)
return self.connected
def write(self,data):
if not data:
return
if self.type==BeeSocketTypes.qt:
self.socket.write(data)
self.socket.flush()
if self.socket.state()!=qtnet.QTcpSocket.UnconnectedState:
self.socket.waitForBytesWritten(-1)
elif self.type==BeeSocketTypes.python:
try:
self.socket.sendall(data)
except socket.error, errmsg:
print_debug("exception
|
owenwater/alfred-cal
|
src/open.py
|
Python
|
mit
| 1,042 | 0.003839 |
#!/usr/bin/python
# encoding: utf-8
import subprocess
from config import Config
applescript_name_tem = "osascript/open_%s.scpt"
arg_tem = {
"calendar": "%s %s %s",
"fantastical": "%s-%s-%s",
"busycal": "%s-%s-%s",
"google": "%s%s%s"
}
SOFTWARE = 'software'
def open_cal(arg):
arg = arg.strip()
if arg.endswith(".json"):
open_file(arg)
else:
from workflow import Workflow
wf = Workflow()
default_software = Config('').load_default(SOFTWARE)
software_name = wf.settings.get(SOFTWARE, default_software)
file_name = applescript_name_tem % (software_name)
year, month, day = arg.split()
script_arg = arg_tem[software_name] % (year, month.zfill(2), day.zfill(2))
execute_osascript(file_name, script_arg)
def execute_osascript(file, arg):
subprocess.call(['osascript', file, arg])
de
|
f open_file(file):
subprocess.call(['open', file])
if __name__ == "__main__":
import sys
open_cal(' '.join(sys.argv[1:]))
| |
purepitch/trove
|
features/steps/file_command_line_option.py
|
Python
|
gpl-3.0
| 1,396 | 0.003582 |
# -*- coding: utf-8 -*-
from behave import given, then
from nose.tools import assert_true, assert_regexp_matches
import pexpect
import re
@then(u'I should see how many entries were found')
def see_number
|
_of_entries_found(context):
expected_text = 'Found total number of \d+ entries.'
context.trove.expect(expected_text)
output = context.trove.match.string.strip()
regexp = re.compile(expected_text)
assert_regexp_matches(output, regexp)
@then(u'the trove prompt should be shown')
@given(u'the trove prompt has been shown')
def see_trove_prompt(context):
expected_text = '(t
|
rove)'
context.trove.expect(expected_text)
output = context.trove.match.string.strip()
regexp = re.compile(expected_text)
assert_regexp_matches(output, regexp)
@given(u'trove is started with an empty --file option')
def trove_starts_with_empty_file_option(context):
trove = pexpect.spawn("python trove.py --file")
context.trove = trove
assert_true(trove.isalive())
@then(u'I should see the "--file missing argument" error message')
def see_file_missing_argument_error_message(context):
expected_text = 'error: argument --file: expected 1 argument'
context.trove.expect(expected_text)
output = context.trove.match.string.strip()
regexp = re.compile(expected_text)
assert_regexp_matches(output, regexp)
# vim: expandtab shiftwidth=4 softtabstop=4
|
archatas/imageuploads
|
imageuploads/settings.py
|
Python
|
gpl-2.0
| 4,670 | 0.001285 |
# Django settings for imageuploads project.
import os
PROJECT_DIR = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
MEDIA_ROOT = os.path.join(PROJECT_DIR, "media")
STATIC_ROOT = os.path.join(PROJECT_DIR, "static")
MEDIA_URL = "/media/"
STATIC_URL = "/static/"
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, "site_static"),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'qomeppi59pg-(^lh7o@seb!-9d(yr@5n^=*y9w&(=!yd2p7&e^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'imageuploads.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'imageuploads.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, "templates"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'crispy_forms',
'ajaxuploader',
'images',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'd
|
jango.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers'
|
: ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
CRISPY_TEMPLATE_PACK = 'bootstrap3'
try:
execfile(os.path.join(os.path.dirname(__file__), "local_settings.py"))
except IOError:
pass
|
Azure/azure-sdk-for-python
|
sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_upload_helper.py
|
Python
|
mit
| 4,443 | 0.001576 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from azure.core.exceptions import HttpResponseError
from .._deserialize import (
process_storage_error)
from .._shared.response_handlers import return_response_headers
from .._shared.uploads_async import (
upload_data_chunks,
DataLakeFileChunkUploader, upload_substream_blocks)
def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument
return any([
modified_access_conditions.if_modified_since,
modified_access_conditions.if_unmodified_since,
modified_access_conditions.if_none_match,
modified_access_conditions.if_match
])
async def upload_datalake_file( # pylint: disable=unused-argument
client=None,
stream=None,
length=None,
overwrite=None,
validate_content=None,
max_concurrency=None,
file_
|
settings=None,
**kwargs):
try:
if length == 0:
return {}
properties = kwargs.pop('properties', None)
umask = kwargs.pop('umask', None)
permissions = kwargs.pop('permissions', None)
path_http_headers = kwargs.pop('path_http
|
_headers', None)
modified_access_conditions = kwargs.pop('modified_access_conditions', None)
chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
if not overwrite:
# if customers didn't specify access conditions, they cannot flush data to existing file
if not _any_conditions(modified_access_conditions):
modified_access_conditions.if_none_match = '*'
if properties or umask or permissions:
raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
if overwrite:
response = await client.create(
resource='file',
path_http_headers=path_http_headers,
properties=properties,
modified_access_conditions=modified_access_conditions,
umask=umask,
permissions=permissions,
cls=return_response_headers,
**kwargs)
# this modified_access_conditions will be applied to flush_data to make sure
# no other flush between create and the current flush
modified_access_conditions.if_match = response['etag']
modified_access_conditions.if_none_match = None
modified_access_conditions.if_modified_since = None
modified_access_conditions.if_unmodified_since = None
use_original_upload_path = file_settings.use_byte_buffer or \
validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \
hasattr(stream, 'seekable') and not stream.seekable() or \
not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
if use_original_upload_path:
await upload_data_chunks(
service=client,
uploader_class=DataLakeFileChunkUploader,
total_size=length,
chunk_size=chunk_size,
stream=stream,
max_concurrency=max_concurrency,
validate_content=validate_content,
**kwargs)
else:
await upload_substream_blocks(
service=client,
uploader_class=DataLakeFileChunkUploader,
total_size=length,
chunk_size=chunk_size,
max_concurrency=max_concurrency,
stream=stream,
validate_content=validate_content,
**kwargs
)
return await client.flush_data(position=length,
path_http_headers=path_http_headers,
modified_access_conditions=modified_access_conditions,
close=True,
cls=return_response_headers,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
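# Illustrative summary, not part of the original module: when overwrite=True the
# file is (re)created first and the returned etag is pinned via if_match, so the
# final flush_data succeeds only if no other writer touched the file in between.
# The body is uploaded either as fixed-size chunks (upload_data_chunks) or, for
# seekable streams above the large-chunk threshold, as substream blocks, and
# flush_data(position=length) commits the appended data.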
|
lhirschfeld/JargonBot
|
jargonbot.py
|
Python
|
mit
| 5,805 | 0.002756 |
# Lior Hirschfeld
# JargonBot
# -- Imports --
import re
import pickle
import random
import praw
from custombot import RedditBot
from time import sleep
from define import getDefinition
from collections import Counter
from nltk.stem import *
from sklearn import linear_model
# -- Setup Variables --
jargonBot = RedditBot('jargonBot')
stemmer = PorterStemmer()
with open('count.txt', 'r') as handle:
count = [line.split()[0] for line in handle.readlines()]
countStemmed = [stemmer.stem(word) for word in count]
with open('languages.pickle', 'rb') as handle:
languages = pickle.load(handle)
# -- Methods --
def jargon(lim, rate, subs, ml=False):
searchReddit(lim, rate, subs, ml)
# Search Reddit for words that need to be defined, and define them.
def searchReddit(lim, rate, subs, ml):
for sub in subs:
searchSub(sub, lim, ml)
jargonBot.updateIds()
if ml:
jargonBot.updateModels(["popularity", "wLength", "cLength"])
sleep(rate)
# Search a sub for words that need to be defined, and define them.
def searchSub(sub, lim, ml):
if sub not in languages:
analyze(sub)
subreddit = jargonBot.r.subreddit(sub)
subWords = [pair[0] for pair in languages[sub].most_common(10000)]
for submission in subreddit.hot(limit=lim):
comment_queue = submission.comments[:]
while comment_queue:
com = comment_queue.pop(0)
if not hasattr(com, 'body') or com.id in jargonBot.ids:
continue
for word in com.body.split():
# Stem the word and check if it is rare enough to be defined.
# Find the most similar word in count to the stemmed word.
word = stemmer.stem(word)
if "'" in word:
continue
if word not in subWords:
for item in countStemmed:
if item == word:
word = item
|
break
if ml:
if sub not in jargonBot.models:
jargonBot.createModel(sub, [[[1000000, 10, 10]], [10]])
# If ML, after basic checks, predict using the model
# to decide whether to reply.
if word in count:
popularity = count.index(word)
else:
|
popularity = 1000000
info = {"popularity": popularity, "wLength": len(word),
"cLength": len(com.body), "cID": com.id,
"sID": submission.id, "sub": sub}
if popularity > 10000:
# Sometimes, randomly reply to train the model.
if random.random() < jargonBot.models[sub][1]:
reply(com, word, ml, info=info)
elif jargonBot.models[sub][0].predict([[info["popularity"],
info["wLength"], info["cLength"]]]) > 0:
reply(com, word, ml, info=info)
break
else:
if word not in count[:400000]:
reply(com, word, ml)
break
jargonBot.ids.append(com.id)
comment_queue.extend(com.replies)
# Reply to a comment with a word definition.
def reply(com, word, ml, info=None):
reply = ""
# Get the definition of the word (if it exists)
result = getDefinition(word)
if result != None:
# A definition has been found.
if result[1] != "":
reply += """Definition of **{}**: {}.\n\n>*{}.*""".format(word.lower(), result[0].capitalize(),
result[1].capitalize())
else:
reply += """Definition of **{}**: {}.""".format(word.lower(), result[0].capitalize())
if ml:
reply += """\n\nI am a bot which attempts to define difficult words automatically. I use machine learning to do this, and I can use your feedback to improve. Feel free to leave a comment to let me know what you thought of this definition!"""
reply += "\n\n---------\n\n^Check ^out ^my ^[code](https://github.com/lhirschfeld/JargonBot). "
reply += " ^Please ^contact ^/u/liortulip ^with"
reply += " ^any ^questions ^or ^concerns."
try:
cID = com.reply(reply)
if ml:
info["time"] = datetime.now()
info["cID"] = cID
jargonBot.responses.append(info)
print("Replied")
except praw.exceptions.APIException as error:
print("Hit rate limit error.")
jargonBot.updateIds()
sleep(600)
# Analyze the language of a particular sub.
def analyze(sub):
print("Analyzing:", sub)
subreddit = jargonBot.r.subreddit(sub)
words = Counter()
for submission in subreddit.hot(limit=300):
comment_queue = submission.comments[:]
while comment_queue:
com = comment_queue.pop(0)
if hasattr(com, 'body'):
for word in com.body.split():
# Stem the word and add it to the counter.
word = stemmer.stem(word)
words[word] += 1
languages[sub] = words
with open('languages.pickle', 'wb') as handle:
pickle.dump(languages, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Analyzation complete.")
while True:
jargon(50, 10, ["science", "math", "askreddit"])
jargon(50, 10, ["science", "math", "askreddit"], ml=True)
print("Completed loop")
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/contrib/keras/python/keras/layers/merge.py
|
Python
|
bsd-2-clause
| 18,999 | 0.007158 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=not-callable
# pylint: disable=redefined-builtin
"""Layers can merge several input tensors into a single output tensor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.engine.topology import Layer
from tensorflow.python.framework import tensor_shape
class _Merge(Layer):
"""Generic merge layer for elementwise merge functions.
Used to implement `Sum`, `Average`, etc.
Arguments:
**kwargs: standard layer keyword arguments.
"""
def __init__(self, **kwargs):
super(_Merge, self).__init__(**kwargs)
self.supports_masking = True
def _merge_function(self, inputs):
raise NotImplementedError
def _compute_elemwise_op_output_shape(self, shape1, shape2):
"""Computes the shape of the resultant of an elementwise operation.
Arguments:
shape1: tuple or None. Shape of the first tensor
shape2: tuple or None. Shape of the second tensor
Returns:
expected output shape when an element-wise operation is
carried out on 2 tensors with shapes shape1 and shape2.
tuple or None.
Raises:
ValueError: if shape1 and shape2 are not compatible for
element-wise operations.
"""
if None in [shape1, shape2]:
return None
elif len(shape1) < len(shape2):
return self._compute_elemwise_op_output_shape(shape2, shape1)
elif not shape2:
return shape1
output_shape = list(shape1[:-len(shape2)])
for i, j in zip(shape1[-len(shape2):], shape2):
if i is None or j is None:
output_shape.append(None)
elif i == 1:
output_shape.append(j)
elif j == 1:
output_shape.append(i)
else:
if i != j:
raise ValueError('Operands could not be broadcast '
'together with shapes ' + str(shape1) + ' ' +
str(shape2))
|
output_shape.append(i)
return t
|
uple(output_shape)
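  # Worked example (illustrative): shape1=(None, 3, 4) and shape2=(4,) give
  # (None, 3, 4) -- the leading (None, 3) is copied from shape1 and the
  # trailing 4 matches 4. A dimension of 1 in either operand broadcasts to the
  # other; two differing non-1 dimensions raise the ValueError above.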
def build(self, input_shape):
# Used purely for shape validation.
if not isinstance(input_shape, list):
raise ValueError('A merge layer should be called ' 'on a list of inputs.')
if len(input_shape) < 2:
raise ValueError('A merge layer should be called '
'on a list of at least 2 inputs. '
'Got ' + str(len(input_shape)) + ' inputs.')
input_shape = [tensor_shape.TensorShape(s).as_list() for s in input_shape]
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) > 1:
raise ValueError('Can not merge tensors with different '
'batch sizes. Got tensors with shapes : ' +
str(input_shape))
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
# If the inputs have different ranks, we have to reshape them
# to make them broadcastable.
if None not in input_shape and len(set(map(len, input_shape))) == 1:
self._reshape_required = False
else:
self._reshape_required = True
self.built = True
def call(self, inputs):
if self._reshape_required:
reshaped_inputs = []
input_ndims = list(map(K.ndim, inputs))
if None not in input_ndims:
# If ranks of all inputs are available,
# we simply expand each of them at axis=1
# until all of them have the same rank.
max_ndim = max(input_ndims)
for x in inputs:
x_ndim = K.ndim(x)
for _ in range(max_ndim - x_ndim):
x = K.expand_dims(x, 1)
reshaped_inputs.append(x)
return self._merge_function(reshaped_inputs)
else:
# Transpose all inputs so that batch size is the last dimension.
# (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
transposed = False
for x in inputs:
x_ndim = K.ndim(x)
if x_ndim is None:
x_shape = K.shape(x)
batch_size = x_shape[0]
new_shape = K.concatenate([x_shape[1:], K.expand_dims(batch_size)])
x_transposed = K.reshape(x,
K.stack([batch_size, K.prod(x_shape[1:])]))
x_transposed = K.permute_dimensions(x_transposed, (1, 0))
x_transposed = K.reshape(x_transposed, new_shape)
reshaped_inputs.append(x_transposed)
transposed = True
elif x_ndim > 1:
dims = list(range(1, x_ndim)) + [0]
reshaped_inputs.append(K.permute_dimensions(x, dims))
transposed = True
else:
# We don't transpose inputs if they are 1D vectors or scalars.
reshaped_inputs.append(x)
y = self._merge_function(reshaped_inputs)
y_ndim = K.ndim(y)
if transposed:
# If inputs have been transposed, we have to transpose the output too.
if y_ndim is None:
y_shape = K.shape(y)
y_ndim = K.shape(y_shape)[0]
batch_size = y_shape[y_ndim - 1]
new_shape = K.concatenate(
[K.expand_dims(batch_size), y_shape[:y_ndim - 1]])
y = K.reshape(y, (-1, batch_size))
y = K.permute_dimensions(y, (1, 0))
y = K.reshape(y, new_shape)
elif y_ndim > 1:
dims = [y_ndim - 1] + list(range(y_ndim - 1))
y = K.permute_dimensions(y, dims)
return y
else:
return self._merge_function(inputs)
def compute_output_shape(self, input_shape):
if input_shape[0] is None:
output_shape = None
else:
output_shape = input_shape[0][1:]
for i in range(1, len(input_shape)):
if input_shape[i] is None:
shape = None
else:
shape = input_shape[i][1:]
output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
batch_sizes = [s[0] for s in input_shape if s is not None]
batch_sizes = set(batch_sizes)
batch_sizes -= set([None])
if len(batch_sizes) == 1:
output_shape = (list(batch_sizes)[0],) + output_shape
else:
output_shape = (None,) + output_shape
return output_shape
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
if not isinstance(mask, list):
raise ValueError('`mask` should be a list.')
if not isinstance(inputs, list):
raise ValueError('`inputs` should be a list.')
if len(mask) != len(inputs):
raise ValueError('The lists `inputs` and `mask` '
'should have the same length.')
if all([m is None for m in mask]):
return None
masks = [K.expand_dims(m, 0) for m in mask if m is not None]
return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
class Add(_Merge):
"""Layer that adds a list of inputs.
It takes as input a list of tensors,
all of the same shape, and returns
a single tensor (also of the same shape).
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
      output += inputs[i]
    return output
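# A minimal usage sketch for the Add layer above (assumes the public tf.keras
# API; the tensor shapes and names are illustrative, not part of this module):
if __name__ == "__main__":
    import tensorflow as tf
    x1 = tf.keras.Input(shape=(8,))
    x2 = tf.keras.Input(shape=(8,))
    y = tf.keras.layers.Add()([x1, x2])  # elementwise sum; output shape matches the inputs
    model = tf.keras.Model(inputs=[x1, x2], outputs=y)
    print(model.output_shape)  # (None, 8)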
|
gmontamat/pyaw-reporting
|
awreporting/awreporting.py
|
Python
|
apache-2.0
| 4,023 | 0.000249 |
#!/usr/bin/env python
# Copyright 2021 - Gustavo Montamat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
awreporting.py
AdWords API reporting module suitable for large scale reports.
"""
import csv
import logging
import os
import queue
import shutil
import tempfile
from time import sleep
from awreporting.accounts import get_account_ids
from awreporting.reporting_threads import ReportDownloader, ReportDecompressor, END_SIGNAL
def read_query(query_file):
try:
with open(query_file, 'r') as fin:
query = fin.read().replace('\r', '').replace('\n', ' ')
except Exception as e:
logging.exception("Could not read query file.")
return
return query
def merge_output(output, path):
first = True
with open(output, 'w') as fout:
csv_writer = csv.writer(fout, delimiter=',', quotechar='"')
for file_name in os.listdir(path):
if file_name[-4:] == '.csv':
file_path = os.path.join(path, file_name)
with open(file_path, 'r') as fin:
csv_reader = csv.reader(fin, delimiter=',', quotechar='"')
if not first:
next(csv_reader, None) # Skip headers
else:
first = False
for row in csv_reader:
csv_writer.writerow(row)
def get_report(token, awql_query, output, threads, account_ids=None):
if account_ids is None:
logging.info("Retrieving all AdWords account ids.")
account_ids = get_account_ids(token)
if not account_ids:
logging.error("No account ids where found. Check token.")
return
logging.info("Creating temporal directory.")
temporal_path = tempfile.mkdtemp()
# Create a queue with all the account ids
queue_ids = queue.Queue()
[queue_ids.put(account_id) for account_id in account_ids]
while True:
queue_decompress = queue.Queue()
queue_fails = queue.Queue()
# Initialize two decompressor threads
logging.info("Starting ReportDecompressor threads.")
for i in range(2):
report_decompressor = ReportDecompressor(
queue_decompress, queue_fails, temporal_path
)
report_decompressor.daemon = True
report_decompressor.start()
        # Initialize downloader threads pool
logging.info("Starting ReportDownloader threads.")
max_threads = min(queue_ids.qsize(), threads)
for i in range(max_threads):
if queue_ids.qsize() == 0:
break
report_downloader = ReportDownloader(
token, queue_ids, queue_decompress, awql_query, temporal_path
)
report_downloader.daemon = True
report_downloader.start()
sleep(0.1)
logging.info("Used {thread_num} threads.".format(thread_num=i + 1))
# Wait until all the account ids have been processed
queue_ids.join()
queue_ids.put(END_SIGNAL)
# Wait until all gzipped reports have been extracted
queue_decompress.join()
queue_decompress.put(END_SIGNAL)
if queue_fails.qsize() == 0:
break
# Restart job with failed downloads
queue_ids = queue.Queue()
[queue_ids.put(account_id) for account_id in queue_fails.get()]
logging.info("All reports have been obtained.")
merge_output(output, temporal_path)
shutil.rmtree(temporal_path)
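# A minimal usage sketch for get_report; the credentials path, query file and
# thread count below are illustrative assumptions, not values from this module:
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    awql = read_query("report_query.awql")  # e.g. an AWQL SELECT ... FROM ... query
    if awql:
        get_report("googleads.yaml", awql, output="report.csv", threads=10)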
|
nils-werner/SimpleCV
|
SimpleCV/examples/detection/facetrack.py
|
Python
|
bsd-3-clause
| 849 | 0.002356 |
#!/usr/bin/env python
#
# Released under the BSD license. See LICENSE file for details.
"""
This program basically does face detection and blurs the face out.
"""
print __doc__
from SimpleCV import Camera, Display, HaarCascade
# Initialize the camera
cam = Camera()
# Create the display to show the image
display = Display()
# Haar Cascade face detection, only faces
haarcascade = HaarCascade("face")
# Loop forever
while display.isNotDone():
# Get image, flip it so it looks mirrored, scale to speed things up
img = cam.getImage().flipHorizontal().scale(0.5)
# Load in trained face file
faces = img.findHaarFeatures(haarcascade)
# Pixelize the detected face
if faces:
bb = faces[-1].boundingBox()
        img = img.pixelize(10, region=(bb[0], bb[1], bb[2], bb[3]))
# Display the image
img.save(display)
|
mapycz/mapnik
|
scons/scons-local-3.0.1/SCons/Tool/gcc.py
|
Python
|
lgpl-2.1
| 3,530 | 0.003966 |
"""SCons.Tool.gcc
Tool-specific initialization for gcc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gcc.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog"
from . import cc
import os
import re
import subprocess
import SCons.Util
compilers = ['gcc', 'cc']
def generate(env):
"""Add Builders and construction variables for gcc to an Environment."""
if 'CC' not in env:
env['CC'] = env.Detect(compilers) or compilers[0]
cc.generate(env)
if env['PLATFORM'] in ['cygwin', 'win32']:
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
else:
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -fPIC')
# determine compiler version
version = detect_version(env, env['CC'])
if version:
env['CCVERSION'] = version
def exists(env):
# is executable, and is a GNU compiler (or accepts '--version' at least)
return detect_version(env, env.Detect(env.get('CC', compilers)))
def detect_version(env, cc):
"""Return the version of the GNU compiler, or None if it is not a GNU compiler."""
cc = env.subst(cc)
if not cc:
return None
version = None
#pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['-dumpversion'],
pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['--version'],
stdin = 'devnull',
stderr = 'devnull',
stdout = subprocess.PIPE)
# -dumpversion was added in GCC 3.0. As long as we're supporting
# GCC versions older than that, we should use --version and a
# regular expression.
#line = pipe.stdout.read().strip()
#if line:
# version = line
line = SCons.Util.to_str(pipe.stdout.readline())
match = re.search(r'[0-9]+(\.[0-9]+)+', line)
if match:
version = match.group(0)
# Non-GNU compiler's output (like AIX xlc's) may exceed the stdout buffer:
# So continue with reading to let the child process actually terminate.
while SCons.Util.to_str(pipe.stdout.readline()):
pass
ret = pipe.wait()
if ret != 0:
return None
return version
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
jordanemedlock/psychtruths
|
temboo/core/Library/Bitly/OAuth/InitializeOAuth.py
|
Python
|
apache-2.0
| 5,111 | 0.005087 |
# -*- coding: utf-8 -*-
###############################################################################
#
# InitializeOAuth
# Generates an authorization URL that an application can use to complete the first step in the OAuth process.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class InitializeOAuth(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the InitializeOAuth Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(InitializeOAuth, self).__init__(temboo_session, '/Library/Bitly/OAuth/InitializeOAuth')
def new_input_set(self):
return InitializeOAuthInputSet()
def _make_result_set(self, result, path):
return InitializeOAuthResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return InitializeOAuthChoreographyExecution(session, exec_id, path)
class InitializeOAuthInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the InitializeOAuth
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountName(self, value):
"""
Set the value of the AccountName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(InitializeOAuthInputSet, self)._set_input('AccountName', value)
def set_AppKeyName(self, value):
"""
Set the value of the AppKeyName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(InitializeOAuthInputSet, self)._set_input('AppKeyName', value)
def set_AppKeyValue(self, value):
"""
Set the value of the AppKeyValue input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(InitializeOAuthInputSet, self)._set_input('AppKeyValue', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((required, string) The Client ID provided by Bitly after registering your application.)
"""
super(InitializeOAuthInputSet, self)._set_input('ClientID', value)
def set_CustomCallbackID(self, value):
"""
Set the value of the CustomCallbackID input for this Choreo. ((optional, string) A unique identifier that you can pass to eliminate the need to wait for a Temboo generated CallbackID. Callback identifiers may only contain numbers, letters, periods, and hyphens.)
"""
        super(InitializeOAuthInputSet, self)._set_input('CustomCallbackID', value)
def set_ForwardingURL(self, value):
"""
        Set the value of the ForwardingURL input for this Choreo. ((optional, string) The URL that Temboo will redirect your users to after they grant access to your application. This should include the "https://" or "http://" prefix and be a fully qualified URL.)
"""
super(InitializeOAuthInputSet, self)._set_input('ForwardingURL', value)
class InitializeOAuthResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the InitializeOAuth Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_AuthorizationURL(self):
"""
Retrieve the value for the "AuthorizationURL" output from this Choreo execution. ((string) The authorization URL that the application's user needs to go to in order to grant access to your application.)
"""
return self._output.get('AuthorizationURL', None)
def get_CallbackID(self):
"""
Retrieve the value for the "CallbackID" output from this Choreo execution. ((string) An ID used to retrieve the callback data that Temboo stores once your application's user authorizes.)
"""
return self._output.get('CallbackID', None)
class InitializeOAuthChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return InitializeOAuthResultSet(response, path)
|
Exgibichi/statusquo
|
test/functional/net.py
|
Python
|
mit
| 4,228 | 0.001419 |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
import time
from test_framework.test_framework import StatusquoTestFramework
from test_framework.util import (
assert_equal,
assert_raises_jsonrpc,
connect_nodes_bi,
p2p_port,
)
class NetTest(StatusquoTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self._test_connection_count()
self._test_getnettotals()
self._test_getnetworkinginfo()
self._test_getaddednodeinfo()
self._test_getpeerinfo()
def _test_connection_count(self):
# connect_nodes_bi connects each node to the other
        assert_equal(self.nodes[0].getconnectioncount(), 2)
def _test_getnettotals(self):
# check that getnettotals totalbytesrecv and totalbytessent
# are consistent with getpeerinfo
peer_info = self.nodes[0].getpeerinfo()
assert_equal(len(peer_info), 2)
net_totals = self.nodes[0].getnettotals()
assert_equal(sum([peer['bytesrecv'] for peer in peer_info]),
net_totals['totalbytesrecv'])
assert_equal(sum([peer['bytessent'] for peer in peer_info]),
net_totals['totalbytessent'])
# test getnettotals and getpeerinfo by doing a ping
# the bytes sent/received should change
# note ping and pong are 32 bytes each
self.nodes[0].ping()
time.sleep(0.1)
peer_info_after_ping = self.nodes[0].getpeerinfo()
net_totals_after_ping = self.nodes[0].getnettotals()
for before, after in zip(peer_info, peer_info_after_ping):
assert_equal(before['bytesrecv_per_msg']['pong'] + 32, after['bytesrecv_per_msg']['pong'])
assert_equal(before['bytessent_per_msg']['ping'] + 32, after['bytessent_per_msg']['ping'])
assert_equal(net_totals['totalbytesrecv'] + 32*2, net_totals_after_ping['totalbytesrecv'])
assert_equal(net_totals['totalbytessent'] + 32*2, net_totals_after_ping['totalbytessent'])
def _test_getnetworkinginfo(self):
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
self.nodes[0].setnetworkactive(False)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
timeout = 3
while self.nodes[0].getnetworkinfo()['connections'] != 0:
# Wait a bit for all sockets to close
assert timeout > 0, 'not all connections closed in time'
timeout -= 0.1
time.sleep(0.1)
self.nodes[0].setnetworkactive(True)
connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
def _test_getaddednodeinfo(self):
assert_equal(self.nodes[0].getaddednodeinfo(), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
self.nodes[0].addnode(ip_port, 'add')
# check that the node has indeed been added
added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existant node returns an error
assert_raises_jsonrpc(-24, "Node has not been added",
self.nodes[0].getaddednodeinfo, '1.1.1.1')
def _test_getpeerinfo(self):
peer_info = [x.getpeerinfo() for x in self.nodes]
# check both sides of bidirectional connection between nodes
# the address bound to on one side will be the source address for the other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
if __name__ == '__main__':
NetTest().main()
|
cherylyli/stress-aid
|
env/lib/python3.5/site-packages/helowrld/__init__.py
|
Python
|
mit
| 45 | 0.022222 |
def tryprint():
    return ('it will be oke')
|
ytsarev/rally
|
rally/exceptions.py
|
Python
|
apache-2.0
| 6,154 | 0 |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import sys
from rally.openstack.common.gettextutils import _
from rally.openstack.common import log as logging
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class RallyException(Exception):
"""Base Rally Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.msg_fmt % kwargs
except KeyError:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
msg = "kwargs don't match in string format operation: %s"
LOG.debug(msg % kwargs, exc_info=exc_info)
if CONF.fatal_exception_format_errors:
raise exc_info[0], exc_info[1], exc_info[2]
else:
# at least get the core message out if something happened
message = self.msg_fmt
super(RallyException, self).__init__(message)
def format_message(self):
if self.__class__.__name__.endswith('_Remote'):
return self.args[0]
else:
return unicode(self)
class ImmutableException(RallyException):
msg_fmt = _("This object is immutable.")
class InvalidArgumentsException(RallyException):
msg_fmt = _("Invalid arguments: '%(message)s'")
class InvalidConfigException(RallyException):
msg_fmt = _("This config has invalid schema: `%(message)s`")
class InvalidRunnerResult(RallyException):
msg_fmt = _("Type of result of `%(name)s` runner should be"
" `base.ScenarioRunnerResult`. Got: `%(results_type)s`")
class InvalidTaskException(InvalidConfigException):
msg_fmt = _("This config is invalid: `%(message)s`")
class InvalidTaskConfigException(InvalidTaskException):
msg_fmt = _("This config has invalid schema: `%(message)s`")
class NotFoundScenarios(InvalidTaskException):
msg_fmt = _("There are no benchmark scenarios with names: `%(names)s`.")
class InvalidBenchmarkConfig(InvalidTaskException):
msg_fmt = _("Task config is invalid.\n"
"\tBenchmark %(name)s has wrong configuration of args at"
" position %(pos)s: %(args)s"
"\n\tReason: %(reason)s")
class TestException(RallyException):
msg_fmt = _("Test failed: %(test_message)s")
class NotFoundException(RallyException):
msg_fmt = _("Not found.")
class NoSuchEngine(NotFoundException):
msg_fmt = _("There is no engine with name `%(engine_name)s`.")
class NoSuchVMProvider(NotFoundException):
msg_fmt = _("There is no vm provider with name `%(vm_provider_name)s`.")
class NoSuchScenario(NotFoundException):
msg_fmt = _("There is no benchmark scenario with name `%(name)s`.")
class NoSuchRunner(NotFoundException):
msg_fmt = _("There is no benchmark runner with type `%(type)s`.")
class NoSuchContext(NotFoundException):
msg_fmt = _("There is no benchmark context with name `%(name)s`.")
class NoSuchConfigField(NotFoundException):
msg_fmt = _("There is no field in the task config with name `%(name)s`.")
class TaskNotFound(NotFoundException):
msg_fmt = _("Task with uuid=%(uuid)s not found.")
class DeploymentNotFound(NotFoundException):
msg_fmt = _("Deployment with uuid=%(uuid)s not found.")
class DeploymentIsBusy(RallyException):
msg_fmt = _("There are allocated resources for the deployment with "
"uuid=%(uuid)s.")
class ResourceNotFound(NotFoundException):
msg_fmt = _("Resource with id=%(id)s not found.")
class TimeoutException(RallyException):
msg_fmt = _("Timeout exceeded.")
class GetResourceFailure(RallyException):
msg_fmt = _("Failed to get the resource %(resource)s: %(err)s")
class GetResourceNotFound(GetResourceFailure):
msg_fmt = _("Resource %(resource)s is not found.")
class GetResourceErrorStatus(GetResourceFailure):
msg_fmt = _("Resouce %(resource)s has %(status)s status: %(fault)s")
class SSHError(RallyException):
msg_fmt = _("Remote command failed.")
class TaskInvalidStatus(RallyException):
msg_fmt = _("Task `%(uuid)s` in `%(actual)s` status but `%(require)s` is "
"required.")
class ChecksumMismatch(RallyException):
    msg_fmt = _("Checksum mismatch for image: %(url)s")
class InvalidAdminException(InvalidArgumentsException):
msg_fmt = _("user %(username)s doesn't have 'admin' role")
class InvalidEndpointsException(InvalidArgumentsException):
msg_fmt = _("wrong keystone credentials specified in your endpoint"
" properties. (HTTP 401)")
class HostUnreachableException(InvalidArgumentsException):
msg_fmt = _("unable to establish connection to the remote host: %(url)s")
class InvalidScenarioArgument(RallyException):
msg_fmt = _("Invalid scenario argument: '%(message)s'")
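# A minimal sketch of the pattern described in RallyException's docstring:
# subclass, define msg_fmt, and pass the format arguments as keyword arguments
# when raising. The exception name and kwargs below are illustrative only.
class NoSuchWidget(NotFoundException):
    msg_fmt = _("There is no widget with name `%(name)s`.")
# Example: raise NoSuchWidget(name="spinner")
# -> "There is no widget with name `spinner`."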
|
manhhomienbienthuy/scikit-learn
|
sklearn/cross_decomposition/tests/test_pls.py
|
Python
|
bsd-3-clause
| 20,619 | 0.001261 |
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition._pls import (
_center_scale_xy,
_get_first_singular_vectors_power_method,
_get_first_singular_vectors_svd,
_svd_flip_1d,
)
from sklearn.cross_decomposition import CCA
from sklearn.cross_decomposition import PLSSVD, PLSRegression, PLSCanonical
from sklearn.datasets import make_regression
from sklearn.utils import check_random_state
from sklearn.utils.extmath import svd_flip
from sklearn.exceptions import ConvergenceWarning
def assert_matrix_orthogonal(M):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)))
def test_pls_canonical_basics():
# Basic checks for PLSCanonical
d = load_linnerud()
X = d.data
Y = d.target
pls = PLSCanonical(n_components=X.shape[1])
pls.fit(X, Y)
assert_matrix_orthogonal(pls.x_weights_)
assert_matrix_orthogonal(pls.y_weights_)
assert_matrix_orthogonal(pls._x_scores)
assert_matrix_orthogonal(pls._y_scores)
# Check X = TP' and Y = UQ'
T = pls._x_scores
P = pls.x_loadings_
U = pls._y_scores
Q = pls.y_loadings_
# Need to scale first
Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(
X.copy(), Y.copy(), scale=True
)
assert_array_almost_equal(Xc, np.dot(T, P.T))
assert_array_almost_equal(Yc, np.dot(U, Q.T))
# Check that rotations on training data lead to scores
Xt = pls.transform(X)
assert_array_almost_equal(Xt, pls._x_scores)
Xt, Yt = pls.transform(X, Y)
assert_array_almost_equal(Xt, pls._x_scores)
assert_array_almost_equal(Yt, pls._y_scores)
# Check that inverse_transform works
X_back = pls.inverse_transform(Xt)
assert_array_almost_equal(X_back, X)
_, Y_back = pls.inverse_transform(Xt, Yt)
assert_array_almost_equal(Y_back, Y)
def test_sanity_check_pls_regression():
# Sanity check for PLSRegression
# The results were checked against the R-packages plspm, misOmics and pls
d = load_linnerud()
X = d.data
Y = d.target
pls = PLSRegression(n_components=X.shape[1])
X_trans, _ = pls.fit_transform(X, Y)
# FIXME: one would expect y_trans == pls.y_scores_ but this is not
# the case.
# xref: https://github.com/scikit-learn/scikit-learn/issues/22420
assert_allclose(X_trans, pls.x_scores_)
expected_x_weights = np.array(
[
[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983],
]
)
expected_x_loadings = np.array(
[
[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983],
]
)
expected_y_weights = np.array(
[
[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916],
]
)
expected_y_loadings = np.array(
[
[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916],
]
)
assert_array_almost_equal(np.abs(pls.x_loadings_), np.abs(expected_x_loadings))
assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
# The R / Python difference in the signs should be consistent across
# loadings, weights, etc.
x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)
def test_sanity_check_pls_regression_constant_column_Y():
# Check behavior when the first column of Y is constant
# The results are checked against a modified version of plsreg2
# from the R-package plsdepot
d = load_linnerud()
X = d.data
Y = d.target
Y[:, 0] = 1
pls = PLSRegression(n_components=X.shape[1])
pls.fit(X, Y)
expected_x_weights = np.array(
[
[-0.6273573, 0.007081799, 0.7786994],
[-0.7493417, -0.277612681, -0.6011807],
[-0.2119194, 0.960666981, -0.1794690],
]
)
expected_x_loadings = np.array(
[
[-0.6273512, -0.22464538, 0.7786994],
[-0.6643156, -0.09871193, -0.6011807],
[-0.5125877, 1.01407380, -0.1794690],
]
)
expected_y_loadings = np.array(
[
[0.0000000, 0.0000000, 0.0000000],
[0.4357300, 0.5828479, 0.2174802],
[-0.1353739, -0.2486423, -0.1810386],
]
)
assert_array_almost_equal(np.abs(expected_x_weights), np.abs(pls.x_weights_))
assert_array_almost_equal(np.abs(expected_x_loadings), np.abs(pls.x_loadings_))
# For the PLSRegression with default parameters, y_loadings == y_weights
assert_array_almost_equal(np.abs(pls.y_loadings_), np.abs(expected_y_loadings))
assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_loadings))
x_loadings_sign_flip = np.sign(expected_x_loadings / pls.x_loadings_)
x_weights_sign_flip = np.sign(expected_x_weights / pls.x_weights_)
# we ignore the first full-zeros row for y
y_loadings_sign_flip = np.sign(expected_y_loadings[1:] / pls.y_loadings_[1:])
assert_array_equal(x_loadings_sign_flip, x_weights_sign_flip)
assert_array_equal(x_loadings_sign_flip[1:], y_loadings_sign_flip)
def test_sanity_check_pls_canonical():
# Sanity check for PLSCanonical
# The results were checked against the R-package plspm
d = load_linnerud()
X = d.data
Y = d.target
pls = PLSCanonical(n_components=X.shape[1])
pls.fit(X, Y)
expected_x_weights = np.array(
[
[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271],
]
)
expected_x_rotations = np.array(
[
[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788],
]
)
expected_y_weights = np.array(
[
[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016],
]
)
expected_y_rotations = np.array(
[
[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826],
]
)
assert_array_almost_equal(np.abs(pls.x_rotations_), np.abs(expected_x_rotations))
assert_array_almost_equal(np.abs(pls.x_weights_), np.abs(expected_x_weights))
assert_array_almost_equal(np.abs(pls.y_rotations_), np.abs(expected_y_rotations))
assert_array_almost_equal(np.abs(pls.y_weights_), np.abs(expected_y_weights))
    x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)
x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)
y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_matrix_orthogonal(pls.x_weights_)
    assert_matrix_orthogonal(pls.y_weights_)
assert_matrix_orthogonal(pls._x_scores)
assert_matrix_orthogonal(pls._y_scores)
def test_sanity_check_pls_canonical_random():
#
|
mammique/django
|
django/contrib/auth/tests/auth_backends.py
|
Python
|
bsd-3-clause
| 15,671 | 0.002106 |
from __future__ import unicode_literals
from datetime import date
from django.conf import settings
from django.contrib.auth.models import User, Group, Permission, AnonymousUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.tests.custom_user import ExtensionUser, CustomPermissionsUser, CustomUser
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.contrib.auth import authenticate
from django.test import TestCase
from django.test.utils import override_settings
class BaseModelBackendTest(object):
"""
A base class for tests that need to validate the ModelBackend
with different User models. Subclasses should define a class
level UserModel attribute, and a create_users() method to
construct two users for test purposes.
"""
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.create_users()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
# The custom_perms test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = False
user.is_superuser = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
# reloading user to purge the _perm_cache
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions() == set(['auth.test']), True)
self.assertEqual(user.get_group_permissions(), set([]))
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
user.save()
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user.save()
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions(), set(['auth.test2', 'auth.test', 'auth.test3']))
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
group.save()
user.groups.add(group)
user = self.UserModel._default_manager.get(pk=self.user.pk)
exp = set(['auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'])
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), set(['auth.test_group']))
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set([]))
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), set(['auth.test']))
def test_get_all_superuser_permissions(self):
"A superuser has all permissions. Refs #14795"
user = self.UserModel._default_manager.get(pk=self.superuser.pk)
self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))
@skipIfCustomUser
class ModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the default User model.
"""
UserModel = User
def create_users(self):
self.user = User.objects.create_user(
username='test',
email='test@example.com',
password='test',
)
self.superuser = User.objects.create_superuser(
username='test2',
email='test2@example.com',
password='test',
)
@override_settings(AUTH_USER_MODEL='auth.ExtensionUser')
class ExtensionUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the custom ExtensionUser model.
This isn't a perfect test, because both the User and ExtensionUser are
synchronized to the database, which wouldn't ordinary happen in
production. As a result, it doesn't catch errors caused by the non-
existence of the User table.
The specific problem is queries on .filter(groups__user) et al, which
makes an implicit assumption that the user model is called 'User'. In
production, the auth.User table won't exist, so the requested join
won't exist either; in testing, the auth.User *does* exist, and
so does the join. However, the join table won't contain any useful
data; for testing, we check that the data we expect actually does exist.
"""
UserModel = ExtensionUser
def create_users(self):
self.user = ExtensionUser._default_manager.create_user(
username='test'
|
,
email='test@example.com',
|
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = ExtensionUser._default_manager.create_superuser(
username='test2',
email='test2@example.com',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth.CustomPermissionsUser')
class CustomPermissionsUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the CustomPermissionsUser model.
As with the ExtensionUser test, this isn't a perfect test, because both
the User and CustomPermissionsUser are synchronized to the database,
which wouldn't ordinary happen in production.
"""
UserModel = CustomPermissionsUser
def create_users(self):
self.user = CustomPermissionsUser._default_manager.create_user(
email='test@example.com',
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = CustomPermissionsUser._default_manager.create_superuser(
email='test2@example.com',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserModelBackendAuthenticateTest(TestCase):
"""
Tests that the model backend can accept a cred
|
teoreteetik/api-snippets
|
lookups/lookup-get-cname-example-1/lookup-get-cname-example-1.6.x.py
|
Python
|
mit
| 417 | 0 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACCOUNT_SID"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
number = client.lookups.phone_numbers("+16502530000").fetch(
type="caller-name",
)
print(number.carrier['type'])
print(number.carrier['name'])
|
kikusu/chainer
|
chainer/iterators/__init__.py
|
Python
|
mit
| 214 | 0 |
from chainer.iterators import multiprocess_iterator
from chainer.iterators import serial_iterator
MultiprocessIterator = multiprocess_iterator.MultiprocessIterator
SerialIterator = serial_iterator.SerialIterator
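# A minimal usage sketch (assumes Chainer's standard iterator API; the toy
# dataset and batch size are illustrative):
if __name__ == "__main__":
    it = SerialIterator([1, 2, 3, 4], batch_size=2, repeat=False, shuffle=False)
    for batch in it:
        print(batch)  # [1, 2] then [3, 4]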
|
bomjacob/htxaarhuslan
|
main/migrations/0027_auto_20170103_1130.py
|
Python
|
mit
| 977 | 0.001028 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-03 10:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('main', '0026_auto_20161215_2204'),
]
operations = [
migrations.AddField(
model_name='lan',
name='show_calendar',
field=models.BooleanField(default=False, help_text='Hvorvidt en kalender skal vises på forsiden. Slå kun dette til hvis turneringer og andre events efterhånden er ved at være klar.', verbose_name='Vis kalender'),
),
migrations.AddField(
model_name='tournament',
name='end',
field=models.DateTimeField(null=True, verbose_name='Slut'),
),
migrations.AddField(
model_name='tournament',
name='start',
field=models.DateTimeField(null=True, verbose_name='Start'),
),
]
|
codexgigassys/codex-backend
|
src/PlugIns/PE/CypherPlug.py
|
Python
|
mit
| 513 | 0 |
# Copyright (C) 2016 Deloitte Argentina.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
from PlugIns.PlugIn import PlugIn
class CypherPlug(PlugIn):
def __init__(self, sample=None):
PlugIn.__init__(self, sample)
def getPath(self):
return "particular_header.cypher"
def getName(self):
return "cypher"
def getVersion(self):
return 1
def process(self):
        return "Not_implemented"
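# A minimal usage sketch (the sample argument is illustrative; the PlugIn base
# class interface is assumed from the constructor above):
if __name__ == "__main__":
    plug = CypherPlug(sample=None)
    print(plug.getName(), plug.getVersion(), plug.getPath())
    print(plug.process())  # currently returns "Not_implemented"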
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/internet/iocpreactor/const.py
|
Python
|
bsd-3-clause
| 550 | 0.001818 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows constants for IOCP
"""
# this stuff should really be gotten from Windows headers via pyrex, but it
# probably is not going to change
ERROR_PORT_UNREACHABLE = 1234
ERROR_NETWORK_UNREACHABLE = 1231
ERROR_CONNECTION_REFUSED = 1225
ERROR_IO_PENDING = 997
ERROR_OPERATION_ABORTED = 995
WAIT_TIMEOUT = 258
ERROR_NETNAME_DELETED = 64
ERROR_HANDLE_EOF = 38
INFINITE = -1
SO_UPDATE_CONNECT_CONTEXT = 0x7010
SO_UPDATE_ACCEPT_CONTEXT = 0x700B
|
antoinecarme/sklearn2sql_heroku
|
tests/regression/boston/ws_boston_LGBMRegressor_postgresql_code_gen.py
|
Python
|
bsd-3-clause
| 131 | 0.015267 |
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("LGBMRegressor", "boston", "postgresql")
|
sjdv1982/seamless
|
tests/lowlevel/simple-remote.py
|
Python
|
mit
| 1,438 | 0.002086 |
# run scripts/jobslave-nodatabase.py
import os
os.environ["SEAMLESS_COMMUNION_ID"] = "simple-remote"
os.environ["SEAMLESS_COMMUNION_INCOMING"] = "localhost:8602"
import seamless
seamless.set_ncores(0)
from seamless import communion_server
communion_server.configure_master(
buffer=True,
transformation_job=True,
transformation_status=True,
)
from seamless.core import context, cell, transformer, unilink
ctx = context(toplevel=True)
ctx.cell1 = cell().set(1)
ctx.cell2 = cell().set(2)
ctx.result = cell()
ctx.tf = transformer({
    "a": "input",
"b": "input",
"c": "output"
})
ctx.cell1_unilink = unilink(ctx.cell1)
ctx.cell1_unilink.connect(ctx.tf.a)
ctx.cell2.connect(ctx.tf.b)
ctx.code = cell("transformer").set("c = a + b")
ctx.code.connect(ctx.tf.code)
ctx.result_unilink = unilink(ctx.result)
ctx.tf.c.connect(ctx.result_unilink)
ctx.result_copy = cell()
ctx.result.connect(ctx.result_copy)
ctx.compute(0.1)
print(ctx.cell1.value)
print(ctx.code.value)
ctx.compute()
print(ctx.result.value, ctx.status)
print(ctx.tf.exception)
ctx.cell1.set(10)
ctx.compute()
print(ctx.result.value, ctx.status)
ctx.code.set("c = a + b + 1000")
ctx.compute()
print(ctx.result.value, ctx.status)
print("Introduce delay...")
ctx.code.set("import time; time.sleep(2); c = -(a + b)")
ctx.compute(1.0)
print("after 1.0 sec...")
print(ctx.result.value, ctx.status)
print("...")
ctx.compute()
print(ctx.result.value, ctx.status)
|
cms-externals/sherpa
|
MODEL/UFO/templates.py
|
Python
|
gpl-3.0
| 427 | 0.009368 |
import pkg_resources
from string import Template
model_template = Template(pkg_resources.resource_string(__name__, "model_template.C"))
lorentz_calc_template = Template(pkg_resources.resource_string(__name__, "lorentz_calc_template.C"))
sconstruct_template = Template(pkg_resources.resource_string(__name__, "sconstruct_template"))
run_card_template = Template(pkg_resources.resource_string(__name__, "run_card_template"))
|
a113n/bcbio-nextgen
|
bcbio/pipeline/cleanbam.py
|
Python
|
mit
| 8,337 | 0.003958 |
"""Clean an input BAM file to work with downstream pipelines.
GATK and Picard based pipelines have specific requirements for
chromosome order, run group information and other BAM formatting.
This provides a pipeline to prepare and resort an input.
"""
import os
import sys
import pysam
from bcbio import bam, broad, utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.heterogeneity import chromhacks
from bcbio.ngsalign import novoalign
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
def fixrg(in_bam, names, ref_file, dirs, data):
"""Fix read group in a file, using samtools addreplacerg.
addreplacerg does not remove the old read group, causing confusion when
checking. We use reheader to work around this
"""
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data)))
out_file = os.path.join(work_dir, "%s-fixrg.bam" % utils.splitext_plus(os.path.basename(in_bam))[0])
if not utils.file_exists(out_file):
out_file = os.path.join(work_dir, "%s-fixrg.bam" % dd.get_sample_name(data))
if not utils.file_uptodate(out_file, in_bam):
with file_transaction(data, out_file) as tx_out_file:
rg_info = novoalign.get_rg_info(names)
new_header = "%s-header.txt" % os.path.splitext(out_file)[0]
cores = dd.get_cores(data)
do.run("samtools view -H {in_bam} | grep -v ^@RG > {new_header}".format(**locals()),
"Create empty RG header: %s" % dd.get_sample_name(data))
cmd = ("samtools reheader {new_header} {in_bam} | "
"samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} -")
            do.run(cmd.format(**locals()), "Fix read groups: %s" % dd.get_sample_name(data))
return out_file
def remove_extracontigs(in_bam, data):
"""Remove extra contigs (non chr1-22,X,Y) from an input BAM.
These extra contigs can often be arranged in different ways, causing
incompatibility issues with GATK and other tools. This also fixes the
read group header as in fixrg.
This does not yet handle mapping over 1 -> chr1 issues since this requires
a ton of search/replace which slows down conversion.
"""
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data)))
out_file = os.path.join(work_dir, "%s-noextras.bam" % utils.splitext_plus(os.path.basename(in_bam))[0])
if not utils.file_exists(out_file):
out_file = os.path.join(work_dir, "%s-noextras.bam" % dd.get_sample_name(data))
if not utils.file_uptodate(out_file, in_bam):
with file_transaction(data, out_file) as tx_out_file:
target_chroms = _target_chroms_and_header(in_bam, data)
str_chroms = " ".join(target_chroms)
rg_info = novoalign.get_rg_info(data["rgnames"])
bcbio_py = sys.executable
ref_file = dd.get_ref_file(data)
local_bam = os.path.join(os.path.dirname(tx_out_file), os.path.basename(in_bam))
cores = dd.get_cores(data)
utils.symlink_plus(in_bam, local_bam)
bam.index(local_bam, data["config"])
cmd = ("samtools view -@ {cores} -h {local_bam} {str_chroms} | "
"""{bcbio_py} -c 'from bcbio.pipeline import cleanbam; """
"""cleanbam.fix_header("{ref_file}")' | """
"samtools view -@ {cores} -u - | "
"samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} - ")
do.run(cmd.format(**locals()), "bamprep, remove extra contigs: %s" % dd.get_sample_name(data))
return out_file
def _target_chroms_and_header(bam_file, data):
"""Get a list of chromosomes to target and new updated ref_file header.
Could potentially handle remapping from chr1 -> 1 but currently disabled due
to speed issues.
"""
special_remaps = {"chrM": "MT", "MT": "chrM"}
target_chroms = dict([(x.name, i) for i, x in enumerate(ref.file_contigs(dd.get_ref_file(data)))
if chromhacks.is_autosomal_or_sex(x.name)])
out_chroms = []
with pysam.Samfile(bam_file, "rb") as bamfile:
for bami, bam_contig in enumerate([c["SN"] for c in bamfile.header["SQ"]]):
if bam_contig in target_chroms:
target_chrom = bam_contig
elif bam_contig in special_remaps and special_remaps[bam_contig] in target_chroms:
target_chrom = special_remaps[bam_contig]
elif bam_contig.startswith("chr") and bam_contig.replace("chr", "") in target_chroms:
target_chrom = bam_contig.replace("chr", "")
elif "chr%s" % bam_contig in target_chroms:
target_chrom = "chr%s" % bam_contig
else:
target_chrom = None
# target_chrom == bam_contig ensures we don't try chr1 -> 1 style remapping
if target_chrom and target_chrom == bam_contig:
# Order not required if dealing with SAM file header fixing
#assert bami == target_chroms[target_chrom], \
# ("remove_extracontigs: Non-matching order of standard contig: %s %s (%s vs %s)" %
# (bam_file, target_chrom, bami, target_chroms[target_chrom]))
out_chroms.append(target_chrom)
assert out_chroms, ("remove_extracontigs: Did not find any chromosomes in reference file: %s %s" %
(bam_file, target_chroms))
return out_chroms
def fix_header(ref_file):
added_ref = False
for line in sys.stdin:
# skip current read groups, since adding new
# skip current contigs since adding new sequence dictionary
if line.startswith(("@RG", "@SQ")):
pass
elif not added_ref and not line.startswith("@"):
for x in ref.file_contigs(ref_file):
sys.stdout.write("@SQ\tSN:%s\tLN:%s\n" % (x.name, x.size))
added_ref = True
else:
sys.stdout.write(line)
def picard_prep(in_bam, names, ref_file, dirs, data):
"""Prepare input BAM using Picard and GATK cleaning tools.
- ReorderSam to reorder file to reference
- AddOrReplaceReadGroups to add read group information and coordinate sort
- PrintReads to filters to remove problem records:
- filterMBQ to remove reads with mismatching bases and base qualities
"""
runner = broad.runner_from_path("picard", data["config"])
work_dir = utils.safe_makedir(os.path.join(dirs["work"], "bamclean", names["sample"]))
runner.run_fn("picard_index_ref", ref_file)
reorder_bam = os.path.join(work_dir, "%s-reorder.bam" %
os.path.splitext(os.path.basename(in_bam))[0])
if not utils.file_exists(reorder_bam):
reorder_bam = os.path.join(work_dir, "%s-reorder.bam" % dd.get_sample_name(data))
reorder_bam = runner.run_fn("picard_reorder", in_bam, ref_file, reorder_bam)
rg_bam = runner.run_fn("picard_fix_rgs", reorder_bam, names)
return _filter_bad_reads(rg_bam, ref_file, data)
def _filter_bad_reads(in_bam, ref_file, data):
"""Use GATK filter to remove problem reads which choke GATK and Picard.
"""
bam.index(in_bam, data["config"])
out_file = "%s-gatkfilter.bam" % os.path.splitext(in_bam)[0]
if not utils.file_exists(out_file):
with tx_tmpdir(data) as tmp_dir:
with file_transaction(data, out_file) as tx_out_file:
params = [("FixMisencodedBaseQualityReads"
if dd.get_quality_format(data, "").lower() == "illumina"
else "PrintReads"),
"-R", ref_file,
"-I", in_bam,
"-O", tx_out_file,
"-RF", "MatchingBasesAndQualsReadFilter",
"-RF", "SeqIsStoredReadFilter",
"-RF", "CigarContainsNoNOperator"]
jvm_opts = broad.get_gatk_opts(data["config"], t
|
brain0/archweb
|
devel/management/commands/pgp_import.py
|
Python
|
gpl-2.0
| 8,172 | 0.000734 |
# -*- coding: utf-8 -*-
"""
pgp_import command
Import keys and signatures from a given GPG keyring.
Usage: ./manage.py pgp_import <keyring_path>
"""
from collections import namedtuple, OrderedDict
from datetime import datetime
import logging
from pytz import utc
import subprocess
import sys
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from devel.models import DeveloperKey, PGPSignature
from devel.utils import UserFinder
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s -> %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
stream=sys.stderr)
logger = logging.getLogger()
class Command(BaseCommand):
args = "<keyring_path>"
help = "Import keys and signatures from a given GPG keyring."
def handle(self, *args, **options):
v = int(options.get('verbosity', None))
if v == 0:
logger.level = logging.ERROR
elif v == 1:
logger.level = logging.INFO
elif v >= 2:
logger.level = logging.DEBUG
if len(args) < 1:
raise CommandError("keyring_path must be provided")
import_keys(args[0])
import_signatures(args[0])
def get_date(epoch_string):
'''Convert a epoch string into a python 'date' object (not datetime).'''
if not epoch_string:
return None
return datetime.utcfromtimestamp(int(epoch_string)).date()
def get_datetime(epoch_string):
'''Convert a epoch string into a python 'datetime' object.'''
if not epoch_string:
return None
return datetime.utcfromtimestamp(int(epoch_string)).replace(tzinfo=utc)
def call_gpg(keyring, *args):
# GPG is stupid and interprets any filename without path portion as being
# in ~/.gnupg/. Fake it out if we just get a bare filename.
if '/' not in keyring:
keyring = './%s' % keyring
gpg_cmd = ["gpg2", "--no-default-keyring", "--keyring", keyring,
"--with-colons", "--fixed-list-mode"]
gpg_cmd.extend(args)
logger.info("running command: %s", ' '.join(gpg_cmd))
proc = subprocess.Popen(gpg_cmd, stdout=subprocess.PIPE)
outdata, errdata = proc.communicate()
if proc.returncode != 0:
logger.error(errdata)
raise subprocess.CalledProcessError(proc.returncode, gpg_cmd)
return outdata
class KeyData(object):
def __init__(self, key, created, expires):
self.key = key
self.created = get_datetime(created)
self.expires = get_datetime(expires)
self.parent = None
self.revoked = None
self.db_id = None
def parse_keydata(data):
keys = OrderedDict()
current_pubkey = None
# parse all of the output from our successful GPG command
logger.info("parsing command output")
node = None
for line in data.split('\n'):
parts = line.split(':')
if parts[0] == 'pub':
key = parts[4]
current_pubkey = key
keys[key] = KeyData(key, parts[5], parts[6])
node = parts[0]
elif parts[0] == 'sub':
key = parts[4]
keys[key] = KeyData(key, parts[5], parts[6])
keys[key].parent = current_pubkey
node = parts[0]
elif parts[0] == 'uid':
node = parts[0]
elif parts[0] == 'rev' and node in ('pub', 'sub'):
keys[current_pubkey].revoked = get_datetime(parts[5])
return keys
def find_key_owner(key, keys, finder):
'''Recurse up the chain, looking for an owner.'''
if key is None:
return None
owner = finder.find_by_pgp_key(key.key)
if owner:
return owner
if key.parent:
return find_key_owner(keys[key.parent], keys, finder)
return None
def import_keys(keyring):
outdata = call_gpg(keyring, "--list-sigs")
keydata = parse_keydata(outdata)
logger.info("creating or finding %d keys", len(keydata))
created_ct = updated_ct = 0
with transaction.atomic():
finder = UserFinder()
# we are dependent on parents coming before children; parse_keydata
# uses an OrderedDict to ensure this is the case.
        for data in keydata.values():
parent_id = None
if data.parent:
parent_data = keydata.get(data.parent, None)
if parent_data:
parent_id = parent_data.db_id
other = {
'expires': data.expires,
'revoked': data.revoked,
'parent_id': parent_id,
}
dkey, created = DeveloperKey.objects.get_or_create(
                key=data.key, created=data.created, defaults=other)
data.db_id = dkey.id
# set or update any additional data we might need to
needs_save = False
if created:
created_ct += 1
else:
for k, v in other.items():
if getattr(dkey, k) != v:
setattr(dkey, k, v)
needs_save = True
if dkey.owner_id is None:
owner = find_key_owner(data, keydata, finder)
if owner is not None:
dkey.owner = owner
needs_save = True
if needs_save:
dkey.save()
updated_ct += 1
key_ct = DeveloperKey.objects.all().count()
logger.info("%d total keys in database", key_ct)
logger.info("created %d, updated %d keys", created_ct, updated_ct)
class SignatureData(object):
def __init__(self, signer, signee, created):
self.signer = signer
self.signee = signee
self.created = created
self.expires = None
self.revoked = None
def parse_sigdata(data):
nodes = {}
edges = []
current_pubkey = None
# parse all of the output from our successful GPG command
logger.info("parsing command output")
for line in data.split('\n'):
parts = line.split(':')
if parts[0] == 'pub':
current_pubkey = parts[4]
nodes[current_pubkey] = None
elif parts[0] == 'uid':
uid = parts[9]
# only set uid if this is the first one encountered
if nodes[current_pubkey] is None:
nodes[current_pubkey] = uid
elif parts[0] == 'sig':
signer = parts[4]
created = get_date(parts[5])
edge = SignatureData(signer, current_pubkey, created)
if parts[6]:
edge.expires = get_date(parts[6])
edges.append(edge)
elif parts[0] == 'rev':
signer = parts[4]
revoked = get_date(parts[5])
# revoke any prior edges that match
matches = [e for e in edges if e.signer == signer
and e.signee == current_pubkey]
for edge in matches:
edge.revoked = revoked
return nodes, edges
def import_signatures(keyring):
outdata = call_gpg(keyring, "--list-sigs")
nodes, edges = parse_sigdata(outdata)
# now prune the data down to what we actually want.
# prune edges not in nodes, remove duplicates, and self-sigs
pruned_edges = {edge for edge in edges
if edge.signer in nodes and edge.signer != edge.signee}
logger.info("creating or finding up to %d signatures", len(pruned_edges))
created_ct = updated_ct = 0
with transaction.atomic():
for edge in pruned_edges:
sig, created = PGPSignature.objects.get_or_create(
signer=edge.signer, signee=edge.signee,
created=edge.created, expires=edge.expires,
defaults={ 'revoked': edge.revoked })
if sig.revoked != edge.revoked:
sig.revoked = edge.revoked
sig.save()
updated_ct += 1
if created:
created_ct += 1
sig_ct = PGPSignature.objects.all().count()
logger.info("%d total signatures in database", sig_ct)
logger.info("created %d, updated %d signatures", created_ct, updated_ct)
# vim: set ts=4 sw=4 et:
|
qxf2/qxf2-page-object-model
|
utils/Base_Logging.py
|
Python
|
mit
| 4,369 | 0.013733 |
"""
Qxf2 Services: A plug-n-play class for logging.
This class wraps around Python's loguru module.
"""
import os, inspect
import pytest,logging
from loguru import logger
from pytest_reportportal import RPLogger, RPLogHandler
class Base_Logging():
"A plug-n-play class for logging"
def __init__(self,log_file_name=None,level="DEBUG",format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {module} | {message}"):
"Constructor for the logging class"
self.log_file_name=log_file_name
self.log_file_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','log'))
self.level=level
self.format=format
self.log = self.set_log(self.log_file_name,self.level,self.format)
self.rp_logger = None
    def set_log(self,log_file_name,level,format,test_module_name=None):
"Add an handler sending log messages to a sink"
if test_module_name is None:
test_module_name = self.get_calling_module()
        if not os.path.exists(self.log_file_dir):
os.makedirs(self.log_file_dir)
if log_file_name is None:
log_file_name = self.log_file_dir + os.sep + test_module_name + '.log'
else:
log_file_name = self.log_file_dir + os.sep + log_file_name
logger.add(log_file_name,level=level,format=format,
rotation="30 days", filter=None, colorize=None, serialize=False, backtrace=True, enqueue=False, catch=True)
def get_calling_module(self):
"Get the name of the calling module"
calling_file = inspect.stack()[-1][1]
if 'runpy' in calling_file:
calling_file = inspect.stack()[4][1]
calling_filename = calling_file.split(os.sep)
        #This logic brought to you by windows + cygwin + git bash
if len(calling_filename) == 1: #Needed for
calling_filename = calling_file.split('/')
self.calling_module = calling_filename[-1].split('.')[0]
return self.calling_module
def setup_rp_logging(self, rp_pytest_service):
"Setup reportportal logging"
try:
# Setting up a logging.
logging.setLoggerClass(RPLogger)
self.rp_logger = logging.getLogger(__name__)
self.rp_logger.setLevel(logging.INFO)
# Create handler for Report Portal.
rp_handler = RPLogHandler(rp_pytest_service)
# Set INFO level for Report Portal handler.
rp_handler.setLevel(logging.INFO)
return self.rp_logger
except Exception as e:
self.write("Exception when trying to set rplogger")
self.write(str(e))
self.exceptions.append("Error when setting up the reportportal logger")
def write(self,msg,level='info'):
"Write out a message"
#fname = inspect.stack()[2][3] #May be use a entry-exit decorator instead
all_stack_frames = inspect.stack()
for stack_frame in all_stack_frames[1:]:
if 'Base_Page' not in stack_frame[1]:
break
fname = stack_frame[3]
d = {'caller_func': fname}
if self.rp_logger:
if level.lower()== 'debug':
self.rp_logger.debug(msg=msg)
elif level.lower()== 'info':
self.rp_logger.info(msg)
elif level.lower()== 'warn' or level.lower()=='warning':
self.rp_logger.warning(msg)
elif level.lower()== 'error':
self.rp_logger.error(msg)
elif level.lower()== 'critical':
self.rp_logger.critical(msg)
else:
self.rp_logger.critical(msg)
return
if level.lower()== 'debug':
logger.debug("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'info':
logger.info("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'warn' or level.lower()=='warning':
logger.warning("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'error':
logger.error("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'critical':
logger.critical("{module} | {msg}",module=d['caller_func'],msg=msg)
else:
logger.critical("Unknown level passed for the msg: {}", msg)
|
rvmoura96/projeto-almoxarifado
|
mysite/urls.py
|
Python
|
mit
| 838 | 0 |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
url(r'', include('almoxarifado.urls')),
]
|
LCOGT/valhalla
|
valhalla/proposals/migrations/0017_auto_20181109_1828.py
|
Python
|
gpl-3.0
| 1,094 | 0.001828 |
# Generated by Django 2.1.3 on 2018-11-09 18:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0016_auto_20180405_2116'),
]
operations = [
migrations.AlterField(
model_name='timeallocation',
name='instrument_name',
            field=models.CharField(choices=[('0M4-SCICAM-SBIG', '0M4-SCICAM-SBIG'), ('0M8-NRES-SCICAM', '0M8-NRES-SCICAM'), ('0M8-SCICAM-SBIG', '0M8-SCICAM-SBIG'), ('1M0-NRES-SCICAM', '1M0-NRES-SCICAM'), ('1M0-SCICAM-SINISTRO', '1M0-SCICAM-SINISTRO'), ('1M0-SCICAM-SBIG', '1M0-SCICAM-SBIG'), ('1M0-NRES-COMMISSIONING', '1M0-NRES-COMMISSIONING'), ('2M0-FLOYDS-SCICAM', '2M0-FLOYDS-SCICAM'), ('2M0-SCICAM-SPECTRAL', '2M0-SCICAM-SPECTRAL'), ('2M0-SCICAM-SBIG', '2M0-SCICAM-SBIG')], max_length=200),
),
migrations.AlterField(
            model_name='timeallocation',
name='telescope_class',
field=models.CharField(choices=[('0m4', '0m4'), ('0m8', '0m8'), ('1m0', '1m0'), ('2m0', '2m0')], max_length=20),
),
]
|
lukas-hetzenecker/home-assistant
|
homeassistant/components/transport_nsw/sensor.py
|
Python
|
apache-2.0
| 4,474 | 0 |
"""Support for Transport NSW (AU) to query next leave event."""
from datetime import timedelta
from TransportNSW import TransportNSW
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_MODE,
CONF_API_KEY,
CONF_NAME,
TIME_MINUTES,
)
import homeassistant.helpers.config_validation as cv
ATTR_STOP_ID = "stop_id"
ATTR_ROUTE = "route"
ATTR_DUE_IN = "due"
ATTR_DELAY = "delay"
ATTR_REAL_TIME = "real_time"
ATTR_DESTINATION = "destination"
ATTRIBUTION = "Data provided by Transport NSW"
CONF_STOP_ID = "stop_id"
CONF_ROUTE = "route"
CONF_DESTINATION = "destination"
DEFAULT_NAME = "Next Bus"
ICONS = {
"Train": "mdi:train",
"Lightrail": "mdi:tram",
"Bus": "mdi:bus",
"Coach": "mdi:bus",
"Ferry": "mdi:ferry",
"Schoolbus": "mdi:bus",
"n/a": "mdi:clock",
None: "mdi:clock",
}
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STOP_ID): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_ROUTE, default=""): cv.string,
vol.Optional(CONF_DESTINATION, default=""): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Transport NSW sensor."""
stop_id = config[CONF_STOP_ID]
api_key = config[CONF_API_KEY]
route = config.get(CONF_ROUTE)
destination = config.get(CONF_DESTINATION)
name = config.get(CONF_NAME)
data = PublicTransportData(stop_id, route, destination, api_key)
add_entities([TransportNSWSensor(data, stop_id, name)], True)
class TransportNSWSensor(SensorEntity):
"""Implementation of an Transport NSW sensor."""
def __init__(self, data, stop_id, name):
"""Initialize the sensor."""
self.data = data
self._name = name
self._stop_id = stop_id
self._times = self._state = None
self._icon = ICONS[None]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if self._times is not None:
return {
ATTR_DUE_IN: self._times[ATTR_DUE_IN],
ATTR_STOP_ID: self._stop_id,
ATTR_ROUTE: self._times[ATTR_ROUTE],
ATTR_DELAY: self._times[ATTR_DELAY],
ATTR_REAL_TIME: self._times[ATTR_REAL_TIME],
ATTR_DESTINATION: self._times[ATTR_DESTINATION],
ATTR_MODE: self._times[ATTR_MODE],
ATTR_ATTRIBUTION: ATTRIBUTION,
}
@property
def native_unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return TIME_MINUTES
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
def update(self):
"""Get the latest data from Transport NSW and update the states."""
self.data.update()
self._times = self.data.info
self._state = self._times[ATTR_DUE_IN]
self._icon = ICONS[self._times[ATTR_MODE]]
class PublicTransportData:
"""The Class for handling the data retrieval."""
def __init__(self, stop_id, route, destination, api_key):
"""Initialize the data object."""
self._stop_id = stop_id
self._route = route
self._destination = destination
self._api_key = api_key
self.info = {
ATTR_ROUTE: self._route,
ATTR_DUE_IN: "n/a",
ATTR_DELAY: "n/a",
ATTR_REAL_TIME: "n/a",
ATTR_DESTINATION: "n/a",
ATTR_MODE: None,
}
self.tnsw = TransportNSW()
def update(self):
"""Get the next leave time."""
_data = self.tnsw.get_departures(
self._stop_id, self._route, self._destination, self._api_key
)
self.info = {
ATTR_ROUTE: _data["route"],
ATTR_DUE_IN: _data["due"],
ATTR_DELAY: _data["delay"],
ATTR_REAL_TIME: _data["real_time"],
ATTR_DESTINATION: _data["destination"],
ATTR_MODE: _data["mode"],
}
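A minimal sketch of exercising the data layer above outside Home Assistant; the stop id and API key are placeholders, and a real Transport NSW Open Data key plus network access are required.

# Hypothetical standalone usage of the wrapper class.
data = PublicTransportData(stop_id="209516", route="", destination="",
                           api_key="YOUR_API_KEY")
data.update()                       # calls TransportNSW.get_departures()
print(data.info[ATTR_DUE_IN], data.info[ATTR_MODE])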
|
qtile/qtile
|
test/layouts/test_treetab.py
|
Python
|
mit
| 6,060 | 0.001155 |
# Copyright (c) 2019 Guangwang Huang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import libqtile.config
from libqtile import layout
from libqtile.confreader import Config
from test.layouts.layout_utils import assert_focus_path, assert_focused
class TreeTabConfig(Config):
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d"),
]
layouts = [
layout.TreeTab(sections=["Foo", "Bar"]),
]
floating_layout = libqtile.resources.default_config.floating_layout
keys = []
mouse = []
screens = []
follow_mouse_focus = False
treetab_config = pytest.mark.parametrize("manager", [TreeTabConfig], indirect=True)
@treetab_config
def test_window(manager):
# setup 3 tiled and two floating clients
manager.test_window("one")
manager.test_window("two")
manager.test_window("float1", floating=True)
manager.test_window("float2", floating=True)
manager.test_window("three")
# test preconditions, columns adds clients at pos of current, in two stacks
assert manager.c.layout.info()["clients"] == ["one", "three", "two"]
assert manager.c.layout.info()["sections"] == ["Foo", "Bar"]
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["two"], ["three"]],
"Bar": [],
}
    # last added window has focus
assert_focused(manager, "three")
manager.c.layout.up()
assert_focused(manager, "two")
manager.c.layout.down()
assert_focused(manager, "three")
# test command move_up/down
manager.c.layout.move_up()
    assert manager.c.layout.info()["clients"] == ["one", "three", "two"]
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["three"], ["two"]],
"Bar": [],
}
manager.c.layout.move_down()
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["two"], ["three"]],
"Bar": [],
}
# section_down/up
manager.c.layout.up() # focus two
manager.c.layout.section_down()
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["three"]],
"Bar": [["two"]],
}
manager.c.layout.section_up()
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["three"], ["two"]],
"Bar": [],
}
# del_section
manager.c.layout.up() # focus three
manager.c.layout.section_down()
manager.c.layout.del_section("Bar")
assert manager.c.layout.info()["client_trees"] == {"Foo": [["one"], ["two"], ["three"]]}
# add_section
manager.c.layout.add_section("Baz")
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["two"], ["three"]],
"Baz": [],
}
manager.c.layout.del_section("Baz")
# move_left/right
manager.c.layout.move_left() # no effect for top-level children
assert manager.c.layout.info()["client_trees"] == {"Foo": [["one"], ["two"], ["three"]]}
manager.c.layout.move_right()
assert manager.c.layout.info()["client_trees"] == {"Foo": [["one"], ["two", ["three"]]]}
manager.c.layout.move_right() # no effect
assert manager.c.layout.info()["client_trees"] == {"Foo": [["one"], ["two", ["three"]]]}
manager.test_window("four")
manager.c.layout.move_right()
manager.c.layout.up()
manager.test_window("five")
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["two", ["three", ["four"]], ["five"]]]
}
# expand/collapse_branch, and check focus order
manager.c.layout.up()
manager.c.layout.up() # focus three
manager.c.layout.collapse_branch()
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["two", ["three"], ["five"]]]
}
assert_focus_path(manager, "five", "float1", "float2", "one", "two", "three")
manager.c.layout.expand_branch()
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["two", ["three", ["four"]], ["five"]]]
}
assert_focus_path(manager, "four", "five", "float1", "float2", "one", "two", "three")
@treetab_config
def test_sort_windows(manager):
manager.test_window("one")
manager.test_window("two")
manager.test_window("101")
manager.test_window("102")
manager.test_window("103")
assert manager.c.layout.info()["client_trees"] == {
"Foo": [["one"], ["two"], ["101"], ["102"], ["103"]],
"Bar": [],
}
"""
# TODO how to serialize a function object? i.e. `sorter`:
def sorter(window):
try:
if int(window.name) % 2 == 0:
return 'Even'
else:
return 'Odd'
except ValueError:
return 'Bar'
manager.c.layout.sort_windows(sorter)
assert manager.c.layout.info()['client_trees'] == {
'Foo': [],
'Bar': [['one'], ['two']],
'Even': [['102']],
'Odd': [['101'], ['103']]
}
"""
|
TouK/vumi
|
vumi/middleware/tests/test_tagger.py
|
Python
|
bsd-3-clause
| 5,053 | 0 |
"""Tests for vumi.middleware.tagger."""
import re
from vumi.middleware.tagger import TaggingMiddleware
from vumi.message import TransportUserMessage
from vumi.tests.helpers import VumiTestCase
class TestTaggingMiddleware(VumiTestCase):
DEFAULT_CONFIG = {
'incoming': {
'addr_pattern': r'^\d+(\d{3})$',
'tagpool_template': r'pool1',
'tagname_template': r'mytag-\1',
},
'outgoing': {
'tagname_pattern': r'mytag-(\d{3})$',
'msg_template': {
'from_addr': r'1234*\1',
},
},
}
def mk_tagger(self, config=None):
dummy_worker = object()
if config is None:
config = self.DEFAULT_CONFIG
self.mw = TaggingMiddleware("dummy_tagger", config, dummy_worker)
self.mw.setup_middleware()
def mk_msg(self, to_addr, tag=None, from_addr="12345"):
msg = TransportUserMessage(to_addr=to_addr, from_addr=from_addr,
transport_name="dummy_connector",
transport_type="dummy_transport_type")
if tag is not None:
TaggingMiddleware.add_tag_to_msg(msg, tag)
return msg
def get_tag(self, to_addr):
msg = self.mk_msg(to_addr)
msg = self.mw.handle_inbound(msg, "dummy_connector")
return TaggingMiddleware.map_msg_to_tag(msg)
def get_from_addr(self, to_addr, tag):
msg = self.mk_msg(to_addr, tag, from_addr=None)
msg = self.mw.handle_outbound(msg, "dummy_connector")
return msg['from_addr']
def test_inbound_matching_to_addr(self):
self.mk_tagger()
self.assertEqual(self.get_tag("123456"), ("pool1", "mytag-456"))
self.assertEqual(self.get_tag("1234"), ("pool1", "mytag-234"))
def test_inbound_nonmatching_to_addr(self):
self.mk_tagger()
self.assertEqual(self.get_tag("a1234"), None)
def test_inbound_nonmatching_to_addr_leaves_msg_unmodified(self):
self.mk_tagger()
tag = ("dont", "modify")
orig_msg = self.mk_msg("a1234", tag=tag)
msg = orig_msg.from_json(orig_msg.to_json())
msg = self.mw.handle_inbound(msg, "dummy_connector")
self.assertEqual(msg, orig_msg)
def test_inbound_none_to_addr(self):
self.mk_tagger()
self.assertEqual(self.get_tag(None), None)
def test_outbound_matching_tag(self):
self.mk_tagger()
self.assertEqual(self.get_from_addr("111", ("pool1", "mytag-456")),
"1234*456")
self.assertEqual(self.get_from_addr("111", ("pool1", "mytag-789")),
"1234*789")
def test_outbound_nonmatching_tag(self):
self.mk_tagger()
self.assertEqual(self.get_from_addr("111", ("pool1", "othertag-456")),
None)
def test_outbound_nonmatching_tag_leaves_msg_unmodified(self):
self.mk_tagger()
orig_msg = self.mk_msg("a1234", tag=("pool1", "othertag-456"))
msg = orig_msg.from_json(orig_msg.to_json())
msg = self.mw.handle_outbound(msg, "dummy_connector")
for key in msg.payload.keys():
self.assertEqual(msg[key], orig_msg[key], "Key %r not equal" % key)
self.assertEqual(msg, orig_msg)
def test_outbound_no_tag(self):
self.mk_tagger()
self.assertEqual(self.get_from_addr("111", None), None)
def test_deepupdate(self):
orig = {'a': {'b': "foo"}, 'c': "bar"}
TaggingMiddleware._deepupdate(re.match(".*", "foo"), orig,
{'a': {'b': "baz"}, 'd': r'\g<0>!',
'e': 1})
self.assertEqual(orig, {'a': {'b': "baz"}, 'c': "bar", 'd': "foo!",
'e': 1})
def test_deepupdate_with_recursion(self):
self.mk_tagger()
orig = {'a': {'b': "foo"}, 'c': "bar"}
new = {'a': {'b': "baz"}}
new['a']['d'] = new
TaggingMiddleware._deepupdate(re.match(".*", "foo"), orig, new)
self.assertEqual(orig, {'a': {'b': "baz"}, 'c': "bar"})
def test_map_msg_to_tag(self):
msg = self.mk_msg("123456")
self.assertEqual(TaggingMiddleware.map_msg_to_tag(msg), None)
msg['helper_metadata']['tag'] = {'tag': ['pool', 'mytag']}
        self.assertEqual(TaggingMiddleware.map_msg_to_tag(msg),
("pool", "mytag"))
def test_add_tag_to_msg(self):
msg = self.mk_msg("123456")
TaggingMiddleware.add_tag_to_msg(msg, ('pool', 'mytag'))
self.assertEqual(msg['helper_metadata']['tag'], {
'tag': ['pool', 'mytag'],
})
def test_add_tag_to_payload(self):
payload = {}
TaggingMiddleware.add_tag_to_payload(payload, ('pool', 'mytag'))
self.assertEqual(payload, {
'helper_metadata': {
'tag': {
'tag': ['pool', 'mytag'],
},
},
})
|
tarballs-are-good/sympy
|
sympy/polys/tests/test_monomialtools.py
|
Python
|
bsd-3-clause
| 3,561 | 0.048301 |
"""Tests for tools and arithmetics for monomials of distributed polynomials. """
from sympy.polys.monomialtools import (
monomials, monomial_count,
    monomial_lex_cmp, monomial_grlex_cmp, monomial_grevlex_cmp, monomial_cmp,
    monomial_mul, monomial_div, monomial_gcd, monomial_lcm, monomial_max, monomial_min,
)
from sympy.abc import x, y
from sympy.utilities.pytest import raises
def test_monomials():
assert sorted(monomials([], 0)) == [1]
assert sorted(monomials([], 1)) == [1]
assert sorted(monomials([], 2)) == [1]
assert sorted(monomials([], 3)) == [1]
assert sorted(monomials([x], 0)) == [1]
assert sorted(monomials([x], 1)) == [1, x]
assert sorted(monomials([x], 2)) == [1, x, x**2]
assert sorted(monomials([x], 3)) == [1, x, x**2, x**3]
assert sorted(monomials([x, y], 0)) == [1]
assert sorted(monomials([x, y], 1)) == [1, x, y]
assert sorted(monomials([x, y], 2)) == [1, x, y, x**2, y**2, x*y]
assert sorted(monomials([x, y], 3)) == [1, x, y, x**2, x**3, y**2, y**3, x*y, x*y**2, y*x**2]
def test_monomial_count():
assert monomial_count(2, 2) == 6
assert monomial_count(2, 3) == 10
def test_monomial_lex_cmp():
assert monomial_lex_cmp((1,2,3), (1,2,3)) == 0
assert monomial_lex_cmp((2,2,3), (1,2,3)) == 1
assert monomial_lex_cmp((1,3,3), (1,2,3)) == 1
assert monomial_lex_cmp((1,2,4), (1,2,3)) == 1
assert monomial_lex_cmp((0,2,3), (1,2,3)) == -1
assert monomial_lex_cmp((1,1,3), (1,2,3)) == -1
assert monomial_lex_cmp((1,2,2), (1,2,3)) == -1
def test_monomial_grlex_cmp():
assert monomial_grlex_cmp((1,2,3), (1,2,3)) == 0
assert monomial_grlex_cmp((2,2,3), (1,2,3)) == 1
assert monomial_grlex_cmp((1,3,3), (1,2,3)) == 1
assert monomial_grlex_cmp((1,2,4), (1,2,3)) == 1
assert monomial_grlex_cmp((0,2,3), (1,2,3)) == -1
assert monomial_grlex_cmp((1,1,3), (1,2,3)) == -1
assert monomial_grlex_cmp((1,2,2), (1,2,3)) == -1
assert monomial_grlex_cmp((2,2,3), (1,2,4)) == 1
assert monomial_grlex_cmp((1,3,3), (1,2,4)) == 1
assert monomial_grlex_cmp((0,2,3), (1,2,2)) == -1
assert monomial_grlex_cmp((1,1,3), (1,2,2)) == -1
def test_monomial_grevlex_cmp():
assert monomial_grevlex_cmp((1,2,3), (1,2,3)) == 0
assert monomial_grevlex_cmp((2,2,3), (1,2,3)) == 1
assert monomial_grevlex_cmp((1,3,3), (1,2,3)) == 1
assert monomial_grevlex_cmp((1,2,4), (1,2,3)) == 1
assert monomial_grevlex_cmp((0,2,3), (1,2,3)) == -1
assert monomial_grevlex_cmp((1,1,3), (1,2,3)) == -1
assert monomial_grevlex_cmp((1,2,2), (1,2,3)) == -1
assert monomial_grevlex_cmp((2,2,3), (1,2,4)) == 1
assert monomial_grevlex_cmp((1,3,3), (1,2,4)) == 1
assert monomial_grevlex_cmp((0,2,3), (1,2,2)) == -1
assert monomial_grevlex_cmp((1,1,3), (1,2,2)) == -1
def test_monomial_cmp():
assert monomial_cmp('lex') == monomial_lex_cmp
assert monomial_cmp('grlex') == monomial_grlex_cmp
assert monomial_cmp('grevlex') == monomial_grevlex_cmp
raises(ValueError, "monomial_cmp('unknown')")
def test_monomial_mul():
assert monomial_mul((3,4,1), (1,2,0)) == (4,6,1)
def test_monomial_div():
assert monomial_div((3,4,1), (1,2,0)) == (2,2,1)
def test_monomial_gcd():
assert monomial_gcd((3,4,1), (1,2,0)) == (1,2,0)
def test_monomial_lcm():
assert monomial_lcm((3,4,1), (1,2,0)) == (3,4,1)
def test_monomial_max():
assert monomial_max((3,4,5), (0,5,1), (6,3,9)) == (6,5,9)
def test_monomial_min():
assert monomial_min((3,4,5), (0,5,1), (6,3,9)) == (0,3,1)
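For context, these helpers treat monomials as exponent tuples, so multiplication adds exponents componentwise, division subtracts them, and lcm/max work componentwise as well; a short illustration of the values asserted above:

# (3,4,1) is x**3*y**4*z and (1,2,0) is x*y**2, so their product is x**4*y**6*z.
assert monomial_mul((3, 4, 1), (1, 2, 0)) == (3 + 1, 4 + 2, 1 + 0)
assert monomial_lcm((3, 4, 1), (1, 2, 0)) == (max(3, 1), max(4, 2), max(1, 0))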
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/api/openstack/compute/contrib/extended_status.py
|
Python
|
gpl-2.0
| 3,926 | 0 |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Status Admin API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
authorize = extensions.soft_extension_authorizer('compute', 'extended_status')
class ExtendedStatusController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedStatusController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _extend_server(self, server, instance):
for state in ['task_state', 'vm_state', 'power_state']:
key = "%s:%s" % (Extended_status.alias, state)
server[key] = instance[state]
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(server, db_instance)
@wsgi.extends
    def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusesTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(server, db_instance)
class Extended_status(extensions.ExtensionDescriptor):
"""Extended Status support."""
name = "ExtendedStatus"
alias = "OS-EXT-STS"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended_status/api/v1.1")
updated = "2011-11-03T00:00:00Z"
def get_controller_extensions(self):
controller = ExtendedStatusController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def make_server(elem):
elem.set('{%s}task_state' % Extended_status.namespace,
'%s:task_state' % Extended_status.alias)
elem.set('{%s}power_state' % Extended_status.namespace,
'%s:power_state' % Extended_status.alias)
elem.set('{%s}vm_state' % Extended_status.namespace,
'%s:vm_state' % Extended_status.alias)
class ExtendedStatusTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
class ExtendedStatusesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
|
jdburton/gimp-osx
|
src/gimp-2.6.12/plug-ins/pygimp/plug-ins/pyconsole.py
|
Python
|
gpl-2.0
| 20,993 | 0.00181 |
#
# pyconsole.py
#
# Copyright (C) 2004-2006 by Yevgen Muntyan <muntyan@math.tamu.edu>
# Portions of code by Geoffrey French.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public version 2.1 as
# published by the Free Software Foundation.
#
# See COPYING.lib file that comes with this distribution for full text
# of the license.
#
# This module 'runs' python interpreter in a TextView widget.
# The main class is Console, usage is:
# Console(locals=None, banner=None, completer=None, use_rlcompleter=True, start_script='') -
# it creates the widget and 'starts' interactive session; see the end
# of this file. If start_script is not empty, it pastes it as it was
# entered from keyboard.
#
# Console has "command" signal which is emitted when code is about to
# be executed. You may connect to it using console.connect or
# console.connect_after to get your callback ran before or after the
# code is executed.
#
# To modify output appearance, set attributes of console.stdout_tag and
# console.stderr_tag.
#
# Console may subclass a type other than gtk.TextView, to allow syntax
# highlighting and stuff,
# e.g.:
# console_type = pyconsole.ConsoleType(moo.edit.TextView)
# console = console_type(use_rlcompleter=False, start_script="import moo\nimport gtk\n")
#
# This widget is not a replacement for real terminal with python running
# inside: GtkTextView is not a terminal.
# The use case is: you have a python program, you create this widget,
# and inspect your program interiors.
import gtk
import gtk.gdk as gdk
import gobject
import pango
import gtk.keysyms as _keys
import code
import sys
import keyword
import re
# commonprefix() from posixpath
def _commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
    prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0:
return ''
break
return prefix
class _ReadLine(object):
class Output(object):
def __init__(self, console, tag_name):
object.__init__(self)
self.buffer = console.get_buffer()
self.tag_name = tag_name
def write(self, text):
pos = self.buffer.get_iter_at_mark(self.buffer.get_insert())
self.buffer.insert_with_tags_by_name(pos, text, self.tag_name)
class History(object):
def __init__(self):
object.__init__(self)
self.items = ['']
self.ptr = 0
self.edited = {}
def commit(self, text):
if text and self.items[-1] != text:
self.items.append(text)
self.ptr = 0
self.edited = {}
def get(self, dir, text):
if len(self.items) == 1:
return None
if text != self.items[self.ptr]:
self.edited[self.ptr] = text
elif self.edited.has_key(self.ptr):
del self.edited[self.ptr]
self.ptr = self.ptr + dir
if self.ptr >= len(self.items):
self.ptr = 0
elif self.ptr < 0:
self.ptr = len(self.items) - 1
try:
return self.edited[self.ptr]
except KeyError:
return self.items[self.ptr]
def __init__(self, quit_func=None):
object.__init__(self)
self.quit_func = quit_func
self.set_wrap_mode(gtk.WRAP_CHAR)
self.modify_font(pango.FontDescription("Monospace"))
self.buffer = self.get_buffer()
self.buffer.connect("insert-text", self.on_buf_insert)
self.buffer.connect("delete-range", self.on_buf_delete)
self.buffer.connect("mark-set", self.on_buf_mark_set)
self.do_insert = False
self.do_delete = False
self.stdout_tag = self.buffer.create_tag("stdout", foreground="#006000")
self.stderr_tag = self.buffer.create_tag("stderr", foreground="#B00000")
self._stdout = _ReadLine.Output(self, "stdout")
self._stderr = _ReadLine.Output(self, "stderr")
self.cursor = self.buffer.create_mark("cursor",
self.buffer.get_start_iter(),
False)
insert = self.buffer.get_insert()
self.cursor.set_visible(True)
insert.set_visible(False)
self.ps = ''
self.in_raw_input = False
self.run_on_raw_input = None
self.tab_pressed = 0
self.history = _ReadLine.History()
self.nonword_re = re.compile("[^\w\._]")
def freeze_undo(self):
try: self.begin_not_undoable_action()
except: pass
def thaw_undo(self):
try: self.end_not_undoable_action()
except: pass
def raw_input(self, ps=None):
if ps:
self.ps = ps
else:
self.ps = ''
iter = self.buffer.get_iter_at_mark(self.buffer.get_insert())
if ps:
self.freeze_undo()
self.buffer.insert(iter, self.ps)
self.thaw_undo()
self.__move_cursor_to(iter)
self.scroll_to_mark(self.cursor, 0.2)
self.in_raw_input = True
if self.run_on_raw_input:
run_now = self.run_on_raw_input
self.run_on_raw_input = None
self.buffer.insert_at_cursor(run_now + '\n')
def on_buf_mark_set(self, buffer, iter, mark):
if mark is not buffer.get_insert():
return
start = self.__get_start()
end = self.__get_end()
if iter.compare(self.__get_start()) >= 0 and \
iter.compare(self.__get_end()) <= 0:
buffer.move_mark_by_name("cursor", iter)
self.scroll_to_mark(self.cursor, 0.2)
def __insert(self, iter, text):
self.do_insert = True
self.buffer.insert(iter, text)
self.do_insert = False
def on_buf_insert(self, buf, iter, text, len):
if not self.in_raw_input or self.do_insert or not len:
return
buf.stop_emission("insert-text")
lines = text.splitlines()
need_eol = False
for l in lines:
if need_eol:
self._commit()
iter = self.__get_cursor()
else:
cursor = self.__get_cursor()
if iter.compare(self.__get_start()) < 0:
iter = cursor
elif iter.compare(self.__get_end()) > 0:
iter = cursor
else:
self.__move_cursor_to(iter)
need_eol = True
self.__insert(iter, l)
self.__move_cursor(0)
def __delete(self, start, end):
self.do_delete = True
self.buffer.delete(start, end)
self.do_delete = False
def on_buf_delete(self, buf, start, end):
if not self.in_raw_input or self.do_delete:
return
buf.stop_emission("delete-range")
start.order(end)
line_start = self.__get_start()
line_end = self.__get_end()
if start.compare(line_end) > 0:
return
if end.compare(line_start) < 0:
return
self.__move_cursor(0)
if start.compare(line_start) < 0:
start = line_start
if end.compare(line_end) > 0:
end = line_end
self.__delete(start, end)
def do_key_press_event(self, event, parent_type):
if not self.in_raw_input:
return parent_type.do_key_press_event(self, event)
tab_pressed = self.tab_pressed
self.tab_pressed = 0
handled = True
state = event.state & (gdk.SHIFT_MASK |
gdk.CONTROL_MASK |
gdk.MOD1_MASK)
keyval = event.keyval
if not state:
if keyval == _keys.Return:
self._commit()
elif keyval == _keys.Up:
self.__history(-1)
eli
|
sachinpro/sachinpro.github.io
|
tensorflow/python/framework/docs.py
|
Python
|
apache-2.0
| 21,127 | 0.008425 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Updates generated docs from Python doc comments.
Updates the documentation files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import re
_arg_re = re.compile(" *([*]{0,2}[a-zA-Z][a-zA-Z0-9_]*):")
_section_re = re.compile("([A-Z][a-zA-Z ]*):$")
_always_drop_symbol_re = re.compile("_[_a-zA-Z0-9]")
_anchor_re = re.compile(r"^[\w.]+$")
_member_mark = "@@"
_indiv_dir = "functions_and_classes"
_num_subdirs = 10
_subdir_prefix = "shard"
class Document(object):
"""Base class for an automatically generated document."""
def write_markdown_to_file(self, f):
"""Writes a Markdown-formatted version of this document to file `f`.
Args:
f: The output file.
"""
raise NotImplementedError("Document.WriteToFile")
class Index(Document):
"""An automatically generated index for a collection of documents."""
def __init__(self, module_to_name, members, filename_to_library_map,
path_prefix):
"""Creates a new Index.
Args:
module_to_name: Dictionary mapping modules to short names.
members: Dictionary mapping member name to (fullname, member).
filename_to_library_map: A list of (filename, Library) pairs. The order
corresponds to the order in which the libraries appear in the index.
path_prefix: Prefix to add to links in the index.
"""
self._module_to_name = module_to_name
self._members = members
self._filename_to_library_map = filename_to_library_map
self._path_prefix = path_prefix
def write_markdown_to_file(self, f):
"""Writes this index to file `f`.
The output is formatted as an unordered list. Each list element
contains the title of the library, followed by a list of symbols
in that library hyperlinked to the corresponding anchor in that
library.
Args:
f: The output file.
"""
print("<!-- This file is machine generated: DO NOT EDIT! -->", file=f)
print("", file=f)
print("# TensorFlow Python reference documentation", file=f)
print("", file=f)
fullname_f = lambda name: self._members[name][0]
anchor_f = lambda name: _get_anchor(self._module_to_name, fullname_f(name))
for filename, library in self._filename_to_library_map:
sorted_names = sorted(library.mentioned, key=lambda x: (str.lower(x), x))
member_names = [n for n in sorted_names if n in self._members]
# TODO(wicke): This is a hack that should be removed as soon as the
# website code allows it.
full_filename = self._path_prefix + filename
links = ["[`%s`](%s#%s)" % (name, full_filename, anchor_f(name))
for name in member_names]
if links:
print("* **[%s](%s)**:" % (library.title, full_filename), file=f)
for link in links:
print(" * %s" % link, file=f)
print("", file=f)
def collect_members(module_to_name, exclude=()):
"""Collect all symbols from a list of modules.
Args:
module_to_name: Dictionary mapping modules to short names.
exclude: Set of fully qualified names to exclude.
Returns:
Dictionary mapping name to (fullname, member) pairs.
Raises:
RuntimeError: if we can not resolve a name collision.
"""
members = {}
for module, module_name in module_to_name.items():
all_names = getattr(module, "__all__", None)
    for name, member in inspect.getmembers(module):
if ((inspect.isfunction(member) or inspect.isclass(member)) and
not _always_drop_symbol_re.match(name) and
(all_names is None or name in all_names)):
fullname = "%s.%s" % (module_name, name)
if fullname in exclude:
continue
if name in members:
other_fullname, other_member = members[name]
if member is not other_member:
raise RuntimeError("Short name collision between %s and %s" %
(fullname, other_fullname))
if len(fullname) == len(other_fullname):
raise RuntimeError("Can't decide whether to use %s or %s for %s: "
"both full names have length %d" %
(fullname, other_fullname, name, len(fullname)))
if len(fullname) > len(other_fullname):
continue # Use the shorter full name
members[name] = fullname, member
return members
def _get_anchor(module_to_name, fullname):
"""Turn a full member name into an anchor.
Args:
module_to_name: Dictionary mapping modules to short names.
fullname: Fully qualified name of symbol.
Returns:
HTML anchor string. The longest module name prefix of fullname is
removed to make the anchor.
Raises:
ValueError: If fullname uses characters invalid in an anchor.
"""
if not _anchor_re.match(fullname):
raise ValueError("'%s' is not a valid anchor" % fullname)
anchor = fullname
for module_name in module_to_name.values():
if fullname.startswith(module_name + "."):
rest = fullname[len(module_name)+1:]
# Use this prefix iff it is longer than any found before
if len(anchor) > len(rest):
anchor = rest
return anchor
def _stable_hash(s):
"""A simple string hash that won't change from run to run."""
ret = 0
for c in s:
ret = ret * 97 + ord(c)
return ret
class Library(Document):
"""An automatically generated document for a set of functions and classes."""
def __init__(self,
title,
module,
module_to_name,
members,
documented,
exclude_symbols=(),
prefix=None):
"""Creates a new Library.
Args:
title: A human-readable title for the library.
module: Module to pull high level docstring from (for table of contents,
list of Ops to document, etc.).
module_to_name: Dictionary mapping modules to short names.
members: Dictionary mapping member name to (fullname, member).
documented: Set of documented names to update.
exclude_symbols: A list of specific symbols to exclude.
prefix: A string to include at the beginning of the page.
"""
self._title = title
self._module = module
self._module_to_name = module_to_name
self._members = dict(members) # Copy since we mutate it below
self._exclude_symbols = frozenset(exclude_symbols)
documented.update(exclude_symbols)
self._documented = documented
self._mentioned = set()
self._prefix = prefix or ""
@property
def title(self):
"""The human-readable title for this library."""
return self._title
@property
def mentioned(self):
"""Set of names mentioned in this library."""
return self._mentioned
@property
def exclude_symbols(self):
"""Set of excluded symbols."""
return self._exclude_symbols
def _should_include_member(self, name):
"""Returns True if this member should be included in the document."""
# Always exclude symbols matching _always_drop_symbol_re.
if _always_drop_symbol_re.match(name):
return False
# Finally, exclude any specifically-excluded symbols.
if name in self._exclude_symbols:
return False
return True
def get_imported_modules(self, module):
"""Returns the list of modules imported from `module`."""
for name, member in inspect.getmembers(module):
if inspect.ismodule(member):
yield name, member
def get_cla
|
muet/Espruino
|
boards/OLIMEXINO_STM32_RE.py
|
Python
|
mpl-2.0
| 4,169 | 0.015112 |
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils
import json
info = {
'name': 'Olimexino STM32 / Leaflabs Maple with STM32F103RET',
'link': ['https://www.olimex.com/Products/Duino/STM32/OLIMEXINO-STM32/', 'http://leaflabs.com/devices/maple/'],
'variables': 3250,
 'binary_name': 'espruino_%v_olimexino_stm32_re.bin',
'build' : {
'defines' : [
'USE_NET',
'USE_GRAPHICS',
'USE_FILESYSTEM',
'USE_TV',
'USE_HASHLIB'
]
}
}
chip = {
'part': 'STM32F103RET6',
'family': 'STM32F1',
'package': 'LQFP64',
'ram': 64,
'flash': 512,
'speed': 72,
'usart': 5,
'spi': 3,
'i2c': 2,
'adc': 3,
'dac': 2
}
devices = {
'OSC_RTC': {'pin_1': 'D22',
'pin_2': 'D23'},
'LED1': {'pin': 'D13'},
'LED2': {'pin': 'D3'},
  'BTN1': {'pin': 'D38'},
'USB': {'pin_disc': 'D39',
'pin_dm': 'D40',
'pin_dp': 'D41'},
'SD': {'pin_cs': 'D25',
'pin_di': 'D34',
'pin_do': 'D33',
'pin_clk': 'D32'}}
# left-right, or top-bottom order
board = {
'top': ['D14', 'GND', 'D13', 'D12', 'D11', 'D10', 'D9', 'D8', '', 'D7', 'D6', 'D5', 'D4', 'D3', 'D2', 'D1', 'D0'],
'bottom': ['RST', '3.3', '3.3A', 'GNDA', 'GND', 'VIN', '', 'A0', 'A1', 'A2', 'A3', 'A4', 'A5'],
'right': ['D23', 'D25', 'D27', 'D29', 'D31', 'D33', 'D35', 'D37'],
'right2': ['D24', 'D26', 'D28', 'D30', 'D32', 'D34', 'D36', 'GND'],
'left': ['3V3', 'D7', 'D29', 'D12', 'D13'],
'left2': ['GND', 'D8', 'D20', 'D11', 'D4'],
'_pinmap': {'A0': 'D15', 'A1': 'D16', 'A2': 'D17', 'A3': 'D18', 'A4': 'D19', 'A5': 'D20'}
}
board['left'].reverse()
board['left2'].reverse()
board['right'].reverse()
board['right2'].reverse()
board["_css"] = """
#board {
width: 540px;
height: 418px;
top: 300px;
left: 200px;
background-image: url(img/OLIMEXINO_STM32.jpg);
}
#boardcontainer {
height: 850px;
}
#top {
top: -20px;
left: 140px;
}
#bottom {
top: 431px;
left: 220px;
}
#left {
top: 155px;
right: 520px;
}
#left2 {
top:155px;
left: 20px;
}
#right {
top: 155px;
left: 520px;
}
#right2 {
top: 155px;
right: 20px;
}
""";
def get_pins():
pins = pinutils.scan_pin_file([], 'stm32f103xe.csv', 6, 10, 11)
# Olimexino/Maple pins have stupid names
pinmapping = {'D0': 'PA3',
'D1': 'PA2',
'D2': 'PA0',
'D3': 'PA1',
'D4': 'PB5',
'D5': 'PB6',
'D6': 'PA8',
'D7': 'PA9',
'D8': 'PA10',
'D9': 'PB7',
'D10': 'PA4',
'D11': 'PA7',
'D12': 'PA6',
'D13': 'PA5',
'D14': 'PB8',
'D15':'PC0', # shared with A0-A15
'D16': 'PC1',
'D17': 'PC2',
'D18': 'PC3',
'D19': 'PC4',
'D20': 'PC5',
'D21': 'PC13',
'D22': 'PC14',
'D23': 'PC15',
'D24': 'PB9',
'D25': 'PD2',
'D26': 'PC10',
'D27': 'PB0',
'D28': 'PB1',
'D29': 'PB10',
'D30': 'PB11',
'D31': 'PB12',
'D32': 'PB13',
'D33': 'PB14',
'D34': 'PB15',
'D35': 'PC6',
'D36': 'PC7',
'D37': 'PC8',
'D38':'PC9', # for button
'D39':'PC12', # for USB disc
'D40':'PA11', # for USB dm
'D41':'PA12', # for USB dp
}
newpins = []
for newname in pinmapping:
pin = pinutils.findpin(pins, pinmapping[newname], True)
pin['name'] = 'P' + newname
pin['sortingname'] = newname[0] + newname[1:].rjust(2, '0')
newpins.append(pin)
# Because 'pinmapping' is NOT stored in order!!!
newpins = sorted(newpins, key=lambda pin: pin['sortingname'])
return newpins
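A small illustration of the sortingname scheme used above, which zero-pads the numeric part so lexicographic sorting matches pin order (explanatory only; the real names come from the pin CSV):

# 'D4' -> 'D04', 'D13' -> 'D13', 'D41' -> 'D41'
for example_name in ('D4', 'D13', 'D41'):
    print(example_name[0] + example_name[1:].rjust(2, '0'))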
|
liqd/adhocracy4
|
tests/comments/test_model.py
|
Python
|
agpl-3.0
| 2,973 | 0 |
from urllib.parse import urljoin
import pytest
from adhocracy4.comments import models as comments_models
from adhocracy4.ratings import models as rating_models
@pytest.mark.django_db
def test_delete_comment(comment_factory, rating_factory):
comment = comment_factory()
for i in range(5):
comment_factory(content_object=comment)
comment_count = comments_models.Comment.objects.all().count()
rating_factory(content_object=comment)
rating_count = rating_models.Rating.objects.all().count()
assert comment_count == 6
assert rating_count == 1
comment.delete()
comment_count = comments_models.Comment.objects.all().count()
rating_count = rating_models.Rating.objects.all().count()
assert comment_count == 0
assert rating_count == 0
@pytest.mark.django_db
def test_save(comment_factory):
comment_removed = comment_factory(comment='I am not yet removed')
comment_censored = comment_factory(comment='I am not yet censored')
assert comment_removed.comment == 'I am not yet removed'
assert comment_censored.comment == 'I am not yet censored'
comment_removed.is_removed = True
comment_removed.save()
comment_removed.refresh_from_db()
comment_censored.is_censored = True
comment_censored.save()
comment_censored.refresh_from_db()
assert comment_removed.comment == ''
assert comment_censored.comment == ''
@pytest.mark.django_db
def test_str(comment_factory):
short_comment = comment_factory(comment='I am so short')
long_comment = comment_factory(
comment='I am a very very very long comment. More than 200 '
'characters. Yes yes yes. That long! Really that long. How long is '
'that. Yes yes yes. That long! Really that long. How long is that. '
'Yes yes yes. That long! Really that long. How long is that.'
)
assert str(short_comment) == short_comment.comment
assert str(long_comment) == "{} ...".format(long_comment.comment[:200])
@pytest.mark.django_db
def test_get_absolute_url(comment, child_comment):
# comment from factory has Question as content_object, which does not
# define get_absolte_url, so url of module is used
assert comment.get_absolute_url() == \
urljoin(comment.module.get_absolute_url(),
"?comment={}".format(str(comment.id)))
assert child_comment.get_absolute_url() == \
        urljoin(child_comment.content_object.get_absolute_url(),
"?comment={}".format(str(child_comment.id)))
@pytest.mark.django_db
def test_notification_content(comment):
assert comment.notification_content == comment.comment
@pytest.mark.django_db
def test_project(comment):
assert comment.project == comment.module.project
@pytest.mark.django_db
def test_module(comment, child_comment):
assert comment.module == comment.content_object.module
assert child_comment.module == \
child_comment.content_object.content_object.module
|
ihmpdcc/cutlass
|
tests/test_sample_attr.py
|
Python
|
mit
| 10,454 | 0.001148 |
#!/usr/bin/env python
""" A unittest script for the SampleAttribute module. """
import unittest
import json
from cutlass import SampleAttribute
from CutlassTestConfig import CutlassTestConfig
from CutlassTestUtil import CutlassTestUtil
# pylint: disable=W0703, C1801
class SampleAttributeTest(unittest.TestCase):
""" A unit test class for the SampleAttribute module. """
session = None
util = None
@classmethod
def setUpClass(cls):
""" Setup for the unittest. """
# Establish the session for each test method
cls.session = CutlassTestConfig.get_session()
cls.util = CutlassTestUtil()
def testImport(self):
""" Test the importation of the SampleAttribute module. """
success = False
try:
from cutlass import SampleAttribute
success = True
except Exception:
pass
self.failUnless(success)
self.failIf(SampleAttribute is None)
def testSessionCreate(self):
""" Test the creation of a SampleAttribute via the session. """
success = False
attrib = None
try:
attrib = self.session.create_sample_attr()
success = True
except Exception:
pass
self.failUnless(success)
self.failIf(attrib is None)
def testFecalCal(self):
""" Test the fecalcal property. """
attrib = self.session.create_sample_attr()
self.util.stringTypeTest(self, attrib, "fecalcal")
self.util.stringPropertyTest(self, attrib, "fecalcal")
def testSampleDesc(self):
""" Test the sample_desc property. """
attrib = self.session.create_sample_attr()
self.util.stringTypeTest(self, attrib, "sample_desc")
self.util.stringPropertyTest(self, attrib, "sample_desc")
def testSampleType(self):
""" Test the sample_type property. """
attrib = self.session.create_sample_attr()
self.util.stringTypeTest(self, attrib, "sample_type")
self.util.stringPropertyTest(self, attrib, "sample_type")
def testSubproject(self):
""" Test the subproject property. """
attrib = self.session.create_sample_attr()
self.util.stringTypeTest(self, attrib, "subproject")
self.util.stringPropertyTest(self, attrib, "subproject")
def testToJson(self):
""" Test the generation of JSON from a SampleAttribute instance. """
attrib = self.session.create_sample_attr()
success = False
fecalcal = "test fecalcal"
attrib.fecalcal = fecalcal
attrib_json = None
try:
attrib_json = attrib.to_json()
success = True
except Exception:
pass
self.assertTrue(success, "Able to use 'to_json'.")
self.assertTrue(attrib_json is not None, "to_json() returned data.")
parse_success = False
try:
attrib_data = json.loads(attrib_json)
parse_success = True
except Exception:
pass
self.assertTrue(parse_success,
"to_json() did not throw an exception.")
self.assertTrue(attrib_data is not None,
"to_json() returned parsable JSON.")
self.assertTrue('meta' in attrib_data, "JSON has 'meta' key in it.")
        self.assertEqual(attrib_data['meta']['fecalcal'],
                         fecalcal,
                         "'fecalcal' in JSON had expected value."
)
def testDataInJson(self):
""" Test if the correct data is in the generated JSON. """
attrib = self.session.create_sample_attr()
success = False
fecalcal = "test fecalcal"
sample_desc = "DNA: mom-vaginal"
sample_type = "BC1D"
subproject = "earlyPregStudy"
attrib.fecalcal = fecalcal
attrib.sample_desc = sample_desc
attrib.sample_type = sample_type
attrib.subproject = subproject
attrib_json = None
try:
attrib_json = attrib.to_json()
success = True
except Exception:
pass
self.assertTrue(success, "Able to use 'to_json'.")
self.assertTrue(attrib_json is not None, "to_json() returned data.")
parse_success = False
try:
attrib_data = json.loads(attrib_json)
parse_success = True
except Exception:
pass
self.assertTrue(parse_success,
"to_json() did not throw an exception.")
self.assertTrue(attrib_data is not None,
"to_json() returned parsable JSON.")
self.assertTrue('meta' in attrib_data, "JSON has 'meta' key in it.")
self.assertEqual(attrib_data['meta']['fecalcal'],
fecalcal,
"'fecalcal' in JSON had expected value."
)
self.assertEqual(attrib_data['meta']['sample_desc'],
sample_desc,
"'sample_desc' in JSON had expected value."
)
self.assertEqual(attrib_data['meta']['sample_type'],
sample_type,
"'sample_type' in JSON had expected value."
)
self.assertEqual(attrib_data['meta']['subproject'],
subproject,
"'subproject' in JSON had expected value."
)
def testId(self):
""" Test the id property. """
attrib = self.session.create_sample_attr()
self.assertTrue(attrib.id is None,
"New template sample attribute has no ID.")
with self.assertRaises(AttributeError):
attrib.id = "test"
def testVersion(self):
""" Test the version property. """
attrib = self.session.create_sample_attr()
self.assertTrue(attrib.version is None,
"New template sample attribute has no version.")
with self.assertRaises(ValueError):
attrib.version = "test"
def testTags(self):
""" Test the tags property. """
attrib = self.session.create_sample_attr()
tags = attrib.tags
self.assertTrue(type(tags) == list,
"SampleAttribute tags() method returns a list.")
self.assertEqual(len(tags), 0,
"Template sample attribute tags list is empty.")
new_tags = ["tagA", "tagB"]
attrib.tags = new_tags
self.assertEqual(attrib.tags, new_tags,
"Can set tags on a sample attribute.")
json_str = attrib.to_json()
doc = json.loads(json_str)
self.assertTrue('tags' in doc['meta'],
"JSON representation has 'tags' field in 'meta'.")
self.assertEqual(doc['meta']['tags'], new_tags,
"JSON representation had correct tags after setter.")
def testAddTag(self):
""" Test the add_tag() method. """
attrib = self.session.create_sample_attr()
attrib.add_tag("test")
self.assertEqual(attrib.tags, ["test"],
"Can add a tag to a sample attribute.")
json_str = attrib.to_json()
doc = json.loads(json_str)
self.assertEqual(doc['meta']['tags'], ["test"],
"JSON representation had correct tags after add_tag().")
# Try adding the same tag yet again, shouldn't get a duplicate
with self.assertRaises(ValueError):
attrib.add_tag("test")
json_str = attrib.to_json()
doc2 = json.loads(json_str)
self.assertEqual(doc2['meta']['tags'], ["test"],
"JSON document did not end up with duplicate tags.")
def testRequiredFields(self):
""" Test the required_fields() static method. """
required = SampleAttribute.required_fields()
self.assertEqual(type(required), tuple,
"required_fields() returns a tuple.")
self.assertTru
|
rocketDuck/folivora
|
folivora/migrations/0007_normalize_names.py
|
Python
|
isc
| 9,029 | 0.007864 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from ..utils.pypi import normalize_name
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for package in orm['folivora.Package'].objects.all():
package.normalized_name = normalize_name(package.name)
package.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'folivora.log': {
'Meta': {'object_name': 'Log'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('django_orm.postgresql.hstore.fields.DictionaryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['folivora.Package']", 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['folivora.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'when': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'folivora.package': {
'Meta': {'unique_together': "(('name', 'provider'),)", 'object_name': 'Package'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_sync_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'normalized_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'default': "'pypi'", 'max_length': '255'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'folivora.packageversion': {
'Meta': {'unique_together': "(('package', 'version'),)", 'object_name': 'PackageVersio
|
n'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['folivora.Package']"}),
'release_date': ('django.db.models.fields.DateTimeField', [], {}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'folivora.project': {
'Meta': {'object_name': 'Project'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'through': "orm['folivora.ProjectMember']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'folivora.projectdependency': {
'Meta': {'unique_together': "(('project', 'package'),)", 'object_name': 'ProjectDependency'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['folivora.Package']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependencies'", 'to': "orm['folivora.Project']"}),
'update': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['folivora.PackageVersion']", 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'folivora.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jabber': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'mail': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['folivora.Project']"}),
'state': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'folivora.syncstate': {
'Meta': {'object_name': 'SyncState'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'state': ('django.db.models.fields.CharField', [],
|
NeoBelerophon/transwhat
|
buddy.py
|
Python
|
gpl-3.0
| 5,903 | 0.027444 |
__author__ = "Steffen Vogel"
__copyright__ = "Copyright 2015, Steffen Vogel"
__license__ = "GPLv3"
__maintainer__ = "Steffen Vogel"
__email__ = "post@steffenvogel.de"
"""
This file is part of transWhat
transWhat is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
transwhat is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with transWhat. If not, see <http://www.gnu.org/licenses/>.
"""
from Spectrum2 import protocol_pb2
import logging
import time
import utils
import base64
import deferred
from deferred import call
class Buddy():
def __init__(self, owner, number, nick, statusMsg, groups, image_hash):
self.nick = nick
self.owner = owner
self.number = number
self.groups = groups
self.image_hash = image_hash if image_hash is not None else ""
self.statusMsg = u""
self.lastseen = 0
self.presence = 0
def update(self, nick, groups, image_hash):
self.nick = nick
self.groups = groups
if image_hash is not None:
self.image_hash = image_hash
def __str__(self):
return "%s (nick=%s)" % (self.number, self.nick)
class BuddyList(dict):
def __init__(self, owner, backend, user, session):
self.owner = owner
self.backend = backend
self.session = session
self.user = user
self.logger = logging.getLogger(self.__class__.__name__)
self.synced = False
def _load(self, buddies):
for buddy in buddies:
number = buddy.buddyName
nick = buddy.alias
statusMsg = buddy.statusMessage.decode('utf-8')
groups = [g for g in buddy.group]
image_hash = buddy.iconHash
self[number] = Buddy(self.owner, number, nick, statusMsg,
groups, image_hash)
self.logger.debug("Update roster")
contacts = self.keys()
contacts.remove('bot')
if self.synced == False:
self.session.sendSync(contacts, delta = False, interactive = True)
self.synced = True
self.logger.debug("Roster add: %s", str(list(contacts)))
for number in contacts:
buddy = self[number]
self.backend.handleBuddyChanged(self.user, number, buddy.nick,
buddy.groups, protocol_pb2.STATUS_NONE,
iconHash = buddy.image_hash if buddy.image_hash is not None else "")
self.session.subscribePresence(number)
self.logger.debug("%s is requesting statuses of: %s", self.user, contacts)
self.session.requestStatuses(contacts, success = self.onStatus)
def onStatus(self, contacts):
self.logger.debug("%s received statuses of: %s", self.user, contacts)
for number, (status, time) in contacts.iteritems():
buddy = self[number]
if status is None:
buddy.statusMsg = ""
else:
buddy.statusMsg = utils.softToUni(status)
self.updateSpectrum(buddy)
def load(self, buddies):
if self.session.loggedIn:
self._load(buddies)
else:
self.session.loginQueue.append(lambda: self._load(buddies))
def update(self, number, nick, groups, image_hash):
if number in self:
buddy = self[number]
buddy.update(nick, groups, image_hash)
else:
buddy = Buddy(self.owner, number, nick, "", groups, image_hash)
self[number] = buddy
self.logger.debug("Roster add: %s", buddy)
self.session.sendSync([number], delta = True, interactive = True)
self.session.subscribePresence(number)
self.session.requestStatuses([number], success = self.onStatus)
if image_hash == "" or image_hash is None:
self.requestVCard(number)
self.updateSpectrum(buddy)
return buddy
def updateSpectrum(self, buddy):
if buddy.presence == 0:
status = protocol_pb2.STATUS_NONE
elif buddy.presence == 'unavailable':
status = protocol_pb2.STATUS_AWAY
else:
status = protocol_pb2.STATUS_ONLINE
statusmsg = buddy.statusMsg
if buddy.lastseen != 0:
timestamp = time.localtime(buddy.lastseen)
statusmsg += time.strftime("\n Last seen: %a, %d %b %Y %H:%M:%S", timestamp)
iconHash = buddy.image_hash if buddy.image_hash is not None else ""
self.logger.debug("Updating buddy %s (%s) in %s, image_hash = %s",
buddy.nick, buddy.number, buddy.groups, iconHash)
self.logger.debug("Status Message: %s", statusmsg)
self.backend.handleBuddyChanged(self.user, buddy.number, buddy.nick,
buddy.groups, status, statusMessage=statusmsg, iconHash=iconHash)
def remove(self, number):
try:
buddy = self[number]
del self[number]
self.backend.handleBuddyChanged(self.user, number, "", [],
protocol_pb2.STATUS_NONE)
self.backend.handleBuddyRemoved(self.user, number)
self.session.unsubscribePresence(number)
# TODO Sync remove
return buddy
except KeyError:
return None
def requestVCard(self, buddy, ID=None):
if buddy == self.user or buddy == self.user.split('@')[0]:
buddy = self.session.legacyName
# Get profile picture
self.logger.debug('Requesting profile picture of %s', buddy)
response = deferred.Deferred()
self.session.requestProfilePicture(buddy, onSuccess = response.run)
response = response.arg(0)
pictureData = response.pictureData()
# Send VCard
if ID != None:
call(self.logger.debug, 'Sending VCard (%s) with image id %s: %s',
ID, response.pictureId(), pictureData.then(base64.b64encode))
call(self.backend.handleVCard, self.user, ID, buddy, "", "",
pictureData)
# Send image hash
if not buddy == self.session.legacyName:
try:
obuddy = self[buddy]
nick = obuddy.nick
groups = obuddy.groups
except KeyError:
nick = ""
groups = []
image_hash = pictureData.then(utils.sha1hash)
call(self.logger.debug, 'Image hash is %s', image_hash)
call(self.update, buddy, nick, groups, image_hash)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/histogram2dcontour/textfont/_color.py
|
Python
|
mit
| 424 | 0.002358 |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="histogram2dcontour.textfont", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs
)
|
arypbatista/gobspy
|
gobspy.py
|
Python
|
gpl-3.0
| 75 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from gobspy import main
main()
|
pantuza/art-gallery
|
src/timer.py
|
Python
|
gpl-2.0
| 1,202 | 0.003328 |
# -*- coding: utf-8 -*-
import time
class Timer(object):
'''
Simple timer control
'''
def __init__(self, delay):
self.current_time = 0
self.set_delay(delay)
def pause(self, pause):
if pause >= self.delay:
self.current_time = time.clock()
self.next_time = self.current_time + pause
def set_delay(self, delay):
assert delay >= 0
self.delay = delay
if self.delay == 0:
self.next_time = self.current_time
else:
self.current_time = time.clock()
self.next_time = self.current_time + self.delay
def idle(self):
'''
Verify if the timer is idle
'''
self.current_time = time.clock()
## if next frame occurs in the future, now it's idle time
if self.next_time > self.current_time:
return True
# if pass more than one delay time, synchronize it
if (self.current_time - self.next_time) > self.delay:
self.next_time = self.current_time + self.delay
else:
self.next_time += self.delay
return False
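# Illustrative usage (not part of the original file): one way to drive a
# fixed-rate loop with Timer. idle() returns True while we are still inside
# the current delay window, so this loop busy-waits between frames. The 0.5 s
# delay and the five-frame cap are arbitrary values chosen for the sketch.
if __name__ == "__main__":
    timer = Timer(0.5)
    frames = 0
    while frames < 5:
        if timer.idle():
            continue                # still waiting for the next frame slot
        frames += 1
        print("frame %d" % frames)  # stand-in for the real per-frame work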
|
foolwealth/django_multi_deployable_template
|
lib/lib_a/helper.py
|
Python
|
mit
| 115 | 0.008696 |
import random
def say_thing():
return random.choice([
"dude",
"sup",
"yo mama"
])
|
cuppa-joe/dsame
|
defs.py
|
Python
|
isc
| 274,490 | 0.029815 |
VERSION = '0.1.3.0'
PROGRAM = 'dsame'
DESCRIPTION = 'dsame is a program to decode EAS/SAME alert messages'
COPYRIGHT = 'Copyright (C) 2016 Joseph W. Metcalf'
TEST_STRING = 'EAS: ZCZC-WXR-RWT-055027-055039-055047-055117-055131-055137-055139-055015-055071+0030-0771800-KMKX/NWS-'
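# Illustrative sketch (not part of the original file): a rough slice of
# TEST_STRING above, assuming the usual SAME header layout
# ZCZC-ORG-EEE-PSSCCC(-PSSCCC...)+TTTT-JJJHHMM-LLLLLLLL-. dsame's real decoder
# is far more careful; this only shows which fields the tables below map.
_hdr = TEST_STRING.split(': ', 1)[1]          # drop the leading 'EAS: '
_codes, _tail = _hdr.split('+', 1)
_fields = _codes.split('-')                   # ['ZCZC', 'WXR', 'RWT', '055027', ...]
_originator, _event = _fields[1], _fields[2]  # 'WXR', 'RWT' (Required Weekly Test)
_locations = _fields[3:]                      # PSSCCC county codes, see US_SAME_CODE
_purge, _issued, _sender = _tail.strip('-').split('-')  # '0030', '0771800', 'KMKX/NWS'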
MSG__TEXT={
'EN' :
{'MSG1' : '{article} {organization} {preposition} {location} {has} issued a {event} valid until {end}',
'MSG2' : '{conjunction} for the following {division} in {state}: ',
'MSG3' : '{county}{punc} ',
'MSG4' : '',
'AND' : 'and',
'ALL' : 'all',
'HAS' : 'has',
'HAVE' : 'have',
'THE' : 'the',
'A' : 'a',
'IN' : 'in',
'' : '',
}
}
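# Illustrative sketch (not part of the original file): filling the MSG1
# template above with str.format(). The field values here are invented;
# dsame derives them from the decoded header and the tables below.
_EXAMPLE_MSG1 = MSG__TEXT['EN']['MSG1'].format(
    article='The', organization='National Weather Service', preposition='in',
    location='Milwaukee/Sullivan', has='has',
    event='Required Weekly Test', end='06:00 PM March 18')
# -> 'The National Weather Service in Milwaukee/Sullivan has issued a
#    Required Weekly Test valid until 06:00 PM March 18'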
FIPS_DIVN={
'' : None,
'02' : 'boroughs',
'22' : 'parishes',
'11' : None,
'57' : None,
'58' : None,
'59' : None,
'60' : None,
'61' : None,
'64' : None,
'65' : None,
'66' : None,
'68' : None,
'69' : None,
'70' : None,
'73' : None,
'74' : None,
'75' : None,
'77' : None,
'78' : None,
'91' : None,
'92' : None,
'93' : None,
'94' : None,
'96' : None,
'97' : None,
'98' : None,
}
US_SAME_AREA={
'LOCATION' : 'US',
'01' : 'Alabama',
'02' : 'Alaska',
'04' : 'Arizona',
'05' : 'Arkansas',
'06' : 'California',
'08' : 'Colorado',
'09' : 'Connecticut',
'10' : 'Delaware',
'11' : 'District of Columbia',
'12' : 'Florida',
'13' : 'Georgia',
'15' : 'Hawaii',
'16' : 'Idaho',
'17' : 'Illinois',
'18' : 'Indiana',
'19' : 'Iowa',
'20' : 'Kansas',
'21' : 'Kentucky',
'22' : 'Louisiana',
'23' : 'Maine',
'24' : 'Maryland',
'25' : 'Massachusetts',
'26' : 'Michigan',
'27' : 'Minnesota',
'28' : 'Mississippi',
'29' : 'Missouri',
'30' : 'Montana',
'31' : 'Nebraska',
'32' : 'Nevada',
'33' : 'New Hampshire',
'34' : 'New Jersey',
'35' : 'New Mexico',
'36' : 'New York',
'37' : 'North Carolina',
'38' : 'North Dakota',
'39' : 'Ohio',
'40' : 'Oklahoma',
'41' : 'Oregon',
'42' : 'Pennsylvania',
'44' : 'Rhode Island',
'45' : 'South Carolina',
'46' : 'South Dakota',
'47' : 'Tennessee',
'48' : 'Texas',
'49' : 'Utah',
'50' : 'Vermont',
'51' : 'Virginia',
'53' : 'Washington',
'54' : 'West Virginia',
'55' : 'Wisconsin',
'56' : 'Wyoming',
'57' : 'Pacific Coast from Washington to California',
'58' : 'Alaskan Coast',
'59' : 'Hawaiian Coast',
'60' : 'American Samoa',
'61' : 'American Samoa Waters',
'64' : 'Federated States of Micronesia',
'65' : 'Mariana Islands Waters (including Guam)',
'66' : 'Guam',
'68' : 'Marshall Islands',
'69' : 'Northern Mariana Islands',
'70' : 'Palau',
'72' : 'Puerto Rico',
'73' : 'Atlantic Coast from Maine to Virginia',
'74' : 'U.S. Minor Outlying Islands',
'75' : 'Atlantic Coast from North Carolina to Florida, and the Coasts of Puerto Rico and Virgin Islands',
'77' : 'Gulf of Mexico',
'78' : 'Virgin Islands',
'91' : 'Lake Superior',
'92' : 'Lake Michigan',
'93' : 'Lake Huron',
'94' : 'Saint Clair River, Detroit River, and Lake Saint Clair',
'96' : 'Lake Erie',
'97' : 'Niagara River and Lake Ontario',
'98' : 'Saint Lawrence River',
'XX' : 'TEST',
}
CA_SAME_AREA={
'LOCATION' : 'CA',
'11' : 'Nova Scotia',
'12' : 'Nova Scotia',
'13' : 'Nova Scotia',
'14' : 'Nova Scotia',
'15' : 'New Brunswick',
'16' : 'New Brunswick',
'17' : 'New Brunswick',
'18' : 'Prince Edward Island',
'21' : 'Newfoundland/Labrador',
'22' : 'Newfoundland/Labrador',
'23' : 'Newfoundland/Labrador',
'24' : 'Newfoundland/Labrador',
'25' : 'Newfoundland/Labrador',
'26' : 'Newfoundland/Labrador',
'27' : 'Newfoundland/Labrador',
'28' : 'Newfoundland/Labrador',
'29' : 'Newfoundland/Labrador',
'31' : 'Quebec',
'32' : 'Quebec',
'33' : 'Quebec',
'34' : 'Quebec',
'35' : 'Quebec',
'36' : 'Quebec',
'37' : 'Quebec',
'38' : 'Quebec',
'39' : 'Quebec',
'41' : 'Ontario',
'42' : 'Ontario',
'43' : 'Ontario',
'44' : 'Ontario',
'45' : 'Ontario',
'46' : 'Ontario',
'47' : 'Ontario',
'48' : 'Ontario',
'49' : 'Ontario',
'51' : 'Manitoba',
'52' : 'Manitoba',
'53' : 'Manitoba',
'54' : 'Manitoba',
'55' : 'Manitoba',
'56' : 'Manitoba',
'57' : 'Manitoba',
'58' : 'Manitoba',
'59' : 'Manitoba',
'61' : 'Saskatchewan',
'62' : 'Saskatchewan',
'63' : 'Saskatchewan',
'64' : 'Saskatchewan',
'65' : 'Saskatchewan',
'66' : 'Saskatchewan',
'67' : 'Saskatchewan',
'68' : 'Saskatchewan',
'71' : 'Alberta',
'72' : 'Alberta',
'73' : 'Alberta',
'74' : 'Alberta',
'75' : 'Alberta',
'76' : 'Alberta',
'77' : 'Alberta',
'78' : 'Alberta',
'79' : 'Alberta',
'81' : 'British Columbia',
'82' : 'British Columbia',
'83' : 'British Columbia',
'84' : 'British Columbia',
'85' : 'British Columbia',
'86' : 'British Columbia',
'87' : 'British Columbia',
'88' : 'British Columbia',
'89' : 'British Columbia',
'91' : 'Yukon',
'92' : 'Yukon',
'93' : 'Yukon',
'94' : 'Northwest Territories',
'95' : 'Northwest Territories',
'96' : 'Northwest Territories',
'97' : 'Nunavut',
'98' : 'Nunavut',
'99' : 'Nunavut',
'XX' : 'TEST',
}
US_SAME_CODE={
'01001' : 'Autauga',
'01003' : 'Baldwin',
'01005' : 'Barbour',
'01007' : 'Bibb',
'01009' : 'Blount',
'01011' : 'Bullock',
'01013' : 'Butler',
'01015' : 'Calhoun',
'01017' : 'Chambers',
'01019' : 'Cherokee',
'01021' : 'Chilton',
'01023' : 'Choctaw',
'01025' : 'Clarke',
'01027' : 'Clay',
'01029' : 'Cleburne',
'01031' : 'Coffee',
'01033' : 'Colbert',
'01035' : 'Conecuh',
'01037' : 'Coosa',
'01039' : 'Covington',
'01041' : 'Crenshaw',
'01043' : 'Cullman',
'01045' : 'Dale',
'01047' : 'Dallas',
'01049' : 'Dekalb',
'01051' : 'Elmore',
'01053' : 'Escambia',
'01055' : 'Etowah',
'01057' : 'Fayette',
'01059' : 'Franklin',
'01061' : 'Geneva',
'01063' : 'Greene',
'01065' : 'Hale',
'01067' : 'Henry',
'01069' : 'Houston',
'01071' : 'Jackson',
'01073' : 'Jefferson',
'01075' : 'Lamar',
'01077' : 'Lauderdale',
'01079' : 'Lawrence',
'01081' : 'Lee',
'01083' : 'Limestone',
'01085' : 'Lowndes',
'01087' : 'Macon',
'01089' : 'Madison',
'01091' : 'Marengo',
'01093' : 'Marion',
'01095' : 'Marshall',
'01097' : 'Mobile',
'01099' : 'Monroe',
'01101' : 'Montgomery',
'01103' : 'Morgan',
'01105' : 'Perry',
'01107' : 'Pickens',
'01109' : 'Pike',
'01111' : 'Randolph',
'01113' : 'Russell',
'01115' : 'Saint Clair',
'01117' : 'Shelby',
'01119' : 'Sumter',
'01121' : 'Talladega',
'01123' : 'Tallapoosa',
'01125' : 'Tuscaloosa',
'01127' : 'Walker',
'01129' : 'Washington',
'01131' : 'Wilcox',
'01133' : 'Winston',
'02013' : 'Aleutians East',
'02016' : 'Aleutians West',
'02020' : 'Anchorage',
'02050' : 'Bethel',
'02060' : 'Bristol Bay',
'02068' : 'Denali',
'02070' : 'Dillingham',
'02090' : 'Fairbanks North Star',
'02100' : 'Haines',
'02110' : 'Juneau',
'02122' : 'Kenai Peninsula',
'02130' : 'Ketchikan Gateway',
'02150' : 'Kodiak Island',
'02164' : 'Lake and Peninsula',
'02170' : 'Matanuska-Susitna',
'02180' : 'Nome',
'02185' : 'North Slope',
'02188' : 'Northwest Arctic',
'02201' : 'Prince of Wales-Outer Ketchikan',
'02220' : 'Sitka',
'02232' : 'Skagway-Hoonah-Angoon',
'02240' : 'Southeast Fairbanks',
'02261' : 'Valdez-Cordova',
'02270' : 'Wade Hampton',
'02280' : 'Wrangell-Petersburg',
'02282' : 'Yakutat',
'02290' : 'Yukon-Koyukuk',
'04001' : 'Apache',
'04003' : 'Cochise',
'04005' : 'Coconino',
'04007' : 'Gila',
'04009' : 'Gr
|
pyfa-org/eos
|
eos/restriction/restriction/resource.py
|
Python
|
lgpl-3.0
| 4,071 | 0 |
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from abc import ABCMeta
from abc import abstractmethod
from collections import namedtuple
from eos.const.eos import Restriction
from eos.const.eve import AttrId
from eos.restriction.exception import RestrictionValidationError
from .base import BaseRestriction
ResourceErrorData = namedtuple(
'ResourceErrorData', ('total_use', 'output', 'item_use'))
class ResourceRestriction(BaseRestriction, metaclass=ABCMeta):
"""Base class for all resource restrictions.
Resources in this context is something produced by ship/character and
consumed by other items.
"""
def __init__(self, fit):
self.__fit = fit
@property
@abstractmethod
def _stat_name(self):
"""This name will be used to get numbers from stats service."""
...
@property
@abstractmethod
def _use_attr_id(self):
...
def validate(self):
# Use stats module to get resource use and output
stats = getattr(self.__fit.stats, self._stat_name)
total_use = stats.used
# Can be None, so fall back to 0 in this case
output = stats.output or 0
# If we're not out of resource, do nothing
if total_use <= output:
return
tainted_items = {}
for item in stats._users:
resource_use = item.attrs[self._use_attr_id]
# Ignore items which do not actually consume resource
if resource_use <= 0:
continue
tainted_items[item] = ResourceErrorData(
total_use=total_use,
output=output,
item_use=resource_use)
raise RestrictionValidationError(tainted_items)
class CpuRestriction(ResourceRestriction):
"""CPU use by items should not exceed ship CPU output.
Details:
For validation, stats module data is used.
"""
type = Restriction.cpu
_stat_name = 'cpu'
_use_attr_id = AttrId.cpu
class PowergridRestriction(ResourceRestriction):
"""Power grid use by items should not exceed ship power grid output.
Details:
For validation, stats module data is used.
"""
type = Restriction.powergrid
_stat_name = 'powergrid'
_use_attr_id = AttrId.power
class CalibrationRestriction(ResourceRestriction):
"""Calibration use by items should not exceed ship calibration output.
Details:
For validation, stats module data is used.
"""
type = Restriction.calibration
_stat_name = 'calibration'
_use_attr_id = AttrId.upgrade_cost
class DroneBayVolumeRestriction(ResourceRestriction):
"""Drone bay volume use by items should not exceed ship drone bay volume.
Details:
For validation, stats module data is used.
"""
type = Restriction.dronebay_volume
_stat_name = 'dronebay'
_use_attr_id = AttrId.volume
class DroneBandwidthRestriction(ResourceRestriction):
"""Drone bandwidth use by items should not exceed ship drone bandwidth.
Details:
For validation, stats module data is used.
"""
type = Restriction.drone_bandwidth
_stat_name = 'drone_bandwidth'
_use_attr_id = AttrId.drone_bandwidth_used
|