| repo_name (stringlengths 5–92) | path (stringlengths 4–232) | copies (stringclasses, 19 values) | size (stringlengths 4–7) | content (stringlengths 721–1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
jind11/sentence-classification | code/util.py | 1 | 16183 |
import json
import os
import time
import cPickle
import numpy as np
import sys
import re
import operator
import fnmatch
from gensim.models.keyedvectors import KeyedVectors
from tensorflow.contrib import learn
from keras.preprocessing import sequence
# construct embedding vectors based on the Google word2vec model and the vocabulary
def process_word2vec(word2vec_dir, vocab, save_path, random_init=True):
# read pre-trained word embeddings from the binary file
print('Loading google word2vec...')
word2vec_path = word2vec_dir + '/GoogleNews-vectors-negative300.bin.gz'
word_vectors = KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
print('Word2vec loaded!')
if random_init:
word2vec = np.random.uniform(-0.25, 0.25, (len(vocab), 300))
else:
word2vec = np.zeros((len(vocab), 300))
found = 0
for idx, token in enumerate(vocab):
try:
vec = word_vectors[token]
except KeyError:
pass
else:
word2vec[idx, :] = vec
found += 1
del word_vectors
print("{}/{} of word vocab have corresponding vectors in {}".format(found, len(vocab), word2vec_path))
np.savez_compressed(save_path, word2vec=word2vec)
print("saved trimmed word2vec matrix at: {}".format(save_path))
# construct embedding vectors according to the GloVe word vectors and vocabulary
def process_glove(glove_dir, glove_dim, vocab_dir, save_path, random_init=True):
"""
:param vocab_list: [vocab]
:return:
"""
save_path = save_path + '.{}'.format(glove_dim)
if not os.path.isfile(save_path + ".npz"):
# read vocabulary
with open(vocab_dir + '/vocabulary.pickle', 'rb') as f:
vocab_map = cPickle.load(f)
f.close()
vocab_list = list(zip(*vocab_map)[0])
glove_path = os.path.join(glove_dir, "glove.6B.{}d.txt".format(glove_dim))
if random_init:
glove = np.random.uniform(-0.25, 0.25, (len(vocab_list), glove_dim))
else:
glove = np.zeros((len(vocab_list), glove_dim))
found = 0
with open(glove_path, 'r') as fh:
for line in fh.readlines():
array = line.lstrip().rstrip().split(" ")
word = array[0]
vector = list(map(float, array[1:]))
if word in vocab_list:
idx = vocab_list.index(word)
glove[idx, :] = vector
found += 1
if word.capitalize() in vocab_list:
idx = vocab_list.index(word.capitalize())
glove[idx, :] = vector
found += 1
if word.upper() in vocab_list:
idx = vocab_list.index(word.upper())
glove[idx, :] = vector
found += 1
print("{}/{} of word vocab have corresponding vectors in {}".format(found, len(vocab_list), glove_path))
np.savez_compressed(save_path, glove=glove)
print("saved trimmed glove matrix at: {}".format(save_path))
def load_embeddings(dir, embedding_type):
return np.load(dir)[embedding_type]
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
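# Illustrative sketch (the sentence is made up): clean_str("It's a great movie!")
# returns "it 's a great movie !" -- contractions and punctuation are split off
# as separate whitespace-delimited tokens and the text is lower-cased.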
# preprocess the MR datasets
def preprocess_data_and_labels_MR(positive_data_file_path, negative_data_file_path, save_path, pad_width=0):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open(positive_data_file_path, "r").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(negative_data_file_path, "r").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
# Generate labels
positive_labels = [[1] for _ in positive_examples]
negative_labels = [[0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
# pad the left and right with zeros
if pad_width > 0:
x_padded = np.lib.pad(x, ((0, 0), (pad_width, pad_width)), 'constant', constant_values=(0, 0))
else:
x_padded = x
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(x.shape[0]))
x_shuffled = x_padded[shuffle_indices]
y_shuffled = y[shuffle_indices]
# merge data and labels
data_and_labels = zip(x_shuffled, y_shuffled)
# save train data and labels
with open(save_path + '/data_and_labels.pickle', 'w') as f:
cPickle.dump(data_and_labels, f)
f.close()
# get vocabulary and save it
# Extract word:id mapping from the object.
vocab_dict = vocab_processor.vocabulary_._mapping
# Sort the vocabulary dictionary on the basis of values(id)
sorted_vocab_dict = sorted(vocab_dict.items(), key=operator.itemgetter(1))
sorted_vocab = list(zip(*sorted_vocab_dict))[0]
with open(save_path + '/vocabulary.pickle', 'w') as f:
cPickle.dump(sorted_vocab, f)
f.close()
# Process word vector embeddings
process_word2vec('../data', sorted_vocab, '../data/word2vec.trimmed')
# Extract a set of n-grams from a list of integers.
def create_ngram_set(input_list, ngram_value=2):
return set(zip(*[input_list[i:] for i in range(ngram_value)]))
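# Illustrative example (as in the fastText n-gram recipe this helper follows):
#   create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
#   -> {(4, 9), (4, 1), (1, 4), (9, 4)}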
# Augment the input list of lists (sequences) by appending n-gram values.
def add_ngram(sequences, token_indice, ngram_range=2):
new_sequences = []
for input_list in sequences:
new_list = input_list[:]
for ngram_value in range(2, ngram_range + 1):
for i in range(len(new_list) - ngram_value + 1):
ngram = tuple(new_list[i:i + ngram_value])
if ngram in token_indice:
new_list.append(token_indice[ngram])
new_sequences.append(new_list)
return new_sequences
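# Illustrative example (the token_indice mapping here is made up):
#   add_ngram([[1, 3, 4, 5], [1, 3, 7, 9, 2]],
#             token_indice={(1, 3): 1337, (9, 2): 42}, ngram_range=2)
#   -> [[1, 3, 4, 5, 1337], [1, 3, 7, 9, 2, 1337, 42]]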
# preprocess the MR datasets specifically for the fasttext model
def preprocess_data_and_labels_MR_fasttext(positive_data_file_path, negative_data_file_path, save_path, ngram_range=1, pad_width=0):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open(positive_data_file_path, "r").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(negative_data_file_path, "r").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
# Generate labels
positive_labels = [[1] for _ in positive_examples]
negative_labels = [[0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = list(vocab_processor.fit_transform(x_text))
# Extract word:id mapping from the object.
vocab_dict = vocab_processor.vocabulary_._mapping
max_features = len(vocab_dict)
# remove filled <UNK>, i.e., 0 index
x = [filter(lambda a: a != 0, line) for line in x]
print('Average sequence length before adding n-grams: {}'.format(np.mean(list(map(len, x)), dtype=int)))
# Add n-grams...
if ngram_range > 1:
print('Adding {}-gram features'.format(ngram_range))
# Create set of unique n-gram from the training set.
ngram_set = set()
for input_list in x:
for i in range(2, ngram_range + 1):
set_of_ngram = create_ngram_set(input_list, ngram_value=i)
ngram_set.update(set_of_ngram)
# Dictionary mapping n-gram token to a unique integer.
# Integer values are greater than max_features in order
# to avoid collision with existing features.
start_index = max_features + 1
token_indice = {v: k + start_index for k, v in enumerate(ngram_set)}
indice_token = {token_indice[k]: k for k in token_indice}
# Augmenting with n-grams features
x = add_ngram(x, token_indice, ngram_range)
print('Average sequence length after adding n-grams: {}'.format(np.mean(list(map(len, x)), dtype=int)))
# pad sequence
x = np.array(sequence.pad_sequences(x, padding='post'))
print('x shape:', x.shape)
# pad the left and right with zeros
if pad_width > 0:
x_padded = np.lib.pad(x, ((0, 0), (pad_width, pad_width)), 'constant', constant_values=(0, 0))
else:
x_padded = x
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(x_padded.shape[0]))
x_shuffled = x_padded[shuffle_indices]
y_shuffled = y[shuffle_indices]
# merge data and labels
data_and_labels = zip(x_shuffled, y_shuffled)
# save train data and labels
with open(save_path, 'w') as f:
cPickle.dump(data_and_labels, f)
f.close()
def load_data_MR(file_dir, fold=1):
print ("Loading datasets...")
# read train data and labels
with open(file_dir + '/data_and_labels.pickle', 'r') as f:
data_and_labels = cPickle.load(f)
f.close()
# Split train/test set
test_sample_index_s = int((fold - 1) / 10.0 * float(len(data_and_labels)))
test_sample_index_e = int(fold / 10.0 * float(len(data_and_labels)))
train_data_and_labels = data_and_labels[:test_sample_index_s] + data_and_labels[test_sample_index_e:]
test_data_and_labels = data_and_labels[test_sample_index_s:test_sample_index_e]
# Split data and labels
train_data, train_labels = zip(*train_data_and_labels)
train_data, train_labels = np.array(train_data), np.array(train_labels)
test_data, test_labels = zip(*test_data_and_labels)
test_data, test_labels = np.array(test_data), np.array(test_labels)
# read vocabulary
with open(file_dir + '/vocabulary.pickle', 'r') as f:
vocab = cPickle.load(f)
f.close()
seq_len = train_data.shape[1]
vocab_size = len(vocab)
return (train_data, train_labels, test_data, test_labels, seq_len, vocab_size)
def load_data_MR_fasttext(file_path, fold=1):
print ("Loading datasets...")
# read train data and labels
with open(file_path, 'r') as f:
data_and_labels = cPickle.load(f)
f.close()
# Split train/test set
test_sample_index_s = int((fold - 1) / 10.0 * float(len(data_and_labels)))
test_sample_index_e = int(fold / 10.0 * float(len(data_and_labels)))
train_data_and_labels = data_and_labels[:test_sample_index_s] + data_and_labels[test_sample_index_e:]
test_data_and_labels = data_and_labels[test_sample_index_s:test_sample_index_e]
# Split data and labels
train_data, train_labels = zip(*train_data_and_labels)
train_data, train_labels = np.array(train_data), np.array(train_labels)
test_data, test_labels = zip(*test_data_and_labels)
test_data, test_labels = np.array(test_data), np.array(test_labels)
seq_len = train_data.shape[1]
vocab_size = max([np.amax(train_data), np.amax(test_data)]) + 1
return (train_data, train_labels, test_data, test_labels, seq_len, vocab_size)
# preprocess the AskaPatient dataset
def preprocess_data_and_labels_AAP(data_file_path, save_path):
def merge_folds(data_file_path, save_path):
# merge all the separated folds into one file
train = []
val = []
test = []
for file in os.listdir(data_file_path):
if fnmatch.fnmatch(file, '*train.txt'):
train += (open(data_file_path + '/' + file, 'r').readlines())
elif fnmatch.fnmatch(file, '*validation.txt'):
val += (open(data_file_path + '/' + file, 'r').readlines())
else:
test += (open(data_file_path + '/' + file, 'r').readlines())
open(save_path + '/train.txt', 'w').write(''.join(train))
open(save_path + '/val.txt', 'w').write(''.join(val))
open(save_path + '/test.txt', 'w').write(''.join(test))
print(len(train + val + test))
merge_folds(data_file_path, save_path)
def create_batches(data, labels, batch_size, shuffle=True):
# Generates a batch iterator for a dataset.
data_and_labels = np.array(zip(data, labels))
data_size = len(data)
num_batches_per_epoch = int((data_size - 1) / batch_size) + 1
# Shuffle the data
if shuffle:
np.random.seed(11)
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data_and_labels[shuffle_indices]
else:
shuffled_data = data_and_labels
# create batches
batches = []
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
batches.append(shuffled_data[start_index:end_index])
return batches
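# Minimal sketch (numbers are hypothetical): with 250 examples and
# batch_size=64, create_batches returns 4 shuffled batches of zipped
# (example, label) pairs -- three of 64 items and a final one of 58.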
class Progbar(object):
"""
Progbar class copied from keras (https://github.com/fchollet/keras/)
Displays a progress bar.
# Arguments
target: Total number of steps expected.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1):
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=None, exact=None):
"""
Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
exact: List of tuples (name, value_for_last_step).
The progress bar will display these values directly.
"""
values = values or []
exact = exact or []
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
for k, v in exact:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = [v, 1]
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current)/self.target
prog_width = int(self.width*prog)
if prog_width > 0:
bar += ('='*(prog_width-1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.'*(self.width-prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit*(self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
if isinstance(self.sum_values[k], list):
info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
else:
info += ' - %s: %s' % (k, self.sum_values[k])
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width-self.total_width) * " ")
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write("\n")
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
sys.stdout.write(info + "\n")
def add(self, n, values=None):
self.update(self.seen_so_far+n, values)
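# Minimal usage sketch (the training loop and loss value are hypothetical):
#     progbar = Progbar(target=num_batches)
#     for step, batch in enumerate(batches):
#         loss = train_step(batch)  # assumed to return a float
#         progbar.update(step + 1, values=[('loss', loss)])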
if __name__=="__main__":
preprocess_data_and_labels_MR('../data/rt-polarity.pos', '../data/rt-polarity.neg', '../data', pad_width=4)
# preprocess_data_and_labels_MR_fasttext('../data/rt-polarity.pos', '../data/rt-polarity.neg', '../data/fasttext_data_and_labels.pickle',
# ngram_range=3, pad_width=4)
| mit | 949,150,617,451,764,600 | 33.213531 | 139 | 0.672496 | false |
mozilla/firefox-flicks | vendor-local/lib/python/celery/app/builtins.py | 1 | 12278 |
# -*- coding: utf-8 -*-
"""
celery.app.builtins
~~~~~~~~~~~~~~~~~~~
Built-in tasks that are always available in all
app instances. E.g. chord, group and xmap.
"""
from __future__ import absolute_import
from __future__ import with_statement
from collections import deque
from itertools import starmap
from celery._state import get_current_worker_task
from celery.utils import uuid
#: global list of functions defining tasks that should be
#: added to all apps.
_shared_tasks = []
def shared_task(constructor):
"""Decorator that specifies that the decorated function is a function
that generates a built-in task.
The function will then be called for every new app instance created
(lazily, so more exactly when the task registry for that app is needed).
"""
_shared_tasks.append(constructor)
return constructor
def load_shared_tasks(app):
"""Loads the built-in tasks for an app instance."""
for constructor in _shared_tasks:
constructor(app)
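# Registration sketch (illustrative only; 'celery.noop' is not a real built-in):
#
#     @shared_task
#     def add_noop_task(app):
#         @app.task(name='celery.noop', _force_evaluate=True)
#         def noop():
#             pass
#         return noop
#
# load_shared_tasks(app) later calls every registered constructor with the app
# instance, so each built-in task is created lazily, once per app.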
@shared_task
def add_backend_cleanup_task(app):
"""The backend cleanup task can be used to clean up the default result
backend.
This task is also added to the periodic task schedule so that it is
run every day at midnight, but :program:`celerybeat` must be running
for this to be effective.
Note that not all backends do anything for this; what needs to be
done at cleanup is up to each backend, and some backends
may even clean up in realtime so that a periodic cleanup is not necessary.
"""
@app.task(name='celery.backend_cleanup', _force_evaluate=True)
def backend_cleanup():
app.backend.cleanup()
return backend_cleanup
@shared_task
def add_unlock_chord_task(app):
"""The unlock chord task is used by result backends that doesn't
have native chord support.
It creates a task chain polling the header for completion.
"""
from celery.canvas import subtask
from celery import result as _res
@app.task(name='celery.chord_unlock', max_retries=None,
default_retry_delay=1, ignore_result=True, _force_evaluate=True)
def unlock_chord(group_id, callback, interval=None, propagate=False,
max_retries=None, result=None):
if interval is None:
interval = unlock_chord.default_retry_delay
result = _res.GroupResult(group_id, map(_res.AsyncResult, result))
j = result.join_native if result.supports_native_join else result.join
if result.ready():
subtask(callback).delay(j(propagate=propagate))
else:
return unlock_chord.retry(countdown=interval,
max_retries=max_retries)
return unlock_chord
@shared_task
def add_map_task(app):
from celery.canvas import subtask
@app.task(name='celery.map', _force_evaluate=True)
def xmap(task, it):
task = subtask(task).type
return list(map(task, it))
return xmap
@shared_task
def add_starmap_task(app):
from celery.canvas import subtask
@app.task(name='celery.starmap', _force_evaluate=True)
def xstarmap(task, it):
task = subtask(task).type
return list(starmap(task, it))
return xstarmap
@shared_task
def add_chunk_task(app):
from celery.canvas import chunks as _chunks
@app.task(name='celery.chunks', _force_evaluate=True)
def chunks(task, it, n):
return _chunks.apply_chunks(task, it, n)
return chunks
@shared_task
def add_group_task(app):
_app = app
from celery.canvas import maybe_subtask, subtask
from celery.result import from_serializable
class Group(app.Task):
app = _app
name = 'celery.group'
accept_magic_kwargs = False
def run(self, tasks, result, group_id, partial_args):
app = self.app
result = from_serializable(result)
# any partial args are added to all tasks in the group
taskit = (subtask(task).clone(partial_args)
for i, task in enumerate(tasks))
if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
return app.GroupResult(
result.id,
[task.apply(group_id=group_id) for task in taskit],
)
with app.producer_or_acquire() as pub:
[task.apply_async(group_id=group_id, publisher=pub,
add_to_parent=False) for task in taskit]
parent = get_current_worker_task()
if parent:
parent.request.children.append(result)
return result
def prepare(self, options, tasks, args, **kwargs):
AsyncResult = self.AsyncResult
options['group_id'] = group_id = (
options.setdefault('task_id', uuid()))
def prepare_member(task):
task = maybe_subtask(task)
opts = task.options
opts['group_id'] = group_id
try:
tid = opts['task_id']
except KeyError:
tid = opts['task_id'] = uuid()
return task, AsyncResult(tid)
try:
tasks, results = zip(*[prepare_member(task) for task in tasks])
except ValueError: # tasks empty
tasks, results = [], []
return (tasks, self.app.GroupResult(group_id, results),
group_id, args)
def apply_async(self, partial_args=(), kwargs={}, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(partial_args, kwargs, **options)
tasks, result, gid, args = self.prepare(
options, args=partial_args, **kwargs
)
super(Group, self).apply_async((
list(tasks), result.serializable(), gid, args), **options
)
return result
def apply(self, args=(), kwargs={}, **options):
return super(Group, self).apply(
self.prepare(options, args=args, **kwargs),
**options).get()
return Group
@shared_task
def add_chain_task(app):
from celery.canvas import Signature, chord, group, maybe_subtask
_app = app
class Chain(app.Task):
app = _app
name = 'celery.chain'
accept_magic_kwargs = False
def prepare_steps(self, args, tasks):
steps = deque(tasks)
next_step = prev_task = prev_res = None
tasks, results = [], []
i = 0
while steps:
# First task get partial args from chain.
task = maybe_subtask(steps.popleft())
task = task.clone() if i else task.clone(args)
res = task._freeze()
i += 1
if isinstance(task, group):
# automatically upgrade group(..) | s to chord(group, s)
try:
next_step = steps.popleft()
# for chords we freeze by pretending it's a normal
# task instead of a group.
res = Signature._freeze(task)
task = chord(task, body=next_step, task_id=res.task_id)
except IndexError:
pass
if prev_task:
# link previous task to this task.
prev_task.link(task)
# set the results parent attribute.
res.parent = prev_res
results.append(res)
tasks.append(task)
prev_task, prev_res = task, res
return tasks, results
def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
task_id=None, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, **options)
options.pop('publisher', None)
tasks, results = self.prepare_steps(args, kwargs['tasks'])
result = results[-1]
if group_id:
tasks[-1].set(group_id=group_id)
if chord:
tasks[-1].set(chord=chord)
if task_id:
tasks[-1].set(task_id=task_id)
result = tasks[-1].type.AsyncResult(task_id)
tasks[0].apply_async()
return result
def apply(self, args=(), kwargs={}, subtask=maybe_subtask, **options):
last, fargs = None, args # fargs passed to first task only
for task in kwargs['tasks']:
res = subtask(task).clone(fargs).apply(last and (last.get(), ))
res.parent, last, fargs = last, res, None
return last
return Chain
@shared_task
def add_chord_task(app):
"""Every chord is executed in a dedicated task, so that the chord
can be used as a subtask, and this generates the task
responsible for that."""
from celery import group
from celery.canvas import maybe_subtask
_app = app
class Chord(app.Task):
app = _app
name = 'celery.chord'
accept_magic_kwargs = False
ignore_result = False
def run(self, header, body, partial_args=(), interval=1,
max_retries=None, propagate=False, eager=False, **kwargs):
group_id = uuid()
AsyncResult = self.app.AsyncResult
prepare_member = self._prepare_member
# - convert back to group if serialized
tasks = header.tasks if isinstance(header, group) else header
header = group([maybe_subtask(s).clone() for s in tasks])
# - eager applies the group inline
if eager:
return header.apply(args=partial_args, task_id=group_id)
results = [AsyncResult(prepare_member(task, body, group_id))
for task in header.tasks]
# - fallback implementations schedules the chord_unlock task here
app.backend.on_chord_apply(group_id, body,
interval=interval,
max_retries=max_retries,
propagate=propagate,
result=results)
# - call the header group, returning the GroupResult.
# XXX Python 2.5 doesn't allow kwargs after star-args.
return header(*partial_args, **{'task_id': group_id})
def _prepare_member(self, task, body, group_id):
opts = task.options
# d.setdefault would work but generating uuid's are expensive
try:
task_id = opts['task_id']
except KeyError:
task_id = opts['task_id'] = uuid()
opts.update(chord=body, group_id=group_id)
return task_id
def apply_async(self, args=(), kwargs={}, task_id=None, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, **options)
group_id = options.pop('group_id', None)
chord = options.pop('chord', None)
header = kwargs.pop('header')
body = kwargs.pop('body')
header, body = (list(maybe_subtask(header)),
maybe_subtask(body))
if group_id:
body.set(group_id=group_id)
if chord:
body.set(chord=chord)
callback_id = body.options.setdefault('task_id', task_id or uuid())
parent = super(Chord, self).apply_async((header, body, args),
kwargs, **options)
body_result = self.AsyncResult(callback_id)
body_result.parent = parent
return body_result
def apply(self, args=(), kwargs={}, propagate=True, **options):
body = kwargs['body']
res = super(Chord, self).apply(args, dict(kwargs, eager=True),
**options)
return maybe_subtask(body).apply(
args=(res.get(propagate=propagate).get(), ))
return Chord
| bsd-3-clause | -1,646,172,077,825,770,000 | 35.325444 | 79 | 0.558153 | false |
clarkperkins/click-shell | setup.py | 1 | 1935 |
import io
import os
from setuptools import setup, find_packages
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
# __version__ = "0.9"
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
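# Illustrative sketch: given a file whose version line reads
#     __version__ = "2.1"
# get_version() splits on the detected quote character and returns "2.1".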
SHORT_DESCRIPTION = "An extension to click that easily turns your click app into a shell utility"
# Use the README.md as the long description
LONG_DESCRIPTION = read('README.rst')
requirements = [
'click>=7.0',
]
setup(
name='click-shell',
version=get_version('click_shell/__init__.py'),
url="https://github.com/clarkperkins/click-shell",
author="Clark Perkins",
author_email="r.clark.perkins@gmail.com",
description=SHORT_DESCRIPTION,
long_description=LONG_DESCRIPTION,
license='BSD',
include_package_data=True,
packages=find_packages(),
zip_safe=False,
install_requires=requirements,
dependency_links=[],
extras_require={
'readline': ['gnureadline'],
'windows': ['pyreadline'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Software Development :: User Interfaces',
'Topic :: System :: Shells',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
)
| bsd-3-clause | 2,017,556,178,663,941,400 | 28.769231 | 97 | 0.610853 | false |
xin1195/smartSearch | home/handler/webSocketHandler.py | 1 | 1486 |
#!/usr/bin/env python3
# UserSocket interface, used to communicate with the browser
import json
import tornado.websocket
from home.handler.xiaoTianHandler import xiao_tian_answer
from setting import logger
def send_to_one_user(user_client, message):
user_client.write_message(json.dumps(message))
def send_to_many_user(user_clients, message):
for user_client in user_clients:
user_client.write_message(json.dumps(message))
class UserSocketHandler(tornado.websocket.WebSocketHandler):
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
self.user_client_map = {}
def data_received(self, chunk):
pass
def send_to_all_user(self, message):
for i in self.user_client_map:
self.user_client_map[i].write_message(json.dumps(message))
def open(self):
self.write_message(json.dumps([{
'cmd': 'open',
}, {
'form_user': 'system',
'data': 'open:connect_success',
}]))
def on_close(self):
pass
def on_message(self, message):
if message == 'heart':
device_id = str(self.get_argument('device_id'))
logger.info(device_id + ': heartbeat connection is alive')
else:
res = xiao_tian_answer(question=message)
self.write_message({"message": res})
logger.info({"message", message})
def on_pong(self, data):
logger.info({"on_pong", data})
| apache-2.0 | -2,113,201,457,961,597,400 | 25.925926 | 70 | 0.610729 | false |
sfelixjr/booklet | src/booklet_handler.py | 1 | 1403 |
# Copyright 2014 - Samuel de Sousa (felixjr.org)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from booklet_progress import BookletProgress
from booklet_progress_gui import Ui_Dialog
from PyQt4.QtGui import QDialog
# A class to update the progress bar shown to the user.
class BookletHandler():
def __init__(self, source, target):
self.dialog = QDialog()
self.progress = Ui_Dialog()
self.progress.setupUi(self.dialog)
self.thread = BookletProgress()
self.thread.set_values(source, target)
self.thread.partDone.connect(self.update)
self.thread.procDone.connect(self.finish)
self.thread.start()
self.dialog.exec_()
# It updates the progress bar.
#
# Parameters:
# -val: the progress value, e.g. 26%.
def update(self, val):
self.progress.progressBar.setValue(val)
# Called when the process is done in order to close
# the Dialog.
def finish(self):
self.dialog.close()
| apache-2.0 | 4,349,328,688,752,345,600 | 32.404762 | 74 | 0.743407 | false |
PLUS-POSTECH/study.plus.or.kr | src/problem/signals.py | 1 | 1112 |
from django.db.models.signals import post_save
from django.dispatch import receiver
from integration.helpers import discord
from .models import ProblemInstance, ProblemQuestion, ProblemAuthLog
# pylint: disable=W0613
@receiver(post_save, sender=ProblemInstance)
def on_register_problem_handler(sender, instance, **kwargs):
discord.on_problem_registered(instance)
@receiver(post_save, sender=ProblemQuestion)
def on_question_handler(sender, instance: ProblemQuestion, **kwargs):
if instance.answer == "":
discord.on_question(instance)
else:
discord.on_answer(instance)
@receiver(post_save, sender=ProblemAuthLog)
def on_auth(sender, instance: ProblemAuthLog, **kwargs):
if instance.auth_key == instance.problem_instance.problem.auth_key:
problem_instance = instance.problem_instance
if ProblemAuthLog.objects.filter(problem_instance=problem_instance, auth_key=problem_instance.problem.auth_key).count() == 1:
discord.on_first_blood(instance)
else:
discord.on_solved(instance)
else:
discord.on_auth_tried(instance)
| apache-2.0 | -2,252,245,296,482,034,700 | 34.870968 | 133 | 0.735612 | false |
avaris/aBibliophile | grouperproxy.py | 1 | 11591 |
#!/usr/bin/env python
# -.- coding: utf-8 -.-
# Author : Deniz Turgut
# Created : 09.02.2012
from collections import namedtuple
from PyQt4 import QtGui, QtCore
# each group item has a 'name' and a list of 'children' consisting
# of each child's QPersistentModelIndex (pmi)
groupItem = namedtuple('groupItem',['name', 'children'])
# each row item has 'pmi' and a list of groups it is assigned
rowItem = namedtuple('rowItem',['pmi', 'groups'])
class GrouperProxy(QtGui.QAbstractProxyModel):
def __init__(self, parent = None):
super(GrouperProxy, self).__init__(parent)
self._root = QtCore.QModelIndex()
self.clear()
def clear(self):
self._groups = [] # stores the groups and their children
self._rows = [] # stores the rows from original model and their groups
def setSourceModel(self, model, displayColumn=None, groupColumn=0, groupSeparator=None):
'''
sets the source model.
arguments:
- model: source model
- displayColumn: column to be displayed. use 'None' to show all. (default = None)
- groupColumn: column to be used for grouping. (default = 0)
- groupSeparator: string used for separating groups from groupColumn.
use 'None' for no separation. (default = None)
'''
super(GrouperProxy, self).setSourceModel(model)
self.connectSignals()
self.setDisplayColumn(displayColumn)
self.setGroupColumn(groupColumn, groupSeparator)
def setGroupColumn(self, column, separator=None):
self._groupColumn = column
self._groupSeparator = separator
self._group()
def setDisplayColumn(self, column):
self.beginResetModel()
self._displayColumn = column
self.endResetModel()
def connectSignals(self):
sourceModel = self.sourceModel()
#sourceModel.columnsAboutToBeInserted.connect(self.beginInsertColumns)
#sourceModel.columnsAboutToBeMoved.connect(self.beginMoveColumns)
#sourceModel.columnsAboutToBeRemoved.connect(self.beginRemoveColumns)
#sourceModel.columnsInserted.connect(self.endInsertColumns)
#sourceModel.columnsMoved.connect(self.endMoveColumns)
#sourceModel.columnsRemoved.connect(self.endRemoveColumns)
sourceModel.dataChanged.connect(self._dataChanged)
#sourceModel.headerDataChanged.connect(self.headerDataChanged.emit)
#sourceModel.layoutAboutToBeChanged.connect(self.layoutAboutToBeChanged.emit)
sourceModel.layoutChanged.connect(self._group)
#sourceModel.modelAboutToBeReset.connect(self.beginResetModel)
sourceModel.modelReset.connect(self._group)
#sourceModel.rowsAboutToBeInserted.connect(self.beginInsertRows)
#sourceModel.rowsAboutToBeMoved.connect(self.beginMoveRows)
#sourceModel.rowsAboutToBeRemoved.connect(self.beginRemoveRows)
sourceModel.rowsInserted.connect(self._rowsInserted)
sourceModel.rowsMoved.connect(self.endMoveRows)
sourceModel.rowsRemoved.connect(self._rowsRemoved)
def mapToSource(self, index):
if not index.isValid():
return QtCore.QModelIndex()
parent = index.internalPointer()
if parent == self._root:
return QtCore.QModelIndex()
else:
groupIndex, group = self._getGroup(parent)
pmi = group.children[index.row()]
if self._displayColumn is None:
column = index.column()
else:
column = self._displayColumn
return self.sourceModel().index(pmi.row(), column)
def mapFromSource(self, index):
if not index.isValid():
return QtCore.QModelIndex()
pmi = QtCore.QPersistentModelIndex(self.sourceModel().index(index.row(), self._groupColumn))
rowIndex, row = self._getRow(pmi)
if row.groups:
groupIndex, group = self._getGroup(row.groups[0])
rowIndex = group.children.index(row.pmi)
column = 0 if self._displayColumn is not None else index.column()
return self.index(rowIndex, column, self.index(groupIndex, 0, self._root))
else:
return QtCore.QModelIndex()
def rowCount(self, parent):
if parent == self._root:
return len(self._groups)
elif parent.internalPointer() == self._root:
return len(self._groups[parent.row()].children)
else:
return 0
def columnCount(self, parent):
if self._displayColumn is not None:
return 1
else:
return self.sourceModel().columnCount(QtCore.QModelIndex())
def index(self, row, column, parent):
if self.hasIndex(row, column, parent):
if parent == self._root:
return self.createIndex(row, column, self._root)
else:
return self.createIndex(row, column, self._groups[parent.row()].name)
return QtCore.QModelIndex()
def parent(self, index):
if not index.isValid():
return QtCore.QModelIndex()
parent = index.internalPointer()
if parent == self._root:
return self._root
else:
groupIndex, group = self._getGroup(parent)
return self.index(groupIndex, 0, self._root)
def data(self, index, role):
if role == QtCore.Qt.DisplayRole:
parent = index.internalPointer()
if parent == self._root:
return self._groups[index.row()].name
else:
column = index.column() if self._displayColumn is None else self._displayColumn
groupIndex, group = self._getGroup(parent)
itemPmi = group.children[index.row()]
return self.sourceModel().index(itemPmi.row(), column).data()
def flags(self, index):
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def _group(self):
self.beginResetModel()
self.clear()
sourceModel = self.sourceModel()
for i in range(sourceModel.rowCount(QtCore.QModelIndex())):
index = sourceModel.index(i, self._groupColumn)
pmi = QtCore.QPersistentModelIndex(index)
rowIndex, row = self._getRow(pmi)
groups = self._findGroups(i)
for groupName in groups:
groupIndex, group = self._getGroup(groupName, False)
group.children.append(pmi)
row.groups.append(groupName)
self.endResetModel()
def _getGroup(self, groupName, emitSignals=True):
'''
returns 'index, groupItem' with '.name == groupName'
if no item is found, a new item is appended with 'name == groupName' and returned
'''
for index, group in enumerate(self._groups):
if groupName == group.name:
return index, group
index = len(self._groups)
if emitSignals: self.beginInsertRows(self._root, index, index)
self._groups.append(groupItem(groupName, []))
if emitSignals: self.endInsertRows()
return index, self._groups[-1]
def _getRow(self, pmi):
'''
returns 'index, rowItem' with '.pmi == pmi'
if no item is found, a new item is appended with '.pmi == pmi' and returned
'''
for index, row in enumerate(self._rows):
if pmi == row.pmi:
return index, row
index = len(self._rows)
self._rows.append(rowItem(pmi, []))
return index, self._rows[-1]
def _findGroups(self, sourceRow):
'''
returns a list of groups for item in row in sourceModel.
'''
rowData = unicode(self.sourceModel().index(sourceRow, self._groupColumn).data())
if self._groupSeparator is None:
return [rowData]
else:
return rowData.split(self._groupSeparator)
def _rowsRemoved(self, parent_, start, end):
for row in self._rows[start:end+1]:
for groupName in row.groups:
groupIndex, group = self._getGroup(groupName)
parent = self.index(groupIndex, 0, self._root)
childIndex = group.children.index(row.pmi)
self.beginRemoveRows(parent, childIndex, childIndex)
group.children.pop(childIndex)
self.endRemoveRows()
if not len(group.children):
self.beginRemoveRows(self._root, groupIndex, groupIndex)
self._groups.pop(groupIndex)
self.endRemoveRows()
self._rows[start:end+1] = []
def _rowsInserted(self, parent_, start, end):
for i in range(start, end+1):
pmi = QtCore.QPersistentModelIndex(self.sourceModel().index(i, self._groupColumn))
groups = self._findGroups(i)
for groupName in groups:
groupIndex, group = self._getGroup(groupName)
parent = self.createIndex(groupIndex, 0, self._root)
self.beginInsertRows(parent, len(group.children), len(group.children))
group.children.append(pmi)
self.endInsertRows()
self._rows.insert(i, rowItem(pmi, groups))
def _dataChanged(self, topleft, bottomright):
for i in range(topleft.row(), bottomright.row()+1):
row = self._rows[i]
if (self._displayColumn is None or
topleft.column() <= self._displayColumn <= bottomright.column()):
for groupName in row.groups:
groupIndex, group = self._getGroup(groupName)
rowIndex = group.children.index(row.pmi)
parent = self.index(groupIndex, 0, self._root)
# emit dataChanged
self.dataChanged.emit(self.index(rowIndex, 0, parent),
self.index(rowIndex, self.columnCount(parent)-1, parent))
if topleft.column() <= self._groupColumn <= bottomright.column():
oldGroupSet = set(row.groups)
newGroupSet = set(self._findGroups(i))
for groupName in oldGroupSet - newGroupSet:
# things to remove
groupIndex, group = self._getGroup(groupName)
rowIndex = group.children.index(row.pmi)
parent = self.index(groupIndex, 0, self._root)
self.beginRemoveRows(parent, rowIndex, rowIndex)
group.children.pop(rowIndex)
self.endRemoveRows()
if not group.children:
# empty group
self.beginRemoveRows(self._root, groupIndex, groupIndex)
self._groups.pop(groupIndex)
self.endRemoveRows()
row.groups.remove(groupName)
for groupName in newGroupSet - oldGroupSet:
# things to add
groupIndex, group = self._getGroup(groupName)
parent = self.index(groupIndex, 0, self._root)
self.beginInsertRows(parent, len(group.children), len(group.children))
group.children.append(row.pmi)
self.endInsertRows()
row.groups.append(groupName)
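# Usage sketch (the source model, column numbers and view are assumptions for
# illustration):
#     proxy = GrouperProxy()
#     proxy.setSourceModel(sourceModel, displayColumn=0,
#                          groupColumn=1, groupSeparator=', ')
#     treeView.setModel(proxy)  # rows appear as children of their group names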
| gpl-3.0 | -4,178,263,212,650,144,300 | 39.246528 | 100 | 0.588646 | false |
HEPData/hepdata | tests/pyyaml_test.py | 1 | 1683 |
# -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2021 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
import pytest
import yaml
def test_parse_trailing_tab_libyaml():
"""
Check that PyYAML (with LibYAML) can parse a trailing tab character.
Currently this is only possible with LibYAML, not with pure-Python PyYAML.
:return:
"""
data = yaml.load('key: value\t', Loader=yaml.CSafeLoader)
assert data['key'] == 'value'
def test_parse_trailing_tab_pyyaml():
"""
Latest PyYAML v5.4.1 (pure Python) currently has a bug parsing a trailing tab character.
https://github.com/yaml/pyyaml/issues/306 and https://github.com/yaml/pyyaml/issues/450
:return:
"""
with pytest.raises(yaml.scanner.ScannerError):
yaml.load('key: value\t', Loader=yaml.SafeLoader)
| gpl-2.0 | -6,733,662,192,276,484,000 | 32.66 | 92 | 0.723708 | false |
lucperkins/heron | bazel_configure.py | 1 | 14832 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Verifies required libraries and tools exist and are valid versions.
# If so, it creates scripts/compile/env_exec.sh containing the environment used
# by bazel when building.
#
# When changing this script, verify that it still works by running locally
# on a mac. Then verify the other environments by doing this:
#
# cd docker
# ./build-artifacts.sh ubuntu15.10 0.12.0 .
# ./build-artifacts.sh ubuntu14.04 0.12.0 .
# ./build-artifacts.sh centos7 0.12.0 .
#
import os
import re
import sys
import stat
import getpass
import datetime
import platform
import subprocess
sys.path.append('third_party/python/semver')
import semver
######################################################################
# Architecture and system defines
######################################################################
ARCH_AND_SYS = {
('x86_64', 'Darwin') : ('IS_I386_MACOSX', 'IS_MACOSX'),
('x86_64', 'Linux' ) : ('IS_I386_LINUX', 'IS_LINUX'),
}
######################################################################
# Discover the name of the user compiling
######################################################################
def discover_user():
return getpass.getuser()
######################################################################
# Discover the name of the host compiling
######################################################################
def discover_host():
return platform.node()
######################################################################
# Get the time of the setup - does not change every time you compile
######################################################################
def discover_timestamp():
return str(datetime.datetime.now())
######################################################################
# Get the processor the platform is running on
######################################################################
def discover_processor():
return platform.machine()
######################################################################
# Get the operating system of the platform
######################################################################
def discover_os():
return platform.system()
######################################################################
# Get the operating system version
######################################################################
def discover_os_version():
return platform.release()
######################################################################
# Get the git sha of the branch - you are working
######################################################################
def discover_git_sha():
output = subprocess.check_output("git rev-parse HEAD", shell=True)
return output.decode('ascii', 'ignore').strip("\n")
######################################################################
# Get the name of branch - you are working on
######################################################################
def discover_git_branch():
output = subprocess.check_output("git rev-parse --abbrev-ref HEAD", shell=True)
return output.decode('ascii', 'ignore').strip("\n")
######################################################################
# Utility functions for system defines
######################################################################
def define_string(name, value):
return '#define %s "%s"\n' % (name, value)
def define_value(name, value):
return '#define %s %s\n' % (name, value)
######################################################################
# Discover where a program is located using the PATH variable
######################################################################
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def ext_candidates(fpath):
yield fpath
for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
yield fpath + ext
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
for candidate in ext_candidates(exe_file):
if is_exe(candidate):
return candidate
return None
######################################################################
# Discover the real path of the program
######################################################################
def real_program_path(program_name):
which_path = which(program_name)
if which_path:
return os.path.realpath(which_path)
return None
def fail(message):
print("\nFAILED: %s" % message)
sys.exit(1)
# Assumes the version is at the end of the first line consisting of digits and dots
def get_trailing_version(line):
version = re.search('([\d.]+)$', line)
if version and '.' in version.group(0):
return version.group(0)
def discover_version(path):
if "python" in path:
version_flag = "-V"
else:
version_flag = "--version"
command = "%s %s" % (path, version_flag)
version_output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
first_line = version_output.decode('ascii', 'ignore').split("\n")[0]
version = get_trailing_version(first_line)
if version:
return version
# on debian, /usr/bin/gcc --version returns this:
# gcc-5 (Debian 5.3.1-14) 5.3.1 20160409
debian_line = re.search('.*?Debian.*?\s(\d[\d\.]+\d+)\s.*', first_line)
if debian_line:
version = get_trailing_version(debian_line.group(1))
if version:
return version
# on centos, /usr/bin/gcc --version returns this:
# gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-4)
redhat_line = re.search('(.*)\s+[0-9]+\s+\(Red Hat .*\)$', first_line)
if redhat_line:
version = get_trailing_version(redhat_line.group(1))
if version:
return version
# on ubuntu, /usr/bin/gcc --version returns this:
# gcc-5 (Ubuntu 5.2.1-22ubuntu2) 5.2.1 20151010
ubuntu_line = re.search('.*\s+\(Ubuntu .*\)\s+([\d\.]+)\s+\d+$', first_line)
if ubuntu_line:
version = get_trailing_version(ubuntu_line.group(1))
if version:
return version
# on mac, /usr/bin/cpp --version returns this:
# Apple LLVM version 6.0 (clang-600.0.56) (based on LLVM 3.5svn)
mac_line = re.search('^(Apple LLVM version\s+[\d\.]+)\s+\(clang.*', first_line)
if mac_line:
version = get_trailing_version(mac_line.group(1))
if version:
return version
# with python anaconda, --V returns this:
# Python 2.7.11 :: Anaconda 2.2.0 (x86_64)
anaconda_line = re.search('.*\s+Anaconda\s+.*\s', first_line)
if anaconda_line:
version = anaconda_line.group(0).split(' ')[1]
if version:
return version
# python on debian, --V returns this:
# Python 2.7.11+
python_line = re.search('^Python\s+(\d[\d\.]+)\+{0,1}.*', first_line)
if python_line:
version = python_line.group(1)
if version:
return version
fail ("Could not determine the version of %s from the following output\n%s\n%s" % (path, command, version_output))
def to_semver(version):
# is version too short
if re.search('^[\d]+\.[\d]+$', version):
return "%s.0" % version
# is version too long
version_search = re.search('^([\d]+\.[\d]+\.[\d]+)\.[\d]+$', version)
if version_search:
return version_search.group(1)
return version
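# Illustrative examples of the normalisation (values are hypothetical):
#   to_semver('4.8')      -> '4.8.0'   (padded so semver can compare it)
#   to_semver('5.3.1.14') -> '5.3.1'   (trailing component dropped)
#   to_semver('2.7.11')   -> '2.7.11'  (already valid, returned unchanged)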
def assert_min_version(path, min_version):
version = discover_version(path)
if not semver.match(to_semver(version), ">=%s" % to_semver(min_version)):
fail("%s is version %s which is less than the required version %s" % (path, version, min_version))
return version
######################################################################
# Discover the program using env variable/program name
######################################################################
def discover_program(program_name, env_variable = ""):
env_value = program_name
if env_variable:
try:
env_value = os.environ[env_variable]
except KeyError:
pass
return real_program_path(env_value)
######################################################################
# Get the platform we are running
######################################################################
def discover_platform():
return discover_os()
######################################################################
# Make the file executable
######################################################################
def make_executable(path):
st_mode = os.stat(path).st_mode
os.chmod(path, st_mode | stat.S_IXUSR)
######################################################################
# Discover a tool needed to compile Heron
######################################################################
def discover_tool(program, msg, envvar, min_version = ''):
VALUE = discover_program(program, envvar)
if not VALUE:
fail("""You need to have %s installed to build Heron.
Note: Some vendors install %s with a versioned name
(like /usr/bin/%s-4.8). You can set the %s environment
variable to specify the full path to yours.'""" % (program, program, program, envvar))
print_value = VALUE
if min_version:
version = assert_min_version(VALUE, min_version)
print_value = "%s (%s)" % (VALUE, version)
print('Using %s:\t%s' % (msg.ljust(20), print_value))
return VALUE
######################################################################
# Discover the linker directory
######################################################################
def discover_linker(environ):
BLDFLAG = '-B' + os.path.dirname(environ['LD'])
return BLDFLAG
######################################################################
# Discover a tool needed but default to certain value if not able to
######################################################################
def discover_tool_default(program, msg, envvar, defvalue):
VALUE = discover_program(program, envvar)
if not VALUE:
VALUE = defvalue
print('%s:\tnot found, but ok' % (program.ljust(26)))
else:
print('Using %s:\t%s' % (msg.ljust(20), VALUE))
return VALUE
def export_env_to_file(out_file, env):
if env in os.environ:
out_file.write('export %s="%s"\n' % (env, os.environ[env]))
######################################################################
# Generate the shell script that recreates the environment
######################################################################
def write_env_exec_file(platform, environ):
env_exec_file = 'scripts/compile/env_exec.sh'
out_file = open(env_exec_file, 'w')
out_file.write('#!/bin/bash\n\n')
out_file.write('set -eu \n\n')
# If C environment is set, export them
for env in ['CC', 'CPP', 'CFLAGS']:
export_env_to_file(out_file, env)
# If CXX environment is set, export them
for env in ['CXX', 'CXXCPP', 'CXXFLAGS']:
export_env_to_file(out_file, env)
# If linker environment is set, export them
for env in ['LDFLAGS', 'LIBS']:
export_env_to_file(out_file, env)
# Invoke the programs
out_file.write('# Execute the input programs\n')
out_file.write('$*')
make_executable(env_exec_file)
print('Wrote the environment exec file %s' % (env_exec_file))
######################################################################
# Generate system defines based on processor, os and os version
######################################################################
def generate_system_defines():
key = (discover_processor(), discover_os(), discover_os_version())
if key in ARCH_AND_SYS:
defines = ARCH_AND_SYS[key]
else:
key = (discover_processor(), discover_os())
defines = ARCH_AND_SYS[key]
strings = []
for define in defines:
strings.append(define_value(define, '1'))
return "".join(strings)
######################################################################
# Write heron config header at config/heron-config.h
######################################################################
def write_heron_config_header(config_file):
if os.path.exists(config_file): os.unlink(config_file)
out_file = open(config_file, 'w')
out_file.write(define_string('PACKAGE', 'heron'))
out_file.write(define_string('PACKAGE_NAME', 'heron'))
out_file.write(define_string('PACKAGE_VERSION', 'unversioned'))
out_file.write(define_string('PACKAGE_COMPILE_USER', discover_user()))
out_file.write(define_string('PACKAGE_COMPILE_HOST', discover_host()))
out_file.write(define_string('PACKAGE_COMPILE_TIME', discover_timestamp()))
out_file.write(define_string('GIT_SHA', discover_git_sha()))
out_file.write(define_string('GIT_BRANCH', discover_git_branch()))
out_file.write(generate_system_defines())
out_file.close()
print('Wrote the heron config header file: \t"%s"' % (config_file))
######################################################################
# MAIN program that sets up your workspace for bazel
######################################################################
def main():
env_map = dict()
# Discover the platform
platform = discover_platform()
print("Platform %s" % platform)
# do differently on mac
if platform == "Darwin":
c_min = '4.2.1'
cpp_min = '6.0' # on mac this will be clang version
else:
c_min = '4.8.1'
cpp_min = c_min
# Discover the tools environment
env_map['CC'] = discover_tool('gcc','C compiler', 'CC', c_min)
env_map['CXX'] = discover_tool('g++','C++ compiler', 'CXX', c_min)
env_map['CPP'] = discover_tool('cpp','C preprocessor', 'CPP', cpp_min)
env_map['CXXCPP'] = discover_tool('cpp','C++ preprocessor', 'CXXCPP', cpp_min)
env_map['LD'] = discover_tool('ld','linker', 'LD')
env_map['BLDFLAG'] = discover_linker(env_map)
# Discover the utilities
env_map['AUTOMAKE'] = discover_tool('automake', 'Automake', 'AUTOMAKE', '1.9.6')
env_map['AUTOCONF'] = discover_tool('autoconf', 'Autoconf', 'AUTOCONF', '2.6.3')
env_map['MAKE'] = discover_tool('make', 'Make', 'MAKE', '3.81')
env_map['PYTHON'] = discover_tool('python', 'Python', 'PYTHON', '2.7')
if platform == 'Darwin':
env_map['LIBTOOL'] = discover_tool('glibtool', 'Libtool', 'LIBTOOL', '2.4.2')
else:
env_map['LIBTOOL'] = discover_tool('libtool', 'Libtool', 'LIBTOOL', '2.4.2')
env_map['AR'] = discover_tool('ar', 'archiver', 'AR')
env_map['GCOV']= discover_tool('gcov','coverage tool', 'GCOV')
env_map['DWP'] = discover_tool_default('dwp', 'dwp', 'DWP', '/usr/bin/dwp')
env_map['NM'] = discover_tool_default('nm', 'nm', 'NM', '/usr/bin/nm')
env_map['OBJCOPY'] = discover_tool_default('objcopy', 'objcopy', 'OBJCOPY', '/usr/bin/objcopy')
env_map['OBJDUMP'] = discover_tool_default('objdump', 'objdump', 'OBJDUMP', '/usr/bin/objdump')
env_map['STRIP'] = discover_tool_default('strip', "strip", 'STRIP', '/usr/bin/strip')
# write the environment executable file
# write_env_exec_file(platform, env_map)
if __name__ == '__main__':
main()
| apache-2.0 | 6,978,873,587,451,403,000 | 35.712871 | 116 | 0.526429 | false |
demisto/content | Tests/sdknightly/create_entities_for_nightly_sdk.py | 1 | 5120 |
import argparse
import json
import os
import shutil
import subprocess
from pathlib import Path
from typing import Tuple
def run_command(cmd: str) -> Tuple[str, str]:
return subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8').communicate()
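# Illustrative sketch (the command is hypothetical): run_command('git status')
# returns a (stdout, stderr) tuple of decoded strings, e.g.
# ('On branch master\n...', '').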
def create_incident_field(path: Path, incident_to_associate: str) -> str:
"""
Creates an incident field
Args:
path: A path of the pack
incident_to_associate: an incident type to associate the incident field
Returns:
The path to the incident field
"""
hello_field_path = 'Packs/HelloWorld/IncidentFields/incidentfield-Hello_World_Status.json'
with open(hello_field_path) as stream:
field = json.load(stream)
name = 'Hello World IncidentField Test'
cliname = name.lower().replace(' ', '')
field.update({
'name': name,
'cliName': cliname,
'id': f'incident_{cliname}',
'associatedTypes': [incident_to_associate]
})
dest_incident = path / 'IncidentFields'
if not os.path.isdir(dest_incident):
os.mkdir(dest_incident)
field_path = dest_incident / f'incidentfield-{name.replace(" ", "_")}.json'
with open(field_path, 'w+') as stream:
json.dump(field, stream, indent=4)
return str(field_path)
def create_layout(path: Path, layout_name: str) -> str:
"""
Creates a layout field
Args:
path: A path of the pack
layout_name: a layout name to create
Returns:
The path to the layout
"""
layout_path_sample = Path('Packs/HelloWorld/Layouts/layoutscontainer-Hello_World_Alert.json')
with open(layout_path_sample) as stream:
layout = json.load(stream)
dest_layout = path / 'Layouts'
if not os.path.isdir(dest_layout):
os.mkdir(dest_layout)
layout.update({
'id': layout_name,
'name': layout_name
})
layout_path = dest_layout / f'layoutscontainer-{layout_name.replace(" ", "_")}.json'
with open(layout_path, 'w+') as stream:
json.dump(layout, stream, indent=4)
return str(layout_path)
def create_incident_type(path: Path, layout_name: str) -> str:
"""
Creates an incident type
Args:
path: A path of the pack
        layout_name: the layout to associate with the incident type
Returns:
The path to the incident type
"""
incident_type_path_sample = Path('Packs/HelloWorld/IncidentTypes/incidenttype-Hello_World_Alert.json')
with open(incident_type_path_sample) as stream:
incident_type = json.load(stream)
name = 'Hello World Alert Test'
incident_type.update({
'name': name,
'id': name,
'layout': layout_name
})
dest_incident_path = path / 'IncidentTypes'
if not os.path.isdir(dest_incident_path):
os.mkdir(dest_incident_path)
incident_path = dest_incident_path / f'incidenttype-{name.replace(" ", "_")}.json'
with open(incident_path, 'w+') as stream:
json.dump(incident_type, stream, indent=4)
return str(incident_path)
def create_mapper(path: Path) -> str:
"""
Creates a mapper
Args:
path: A path of the pack
Returns:
The path to the mapper
"""
mapper_path_sample = Path('Packs/HelloWorld/Classifiers/classifier-mapper-incoming-HelloWorld.json')
with open(mapper_path_sample) as stream:
mapper = json.load(stream)
name = 'Hello World Test - Incoming Mapper'
_id = 'HelloWorld-mapper Test'
mapper.update({
'name': name,
'id': _id
})
dest_mapper_path = path / 'Classifiers'
if not os.path.isdir(dest_mapper_path):
os.mkdir(dest_mapper_path)
mapper_path = dest_mapper_path / 'classifier-mapper-incoming-HelloWorldTest.json'
with open(mapper_path, 'w+') as stream:
json.dump(mapper, stream, indent=4)
return str(mapper_path)
def main():
parser = argparse.ArgumentParser(description="Creates incident field, incident type, mapper and a "
"layout in a given pack.")
parser.add_argument('pack_name')
parser.add_argument('--artifacts-folder', required=False)
args = parser.parse_args()
pack_path = Path('Packs') / args.pack_name
layout_name = 'Hello World Test Layout'
uploaded_entities = [
create_layout(pack_path, layout_name),
create_incident_field(pack_path, 'Hello World Alert Test'),
create_incident_type(pack_path, layout_name),
create_mapper(pack_path)
]
print("Created entities:")
print("\t" + "\n\t".join(uploaded_entities))
if args.artifacts_folder:
entities_folder = Path(args.artifacts_folder) / 'UploadedEntities'
if not os.path.isdir(entities_folder):
os.mkdir(entities_folder)
print(f"Storing files to {entities_folder}")
for file in uploaded_entities:
file_name = file.split('/')[-1]
shutil.copyfile(file, entities_folder / file_name)
print(f"file: {file_name} stored.")
if __name__ == '__main__':
main()
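# Usage sketch (not part of the original script); the pack name and artifacts
# folder below are hypothetical, and the script is assumed to run from the
# content repository root because all paths are relative to it:
#   python Tests/sdknightly/create_entities_for_nightly_sdk.py HelloWorld --artifacts-folder /tmp/artifacts
# This would create the test layout, incident field, incident type and mapper
# under Packs/HelloWorld and copy them into /tmp/artifacts/UploadedEntities.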
|
mit
| 4,064,482,202,864,410,600 | 30.411043 | 120 | 0.631641 | false |
miniCruzer/postit-desktop
|
restclient.py
|
1
|
4123
|
import json
import logging
import os
import requests
from bs4 import BeautifulSoup
from PyQt5.QtCore import QThread, pyqtSignal, QSettings
from PyQt5.QtWidgets import QDialog, QMessageBox, QDialogButtonBox
from ui.Ui_LoginDialog import Ui_LoginDialog
def getLoginToken(address, email, password, timeout=15):
""" attempt to get a login token. KeyError means invalid username or password"""
client = requests.session()
soup = BeautifulSoup(client.get(address, timeout=timeout).text, "html.parser")
csrf = soup.find('input', {'name': "csrf_token" })['value']
login_data = json.dumps({
"email": email,
"password": password,
"csrf_token": csrf
})
r = client.post(address, data=login_data, headers={"content-type": "application/json" },
timeout=timeout) # type: requests.Response
# if there's a login failure here, the server will report back whether the username or password
# was wrong. https://github.com/mattupstate/flask-security/issues/673
if not r.ok:
logging.info(f"response: {r.status_code}")
logging.debug(r.text)
return r.json()['response']['user']['authentication_token']
def uploadHandle(address, token, handle):
r = requests.post(address, headers={"Authentication-Token": token}, files={"image": handle})
logging.info(f"response: {r.status_code}")
r.raise_for_status()
return r.json()['url']
def uploadFile(address, token, path, delete=True):
r = uploadHandle(address, token, open(path, "rb"))
if delete:
os.unlink(path)
return r
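# Usage sketch (not part of the original module); the server address,
# credentials and file path below are hypothetical:
#   token = getLoginToken("https://postit.example.org/login", "user@example.org", "secret")
#   url = uploadFile("https://postit.example.org/upload", token, "/tmp/screenshot.png", delete=False)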
class UploadThread(QThread):
resultReady = pyqtSignal(str, object)
def __init__(self, addr, token, path, parent=None):
super(UploadThread, self).__init__(parent)
self.addr = addr
self.path = path
self.token = token
def run(self):
url, error = None, None
try:
url = uploadFile(self.addr, self.token, self.path)
except Exception as e:
error = e
self.resultReady.emit(url, error)
class UploadHandleThread(UploadThread):
def run(self):
url, error = None, None
try:
url = uploadHandle(self.addr, self.token, self.path)
except Exception as e:
error = e
self.resultReady.emit(url, error)
class LoginThread(QThread):
resultReady = pyqtSignal(str, object)
def __init__(self, addr, email, password, parent=None):
super(LoginThread, self).__init__(parent)
self.addr = addr
self.email = email
self.password = password
def run(self):
token, error = None, None
try:
token = getLoginToken(self.addr, self.email, self.password)
except Exception as e:
error = e
self.resultReady.emit(token, error)
class LoginDialog(QDialog, Ui_LoginDialog):
def __init__(self, parent):
super(LoginDialog, self).__init__(parent)
self.setupUi(self)
self.loginToken = None
self.thread = QThread(self)
def accept(self):
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
addr = QSettings(QSettings.IniFormat, QSettings.UserScope,
"GliTch_ Is Mad Studios", "PostIt").value("internet/address")
self.thread = LoginThread(addr + "/login",
self.emailAddressLineEdit.text(),
self.passwordLineEdit.text(), self)
self.thread.resultReady.connect(self.gotToken)
self.thread.start()
def reject(self):
if self.thread.isRunning():
self.thread.terminate()
super().reject()
def gotToken(self, token, error):
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)
if token and not error:
self.loginToken = token
super().accept()
else:
msg = ''
if isinstance(error, KeyError):
msg = "Invalid username or password."
else:
msg = str(error)
QMessageBox.critical(self, "Login Failed", msg)
|
mit
| 4,215,635,340,765,329,400 | 27.047619 | 99 | 0.617512 | false |
Svjard/presto-admin
|
prestoadmin/util/constants.py
|
1
|
1424
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This modules contains read-only constants used throughout
the presto admin project.
"""
import os
import prestoadmin
PRESTOADMIN_LOG_DIR = "/var/log/prestoadmin"
# Logging Config File Locations
LOGGING_CONFIG_FILE_NAME = 'presto-admin-logging.ini'
LOGGING_CONFIG_FILE_DIRECTORIES = [
os.path.join(prestoadmin.main_dir, 'prestoadmin')
]
# local configuration
LOCAL_CONF_DIR = "/etc/opt/prestoadmin"
CONFIG_PATH = os.path.join(LOCAL_CONF_DIR, "config.json")
COORDINATOR_DIR = os.path.join(LOCAL_CONF_DIR, "coordinator")
WORKERS_DIR = os.path.join(LOCAL_CONF_DIR, "workers")
CONNECTORS_DIR = os.path.join(LOCAL_CONF_DIR, "connectors")
# remote configuration
REMOTE_CONF_DIR = "/etc/presto"
REMOTE_CATALOG_DIR = os.path.join(REMOTE_CONF_DIR, "catalog")
REMOTE_PACKAGES_PATH = "/opt/prestoadmin/packages"
REMOTE_PRESTO_LOG_DIR = "/var/log/presto"
|
apache-2.0
| -661,595,847,563,454,200 | 31.363636 | 74 | 0.75 | false |
rthill/django-ldapdb
|
ldapdb/models/base.py
|
1
|
6199
|
# -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2011, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Bolloré telecom nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import ldap
import logging
import django.db.models
from django.db import connections, router
from django.db.models import signals
import ldapdb
logger = logging.getLogger(__name__)
class Model(django.db.models.base.Model):
"""
Base class for all LDAP models.
"""
dn = django.db.models.fields.CharField(max_length=200)
# meta-data
base_dn = None
search_scope = ldap.SCOPE_SUBTREE
object_classes = ['top']
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
self.saved_pk = self.pk
def build_rdn(self):
"""
Build the Relative Distinguished Name for this entry.
"""
bits = []
for field in self._meta.fields:
if field.db_column and field.primary_key:
bits.append("%s=%s" % (field.db_column, getattr(self, field.name)))
if not len(bits):
raise Exception("Could not build Distinguished Name")
return '+'.join(bits)
def build_dn(self):
"""
Build the Distinguished Name for this entry.
"""
return "%s,%s" % (self.build_rdn(), self.base_dn)
raise Exception("Could not build Distinguished Name")
def delete(self, using=None):
"""
Delete this entry.
"""
using = using or router.db_for_write(self.__class__, instance=self)
connection = connections[using]
logger.info("Deleting LDAP entry %s" % self.dn)
connection.delete_s(self.dn)
signals.post_delete.send(sender=self.__class__, instance=self)
def save(self, using=None):
"""
Saves the current instance.
"""
using = using or router.db_for_write(self.__class__, instance=self)
connection = connections[using]
if not self.dn:
# create a new entry
record_exists = False
entry = [('objectClass', self.object_classes)]
new_dn = self.build_dn()
for field in self._meta.fields:
if not field.db_column:
continue
value = getattr(self, field.name)
if value or value == 0:
entry.append((field.db_column, field.get_db_prep_save(value, connection=connection)))
logger.info("Creating new LDAP entry %s" % new_dn)
logger.debug(entry)
connection.add_s(new_dn, entry)
# update object
self.dn = new_dn
else:
# update an existing entry
record_exists = True
modlist = []
orig = self.__class__.objects.get(pk=self.saved_pk)
for field in self._meta.fields:
if not field.db_column:
continue
old_value = getattr(orig, field.name, None)
new_value = getattr(self, field.name, None)
if old_value != new_value:
if new_value or new_value == 0:
modlist.append((ldap.MOD_REPLACE, field.db_column, field.get_db_prep_save(new_value, connection=connection)))
elif old_value:
modlist.append((ldap.MOD_DELETE, field.db_column, None))
if len(modlist):
# handle renaming
new_dn = self.build_dn()
if new_dn != self.dn:
logger.info("Renaming LDAP entry %s to %s" % (self.dn, new_dn))
connection.rename_s(self.dn, self.build_rdn())
self.dn = new_dn
logger.info("Modifying existing LDAP entry %s" % self.dn)
logger.debug(modlist)
connection.modify_s(self.dn, modlist)
else:
logger.info("No changes to be saved to LDAP entry %s" % self.dn)
# done
self.saved_pk = self.pk
signals.post_save.send(sender=self.__class__, instance=self, created=(not record_exists))
@classmethod
def scoped(base_class, base_dn):
"""
Returns a copy of the current class with a different base_dn.
"""
class Meta:
proxy = True
import re
suffix = re.sub('[=,]', '_', base_dn)
name = "%s_%s" % (base_class.__name__, str(suffix))
new_class = type(name, (base_class,), {'base_dn': base_dn, '__module__': base_class.__module__, 'Meta': Meta})
return new_class
class Meta:
abstract = True
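# Usage sketch for Model.scoped() (not part of the original module); LdapGroup
# below stands for a hypothetical concrete subclass of Model, and the DN is
# likewise made up:
#   ContractorGroups = LdapGroup.scoped("ou=contractors,dc=example,dc=com")
#   # ContractorGroups is a proxy model whose queries run against the other subtree.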
|
bsd-3-clause
| 675,032,290,876,975,000 | 36.786585 | 133 | 0.597386 | false |
facebook/redex
|
tools/python/symbolicator/symbol_files.py
|
1
|
2239
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import re
import subprocess
##############################################################################
# Util functions
##############################################################################
def find_buck_artifacts(target):
"""
Given a buck target, find the location of its build artifacts, which
contain the symbol files.
"""
root = subprocess.check_output(["buck", "root"]).strip()
rule, output = subprocess.check_output(
["buck", "targets", "--show-output", target]
).split()
re_match = re.match("//(.*_redex)", rule)
artifacts = os.path.join(
root, "buck-out", "gen", re_match.group(1).replace(":", "/") + "__redex"
)
return artifacts
##############################################################################
class SymbolFiles(object):
def __init__(self, extracted_symbols, line_map, debug_line_map, iodi_metadata):
self.extracted_symbols = extracted_symbols
self.line_map = line_map
self.debug_line_map = debug_line_map
self.iodi_metadata = iodi_metadata
@staticmethod
def from_buck_artifact_dir(artifact_dir):
line_map_fn_v1 = os.path.join(artifact_dir, "redex-line-number-map")
line_map_fn_v2 = os.path.join(artifact_dir, "redex-line-number-map-v2")
if os.path.exists(line_map_fn_v2):
line_map_fn = line_map_fn_v2
else:
line_map_fn = line_map_fn_v1
return SymbolFiles(
os.path.join(artifact_dir, "redex-class-rename-map.txt"),
line_map_fn,
os.path.join(artifact_dir, "redex-debug-line-map-v2"),
os.path.join(artifact_dir, "iodi-metadata"),
)
@staticmethod
def from_buck_target(target):
artifact_dir = find_buck_artifacts(target)
logging.info("buck target %s has artifact dir at %s", target, artifact_dir)
return SymbolFiles.from_buck_artifact_dir(artifact_dir)
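# Usage sketch (not part of the original module); the buck target below is
# hypothetical but matches the "//..._redex" shape expected by find_buck_artifacts:
#   symbol_files = SymbolFiles.from_buck_target("//apps/myapp:myapp_redex")
#   print(symbol_files.extracted_symbols, symbol_files.line_map)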
|
mit
| 7,663,031,275,686,946,000 | 33.446154 | 83 | 0.570344 | false |
sruizr/pysync_redmine
|
test/repositories/test_redmine.py
|
1
|
11220
|
import pytest
from unittest.mock import Mock, patch
from pysync_redmine.repositories.redmine import RedmineRepo
from pysync_redmine.domain import (
Project,
Task,
Phase,
Member,
Calendar
)
from helper import get_basic_frame as get_base
import datetime
import pdb
class A_RedmineRepo:
def setup_method(self, method):
self.patcher = patch('pysync_redmine.repositories.redmine.redmine')
redmine = self.patcher.start()
self.source = Mock()
redmine.Redmine.return_value = self.source
self.repo = RedmineRepo()
self.repo.open_source(project_key='example', url='http://fake_redmine.org',
username='user', password='psw')
self.project = self.repo.project
self.project._id = 123
def teardown_method(self, method):
self.patcher.stop()
@patch('pysync_redmine.repositories.redmine.redmine')
def should_be_loaded_with_project_url(self, mock_redmine):
redmine = Mock()
mock_redmine.Redmine.return_value = redmine
project = Mock()
project.id = 1
project.name = 'This is an example'
redmine.project.get.return_value = project
redmine_repo = RedmineRepo()
redmine_repo.open_source(project_key='example',
url='http://fake_redmine',
username='user', password='psw')
assert redmine_repo.setup_pars['url'] == 'http://fake_redmine'
assert redmine_repo.project.key == 'example'
assert redmine_repo.project._id == 1
mock_redmine.Redmine.assert_called_with(
'http://fake_redmine',
username='user',
password='psw'
)
@patch('getpass.getpass')
@patch('builtins.input')
@patch('pysync_redmine.repositories.redmine.redmine')
def should_be_loaded_without_user(self, mock_redmine, mock_input,
mock_getpass):
redmine = Mock()
mock_redmine.Redmine.return_value = redmine
project = Mock()
project.id = 1
project.key = 'example'
project.name = 'This is an example'
redmine.project.get.return_value = project
mock_input.return_value = 'userrr'
mock_getpass.return_value = 'pswww'
redmine_repo = RedmineRepo()
redmine_repo.open_source(project_key=project.key, url='http://fake_redmine')
assert redmine_repo.project._id == project.id
mock_redmine.Redmine.assert_called_with('http://fake_redmine',
username='userrr',
password='pswww')
def should_load_members(self):
member_ships = dict()
for i in range(0, 4):
member_ships[i] = Mock()
member_ships[i].user_id = i+1
member_ships[i].roles_id = [r for r in range(0, i+1)]
self.source.member_ship.filter.return_value = member_ships
roles = dict()
for i in range(0, 4):
roles[i] = Mock()
roles[i].name = 'name {}'.format(i)
roles[i].id = i
self.source.role.all.return_value = roles
users = dict()
for i in range(1, 5):
users[i] = Mock()
users[i].id = i
users[i].login = 'user{}'.format(i)
self.source.user.all.return_value = users
self.repo.load_members()
project = self.repo.project
roles = list(roles.values())
for i in range(1, 5):
member = project.members[i]
assert member._id == i
assert member.key == 'user{}'.format(i)
assert member.roles == set([r.name for r in roles[0:i]])
pars = {'project_id': self.project._id}
self.source.member_ship.filter.assert_called_with(**pars)
self.source.role.all.assert_called_with()
self.source.user.all.assert_called_with()
@patch('pysync_redmine.repositories.redmine.ResourceWrapper')
def should_load_phases(self, mock_wrapper):
mock_wrapper.side_effect = lambda x, y: x
versions = dict()
for i in range(1, 3):
versions[i] = Mock()
versions[i].id = i
versions[i].name = 'v{}'.format(i)
versions[i].description = 'version number {}'.format(i)
versions[i].due_date = datetime.date(2016, 1, i)
self.source.version.filter.return_value = versions
self.repo.load_phases()
pars = {'project_id': self.project._id}
self.source.version.filter.assert_called_with(project_id=self.project._id)
# pdb.set_trace()
for i in range(1, 3):
phase = self.project.phases[i]
assert phase._id == i
assert phase.description == '{}. {}'.format(versions[i].name,
versions[i].description)
assert phase.due_date == versions[i].due_date
def should_load_tasks(self):
issues = []
for i in range(0, 2):
issue = Mock()
issue.id = i
issue.subject = 'description {}'.format(i)
issues.append(issue)
self.source.issue.filter.return_value = issues
def should_insert_member(self):
member = Member(self.project, 'user_key',
*['master chef'])
user = Mock()
user.id = 456
self.source.user.filter.return_value = [user]
roles = [Mock(), Mock()]
roles[0].id = 1
roles[0].name = 'no my friend'
roles[1].id = 2
roles[1].name = 'master chef'
self.source.role.all.return_value = roles
membership = Mock()
membership.id = 3
self.source.project_membership.create.return_value = membership
self.repo.insert_member(member)
pars = {
'project_id': 123,
'user_id': user.id,
'role_ids': [2]
}
self.source.project_membership.create.assert_called_with(
**pars)
assert member._id == 456
def should_insert_phase(self):
phase = Phase(self.project)
phase.key = '12'
phase.description = 'A phase description'
phase.due_date = datetime.date(2016, 1, 4)
version = Mock()
version.id = 3
self.source.version.create.return_value = version
self.repo.insert_phase(phase)
        pars = {
'project_id': 123,
'name': phase.key,
'description': phase.description,
'due_date': phase.due_date
}
self.source.version.create.assert_called_with(**pars)
assert phase._id == 3
def should_insert_task(self):
task = Task(self.project)
task.description = 'task description'
task.start_date = datetime.date(2016, 1, 4)
task.duration = 1
task.complete = 75
root = self.project.tokens
input_1 = root.add_node(['1', '2', '3'])
input_2 = root.add_node(['1', '2', '4'])
output_1 = root.add_node(['1', '5'])
output_2 = root.add_node(['1', '6'])
task.inputs = [input_1, input_2]
task.outputs = [output_1, output_2]
issue = Mock()
issue.id = 5
self.source.issue.create.return_value = issue
self.repo.insert_task(task)
description = self._get_issue_description()
print(description)
pars = {
'project_id': 123,
'subject': 'task description',
'start_date': datetime.date(2016, 1, 4),
'due_date': datetime.date(2016, 1, 5),
'done_ratio': 75,
'description': description
}
self.source.issue.create.assert_called_with(**pars)
assert task._id == 5
def should_update_task_update_main_fields_and_new_nexts(self):
phase, member, parent, main_task, next_task = get_base(self.project)
# Updating changes
main_task.description = 'Final description'
main_task.start_date = datetime.date(2016, 1, 5)
main_task.duration = 3
main_task.complete = 100
main_task.assigned_to = member
main_task.phase = phase
main_task.parent = parent
main_task.relations.add_next(next_task, 0)
self.source.issue_relation.filter.return_value = []
self.repo.update_task(main_task)
pars = {
'subject': 'Final description',
'start_date': main_task.start_date,
'due_date': datetime.date(2016, 1, 8),
'done_ratio': 100,
'fixed_version_id': phase._id,
'assigned_to_id': member._id,
'parent_issue_id': parent._id
}
self.source.issue.update.assert_called_with(1, **pars)
pars = {
'issue_id': main_task._id,
'issue_to_id': next_task._id,
'relation_type': 'precedes',
'delay': 0
}
self.source.issue_relation.create.assert_called_with(
**pars)
assert not self.source.issue_relation.delete.called
def should_update_tasks_with_removed_next_tasks(self):
phase, member, parent, main_task, next_task = get_base(self.project)
mock_relation = Mock()
mock_relation.id = 1000
mock_relation.issue_to_id = next_task._id
mock_relation.relation_type = 'precedes'
self.source.issue_relation.filter.return_value = [mock_relation]
self.repo.update_task(main_task)
self.source.issue_relation.delete.assert_called_with(mock_relation.id)
def should_update_tasks_with_changed_delays(self):
phase, member, parent, main_task, next_task = get_base(self.project)
main_task.relations.add_next(next_task, 1)
mock_relation = Mock()
mock_relation.id = 1000
mock_relation.issue_to_id = next_task._id
mock_relation.relation_type = 'precedes'
mock_relation.delay = 0
self.source.issue_relation.filter.return_value = [mock_relation]
self.repo.update_task(main_task)
self.source.issue_relation.delete.assert_called_with(mock_relation.id)
pars = {
'issue_id': main_task._id,
'issue_to_id': next_task._id,
'relation_type': 'precedes',
'delay': 1
}
self.source.issue_relation.create.assert_called_with(
**pars)
def _get_issue_description(self):
description = """h3. Inputs
* [[1]]
** 2
*** 3
*** 4
h3. Outputs
* [[1]]
** 5
** 6
------"""
return description
|
mit
| -1,539,851,799,964,469,500 | 31.903226 | 84 | 0.531462 | false |
cwurld/django-phonegap
|
django_phonegap/data_port/views.py
|
1
|
1032
|
__author__ = 'Chuck Martin'
from django.views.generic.edit import CreateView
from django.views.generic import ListView
from rest_framework import generics
from rest_framework import permissions
from serializers import MessageSerializer
from models import Message
from forms import MessageForm
class CreateMessage(CreateView):
"""
For testing the message form before turning it into a PhoneGap app.
"""
model = Message
form_class = MessageForm
def get_context_data(self, **kwargs):
kwargs = super(CreateMessage, self).get_context_data(**kwargs)
kwargs['IS_PHONEGAP'] = False
return kwargs
class ListMessages(ListView):
model = Message
class CreateMessageREST(generics.ListCreateAPIView):
"""
For receiving Message form data from mobile device.
"""
queryset = Message.objects.all()
serializer_class = MessageSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def pre_save(self, obj):
obj.user = self.request.user
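# Sketch of how these views could be wired up (not part of the original module);
# the import path, URL patterns and regexes below are hypothetical:
#   from django.conf.urls import url
#   from django_phonegap.data_port import views
#   urlpatterns = [
#       url(r'^message/new/$', views.CreateMessage.as_view()),
#       url(r'^messages/$', views.ListMessages.as_view()),
#       url(r'^api/messages/$', views.CreateMessageREST.as_view()),
#   ]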
|
bsd-3-clause
| 2,202,635,197,737,025,800 | 24.8 | 71 | 0.722868 | false |
rafaels88/py2docx
|
py2docx/elements/image.py
|
1
|
5118
|
# coding: utf-8
import os
from os.path import basename
from PIL import Image as PILImage
from py2docx.document import DOCUMENT_PATH
from py2docx.util import Unit
class Image(object):
def __init__(self, image_path, document, align=None,
width='100%', height='100%'):
self.image = open(image_path, 'rb')
self.image_name = basename(self.image.name).replace(" ", '-')
self.document = document
self.align = align
self.width = width
self.height = height
self.xml = '<w:p>' + \
'<w:pPr>' + \
'{properties}' + \
'</w:pPr>' + \
'<w:r>' + \
'<w:drawing>' + \
'<wp:inline distT="0" distB="0" distL="0" distR="0">' + \
'<wp:extent cx="{width}" cy="{height}" />' + \
'<wp:effectExtent l="25400" t="0" r="0" b="0" />' + \
'<wp:docPr id="1" name="Picture 0" descr="{image_name}" />' + \
'<wp:cNvGraphicFramePr>' + \
'<a:graphicFrameLocks xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" noChangeAspect="1" />' + \
'</wp:cNvGraphicFramePr>' + \
'<a:graphic xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">' + \
'<a:graphicData uri="http://schemas.openxmlformats.org/drawingml/2006/picture">' + \
'<pic:pic xmlns:pic="http://schemas.openxmlformats.org/drawingml/2006/picture">' + \
'<pic:nvPicPr>' + \
'<pic:cNvPr id="0" name="{image_name}" />' + \
'<pic:cNvPicPr />' + \
'</pic:nvPicPr>' + \
'<pic:blipFill>' + \
'<a:blip r:embed="{rel_id}" />' + \
'<a:stretch>' + \
'<a:fillRect />' + \
'</a:stretch>' + \
'</pic:blipFill>' + \
'<pic:spPr>' + \
'<a:xfrm>' + \
'<a:off x="0" y="0" />' + \
'<a:ext cx="{width}" cy="{height}" />' + \
'</a:xfrm>' + \
'<a:prstGeom prst="rect">' + \
'<a:avLst />' + \
'</a:prstGeom>' + \
'</pic:spPr>' + \
'</pic:pic>' + \
'</a:graphicData>' + \
'</a:graphic>' + \
'</wp:inline>' + \
'</w:drawing>' + \
'</w:r>' + \
'</w:p>'
self.xml_props = []
self._upload_image()
self._set_properties()
def _get_image(self):
return self.image
def _set_relashionship(self, rel_id):
self.xml = self.xml.format(rel_id=rel_id)
def _upload_image(self):
dir_media = "{0}/word/media".format(DOCUMENT_PATH)
if not os.path.exists(dir_media):
os.makedirs(dir_media)
img_uploaded = open("{0}/{1}".format(dir_media, self.image_name), 'wb')
img_uploaded.write(self.image.read())
img_uploaded.close()
def _set_properties(self):
self._set_align()
self.xml = self.xml.replace('{properties}',
''.join(self.xml_props))
image_pil = PILImage.open(self.image.name)
width = Unit.pixel_to_emu(image_pil.size[0])
height = Unit.pixel_to_emu(image_pil.size[1])
width_percentage_num = float(self.width[:-1])
height_percentage_num = float(self.height[:-1])
width = (width_percentage_num / 100) * width
height = (height_percentage_num / 100) * height
self.xml = self.xml.replace("{width}", str(int(width))) \
.replace("{height}", str(int(height))) \
.replace("{image_name}", self.image_name)
def _set_align(self):
if self.align and \
self.align in ['left',
'right',
'center',
'justify']:
xml = '<w:jc w:val="{align}"/>'
self.xml_props.append(xml.format(align=self.align))
def _get_xml(self):
rel_id = self.document \
.document_relationship_file \
._add_image(self.image_name)
self._set_relashionship(rel_id)
return self.xml
|
mit
| -6,378,917,781,017,072,000 | 44.292035 | 145 | 0.388824 | false |
Upward-Spiral-Science/claritycontrol
|
code/scripts/roi_analysis.py
|
1
|
2744
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
__author__ = 'david'
from __builtin__ import *
import gc
import numpy as np
from skimage.feature import greycomatrix, greycoprops
import matplotlib as mpl
mpl.use('TkAgg') # Solve runtime issue
import matplotlib.pyplot as plt
## Fake image and label volumes to quickly test functionality
def loadImg():
return np.random.random_sample((100,100,100))
def loadAtlas():
atlas_volume = np.zeros((100,100,100),dtype=np.uint32)
atlas_volume[10:50,10:50,10:50]=np.ones((40,40,40),dtype=np.uint32)*1
atlas_volume[50:90,10:50,10:50]=np.ones((40,40,40),dtype=np.uint32)*2
atlas_volume[10:50,50:90,10:50]=np.ones((40,40,40),dtype=np.uint32)*3
atlas_volume[50:90,50:90,10:50]=np.ones((40,40,40),dtype=np.uint32)*4
atlas_volume[10:50,10:50,50:90]=np.ones((40,40,40),dtype=np.uint32)*5
atlas_volume[50:90,10:50,50:90]=np.ones((40,40,40),dtype=np.uint32)*6
atlas_volume[10:50,50:90,50:90]=np.ones((40,40,40),dtype=np.uint32)*7
atlas_volume[50:90,50:90,50:90]=np.ones((40,40,40),dtype=np.uint32)*8
return atlas_volume
## END
## True data
# path = "~/Workspaces/claritycontrol/code/data/raw/"
# token = "Fear199"
# pathname = path+token+".img"
#
# img_volume = nib.load(pathname).get_data()[:,:,:,0]
## END
## get atlas values
atlas_volume = loadAtlas()
print atlas_volume.shape
atlas_values, atlas_count = np.unique(atlas_volume,return_counts=True)
atlas_values = atlas_values[1:] # remove background
## get img
img_volume = loadImg()
print img_volume.shape
class_id = 0 # Fear, Control, Cocaine
subject_id = 199
## normalize volume Z-standardization
img_volume = (img_volume-np.mean(img_volume))/np.std(img_volume)
## prepare results matrix
columns = ['class_id', 'subject_id', 'roi', 'mean', 'std', 'energy', 'entropy', 'correlation', 'contrast', 'variance', 'sumMean',
'inertial', 'clusterShade', 'clusterTendency', 'homogeneity', 'maxProbability', 'inverseVariance']
features = np.zeros((len(atlas_values), len(columns)), dtype=np.float32)
## compute GLCM and properties
for roi_id in range(len(atlas_values)):
features[roi_id, 0] = class_id
features[roi_id, 1] = subject_id
features[roi_id, 2] = atlas_values[roi_id]
## mask img and get roi block
mask_volume = (atlas_volume == atlas_values[roi_id])
xs, ys, zs = mask_volume.nonzero()
roi_block = np.multiply(img_volume, mask_volume)[min(xs):max(xs), min(ys):max(ys), min(zs):max(zs)]
del mask_volume # memory collect
## compute mean and std
features[roi_id, 3] = np.mean(roi_block[roi_block != 0])
features[roi_id, 4] = np.std(roi_block[roi_block != 0])
## compute GLCM and properties
# features[roi_id, 5] = 0
# features[roi_id, 6] = 0
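    ## Sketch of how some of the remaining GLCM features could be filled in (not
    ## part of the original script; the 8-level quantization, the middle-slice
    ## choice and the column indices below are assumptions):
    # levels = 8
    # mid_slice = roi_block[:, :, roi_block.shape[2] // 2]
    # quantized = np.uint8(np.floor((mid_slice - mid_slice.min()) /
    #                               (mid_slice.ptp() + 1e-12) * (levels - 1)))
    # glcm = greycomatrix(quantized, distances=[1], angles=[0], levels=levels, normed=True)
    # features[roi_id, 5] = greycoprops(glcm, 'energy')[0, 0]
    # features[roi_id, 7] = greycoprops(glcm, 'correlation')[0, 0]
    # features[roi_id, 8] = greycoprops(glcm, 'contrast')[0, 0]
    # features[roi_id, 14] = greycoprops(glcm, 'homogeneity')[0, 0]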
|
apache-2.0
| -5,778,710,156,627,409,000 | 32.463415 | 129 | 0.672376 | false |
ernestyalumni/Propulsion
|
cantera_stuff/tut3.py
|
1
|
3166
|
## tut3.py
## tut3.m implemented in Python for cantera
## cf. http://www.cantera.org/docs/sphinx/html/matlab/tutorials/tut3.html
############################################################################
## Copyleft 2016, Ernest Yeung <ernestyalumni@gmail.com>
## 20160125
##
## This program, along with all its code, is free software; you can redistribute
## it and/or modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## Governing the ethics of using this program, I default to the Caltech Honor Code:
## ``No member of the Caltech community shall take unfair advantage of
## any other member of the Caltech community.''
##
## linkedin : ernestyalumni
## wordpress : ernestyalumni
############################################################################
# Tutorial 3: Getting Help
#
import cantera as ct
# Suppose you have created a Cantera object and want to know what
# methods are available for it, and get help on using the methods.
g = ct.Solution("gri30.xml")
# The first thing you need to know is the class object g
# belongs to. Type:
type(g)
# This tells you that g belongs to a class called 'Solution'. To find
# the methods for this class, type
dir(ct.Solution)
# This command returns all method names as a Python list.
# A long list is printed. Some methods are
# inherited from other classes. For example, variable P and
# method set_unnormalized_mass_fractions are
# inherited from a class 'ThermoPhase'. Don't be concerned at this
# point about what these base classes are - we'll come back to them
# later.
# Now that you see what methods are available, you can type
# 'help(<method_name>)' to print help text for any method. For example,
help(ct.Solution.P)
help(ct.Solution.set_unnormalized_mass_fractions)
help(ct.Solution.net_rates_of_progress)
# For help on how to construct objects of a given class, type
# 'help(<classname>)'
help(ct.Solution)
# Now that you know how to get help when you need it, you can
# explore using the Cantera Toolbox on your own. But there are a
# few more useful things to know, which are described in the next
# few tutorials.
#################################################################
# end of tutorial 3
#################################################################
|
gpl-2.0
| -2,860,081,278,530,264,600 | 41.783784 | 84 | 0.555591 | false |
valbertovc/django_semantic_ui
|
polls/admin.py
|
1
|
2085
|
from django.contrib import admin
from django.utils.safestring import mark_safe
from .models import Choice, Question
from polls.forms import QuestionForm, ChoiceForm
from core.utils import SemanticIcons
from django.utils.translation import ugettext_lazy as _
class ChoiceAdmin(admin.ModelAdmin):
model = Choice
actions = None
list_display_links = None
list_display = ('get_action_icons', 'question', 'choice_text', 'votes')
form = ChoiceForm
def get_action_icons(self, obj):
'''
        Add action icons to the admin changelist.
'''
from django.core import urlresolvers
change_url = urlresolvers.reverse('admin:polls_choice_change', args=(obj.id,))
return mark_safe('<a href="{}"><i class="pencil yellow icon"></i></a>'.format(change_url))
get_action_icons.short_description = u''
class ChoiceInline(admin.StackedInline):
model = Choice
extra = 0
class QuestionAdmin(admin.ModelAdmin):
inlines = [ChoiceInline]
list_display = ('get_action_icons', 'question_text', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['question_text']
actions = None
list_display_links = None
list_per_page = 10
form = QuestionForm
date_hierarchy = 'pub_date'
fieldsets = (
(None, { 'fields': ('question_text', 'pub_date')}),
('Advanced', { 'fields': ('description', 'accepted')}),
)
def get_action_icons(self, obj):
from django.core import urlresolvers
change_url = urlresolvers.reverse('admin:polls_question_change', args=(obj.id,))
return mark_safe('<a href="{}"><i class="pencil yellow icon"></i></a>'.format(change_url))
get_action_icons.short_description = u''
def was_published_recently(self, obj):
return SemanticIcons.as_boolean_icons(obj.was_published_recently())
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.short_description = _('Published recently?')
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice, ChoiceAdmin)
|
mit
| 2,115,875,492,208,643,800 | 33.75 | 98 | 0.671463 | false |
mfcovington/djangocms-genome-browser
|
cms_genome_browser/models.py
|
1
|
12337
|
from django.db import models
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator, RegexValidator
from filer.fields.file import FilerFileField
from filer.fields.image import FilerImageField
class Browser(models.Model):
class Meta:
ordering = ['name']
verbose_name = "Genome Browser"
verbose_name_plural = "Genome Browsers"
name = models.CharField('browser name',
help_text='Enter a brief, descriptive name for the browser.',
max_length=255,
unique=True,
)
description = models.TextField('browser description',
blank=True,
help_text='Enter a description for the browser.',
)
image = FilerImageField(
blank=True,
null=True,
help_text='Upload/select an image to represent this genome browser.',
related_name='%(app_label)s_%(class)s_browser_image',
)
slug = models.SlugField('slug',
help_text='Enter a unique slug for this genome browser. ' \
'This should get auto-generated.',
max_length=255,
unique=True,
)
chr = models.CharField('default chromosome',
help_text='The chromosome to display when the browser loads.',
max_length=64,
)
start = models.IntegerField('default start position',
help_text='The start position of range to display when the browser loads.',
validators=[
MinValueValidator(1),
],
)
end = models.IntegerField('default end position',
help_text='The end position of range to display when the browser loads.',
validators=[
MinValueValidator(1),
],
)
coordinate_system = models.ForeignKey('cms_genome_browser.CoordSystem',
help_text='Select a coordinate system. Taxonomy ID, authority, version, ' \
'and UCSC name are shown in parentheses, if present.',
)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def clean(self):
if self.start > self.end:
raise ValidationError('Start position cannot come after end position.')
def __str__(self):
return self.name
class CoordSystem(models.Model):
class Meta:
ordering = ['species', 'auth', 'version']
verbose_name = "Coordinate System"
verbose_name_plural = "Coordinate Systems"
UCSC_OLD_REGEX = r'^[a-z]{2}\d+$' # gs#
UCSC_NEW_REGEX = r'^[a-z]{3}[A-Z][a-z]{2}\d+$' # gggSss#
species = models.ForeignKey('cms_genome_browser.Species',
help_text='Select a species. Taxonomy ID is shown in parentheses, if present.',
)
auth = models.CharField('authority',
blank=True,
help_text='Authority string used in the ' \
'<a href="http://dasregistry.org/" target="_blank">DAS Registry</a>.',
max_length=10,
)
version = models.CharField('version',
blank=True,
help_text='Version string used in the ' \
'<a href="http://dasregistry.org/" target="_blank">DAS Registry</a>.',
max_length=10,
)
ucsc_name = models.CharField('UCSC name',
blank=True,
help_text='UCSC genome browser name of the assembly, if defined in the list of ' \
'<a href="https://genome.ucsc.edu/FAQ/FAQreleases.html#release1" target="_blank">' \
'UCSC genome releases</a>.',
max_length=10,
validators=[
RegexValidator(
regex='%s|%s' % (UCSC_OLD_REGEX, UCSC_NEW_REGEX),
message="UCSC name must be of the format 'gs#' or 'gggSss#'.",
code='invalid_UCSC_name'
),
]
)
def __str__(self):
coord_system_str = self.species.name
supplemental = []
if self.species.taxid:
supplemental.append(str(self.species.taxid))
supplemental.append(' '.join([self.auth, self.version]))
if self.ucsc_name:
supplemental.append(self.ucsc_name)
if supplemental:
coord_system_str += ' (%s)' % '; '.join(supplemental)
return coord_system_str
class Species(models.Model):
class Meta:
ordering = ['name', 'taxid']
verbose_name = "Species"
verbose_name_plural = "Species"
name = models.CharField('species name',
help_text='Enter the species name.',
max_length=255,
)
taxid = models.IntegerField('taxonomy ID',
blank=True,
null=True,
help_text='Enter the Taxonomy ID for the species. ' \
'Taxonomy names and IDs can be found at ' \
'<a href="http://www.ncbi.nlm.nih.gov/taxonomy" target="_blank">NCBI</a>.',
)
def __str__(self):
species_str = self.name
if self.taxid:
species_str += ' (%s)' % self.taxid
return species_str
class Stylesheet(models.Model):
class Meta:
ordering = ['name',]
verbose_name='Stylesheet'
verbose_name_plural='Stylesheets'
STYLESHEET_TYPE_CHOICES = (
('XML', 'DAS XML Stylesheet'),
('JSON', 'JSON-encoded Stylesheet'),
)
name = models.CharField('stylesheet name',
help_text='Enter a brief, descriptive name for this stylesheet.',
max_length=255,
unique=True,
)
description = models.TextField('stylesheet description',
blank=True,
help_text='Describe the style this stylesheet provides.',
)
style_file = FilerFileField(
        help_text='Upload/select a stylesheet for the track. More info can be found in the ' \
'<a href="https://www.biodalliance.org/stylesheets.html" target="_blank">' \
'Stylesheets for Dalliance</a> documentation.',
related_name='%(app_label)s_%(class)s_stylesheet',
)
is_downloadable = models.BooleanField('stylesheet downloadable?',
default=True,
help_text="Add download button for stylesheet file to the genome browser's info window.",
)
style_type = models.CharField('stylesheet type',
choices=STYLESHEET_TYPE_CHOICES,
help_text='Select the type of stylesheet being used.',
max_length=4,
)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Track(models.Model):
class Meta:
ordering = ['browser', 'order']
verbose_name = 'Track'
verbose_name_plural = 'Tracks'
TRACK_TYPE_CHOICES = (
('BAM', 'BAM'),
('BED', (
('BED-MemStore', 'BED (MemStore)'),
('BED-Tabix', 'BED (Tabix)'),
)
),
('bigWig', 'bigWig'),
('bigBed', 'bigBed'),
('DAS', (
('DAS-feature', 'DAS (feature)'),
('DAS-sequence', 'DAS (sequence)'),
)
),
('twoBit', 'twoBit'),
('VCF', (
('VCF-MemStore', 'VCF (MemStore)'),
('VCF-Tabix', 'VCF (Tabix)'),
)
),
('WIG', 'WIG'),
)
order = models.PositiveIntegerField()
name = models.CharField('track name',
help_text='Enter a brief name for the track.',
max_length=64,
)
description = models.CharField('track description',
blank=True,
help_text='Enter a short description for the track.',
max_length=255,
)
browser = models.ForeignKey(Browser,
blank=True,
null=True,
help_text='Specify genome browser this track belongs to.',
)
track_type = models.CharField('track type',
choices = TRACK_TYPE_CHOICES,
help_text='Select the source type for this track.',
max_length=20,
)
data_file = FilerFileField(
help_text='Upload/select a data file for the track. More info can be found in the ' \
'<a href="http://www.biodalliance.org/config-source.html" target="_blank">' \
'Configuring a source</a> documentation.',
related_name='%(app_label)s_%(class)s_data',
)
index_file = FilerFileField(
blank=True,
null=True,
help_text="<strong>If data file is a BAM or Tabix file</strong>, upload/select " \
"an index file (.bai or .tbi) that corresponds to the track's BAM/Tabix file.",
related_name='%(app_label)s_%(class)s_index',
)
collapse_super_groups = models.BooleanField('CSG?',
default=False,
help_text="Attempt to allow more 'gene-like' rendering for some data sources.",
)
provides_entrypoint = models.BooleanField('entry?',
default=False,
# What are Entry Points? http://genboree.org/theCommons/ezfaq/show/epigenome-workshop?faq_id=467
help_text='Does this track provide entry points? ' \
'Entry points comprise the coordinate system on which annotations are made.',
)
pinned = models.BooleanField('pin?',
default=False,
help_text="'Pin' this trackc in the non-scrolling section at the top of the browser.",
)
is_downloadable = models.BooleanField('D/L?',
default=True,
help_text="Add download button for data file to the genome browser's info window.",
)
stylesheet = models.ForeignKey('cms_genome_browser.Stylesheet',
blank=True,
null=True,
help_text='Choose a stylesheet to add cusom styles to this track.',
)
publish_track = models.BooleanField('publish?',
default=True,
help_text='Display track in the genome browser.'
)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
@property
def payload(self):
PAYLOADS = {
'BAM': '',
'BED-MemStore': 'bed',
'BED-Tabix': 'bed',
'bigWig': '',
'bigBed': '',
'DAS-feature': '',
'DAS-sequence': '',
'VCF-MemStore': 'vcf',
'VCF-Tabix': 'vcf',
'twoBit': '',
'WIG': 'wig',
}
return PAYLOADS[self.track_type]
@property
def tier_type(self):
TIER_TYPES = {
'BAM': '',
'BED-MemStore': 'memstore',
'BED-Tabix': 'tabix',
'bigWig': '',
'bigBed': '',
'DAS-feature': '',
'DAS-sequence': 'sequence',
'VCF-MemStore': 'memstore',
'VCF-Tabix': 'tabix',
'twoBit': 'sequence',
'WIG': 'memstore',
}
return TIER_TYPES[self.track_type]
@property
def uri_label(self):
URI_LABELS = {
'BAM': 'bamURI',
'BED-MemStore': 'uri',
'BED-Tabix': 'uri',
'bigWig': 'bwgURI',
'bigBed': 'bwgURI',
'DAS-feature': 'uri',
'DAS-sequence': 'uri',
'VCF-MemStore': 'uri',
'VCF-Tabix': 'uri',
'twoBit': 'twoBitURI',
'WIG': 'uri',
}
return URI_LABELS[self.track_type]
def clean(self):
if self.index_file == None:
if self.track_type == 'BAM':
raise ValidationError("Must upload/select BAM index (.bai) " \
"file for '{}'.".format(self.data_file))
if self.tier_type == 'tabix':
raise ValidationError("Must upload/select Tabix index (.tbi) " \
"file for '{}'.".format(self.data_file))
else:
if self.track_type != 'BAM' and self.tier_type != 'tabix':
raise ValidationError("Index files are only needed if data file is " \
"BAM, BED (Tabix), or VCF (Tabix). " \
"Please remove index file '{}' or switch data file type." \
.format(self.index_file))
def __str__(self):
return self.name
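# Usage sketch (not part of the original model): a view or template could
# assemble a Biodalliance source entry from these properties. The track
# instance and the consuming code below are hypothetical; data_file.url comes
# from django-filer's File model.
#   source = {'name': track.name, track.uri_label: track.data_file.url}
#   if track.tier_type:
#       source['tier_type'] = track.tier_type
#   if track.payload:
#       source['payload'] = track.payload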
|
bsd-3-clause
| -1,954,305,381,308,600,800 | 31.295812 | 143 | 0.550539 | false |
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/numpy/core/numeric.py
|
2
|
79518
|
from __future__ import division, absolute_import, print_function
import sys
import warnings
import collections
from . import multiarray
from . import umath
from .umath import *
from . import numerictypes
from .numerictypes import *
if sys.version_info[0] >= 3:
import pickle
basestring = str
else:
import cPickle as pickle
loads = pickle.loads
__all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'zeros', 'count_nonzero',
'empty', 'broadcast', 'dtype', 'fromstring', 'fromfile',
'frombuffer', 'int_asbuffer', 'where', 'argwhere', 'copyto',
'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops',
'can_cast', 'promote_types', 'min_scalar_type', 'result_type',
'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
'isfortran', 'empty_like', 'zeros_like', 'ones_like',
'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot',
'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
'array2string', 'get_printoptions', 'set_printoptions',
'array_repr', 'array_str', 'set_string_function',
'little_endian', 'require',
'fromiter', 'array_equal', 'array_equiv',
'indices', 'fromfunction', 'isclose',
'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
'seterr', 'geterr', 'setbufsize', 'getbufsize',
'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
'Inf', 'inf', 'infty', 'Infinity',
'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
'ComplexWarning', 'may_share_memory', 'full', 'full_like']
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
bitwise_not = invert
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
nditer = multiarray.nditer
nested_iters = multiarray.nested_iters
broadcast = multiarray.broadcast
dtype = multiarray.dtype
copyto = multiarray.copyto
ufunc = type(sin)
def zeros_like(a, dtype=None, order='K', subok=True):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
.. versionadded:: 1.6.0
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
.. versionadded:: 1.6.0
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=np.float)
>>> y
array([ 0., 1., 2.])
>>> np.zeros_like(y)
array([ 0., 0., 0.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok)
multiarray.copyto(res, 0, casting='unsafe')
return res
def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and order.
See Also
--------
zeros, ones_like
Examples
--------
>>> np.ones(5)
array([ 1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=np.int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[ 1.],
[ 1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[ 1., 1.],
[ 1., 1.]])
"""
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
def ones_like(a, dtype=None, order='K', subok=True):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
.. versionadded:: 1.6.0
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
.. versionadded:: 1.6.0
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.ones_like(x)
array([[1, 1, 1],
[1, 1, 1]])
>>> y = np.arange(3, dtype=np.float)
>>> y
array([ 0., 1., 2.])
>>> np.ones_like(y)
array([ 1., 1., 1.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok)
multiarray.copyto(res, 1, casting='unsafe')
return res
def full(shape, fill_value, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar
Fill value.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
is chosen as `np.array(fill_value).dtype`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
full_like : Fill an array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.full((2, 2), np.inf)
array([[ inf, inf],
[ inf, inf]])
>>> np.full((2, 2), 10, dtype=np.int)
array([[10, 10],
[10, 10]])
"""
a = empty(shape, dtype, order)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
def full_like(a, fill_value, dtype=None, order='K', subok=True):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
full : Fill a new array.
Examples
--------
>>> x = np.arange(6, dtype=np.int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0])
>>> np.full_like(x, 0.1, dtype=np.double)
array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> np.full_like(x, np.nan, dtype=np.double)
array([ nan, nan, nan, nan, nan, nan])
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok)
multiarray.copyto(res, fill_value, casting='unsafe')
return res
def extend_all(module):
adict = {}
for a in __all__:
adict[a] = 1
try:
mall = getattr(module, '__all__')
except AttributeError:
mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
for a in mall:
if a not in adict:
__all__.append(a)
extend_all(umath)
extend_all(numerictypes)
newaxis = None
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
count_nonzero = multiarray.count_nonzero
empty = multiarray.empty
empty_like = multiarray.empty_like
fromstring = multiarray.fromstring
fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
may_share_memory = multiarray.may_share_memory
if sys.version_info[0] < 3:
newbuffer = multiarray.newbuffer
getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
where = multiarray.where
concatenate = multiarray.concatenate
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
promote_types = multiarray.promote_types
min_scalar_type = multiarray.min_scalar_type
result_type = multiarray.result_type
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
putmask = multiarray.putmask
einsum = multiarray.einsum
def asarray(a, dtype=None, order=None):
"""
Convert the input to an array.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F' for FORTRAN)
memory representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
See Also
--------
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asarray(a)
array([1, 2])
Existing arrays are not copied:
>>> a = np.array([1, 2])
>>> np.asarray(a) is a
True
If `dtype` is set, array is copied only if dtype does not match:
>>> a = np.array([1, 2], dtype=np.float32)
>>> np.asarray(a, dtype=np.float32) is a
True
>>> np.asarray(a, dtype=np.float64) is a
False
Contrary to `asanyarray`, ndarray subclasses are not passed through:
>>> issubclass(np.matrix, np.ndarray)
True
>>> a = np.matrix([[1, 2]])
>>> np.asarray(a) is a
False
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
"""
Convert the input to an ndarray, but pass ndarray subclasses through.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes scalars, lists, lists of tuples, tuples, tuples of tuples,
tuples of lists, and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray or an ndarray subclass
Array interpretation of `a`. If `a` is an ndarray or a subclass
of ndarray, it is returned as-is and no copy is performed.
See Also
--------
asarray : Similar function which always returns ndarrays.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and
Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asanyarray(a)
array([1, 2])
Instances of `ndarray` subclasses are passed through as-is:
>>> a = np.matrix([1, 2])
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order, subok=True)
def ascontiguousarray(a, dtype=None):
"""
Return a contiguous array in memory (C order).
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
Data-type of returned array.
Returns
-------
out : ndarray
Contiguous array of same shape and content as `a`, with type `dtype`
if specified.
See Also
--------
asfortranarray : Convert input to an ndarray with column-major
memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> np.ascontiguousarray(x, dtype=np.float32)
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> x.flags['C_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='C', ndmin=1)
def asfortranarray(a, dtype=None):
"""
Return an array laid out in Fortran order in memory.
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
By default, the data-type is inferred from the input data.
Returns
-------
out : ndarray
The input `a` in Fortran, or column-major, order.
See Also
--------
ascontiguousarray : Convert input to a contiguous (C order) array.
asanyarray : Convert input to an ndarray with either row or
column-major memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> y = np.asfortranarray(x)
>>> x.flags['F_CONTIGUOUS']
False
>>> y.flags['F_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='F', ndmin=1)
def require(a, dtype=None, requirements=None):
"""
Return an ndarray of the provided type that satisfies requirements.
This function is useful to be sure that an array with the correct flags
is returned for passing to compiled code (perhaps through ctypes).
Parameters
----------
a : array_like
The object to be converted to a type-and-requirement-satisfying array.
dtype : data-type
        The required data-type; the default data-type is float64.
requirements : str or list of str
The requirements list can be any of the following
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
* 'ALIGNED' ('A') - ensure a data-type aligned array
* 'WRITEABLE' ('W') - ensure a writable array
* 'OWNDATA' ('O') - ensure an array that owns its own data
See Also
--------
asarray : Convert input to an ndarray.
asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfortranarray : Convert input to an ndarray with column-major
memory order.
ndarray.flags : Information about the memory layout of the array.
Notes
-----
The returned array will be guaranteed to have the listed requirements
by making a copy if needed.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
>>> y.flags
C_CONTIGUOUS : False
F_CONTIGUOUS : True
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
if requirements is None:
requirements = []
else:
requirements = [x.upper() for x in requirements]
if not requirements:
return asanyarray(a, dtype=dtype)
if 'ENSUREARRAY' in requirements or 'E' in requirements:
subok = False
else:
subok = True
arr = array(a, dtype=dtype, copy=False, subok=subok)
copychar = 'A'
if 'FORTRAN' in requirements or \
'F_CONTIGUOUS' in requirements or \
'F' in requirements:
copychar = 'F'
elif 'CONTIGUOUS' in requirements or \
'C_CONTIGUOUS' in requirements or \
'C' in requirements:
copychar = 'C'
for prop in requirements:
if not arr.flags[prop]:
arr = arr.copy(copychar)
break
return arr
def isfortran(a):
"""
Returns True if array is arranged in Fortran-order in memory
and not C-order.
Parameters
----------
a : ndarray
Input array.
Examples
--------
    np.array allows you to specify whether the array is written in C-contiguous
order (last index varies the fastest), or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
>>> np.isfortran(np.array([1, 2], order='FORTRAN'))
False
"""
return a.flags.fnc
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : ndarray
Indices of elements that are non-zero. Indices are grouped by element.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``where(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
return transpose(asanyarray(a).nonzero())
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to a.ravel().nonzero()[0].
Parameters
----------
a : ndarray
Input array.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return a.ravel().nonzero()[0]
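# _mode_from_name maps a correlate/convolve mode name to the integer code
# expected by the C routines; only the first letter is inspected, so
# 'valid' -> 0, 'same' -> 1 and 'full' -> 2.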
_mode_from_name_dict = {'v': 0,
's' : 1,
'f' : 2}
def _mode_from_name(mode):
if isinstance(mode, basestring):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def correlate(a, v, mode='valid', old_behavior=False):
"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal
processing texts::
z[k] = sum_n a[n] * conj(v[n+k])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is `valid`, unlike `convolve`, which uses `full`.
old_behavior : bool
        If True, uses the old behavior from Numeric
(correlate(a,v) == correlate(v,a), and the conjugate is not taken
for complex arrays). If False, uses the conventional signal
processing definition.
See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([ 3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([ 2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([ 0.5, 2. , 3.5, 3. , 0. ])
"""
mode = _mode_from_name(mode)
# the old behavior should be made available under a different name, see thread
# http://thread.gmane.org/gmane.comp.python.numeric.general/12609/focus=12630
if old_behavior:
warnings.warn("""
The old behavior of correlate was deprecated for 1.4.0, and will be completely removed
for NumPy 2.0.
The new behavior fits the conventional definition of correlation: inputs are
never swapped, and the second argument is conjugated for complex arrays.""",
DeprecationWarning)
return multiarray.correlate(a, v, mode)
else:
return multiarray.correlate2(a, v, mode)
def convolve(a,v,mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode `same` returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode `valid` returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
Notes
-----
The discrete convolution operation is defined as
.. math:: (f * g)[n] = \\sum_{m = -\\infty}^{\\infty} f[m] g[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([ 0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([ 1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([ 2.5])
"""
a, v = array(a, ndmin=1), array(v, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
    if len(a) == 0:
        raise ValueError('a cannot be empty')
    if len(v) == 0:
        raise ValueError('v cannot be empty')
mode = _mode_from_name(mode)
return multiarray.correlate(a, v[::-1], mode)
def outer(a, b):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) array_like
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) array_like
Second input vector. Input is flattened if
not already 1-dimensional.
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner, einsum
References
----------
.. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([[a, aa, aaa],
[b, bb, bbb],
[c, cc, ccc]], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return a.ravel()[:, newaxis]*b.ravel()[newaxis,:]
# try to import blas optimized dot if available
try:
# importing this changes the dot function for basic 4 types
# to blas-optimized versions.
from ._dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
# docstrings are in add_newdocs.py
inner = multiarray.inner
dot = multiarray.dot
def vdot(a, b):
return dot(asarray(a).ravel().conj(), asarray(b).ravel())
def alterdot():
pass
def restoredot():
pass
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
`a` and `b`, and an array_like object containing two array_like
objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of `a` and the first ``N`` dimensions of `b` are summed
over.
Parameters
----------
a, b : array_like, len(shape) >= 1
Tensors to "dot".
axes : variable type
* integer_like scalar
Number of axes to sum over (applies to both arrays); or
* (2,) array_like, both elements array_like of the same length
List of axes to be summed over, first sequence applying to `a`,
second to `b`.
See Also
--------
dot, einsum
Notes
-----
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]], dtype=bool)
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([[a, b],
[c, d]], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2
array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, 1)
array([[[acc, bdd],
[aaacccc, bbbdddd]],
[[aaaaacccccc, bbbbbdddddd],
[aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)
>>> np.tensordot(a, A, 0) # "Left for reader" (result too long to incl.)
array([[[[[a, b],
[c, d]],
...
>>> np.tensordot(a, A, (0, 1))
array([[[abbbbb, cddddd],
[aabbbbbb, ccdddddd]],
[[aaabbbbbbb, cccddddddd],
[aaaabbbbbbbb, ccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[[abb, cdd],
[aaabbbb, cccdddd]],
[[aaaaabbbbbb, cccccdddddd],
[aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)
"""
try:
iter(axes)
except:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = len(a.shape)
bs = b.shape
ndb = len(b.shape)
equal = True
if (na != nb): equal = False
else:
for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError("shape-mismatch for sum")
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (-1, N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, -1)
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
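# For 2-D inputs, tensordot with axes=1 reduces to an ordinary matrix
# product; e.g. with m = np.arange(6.).reshape(2, 3) and
# n = np.arange(12.).reshape(3, 4), np.allclose(np.tensordot(m, n, 1),
# np.dot(m, n)) is True.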
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int
The number of places by which elements are shifted.
axis : int, optional
The axis along which elements are shifted. By default, the array
is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
"""
a = asanyarray(a)
if axis is None:
n = a.size
reshape = True
else:
try:
n = a.shape[axis]
except IndexError:
raise ValueError('axis must be >= 0 and < %d' % a.ndim)
reshape = False
if n == 0:
return a
shift %= n
indexes = concatenate((arange(n - shift, n), arange(n - shift)))
res = a.take(indexes, axis)
if reshape:
res = res.reshape(a.shape)
return res
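# A negative shift rolls elements toward the beginning of the array, e.g.
# np.roll(np.arange(5), -2) gives array([2, 3, 4, 0, 1]).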
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to roll backwards. The positions of the other axes do not
change relative to one another.
start : int, optional
The axis is rolled until it lies before this position. The default,
0, results in a "complete" roll.
Returns
-------
res : ndarray
Output array.
See Also
--------
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
if axis < 0:
axis += n
if start < 0:
start += n
msg = 'rollaxis: %s (%d) must be >=0 and < %d'
if not (0 <= axis < n):
raise ValueError(msg % ('axis', axis, n))
if not (0 <= start < n+1):
raise ValueError(msg % ('start', start, n+1))
if (axis < start): # it's been removed
start -= 1
if axis==start:
return a
axes = list(range(0, n))
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). By default, the
last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
-3
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa, axisb, axisc=(axis,)*3
a = asarray(a).swapaxes(axisa, 0)
b = asarray(b).swapaxes(axisb, 0)
msg = "incompatible dimensions for cross product\n"\
"(dimension must be 2 or 3)"
if (a.shape[0] not in [2, 3]) or (b.shape[0] not in [2, 3]):
raise ValueError(msg)
if a.shape[0] == 2:
if (b.shape[0] == 2):
cp = a[0]*b[1] - a[1]*b[0]
if cp.ndim == 0:
return cp
else:
return cp.swapaxes(0, axisc)
else:
x = a[1]*b[2]
y = -a[0]*b[2]
z = a[0]*b[1] - a[1]*b[0]
elif a.shape[0] == 3:
if (b.shape[0] == 3):
x = a[1]*b[2] - a[2]*b[1]
y = a[2]*b[0] - a[0]*b[2]
z = a[0]*b[1] - a[1]*b[0]
else:
x = -a[2]*b[1]
y = a[2]*b[0]
z = a[0]*b[1] - a[1]*b[0]
cp = array([x, y, z])
if cp.ndim == 1:
return cp
else:
return cp.swapaxes(0, axisc)
#Use numarray's printing function
from .arrayprint import array2string, get_printoptions, set_printoptions
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
_typelessdata.append(intc)
if issubclass(longlong, int):
_typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero, default is False. Very small
is defined by `precision`, if the precision is 8 then
numbers smaller than 5e-9 are represented as zero.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([ 0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([ 0.000001, 0. , 2. , 3. ])'
"""
if arr.size > 0 or arr.shape==(0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
', ', "array(")
else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
if arr.__class__ is not ndarray:
cName= arr.__class__.__name__
else:
cName = "array"
skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0
if skipdtype:
return "%s(%s)" % (cName, lst)
else:
typename = arr.dtype.name
# Quote typename in the output if it is "complex".
if typename and not (typename[0].isalpha() and typename.isalnum()):
typename = "'%s'" % typename
lf = ''
if issubclass(arr.dtype.type, flexible):
if arr.dtype.names:
typename = "%s" % str(arr.dtype)
else:
typename = "'%s'" % str(arr.dtype)
lf = '\n'+' '*len("array(")
return cName + "(%s, %sdtype=%s)" % (lst, lf, typename)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function is
similar to `array_repr`, the difference being that `array_repr` also
returns information on the kind of array and its data type.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`. The
default is, indirectly, 75.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
See Also
--------
array2string, array_repr, set_printoptions
Examples
--------
>>> np.array_str(np.arange(3))
'[0 1 2]'
"""
return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
Parameters
----------
f : function or None
Function to be used to pretty print arrays. The function should expect
a single array argument and return a string of the representation of
the array. If None, the function is reset to the default NumPy function
to print arrays.
repr : bool, optional
If True (default), the function for pretty printing (``__repr__``)
is set, if False the function that returns the default string
representation (``__str__``) is set.
See Also
--------
set_printoptions, get_printoptions
Examples
--------
>>> def pprint(arr):
... return 'HA! - What are you going to do now?'
...
>>> np.set_string_function(pprint)
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
>>> print a
[0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
>>> np.set_string_function(None)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
`repr` affects either pretty printing or normal string representation.
Note that ``__repr__`` is still affected by setting ``__str__``
because the width of each array element in the returned string becomes
equal to the length of the result of ``__str__()``.
>>> x = np.arange(4)
>>> np.set_string_function(lambda x:'random', repr=False)
>>> x.__str__()
'random'
>>> x.__repr__()
'array([ 0, 1, 2, 3])'
"""
if f is None:
if repr:
return multiarray.set_string_function(array_repr, 1)
else:
return multiarray.set_string_function(array_str, 0)
else:
return multiarray.set_string_function(f, repr)
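# Install the printers defined above as the defaults used for str() and
# repr() of ndarrays.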
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
See Also
--------
mgrid, meshgrid
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
dimensions = tuple(dimensions)
N = len(dimensions)
if N == 0:
return array([], dtype=dtype)
res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
tmp = arange(dim, dtype=dtype)
tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1)
newdim = dimensions[:i] + (1,)+ dimensions[i+1:]
val = zeros(newdim, dtype)
add(tmp, val, res[i])
return res
def fromfunction(function, shape, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, where N is the rank of
`shape`. Each parameter represents the coordinates of the array
varying along a specific axis. For example, if `shape`
        were ``(2, 2)``, then the parameters would in turn be (0, 0), (0, 1),
(1, 0), (1, 1).
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
Returns
-------
fromfunction : any
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
`fromfunction` would match the `shape` parameter.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]], dtype=bool)
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
dtype = kwargs.pop('dtype', float)
args = indices(shape, dtype=dtype)
return function(*args,**kwargs)
def isscalar(num):
"""
Returns True if the type of `num` is a scalar type.
Parameters
----------
num : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `num` is a scalar type, False if it is not.
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
"""
if isinstance(num, generic):
return True
else:
return type(num) in ScalarType
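# _lkup maps every hexadecimal digit to its 4-bit binary string; the empty
# 'L' entry drops the long-integer suffix that hex() appends on Python 2.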
_lkup = {
'0':'0000',
'1':'0001',
'2':'0010',
'3':'0011',
'4':'0100',
'5':'0101',
'6':'0110',
'7':'0111',
'8':'1000',
'9':'1001',
'a':'1010',
'b':'1011',
'c':'1100',
'd':'1101',
'e':'1110',
'f':'1111',
'A':'1010',
'B':'1011',
'C':'1100',
'D':'1101',
'E':'1110',
'F':'1111',
'L':''}
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, the length of
the two's complement if `num` is negative.
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=4)
'1101'
"""
# ' <-- unbreak Emacs fontification
sign = ''
if num < 0:
if width is None:
sign = '-'
num = -num
else:
            # replace num with its two's complement
num = 2**width + num
elif num == 0:
return '0'*(width or 1)
ostr = hex(num)
bin = ''.join([_lkup[ch] for ch in ostr[2:]])
bin = bin.lstrip('0')
if width is not None:
bin = bin.zfill(width)
return sign + bin
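# For non-negative integers the hex-based conversion above agrees with
# Python's own formatting, e.g. binary_repr(10) == format(10, 'b') == '1010'.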
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : int
The value to convert. Only positive values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if base > len(digits):
raise ValueError("Bases greater than 36 not handled in base_repr.")
num = abs(number)
res = []
while num:
res.append(digits[num % base])
num //= base
if padding:
res.append('0' * padding)
if number < 0:
res.append('-')
return ''.join(reversed(res or '0'))
def load(file):
"""
Wrapper around cPickle.load which accepts either a file-like object or
a filename.
Note that the NumPy binary format is not based on pickle/cPickle anymore.
For details on the preferred way of loading and saving files, see `load`
and `save`.
See Also
--------
load, save
"""
if isinstance(file, type("")):
file = open(file, "rb")
return pickle.load(file)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
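# _maketup places `val` in every scalar field of a structured dtype and
# returns the (possibly nested) tuple, e.g.
# _maketup([('a', 'i4'), ('b', 'f8')], 0) gives (0, 0); for plain dtypes the
# value is returned unchanged.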
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0], val) for name in dt.names]
return tuple(res)
def identity(n, dtype=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""
from numpy import eye
return eye(n, dtype=dtype)
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
If either array contains one or more NaNs, False is returned.
Infs are treated as equal if they are in the same place and of the same
sign in both arrays.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise.
See Also
--------
isclose, all, any
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`allclose(a, b)` might be different from `allclose(b, a)` in
some rare cases.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
"""
x = array(a, copy=False, ndmin=1)
y = array(b, copy=False, ndmin=1)
xinf = isinf(x)
yinf = isinf(y)
if any(xinf) or any(yinf):
# Check that x and y have inf's only in the same positions
if not all(xinf == yinf):
return False
# Check that sign of inf's in x and y is the same
if not all(x[xinf] == y[xinf]):
return False
x = x[~xinf]
y = y[~xinf]
# ignore invalid fpe's
with errstate(invalid='ignore'):
r = all(less_equal(abs(x-y), atol + rtol * abs(y)))
return r
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
array([True, False])
>>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
array([True, True])
>>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True])
>>> np.isclose([1.0, np.nan], [1.0, np.nan])
array([True, False])
>>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([True, True])
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
result = less_equal(abs(x-y), atol + rtol * abs(y))
if isscalar(a) and isscalar(b):
result = bool(result)
return result
x = array(a, copy=False, subok=True, ndmin=1)
y = array(b, copy=False, subok=True, ndmin=1)
xfin = isfinite(x)
yfin = isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * ones_like(cond)
y = y * ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[isnan(x) & isnan(y)] = True
return cond
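# With the default tolerances, np.all(np.isclose(a, b)) should agree with
# np.allclose(a, b).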
def array_equal(a1, a2):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(asarray(a1 == a2).all())
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
True
>>> np.array_equiv([1, 2], [1, 3])
False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
try:
return bool(asarray(a1 == a2).all())
except ValueError:
return False
_errdict = {"ignore":ERR_IGNORE,
"warn":ERR_WARN,
"raise":ERR_RAISE,
"call":ERR_CALL,
"print":ERR_PRINT,
"log":ERR_LOG}
_errdict_rev = {}
for key in _errdict.keys():
_errdict_rev[_errdict[key]] = key
del key
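# _errdict_rev maps the numeric error codes back to their string names, for
# use by geterr().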
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall, errstate
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] http://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> old_settings = np.seterr(all='ignore') #seterr to known value
>>> np.seterr(over='raise')
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
>>> np.seterr(all='ignore') # reset to default
{'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.int16(32000) * np.int16(3)
30464
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in short_scalars
>>> old_settings = np.seterr(all='print')
>>> np.geterr()
{'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
>>> np.int16(32000) * np.int16(3)
Warning: overflow encountered in short_scalars
30464
"""
pyvals = umath.geterrobj()
old = geterr()
if divide is None: divide = all or old['divide']
if over is None: over = all or old['over']
if under is None: under = all or old['under']
if invalid is None: invalid = all or old['invalid']
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
(_errdict[over] << SHIFT_OVERFLOW ) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
pyvals[1] = maskvalue
umath.seterrobj(pyvals)
return old
def geterr():
"""
Get the current way of handling floating-point errors.
Returns
-------
res : dict
A dictionary with keys "divide", "over", "under", and "invalid",
whose values are from the strings "ignore", "print", "log", "warn",
"raise", and "call". The keys represent possible floating-point
exceptions, and the values define how these exceptions are handled.
See Also
--------
geterrcall, seterr, seterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterr()
{'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
'under': 'ignore'}
>>> np.arange(3.) / np.arange(3.)
array([ NaN, 1., 1.])
>>> oldsettings = np.seterr(all='warn', over='raise')
>>> np.geterr()
{'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
>>> np.arange(3.) / np.arange(3.)
__main__:1: RuntimeWarning: invalid value encountered in divide
array([ NaN, 1., 1.])
"""
maskvalue = umath.geterrobj()[1]
mask = 7
res = {}
val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
res['divide'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_OVERFLOW) & mask
res['over'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_UNDERFLOW) & mask
res['under'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_INVALID) & mask
res['invalid'] = _errdict_rev[val]
return res
def setbufsize(size):
"""
Set the size of the buffer used in ufuncs.
Parameters
----------
size : int
Size of buffer.
"""
if size > 10e6:
raise ValueError("Buffer size, %s, is too big." % size)
if size < 5:
raise ValueError("Buffer size, %s, is too small." %size)
if size % 16 != 0:
raise ValueError("Buffer size, %s, is not a multiple of 16." %size)
pyvals = umath.geterrobj()
old = getbufsize()
pyvals[0] = size
umath.seterrobj(pyvals)
return old
def getbufsize():
"""
Return the size of the buffer used in ufuncs.
Returns
-------
getbufsize : int
Size of ufunc buffer in bytes.
"""
return umath.geterrobj()[0]
def seterrcall(func):
"""
Set the floating-point error callback function or log object.
There are two ways to capture floating-point error messages. The first
is to set the error-handler to 'call', using `seterr`. Then, set
the function to call using this function.
The second is to set the error-handler to 'log', using `seterr`.
Floating-point errors then trigger a call to the 'write' method of
the provided object.
Parameters
----------
func : callable f(err, flag) or object with write method
Function to call upon floating-point errors ('call'-mode) or
object whose 'write' method is used to log such message ('log'-mode).
The call function takes two arguments. The first is the
type of error (one of "divide", "over", "under", or "invalid"),
and the second is the status flag. The flag is a byte, whose
least-significant bits indicate the status::
            [0 0 0 0 divide over under invalid]
In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
If an object is provided, its write method should take one argument,
a string.
Returns
-------
h : callable, log instance or None
The old error handler.
See Also
--------
seterr, geterr, geterrcall
Examples
--------
Callback upon error:
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> saved_handler = np.seterrcall(err_handler)
>>> save_err = np.seterr(all='call')
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> np.seterrcall(saved_handler)
<function err_handler at 0x...>
>>> np.seterr(**save_err)
{'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'}
Log error message:
>>> class Log(object):
... def write(self, msg):
... print "LOG: %s" % msg
...
>>> log = Log()
>>> saved_handler = np.seterrcall(log)
>>> save_err = np.seterr(all='log')
>>> np.array([1, 2, 3]) / 0.0
LOG: Warning: divide by zero encountered in divide
<BLANKLINE>
array([ Inf, Inf, Inf])
>>> np.seterrcall(saved_handler)
<__main__.Log object at 0x...>
>>> np.seterr(**save_err)
{'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'}
"""
if func is not None and not isinstance(func, collections.Callable):
if not hasattr(func, 'write') or not isinstance(func.write, collections.Callable):
raise ValueError("Only callable can be used as callback")
pyvals = umath.geterrobj()
old = geterrcall()
pyvals[2] = func
umath.seterrobj(pyvals)
return old
def geterrcall():
"""
Return the current callback function used on floating-point errors.
When the error handling for a floating-point error (one of "divide",
"over", "under", or "invalid") is set to 'call' or 'log', the function
that is called or the log instance that is written to is returned by
`geterrcall`. This function or log instance has been set with
`seterrcall`.
Returns
-------
errobj : callable, log instance or None
The current error handler. If no handler was set through `seterrcall`,
``None`` is returned.
See Also
--------
seterrcall, seterr, geterr
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrcall() # we did not yet set a handler, returns None
>>> oldsettings = np.seterr(all='call')
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
>>> oldhandler = np.seterrcall(err_handler)
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> cur_handler = np.geterrcall()
>>> cur_handler is err_handler
True
"""
return umath.geterrobj()[2]
class _unspecified(object):
pass
_Unspecified = _unspecified()
class errstate(object):
"""
errstate(**kwargs)
Context manager for floating-point error handling.
Using an instance of `errstate` as a context manager allows statements in
that context to execute with a known error handling behavior. Upon entering
the context the error handling is set with `seterr` and `seterrcall`, and
upon exiting it is reset to what it was before.
Parameters
----------
kwargs : {divide, over, under, invalid}
Keyword arguments. The valid keywords are the possible floating-point
exceptions. Each keyword should have a string value that defines the
treatment for the particular error. Possible values are
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
See Also
--------
seterr, geterr, seterrcall, geterrcall
Notes
-----
The ``with`` statement was introduced in Python 2.5, and can only be used
there by importing it: ``from __future__ import with_statement``. In
earlier Python versions the ``with`` statement is not available.
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> from __future__ import with_statement # use 'with' in Python 2.5
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
>>> np.arange(3) / 0.
array([ NaN, Inf, Inf])
>>> with np.errstate(divide='warn'):
... np.arange(3) / 0.
...
__main__:2: RuntimeWarning: divide by zero encountered in divide
array([ NaN, Inf, Inf])
>>> np.sqrt(-1)
nan
>>> with np.errstate(invalid='raise'):
... np.sqrt(-1)
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
FloatingPointError: invalid value encountered in sqrt
Outside the context the error handling behavior has not changed:
>>> np.geterr()
{'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
'under': 'ignore'}
"""
# Note that we don't want to run the above doctests because they will fail
# without a from __future__ import with_statement
def __init__(self, **kwargs):
self.call = kwargs.pop('call', _Unspecified)
self.kwargs = kwargs
def __enter__(self):
self.oldstate = seterr(**self.kwargs)
if self.call is not _Unspecified:
self.oldcall = seterrcall(self.call)
def __exit__(self, *exc_info):
seterr(**self.oldstate)
if self.call is not _Unspecified:
seterrcall(self.oldcall)
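# _setdef resets the ufunc error object to its defaults: the default buffer
# size, the default error mask and no error callback.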
def _setdef():
defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
umath.seterrobj(defval)
# set the default values
_setdef()
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
from . import fromnumeric
from .fromnumeric import *
extend_all(fromnumeric)
|
gpl-3.0
| -4,376,133,516,950,663,700 | 28.127473 | 90 | 0.571569 | false |
google-research/deeplab2
|
model/layers/axial_blocks_test.py
|
1
|
1959
|
# coding=utf-8
# Copyright 2021 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for axial_blocks."""
import tensorflow as tf
from deeplab2.model.layers import axial_blocks
class AxialBlocksTest(tf.test.TestCase):
def test_conv_basic_block_correct_output_shape(self):
layer = axial_blocks.AxialBlock(
filters_list=[256, 256],
strides=2)
float_training_tensor = tf.constant(0.0, dtype=tf.float32)
output = layer((tf.zeros([2, 65, 65, 32]),
float_training_tensor))[1]
self.assertListEqual(output.get_shape().as_list(), [2, 33, 33, 256])
def test_conv_bottleneck_block_correct_output_shape(self):
layer = axial_blocks.AxialBlock(
filters_list=[64, 64, 256],
strides=1)
float_training_tensor = tf.constant(0.0, dtype=tf.float32)
output = layer((tf.zeros([2, 65, 65, 32]),
float_training_tensor))[0]
self.assertListEqual(output.get_shape().as_list(), [2, 65, 65, 256])
def test_axial_block_correct_output_shape(self):
layer = axial_blocks.AxialBlock(
filters_list=[128, 64, 256],
strides=2,
attention_type='axial')
float_training_tensor = tf.constant(0.0, dtype=tf.float32)
output = layer((tf.zeros([2, 65, 65, 32]),
float_training_tensor))[1]
self.assertListEqual(output.get_shape().as_list(), [2, 33, 33, 256])
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
| -500,197,564,487,542,700 | 35.277778 | 74 | 0.666156 | false |
kmiernik/Pyspectr
|
bin/spectrum_fitter.py
|
1
|
6663
|
#!/usr/bin/env python3
"""
K. Miernik 2013
k.a.miernik@gmail.com
GPL v3
Spectrum fitting code
"""
import argparse
import math
import numpy
import os
import sys
import time
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_errors
from Pyspectr.hisfile import HisFile as HisFile
from Pyspectr.peak_fitter import PeakFitter as PeakFitter
from Pyspectr.exceptions import GeneralError as GeneralError
class SpectrumParser:
def __init__(self, file_name):
self.base_name, ext = os.path.splitext(file_name)
if len(ext) > 0 and ext in (".gz", ".his", ".tgz"):
self.file_type = 'his'
self.data_file = HisFile(file_name)
        elif len(ext) > 0 and ext == ".txt":
self.file_type = 'txt'
self.data_file = numpy.loadtxt(file_name)
else:
raise GeneralError(
'Files other than txt, his, tgz and gz are not supported')
def parse(self, spectrum, show, pause):
spectra_ids = spectrum.get('id')
id_list = []
if self.file_type == 'his':
for element in spectra_ids.split(','):
element = element.split('-')
if len(element) > 1:
for i in range(int(element[0]), int(element[1]) + 1):
id_list.append(i)
else:
id_list.append(int(element[0]))
elif self.file_type == 'txt':
if spectra_ids != '':
raise GeneralError('Spectrum id not supported for txt files')
else:
id_list.append('')
peaks = spectrum.findall('peak')
x_min = int(spectrum.get('min'))
x_max = int(spectrum.get('max'))
smin = spectrum.get('smin')
smax = spectrum.get('smax')
for spectrum_id in id_list:
plot_name = '{}_{}'.format(self.base_name, spectrum_id)
PF = PeakFitter(peaks, spectrum.get('baseline'), plot_name)
if self.file_type == 'txt':
data_x = self.data_file[x_min:x_max, 0]
data_y = self.data_file[x_min:x_max, 1]
if self.data_file.shape[1] == 2:
data_dy = []
for y in data_y:
dy = numpy.sqrt(y) if y > 0 else 1.0
data_dy.append(dy)
data_dy = numpy.array(data_dy)
else:
data_dy = self.data_file[x_min:x_max, 2]
for iy, y in enumerate(data_dy):
if y <= 0:
data_dy[iy] = 1.0
elif self.file_type == 'his':
data = self.data_file.load_histogram(spectrum_id)
if data[0] != 1:
raise GeneralError('Only 1D histograms are supported')
data_x = data[1][x_min:x_max]
data_y = data[3][x_min:x_max]
data_dy = []
for y in data_y:
dy = numpy.sqrt(y) if y > 0 else 1.0
data_dy.append(dy)
data_dy = numpy.array(data_dy)
if smin is not None and smax is not None:
width = [float(smin), float(smax)]
else:
width = None
fit_result = PF.fit(data_x, data_y, data_dy, width=width)
if show == 'plot' or show == 'svg':
plt.clf()
plt.xlabel('Channel')
plt.ylabel('Counts')
plt.plot(data_x, data_y, linestyle='steps-mid')
plt.plot(data_x, fit_result['baseline'], linestyle='--')
plt.plot(fit_result['x_axis'], fit_result['fit'], linewidth=1.0)
if show == 'svg':
svg_name = 'fit_{0}_{1}_{2}'.format(plot_name,
int(data_x[0]), int(data_x[-1]))
svg_name = svg_name.replace('.', '').\
replace('/', '') + '.svg'
plt.savefig(svg_name)
else:
plt.show()
plt.draw()
time.sleep(pause)
elif show == 'quiet':
pass
for i, peak in enumerate(peaks):
if peak.get('ignore') == 'True':
continue
x0 = PF.params['x{}'.format(i)].value
dx = PF.params['x{}'.format(i)].stderr
A = PF.params['A{}'.format(i)].value
dA = PF.params['A{}'.format(i)].stderr
s = PF.params['s{}'.format(i)].value
E = peaks[i].get('E')
name = peaks[i].get('name')
if name is None:
name = ""
Area = PF.find_area(data_x, i)
print('{:>8} {:>8} {:>8.2f} {:>8.2f}'.\
format(name, E, x0, dx),
'{:>8.1f} {:>8.1f} {:>8.3f} {:>8.1f}'.\
format(A, dA, s, Area))
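# Hedged sketch of the XML config layout this script expects, inferred only
# from the element/attribute names read above ('data_file', 'spectrum', 'peak',
# 'id', 'min', 'max', 'smin', 'smax', 'baseline', 'E', 'name', 'ignore'). The
# root element name is not checked by the script, and PeakFitter may require
# additional peak attributes, so treat all values below as placeholders:
#
#   <config>
#       <data_file name="run001.his">
#           <spectrum id="1,3-5" min="100" max="2000">
#               <peak name="Cs-137" E="661.7"/>
#           </spectrum>
#       </data_file>
#   </config>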
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('config', nargs=1,
help='Config files')
parser.add_argument('--pause', '-p', nargs=1, type=float, default=[0.5],
help='Pause time in seconds')
out_group = parser.add_mutually_exclusive_group()
out_group.add_argument('--plot', action='store_true',
help='Plot window during fitting')
out_group.add_argument('--svg', action='store_true',
help='SVG files saved during fitting')
out_group.add_argument('--quiet', action='store_true',
help='No output during fitting')
args = parser.parse_args()
show = 'plot'
if args.svg:
show = 'svg'
elif args.quiet:
show = 'quiet'
try:
tree = ET.parse(args.config[0])
    except ET.ParseError as err:
print("File '{0}' parsing error: {1}".format(
args.config[0], err))
exit()
root = tree.getroot()
for data_file in root.findall('data_file'):
SP = SpectrumParser(data_file.get('name'))
print('# File: ', data_file.get('name'))
print('# {: ^8} {:^7} {:^8} {:^8} {:^8} {:^8} {:^8} {:^8}'
.format('Name', 'E', 'x0', 'dx', 'A', 'dA', 's', 'Area'))
for spectrum in data_file.findall('spectrum'):
SP.parse(spectrum, show, args.pause[0])
|
gpl-3.0
| -7,458,183,059,348,559,000 | 36.432584 | 80 | 0.47186 | false |
cchannon/WhatIsThis
|
ComputerVision.py
|
1
|
4492
|
from __future__ import print_function
import time
import picamera
from datetime import datetime
import requests
import operator
import numpy as np
import json
import urllib2
def processRequest( json, data, headers, params, lcd ):
lcd.set_color(1.0, 1.0, 0.0)
lcd.clear()
lcd.message('Uploading...')
retries = 0
result = None
while True:
response = requests.request( 'post', _url, json = jsonObj, data = data, headers = headers, params = params )
if response.status_code == 429:
lcd.message( "Message: %s" % ( response.json()['message'] ) )
if retries <= _maxNumRetries:
time.sleep(1)
retries += 1
continue
else:
lcd.message( 'Error: failed after retrying!' )
break
elif response.status_code == 200 or response.status_code == 201:
if 'content-length' in response.headers and int(response.headers['content-length']) == 0:
result = None
elif 'content-type' in response.headers and isinstance(response.headers['content-type'], str):
if 'application/json' in response.headers['content-type'].lower():
result = response.json() if response.content else None
elif 'image' in response.headers['content-type'].lower():
result = response.content
else:
lcd.message( "Error code: %d" % ( response.status_code ) )
lcd.message( "Message: %s" % ( response.json()['message'] ) )
break
lcd.set_color(0.0, 1.0, 0.0)
lcd.clear()
lcd.message('Complete!')
time.sleep(1.0)
lcd.clear()
lcd.set_color(1.0, 1.0, 1.0)
return result
def renderResult (result, lcd) :
descriptionText = result['description']['captions'][0]['text']
if len(descriptionText) <= 16:
lcd.message(descriptionText)
i = 15
while i <= len(descriptionText) :
lcd.clear()
lcd.message(descriptionText[i-15:i])
time.sleep(0.3)
if lcd.is_pressed(LCD.SELECT):
return
if lcd.is_pressed(LCD.LEFT):
i = 15
continue
if i == len(descriptionText):
while True:
if lcd.is_pressed(LCD.SELECT):
break
if lcd.is_pressed(LCD.LEFT):
i = 14
break
i += 1
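# Hedged sketch of the part of the Computer Vision 'analyze' response that
# renderResult relies on (other fields omitted; the exact payload depends on
# the service version, so this is illustrative only):
#
#   {
#     "description": {
#       "captions": [ { "text": "a person holding a cat", "confidence": 0.83 } ]
#     }
#   }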
LCD = None
import Adafruit_CharLCD as LCD
# API parameters
_url = 'https://api.projectoxford.ai/vision/v1.0/analyze'
_key = '' # insert your API key here
_maxNumRetries = 10
params = {'visualFeatures' : 'Color, Categories, Description'}
headers = dict()
headers['Ocp-Apim-Subscription-Key'] = _key
headers['Content-Type'] = 'application/octet-stream'
jsonObj = None
# Initialize the LCD using the pins
lcd = LCD.Adafruit_CharLCDPlate()
lcd.set_color(1.0, 1.0, 1.0)
lcd.clear()
# This part isn't really necessary, but it's fun and it buys a bit of time to connect to internet
for i in range(1,3):
for j in range (1,4):
lcd.clear()
displayMessage = 'Bootup\nin progress.'
if j == 2:
displayMessage +='.'
if j == 3:
displayMessage +='..'
lcd.message(displayMessage)
time.sleep(1.0)
## Validate internet connection
while True:
try:
urllib2.urlopen("http://www.bing.com").close()
except urllib2.URLError:
lcd.clear()
lcd.set_color(1.0, 1.0, 0.0)
lcd.message("Please wait\nfor internets")
time.sleep(1)
else:
lcd.clear()
lcd.message("Connected to\nthe internet!")
time.sleep(2)
break
# Initialize the camera and set parameters
camera = picamera.PiCamera()
camera.resolution = (1920, 1080)
camera.rotation = 90 # you may not need this; depends on how you set up your camera.
lcd.clear()
lcd.message('Take a picture!')
while True:
# Loop through each button and check if it is pressed.
if lcd.is_pressed(LCD.SELECT):
# Button is pressed, change the message and backlight.
lcd.clear()
lcd.message('Capturing...')
imageName = r'/home/pi/CV/image' + str(datetime.now()) + '.jpg'
camera.capture(imageName)
time.sleep(2.0)
with open(imageName, 'rb') as f:
data = f.read()
result = processRequest(json, data, headers, params, lcd)
if result is not None:
renderResult(result, lcd)
|
mit
| 3,266,545,874,019,211,000 | 32.281481 | 116 | 0.586153 | false |
jucacrispim/toxicbuild
|
tests/webui/steps/build_steps.py
|
1
|
2858
|
# -*- coding: utf-8 -*-
import time
from behave import given, when, then
from selenium.common.exceptions import StaleElementReferenceException
from toxicbuild.ui import settings
from tests.webui import take_screenshot
from tests.webui.steps.base_steps import ( # noqa f811
given_logged_in_webui, then_sees_message)
@given('is in the waterfall')
def is_in_waterfall(context):
browser = context.browser
base_url = 'http://{}:{}/'.format(settings.TEST_WEB_HOST,
settings.TORNADO_PORT)
waterfall_url = '{}someguy/repo-bla/waterfall'.format(base_url)
browser.get(waterfall_url)
time.sleep(0.5)
@when('he clicks in the reschedule buildset button in the waterfall')
def click_reschedule(context):
browser = context.browser
def fn():
try:
el = browser.find_elements_by_class_name('fa-redo')[2]
el = el if el.is_displayed() else None
except IndexError:
el = None
return el
el = browser.click_and_retry_on_stale(fn)
assert el
@given('the user already rescheduled a buildset in the waterfall')
@take_screenshot
def buildset_already_rescheduled(context):
browser = context.browser
def fn():
classes = ['build-preparing', 'build-pending']
for cls in classes:
try:
el = browser.find_elements_by_class_name(cls)[0]
except IndexError:
el = None
if el:
break
return el
el = browser.wait_element_become_present(fn)
assert el
@when('the user clicks in the build details button')
def click_buildetails_button(context):
browser = context.browser
def fn():
try:
el = browser.find_elements_by_class_name('build-details-link')[1]
except IndexError:
el = None
return el
el = browser.wait_element_become_present(fn)
el.click()
@then('he sees the build details page')
def see_build_details(context):
browser = context.browser
def fn():
try:
el = browser.find_elements_by_class_name(
'build-details-container')[0]
except IndexError:
el = None
return el
el = browser.wait_element_become_present(fn)
assert el
@given('the user is in the build details page')
def is_in_build_details_page(context):
pass
@then('he waits for the build to finish')
def wait_build_finish(context):
browser = context.browser
def fn():
el = browser.find_elements_by_class_name('build-total-time')[0]
try:
if el.text:
r = el
else:
r = None
except StaleElementReferenceException:
r = None
return r
el = browser.wait_element_become_present(fn)
assert el
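# Hedged reference: the step texts matched by the definitions above, verbatim
# from the decorators (the surrounding feature/scenario structure is up to the
# feature files and is not shown here):
#
#   Given is in the waterfall
#   Given the user already rescheduled a buildset in the waterfall
#   Given the user is in the build details page
#   When he clicks in the reschedule buildset button in the waterfall
#   When the user clicks in the build details button
#   Then he sees the build details page
#   Then he waits for the build to finish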
|
agpl-3.0
| -5,854,433,497,256,047,000 | 23.016807 | 77 | 0.608118 | false |
IrregularShed/newoldtv
|
appleII.py
|
1
|
8885
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# AppleII (Colour) Image Filter - Apple II plugin for The GIMP.
# Copyright (C) 2008, 2010 Paulo Silva <nitrofurano@gmail.com>
# Copyright (C) 2008 Daniel Carvalho
# Copyright (C) 2010 Dave Jeffery <kecskebak.blog@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# sdlbasic version Apple2hirespictFilterClust_1011061846.sdlbas
# Speed enhancements based on blog post by Joao Bueno and Akkana Peck
from array import array
from gimpfu import *
gettext.install("gimp20-python", gimp.locale_directory, unicode=True)
def apple2(img, layer, halftone, pattern):
gimp.context_push()
img.undo_group_start()
# Set up globals
global width, height, height2
global dst_pxl, pxl_size
# Width and height stored in variables for speed
width = img.width
height = img.height
# Create a new duplicate layer above the existing one
position = pdb.gimp_image_get_layer_position(img, layer)
new_layer = pdb.gimp_layer_copy(layer, False)
pdb.gimp_image_add_layer(img, new_layer, position)
# Resize the new layer to create two "buffer" scanlines
height2 = height + 2
pdb.gimp_layer_resize(new_layer, width, height2, 0, 0)
# Create a copy of the image to write to
dst_rgn = new_layer.get_pixel_rgn(0, # x
0, # y
width, # w
height2, # h
True, # dirty
False ) # shadow
dst_pxl = array("B", dst_rgn[0:width, 0:height2])
# Work out colour depth of image
pxl_size = dst_rgn.bpp
# Define two halftone clusters - tuples used for speed
cluster = ((( 0, 6, 8, 14),
( 2, 12, 4, 10),
( 8, 14, 0, 6),
( 4, 10, 2, 12)),
(( 0, 12, 3, 15),
( 8, 4, 11, 7),
( 2, 14, 1, 13),
(10, 6, 9, 5)))
# Define Apple II palette
colours = ((0x00, 0x00, 0x00), (0x00, 0x00, 0xFF),
(0xFF, 0x00, 0x00), (0xFF, 0x00, 0xFF),
(0x00, 0xFF, 0x00), (0x00, 0xFF, 0xFF),
(0xFF, 0xFF, 0x00), (0xFF, 0xFF, 0xFF))
BLACK = ink(0x00, 0x00, 0x00)
BLUE = ink(0x00, 0x00, 0xFF)
RED = ink(0xFF, 0x00, 0x00)
MAGENTA = ink(0xFF, 0x00, 0xFF)
GREEN = ink(0x00, 0xFF, 0x00)
CYAN = ink(0x00, 0xFF, 0xFF)
YELLOW = ink(0xFF, 0xFF, 0x00)
WHITE = ink(0xFF, 0xFF, 0xFF)
# Define primary table
prim_table = ((0, 1), (1, 2), (2, 2), (1, 0),
(2, 0), (1, 2), (2, 2), (3, 1))
# Used on toolbar
ipass = 1
if halftone:
# Process images with halftones
# Initialise progress bar
gimp.progress_init("Apple II (Colour) Image Filter - Pass " + str(ipass))
for y in range(0, height):
# Update progress bar
gimp.progress_init("Apple II (Colour) Image Filter - Pass " + str(ipass))
gimp.progress_update(float(y) / height)
for x in range(0, width):
rgba = point(x, y)
r, g, b = rgba[0], rgba[1], rgba[2]
patgf = (((cluster[pattern][y % 4][x % 4] + 1) * 255) / 16)
rpat = 1 if r > patgf else 0
gpat = 1 if g > patgf else 0
bpat = 1 if b > patgf else 0
o4b = (bpat + (rpat * 2) + (gpat * 4)) % 8
rgba = ink(colours[o4b][0], colours[o4b][1], colours[o4b][2])
dot(x, y, rgba)
ipass += 1
# Colour Correction
# Set buffer line
yed = height2 - 2
# Initialise progress bar
gimp.progress_init("Apple II Image Filter - Pass " + str(ipass))
# Process image a scanline at a time
for y in range(0, height):
# Update progress bar
gimp.progress_update(float(y) / height)
# Use scanline to create proccessed buffer scanline (yed)
for x1 in range(0, (width / 7) + 2):
cflc = 0
for x2 in range (0, 7):
x = x2 + (x1 * 7)
rgba = point(x, y)
r, g, b = rgba[0], rgba[1], rgba[2]
prim = (g / 128) * 4 + (r / 128) * 2 + (b / 128)
apv, cfl = prim_table[prim]
if(x % 2) == 0:
if apv in (0, 2):
dot(x, yed, BLACK)
if apv in (0, 1):
dot(x + 1, yed, BLACK)
cflc = cflc + cfl
if cflc < 8:
dot(x1, yed + 1, BLACK)
# Clear scanline in actual image
blank_line(y, BLACK)
for x in range(0, width, 2):
rgba = point(x, yed)
b = rgba[2]
if b > 127:
dot(x, y, MAGENTA)
for x in range(1, width, 2):
rgba = point(x, yed)
b = rgba[2]
if b > 127:
dot(x, y, GREEN)
for x in range(0, width - 2, 2):
rgba1 = point(x, yed)
rgba2 = point(x + 2, yed)
b1 = rgba1[2]
b2 = rgba2[2]
if (b1 > 127) and (b2 > 127):
dot(x + 1, y, MAGENTA)
for x in range(1, width - 2, 2):
rgba1 = point(x, yed)
rgba2 = point(x + 2, yed)
b1 = rgba1[2]
b2 = rgba2[2]
if (b1 > 127) and (b2 > 127):
dot(x + 1, y, GREEN)
for x in range(1, width - 1):
rgba1 = point(x, yed)
rgba2 = point(x + 1, yed)
b1 = rgba1[2]
b2 = rgba2[2]
if (b1 > 127) and (b2 > 127):
dot(x, y, WHITE)
dot(x + 1, y, WHITE)
for x in range(1, width - 2):
rgba1 = point(x, y)
rgba2 = point(x + 1, y)
rgba3 = point(x + 2, y)
white_pxl = array("B", "\xff" * pxl_size)
if (rgba1 == white_pxl and
rgba3 == white_pxl and
rgba2 != white_pxl):
dot (x + 1, y, BLACK)
for x1 in range(0, (width / 7) + 2):
q = point(x1, yed + 1)
q = q[2]
for x2 in range(0, 7):
n = point(x1 * 7 + x2, y)
if(n == MAGENTA) and q > 128:
n = BLUE;
if(n == GREEN) and q > 128:
n = ink(0xff,0x7f,0x00)
dot(x1 * 7 + x2, y, n)
blank_line(yed, WHITE)
blank_line(yed + 1, WHITE)
dst_rgn[0:width, 0:height2] = dst_pxl.tostring()
new_layer.update(0, 0, width, height2)
layer = pdb.gimp_image_merge_down(img, new_layer, CLIP_TO_IMAGE)
img.undo_group_end()
gimp.context_pop()
def ink(r, g, b):
rgba = array("B", "\xff" * pxl_size)
rgba[0], rgba[1], rgba[2] = r, g, b
return rgba
def dot(x, y, rgba):
global dst_pxl
if x in range(width):
dst_pos = (x + width * y) * pxl_size
dst_pxl[dst_pos : dst_pos + pxl_size] = rgba
def point(x, y):
if x in range(width):
dst_pos = (x + width * y) * pxl_size
return dst_pxl[dst_pos: dst_pos + pxl_size]
else:
return [0] * pxl_size
def blank_line(y, rgba):
global dst_pxl
line = array("B", rgba * width)
dst_pos = (width * y) * pxl_size
dst_pxl[dst_pos : dst_pos + (pxl_size * width)] = line
register("python-fu-apple2",
N_("AppleII (Colour) Image Filter"),
"",
"Nitrofurano",
"Nitrofurano",
"2008",
N_("_AppleII (Colour)"),
# "RGB*, GRAY*",
"RGB*",
[(PF_IMAGE, "image", _("Input image"), None),
(PF_DRAWABLE, "drawable", _("Input drawable"), None),
(PF_TOGGLE, "halftone", _("Use halftones?"), True),
(PF_RADIO, "pattern", _("Halftone cluster"), 0,
((_("One"), 0),
(_("Two"), 1)))
],
[],
apple2,
menu="<Image>/Filters/Retro Computing",
domain=("gimp20-python", gimp.locale_directory))
main()
|
gpl-3.0
| 8,907,679,847,010,967,000 | 31.075812 | 85 | 0.48475 | false |
RicardoJohann/frappe
|
frappe/hooks.py
|
1
|
8537
|
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "frappe"
app_title = "Frappe Framework"
app_publisher = "Frappe Technologies"
app_description = "Full stack web framework with Python, Javascript, MariaDB, Redis, Node"
app_icon = "octicon octicon-circuit-board"
app_color = "orange"
source_link = "https://github.com/frappe/frappe"
app_license = "MIT"
develop_version = '12.x.x-develop'
staging_version = '11.0.3-beta.29'
app_email = "info@frappe.io"
docs_app = "frappe_io"
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"
page_js = {
"setup-wizard": "public/js/frappe/setup_wizard.js"
}
# website
app_include_js = [
"assets/js/libs.min.js",
"assets/js/desk.min.js",
"assets/js/list.min.js",
"assets/js/form.min.js",
"assets/js/control.min.js",
"assets/js/report.min.js",
"assets/frappe/js/frappe/toolbar.js"
]
app_include_css = [
"assets/css/desk.min.css",
"assets/css/list.min.css",
"assets/css/form.min.css",
"assets/css/report.min.css",
"assets/css/module.min.css"
]
web_include_js = [
"website_script.js"
]
bootstrap = "assets/frappe/css/bootstrap.css"
web_include_css = [
"assets/css/frappe-web.css"
]
website_route_rules = [
{"from_route": "/blog/<category>", "to_route": "Blog Post"},
{"from_route": "/kb/<category>", "to_route": "Help Article"},
{"from_route": "/newsletters", "to_route": "Newsletter"}
]
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
email_append_to = ["Event", "ToDo", "Communication"]
get_rooms = 'frappe.chat.doctype.chat_room.chat_room.get_rooms'
calendars = ["Event"]
# login
on_session_creation = [
"frappe.core.doctype.activity_log.feed.login_feed",
"frappe.core.doctype.user.user.notify_admin_access_to_system_manager",
"frappe.limits.check_if_expired",
"frappe.utils.scheduler.reset_enabled_scheduler_events",
]
# permissions
permission_query_conditions = {
"Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
"ToDo": "frappe.desk.doctype.todo.todo.get_permission_query_conditions",
"User": "frappe.core.doctype.user.user.get_permission_query_conditions",
"Note": "frappe.desk.doctype.note.note.get_permission_query_conditions",
"Kanban Board": "frappe.desk.doctype.kanban_board.kanban_board.get_permission_query_conditions",
"Contact": "frappe.contacts.address_and_contact.get_permission_query_conditions_for_contact",
"Address": "frappe.contacts.address_and_contact.get_permission_query_conditions_for_address",
"Communication": "frappe.core.doctype.communication.communication.get_permission_query_conditions_for_communication",
"Workflow Action": "frappe.workflow.doctype.workflow_action.workflow_action.get_permission_query_conditions"
}
has_permission = {
"Event": "frappe.desk.doctype.event.event.has_permission",
"ToDo": "frappe.desk.doctype.todo.todo.has_permission",
"User": "frappe.core.doctype.user.user.has_permission",
"Note": "frappe.desk.doctype.note.note.has_permission",
"Kanban Board": "frappe.desk.doctype.kanban_board.kanban_board.has_permission",
"Contact": "frappe.contacts.address_and_contact.has_permission",
"Address": "frappe.contacts.address_and_contact.has_permission",
"Communication": "frappe.core.doctype.communication.communication.has_permission",
"Workflow Action": "frappe.workflow.doctype.workflow_action.workflow_action.has_permission",
"File": "frappe.core.doctype.file.file.has_permission"
}
has_website_permission = {
"Address": "frappe.contacts.doctype.address.address.has_website_permission"
}
standard_queries = {
"User": "frappe.core.doctype.user.user.user_query"
}
doc_events = {
"*": {
"on_update": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.core.doctype.activity_log.feed.update_feed",
"frappe.workflow.doctype.workflow_action.workflow_action.process_workflow_actions"
],
"after_rename": "frappe.desk.notifications.clear_doctype_notifications",
"on_cancel": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.workflow.doctype.workflow_action.workflow_action.process_workflow_actions"
],
"on_trash": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.workflow.doctype.workflow_action.workflow_action.process_workflow_actions"
],
"on_change": [
"frappe.core.doctype.feedback_trigger.feedback_trigger.trigger_feedback_request",
]
},
"Email Group Member": {
"validate": "frappe.email.doctype.email_group.email_group.restrict_email_group"
},
}
scheduler_events = {
"all": [
"frappe.email.queue.flush",
"frappe.email.doctype.email_account.email_account.pull",
"frappe.email.doctype.email_account.email_account.notify_unreplied",
"frappe.oauth.delete_oauth2_data",
"frappe.integrations.doctype.razorpay_settings.razorpay_settings.capture_payment",
"frappe.twofactor.delete_all_barcodes_for_users",
"frappe.integrations.doctype.gcalendar_settings.gcalendar_settings.sync",
"frappe.website.doctype.web_page.web_page.check_publish_status"
],
"hourly": [
"frappe.model.utils.link_count.update_link_count",
'frappe.model.utils.user_settings.sync_user_settings',
"frappe.utils.error.collect_error_snapshots",
"frappe.desk.page.backups.backups.delete_downloadable_backups",
"frappe.limits.update_space_usage",
"frappe.desk.doctype.auto_repeat.auto_repeat.make_auto_repeat_entry",
"frappe.deferred_insert.save_to_db"
],
"daily": [
"frappe.email.queue.clear_outbox",
"frappe.desk.notifications.clear_notifications",
"frappe.core.doctype.error_log.error_log.set_old_logs_as_seen",
"frappe.desk.doctype.event.event.send_event_digest",
"frappe.sessions.clear_expired_sessions",
"frappe.email.doctype.notification.notification.trigger_daily_alerts",
"frappe.realtime.remove_old_task_logs",
"frappe.utils.scheduler.disable_scheduler_on_expiry",
"frappe.utils.scheduler.restrict_scheduler_events_if_dormant",
"frappe.email.doctype.auto_email_report.auto_email_report.send_daily",
"frappe.core.doctype.feedback_request.feedback_request.delete_feedback_request",
"frappe.core.doctype.activity_log.activity_log.clear_authentication_logs",
],
"daily_long": [
"frappe.integrations.doctype.dropbox_settings.dropbox_settings.take_backups_daily",
"frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_daily"
],
"weekly_long": [
"frappe.integrations.doctype.dropbox_settings.dropbox_settings.take_backups_weekly",
"frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_weekly",
"frappe.utils.change_log.check_for_update",
"frappe.desk.doctype.route_history.route_history.flush_old_route_records"
],
"monthly": [
"frappe.email.doctype.auto_email_report.auto_email_report.send_monthly"
],
"monthly_long": [
"frappe.integrations.doctype.s3_backup_settings.s3_backup_settings.take_backups_monthly"
]
}
get_translated_dict = {
("doctype", "System Settings"): "frappe.geo.country_info.get_translated_dict",
("page", "setup-wizard"): "frappe.geo.country_info.get_translated_dict"
}
sounds = [
{"name": "email", "src": "/assets/frappe/sounds/email.mp3", "volume": 0.1},
{"name": "submit", "src": "/assets/frappe/sounds/submit.mp3", "volume": 0.1},
{"name": "cancel", "src": "/assets/frappe/sounds/cancel.mp3", "volume": 0.1},
{"name": "delete", "src": "/assets/frappe/sounds/delete.mp3", "volume": 0.05},
{"name": "click", "src": "/assets/frappe/sounds/click.mp3", "volume": 0.05},
{"name": "error", "src": "/assets/frappe/sounds/error.mp3", "volume": 0.1},
{"name": "alert", "src": "/assets/frappe/sounds/alert.mp3", "volume": 0.2},
# {"name": "chime", "src": "/assets/frappe/sounds/chime.mp3"},
# frappe.chat sounds
{ "name": "chat-message", "src": "/assets/frappe/sounds/chat-message.mp3", "volume": 0.1 },
{ "name": "chat-notification", "src": "/assets/frappe/sounds/chat-notification.mp3", "volume": 0.1 }
# frappe.chat sounds
]
bot_parsers = [
'frappe.utils.bot.ShowNotificationBot',
'frappe.utils.bot.GetOpenListBot',
'frappe.utils.bot.ListBot',
'frappe.utils.bot.FindBot',
'frappe.utils.bot.CountBot'
]
setup_wizard_exception = "frappe.desk.page.setup_wizard.setup_wizard.email_setup_wizard_exception"
before_write_file = "frappe.limits.validate_space_limit"
before_migrate = ['frappe.patches.v11_0.sync_user_permission_doctype_before_migrate.execute']
otp_methods = ['OTP App','Email','SMS']
|
mit
| -6,504,451,987,586,173,000 | 36.279476 | 118 | 0.733162 | false |
praekelt/mc2
|
mc2/organizations/tests/base.py
|
1
|
2289
|
from django.test import TransactionTestCase
from django.db.models import Q
from django.test.client import RequestFactory
from django.utils.text import slugify
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from mc2.organizations.models import Organization, OrganizationUserRelation
class OrganizationTestCase(TransactionTestCase):
def mk_user(self, username='foobar', email='foobar@gmail.com',
password='password', **kwargs):
User = get_user_model()
return User.objects.create_user(username, email, password, **kwargs)
def mk_organization(self, name='Foo', users=[], **kwargs):
fields = {
'name': name,
'slug': slugify(unicode(name))
}
fields.update(kwargs)
org = Organization.objects.create(**fields)
for user in users:
OrganizationUserRelation.objects.create(
user=user,
organization=org,
is_admin=True)
return org
def mk_request(self, method, *args, **kwargs):
request = RequestFactory()
request = getattr(request, method)(*args, **kwargs)
request.session = {}
return request
def get_perms(self, perm):
if isinstance(perm, basestring):
perms = (perm,)
else:
perms = perm
perms = [p.split('.', 1) for p in perms]
filter_clauses = [
Q(content_type__app_label=p[0], codename=p[1])
for p in perms]
perms_qs = Permission.objects.filter(
reduce(lambda x, y: x | y, filter_clauses))
if len(perms_qs) != len(perms):
raise Permission.DoesNotExist
return perms_qs
def grant_perms(self, obj, perm):
perms_field = ('permissions'
if isinstance(obj, Group)
else 'user_permissions')
perms = list(self.get_perms(perm))
getattr(obj, perms_field).add(*perms)
def revoke_perms(self, obj, perm):
perms_field = ('permissions'
if isinstance(obj, Group)
else 'user_permissions')
perms = list(self.get_perms(perm))
getattr(obj, perms_field).remove(*perms)
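    # Hedged usage sketch inside a test case built on this class. Permission
    # codenames below are hypothetical examples of the 'app_label.codename'
    # form that get_perms/grant_perms/revoke_perms expect:
    #
    #     user = self.mk_user()
    #     org = self.mk_organization(name='Acme', users=[user])
    #     self.grant_perms(user, 'organizations.change_organization')
    #     self.revoke_perms(user, ('organizations.change_organization',
    #                              'organizations.delete_organization'))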
|
bsd-2-clause
| -7,064,009,165,907,239,000 | 34.215385 | 76 | 0.591525 | false |
praekelt/diamondash
|
diamondash/widgets/histogram/tests/test_histogram.py
|
1
|
1032
|
from twisted.trial import unittest
from diamondash import utils
from diamondash.config import ConfigError
from diamondash.widgets.histogram import HistogramWidgetConfig
def mk_config_data(**overrides):
return utils.add_dicts({
'name': 'test-histogram',
'target': 'some.target',
'time_range': '1d',
'backend': {'type': 'diamondash.tests.utils.ToyBackend'}
}, overrides)
class ChartWidgetConfigTestCase(unittest.TestCase):
def test_parsing(self):
config = HistogramWidgetConfig(mk_config_data())
self.assertEqual(config['backend']['bucket_size'], 3600000)
metric_config, = config['backend']['metrics']
self.assertEqual(metric_config['target'], 'some.target')
self.assertEqual(metric_config['name'], 'test-histogram')
def test_parsing_config_for_no_target(self):
config = mk_config_data()
del config['target']
self.assertRaises(
ConfigError,
HistogramWidgetConfig.parse,
config)
|
bsd-3-clause
| -7,736,860,265,764,022,000 | 30.272727 | 67 | 0.658915 | false |
beeftornado/sentry
|
src/sentry/migrations/0127_backfill_platformexternalissue_project_id.py
|
1
|
2415
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-11-10 00:02
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import sentry.db.models.fields.foreignkey
from sentry.models.platformexternalissue import PlatformExternalIssue
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
def backfill_platformexternalissue_project_id(apps, schema_editor):
"""
Fill the PlatformExternalIssue.project_id from related Group.project_id.
"""
PlatformExternalIssue = apps.get_model("sentry", "PlatformExternalIssue")
Group = apps.get_model("sentry", "Group")
external_issues_with_group = PlatformExternalIssue.objects.filter(
project_id__isnull=True
).select_related("group")
for external_issue in RangeQuerySetWrapperWithProgressBar(
queryset=external_issues_with_group, step=1000
):
try:
PlatformExternalIssue.objects.filter(id=external_issue.id).update(
project_id=external_issue.group.project_id
)
except Group.DoesNotExist:
pass
class Migration(migrations.Migration):
# This flag is used to mark that a migration shouldn't be automatically run in
# production. We set this to True for operations that we think are risky and want
# someone from ops to run manually and monitor.
# General advice is that if in doubt, mark your migration as `is_dangerous`.
# Some things you should always mark as dangerous:
# - Large data migrations. Typically we want these to be run manually by ops so that
# they can be monitored. Since data migrations will now hold a transaction open
# this is even more important.
# - Adding columns to highly active tables, even ones that are NULL.
is_dangerous = True
# This flag is used to decide whether to run this migration in a transaction or not.
# By default we prefer to run in a transaction, but for migrations where you want
# to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
# want to create an index concurrently when adding one to an existing table.
atomic = False
dependencies = [
("sentry", "0126_make_platformexternalissue_group_id_flexfk"),
]
operations = [
migrations.RunPython(backfill_platformexternalissue_project_id, migrations.RunPython.noop),
]
|
bsd-3-clause
| 973,593,084,817,502,600 | 42.125 | 99 | 0.719255 | false |
jeremiedecock/snippets
|
python/beautifulsoup/get_images_absolute_urls.py
|
1
|
2696
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Warning:
# Urllib is part of the Python3 standard library but this is not the case for
# urllib2 and urllib3 !
# "urllib and urllib2 have little to do with each other. They were designed to
# be independent and standalone, each solving a different scope of problems,
# and urllib3 follows in a similar vein."
# Required package (on Debian8):
# - BeautifulSoup4: python3-bs4
# Online documentation:
# - BeautifulSoup4: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
# - Urllib: https://docs.python.org/3/library/internet.html
# https://docs.python.org/3/library/urllib.request.html
import argparse
from bs4 import BeautifulSoup
import urllib.request
from urllib.parse import urljoin
def main():
"""Main function"""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description='A BeautifulSoup snippet.')
parser.add_argument("url", nargs=1, metavar="URL",
help="The URL of the webpage to parse.")
args = parser.parse_args()
url = args.url[0]
# GET HTML ################################################################
html = urllib.request.urlopen(url).read()
# PARSE HTML ##############################################################
soup = BeautifulSoup(html)
for anchor in soup.find_all('img'):
relative_url = anchor.get('src')
absolute_url = urljoin(url, relative_url)
print(absolute_url)
if __name__ == '__main__':
main()
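# Hedged usage example (run from a shell; the URL is a placeholder):
#   python3 get_images_absolute_urls.py "http://www.example.com/"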
|
mit
| 2,120,359,385,086,357,000 | 36.416667 | 79 | 0.665924 | false |
xlk521/cloudguantou
|
oauth2/libs/multipart.py
|
1
|
1462
|
import codecs
import mimetypes
from uuid import uuid4
from io import BytesIO
CRLF = '\r\n'
writer = codecs.lookup('utf-8')[3]
def guess_type(filename):
    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def iter_fields(fields):
if isinstance(fields, dict):
fields = fields.iteritems()
return ((k, v) for k, v in fields)
def build_multipart(fields, boundary=None):
body = BytesIO()
boundary = boundary or uuid4().hex
for field_name, value in iter_fields(fields):
body.write('--%s%s'%(boundary, CRLF))
if isinstance(value, tuple):
file_name, data = value
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"%s'%(field_name, file_name, CRLF))
body.write('Content-Type: %s%s'%(guess_type(file_name), CRLF*2))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"%s'
%(field_name, CRLF))
body.write('Content-Type: text/plain%s'%(CRLF*2))
if isinstance(data, int):
data = str(data)
if isinstance(data, unicode):
writer(body).write(data)
else:
body.write(data)
body.write(CRLF)
body.write('--%s--%s'%(boundary, CRLF))
content_type = 'multipart/form-data; boundary=%s' % boundary
return body.getvalue(), content_type
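# Hedged usage sketch (field names and the file name are made up):
#
#   body, content_type = build_multipart({
#       'status': u'hello world',
#       'pic': ('photo.jpg', open('photo.jpg', 'rb').read()),
#   })
#   # POST `body` with the header {'Content-Type': content_type}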
|
bsd-3-clause
| -2,128,613,986,496,722,200 | 30.106383 | 78 | 0.582079 | false |
lmazuel/azure-sdk-for-python
|
azure-mgmt-batchai/azure/mgmt/batchai/models/resource_id_py3.py
|
1
|
1054
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceId(Model):
"""Represents a resource ID. For example, for a subnet, it is the resource URL
for the subnet.
All required parameters must be populated in order to send to Azure.
:param id: Required. The ID of the resource
:type id: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(self, *, id: str, **kwargs) -> None:
super(ResourceId, self).__init__(**kwargs)
self.id = id
|
mit
| 8,363,730,131,767,143,000 | 29.114286 | 82 | 0.552182 | false |
linuxlewis/djorm-ext-pgfulltext
|
djorm_pgfulltext/models.py
|
1
|
13514
|
# -*- coding: utf-8 -*-
from itertools import repeat
import six
from django.db import models, connections
from django.db.models.query import QuerySet
from django.utils.encoding import smart_text
from djorm_pgfulltext.utils import adapt
# Compatibility import and fixes section.
try:
from django.db.transaction import atomic
except ImportError:
# This encapsulates pre django 1.6 transaction
# behavior under same abstraction as django 1.6 atomic
# decorator. This not intends to emulate django 1.6 atomic
# behavior, only has partially same interface for easy
# use.
from django.db import transaction
class atomic(object):
def __init__(self, using=None):
self.using = using
def __enter__(self):
if not transaction.is_managed(using=self.using):
transaction.enter_transaction_management(using=self.using)
self.forced_managed = True
else:
self.forced_managed = False
def __exit__(self, *args, **kwargs):
try:
if self.forced_managed:
transaction.commit(using=self.using)
else:
transaction.commit_unless_managed(using=self.using)
finally:
if self.forced_managed:
transaction.leave_transaction_management(using=self.using)
def auto_update_search_field_handler(sender, instance, *args, **kwargs):
instance.update_search_field()
class SearchManagerMixIn(object):
"""
A mixin to create a Manager with a 'search' method that may do a full text search
on the model.
    The manager is set up with a list of one or more of the model's fields that will be
    searched. It can be a list of field names, or a list of tuples (field_name, weight).
    It can also be None, in which case every CharField and TextField in the model will
    be searched.
    You can also give a 'search_field', a VectorField into which the values of the
    searched fields are copied and normalized. If you give it, the searches will be made
    on this field; if not, they will be made directly in the searched fields.
    When using search_field, if auto_update = True, Django signals will be used to
    automatically synchronize the search_field with the searched fields every time
    instances are saved. If not, you can call the 'update_search_field' method on model
    instances to do this. If search_field is not used, both auto_update and
    update_search_field do nothing. Alternatively, you can create a PostgreSQL trigger
    to do the synchronization at the database level, see this:
    http://www.postgresql.org/docs/9.1/interactive/textsearch-features.html#TEXTSEARCH-UPDATE-TRIGGERS
    In both cases, you should create a text search index, on either the searched fields
    or the compound search_field, as explained here:
    http://www.postgresql.org/docs/9.1/interactive/textsearch-tables.html#TEXTSEARCH-TABLES-INDEX
    Finally, you can give a 'config', the Postgres text search configuration that will
    be used to normalize the search_field and the queries. How you can create a
    configuration is explained here:
    http://www.postgresql.org/docs/9.1/interactive/textsearch-configuration.html
    Note that 'config' can be a tuple as in ('pg_catalog.english', 'pg_catalog.simple').
    In this case, fields are tokenized using each of the tokenizers specified in 'config'
    and the result is concatenated. This allows you to create a tsvector with multiple
    configs.
    To do all those actions in the database, create a setup SQL script for Django:
    https://docs.djangoproject.com/en/1.4/howto/initial-data/#providing-initial-sql-data
"""
def __init__(self,
fields=None,
search_field='search_index',
config='pg_catalog.english',
auto_update_search_field=False):
self.search_field = search_field
self.default_weight = 'D'
self.config = config
self.auto_update_search_field = auto_update_search_field
self._fields = fields
super(SearchManagerMixIn, self).__init__()
def contribute_to_class(self, cls, name):
'''
Called automatically by Django when setting up the model class.
'''
if not cls._meta.abstract:
# Attach this manager as _fts_manager in the model class.
if not getattr(cls, '_fts_manager', None):
cls._fts_manager = self
# Add 'update_search_field' instance method, that calls manager's update_search_field.
if not getattr(cls, 'update_search_field', None):
def update_search_field(self, search_field=None, fields=None, using=None, config=None, extra=None):
self._fts_manager.update_search_field(
pk=self.pk, search_field=search_field, fields=fields, using=using, config=config, extra=extra
)
setattr(cls, 'update_search_field', update_search_field)
if self.auto_update_search_field:
models.signals.post_save.connect(auto_update_search_field_handler, sender=cls)
super(SearchManagerMixIn, self).contribute_to_class(cls, name)
def get_queryset(self):
return SearchQuerySet(model=self.model, using=self._db)
def search(self, *args, **kwargs):
return self.get_queryset().search(*args, **kwargs)
def update_search_field(self, pk=None, search_field=None, fields=None, config=None, using=None, extra=None):
"""
Update the search_field of one instance, or a list of instances, or
all instances in the table (pk is one key, a list of keys or none).
If there is no search_field, this function does nothing.
:param pk: Primary key of instance
:param search_field: search_field which will be updated
:param fields: fields from which we update the search_field
:param config: config of full text search
:param using: DB we are using
"""
if not search_field:
search_field = self.search_field
if not search_field:
return
if fields is None:
fields = self._fields
if not config:
config = self.config
if using is None:
using = self.db
connection = connections[using]
qn = connection.ops.quote_name
where_sql = ''
params = []
if pk is not None:
if isinstance(pk, (list, tuple)):
params = pk
else:
params = [pk]
where_sql = "WHERE %s IN (%s)" % (
qn(self.model._meta.pk.column),
','.join(repeat("%s", len(params)))
)
search_vector = self._get_search_vector(config, using, fields=fields, extra=extra)
sql = "UPDATE %s SET %s = %s %s;" % (
qn(self.model._meta.db_table),
qn(search_field),
search_vector or "''",
where_sql
)
with atomic():
cursor = connection.cursor()
cursor.execute(sql, params)
def _find_text_fields(self):
fields = [f for f in self.model._meta.fields
if isinstance(f, (models.CharField, models.TextField))]
return [(f.name, None) for f in fields]
def _parse_fields(self, fields):
"""
Parse fields list into a correct format needed by this manager.
If any field does not exist, raise ValueError.
"""
parsed_fields = set()
if fields is not None and isinstance(fields, (list, tuple)):
if len(fields) > 0 and isinstance(fields[0], (list, tuple)):
parsed_fields.update(fields)
else:
parsed_fields.update([(x, None) for x in fields])
# Does not support field.attname.
field_names = set(field.name for field in self.model._meta.fields if not field.primary_key)
non_model_fields = set(x[0] for x in parsed_fields).difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this"
" model: {0}".format(", ".join(x for x in non_model_fields)))
else:
parsed_fields.update(self._find_text_fields())
return parsed_fields
def _get_search_vector(self, configs, using, fields=None, extra=None):
if fields is None:
vector_fields = self._parse_fields(self._fields)
else:
vector_fields = self._parse_fields(fields)
if isinstance(configs, six.string_types[0]):
configs = [configs]
search_vector = []
for config in configs:
for field_name, weight in vector_fields:
search_vector.append(
self._get_vector_for_field(field_name, weight=weight, config=config, using=using, extra=extra)
)
return ' || '.join(search_vector)
def _get_vector_for_field(self, field_name, weight=None, config=None, using=None, extra=None):
if not weight:
weight = self.default_weight
if not config:
config = self.config
if using is None:
using = self.db
field = self.model._meta.get_field(field_name)
ret = None
if hasattr(self.model, '_convert_field_to_db'):
ret = self.model._convert_field_to_db(field, weight, config, using, extra=extra)
if ret is None:
ret = self._convert_field_to_db(field, weight, config, using, extra=extra)
return ret
@staticmethod
def _convert_field_to_db(field, weight, config, using, extra=None):
connection = connections[using]
qn = connection.ops.quote_name
return "setweight(to_tsvector('%s', coalesce(%s.%s, '')), '%s')" % \
(config, qn(field.model._meta.db_table), qn(field.column), weight)
class SearchQuerySet(QuerySet):
@property
def manager(self):
return self.model._fts_manager
@property
def db(self):
return self._db or self.manager.db
def search(self, query, rank_field=None, rank_function='ts_rank', config=None,
rank_normalization=32, raw=False, using=None, fields=None,
headline_field=None, headline_document=None):
'''
        Convert the query with to_tsquery or plainto_tsquery, depending on whether raw
        is `True` or `False`, and return a QuerySet with the filter applied.
        If `rank_field` is not `None`, a field with this name will be added
        containing the search rank of the instances, and the queryset will be
        ordered by it. The rank_function and normalization are explained here:
        http://www.postgresql.org/docs/9.1/interactive/textsearch-controls.html#TEXTSEARCH-RANKING
        If an empty query is given, no filter is made so the QuerySet will
        return all model instances.
        If `fields` is not `None`, the filter is made with these fields instead
        of the ones defined on the manager's constructor.
        If `headline_field` and `headline_document` are not `None`, a field with
        this `headline_field` name will be added containing the headline of the
        instances, which will be searched inside `headline_document`.
Search headlines are explained here:
http://www.postgresql.org/docs/9.1/static/textsearch-controls.html#TEXTSEARCH-HEADLINE
'''
if not config:
config = self.manager.config
db_alias = using if using is not None else self.db
connection = connections[db_alias]
qn = connection.ops.quote_name
qs = self
if using is not None:
qs = qs.using(using)
if query:
function = "to_tsquery" if raw else "plainto_tsquery"
ts_query = smart_text(
"%s('%s', %s)" % (function, config, adapt(query))
)
full_search_field = "%s.%s" % (
qn(self.model._meta.db_table),
qn(self.manager.search_field)
)
# if fields is passed, obtain a vector expression with
# these fields. In other case, intent use of search_field if
# exists.
if fields:
search_vector = self.manager._get_search_vector(config, using, fields=fields)
else:
if not self.manager.search_field:
raise ValueError("search_field is not specified")
search_vector = full_search_field
where = " (%s) @@ (%s)" % (search_vector, ts_query)
select_dict, order = {}, []
if rank_field:
select_dict[rank_field] = '%s(%s, %s, %d)' % (
rank_function,
search_vector,
ts_query,
rank_normalization
)
order = ['-%s' % (rank_field,)]
if headline_field is not None and headline_document is not None:
select_dict[headline_field] = "ts_headline('%s', %s, %s)" % (
config,
headline_document,
ts_query
)
qs = qs.extra(select=select_dict, where=[where], order_by=order)
return qs
class SearchManager(SearchManagerMixIn, models.Manager):
pass
|
bsd-3-clause
| -2,555,190,300,441,185,000 | 36.643454 | 117 | 0.60441 | false |
xjw1001001/IGCexpansion
|
IGCexpansion/Tree.py
|
1
|
20621
|
# A separate class to represent tree structure
# Xiang Ji
# xji3@ncsu.edu
from operator import itemgetter
from itertools import groupby
from Bio import Phylo
import networkx as nx
import os, sys
import numpy as np
from copy import deepcopy
from Common import *
class Tree:
def __init__(self, tree_newick, DupLosList, terminal_node_list, node_to_pos):
self.newicktree = tree_newick # newick tree file location
self.duploslist = DupLosList # duplication loss nodes file location
# Tree topology related variable
# The tree for this project is fixed, but keep the read-in feature anyway
self.phylo_tree = None # Phylo tree structure
self.tree_json = None # store the tree dictionary used for json likelihood package parsing
        self.edge_to_blen = None   # dictionary storing the unpacked tree branch length information {(node_from, node_to):blen}
        self.edge_list = None      # keeps all edges in the same order as x_rates
self.node_to_num = None # dictionary used for translating tree info from self.edge_to_blen to self.tree
self.num_to_node = None # dictionary used for translating tree info from self.tree to self.edge_to_blen
self.node_to_dup = dict() # used to keep which orlg numbers are new duplicates on each duplication node
# info for configurations
self.terminal_node_list = terminal_node_list
self.node_to_pos = node_to_pos
        self.node_to_conf = dict() # a dictionary storing the configuration at each node
        # Speciation node starts with N, Duplication node with D, Loss node with L
        self.dup_events = dict()   # a dictionary of duplication events: the key ortholog group gives birth to the two ortholog groups in the value list
self.n_orlg = 0 # number of ortholog groups
self.visited_DL_nodes = list()
self.n_js = None
self.get_tree()
def get_tree(self):
tree = Phylo.read( self.newicktree, "newick")
self.phylo_tree = tree.as_phyloxml(rooted = 'True')
self.add_duplos_nodes()
#set node number for nonterminal nodes and specify root node
self.get_tree_json()
# get process function is implemented in Func.py
# get node_to_conf
self.get_configurations()
# if 'Root' node is added as root node
# there must be a duplication node directly after it and the Root node has no outgroup
# delete the Root node after all configuration
if self.phylo_tree.root.name == 'Root':
assert(len(self.phylo_tree.root.clades) == 1)
print 'Now remove root node and start with first duplication node'
self.node_to_conf.pop(self.phylo_tree.root.name)
self.root_with_duplication(self.phylo_tree.root.clades[0].name)
# Warning:
# cannot remove root node if it has multiple paralogs
def get_tree_json(self):
tree_nx = Phylo.to_networkx(self.phylo_tree)
triples = [(u.name, v.name, d['weight']) for (u, v, d) in tree_nx.edges(data = True)] # data = True to have the blen as 'weight'
T = nx.DiGraph()
edge_to_blen = {}
for va, vb, blen in triples:
edge = (va, vb)
T.add_edge(*edge)
edge_to_blen[edge] = blen
self.edge_to_blen = edge_to_blen
# Now sort all nodes according to the degree where degree is as defined in graph theory
all_nodes = sorted(T.degree().items(), key = lambda node: node[1])
self.node_to_num = {n[0]:i for i, n in enumerate(all_nodes)}
self.num_to_node = {i:n[0] for i, n in enumerate(all_nodes)}
edge_list = sorted(edge_to_blen.keys())
# Now setup self.tree dictionary
tree_row = [self.node_to_num[na] for na, nb in edge_list]
tree_col = [self.node_to_num[nb] for na, nb in edge_list]
self.edge_list = edge_list
self.tree_json = dict(
row_nodes = tree_row,
column_nodes = tree_col,
#process = tree_process,
edge_rate_scaling_factors = np.ones(len(tree_row))
)
def get_tree_process(self, conf_list):
tree_process = []
for edge in self.edge_list:
parent_node, child_node = edge
conf = self.node_to_conf[parent_node]
tree_process.append(conf_list.index(conf))
self.tree_json['edge_processes'] = tree_process
def add_duplos_nodes(self):
assert(os.path.isfile(self.duploslist))
with open(self.duploslist, 'rb') as f:
for line in f:
items = line.split()
if items:
branch = items[0]
first_ = branch.find('_')
father_node_name = branch[:first_]
child_node_name = branch[(first_ + 1):]
if father_node_name == 'Root' and self.phylo_tree.root.name != 'Root':
self.add_root()
for add_node in items[1:]:
self.add_node(father_node_name, child_node_name, add_node)
father_node_name = add_node
def add_node(self, father_node_name, child_node_name, add_node):
child_clade = self.find_clade(child_node_name)
father_clade = self.find_parent_clade(child_node_name)
assert(father_clade.name == father_node_name)
new_clade = Phylo.BaseTree.Clade(name = add_node, clades = [child_clade])
father_clade.clades.remove(child_clade)
father_clade.clades.append(new_clade)
def add_root(self, add_node = 'Root'):
new_clade = Phylo.BaseTree.Clade(name = add_node, clades = [self.phylo_tree.root])
self.phylo_tree.root = new_clade
def root_with_duplication(self, node_name): # This function is restricted to change root location
assert(self.is_duplication_node(node_name))
duplication_clade = self.find_clade(node_name)
assert(duplication_clade in self.phylo_tree.root.clades)
if len(self.phylo_tree.root.clades) == 2:
duplication_clade.clades.extend([clade for clade in self.phylo_tree.root.clades if clade.name != node_name])
self.phylo_tree.root = duplication_clade
# update json tree
self.get_tree_json()
def update_tree(self):
for i in range(len(self.edge_list)):
node1 = self.num_to_node[self.tree_json['row_nodes'][i]]
node2 = self.num_to_node[self.tree_json['column_nodes'][i]]
self.tree_json['edge_rate_scaling_factors'][i] = self.edge_to_blen[(node1, node2)]
def unpack_x_rates(self, log_x_rates, Force_rates = None): # TODO: Change it to fit general tree structure rather than cherry tree
assert(len(log_x_rates) == len(self.edge_list))
x_rates = np.exp(log_x_rates)
        if Force_rates is not None:
for i in Force_rates.keys():
x_rates[i] = Force_rates[i]
assert(len(x_rates) == len(self.edge_to_blen))
for edge_it in range(len(self.edge_list)):
self.edge_to_blen[self.edge_list[edge_it]] = x_rates[edge_it]
self.update_tree()
def find_clade(self, clade_name):
hit_clades = list(self.phylo_tree.find_clades(clade_name))
assert(len(hit_clades) == 1)
return hit_clades[0]
def find_parent_clade(self, clade_name):
child_clade = self.find_clade(clade_name)
path = self.phylo_tree.get_path(child_clade)
if len(path) > 1:
father_clade = path[-2]
else:
father_clade = self.phylo_tree.root
return father_clade
def init_root_conf(self):
self.node_to_conf[self.phylo_tree.root.name] = [[0, 1]]
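        # Hedged note on the configuration format (inferred from how
        # divide_configuration() and the duplication/deletion handlers below
        # read it): each node configuration is a list of [orlg, status] pairs,
        # one per paralog position, where orlg is the ortholog-group id and
        # status is 1 for an extant ("extent") lineage and presumably 0 after a
        # loss. The root therefore starts with a single extant ortholog group.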
self.n_orlg = 1
def get_configurations_for_path(self, terminal_node_name, node_to_pos):
terminal_clade = self.find_clade(terminal_node_name)
path = self.phylo_tree.get_path(terminal_clade)
for clade in path:
if clade.name in self.node_to_conf:
continue
elif self.is_duplication_node(clade.name):
self.get_configuration_for_duplication_node(clade.name, node_to_pos[clade.name])
self.visited_DL_nodes.append(clade.name)
elif self.is_deletion_node(clade.name):
self.get_configuration_for_deletion_node(clade.name, node_to_pos[clade.name])
self.visited_DL_nodes.append(clade.name)
elif self.is_speciation_node(clade.name):
self.get_configuration_for_speciation_node(clade.name)
elif self.is_terminal_node(clade.name):
self.get_configuration_for_terminal_node(clade.name)
else:
print 'The node cannot be recognised!'
def get_configurations(self):
all_DP_nodes = [node for node in self.node_to_num if self.is_duplication_node(node) or self.is_deletion_node(node)]
assert(all([node in self.node_to_pos for node in all_DP_nodes]))
if not self.phylo_tree.root.name in self.node_to_conf:
self.init_root_conf()
for node in self.terminal_node_list:
self.get_configurations_for_path(node, self.node_to_pos)
assert(all([node in self.visited_DL_nodes for node in self.node_to_pos]))
assert(self.is_configurations_same_size())
self.n_js = len(self.node_to_conf[self.node_to_conf.keys()[0]])
def is_duplication_node(self, node_name):
return node_name[0] == 'D' and str.isdigit(node_name[1:])
def is_deletion_node(self, node_name):
return node_name[0] == 'L' and str.isdigit(node_name[1:])
def is_speciation_node(self, node_name):
return node_name[0] == 'N' and str.isdigit(node_name[1:])
def is_terminal_node(self, node_name):
return node_name in [clade.name for clade in self.phylo_tree.get_terminals()]
def get_configuration_for_terminal_node(self, node_name):
# A terminal node copies its configuration from its parent node
self.copy_configuration_from_parent(node_name)
def copy_configuration_from_parent(self, node_name):
parent_clade = self.find_parent_clade(node_name)
self.node_to_conf[node_name] = deepcopy(self.node_to_conf[parent_clade.name])
def get_configuration_for_speciation_node(self, node_name):
# A speciation node copies its configuration from its parent node
self.copy_configuration_from_parent(node_name)
def is_configurations_same_size(self):
return len(set([len(self.node_to_conf[node]) for node in self.node_to_conf])) == 1
def get_configuration_for_duplication_node(self, node_name, orlg_pos): # this is simplified version
# There are cases this function cannot handle
# For example, this duplication tree: ((3, 4)D2, (1, 2)D1)N0
assert(not node_name in self.node_to_conf)
parent_clade = self.find_parent_clade(node_name)
assert(parent_clade.name in self.node_to_conf)
old_configuration = self.node_to_conf[parent_clade.name]
ortho_group_to_pos = divide_configuration(old_configuration)
if type(orlg_pos) == int:
old_orlg = ortho_group_to_pos['loc'][orlg_pos]
assert(self.is_configurations_same_size())
if len(ortho_group_to_pos['extent'][old_orlg]) == 1:  # currently only handles the case where the ortholog group occupies a single position
pos = ortho_group_to_pos['extent'][old_orlg][0]
assert(self.node_to_conf[parent_clade.name][pos][1]) # only an extant lineage can give birth
# Step 1, replace old ortholog group with two new groups
new_orlg_1 = self.n_orlg
new_orlg_2 = self.n_orlg + 1
self.n_orlg += 2
self.dup_events[old_orlg] = [new_orlg_1, new_orlg_2]
self.node_to_dup[node_name] = [new_orlg_1, new_orlg_2]
# Step 2, update all other configurations
# They should be of same size as old_configuration
for node in self.node_to_conf:
self.node_to_conf[node].insert(pos, deepcopy(self.node_to_conf[node][pos])) # duplicate the parent position
# insert current node's configuration
new_configuration = deepcopy(old_configuration)
new_configuration[pos][0] = new_orlg_1
new_configuration[pos + 1][0] = new_orlg_2
self.node_to_conf[node_name] = new_configuration
else:
divided_positions = self.divide_positions(ortho_group_to_pos['extent'][old_orlg])
assert(len(divided_positions) == 1) # Now only consider one continuous representation
# TODO:implement more general case
pos_list = divided_positions[0]
# Step 1, replace old ortholog group with two new groups
new_orlg_1 = self.n_orlg
new_orlg_2 = self.n_orlg + 1
self.n_orlg += 2
self.dup_events[old_orlg] = [new_orlg_1, new_orlg_2]
self.node_to_dup[node_name] = [new_orlg_1, new_orlg_2]
# No need for Step 2: update all other configurations
# They should stay of the same size as old_configuration
# insert current node's configuration
new_configuration = deepcopy(old_configuration)
for i in range(len(pos_list)):
pos = pos_list[i]
if i < len(pos_list) / 2:
new_configuration[pos_list[i]][0] = new_orlg_1
else:
new_configuration[pos_list[i]][0] = new_orlg_2
self.node_to_conf[node_name] = new_configuration
elif type(orlg_pos) == list:
old_orlg_list = [ortho_group_to_pos['loc'][i] for i in orlg_pos]
sys.exit('TODO: Implement duplication events affecting multiple contiguous paralogs')
else:
sys.exit('Please check get_configuration_for_duplication_node() function in Tree class')
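# Hypothetical illustration (not in the original source): if the parent's
# configuration is [[0, 1]] and this duplication node acts on position 0, every
# stored configuration has position 0 duplicated in place and the new node
# receives [[1, 1], [2, 1]], where 1 and 2 are the freshly assigned ortholog
# groups recorded in dup_events and node_to_dup.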
def divide_positions(self, pos_list):
results = []
for k, g in groupby(enumerate(pos_list), lambda (i, x):i-x):
results.append(map(itemgetter(1), g))
return results
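# Hypothetical illustration (not in the original source): divide_positions groups
# consecutive positions into runs, e.g. [1, 2, 3, 7, 8] -> [[1, 2, 3], [7, 8]], so
# an ortholog group spanning several adjacent positions is treated as one block.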
def get_configuration_for_deletion_node(self, node_name, orlg_pos):
parent_clade = self.find_parent_clade(node_name)
assert(parent_clade.name in self.node_to_conf)
new_configuration = deepcopy(self.node_to_conf[parent_clade.name])
ortho_group_to_pos = divide_configuration(new_configuration)
deleted_orlg = ortho_group_to_pos['loc'][orlg_pos]
for pos in ortho_group_to_pos['extent'][deleted_orlg]:
assert(new_configuration[pos][1]) # the paralog should be alive before deletion
new_configuration[pos][1] = 0
self.node_to_conf[node_name] = new_configuration
def divide_configuration(self, configuration):
ortho_group_to_pos = dict(extent = {}, distinct = [], loc = [])
# extent positions that represent the same paralog (i.e. share an ortholog group number) have to be in the same state
# distinct positions don't change state, so only their positions are tracked
for pos in range(len(configuration)):
if configuration[pos][1] == 1: # extent
ortho_group = configuration[pos][0]
if ortho_group in ortho_group_to_pos['extent']:
ortho_group_to_pos['extent'][ortho_group].append(pos)
else:
ortho_group_to_pos['extent'][ortho_group] = [pos]
ortho_group_to_pos['loc'].append(ortho_group)
elif configuration[pos][1] == 0: # distinct
ortho_group_to_pos['distinct'].append(pos)
return ortho_group_to_pos
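# Hypothetical illustration (not in the original source): for the configuration
# [[0, 1], [1, 1], [1, 1], [2, 0]] this returns
# {'extent': {0: [0], 1: [1, 2]}, 'loc': [0, 1], 'distinct': [3]},
# i.e. positions grouped by living ortholog group plus the dead (distinct) positions.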
def __str__(self): # override for the print function
Phylo.draw_ascii(self.phylo_tree)
for node in sorted(self.node_to_conf.keys()):
print node, self.node_to_conf[node]
print
return 'Tree newick file: ' + self.newicktree + '\n' + \
'Tree duplos file: ' + self.duploslist + '\n'
if __name__ == '__main__':
wd = '/Users/xji3/GitFolders/xji3ST790/CourseProject/'
tree_newick = wd + 'sim_tree.newick'
DupLosList = wd + 'Sim_DupLost.txt'
terminal_node_list = ['Out', 'A', 'B']
node_to_pos = {'D1':0, 'D2':0}
tree = Tree(tree_newick, DupLosList, terminal_node_list, node_to_pos)
self = tree
tree.get_tree()
## tree_newick = '../test/PrimateTest.newick'
## DupLosList = '../test/PrimateTestDupLost.txt'
## tree = Phylo.read( tree_newick, "newick")
## terminal_node_list = ['Chinese_Tree_Shrew', 'Macaque', 'Olive_Baboon', 'Orangutan', 'Gorilla', 'Human']
## test = Tree(tree_newick, DupLosList)
## Phylo.draw_ascii(test.phylo_tree)
## self = test
#### father_node_name = 'N1'
#### child_node_name = 'N3'
#### test.unpack_x_rates(np.log([0.1] * len(test.edge_list)))
#### print test.edge_to_blen
#### print
####
#### test.init_root_conf()
#### terminal_node_name = 'Chinese_Tree_Shrew'
#### terminal_clade = self.find_clade(terminal_node_name)
#### path = self.phylo_tree.get_path(terminal_clade)
#### print path, test.is_duplication_node(path[0].name)
####
#### print test.node_to_conf
####
###### test.get_configuration_for_duplication_node('D0', 0)
###### print test.node_to_conf, test.dup_events
######
###### test.get_configuration_for_duplication_node('D5', 1)
###### print test.node_to_conf, test.dup_events
######
###### test.get_configuration_for_deletion_node('L0', 1)
###### print test.node_to_conf, test.dup_events
####
## node_to_pos = {'D1':0, 'D2':0, 'D3':1, 'D4':2, 'L1':2}
####
#### test.get_configurations_for_path('Chinese_Tree_Shrew', node_to_pos)
#### print test.node_to_conf, test.dup_events
####
#### test.get_configurations_for_path('Macaque', node_to_pos)
#### print test.node_to_conf, test.dup_events
##
## test.get_configurations(terminal_node_list, node_to_pos)
## print test.dup_events
## for i in test.node_to_conf:
## if i in terminal_node_list:
## print i, test.node_to_conf[i]
##
##
## tree_newick = '../test/YeastTree.newick'
## DupLosList = '../test/YeastTestDupLost.txt'
## terminal_node_list = ['kluyveri', 'castellii', 'bayanus', 'kudriavzevii', 'mikatae', 'paradoxus', 'cerevisiae']
## test = Tree(tree_newick, DupLosList)
## Phylo.draw_ascii(test.phylo_tree)
## node_to_pos = {'D1':0}
## test.get_configurations(terminal_node_list, node_to_pos)
##
## for i in test.node_to_conf:
## if i in terminal_node_list:
## print i, test.node_to_conf[i]
## tree_newick = '../test/Trigeneconv_ADH1Class_tree.newick'
## DupLosList = '../test/Trigeneconv_ADH_DupLost.txt'
## terminal_node_list = ['Baboon', 'Orangutan', 'Gorilla', 'Bonobo', 'Chimpanzee', 'Human']
## node_to_pos = {'D1':0, 'D2':0}
##
## test = Tree(tree_newick, DupLosList, terminal_node_list, node_to_pos)
##
## Phylo.draw_ascii(test.phylo_tree)
## self = test
## for node in test.node_to_conf:
## print node, test.node_to_conf[node]
## tree_newick = '../test/PrimateTree.newick'
## DupLosList = '../test/PrimateFullDupLost_P2.txt'
## terminal_node_list = ['Chinese_Tree_Shrew', 'Bushbaby', 'Mouse_Lemur',
## 'Tarsier', 'Marmoset', 'Vervet-AGM',
## 'Olive_Baboon', 'Macaque', 'Gibbon',
## 'Orangutan', 'Gorilla', 'Human']
## node_to_pos = {'D1':0, 'D2':0, 'D3':0, 'D4':0, 'D5':0}
## test = Tree(tree_newick, DupLosList, terminal_node_list, node_to_pos)
##
## Phylo.draw_ascii(test.phylo_tree)
## self = test
## test.get_configurations()
## for i in test.node_to_conf:
## print i, test.node_to_conf[i]
#
## test.root_with_duplication('D1')
## Phylo.draw_ascii(test.phylo_tree)
## for node in test.node_to_conf:
## print node, test.node_to_conf[node]
|
gpl-3.0
| -8,281,849,418,154,845,000 | 43.156317 | 168 | 0.593376 | false |
MostlyOpen/odoo_addons
|
myo_animal/models/address.py
|
1
|
2969
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import api, fields, models
class Address(models.Model):
_inherit = 'myo.address'
animal_ids = fields.One2many(
'myo.animal',
'address_id',
'Animals'
)
count_animals = fields.Integer(
'Number of Animals',
compute='_compute_count_animals',
store=True
)
count_selected_animals = fields.Integer(
'Number of Selected Animals',
compute='_compute_count_selected_animals',
store=True
)
trigger_compute = fields.Boolean(
'Trigger Compute',
help="When checked it will trigger the updte of storedcomputet fields.",
default=False
)
@api.depends('animal_ids')
def _compute_count_animals(self):
for r in self:
r.count_animals = len(r.animal_ids)
@api.depends('animal_ids', 'trigger_compute')
def _compute_count_selected_animals(self):
for r in self:
count_selected_animals = 0
for animal in r.animal_ids:
if animal.state == 'selected':
count_selected_animals += 1
r.count_selected_animals = count_selected_animals
r.trigger_compute = False
class Animal(models.Model):
_inherit = 'myo.animal'
address_id = fields.Many2one('myo.address', 'Address', ondelete='restrict')
animal_phone = fields.Char('Phone', related='address_id.phone')
mobile_phone = fields.Char('Mobile', related='address_id.mobile')
animal_email = fields.Char('Email', related='address_id.email')
address_code = fields.Char('Address Code', related='address_id.code', store=False)
address_is_residence = fields.Boolean('Address Is Residence', related='address_id.is_residence', store=True)
address_state = fields.Selection('Address Status', related='address_id.state', store=True)
address_user_id = fields.Char('Address Responsible', related='address_id.user_id.name', store=True)
address_category_ids = fields.Char('Address Categories', related='address_id.category_ids.name', store=True)
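# Usage note (not part of the original module): because count_selected_animals is a
# stored computed field that also depends on trigger_compute, writing
#   addresses.write({'trigger_compute': True})
# is a hypothetical way to force the count to be recomputed; the compute method
# then resets the flag to False.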
|
agpl-3.0
| 2,762,671,541,979,413,500 | 38.586667 | 112 | 0.634557 | false |
malramsay64/MD-Molecules-Hoomd
|
test/dynamics_test.py
|
1
|
8955
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Testing the dynamics module."""
import numpy as np
import quaternion
from hypothesis import HealthCheck, assume, given, settings
from hypothesis.extra.numpy import arrays
from hypothesis.strategies import floats
from statdyn.analysis import dynamics
from statdyn.analysis.read import process_gsd
from .math_helper_test import unit_quaternion
MAX_BOX = 20.
DTYPE = np.float32
EPS = 2*np.sqrt(np.finfo(DTYPE).eps)
HYP_DTYPE = DTYPE
def translationalDisplacement_reference(box: np.ndarray,
initial: np.ndarray,
final: np.ndarray,
) -> np.ndarray:
"""Simplified reference implementation for computing the displacement.
This computes the displacement using the shortest path from the original
position to the final position.
"""
result = np.empty(final.shape[0], dtype=final.dtype)
for index in range(len(result)):
temp = initial[index] - final[index]
for i in range(3):
if temp[i] > box[i]/2:
temp[i] -= box[i]
if temp[i] < -box[i]/2:
temp[i] += box[i]
result[index] = np.linalg.norm(temp, axis=0)
return result
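# Hypothetical worked example (not in the original source): with box = [20, 20, 20],
# initial = [9, 0, 0] and final = [-9, 0, 0], the raw separation is 18, but wrapping
# across the periodic boundary gives a displacement of 2.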
def rotationalDisplacement_reference(initial: np.ndarray,
final: np.ndarray,
) -> np.ndarray:
"""Simplified reference implementation of the rotational displacement."""
init = quaternion.as_quat_array(initial)[0]
fin = quaternion.as_quat_array(final)[0]
return quaternion.rotation_intrinsic_distance(init, fin)
@given(arrays(HYP_DTYPE, (10, 3), elements=floats(-MAX_BOX/4, MAX_BOX/4)),
arrays(HYP_DTYPE, (10, 3), elements=floats(-MAX_BOX/4, MAX_BOX/4)))
def test_translationalDisplacement_noperiod(init, final):
"""Test calculation of the translational displacement.
This test ensures that the result is close to the numpy.linalg.norm
function in the case where there is no periodic boundaries to worry
about.
"""
box = np.array([MAX_BOX, MAX_BOX, MAX_BOX], dtype=DTYPE)
np_res = np.linalg.norm(init-final, axis=1)
result = dynamics.translationalDisplacement(box, init, final)
ref_res = translationalDisplacement_reference(box, init, final)
print(result)
assert np.allclose(result, np_res, atol=EPS)
assert np.allclose(result, ref_res, atol=EPS)
@given(arrays(HYP_DTYPE, (10, 3), elements=floats(-MAX_BOX/2, -MAX_BOX/4-1e-5)),
arrays(HYP_DTYPE, (10, 3), elements=floats(MAX_BOX/4, MAX_BOX/2)))
def test_translationalDisplacement_periodicity(init, final):
"""Ensure the periodicity is calulated appropriately.
This is testing that periodic boundaries are identified appropriately.
"""
box = np.array([MAX_BOX, MAX_BOX, MAX_BOX], dtype=DTYPE)
np_res = np.square(np.linalg.norm(init-final, axis=1))
result = dynamics.translationalDisplacement(box, init, final)
ref_res = translationalDisplacement_reference(box, init, final)
assert np.all(np.logical_not(np.isclose(result, np_res)))
assert np.allclose(result, ref_res, atol=EPS)
@given(arrays(HYP_DTYPE, (10, 3), elements=floats(-MAX_BOX/2, MAX_BOX/2)),
arrays(HYP_DTYPE, (10, 3), elements=floats(-MAX_BOX/2, MAX_BOX/2)))
def test_translationalDisplacement(init, final):
"""Ensure the periodicity is calulated appropriately.
This is testing that periodic boundaries are identified appropriately.
"""
box = np.array([MAX_BOX, MAX_BOX, MAX_BOX], dtype=DTYPE)
result = dynamics.translationalDisplacement(box, init, final)
ref_res = translationalDisplacement_reference(box, init, final)
assert np.allclose(result, ref_res, atol=EPS)
@settings(suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.too_slow])
@given(unit_quaternion(), unit_quaternion())
def test_rotationalDisplacement(init, final):
"""Test the calculation of the rotationalDisplacement.
This compares the result of my algorithm to the quaternion library
which is much slower on arrays of values.
"""
assume(not np.any(np.isnan(init)))
assume(not np.any(np.isnan(final)))
result = dynamics.rotationalDisplacement(init, final)
ref_res = rotationalDisplacement_reference(init, final)
assert np.allclose(result, ref_res, equal_nan=True, atol=10e-2)
@given(arrays(HYP_DTYPE, (100), elements=floats(0, 10)))
def test_alpha(displacement):
"""Test the computation of the non-gaussian parameter."""
alpha = dynamics.alpha_non_gaussian(displacement)
assume(not np.isnan(alpha))
assert alpha >= -1
@given(arrays(HYP_DTYPE, (100), elements=floats(0, 10)),
arrays(HYP_DTYPE, (100), elements=floats(0, 2*np.pi)))
def test_overlap(displacement, rotation):
"""Test the computation of the overlap of the largest values."""
overlap_same = dynamics.mobile_overlap(rotation, rotation)
assert np.isclose(overlap_same, 1)
overlap = dynamics.mobile_overlap(displacement, rotation)
assert 0. <= overlap <= 1.
@given(arrays(HYP_DTYPE, (100), elements=floats(0, 10)),
arrays(HYP_DTYPE, (100), elements=floats(0, 2*np.pi)))
def test_spearman_rank(displacement, rotation):
"""Test the spearman ranking coefficient."""
spearman_same = dynamics.spearman_rank(rotation, rotation)
assert np.isclose(spearman_same, 1)
spearman = dynamics.spearman_rank(displacement, rotation)
assert -1 <= spearman <= 1
def test_dynamics():
process_gsd('test/data/trajectory-13.50-3.00.gsd')
def test_molecularRelaxation():
num_elements = 10
threshold = 0.4
tau = dynamics.molecularRelaxation(num_elements, threshold)
invalid_values = np.full(num_elements, tau._max_value, dtype=np.uint32)
def move(dist):
return np.ones(num_elements) * dist
# No motion
tau.add(1, move(0))
assert np.all(tau.get_status() == invalid_values)
# Small motion inside threshold
tau.add(2, move(threshold - 0.1))
assert np.all(tau.get_status() == invalid_values)
# Move outside threshold
tau.add(3, move(threshold + 0.1))
assert np.all(tau.get_status() == np.full(num_elements, 3))
# Move inside threshold
tau.add(4, move(threshold - 0.1))
assert np.all(tau.get_status() == np.full(num_elements, 3))
# Move outside threshold again
tau.add(4, move(threshold + 0.1))
assert np.all(tau.get_status() == np.full(num_elements, 3))
def test_lastMolecularRelaxation():
num_elements = 10
threshold = 0.4
tau = dynamics.lastMolecularRelaxation(num_elements, threshold, 1.)
invalid_values = np.full(num_elements, tau._max_value, dtype=np.uint32)
def move(dist):
return np.ones(num_elements) * dist
# No motion
tau.add(1, move(0))
assert np.all(tau.get_status() == invalid_values)
assert np.all(tau._status == invalid_values)
assert np.all(tau._state == np.zeros(num_elements, dtype=np.uint8))
# Move past threshold
tau.add(2, move(threshold + 0.1))
assert np.all(tau.get_status() == invalid_values)
assert np.all(tau._status == np.full(num_elements, 2))
assert np.all(tau._state == np.ones(num_elements, dtype=np.uint8))
# Move inside threshold
tau.add(3, move(threshold - 0.1))
assert np.all(tau.get_status() == invalid_values)
assert np.all(tau._status == np.full(num_elements, 2))
assert np.all(tau._state == np.zeros(num_elements, dtype=np.uint8))
# Move outside threshold again
tau.add(4, move(threshold + 0.1))
assert np.all(tau.get_status() == invalid_values)
assert np.all(tau._status == np.full(num_elements, 4))
assert np.all(tau._state == np.ones(num_elements, dtype=np.uint8))
# Move outside threshold again
tau.add(5, move(threshold + 0.2))
assert np.all(tau.get_status() == invalid_values)
assert np.all(tau._status == np.full(num_elements, 4))
# Move past irreversibility
tau.add(6, move(1.1))
assert np.all(tau.get_status() == np.full(num_elements, 4))
assert np.all(tau._status == np.full(num_elements, 4))
assert np.all(tau._state == np.ones(num_elements, dtype=np.uint8) * tau._is_irreversible)
# Move inside threshold
tau.add(7, move(threshold - 0.1))
assert np.all(tau.get_status() == np.full(num_elements, 4))
assert np.all(tau._status == np.full(num_elements, 4))
assert np.all(tau._state == np.ones(num_elements, dtype=np.uint8) * tau._is_irreversible)
# Move outside threshold, shouldn't update
tau.add(8, move(threshold + 0.1))
assert np.all(tau.get_status() == np.full(num_elements, 4))
assert np.all(tau._status == np.full(num_elements, 4))
assert np.all(tau._state == np.ones(num_elements, dtype=np.uint8) * tau._is_irreversible)
|
mit
| -6,883,469,322,326,091,000 | 37.102128 | 93 | 0.669868 | false |
gpocentek/python-gitlab
|
gitlab/v4/objects/jobs.py
|
1
|
7109
|
from gitlab import cli, utils
from gitlab import exceptions as exc
from gitlab.base import RESTManager, RESTObject
from gitlab.mixins import RefreshMixin, RetrieveMixin
__all__ = [
"ProjectJob",
"ProjectJobManager",
]
class ProjectJob(RefreshMixin, RESTObject):
@cli.register_custom_action("ProjectJob")
@exc.on_http_error(exc.GitlabJobCancelError)
def cancel(self, **kwargs):
"""Cancel the job.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabJobCancelError: If the job could not be canceled
"""
path = "%s/%s/cancel" % (self.manager.path, self.get_id())
return self.manager.gitlab.http_post(path)
@cli.register_custom_action("ProjectJob")
@exc.on_http_error(exc.GitlabJobRetryError)
def retry(self, **kwargs):
"""Retry the job.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabJobRetryError: If the job could not be retried
"""
path = "%s/%s/retry" % (self.manager.path, self.get_id())
return self.manager.gitlab.http_post(path)
@cli.register_custom_action("ProjectJob")
@exc.on_http_error(exc.GitlabJobPlayError)
def play(self, **kwargs):
"""Trigger a job explicitly.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabJobPlayError: If the job could not be triggered
"""
path = "%s/%s/play" % (self.manager.path, self.get_id())
self.manager.gitlab.http_post(path)
@cli.register_custom_action("ProjectJob")
@exc.on_http_error(exc.GitlabJobEraseError)
def erase(self, **kwargs):
"""Erase the job (remove job artifacts and trace).
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabJobEraseError: If the job could not be erased
"""
path = "%s/%s/erase" % (self.manager.path, self.get_id())
self.manager.gitlab.http_post(path)
@cli.register_custom_action("ProjectJob")
@exc.on_http_error(exc.GitlabCreateError)
def keep_artifacts(self, **kwargs):
"""Prevent artifacts from being deleted when expiration is set.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the request could not be performed
"""
path = "%s/%s/artifacts/keep" % (self.manager.path, self.get_id())
self.manager.gitlab.http_post(path)
@cli.register_custom_action("ProjectJob")
@exc.on_http_error(exc.GitlabCreateError)
def delete_artifacts(self, **kwargs):
"""Delete artifacts of a job.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the request could not be performed
"""
path = "%s/%s/artifacts" % (self.manager.path, self.get_id())
self.manager.gitlab.http_delete(path)
@cli.register_custom_action("ProjectJob")
@exc.on_http_error(exc.GitlabGetError)
def artifacts(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Get the job artifacts.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the artifacts could not be retrieved
Returns:
str: The artifacts if `streamed` is False, None otherwise.
"""
path = "%s/%s/artifacts" % (self.manager.path, self.get_id())
result = self.manager.gitlab.http_get(
path, streamed=streamed, raw=True, **kwargs
)
return utils.response_content(result, streamed, action, chunk_size)
@cli.register_custom_action("ProjectJob")
@exc.on_http_error(exc.GitlabGetError)
def artifact(self, path, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Get a single artifact file from within the job's artifacts archive.
Args:
path (str): Path of the artifact
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the artifacts could not be retrieved
Returns:
str: The artifacts if `streamed` is False, None otherwise.
"""
path = "%s/%s/artifacts/%s" % (self.manager.path, self.get_id(), path)
result = self.manager.gitlab.http_get(
path, streamed=streamed, raw=True, **kwargs
)
return utils.response_content(result, streamed, action, chunk_size)
@cli.register_custom_action("ProjectJob")
@exc.on_http_error(exc.GitlabGetError)
def trace(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Get the job trace.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the artifacts could not be retrieved
Returns:
str: The trace
"""
path = "%s/%s/trace" % (self.manager.path, self.get_id())
result = self.manager.gitlab.http_get(
path, streamed=streamed, raw=True, **kwargs
)
return utils.response_content(result, streamed, action, chunk_size)
class ProjectJobManager(RetrieveMixin, RESTManager):
_path = "/projects/%(project_id)s/jobs"
_obj_cls = ProjectJob
_from_parent_attrs = {"project_id": "id"}
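# A minimal usage sketch (not part of this module); the server URL, token, project
# path and job id below are hypothetical:
#
#   gl = gitlab.Gitlab("https://gitlab.example.com", private_token="...")
#   job = gl.projects.get("group/project").jobs.get(42)
#   job.retry()
#   with open("artifacts.zip", "wb") as fh:
#       job.artifacts(streamed=True, action=fh.write)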
|
lgpl-3.0
| -6,403,611,239,332,208,000 | 36.415789 | 85 | 0.617949 | false |
jpurma/Kataja
|
kataja/syntax/BaseFeature.py
|
1
|
7442
|
# coding=utf-8
""" ConfigurableFeature aims to be general implementation for a (syntactic) Feature """
# ############################################################################
#
# *** Kataja - Biolinguistic Visualization tool ***
#
# Copyright 2013 Jukka Purma
#
# This file is part of Kataja.
#
# Kataja is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Kataja is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kataja. If not, see <http://www.gnu.org/licenses/>.
#
# ############################################################################
from kataja.SavedField import SavedField
from kataja.SavedObject import SavedObject
class BaseFeature(SavedObject):
""" BaseFeatures are a simple feature implementation.
BaseFeatures have a name, which is used to distinguish between features.
What is generally understood as the value of a feature in the literature is split into
two parts here: sign and value.
The sign signifies whether the feature should be treated as unvalued,
valued, or in some other manner. Common signs are '=' or '-' to mark a feature as
unvalued, and '+' or '' for a valued feature. (I have parsers that
have >6 signs, e.g. to mark features that are unvalued but won't get satisfied by one valued
feature, or unvalued but whose host becomes head if satisfied, etc.)
The value expresses exclusive values that features give to their structures. The
semantic cases in Finnish are good examples of such values: cases like 'nom',
'ill', 'ine' and 'gen' are all values for
a feature named 'case'.
The family field can be used for additional classification of features, e.g.
phi-features, LF features etc.
Feature checking and interactions between features do not have to be
represented in the features themselves, but it often helps when inspecting or modifying a structure.
When creating a syntax model, one can link features by assigning another feature to the 'checks'
or 'checked_by' field.
Feature strength is included in the base feature properties, and a syntactic model can use it
if found necessary.
"""
simple_signs = ('+', '-', '=', '_', '≤', '≈', '~', '>', '*')
syntactic_object = True
role = "Feature"
short_name = "F"
editable = {}
addable = {}
def __init__(self, name='Feature', sign='', value=None, family='', checks=None,
checked_by=None, strong=False, parts=None, **kwargs):
super().__init__()
self.name = str(name)
self.value = value or ''
self.sign = sign
self.family = family
# status of being checked, checking something and being in use could be deduced online,
# based on feature's surroundings, but it is cheaper to store them.
self.checks = checks
self.checked_by = checked_by
self.used = False
# Feature strength was a concept in early minimalism, but I have repurposed it in my version
self.strong = strong
# If there are complex features, they should behave like constituents. Not used.
self.parts = parts or []
# It is useful to have a fast route from a feature to lexical element where it is used.
self.host = None
def has_value(self, prop):
return self.value == prop
def is_inactive(self):
return self.used
def can_satisfy(self):
return not (self.unvalued() or self.checks or self.checked_by)
def is_satisfied(self):
return self.unvalued() and self.checked_by
def is_needy(self):
return self.unvalued()
def unvalued(self):
return self.sign in '=≈-~_'
def would_satisfy(self, feature):
return (
isinstance(feature, BaseFeature) and
feature.is_needy() and
feature.name == self.name and
self.can_satisfy()
)
def check(self, other):
self.checks = other
other.checked_by = self
def __eq__(self, other):
if other and isinstance(other, BaseFeature):
return self.value == other.value and self.sign == other.sign and \
self.name == other.name and self.family == other.family
return False
def reset(self):
self.checks = None
self.checked_by = None
self.used = False
def copy(self, done=None):
if done is None:
done = {}
if self.uid in done:
return done[self.uid]
other = self.__class__(name=self.name, sign=self.sign, value=self.value,
family=self.family, strong=self.strong)
done[self.uid] = other
if self.checked_by:
other.checked_by = self.checked_by.copy(done=done)
if self.checks:
other.checks = self.checks.copy(done=done)
return other
def get_shape(self):
if not self.sign or self.sign == '*':
return 2, 2
elif self.sign == '-' or self.sign == '_':
return -2, 1
elif self.sign == '=':
return 1, -2
elif self.sign == '>':
return 1, -2
return -2, 2
def __str__(self):
s = '✓' if self.checks or self.is_satisfied() else ''
s += self.sign
fam = ':' + self.family if self.family else ''
val = ':' + self.value if self.value else ''
strong = '*' if self.strong else ''
return s + str(self.name) + val + fam + strong
def __repr__(self):
c = '✓' if self.checks or self.is_satisfied() else ''
s = [c + self.sign + str(self.name)]
if self.value or self.family:
s.append(str(self.value))
if self.family:
s.append(str(self.family))
if self.strong:
s.append('*')
return ":".join(s)
def __hash__(self):
return id(self)
# ############## #
# #
# Save support #
# #
# ############## #
name = SavedField("name")
value = SavedField("value")
strong = SavedField("strong")
sign = SavedField("sign")
family = SavedField("family")
checks = SavedField("checks")
checked_by = SavedField("checked_by")
parts = SavedField("parts")
host = SavedField("host")
used = SavedField("used")
@classmethod
def from_string(cls, s):
if not s:
return
if s[0] in cls.simple_signs:
sign = s[0]
name = s[1:]
else:
sign = ''
name = s
strong = False
if name.endswith('*'):
name = name[:-1]
strong = True
parts = name.split(':') # 'case:acc' -> name = 'case', subtype = 'acc'
name = parts[0]
value = parts[1] if len(parts) > 1 else ''
family = parts[2] if len(parts) > 2 else ''
print('using basefeature from_string ', s)
return cls(name, sign, value, family, strong=strong)
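# A hypothetical usage sketch (not part of the original module): an unvalued case
# feature is satisfied by checking it against a valued one.
#
#   needy = BaseFeature.from_string('=case')
#   valued = BaseFeature(name='case', sign='', value='acc')
#   if valued.would_satisfy(needy):
#       valued.check(needy)
#   repr(needy)   # -> '✓=case'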
|
gpl-3.0
| -1,221,267,077,624,129,500 | 35.253659 | 100 | 0.585441 | false |
facebookexperimental/eden
|
eden/integration/lib/edenclient.py
|
1
|
23827
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import json
import logging
import os
import pathlib
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
from pathlib import Path
from types import TracebackType
from typing import Any, Dict, List, Optional, Union, cast, TextIO
from eden.fs.cli import util
from eden.thrift.legacy import EdenClient, create_thrift_client
from facebook.eden.ttypes import MountState
from .find_executables import FindExe
# Two minutes is very generous, but 30 seconds is not enough for CI hosts
# and many-core machines under load.
EDENFS_START_TIMEOUT = 120
EDENFS_STOP_TIMEOUT = 240
class EdenFS(object):
"""Manages an instance of the EdenFS fuse server."""
_eden_dir: Path
_use_nfs: bool
def __init__(
self,
base_dir: Optional[Path] = None,
eden_dir: Optional[Path] = None,
etc_eden_dir: Optional[Path] = None,
home_dir: Optional[Path] = None,
logging_settings: Optional[Dict[str, str]] = None,
extra_args: Optional[List[str]] = None,
storage_engine: str = "memory",
use_nfs: bool = False,
) -> None:
"""
Construct a new EdenFS object.
By default, all of the state directories needed for the edenfs daemon will be
created under the directory specified by base_dir. If base_dir is not
specified, a temporary directory will be created. The temporary directory will
be removed when cleanup() or __exit__() is called on the EdenFS object.
Explicit locations for various state directories (eden_dir, etc_eden_dir,
home_dir) can also be given, if desired. For instance, this allows an EdenFS
object to be created for an existing eden state directory.
"""
if base_dir is None:
self._base_dir = Path(tempfile.mkdtemp(prefix="eden_test."))
self._cleanup_base_dir = True
else:
self._base_dir = base_dir
self._cleanup_base_dir = False
if eden_dir is None:
self._eden_dir = self._base_dir / "eden"
self._eden_dir.mkdir(exist_ok=True)
else:
self._eden_dir = eden_dir
if etc_eden_dir is None:
self._etc_eden_dir = self._base_dir / "etc_eden"
self._etc_eden_dir.mkdir(exist_ok=True)
else:
self._etc_eden_dir = etc_eden_dir
if home_dir is None:
self._home_dir = self._base_dir / "home"
self._home_dir.mkdir(exist_ok=True)
else:
self._home_dir = home_dir
self._storage_engine = storage_engine
self._logging_settings = logging_settings
self._extra_args = extra_args
self._use_nfs = use_nfs
self._process: Optional[subprocess.Popen] = None
@property
def eden_dir(self) -> Path:
return self._eden_dir
@property
def etc_eden_dir(self) -> Path:
return self._etc_eden_dir
@property
def home_dir(self) -> Path:
return self._home_dir
@property
def user_rc_path(self) -> Path:
return self._home_dir / ".edenrc"
@property
def system_rc_path(self) -> Path:
return self._etc_eden_dir / "edenfs.rc"
def __enter__(self) -> "EdenFS":
return self
def __exit__(
self, exc_type: type, exc_value: BaseException, tb: TracebackType
) -> bool:
self.cleanup()
return False
def cleanup(self) -> None:
"""Stop the instance and clean up its temporary directories"""
self.kill()
if self._cleanup_base_dir:
shutil.rmtree(self._base_dir, ignore_errors=True)
def kill(self) -> None:
"""Stops and unmounts this instance."""
process = self._process
if process is None or process.returncode is not None:
return
self.shutdown()
def get_thrift_client(self, timeout: Optional[float] = None) -> EdenClient:
return create_thrift_client(str(self._eden_dir), timeout=timeout)
def run_cmd(
self,
command: str,
*args: str,
cwd: Optional[str] = None,
capture_stderr: bool = False,
encoding: str = "utf-8",
) -> str:
"""
Run the specified eden command.
Args: The eden command name and any arguments to pass to it.
Usage example: run_cmd('mount', 'my_eden_client')
Throws a subprocess.CalledProcessError if eden exits unsuccessfully.
"""
cmd = self.get_eden_cli_args(command, *args)
try:
stderr = subprocess.STDOUT if capture_stderr else subprocess.PIPE
env = dict(os.environ)
# TODO(T37669726): Re-enable LSAN.
env["LSAN_OPTIONS"] = "detect_leaks=0:verbosity=1:log_threads=1"
completed_process = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=stderr,
check=True,
cwd=cwd,
env=env,
encoding=encoding,
)
except subprocess.CalledProcessError as ex:
# Re-raise our own exception type so we can include the error
# output.
raise EdenCommandError(ex) from None
return completed_process.stdout
def run_unchecked(
self, command: str, *args: str, **kwargs: Any
) -> subprocess.CompletedProcess:
"""Run the specified eden command.
Args: The eden command name and any arguments to pass to it.
Usage example: run_cmd('mount', 'my_eden_client')
Returns a subprocess.CompletedProcess object.
"""
cmd = self.get_eden_cli_args(command, *args)
return subprocess.run(cmd, **kwargs)
def get_eden_cli_args(self, command: str, *args: str) -> List[str]:
"""Combines the specified eden command args with the appropriate
defaults.
Args:
command: the eden command
*args: extra string arguments to the command
Returns:
A list of arguments to run Eden that can be used with
subprocess.Popen() or subprocess.check_call().
"""
cmd = [
FindExe.EDEN_CLI,
"--config-dir",
str(self._eden_dir),
"--etc-eden-dir",
str(self._etc_eden_dir),
"--home-dir",
str(self._home_dir),
]
cmd.append(command)
cmd.extend(args)
return cmd
def wait_for_is_healthy(self, timeout: float = EDENFS_START_TIMEOUT) -> bool:
process = self._process
assert process is not None
health = util.wait_for_daemon_healthy(
proc=process,
config_dir=self._eden_dir,
get_client=self.get_thrift_client,
timeout=timeout,
)
return health.is_healthy()
def start(
self,
timeout: float = EDENFS_START_TIMEOUT,
takeover_from: Optional[int] = None,
extra_args: Optional[List[str]] = None,
) -> None:
"""
Run "eden daemon" to start the eden daemon.
"""
use_gdb = False
if os.environ.get("EDEN_GDB"):
use_gdb = True
# Starting up under GDB takes longer than normal.
# Allow an extra 90 seconds (for some reason GDB can take a very
# long time to load symbol information, particularly on dynamically
# linked builds).
timeout += 90
takeover = takeover_from is not None
self.spawn_nowait(gdb=use_gdb, takeover=takeover, extra_args=extra_args)
process = self._process
assert process is not None
util.wait_for_daemon_healthy(
proc=process,
config_dir=self._eden_dir,
get_client=self.get_thrift_client,
timeout=timeout,
exclude_pid=takeover_from,
)
def get_extra_daemon_args(self) -> List[str]:
extra_daemon_args: List[str] = [
# Defaulting to 8 import processes is excessive when the test
# framework runs tests on each CPU core.
"--num_hg_import_threads",
"2",
"--local_storage_engine_unsafe",
self._storage_engine,
"--hgPath",
FindExe.HG_REAL,
]
privhelper = FindExe.EDEN_PRIVHELPER
if privhelper is not None:
extra_daemon_args.extend(["--privhelper_path", privhelper])
if "SANDCASTLE" in os.environ:
extra_daemon_args.append("--allowRoot")
# Turn up the VLOG level for the fuse server so that errors are logged
# with an explanation when they bubble up to RequestData::catchErrors
logging_settings = self._logging_settings
if logging_settings:
logging_arg = ",".join(
"%s=%s" % (module, level)
for module, level in sorted(logging_settings.items())
)
extra_daemon_args.extend(["--logging=" + logging_arg])
extra_args = self._extra_args
if extra_args:
extra_daemon_args.extend(extra_args)
return extra_daemon_args
def spawn_nowait(
self,
gdb: bool = False,
takeover: bool = False,
extra_args: Optional[List[str]] = None,
) -> None:
"""
Start edenfs but do not wait for it to become healthy.
"""
if self._process is not None:
raise Exception("cannot start an already-running eden client")
args = self.get_eden_cli_args(
"daemon", "--daemon-binary", FindExe.EDEN_DAEMON, "--foreground"
)
extra_daemon_args = self.get_extra_daemon_args()
if extra_args:
extra_daemon_args.extend(extra_args)
if takeover:
args.append("--takeover")
# If the EDEN_GDB environment variable is set, run eden inside gdb
# so a developer can debug crashes
if os.environ.get("EDEN_GDB"):
gdb_exit_handler = (
"python gdb.events.exited.connect("
"lambda event: "
'gdb.execute("quit") if getattr(event, "exit_code", None) == 0 '
"else False"
")"
)
gdb_args = [
# Register a handler to exit gdb if the program finishes
# successfully.
"-ex",
gdb_exit_handler,
# Start the program immediately when gdb starts
"-ex",
"run",
]
args.append("--gdb")
for arg in gdb_args:
args.append("--gdb-arg=" + arg)
if "EDEN_DAEMON_ARGS" in os.environ:
args.extend(shlex.split(os.environ["EDEN_DAEMON_ARGS"]))
full_args = args + ["--"] + extra_daemon_args
logging.info(
"Invoking eden daemon: %s", " ".join(shlex.quote(arg) for arg in full_args)
)
process = subprocess.Popen(
full_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
# TODO(T69605343): Until TPX properly knows how to redirect writes done
# to filedescriptors directly, we need to manually redirect EdenFS logs
# to sys.std{out,err}.
def redirect_stream(input_stream: TextIO, output_stream: TextIO) -> None:
while True:
line = input_stream.readline()
if line == "":
input_stream.close()
return
output_stream.write(line)
threading.Thread(
target=redirect_stream, args=(process.stdout, sys.stdout), daemon=True
).start()
threading.Thread(
target=redirect_stream, args=(process.stderr, sys.stderr), daemon=True
).start()
self._process = process
def shutdown(self) -> None:
"""
Run "eden shutdown" to stop the eden daemon.
"""
process = self._process
assert process is not None
# Before shutting down, get the current pid. This may differ from process.pid when
# edenfs is started with sudo.
daemon_pid = util.check_health(
self.get_thrift_client, self.eden_dir, timeout=30
).pid
# Run "edenfsctl stop" with a timeout of 0 to tell it not to wait for the EdenFS
# process to exit. Since we are running it directly (self._process) we will
# need to wait on it. Depending on exactly how it is being run the process may
# not go away until we wait on it.
self.run_cmd("stop", "-t", "0")
self._process = None
try:
return_code = process.wait(timeout=EDENFS_STOP_TIMEOUT)
except subprocess.TimeoutExpired:
# EdenFS did not exit normally on its own.
if can_run_sudo() and daemon_pid is not None:
os.kill(daemon_pid, signal.SIGKILL)
else:
process.kill()
process.wait(timeout=10)
raise Exception(
f"edenfs did not shutdown within {EDENFS_STOP_TIMEOUT} seconds; "
"had to send SIGKILL"
)
if return_code != 0:
raise Exception(
"eden exited unsuccessfully with status {}".format(return_code)
)
def restart(self) -> None:
self.shutdown()
self.start()
def get_pid_via_thrift(self) -> int:
with self.get_thrift_client() as client:
return client.getDaemonInfo().pid
def graceful_restart(self, timeout: float = EDENFS_START_TIMEOUT) -> None:
old_process = self._process
assert old_process is not None
# Get the process ID of the old edenfs process.
# Note that this is not necessarily self._process.pid, since the eden
# CLI may have spawned eden using sudo, and self._process may refer to
# a sudo parent process.
old_pid = self.get_pid_via_thrift()
self._process = None
self.start(timeout=timeout, takeover_from=old_pid)
# Check the return code from the old edenfs process
return_code = old_process.wait()
if return_code != 0:
raise Exception(
"eden exited unsuccessfully with status {}".format(return_code)
)
def run_takeover_tool(self, cmd: List[str]) -> None:
old_process = self._process
assert old_process is not None
subprocess.check_call(cmd)
self._process = None
return_code = old_process.wait()
if return_code != 0:
raise Exception(
f"eden exited unsuccessfully with status {return_code} "
"after a fake takeover stop"
)
def stop_with_stale_mounts(self) -> None:
"""Stop edenfs without unmounting any of its mount points.
This will leave the mount points mounted but no longer connected to a FUSE
daemon. Attempts to access files or directories inside the mount will fail with
an ENOTCONN error after this.
"""
cmd: List[str] = [FindExe.TAKEOVER_TOOL, "--edenDir", str(self._eden_dir)]
self.run_takeover_tool(cmd)
def fake_takeover_with_version(self, version: int) -> None:
"""
Execute a fake takeover to explicitly test downgrades and make sure
output is as expected. Right now, this is used as a sanity check to
make sure we don't crash.
"""
cmd: List[str] = [
FindExe.TAKEOVER_TOOL,
"--edenDir",
str(self._eden_dir),
"--takeoverVersion",
str(version),
]
self.run_takeover_tool(cmd)
def takeover_without_ping_response(self) -> None:
"""
Execute a fake takeover to explicitly test a failed takeover. The
takeover client does not send a ping with the nosendPing flag,
so the subprocess call will throw, and we expect the old process
to recover
"""
cmd: List[str] = [
FindExe.TAKEOVER_TOOL,
"--edenDir",
str(self._eden_dir),
"--noshouldPing",
]
try:
subprocess.check_call(cmd)
except Exception:
# We expect the new process to fail starting.
pass
def list_cmd(self) -> Dict[str, Dict[str, Any]]:
"""
Executes "eden list --json" to list the Eden checkouts and returns the result as
a dictionary.
"""
data = self.run_cmd("list", "--json")
return cast(Dict[str, Dict[str, Any]], json.loads(data))
def list_cmd_simple(self) -> Dict[str, str]:
"""
Executes "eden list --json" to list the Eden checkouts and returns the result in
a simplified format that can be more easily used in test case assertions.
The result is a dictionary of { mount_path: status }
The status is a string containing one of the MountState names, or "NOT_RUNNING"
if the mount is not running. If the mount is known to the running edenfs
instance but not listed in the configuration file, " (unconfigured)" will be
appended to the status string.
"""
results: Dict[str, str] = {}
for path, mount_info in self.list_cmd().items():
status_str = mount_info["state"]
if not mount_info["configured"]:
status_str += " (unconfigured)"
results[path] = status_str
return results
def get_mount_state(
self, mount: pathlib.Path, client: Optional[EdenClient] = None
) -> Optional[MountState]:
"""
Query edenfs over thrift for the state of the specified mount.
Returns the MountState enum, or None if edenfs does not currently know about
this mount path.
"""
if client is None:
with self.get_thrift_client() as client:
return self.get_mount_state(mount, client)
else:
for entry in client.listMounts():
entry_path = pathlib.Path(os.fsdecode(entry.mountPoint))
if entry_path == mount:
return entry.state
return None
def clone(
self,
repo: str,
path: Union[str, os.PathLike],
allow_empty: bool = False,
nfs: bool = False,
) -> None:
"""
Run "eden clone"
"""
params = ["clone", repo, str(path)]
if allow_empty:
params.append("--allow-empty-repo")
if nfs or self._use_nfs:
params.append("--nfs")
self.run_cmd(*params)
def remove(self, path: str) -> None:
"""
Run "eden remove <path>"
"""
self.run_cmd("remove", "--yes", path)
def in_proc_mounts(self, mount_path: str) -> bool:
"""Gets all eden mounts found in /proc/mounts, and returns
true if this eden instance exists in list.
"""
mount_path_bytes = mount_path.encode()
with open("/proc/mounts", "rb") as f:
return any(
mount_path_bytes == line.split(b" ")[1]
for line in f.readlines()
if util.is_edenfs_mount_device(line.split(b" ")[0])
)
def is_healthy(self) -> bool:
"""Executes `eden health` and returns True if it exited with code 0."""
cmd_result = self.run_unchecked("health")
return cmd_result.returncode == 0
def set_log_level(self, category: str, level: str) -> None:
with self.get_thrift_client() as client:
client.setOption("logging", f"{category}={level}")
def client_dir_for_mount(self, mount_path: pathlib.Path) -> pathlib.Path:
client_link = mount_path / ".eden" / "client"
return pathlib.Path(util.readlink_retry_estale(str(client_link)))
def overlay_dir_for_mount(self, mount_path: pathlib.Path) -> pathlib.Path:
return self.client_dir_for_mount(mount_path) / "local"
def mount(self, mount_path: pathlib.Path) -> None:
self.run_cmd("mount", "--", str(mount_path))
def unmount(self, mount_path: pathlib.Path) -> None:
self.run_cmd("unmount", "--", str(mount_path))
class EdenCommandError(subprocess.CalledProcessError):
def __init__(self, ex: subprocess.CalledProcessError) -> None:
super().__init__(ex.returncode, ex.cmd, output=ex.output, stderr=ex.stderr)
def __str__(self) -> str:
cmd_str = " ".join(shlex.quote(arg) for arg in self.cmd)
return "edenfsctl command returned non-zero exit status %d\n\nCommand:\n[%s]\n\nStderr:\n%s" % (
self.returncode,
cmd_str,
self.stderr,
)
_can_run_eden: Optional[bool] = None
_can_run_fake_edenfs: Optional[bool] = None
_can_run_sudo: Optional[bool] = None
def can_run_eden() -> bool:
"""
Determine if we can run eden.
This is used to determine if we should even attempt running the
integration tests.
"""
global _can_run_eden
can_run = _can_run_eden
if can_run is None:
can_run = _compute_can_run_eden()
_can_run_eden = can_run
return can_run
def can_run_fake_edenfs() -> bool:
"""
Determine if we can run the fake_edenfs helper program.
This is similar to can_run_eden(), but does not require FUSE.
"""
global _can_run_fake_edenfs
can_run = _can_run_fake_edenfs
if can_run is None:
can_run = _compute_can_run_eden(require_fuse=False)
_can_run_fake_edenfs = can_run
return can_run
def _compute_can_run_eden(require_fuse: bool = True) -> bool:
if sys.platform == "win32":
# On Windows ProjectedFS must be installed.
# Our CMake configure step checks for the availability of ProjectedFSLib.lib
# so that we can link against ProjectedFS at build time, but this doesn't
# guarantee that ProjectedFS.dll is available.
projfs_dll = r"C:\Windows\system32\ProjectedFSLib.dll"
return os.path.exists(projfs_dll)
# FUSE must be available
if require_fuse and not os.path.exists("/dev/fuse"):
return False
# We must be able to start eden as root.
if os.geteuid() == 0:
return True
# The daemon must either be setuid root, or we must have sudo privileges.
# Typically for the tests the daemon process is not setuid root,
# so check whether we are able to run things under sudo.
return can_run_sudo()
def can_run_sudo() -> bool:
global _can_run_sudo
can_run = _can_run_sudo
if can_run is None:
can_run = _compute_can_run_sudo()
_can_run_sudo = can_run
return can_run
def _compute_can_run_sudo() -> bool:
if sys.platform == "win32":
return False
cmd = ["/usr/bin/sudo", "-E", "/bin/true"]
with open("/dev/null", "r") as dev_null:
# Close stdout, stderr, and stdin, and call setsid() to make
# sure we are detached from any controlling terminal. This makes
# sure that sudo can't prompt for a password if it needs one.
# sudo will only succeed if it can run with no user input.
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=dev_null,
preexec_fn=os.setsid,
)
process.communicate()
return process.returncode == 0
|
gpl-2.0
| -8,283,212,565,230,958,000 | 32.990014 | 104 | 0.578713 | false |
Vierkantor/RSwail
|
rswail/ast.py
|
1
|
4421
|
from rswail.bytecode import Instruction
from rswail.closure import Closure
from rswail.cons_list import cons_list, from_list, to_list
from rswail.struct import Struct, StructInstance, construct
from rswail.value import Integer, String, Value
statement = Struct(u"statement", {
u"declaration": [u"header", u"name", u"args", u"body"],
u"expression": [u"expr"],
})
expression = Struct(u"expression", {
u"name_access": [u"name"],
u"apply": [u"func", u"args"],
u"base_value": [u"value"],
})
def expr_name_access(name):
return construct(expression, u"name_access", name)
def expr_base_value(value):
assert isinstance(value, Value)
return construct(expression, u"base_value", value)
def expr_apply(function, args):
assert args.member.parent is cons_list
return construct(expression, u"apply", function, args)
def expr_from_int(value):
assert isinstance(value, int)
return expr_base_value(Integer.from_int(value))
def stmt_declaration(header, name, args, body):
return construct(statement, u"declaration", header, name, args, body)
def stmt_expression(expr):
return construct(statement, u"expression", expr)
def compile_statement(program, block_id, stmt, closure):
"""Add code to implement the statement to the given block.
After a statement is executed, the stack should not have changed, except
exactly one new value is now on top.
Returns the block id that any code after this statement should append to.
"""
assert isinstance(stmt, StructInstance)
if stmt.member.name == u"declaration":
(header, name, args, body) = stmt.values
header_expr = expr_name_access(header)
# convert all the arguments to base values so we can call with them
name_expr = expr_base_value(name)
args_expr = expr_base_value(args)
body_expr = expr_base_value(body)
call_expr = expr_apply(header_expr, from_list([name_expr, args_expr, body_expr]))
# run the header against the AST
block_id = compile_expression(program, block_id, call_expr, closure)
# store it as a name
program.add_instruction(block_id, Instruction.DUP, 1)
assert isinstance(name, String)
name_id = program.add_name(block_id, name.value)
program.add_instruction(block_id, Instruction.STORE_LOCAL, name_id)
closure.make_bound(name.value)
# return value of the statement is whatever we just stored
return block_id
elif stmt.member.name == u"expression":
(expr,) = stmt.values
# return value is the value of the expression
return compile_expression(program, block_id, expr, closure)
else: # pragma: no cover
raise NotImplementedError
def compile_expression(program, block_id, expr, closure):
"""Add code to implement the expression to the given block.
After an expression is executed, the stack should not have changed, except
exactly one new value is now on top.
Returns the block id that any code after this expression should append to.
"""
if expr.member.name == u"name_access":
# get the root and all its attributes
(name,) = expr.values
assert name.member is cons_list.members[u"cons"]
root, tail = name.values
assert isinstance(root, String)
root_name = root.value
assert isinstance(root_name, unicode)
# load the root
closure.make_used(root_name)
root_id = program.add_name(block_id, root_name)
assert isinstance(root_id, int)
program.add_instruction(block_id, Instruction.LOAD_LOCAL, root_id)
# load its attributes
while tail.member is cons_list.members[u"cons"]:
attr, tail = tail.values
assert isinstance(attr, String)
attr_id = program.add_name(block_id, attr.value)
program.add_instruction(block_id, Instruction.LOAD_ATTR, attr_id)
assert tail.member is cons_list.members[u"empty"]
return block_id
elif expr.member.name == u"apply":
(function_expr, arg_exprs) = expr.values
block_id = compile_expression(program, block_id, function_expr, closure)
arg_expr_list = to_list(arg_exprs)
for arg_expr in arg_expr_list:
block_id = compile_expression(program, block_id, arg_expr, closure)
program.add_instruction(block_id, Instruction.CALL, len(arg_expr_list))
# create the next block to return to
next_block = program.make_next_block(block_id)
return next_block
elif expr.member.name == u"base_value":
(value,) = expr.values
value_id = program.add_constant(block_id, value)
program.add_instruction(block_id, Instruction.PUSH_CONST, value_id)
return block_id
else: # pragma: no cover
raise NotImplementedError
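# Hypothetical illustration (not in the original source): the bare statement `42` can
# be built as stmt_expression(expr_from_int(42)); compile_statement then emits a single
# PUSH_CONST for it, leaving exactly one new value on the stack.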
|
gpl-3.0
| -4,315,598,364,805,932,500 | 36.466102 | 83 | 0.733092 | false |
springload/madewithwagtail
|
core/snippets.py
|
1
|
4962
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel
from wagtail.wagtailcore.models import Orderable
from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
class LinkFields(models.Model):
"""
Represents a link to an external page, a document or a fellow page
"""
link_external = models.URLField(
"External link",
blank=True,
null=True,
help_text='Set an external link if you want the link to point somewhere outside the CMS.'
)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
on_delete=models.SET_NULL,
blank=True,
related_name='+',
help_text='Choose an existing page if you want the link to point somewhere inside the CMS.'
)
link_document = models.ForeignKey(
'wagtaildocs.Document',
null=True,
on_delete=models.SET_NULL,
blank=True,
related_name='+',
help_text='Choose an existing document if you want the link to open a document.'
)
link_email = models.EmailField(
blank=True,
null=True,
help_text='Set the recipient email address if you want the link to send an email.'
)
link_phone = models.CharField(
max_length=20,
blank=True,
null=True,
help_text='Set the number if you want the link to dial a phone number.'
)
@property
def link(self):
if self.link_page:
return self.link_page.url
elif self.link_external:
return self.link_external
elif self.link_document:
return self.link_document.url
elif self.link_email:
return 'mailto:%s' % self.link_email
elif self.link_phone:
return 'tel:%s' % self.link_phone.strip()
else:
return "#"
panels = [
MultiFieldPanel([
PageChooserPanel('link_page'),
FieldPanel('link_external'),
DocumentChooserPanel('link_document'),
FieldPanel('link_email'),
FieldPanel('link_phone'),
],
"Link"
),
]
class Meta:
abstract = True
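# Illustrative note (not part of the original model): LinkFields.link resolves
# the first populated field in the order page, external URL, document, email,
# phone, and falls back to "#". For example, a hypothetical instance with only
# link_email="hello@example.com" set would yield "mailto:hello@example.com".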
@python_2_unicode_compatible
class MenuElement(LinkFields):
explicit_name = models.CharField(
max_length=64,
blank=True,
null=True,
help_text='If you want a different name than the page title.'
)
short_name = models.CharField(
max_length=32,
blank=True,
null=True,
help_text='If you need a custom name for responsive devices.'
)
css_class = models.CharField(
max_length=255,
blank=True,
null=True,
verbose_name="CSS Class",
help_text="Optional styling for the menu item"
)
icon_class = models.CharField(
max_length=255,
blank=True,
null=True,
verbose_name="Icon Class",
help_text="In case you need an icon element <i> for the menu item"
)
@property
def title(self):
if self.explicit_name:
return self.explicit_name
elif self.link_page:
return self.link_page.title
elif self.link_document:
return self.link_document.title
else:
return None
@property
def url(self):
return self.link
def __str__(self):
if self.explicit_name:
title = self.explicit_name
elif self.link_page:
title = self.link_page.title
else:
title = ''
return "%s ( %s )" % (title, self.short_name)
class Meta:
verbose_name = "Menu item"
panels = LinkFields.panels + [
FieldPanel('explicit_name'),
FieldPanel('short_name'),
FieldPanel('css_class'),
FieldPanel('icon_class'),
]
class NavigationMenuMenuElement(Orderable, MenuElement):
parent = ParentalKey(to='core.NavigationMenu', related_name='menu_items')
class NavigationMenuManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(menu_name=name)
@register_snippet
@python_2_unicode_compatible
class NavigationMenu(ClusterableModel):
objects = NavigationMenuManager()
menu_name = models.CharField(max_length=255, null=False, blank=False)
@property
def items(self):
return self.menu_items.all()
def __str__(self):
return self.menu_name
class Meta:
verbose_name = "Navigation menu"
NavigationMenu.panels = [
FieldPanel('menu_name', classname='full title'),
InlinePanel('menu_items', label="Menu Items", help_text='Set the menu items for the current menu.')
]
|
mit
| 5,090,397,503,307,461,000 | 27.033898 | 105 | 0.615881 | false |
42cc/p2psafety
|
p2psafety_django/events/tests/test_views.py
|
1
|
6230
|
import mock
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from tastypie.test import ResourceTestCase
from .helpers.mixins import UsersMixin
from .helpers.factories import EventFactory
from ..models import Event, EventUpdate
from .. import jabber
from users.tests.helpers import UserFactory
class ViewsTestCase(UsersMixin, TestCase):
def test_events_map(self):
url = reverse('events:map')
self.assertEqual(self.client.get(url).status_code, 302)
self.login_as(self.events_granted_user)
self.assertEqual(self.client.get(url).status_code, 200)
class MapTestCase(UsersMixin, ResourceTestCase):
def test_add_eventupdate_ok(self):
event = EventFactory(user=self.user)
url = reverse('events:map_add_eventupdate')
self.login_as_superuser()
data = dict(event_id=event.id, text='test')
resp = self.api_client.post(url, data=data)
self.assertValidJSONResponse(resp)
self.assertTrue(self.deserialize(resp)['success'])
def test_add_eventupdate_errors(self):
user, operator = self.user, self.superuser
event = EventFactory(user=user)
url = reverse('events:map_add_eventupdate')
valid_data = dict(event_id=event.id, text='test')
# No permissions
self.login_as_user()
self.assertHttpForbidden(self.api_client.post(url, data=valid_data))
self.login_as_superuser()
# No text
data = dict(event_id=event.id)
self.assertHttpBadRequest(self.api_client.post(url, data=data))
# Invalid id
data = dict(valid_data, event_id='test')
self.assertHttpBadRequest(self.api_client.post(url, data=data))
# No such event
data = dict(valid_data, event_id=event.id + 1)
self.assertHttpNotFound(self.api_client.post(url, data=data))
def test_close_event_ok(self):
event = EventFactory(user=self.user)
url = reverse('events:map_close_event')
self.login_as_superuser()
resp = self.api_client.post(url, data=dict(event_id=event.id))
event = Event.objects.get(id=event.id)
self.assertValidJSONResponse(resp)
self.assertTrue(self.deserialize(resp)['success'])
self.assertEqual(event.status, event.STATUS_FINISHED)
def test_close_event_errors(self):
user, operator = self.user, self.superuser
event = EventFactory(user=user)
url = reverse('events:map_close_event')
valid_data = dict(event_id=event.id)
# No permissions
self.login_as_user()
self.assertHttpForbidden(self.api_client.post(url, data=valid_data))
self.login_as_superuser()
# Invalid id
data = dict(valid_data, event_id='test')
self.assertHttpBadRequest(self.api_client.post(url, data=data))
# No such event
data = dict(event_id=event.id + 1)
self.assertHttpNotFound(self.api_client.post(url, data=data))
@mock.patch('events.views.jabber')
def test_notify_supporters_ok(self, mock_jabber):
user, operator = self.user, self.superuser
event = EventFactory(user=user)
url = reverse('events:map_notify_supporters')
data = dict(event_id=event.id)
self.login_as_superuser()
# Without radius
resp = self.api_client.post(url, data=data)
self.assertValidJSONResponse(resp)
self.assertTrue(self.deserialize(resp)['success'])
mock_jabber.notify_supporters.assert_called_once_with(event, radius=None)
mock_jabber.notify_supporters.reset_mock()
# With radius
data['radius'] = 123
resp = self.api_client.post(url, data=data)
self.assertValidJSONResponse(resp)
self.assertTrue(self.deserialize(resp)['success'])
mock_jabber.notify_supporters.assert_called_once_with(event, radius=123)
def test_notify_supporters_errors(self):
user, operator = self.user, self.superuser
event = EventFactory(user=user)
url = reverse('events:map_notify_supporters')
valid_data = dict(event_id=event.id, radius=123)
# No permissions
self.login_as_user()
self.assertHttpForbidden(self.api_client.post(url, data=valid_data))
self.login_as_superuser()
# Invalid id
data = dict(valid_data, event_id='test')
self.assertHttpBadRequest(self.api_client.post(url, data=data))
# Invalid radius
data = dict(valid_data, radius='test')
self.assertHttpBadRequest(self.api_client.post(url, data=data))
# No such event
data = dict(valid_data, event_id=event.id + 1)
self.assertHttpNotFound(self.api_client.post(url, data=data))
def test_create_test_event_ok(self):
url = reverse('events:map_create_test_event')
users_count, events_count = User.objects.count(), Event.objects.count()
eventupdates_count = EventUpdate.objects.count()
data = dict(longitude=1.2, latitude=2.3)
self.login_as_superuser()
self.assertHttpOK(self.api_client.post(url, data=data))
self.assertEqual(User.objects.count(), users_count + 1)
self.assertEqual(Event.objects.count(), events_count + 1)
self.assertEqual(EventUpdate.objects.count(), eventupdates_count + 1)
last_update = EventUpdate.objects.latest()
self.assertNotEqual(last_update.text, '')
self.assertEqual(last_update.location.x, 1.2)
self.assertEqual(last_update.location.y, 2.3)
def test_create_test_event_errors(self):
url = reverse('events:map_create_test_event')
# No permissions
self.login_as_user()
self.assertHttpForbidden(self.api_client.post(url))
self.login_as_superuser()
# Bad request method
self.assertHttpMethodNotAllowed(self.api_client.get(url))
# Invalid data
data = dict(longitude=1)
self.assertHttpBadRequest(self.api_client.post(url, data=data))
data = dict(longitude='asd', latitude='dsa')
self.assertHttpBadRequest(self.api_client.post(url, data=data))
|
apache-2.0
| -4,318,638,684,456,884,000 | 35.011561 | 81 | 0.653933 | false |
munin/munin
|
munin/mod/raw.py
|
1
|
1806
|
"""
Loadable subclass
"""
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This module doesn't have anything alliance specific.
# qebab, 24/6/08.
import re
from munin import loadable
class raw(loadable.loadable):
def __init__(self, cursor):
super().__init__(cursor, 1000)
self.paramre = re.compile(r"^\s*(.*)")
self.usage = self.__class__.__name__ + ""
def execute(self, user, access, irc_msg):
m = self.paramre.search(irc_msg.command_parameters)
if not m:
irc_msg.reply("Usage: %s" % (self.usage,))
return 0
irc_command = m.group(1)
if access < self.level:
irc_msg.reply("You do not have enough access to send raw commands")
return 0
print("%s sent raw '%s'" % (user, irc_command))
irc_msg.client.wline(irc_command)
irc_msg.reply("Sent raw command '%s'" % (irc_command,))
return 1
|
gpl-2.0
| 4,926,598,926,973,294,000 | 31.25 | 79 | 0.67165 | false |
erp12/pyshgp
|
pyshgp/gp/evaluation.py
|
1
|
7737
|
"""The :mod:`evaluation` module defines classes to evaluate program CodeBlocks."""
from abc import ABC, abstractmethod
from typing import Sequence, Union, Callable
from collections import defaultdict
import numpy as np
import pandas as pd
from pyshgp.push.interpreter import PushInterpreter, Program
from pyshgp.tap import tap
from pyshgp.utils import Token
def damerau_levenshtein_distance(a: Union[str, Sequence], b: Union[str, Sequence]) -> int:
"""Damerau-Levenshtein Distance that works for both strings and lists.
https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance.
This implementation is heavily inspired by the implementation in the
jellyfish package. https://github.com/jamesturk/jellyfish
"""
a_is_str = isinstance(a, str)
b_is_str = isinstance(b, str)
if a_is_str or b_is_str:
assert a_is_str and b_is_str
len1 = len(a)
len2 = len(b)
infinite = len1 + len2
da = defaultdict(int)
score = [[0] * (len2 + 2) for x in range(len1 + 2)]
score[0][0] = infinite
for i in range(0, len1 + 1):
score[i + 1][0] = infinite
score[i + 1][1] = i
for i in range(0, len2 + 1):
score[0][i + 1] = infinite
score[1][i + 1] = i
for i in range(1, len1 + 1):
db = 0
for j in range(1, len2 + 1):
i1 = da[b[j - 1]]
j1 = db
cost = 1
if a[i - 1] == b[j - 1]:
cost = 0
db = j
score[i + 1][j + 1] = min(score[i][j] + cost,
score[i + 1][j] + 1,
score[i][j + 1] + 1,
score[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1))
da[a[i - 1]] = i
return score[len1 + 1][len2 + 1]
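# Minimal usage sketch (illustrative values, not part of the original module):
#   damerau_levenshtein_distance("abc", "abd")          -> 1  (one substitution)
#   damerau_levenshtein_distance("cape", "cpae")        -> 1  (one transposition)
#   damerau_levenshtein_distance([1, 2, 3], [2, 1, 3])  -> 1  (lists work too)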
class Evaluator(ABC):
"""Base class or evaluators.
Parameters
----------
interpreter : PushInterpreter, optional
PushInterpreter used to run program and get their output. Default is
an interpreter with the default configuration and all core instructions
registered.
penalty : float, optional
When a program's output cannot be evaluated on a particular case, the
        penalty error is assigned. Default is 1e6.
verbosity_config : Optional[VerbosityConfig] (default = None)
A VerbosityConfig controlling what is logged during evaluation.
Default is no verbosity.
"""
def __init__(self,
interpreter: PushInterpreter = "default",
penalty: float = 1e6):
self.penalty = penalty
if interpreter == "default":
self.interpreter = PushInterpreter()
else:
self.interpreter = interpreter
def default_error_function(self, actuals, expecteds) -> np.array:
"""Produce errors of actual program output given expected program output.
The default error function is intended to be a universal error function
for Push programs which only output a subset of the standard data types.
Parameters
----------
actuals : list
The values produced by running a Push program on a sequences of cases.
expecteds: list
The ground truth values for the sequence of cases used to produce the actuals.
Returns
-------
np.array
An array of error values describing the program's performance.
"""
errors = []
for ndx, actual in enumerate(actuals):
expected = expecteds[ndx]
if actual is Token.no_stack_item:
errors.append(self.penalty)
elif isinstance(expected, (bool, np.bool_)):
errors.append(int(not (bool(actual) == expected)))
elif isinstance(expected, (int, np.int64, float, np.float64)):
try:
errors.append(abs(float(actual) - expected))
except OverflowError:
errors.append(self.penalty)
elif isinstance(expected, str):
errors.append(damerau_levenshtein_distance(str(actual), expected))
elif isinstance(expected, list):
errors += list(self.default_error_function(list(actual), expected))
else:
raise ValueError("Unknown expected type for {e}".format(e=expected))
return np.array(errors)
@tap
@abstractmethod
def evaluate(self, program: Program) -> np.ndarray:
"""Evaluate the program and return the error vector.
Parameters
----------
program
Program (CodeBlock of Push code) to evaluate.
Returns
-------
np.ndarray
The error vector of the program.
"""
pass
class DatasetEvaluator(Evaluator):
"""Evaluator driven by a labeled dataset."""
def __init__(self,
X, y,
interpreter: PushInterpreter = "default",
penalty: float = 1e6):
"""Create Evaluator based on a labeled dataset. Inspired by sklearn.
Parameters
----------
X : list, array-like, or pandas dataframe of shape = [n_samples, n_features]
The inputs to evaluate each program on.
y : list, array-like, or pandas dataframe.
The target values. Shape = [n_samples] or [n_samples, n_outputs]
interpreter : PushInterpreter or {"default"}
The interpreter used to run the push programs.
penalty : float
If no response is given by the program on a given input, assign this
error as the error.
"""
super().__init__(interpreter, penalty)
self.X = pd.DataFrame(X)
self.y = pd.DataFrame(y)
@tap
def evaluate(self, program: Program) -> np.array:
"""Evaluate the program and return the error vector.
Parameters
----------
program
Program (CodeBlock of Push code) to evaluate.
Returns
-------
np.ndarray
The error vector of the program.
"""
super().evaluate(program)
errors = []
for ndx in range(self.X.shape[0]):
inputs = self.X.iloc[ndx].to_list()
expected = self.y.iloc[ndx].to_list()
actual = self.interpreter.run(program, inputs)
errors.append(self.default_error_function(actual, expected))
return np.array(errors).flatten()
class FunctionEvaluator(Evaluator):
"""Evaluator driven by an error function."""
def __init__(self, error_function: Callable):
"""Create Evaluator driven by an error function.
The given error function must take a push program in the form of a
CodeBlock and then return an np.ndarray of numeric errors. These errors
will be used as the program's error vector.
        The error function will typically instantiate its own PushInterpreter
and run the given program as needed.
Parameters
----------
error_function : Callable
A function which takes a program to evaluate and returns a
np.ndarray of errors.
"""
super().__init__()
self.error_function = error_function
@tap
def evaluate(self, program: Program) -> np.ndarray:
"""Evaluate the program and return the error vector.
Parameters
----------
program
Program (CodeBlock of Push code) to evaluate.
Returns
-------
np.ndarray
The error vector of the program.
"""
super().evaluate(program)
return self.error_function(program)
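# Minimal usage sketch (hypothetical error function, added for illustration):
# FunctionEvaluator simply delegates to the callable it wraps.
#
#   def my_error_function(program):
#       interpreter = PushInterpreter()
#       outputs = [interpreter.run(program, [i]) for i in range(5)]
#       return np.array([0 if out else 1 for out in outputs])
#
#   evaluator = FunctionEvaluator(my_error_function)
#   # evaluator.evaluate(program) returns whatever my_error_function returns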
|
mit
| -1,309,455,749,332,372,500 | 31.783898 | 90 | 0.575158 | false |
mypopydev/bluez
|
client/bt601.py
|
1
|
2013
|
# Using bt601 with Python
import pexpect
import time
import socket
import sys
import os
import syslog
UNIX_SER="/tmp/ud_bluetooth_main"
# function to transform hex string like "0a" into signed integer
def hexStrToInt(hexstr):
val = int(hexstr[0:2],16)
val = (val * 6.0 - 20)/100.0
return val
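# Example (illustrative): hexStrToInt("0a") takes the first two hex digits
# (10) and scales them: (10 * 6.0 - 20) / 100.0 == 0.4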
def sendMessage(message):
s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
s.connect(UNIX_SER)
s.send(message)
syslog.syslog("message " + str(message))
s.close()
def bt601Conn(DEVICE):
syslog.syslog("address: " + str(DEVICE))
# Run gatttool interactively.
syslog.syslog("Run gatttool...")
gatt = pexpect.spawn("gatttool -I")
# Connect to the device.
syslog.syslog("Connecting to " + str(DEVICE))
gatt.sendline("connect {0}".format(DEVICE))
gatt.expect("Connection successful", timeout=5)
syslog.syslog("Connected!")
def bt601GetVal(DEVICE):
syslog.syslog("address: " + str(DEVICE))
# Run gatttool interactively.
syslog.syslog("Run gatttool...")
gatt = pexpect.spawn("gatttool -I")
# Connect to the device.
try:
syslog.syslog("Connecting to " + str(DEVICE))
gatt.sendline("connect {0}".format(DEVICE))
gatt.expect("Connection successful", timeout=10)
syslog.syslog("Connected!")
except pexpect.TIMEOUT:
syslog.syslog("Conneccting time out!")
sys.exit(1);
#os._exit(1)
try:
gatt.expect("Notification handle = 0x0012 value: 03 ", timeout=45)
gatt.expect("\r\n", timeout=10)
syslog.syslog("Value: " + str(gatt.before))
syslog.syslog("Value 12: " + str(gatt.before[33:35]) + str(hexStrToInt(gatt.before[33:35])))
sendMessage("BT601 " + str(DEVICE) + " VALUE " + str(hexStrToInt(gatt.before[33:35])))
except pexpect.TIMEOUT:
syslog.syslog("Get value time out!")
sys.exit(1);
#os._exit(1)
#print(float(hexStrToInt(child.before[0:5]))/100),
address = sys.argv[1]
bt601GetVal(address)
|
gpl-2.0
| 7,865,876,888,992,136,000 | 27.757143 | 100 | 0.641331 | false |
FrodeSolheim/fs-uae-launcher
|
amitools/fs/block/FileHeaderBlock.py
|
1
|
3464
|
import time
from .Block import Block
from .EntryBlock import EntryBlock
from .CommentBlock import CommentBlock
from ..ProtectFlags import ProtectFlags
from ..TimeStamp import *
from ..FSString import FSString
class FileHeaderBlock(EntryBlock):
def __init__(self, blkdev, blk_num, is_longname):
EntryBlock.__init__(self, blkdev, blk_num, is_type=Block.T_SHORT, is_sub_type=Block.ST_FILE, is_longname=is_longname)
def set(self, data):
self._set_data(data)
self._read()
def read(self):
self._read_data()
self._read()
def _read(self):
Block.read(self)
if not self.valid:
return False
# FileHeader fields
self.own_key = self._get_long(1)
self.block_count = self._get_long(2)
self.first_data = self._get_long(4)
# read (limited) data blocks table
bc = self.block_count
mbc = self.blkdev.block_longs - 56
if bc > mbc:
bc = mbc
self.data_blocks = []
for i in range(bc):
self.data_blocks.append(self._get_long(-51-i))
self.protect = self._get_long(-48)
self.protect_flags = ProtectFlags(self.protect)
self.byte_size = self._get_long(-47)
self._read_nac_modts()
self.hash_chain = self._get_long(-4)
self.parent = self._get_long(-3)
self.extension = self._get_long(-2)
self.valid = (self.own_key == self.blk_num)
return self.valid
def write(self):
Block._create_data(self)
self._put_long(1, self.own_key)
self._put_long(2, self.block_count)
self._put_long(4, self.first_data)
# data blocks
for i in range(len(self.data_blocks)):
self._put_long(-51-i, self.data_blocks[i])
self._put_long(-48, self.protect)
self._put_long(-47, self.byte_size)
self._write_nac_modts()
self._put_long(-4, self.hash_chain)
self._put_long(-3, self.parent)
self._put_long(-2, self.extension)
Block.write(self)
def create(self, parent, name, data_blocks, extension, byte_size=0, protect=0, comment=None, mod_ts=None, hash_chain=0):
Block.create(self)
self.own_key = self.blk_num
n = len(data_blocks)
self.block_count = n
if n == 0:
self.first_data = 0
else:
self.first_data = data_blocks[0]
self.data_blocks = data_blocks
self.protect = protect
self.protect_flags = ProtectFlags(self.protect)
self.byte_size = byte_size
if comment is None:
self.comment = FSString()
else:
assert isinstance(comment, FSString)
self.comment = comment
self.mod_ts = mod_ts
assert isinstance(name, FSString)
self.name = name
self.hash_chain = hash_chain
self.parent = parent
self.extension = extension
self.valid = True
return True
def dump(self):
Block.dump(self,"FileHeader")
print(" own_key: %d" % self.own_key)
print(" blk_cnt: %d" % self.block_count)
print(" first_data: %d" % self.first_data)
if self.data_blocks != None:
print(" data blks: %s #%d" % (self.data_blocks, len(self.data_blocks)))
pf = ProtectFlags(self.protect)
print(" protect: 0x%x 0b%s %s" % (self.protect, pf.bin_str(), pf))
print(" byte_size: %d" % self.byte_size)
print(" comment: '%s'" % self.comment)
print(" mod_ts: %s" % self.mod_ts)
print(" name: '%s'" % self.name)
print(" hash_chain: %d" % self.hash_chain)
print(" parent: %d" % self.parent)
print(" extension: %d" % self.extension)
|
gpl-2.0
| 4,448,644,119,338,782,700 | 28.364407 | 122 | 0.618649 | false |
timsavage/denim
|
denim/scaffold/__init__.py
|
1
|
1859
|
# -*- encoding:utf8 -*-
try:
from jinja2 import Environment, PackageLoader
except ImportError:
raise ImportError('Scaffolding support requires the Jinja 2 templating library to be installed.')
template_environment = Environment(loader=PackageLoader('denim.scaffold'))
def single(template_file, output_name, context):
"""
Generate a single file.
:param template_file:
:param output_name:
:param context:
:return:
"""
template = template_environment.get_template(template_file)
print template.render(**context)
def environment(template_file, output_name, context):
"""
Generate multiple files based on the from the env list.
:param template_file:
:param output_name:
:param context:
:return:
"""
envs = context.env
for env in envs:
context['env'] = env
single(template_file, output_name, context)
# Name: (Template, Target, Generation method, Required parameters)
SCAFFOLDS = {
'nginx': ('nginx.conf.txt', 'conf/nginx/%(env)s.conf', environment, ('env', )),
'django.fabric': ('django/fabfile.py.txt', 'fabfile.py', single, ('env', ('scm', 'hg'))),
'django.supervisor': ('django/supervisor.conf.txt', 'conf/supervisor.conf', single, None),
}
def generate_scaffold(scaffold_code):
scaffold = SCAFFOLDS.get(scaffold_code)
if not scaffold:
raise NotImplementedError('This scaffold does not exist')
#template = template_environment.get_template('django/fabfile.py.txt')
#context = {
# 'deploy_scm': 'git',
# 'deployment_envs': [{
# 'name': 'production',
# 'hosts': ['192.168.0.1', '192.168.0.2',]
# }, {
# 'name': 'staging',
# 'hosts': ['192.168.1.1', '192.168.1.2',]
# }, {
# 'name': 'development',
# 'hosts': ['127.0.0.1',]
# }]
#}
#print template.render(**context)
|
bsd-2-clause
| -3,425,635,520,187,386,400 | 26.338235 | 101 | 0.629909 | false |
JanTkacik/nao-puzzle-solver
|
nao-module.py
|
1
|
2502
|
import sys
from naoqi import ALProxy
# To get the constants relative to the video.
import vision_definitions
import cv2
import numpy as np
import imageresolve.puzzlesolver.solver as slv
import imageextractor.imageextractor as ext
import imageresolve.puzzlesolver.model.puzzle as pzl
import imageresolve.puzzlesolver.model.piece as pcl
if __name__ == '__main__':
#10.10.48.252
IP = "10.10.48.252"
PORT = 9559
CameraID = 1
# Read IP address from first argument if any.
if len(sys.argv) > 1:
IP = sys.argv[1]
# Read CameraID from second argument if any.
if len(sys.argv) > 2:
CameraID = int(sys.argv[2])
videoProxy = ALProxy("ALVideoDevice", IP, PORT)
tts = ALProxy("ALTextToSpeech", IP, PORT)
posture = ALProxy("ALRobotPosture", IP, PORT)
posture.goToPosture("Crouch", 1.0)
tts.setLanguage("English")
    resolution = vision_definitions.k4VGA # 1280 * 960
colorSpace = vision_definitions.kRGBColorSpace
imgClient = videoProxy.subscribe("_client", resolution, colorSpace, 5)
# Select camera.
videoProxy.setParam(vision_definitions.kCameraSelectID, CameraID)
alImage = videoProxy.getImageRemote(imgClient)
videoProxy.unsubscribe(imgClient)
if alImage == None:
tts.say("I cannot see anything! Am I blind?")
print "Cannot retreive image from NAO"
else:
tts.say("OK, let me see this puzzle")
nparr = np.fromstring(alImage[6], np.uint8).reshape(alImage[1], alImage[0], alImage[2])
img = cv2.cvtColor(nparr, cv2.COLOR_BGR2RGB)
cv2.imwrite("Test.jpg", img)
output = ext.extract(img)
i = 0
pieces = []
for out in output:
pieces.append(pcl.Piece(out, i))
i += 1
if len(pieces) == 0:
tts.say("Oh intresting, but I cannot see any puzzle piece")
else:
tts.say("I have found {0} puzzles".format(len(pieces)))
if len(pieces) == 6:
tts.say("OK, I will try to solve it")
puzzle = pzl.Puzzle(pieces, img, 1, 2)
sol = slv.solve(puzzle)
print sol
cv2.imwrite("Result.jpg", puzzle.getvideoimage2(sol))
cv2.imwrite("Result2.jpg", puzzle.getvideoimage())
tts.say("Puzzle solved!")
else:
tts.say("Sorry, I cannot solve this puzzle")
#posture.goToPosture("Sit", 1.0)
|
mit
| -8,515,521,001,751,319,000 | 31.921053 | 95 | 0.605116 | false |
Ayase-252/waife-crawler
|
crawler/yandere/yandere.py
|
1
|
4275
|
"""
Yandere Crawler
"""
from requests import ConnectTimeout, get
from file_logger.file_logger import FileLogger
from crawler.crawler import Crawler
from crawler.selector import Selector
from request.request_async import AsyncRequestScheduler
from crawler.yandere.handler import QueryPageHandler
from crawler.yandere.parser import parse_detail_page
from crawler.yandere.selector import safe_selector, score_selector_factory
from crawler.configuration import Configuration
from conf import FILE_DESTINATION
class YandereCrawler(Crawler):
"""
Yandere Crawler
Configuration can be done by passing object carrying configuration to
constructor.
"""
def __init__(self, **kwargs):
"""
Acceptable parameters:
page_limit The max amount of pages being crawled
"""
if 'page_limit' in kwargs:
self._page_limit = kwargs['page_limit']
else:
self._page_limit = 10
if 'score_filter' in kwargs:
self._score_filter = kwargs['score_filter']
else:
self._score_filter = 70
self.request_scheduler = AsyncRequestScheduler(2000)
# TODO: refactor
def run(self, **kwargs):
"""
Runs the crawler
"""
base_url = r'https://yande.re/post'
qualified_pictures = []
file_logger = FileLogger('yandere.log')
# Prepare Selector
selector = Selector()
selector.add_normal_selector(safe_selector)
selector.add_normal_selector(
score_selector_factory(self._score_filter)
)
query_page_handler = QueryPageHandler(selector)
# Parse Query Page
for page_no in range(1, self._page_limit + 1):
try:
print('Requesting to page ' + str(page_no))
text = self.request_scheduler.get(base_url, params={
'page': page_no
}).text
new_qualified = query_page_handler(text)
print(str(len(new_qualified)) + ' pictures are added to '
'pending queue.')
qualified_pictures += new_qualified
except ConnectTimeout:
print('Connection to page ' + str(page_no) + ' timed out. '
                      'Please retry in stable network environment.')
# Parse download link and download it
for qualified_picture in qualified_pictures:
id_ = qualified_picture['id']
url = qualified_picture['detail url']
try:
if not file_logger.is_in(id_):
print('Requesting to page {}'.format(url))
text = self.request_scheduler.get(url).text
links = self._parse_detail_page(text, url)
print('Downloading picture {0}'.format(id_))
self._download(links, id_)
print('\nDownloaded.')
file_logger.add(id_)
except ConnectTimeout:
print('Connection timed out. '
                      'Please retry in stable network environment.')
def _download(self, parsed_links, id_):
"""
Download picture based on parsed_links
"""
type_codes = ['png', 'jpeg']
type_suffix = {
'png': '.png',
'jpeg': '.jpg'
}
for type_ in type_codes:
if type_ in parsed_links:
self.request_scheduler.download(
parsed_links[type_],
'yandere-' + str(id_) + type_suffix[type_],
Configuration.get_file_destination()
)
break
def _parse_detail_page(self, text, url):
"""
Wrapper of process to parsing detail page
"""
try:
links = parse_detail_page(text)
return links
except RuntimeError:
print('=' * 25)
print('Parsing Error: Please report an issue in '
'https://github.com/Ayase-252/waife-crawler/issues with '
'following message.')
print('URL: {}'.format(url))
print('=' * 25)
raise RuntimeError('Parse Error: {}'.format(url))
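# Minimal usage sketch (illustrative; the surrounding project wires this up
# through its own entry point):
#   crawler = YandereCrawler(page_limit=5, score_filter=80)
#   crawler.run()
# run() walks up to page_limit query pages, keeps posts passing the safe and
# score selectors, and downloads each picture not already recorded in
# yandere.log.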
|
mit
| -1,782,669,152,618,254,800 | 33.756098 | 75 | 0.550175 | false |
Ichimonji10/robottelo
|
robottelo/ui/hosts.py
|
1
|
8600
|
"""Utilities to manipulate hosts via UI."""
from robottelo.ui.base import Base
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
class Hosts(Base):
"""Provides the CRUD functionality for Host."""
def _configure_hosts_parameters(self, parameters_list):
"""Provide configuration capabilities for host entity generic
properties.
All values should be passed in absolute correspondence to UI. For
example, we need to choose a value from 'Lifecycle environment' select
list from 'Host' tab and input root password in corresponding field
from 'Operating System' tab, so next parameter list should be passed::
[
['Host', 'Lifecycle environment', 'Library'],
['Operating System', 'Root password', 'mynewpassword123'],
]
"""
for tab_name, parameter_name, parameter_value in parameters_list:
tab_locator = tab_locators['.tab_'.join((
'host',
(tab_name.lower()).replace(' ', '_')
))]
param_locator = locators['.'.join((
'host',
(parameter_name.lower()).replace(' ', '_')
))]
self.click(tab_locator)
if parameter_name == 'Reset Puppet Environment':
self.click(param_locator)
continue
# send_keys() can't send left parenthesis (see
# SeleniumHQ/selenium#674), which is used in compute resource name
# (e.g. 'test (Libvirt)')
elif parameter_name == 'Deploy on' and ' (' in parameter_value:
self.click(param_locator)
# typing compute resource name without parenthesis part
self.text_field_update(
common_locators['select_list_search_box'],
parameter_value.split(' (')[0]
)
strategy, value = common_locators['entity_select_list']
# selecting compute resource by its full name (with parenthesis
# part)
self.click((strategy, value % parameter_value))
continue
self.assign_value(param_locator, parameter_value)
def _configure_interface_parameters(self, parameters_list):
"""Provide configuration capabilities for host entity interface
All values should be passed in absolute correspondence to UI. For
example, we need to choose a value from 'Domain' select list and input
MAC address in corresponding field, so next parameter list should be
passed::
[
['Domain', host.domain.name],
['MAC address', '16:76:20:06:d4:c0'],
]
"""
for parameter_name, parameter_value in parameters_list:
param_locator = locators['.interface_'.join((
'host',
(parameter_name.lower()).replace(' ', '_')
))]
self.assign_value(param_locator, parameter_value)
self.click(locators['host.save_interface'])
def _configure_puppet_modules(self, puppet_modules_list):
"""Provide configuration capabilities for host entity puppet classes
tab.
"""
self.click(tab_locators['host.tab_puppet_classes'])
strategy1, value1 = locators['host.select_puppetmodule']
strategy2, value2 = locators['host.select_puppetclass']
for puppet_module in puppet_modules_list:
self.click((strategy1, value1 % puppet_module))
self.click((strategy2, value2 % puppet_module))
def _add_host_parameters(self, parameters_list):
"""Add new host parameters for 'parameters' tab. Example::
host_parameters=[['test01', 'value01'], ['test02', 'value02'],
['test03', 'value03']]
"""
self.click(tab_locators['host.tab_params'])
strategy1, value1 = locators['host.host_parameter_name']
strategy2, value2 = locators['host.host_parameter_value']
index = 1
for parameter_name, parameter_value in parameters_list:
self.click(locators['host.add_new_host_parameter'])
self.text_field_update((strategy1, value1 % index), parameter_name)
self.text_field_update(
(strategy2, value2 % index), parameter_value)
index += 1
def create(self, name, parameters_list=None, puppet_classes=None,
interface_parameters=None, host_parameters=None, ):
"""Creates a host."""
self.click(locators['host.new'])
self.text_field_update(locators['host.name'], name)
if parameters_list is not None:
self._configure_hosts_parameters(parameters_list)
if puppet_classes is not None:
self._configure_puppet_modules(puppet_classes)
if interface_parameters:
self.click(tab_locators['host.tab_interfaces'])
self.click(locators['host.edit_default_interface'])
self._configure_interface_parameters(interface_parameters)
if host_parameters:
self._add_host_parameters(host_parameters)
self.wait_until_element_is_not_visible(
common_locators['modal_background'])
self.click(common_locators['submit'])
def update(self, name, domain_name, new_name=None, parameters_list=None,
puppet_classes=None, interface_parameters=None,
host_parameters=None):
"""Updates a Host."""
element = self.search(u'{0}.{1}'.format(name, domain_name))
self.click(element)
self.click(locators['host.edit'])
if new_name:
self.wait_until_element(locators['host.name'])
self.field_update('host.name', new_name)
if parameters_list is not None:
self._configure_hosts_parameters(parameters_list)
if puppet_classes is not None:
self._configure_puppet_modules(puppet_classes)
if interface_parameters:
self.click(tab_locators['host.tab_interfaces'])
self.click(locators['host.edit_default_interface'])
self._configure_interface_parameters(interface_parameters)
if host_parameters:
self._add_host_parameters(host_parameters)
self.wait_until_element_is_not_visible(
common_locators['modal_background'])
self.click(common_locators['submit'])
def navigate_to_entity(self):
"""Navigate to Hosts entity page"""
Navigator(self.browser).go_to_hosts()
def _search_locator(self):
"""Specify locator for Hosts entity search procedure"""
return locators['host.select_name']
def delete(self, name, really=True):
"""Deletes a host."""
self.delete_entity(
name,
really,
locators['host.delete'],
drop_locator=locators['host.dropdown'],
)
def update_host_bulkactions(
self, hosts=None, action=None, parameters_list=None):
"""Updates host via bulkactions
:param hosts: List of hosts that should be selected for action
        :param action: Specify exact action to perform according to UI list. At
            the moment we support only Assign Organization and Run Job actions
:param parameters_list: List of parameters that are needed for the
dialogs that go after necessary action was selected. For example::
[{'organization': 'My_org01'}]
[{'command': 'ls'}]
"""
for host in hosts:
strategy, value = locators['host.checkbox']
self.click((strategy, value % host))
self.click(locators['host.select_action_list'])
strategy, value = locators['host.select_action']
self.click((strategy, value % action))
if parameters_list:
for parameter in parameters_list:
if action == 'Assign Organization':
self.click(locators['host.fix_mismatch'])
self.assign_value(
locators['host.select_org'],
parameter['organization']
)
self.click(locators['host.bulk_submit'])
if action == 'Run Job':
self.assign_value(
locators['job_invocation.command'],
parameter['command']
)
self.click(common_locators['submit'])
|
gpl-3.0
| -1,339,462,011,166,588,000 | 42.654822 | 79 | 0.587674 | false |
bit0001/chumme
|
database_manager/util.py
|
1
|
2568
|
import os
from sqlite3 import IntegrityError
from .friend_interest_manager import FriendInterestManager
from .friend_manager import FriendManager
from .friend_social_network_manager import FriendSocialNetworkManager
from .interest_manager import InterestManager
from .profile_photo_manager import ProfilePhotoManager
from .thought_manager import ThoughtManager
from .queries.create_table import CREATE_TABLES_QUERIES, OTHER_QUERIES
from .db_context_manager import DBContextManager
from model.social_network import SocialNetwork
DB_PATH = '{}/{}'.format(
os.path.dirname(os.path.abspath(__file__)), '../chumme.db'
)
def create_tables_if_not_exist():
for query in CREATE_TABLES_QUERIES.values():
with DBContextManager(DB_PATH) as cursor:
cursor.execute(query)
fill_social_networks_table()
def fill_social_networks_table():
with DBContextManager(DB_PATH) as cursor:
for social_network in SocialNetwork:
try:
cursor.execute(
OTHER_QUERIES['insert_social_network'],
(social_network.social_network_name,
social_network.base_url,
social_network.logo_path)
)
except IntegrityError:
pass
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = \
super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
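# Minimal usage sketch (hypothetical class, added for illustration): any class
# using this metaclass is constructed at most once.
#   class _Example(metaclass=Singleton):
#       pass
#   assert _Example() is _Example()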
class ChumMeDBManager(metaclass=Singleton):
def __init__(self):
self._friend_manager = FriendManager(DB_PATH)
self._interest_manager = InterestManager(DB_PATH)
self._friend_interest_manager = FriendInterestManager(DB_PATH)
self._thought_manager = ThoughtManager(DB_PATH)
self._friend_social_network_manager = \
FriendSocialNetworkManager(DB_PATH)
self._profile_photo_manager = ProfilePhotoManager(DB_PATH)
@property
def friend_manager(self):
return self._friend_manager
@property
def interest_manager(self):
return self._interest_manager
@property
def friend_interest_manager(self):
return self._friend_interest_manager
@property
def thought_manager(self):
return self._thought_manager
@property
def friend_social_network_manager(self):
return self._friend_social_network_manager
@property
def profile_photo_manager(self):
return self._profile_photo_manager
|
apache-2.0
| -3,149,871,718,848,437,000 | 30.317073 | 70 | 0.662773 | false |
snbway/flask-rest-framework
|
rest_framework_flask/versioning.py
|
1
|
1485
|
# encoding: utf-8
from . import api_setting
import exceptions
class BaseVersioning(object):
default_version = api_setting.DEFAULT_VERSION
allowed_versions = api_setting.ALLOWED_VERSION
version_param = api_setting.VERSION_PARAM
def determine_version(self, request, *args, **kwargs):
        msg = '{cls}.determine_version() must be implemented.'
raise NotImplementedError(msg.format(
cls=self.__class__.__name__
))
def reverse(self, *args, **kwargs):
pass
def is_allowed_version(self, version):
if not self.allowed_versions:
return True
return (version == self.default_version) or (version in self.allowed_versions)
class QueryParameterVersioning(BaseVersioning):
    invalid_version_message = 'Invalid version in query parameter.'
def determine_version(self, request, *args, **kwargs):
version = request.query_params.get(self.version_param, self.default_version)
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
if request.version is not None:
kwargs = {} if (kwargs is None) else kwargs
kwargs[self.version_param] = request.version
return super(QueryParameterVersioning, self).reverse(
viewname, args, kwargs, request, format, **extra
)
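# Minimal usage sketch (stub request object, added for illustration):
#   class _StubRequest(object):
#       query_params = {'version': 'v1'}
#   QueryParameterVersioning().determine_version(_StubRequest())
# returns 'v1' when it is the default version or listed in ALLOWED_VERSION,
# otherwise raises exceptions.NotFound with invalid_version_message.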
|
apache-2.0
| -8,468,332,575,219,012,000 | 34.357143 | 92 | 0.663973 | false |
sergiocorreia/panflute-filters
|
setup.py
|
1
|
4210
|
"""Filters for panflute
See:
https://github.com/sergiocorreia/panflute
https://github.com/sergiocorreia/panflute-filters
"""
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='panflutefilters',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='Pandoc filters with panflute',
long_description=long_description,
# The project's main homepage.
url='https://github.com/sergiocorreia/panflute-filters',
# Author details
author="Sergio Correia",
author_email='sergio.correia@gmail.com',
# Choose your license
license='BSD3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Environment :: Console',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Text Processing :: Filters',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
# What does your project relate to?
keywords='pandoc pandocfilters markdown latex csv',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['panflute'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'pandoc-figure=panflutefilters.figure:main',
'pandoc-table=panflutefilters.table:main'
#'media=media:main',
#'stata=stata:main'
],
},
)
|
bsd-3-clause
| 6,421,271,382,656,776,000 | 34.386555 | 94 | 0.657007 | false |
ConstantinT/jAEk
|
crawler/crawler.py
|
1
|
58230
|
'''
Copyright (C) 2015 Constantin Tschuertz
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from asyncio.tasks import sleep
import logging
import random
import sys
from enum import Enum
from copy import deepcopy
from urllib.parse import urljoin
from PyQt5.Qt import QApplication, QObject
from PyQt5.QtNetwork import QNetworkAccessManager
from core.eventexecutor import EventExecutor, XHRBehavior, EventResult
from core.formhandler import FormHandler
from core.clustermanager import ClusterManager
from core.jaekcore import JaekCore
from models.url import Url
from utils.asyncrequesthandler import AsyncRequestHandler
from utils.execptions import PageNotFound, LoginFailed
from models.deltapage import DeltaPage
from models.webpage import WebPage
from models.clickabletype import ClickableType
from utils.domainhandler import DomainHandler
from analyzer.mainanalyzer import MainAnalyzer
from utils.utils import calculate_similarity_between_pages, subtract_parent_from_delta_page, count_cookies
potential_logout_urls = []
class Crawler(JaekCore):
def __init__(self, crawl_config, proxy="", port=0, database_manager=None):
QObject.__init__(self)
self.app = QApplication(sys.argv)
self._network_access_manager = QNetworkAccessManager(self)
#self._network_access_manager = self._dynamic_analyzer.networkAccessManager()
self._event_executor = EventExecutor(self, proxy, port, crawl_speed=crawl_config.process_speed,
network_access_manager=self._network_access_manager)
self._dynamic_analyzer = MainAnalyzer(self, proxy, port, crawl_speed=crawl_config.process_speed,
network_access_manager=self._network_access_manager)
self._form_handler = FormHandler(self, proxy, port, crawl_speed=crawl_config.process_speed,
network_access_manager=self._network_access_manager)
self.domain_handler = None
self.current_depth = 0
self.crawl_with_login = False
self.proxy = proxy
self.port = port
self.cookie_num = -1
self.crawler_state = CrawlState.NormalPage
self.crawl_config = crawl_config
self.tmp_delta_page_storage = [] # holds the deltapages for further analyses
self.url_frontier = []
self.user = None
self.page_id = 0
self.current_depth = 0
self.database_manager = database_manager
self.cluster_manager = ClusterManager(self.database_manager) # dict with url_hash and
def crawl(self, user):
logging.debug("Crawl with userId: {}".format(user.username))
self.user = user
self.domain_handler = DomainHandler(self.crawl_config.start_page_url, self.database_manager, self.cluster_manager)
self.async_request_handler = AsyncRequestHandler(self.database_manager)
self.start_page_url = Url(self.crawl_config.start_page_url)
self.database_manager.insert_url_into_db(self.start_page_url)
if self.user.login_data is not None:
self.crawl_with_login = True
successfull, self.interactive_login_form_search = self._initial_login()
round_counter = 0
while True:
logging.debug("=======================New Round=======================")
current_page = None
            necessary_clicks = [] # Saves the actions the crawler needs to reach a delta page
parent_page = None # Saves the parent of the delta-page (not other delta pages)
            previous_pages = [] # Saves all the pages the crawler has to pass to reach the delta-page
delta_page = None
if round_counter < 10:
round_counter += 1
else:
                # Need to do this to prevent memory leaks caused by the PyQt bindings or something else
logging.debug("10 rounds over, renew critical classes...")
round_counter = 0
self._network_access_manager = None
self._event_executor = None
self._form_handler = None
self._event_executor = None
self._network_access_manager = QNetworkAccessManager(self)
self._event_executor = EventExecutor(self, self.proxy, self.port, crawl_speed=self.crawl_config.process_speed,
network_access_manager=self._network_access_manager)
self._dynamic_analyzer = MainAnalyzer(self, self.proxy, self.port, crawl_speed=self.crawl_config.process_speed,
network_access_manager=self._network_access_manager)
self._form_handler = FormHandler(self, self.proxy, self.port, crawl_speed=self.crawl_config.process_speed,
network_access_manager=self._network_access_manager)
if len(self.tmp_delta_page_storage) > 0:
self.crawler_state = CrawlState.DeltaPage
current_page = self.tmp_delta_page_storage.pop(0)
logging.debug("Processing Deltapage with ID: {}, {} deltapages left...".format(str(current_page.id),
str(len(
self.tmp_delta_page_storage))))
parent_page = current_page
while isinstance(parent_page, DeltaPage):
necessary_clicks.insert(0,
                                        parent_page.generator) # Insert as first element because of reverse order
parent_page = self.database_manager.get_page_to_id(parent_page.parent_id)
if parent_page is None:
raise PageNotFound("This exception should never be raised...")
previous_pages.append(parent_page)
# Now I'm reaching a non delta-page
self.current_depth = parent_page.current_depth
url_to_request = parent_page.url
if current_page.generator.clickable_depth + 1 > self.crawl_config.max_click_depth:
logging.debug("Don't proceed with Deltapage(max click depth)...")
self.database_manager.store_delta_page(current_page)
continue
else:
logging.debug("Looking for the next url...")
possible_urls = self.database_manager.get_all_unvisited_urls_sorted_by_hash()
if len(possible_urls) > 0:
self.crawler_state = CrawlState.NormalPage
cluster_per_urls = []
for key in possible_urls:
cluster_per_urls.append((key, self.cluster_manager.calculate_cluster_per_visited_urls(key)))
next_url_hash, max_cluster_per_url = max(cluster_per_urls, key=lambda x: x[1])
possible_urls = possible_urls[next_url_hash]
url_to_request = possible_urls.pop(random.randint(0, len(possible_urls) - 1))
if url_to_request.depth_of_finding is None:
self.current_depth = 0
else:
self.current_depth = url_to_request.depth_of_finding + 1
else:
break
if self.crawler_state == CrawlState.NormalPage:
if not self.domain_handler.is_in_scope(url_to_request):
logging.debug("Ignoring {} (Not in scope)... ".format(url_to_request.toString()))
self.database_manager.visit_url(url_to_request, None, 1000)
continue
if url_to_request.depth_of_finding is not None:
if url_to_request.depth_of_finding + 1 > self.crawl_config.max_depth:
logging.debug("Ignoring {} (Max crawl depth)... ".format(url_to_request.toString()))
self.database_manager.visit_url(url_to_request, None, 1001)
continue
plain_url_to_request = url_to_request.toString()
if self.database_manager.url_visited(url_to_request):
logging.debug("Crawler tries to use url: {} twice".format(url_to_request.toString()))
continue
if not self.cluster_manager.need_more_urls_of_this_type(url_to_request.url_hash):
self.database_manager.visit_url(url_to_request, None, 1002)
logging.debug("Seen enough urls from {} ".format(url_to_request.toString()))
continue
current_page = None
num_of_tries = 0
logging.debug("Next Url is: {}".format(url_to_request.toString()))
while current_page is None and num_of_tries < 3:
response_code, current_page = self._dynamic_analyzer.analyze(url_to_request, current_depth=self.current_depth)
self.domain_handler.complete_urls_in_page(current_page)
self.domain_handler.analyze_urls(current_page)
self.domain_handler.set_url_depth(current_page, self.current_depth)
self.async_request_handler.handle_requests(current_page)
num_of_tries += 1
if current_page is None:
self.database_manager.visit_url(url_to_request, None, 1004)
logging.debug("Fetching url: {} fails.... continue".format(plain_url_to_request))
continue
if self.crawl_with_login and self.cookie_num > 0:
num_cookies = count_cookies(self._network_access_manager, url_to_request)
logging.debug("Having {} cookies...".format(num_cookies))
if num_cookies < self.cookie_num or self._find_form_with_special_parameters(current_page, self.user.login_data, self.interactive_login_form_search)[0] is not None:
logging.debug("Too less cookies... possible logout!")
if not self._handle_possible_logout():
response_code, current_page = self._dynamic_analyzer.analyze(url_to_request, current_depth=self.current_depth)
self.domain_handler.complete_urls_in_page(current_page)
self.domain_handler.analyze_urls(current_page)
self.domain_handler.set_url_depth(current_page, self.current_depth)
self.async_request_handler.handle_requests(current_page)
elif self.crawl_with_login \
and response_code in range(300, 350) \
and current_page.url != plain_url_to_request:
logging.debug("Redirect - Response code is: {} from {} to {}...".format(response_code, plain_url_to_request, current_page.url))
if not self._handle_possible_logout():
response_code, current_page = self._dynamic_analyzer.analyze(url_to_request, current_depth=self.current_depth)
self.domain_handler.complete_urls_in_page(current_page)
self.domain_handler.analyze_urls(current_page)
self.domain_handler.set_url_depth(current_page, self.current_depth)
self.async_request_handler.handle_requests(current_page)
elif self.crawl_with_login and response_code in [200]:
if self._find_form_with_special_parameters(current_page, self.user.login_data, self.interactive_login_form_search)[0] is not None:
if not self._handle_possible_logout():
logging.debug("Loginpage was visible...relaod page and continue")
response_code, current_page = self._dynamic_analyzer.analyze(url_to_request, current_depth=self.current_depth)
self.domain_handler.complete_urls_in_page(current_page)
self.domain_handler.analyze_urls(current_page)
self.domain_handler.set_url_depth(current_page, self.current_depth)
self.async_request_handler.handle_requests(current_page)
elif self.crawl_with_login and response_code in range(400,500):
logging.debug("Server responses with: {}...check if we are logged out".format(response_code))
if not self._handle_possible_logout():
logging.debug("Loginpage was visible...relaod page and continue")
response_code, current_page = self._dynamic_analyzer.analyze(url_to_request, current_depth=self.current_depth)
self.domain_handler.complete_urls_in_page(current_page)
self.domain_handler.analyze_urls(current_page)
self.domain_handler.set_url_depth(current_page, self.current_depth)
self.async_request_handler.handle_requests(current_page)
current_page.current_depth = self.current_depth
self.database_manager.store_web_page(current_page)
for current_clickable_to_work_on in current_page.clickables:
current_clickable_to_work_on.clickable_depth = 0
if response_code in range(300, 350) and current_page.url != plain_url_to_request:
wp_id = self.database_manager.get_id_to_url(current_page.url)
if wp_id is None or wp_id > 0:
logging.debug("Redirected page already seen, continue with next...")
self.database_manager.visit_url(url_to_request, wp_id, response_code, current_page.url)
continue #Page was already seen
self.database_manager.visit_url(url_to_request, current_page.id, response_code, current_page.url)
elif response_code > 399:
self.database_manager.visit_url(url_to_request, None, response_code)
logging.debug("{} returns code {}".format(url_to_request.toString(), response_code))
continue
else:
self.database_manager.visit_url(url_to_request, current_page.id, response_code)
self.domain_handler.extract_new_links_for_crawling(current_page)
if self.crawler_state == CrawlState.DeltaPage:
current_page.html = parent_page.html # Assigning html
logging.debug("Now at Deltapage: {}".format(current_page.id))
self.database_manager.store_delta_page(current_page)
num_clickables = len(current_page.clickables)
counter = 1 # Just a counter for displaying progress
errors = 0 # Count the errors(Missing preclickable or target elements)
login_retries_per_clickable = 0 # Count the login_retries
max_login_retires_per_clickable = 3
max_errors = 3
timeout_counter = 0
current_working_clickable_number = 0
if num_clickables > 0:
logging.debug("Start executing events...")
else:
logging.debug("Page has no events. Cluster it and throw it to the others...")
while current_working_clickable_number < len(current_page.clickables): # and login_retries_per_clickable < max_login_retires_per_clickable:
current_clickable_to_work_on = current_page.clickables[current_working_clickable_number]
if not self.should_execute_clickable(current_clickable_to_work_on):
current_clickable_to_work_on.clickable_type = ClickableType.IgnoredByCrawler
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
continue
logging.debug(
"Processing Clickable Number {} from {}".format(str(counter), str(len(current_page.clickables))))
counter += 1
"""
If event is something like "onclick", take off the "on"
"""
event = current_clickable_to_work_on.event
if event[0:2] == "on":
event = event[2:]
if current_clickable_to_work_on.clicked:
continue
"""
If event is not supported, mark it so in the database and continue
"""
if event not in self._event_executor.supported_events and "javascript:" not in event:
current_clickable_to_work_on.clickable_type = ClickableType.UnsupportedEvent
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
logging.debug("Unsupported event: {} in {}".format(event, current_clickable_to_work_on.toString()))
current_working_clickable_number += 1
continue
"""
                Because I first want a run without sending anything to the backend, I distinguish whether I know an element or not.
                If I know it (its clickable_type is set), I re-execute the event and let the AJAX request pass.
                If I don't know it, I execute each clickable with interception.
"""
if current_clickable_to_work_on.clickable_type is not None:
"""
                    The clickable was executed in the past and has triggered a backend request. Now execute it again and let that request pass
"""
xhr_behavior = XHRBehavior.ObserveXHR
event_result, delta_page = self._event_executor.execute(current_page, element_to_click=current_clickable_to_work_on,
pre_clicks=necessary_clicks,
xhr_options=xhr_behavior)
else:
"""
                    The clickable was never executed, so execute it while intercepting all backend requests.
"""
xhr_behavior = XHRBehavior.InterceptXHR
event_result, delta_page = self._event_executor.execute(current_page, element_to_click=current_clickable_to_work_on,
pre_clicks=necessary_clicks,
xhr_options=xhr_behavior)
if event_result == EventResult.UnsupportedTag:
current_clickable_to_work_on.clicked = True
current_clickable_to_work_on.clickable_type = ClickableType.UnsupportedEvent
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
current_working_clickable_number += 1
continue
elif event_result == EventResult.ErrorWhileInitialLoading:
if timeout_counter < 10:
current_clickable_to_work_on.clicked = True
current_clickable_to_work_on.clickable_type = ClickableType.Error
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
timeout_counter += 1
current_working_clickable_number += 1
continue
else:
timeout_counter = 0
logging.debug("Too many loading errors... mark all clickables as error and continue")
for current_working_clickable_number in range(0, len(current_page.clickables)):
                            current_clickable_to_work_on = current_page.clickables[current_working_clickable_number]
current_clickable_to_work_on.clicked = True
current_clickable_to_work_on.clickable_type = ClickableType.Error
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
break
continue
#Event execution error handling...
elif event_result == EventResult.PreviousClickNotFound or event_result == EventResult.TargetElementNotFound:
if self.crawl_with_login:
if login_retries_per_clickable < max_login_retires_per_clickable:
if errors < max_errors:
current_clickable_to_work_on.clicked = False
errors += 1
else:
logging.debug("Too many event errors, checking for logout...")
self._handle_possible_logout()
login_retries_per_clickable += 1
errors = 0
else:
logging.debug("Max Loginretires per clickable - set clickable to error and go on...")
current_clickable_to_work_on.clickable_type = ClickableType.Error
login_retries_per_clickable = 0
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
errors = 0
current_working_clickable_number += 1
continue
else:
if errors < max_errors:
errors += 1
else:
logging.debug("Clickable {} times not found, continue with next...".format(max_errors))
errors = 0
current_clickable_to_work_on.clickable_type = ClickableType.Error
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
current_working_clickable_number += 1
continue
elif event_result == EventResult.CreatesPopup:
current_clickable_to_work_on.clicked = True
current_clickable_to_work_on.links_to = delta_page.url
current_clickable_to_work_on.clickable_type = ClickableType.CreateNewWindow
new_url = Url(delta_page.url)
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
new_url = self.domain_handler.handle_url(new_url, None)
new_url.depth_of_finding = self.current_depth
self.database_manager.insert_url_into_db(new_url)
current_working_clickable_number += 1
continue
else:
try:
delta_page.delta_depth = current_page.delta_depth + 1
except AttributeError:
delta_page.delta_depth = 1
if event_result == EventResult.URLChanged:
logging.debug("DeltaPage has new Url...{}".format(delta_page.url))
current_clickable_to_work_on.clicked = True
current_clickable_to_work_on.links_to = delta_page.url
current_clickable_to_work_on.clickable_type = ClickableType.Link
new_url = Url(delta_page.url)
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
if self.database_manager.insert_url_into_db(new_url): # Page does not exist
delta_page.id = self.get_next_page_id()
self.database_manager.visit_url(new_url, delta_page.id, 1000) #1000 is the code for a redirected url
else:
current_working_clickable_number += 1
continue
"""
Everything works fine and I get a normal DeltaPage, now I have to:
                - Assign the current depth to it -> DeltaPages have the same depth as their ParentPages
                - Complete the raw db_urls of the DeltaPage and analyze it
                - Analyze the DeltaPage without addEventListeners and timing check. This is done during event execution
                - Subtract the ParentPage (optionally the parent plus all previously visited DeltaPages) from the DeltaPage to get
                  the real DeltaPage
                - Handle it according to the result of the subtraction
"""
current_clickable_to_work_on.clicked = True
current_clickable_to_work_on.clickable_depth = delta_page.delta_depth
delta_page.current_depth = self.current_depth
delta_page = self.domain_handler.complete_urls_in_page(delta_page)
delta_page = self.domain_handler.analyze_urls(delta_page)
delta_page = self.domain_handler.set_url_depth(delta_page, self.current_depth)
delta_page = self.async_request_handler.handle_requests(delta_page)
if self.crawler_state == CrawlState.NormalPage:
delta_page = subtract_parent_from_delta_page(current_page, delta_page)
if self.crawler_state == CrawlState.DeltaPage:
delta_page = subtract_parent_from_delta_page(current_page, delta_page)
for p in previous_pages:
delta_page = subtract_parent_from_delta_page(p, delta_page)
clickable_process_again = False
if len(delta_page.clickables) > 0 or len(delta_page.links) > 0 or len(
delta_page.ajax_requests) > 0 or len(delta_page.forms) > 0:
if len(delta_page.links) != 0 and len(delta_page.ajax_requests) == 0 and len(
delta_page.clickables) == 0 and len(delta_page.forms) == 0:
clickable_process_again = self.handle_delta_page_has_only_new_links(current_clickable_to_work_on, delta_page, current_page,
xhr_behavior)
elif len(delta_page.links) == 0 and len(delta_page.ajax_requests) != 0 and len(
delta_page.clickables) == 0 and len(delta_page.forms) == 0:
clickable_process_again = self.handle_delta_page_has_only_ajax_requests(current_clickable_to_work_on, delta_page,
current_page, xhr_behavior)
elif len(delta_page.links) != 0 and len(delta_page.ajax_requests) != 0 and len(
delta_page.clickables) == 0 and len(delta_page.forms) == 0:
clickable_process_again = self.handle_delta_page_has_new_links_and_ajax_requests(current_clickable_to_work_on, delta_page,
current_page,
xhr_behavior)
elif len(delta_page.links) == 0 and len(delta_page.ajax_requests) == 0 and len(
delta_page.clickables) != 0 and len(delta_page.forms) == 0:
clickable_process_again = self.handle_delta_page_has_only_new_clickables(current_clickable_to_work_on, delta_page,
current_page, xhr_behavior)
elif len(delta_page.links) != 0 and len(delta_page.ajax_requests) == 0 and len(
delta_page.clickables) != 0 and len(delta_page.forms) == 0:
clickable_process_again = self.handle_delta_page_has_new_links_and_clickables(current_clickable_to_work_on, delta_page,
current_page, xhr_behavior)
elif len(delta_page.links) == 0 and len(delta_page.ajax_requests) != 0 and len(
delta_page.clickables) != 0 and len(delta_page.forms) == 0:
clickable_process_again = self.handle_delta_page_has_new_clickables_and_ajax_requests(current_clickable_to_work_on,
delta_page,
current_page,
xhr_behavior)
elif len(delta_page.links) != 0 and len(delta_page.ajax_requests) != 0 and len(
delta_page.clickables) != 0 and len(delta_page.forms) == 0:
clickable_process_again = self.handle_delta_page_has_new_links_ajax_requests__clickables(current_clickable_to_work_on,
delta_page,
current_page,
xhr_behavior)
elif len(delta_page.links) == 0 and len(delta_page.ajax_requests) == 0 and len(
delta_page.clickables) == 0 and len(delta_page.forms) != 0:
clickable_process_again = self.handle_delta_page_has_only_new_forms(current_clickable_to_work_on, delta_page, current_page,
xhr_behavior)
elif len(delta_page.links) != 0 and len(delta_page.ajax_requests) == 0 and len(
delta_page.clickables) == 0 and len(delta_page.forms) != 0:
clickable_process_again = self.handle_delta_page_has_new_links_and_forms(current_clickable_to_work_on, delta_page,
current_page, xhr_behavior)
elif len(delta_page.links) == 0 and len(delta_page.ajax_requests) != 0 and len(
delta_page.clickables) == 0 and len(delta_page.forms) != 0:
clickable_process_again = self.handle_delta_page_has_new_forms_and_ajax_requests(current_clickable_to_work_on, delta_page,
current_page,
xhr_behavior)
elif len(delta_page.links) != 0 and len(delta_page.ajax_requests) != 0 and len(
delta_page.clickables) == 0 and len(delta_page.forms) != 0:
clickable_process_again = self.handle_delta_page_has_new_links_forms_ajax_requests(current_clickable_to_work_on, delta_page,
current_page,
xhr_behavior)
elif len(delta_page.links) == 0 and len(delta_page.ajax_requests) == 0 and len(
delta_page.clickables) != 0 and len(delta_page.forms) != 0:
clickable_process_again = self.handle_delta_page_has_new_clickable_and_forms(current_clickable_to_work_on, delta_page,
current_page, xhr_behavior)
elif len(delta_page.links) != 0 and len(delta_page.ajax_requests) == 0 and len(
delta_page.clickables) != 0 and len(delta_page.forms) != 0:
clickable_process_again = self.handle_delta_page_has_new_links_clickables_forms(current_clickable_to_work_on, delta_page,
current_page,
xhr_behavior)
elif len(delta_page.links) == 0 and len(delta_page.ajax_requests) != 0 and len(
delta_page.clickables) != 0 and len(delta_page.forms) != 0:
clickable_process_again = self.handle_delta_page_has_new_clickables_forms_ajax_requests(current_clickable_to_work_on,
delta_page,
current_page,
xhr_behavior)
elif len(delta_page.links) != 0 and len(delta_page.ajax_requests) != 0 and len(
delta_page.clickables) != 0 and len(delta_page.forms) != 0:
clickable_process_again = self.handle_delta_page_has_new_links_clickables_forms_ajax_requests(current_clickable_to_work_on,
delta_page,
current_page,
xhr_behavior)
else:
logging.debug("Nothing matches...")
logging.debug(" Clickables: " + str(len(delta_page.clickables)))
logging.debug(" Links: " + str(len(delta_page.links)))
logging.debug(" Forms: " + str(len(delta_page.forms)))
logging.debug(" AjaxRequests: " + str(len(delta_page.ajax_requests)))
                        if clickable_process_again:
                            # If we want to process the clickable again, we pop it out of the list and append it at the end.
                            # Important: do not increase the counter here.
current_clickable_to_work_on.clicked = False
current_clickable_to_work_on = current_page.clickables.pop(current_working_clickable_number)
current_page.clickables.append(current_clickable_to_work_on)
else:
current_working_clickable_number += 1
else:
current_clickable_to_work_on.clickable_type = ClickableType.UIChange
self.database_manager.update_clickable(current_page.id, current_clickable_to_work_on)
current_working_clickable_number += 1
if self.crawler_state == CrawlState.NormalPage:
self.cluster_manager.add_webpage_to_cluster(current_page)
logging.debug("Crawling is done...")
def handle_delta_page_has_only_new_links(self, clickable, delta_page, parent_page=None, xhr_behavior=None):
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
self.domain_handler.extract_new_links_for_crawling(delta_page)
self.database_manager.store_delta_page(delta_page)
self.database_manager.update_clickable(parent_page.id, clickable)
return False
def handle_delta_page_has_only_new_clickables(self, clickable, delta_page, parent_page=None, xhr_behavior=None):
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
def handle_delta_page_has_only_new_forms(self, clickable, delta_page, parent_page=None, xhr_behavior=None):
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.database_manager.store_delta_page(delta_page)
self.domain_handler.extract_new_links_for_crawling(delta_page)
self.database_manager.update_clickable(parent_page.id, clickable)
return False
def handle_delta_page_has_only_ajax_requests(self, clickable, delta_page, parent_page=None, xhr_behavior=None):
self.domain_handler.extract_new_links_for_crawling(delta_page)
clickable.clickable_type = ClickableType.SendingAjax
if xhr_behavior == XHRBehavior.ObserveXHR:
self.database_manager.extend_ajax_requests_to_webpage(parent_page, delta_page.ajax_requests)
return False
else:
return True
def handle_delta_page_has_new_links_and_clickables(self, clickable, delta_page, parent_page=None, xhr_behavior=None):
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.domain_handler.extract_new_links_for_crawling(delta_page)
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
def handle_delta_page_has_new_links_and_forms(self, clickable, delta_page, parent_page=None, xhr_behavior=None):
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.domain_handler.extract_new_links_for_crawling(delta_page)
self.database_manager.store_delta_page(delta_page)
self.database_manager.update_clickable(parent_page.id, clickable)
return False
def handle_delta_page_has_new_links_and_ajax_requests(self, clickable, delta_page, parent_page=None,
xhr_behavior=None):
if xhr_behavior == XHRBehavior.ObserveXHR:
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.domain_handler.extract_new_links_for_crawling(delta_page)
delta_page.generator_requests.extend(delta_page.ajax_requests)
delta_page.ajax_requests = []
self.database_manager.store_delta_page(delta_page)
self.database_manager.update_clickable(parent_page.id, clickable)
return False
else:
clickable.clickable_type = ClickableType.SendingAjax
return True
def handle_delta_page_has_new_clickable_and_forms(self, clickable, delta_page, parent_page=None, xhr_behavior=None):
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
def handle_delta_page_has_new_clickables_and_ajax_requests(self, clickable, delta_page, parent_page=None,
xhr_behavior=None):
if xhr_behavior == XHRBehavior.ObserveXHR:
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.domain_handler.extract_new_links_for_crawling(delta_page)
delta_page.generator_requests.extend(delta_page.ajax_requests)
delta_page.ajax_requests = []
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
else:
clickable.clickable_type = ClickableType.SendingAjax
return True
def handle_delta_page_has_new_forms_and_ajax_requests(self, clickable, delta_page, parent_page=None,
xhr_behavior=None):
if xhr_behavior == XHRBehavior.ObserveXHR:
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.domain_handler.extract_new_links_for_crawling(delta_page)
delta_page.generator_requests.extend(delta_page.ajax_requests)
delta_page.ajax_requests = []
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
else:
clickable.clickable_type = ClickableType.SendingAjax
return True
def handle_delta_page_has_new_links_clickables_forms(self, clickable, delta_page, parent_page=None,
xhr_behavior=None):
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.domain_handler.extract_new_links_for_crawling(delta_page)
delta_page.generator_requests.extend(delta_page.ajax_requests)
delta_page.ajax_requests = []
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
def handle_delta_page_has_new_links_forms_ajax_requests(self, clickable, delta_page, parent_page=None,
xhr_behavior=None):
if xhr_behavior == XHRBehavior.ObserveXHR:
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.domain_handler.extract_new_links_for_crawling(delta_page)
delta_page.generator_requests.extend(delta_page.ajax_requests)
delta_page.ajax_requests = []
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
else:
clickable.clickable_type = ClickableType.SendingAjax
return True
def handle_delta_page_has_new_clickables_forms_ajax_requests(self, clickable, delta_page, parent_page=None,
xhr_behavior=None):
if xhr_behavior == XHRBehavior.ObserveXHR:
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
self.domain_handler.extract_new_links_for_crawling(delta_page)
delta_page.generator_requests.extend(delta_page.ajax_requests)
delta_page.ajax_requests = []
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
else:
clickable.clickable_type = ClickableType.SendingAjax
return True
def handle_delta_pages_has_new_links_clickables_forms(self, clickable, delta_page, parent_page=None,
xhr_behavior=None):
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
self.domain_handler.extract_new_links_for_crawling(delta_page)
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
def handle_delta_page_has_new_links_ajax_requests__clickables(self, clickable, delta_page, parent_page=None,
xhr_behavior=None):
if xhr_behavior == XHRBehavior.ObserveXHR:
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.domain_handler.extract_new_links_for_crawling(delta_page)
delta_page.generator_requests.extend(delta_page.ajax_requests)
delta_page.ajax_requests = []
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
else:
clickable.clickable_type = ClickableType.SendingAjax
return True
def handle_delta_page_has_new_links_clickables_forms_ajax_requests(self, clickable, delta_page, parent_page=None,
xhr_behavior=None):
if xhr_behavior == XHRBehavior.ObserveXHR:
delta_page.generator.clickable_type = ClickableType.CreatesNewNavigatables
if delta_page.id == -1:
delta_page.id = self.get_next_page_id()
self.domain_handler.extract_new_links_for_crawling(delta_page)
delta_page.generator_requests.extend(delta_page.ajax_requests)
delta_page.ajax_requests = []
self.database_manager.update_clickable(parent_page.id, clickable)
if self.should_delta_page_be_stored_for_crawling(delta_page):
self._store_delta_page_for_crawling(delta_page)
return False
else:
clickable.clickable_type = ClickableType.SendingAjax
return True
#def find_form_with_special_parameters(self, page, login_data, interactive_search=True):
# logging.debug("Searching for form with given parameter names...")
# keys = list(login_data.keys())
# data1 = keys[0]
# data2 = keys[1]
# for form in page.forms:
# if form.toString().find(data1) > -1 and form.toString().find(data2) > -1:
# logging.debug("Login form found, without clicking...")
# self.interactive_login_form_search= False
# return form, None
# if interactive_search:
# for clickable in page.clickables:
# tmp_page = deepcopy(page)
# event_state, delta_page = self._event_executor.execute(tmp_page, element_to_click=clickable)
# if delta_page is None:
# continue
# delta_page = self.domain_handler.complete_urls_in_page(delta_page)
# delta_page = self.domain_handler.analyze_urls(delta_page)
# if event_state == EventResult.Ok:
# for form in delta_page.forms:
# if form.toString().find(data1) > -1 and form.toString().find(data2) > -1:
# logging.debug("Login form found, after clicking {}".format(clickable.toString()))
# return form, clickable
# return None, None
@staticmethod
def convert_action_url_to_absolute(form, base):
form.action = urljoin(base, form.action)
return form
def print_to_file(self, item, filename):
        with open("result/" + str(filename), "w") as f:
            f.write(item)
def should_delta_page_be_stored_for_crawling(self, delta_page):
for d_pages in self.tmp_delta_page_storage:
if d_pages.url == delta_page.url:
page_similarity = calculate_similarity_between_pages(delta_page, d_pages, clickable_weight=1,
form_weight=1, link_weight=1)
if page_similarity >= 0.9:
logging.debug("Equal page is already stored...")
return False
for d_pages in self.get_all_crawled_deltapages_to_url(delta_page.url):
if d_pages.url == delta_page.url:
page_similarity = calculate_similarity_between_pages(delta_page, d_pages, clickable_weight=1,
form_weight=1, link_weight=1)
if page_similarity >= 0.9:
logging.debug("Equal page is already seen...")
return False
return True
def _store_delta_page_for_crawling(self, delta_page):
self.tmp_delta_page_storage.append(delta_page)
def get_all_stored_delta_pages(self):
return self.tmp_delta_page_storage
def get_all_crawled_deltapages_to_url(self, url):
result = self.database_manager.get_all_crawled_delta_pages(url)
return result
def get_next_page_id(self):
tmp = self.page_id
self.page_id += 1
return tmp
def extend_ajax_requests_to_webpage(self, web_page, ajax_requests):
web_page.ajax_requests.extend(ajax_requests)
self.database_manager._extend_ajax_requests_to_webpage(web_page, ajax_requests)
"""
Is called right before event execution starts. Here you can change the order or delete clickables
"""
def edit_clickables_for_execution(self, clickables):
return clickables
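    # Hypothetical sketch (not part of the original crawler): one possible override of
    # edit_clickables_for_execution that runs plain click events before all other event
    # types. Only an attribute already used in this file (event) is assumed to exist on
    # the clickable objects; the ordering criterion itself is purely illustrative.
    def example_edit_clickables_for_execution(self, clickables):
        return sorted(clickables, key=lambda c: 0 if c.event in ("click", "onclick") else 1)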
"""
    Is called right before a clickable is executed. You have to return True or False
"""
def should_execute_clickable(self, clickable):
# logging.debug(str(clickable.html_class) + " : " + str(clickable.event))
return True
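    # Hypothetical sketch (not part of the original crawler): should_execute_clickable
    # could be used to skip clickables the crawler must never trigger, e.g. logout
    # buttons. The "logout" substring check on html_class is an illustrative assumption;
    # html_class itself is an attribute already referenced in this file.
    def example_should_execute_clickable(self, clickable):
        if clickable.html_class is not None and "logout" in str(clickable.html_class).lower():
            return False
        return True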
#def initial_login(self):
# logging.debug("Initial Login...")
# self._page_with_loginform_logged_out = self._get_webpage(self.user.url_with_login_form)
# num_of_cookies_before_login = count_cookies(self._network_access_manager, self.user.url_with_login_form)
# logging.debug("Number of cookies before initial login: {}".format(num_of_cookies_before_login))
# self._login_form, login_clickables = self.find_form_with_special_parameters(self._page_with_loginform_logged_out, self.user.login_data)
# if self._login_form is None:
# #f = open("No_login_form.txt", "w")
# #f.write(self._page_with_loginform_logged_out.html)
# #f.close()
# raise LoginFailed("Cannot find Login form, please check the parameters...")
# page_after_login = self._login_and_return_webpage(self._login_form, self._page_with_loginform_logged_out, self.user.login_data, login_clickables)
# if page_after_login is None:
# raise LoginFailed("Cannot load loginpage anymore...stop...")
# login_successfull = calculate_similarity_between_pages(self._page_with_loginform_logged_out, page_after_login) < 0.5
# if login_successfull:
# num_cookies_after_login = count_cookies(self._network_access_manager, self.user.url_with_login_form)
# if num_cookies_after_login > num_of_cookies_before_login:
# self.cookie_num = num_cookies_after_login
# logging.debug("Initial login successfull!")
# return True
# raise LoginFailed("Cannot login, sorry...")
#def _login_and_return_webpage(self, login_form, page_with_login_form=None, login_data=None, login_clickable= None):
# if page_with_login_form is None:
# page_with_login_form = self._page_with_loginform_logged_out
# try:
# if login_clickable is not None:
# tmp_page = deepcopy(page_with_login_form)
# event_state, page_with_login_form = self._event_executor.execute(tmp_page, element_to_click=login_clickable)
# if event_state == EventResult.ErrorWhileInitialLoading:
# sleep(2000)
# event_state, page_with_login_form = self._event_executor.execute(tmp_page, element_to_click=login_clickable)
# if event_state == EventResult.ErrorWhileInitialLoading:
# logging.debug("Two time executing fails.. stop crawling")
# return None
# self.domain_handler.complete_urls_in_page(page_with_login_form)
# self.domain_handler.analyze_urls(page_with_login_form)
# self.async_request_handler.handle_requests(page_with_login_form)
# logging.debug("Start submitting login form...")
# response_code, html_after_timeouts, new_clickables, forms, links, timemimg_requests = self._form_handler.submit_form(login_form, page_with_login_form, login_data)
# except ValueError:
# return None
# #TODO: Put building of Webpage inside submit function
# page_after_login = WebPage(-1, page_with_login_form.url, html_after_timeouts)
# page_after_login.clickables = new_clickables
# page_after_login.links = links
# page_after_login.timing_requests = timemimg_requests
# page_after_login.forms = forms
# self.domain_handler.complete_urls_in_page(page_after_login)
# self.domain_handler.analyze_urls(page_after_login)
# self.async_request_handler.handle_requests(page_after_login)
# return page_after_login
#def handle_possible_logout(self):
# """
# Handles a possible logout
# :return: True is we were not logged out and false if we were logged out
# """
# retries = 0
# max_retries = 3
# while retries < max_retries:
# logging.debug("Start with relogin try number: {}".format(retries+1))
# page_with_login_form = self._get_webpage(self.user.url_with_login_form)
# login_form, login_clickable = self.find_form_with_special_parameters(page_with_login_form, self.user.login_data, self.interactive_login_form_search)
# if login_form is not None: #So login_form is visible, we are logged out
# logging.debug("Logout detected, visible login form...")
# hopefully_reloggedin_page = self._login_and_return_webpage(login_form, page_with_login_form, self.user.login_data, login_clickable)
# if hopefully_reloggedin_page is None:
# retries += 1
# logging.debug("Relogin attempt number {} failed".format(retries))
# sleep(2000)
# else:
# login_form, login_clickable = self.find_form_with_special_parameters(hopefully_reloggedin_page, self.user.login_data)
# if login_form is None:
# logging.debug("Relogin successfull...continue")
# return False
# else:
# logging.debug("Relogin fails, loginform is still present...")
# retries += 1
# sleep(2000)
# else:
# logging.debug("Login Form is not there... we can continue (I hope)")
# if retries < 3:
# return True
# else:
# return False
# raise LoginFailed("We cannot login anymore... stop crawling here")
#def _get_webpage(self, url):
# response_code, result = self._dynamic_analyzer.analyze(url, timeout=10)
# self.domain_handler.complete_urls_in_page(result)
# self.domain_handler.analyze_urls(result)
# self.async_request_handler.handle_requests(result)
# return result
class CrawlState(Enum):
NormalPage = 0
EventGeneratedPage = 1
DeltaPage = 2
AnalyzeLoginPage = 3
Login = 4
|
gpl-3.0
| -2,113,222,505,680,327,400 | 59.910042 | 183 | 0.560124 | false |
eviljeff/olympia
|
src/olympia/shelves/forms.py
|
1
|
2006
|
import requests
from rest_framework.reverse import reverse as drf_reverse
from django import forms
from django.conf import settings
from django.urls import NoReverseMatch
from olympia.shelves.models import Shelf
class ShelfForm(forms.ModelForm):
class Meta:
model = Shelf
fields = ('title', 'endpoint', 'criteria',
'footer_text', 'footer_pathname',)
def clean(self):
data = self.cleaned_data
baseUrl = settings.INTERNAL_SITE_URL
endpoint = data.get('endpoint')
criteria = data.get('criteria')
if criteria is None:
return
try:
if endpoint == 'search':
if not criteria.startswith('?') or criteria.count('?') > 1:
raise forms.ValidationError('Check criteria field.')
else:
api = drf_reverse('v4:addon-search')
url = baseUrl + api + criteria
elif endpoint == 'collections':
api = drf_reverse('v4:collection-addon-list', kwargs={
'user_pk': settings.TASK_USER_ID,
'collection_slug': criteria
})
url = baseUrl + api
else:
return
except NoReverseMatch:
raise forms.ValidationError(
'No data found - check criteria parameters.')
try:
response = requests.get(url)
if response.status_code == 404:
raise forms.ValidationError('Check criteria - No data found')
if response.status_code != 200:
raise forms.ValidationError(
'Check criteria - %s' % response.json()[0])
if response.json().get('count', 0) == 0:
raise forms.ValidationError(
'Check criteria parameters - e.g., "type"')
except requests.exceptions.ConnectionError:
raise forms.ValidationError('Connection Error')
|
bsd-3-clause
| -3,409,508,105,795,122,700 | 32.433333 | 77 | 0.548355 | false |
bastibl/gnuradio
|
gr-fec/python/fec/bercurve_generator.py
|
1
|
3627
|
#!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from gnuradio import gr, blocks
import numpy
from .fec_test import fec_test
class bercurve_generator(gr.hier_block2):
def __init__(self, encoder_list, decoder_list, esno=numpy.arange(0.0, 3.0, .25),
samp_rate=3200000, threading='capillary', puncpat='11', seed=0):
        gr.hier_block2.__init__(
self, "ber_curve_generator",
gr.io_signature(0, 0, 0),
gr.io_signature(len(esno) * 2, len(esno) * 2, gr.sizeof_char*1))
self.esno = esno
self.samp_rate = samp_rate
self.encoder_list = encoder_list
self.decoder_list = decoder_list
self.puncpat = puncpat
self.random_gen_b_0 = blocks.vector_source_b(list(map(int, numpy.random.randint(0, 256, 100000))), True)
self.deinterleave = blocks.deinterleave(gr.sizeof_char*1)
self.connect(self.random_gen_b_0, self.deinterleave)
self.ber_generators = []
# FIXME It would be good to check that the encoder_list and
# decoder_list have parallelism set to > 0. If parallelism
# is set to 0, a map isn't passed and an indexing error is
# thrown on line 53 or 54 below.
for i in range(0, len(esno)):
ber_generator_temp = fec_test(
generic_encoder=encoder_list[i],
generic_decoder=decoder_list[i],
esno=esno[i],
samp_rate=samp_rate,
threading=threading,
puncpat=puncpat,
seed=seed)
            self.ber_generators.append(ber_generator_temp)
for i in range(0, len(esno)):
self.connect((self.deinterleave, i), (self.ber_generators[i]))
            self.connect((self.ber_generators[i], 0), (self, i*2))
            self.connect((self.ber_generators[i], 1), (self, i*2 + 1))
def get_esno(self):
return self.esno
def set_esno(self, esno):
self.esno = esno
self.ber_generator_0.set_esno(self.esno)
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.ber_generator_0.set_samp_rate(self.samp_rate)
def get_encoder_list(self):
return self.encoder_list
def set_encoder_list(self, encoder_list):
self.encoder_list = encoder_list
self.ber_generator_0.set_generic_encoder(self.encoder_list)
def get_decoder_list(self):
return self.decoder_list
def set_decoder_list(self, decoder_list):
self.decoder_list = decoder_list
self.ber_generator_0.set_generic_decoder(self.decoder_list)
def get_puncpat(self):
return self.puncpat
def set_puncpat(self, puncpat):
self.puncpat = puncpat
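# Hypothetical usage sketch (not part of the original GNU Radio file), illustrating the
# FIXME above: encoder_list and decoder_list need one encoder/decoder object per Es/N0
# point, each created with parallelism > 0. make_encoder/make_decoder stand in for
# whatever gr-fec factory the surrounding flowgraph actually uses (they are assumptions,
# not real gr-fec calls).
def _example_build_bercurve(make_encoder, make_decoder):
    esno = numpy.arange(0.0, 3.0, .25)
    encoder_list = [make_encoder() for _ in esno]  # one encoder per Es/N0 point
    decoder_list = [make_decoder() for _ in esno]  # one decoder per Es/N0 point
    return bercurve_generator(encoder_list, decoder_list, esno=esno)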
|
gpl-3.0
| 8,351,089,192,773,113,000 | 34.213592 | 112 | 0.639096 | false |
dvannoy/baxter
|
setup.py
|
1
|
1233
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# requirements = None
# with open('requirements.txt', 'r') as f:
# requirements = [
# line.split('==')[0]
# for line in f.read().split('\n')
# ]
# Read the version number
with open("baxter/_version.py") as f:
exec(f.read())
setup(name='baxter',
version=__version__, # use the same version that's in _version.py
packages=['baxter'],
license='LICENSE.txt',
description='libraries for data engineering, created by Pluralsight data team',
long_description=open('README.rst').read(),
install_requires=[
'httplib2',
'google-api-python-client',
'urllib3',
'oauth2client < 4.0.0',
'pyodbc',
'requests',
'psycopg2'
]
#,
#'impyla',
#'pexpect',
#'MySQL-python'
)
|
apache-2.0
| -6,166,464,703,021,763,000 | 29.825 | 85 | 0.505272 | false |
intel-analytics/analytics-zoo
|
pyzoo/zoo/orca/learn/tf2/tf_runner.py
|
1
|
19091
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 The Ray Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import os
import numpy as np
import ray
from contextlib import closing
import logging
import socket
from zoo.orca.data.utils import ray_partitions_get_data_label
logger = logging.getLogger(__name__)
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def _try_import_strategy():
"""Late import for Tesnorflow"""
import tensorflow as tf
return tf.distribute.experimental.MultiWorkerMirroredStrategy
class DatasetHandler:
def __init__(self, rank, size):
self.rank = rank
self.size = size
def handle_datasets_train(self, data_creator,
validation_data_creator,
config, epochs, steps_per_epoch,
validation_steps):
config, local_batch_size = self._handle_batch_size(config)
train_dataset = data_creator(config, config["batch_size"])
if isinstance(train_dataset, list) and \
all([isinstance(x, ray.ObjectID) for x in train_dataset]):
assert steps_per_epoch is not None, "steps_per_epoch must be provided for xshard"
train_dataset = self._handle_xshards(train_dataset,
steps=steps_per_epoch * epochs,
local_batch_size=local_batch_size,
shuffle=True)
else:
train_dataset = self._handle_sharding(train_dataset)
if validation_data_creator is not None:
test_dataset = validation_data_creator(config, config["batch_size"])
if isinstance(test_dataset, list) and \
all([isinstance(x, ray.ObjectID) for x in test_dataset]):
                assert validation_steps is not None, "validation_steps must be provided " \
                                                     "when using xshards for evaluate"
test_dataset = self._handle_xshards(test_dataset,
steps=validation_steps,
local_batch_size=local_batch_size,
shuffle=False)
else:
test_dataset = self._handle_sharding(test_dataset)
else:
test_dataset = None
return train_dataset, test_dataset
def handle_dataset_validation(self, data_creator, config, steps):
config, local_batch_size = self._handle_batch_size(config)
dataset = data_creator(config, config["batch_size"])
if isinstance(dataset, list) and all([isinstance(x, ray.ObjectID) for x in dataset]):
assert steps is not None, "steps must be provided for xshard"
dataset = self._handle_xshards(dataset,
steps=steps,
local_batch_size=local_batch_size,
shuffle=False)
else:
dataset = self._handle_sharding(dataset)
return dataset
def _handle_xshards(self, dataset, steps, local_batch_size, shuffle):
raise NotImplementedError
def _handle_sharding(self, dataset):
raise NotImplementedError
def _handle_batch_size(self, config):
raise NotImplementedError
@staticmethod
def get_handler(backend, rank, size):
if backend == "horovod":
            return HorovodDatasetHandler(rank, size)
if backend == "tf-distributed":
return TFDistributedDatasetHandler(rank, size)
if backend == "tf-local":
return LocalDatasetHandler(rank, size)
raise Exception(f"invalid backend: {backend}")
class HorovodDatasetHandler(DatasetHandler):
def _handle_xshards(self, dataset, steps, local_batch_size, shuffle):
import tensorflow as tf
data, label = ray_partitions_get_data_label(ray.get(dataset),
allow_tuple=True,
allow_list=False)
dataset = tf.data.Dataset.from_tensor_slices((data, label))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
dataset = dataset.with_options(options)
dataset = dataset.repeat()
dataset = dataset.take(steps * local_batch_size)
if shuffle:
dataset = dataset.shuffle(local_batch_size * min(steps, 10))
dataset = dataset.batch(local_batch_size)
return dataset
def _handle_sharding(self, dataset):
from tensorflow.python.distribute.input_ops import auto_shard_dataset
dataset = auto_shard_dataset(dataset, self.size, self.rank)
return dataset
def _handle_batch_size(self, config):
assert "batch_size" in config, "batch_size must be set in config"
config["batch_size"] = config["batch_size"] // self.size
return config, config["batch_size"]
class TFDistributedDatasetHandler(DatasetHandler):
def _handle_xshards(self, dataset, steps, local_batch_size, shuffle):
import tensorflow as tf
data, label = ray_partitions_get_data_label(ray.get(dataset),
allow_tuple=True,
allow_list=False)
def dataset_fn(input_context):
dataset = tf.data.Dataset.from_tensor_slices((data, label))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = \
tf.data.experimental.AutoShardPolicy.OFF
dataset = dataset.with_options(options)
dataset = dataset.repeat()
dataset = dataset.take(steps * local_batch_size)
if shuffle:
dataset = dataset.shuffle(local_batch_size * min(steps, 10))
dataset = dataset.batch(local_batch_size)
return dataset
from tensorflow.python.distribute import distribution_strategy_context as ds_context
strategy = ds_context.get_strategy()
dataset = strategy.experimental_distribute_datasets_from_function(dataset_fn)
return dataset
def _handle_sharding(self, dataset):
return dataset
def _handle_batch_size(self, config):
assert "batch_size" in config, "batch_size must be set in config"
local_batch_size = config["batch_size"] // self.size
return config, local_batch_size
class LocalDatasetHandler(DatasetHandler):
def _handle_xshards(self, dataset, steps, local_batch_size, shuffle):
import tensorflow as tf
data, label = ray_partitions_get_data_label(ray.get(dataset),
allow_tuple=True,
allow_list=False)
dataset = tf.data.Dataset.from_tensor_slices((data, label))
dataset = dataset.repeat()
dataset = dataset.take(steps * local_batch_size)
if shuffle:
dataset = dataset.shuffle(local_batch_size * min(steps, 10))
dataset = dataset.batch(local_batch_size)
return dataset
def _handle_sharding(self, dataset):
return dataset
def _handle_batch_size(self, config):
assert "batch_size" in config, "batch_size must be set in config"
return config, config["batch_size"]
class TFRunner:
"""Manages a TensorFlow model for training."""
def __init__(self, model_creator, compile_args_creator,
config=None,
verbose=False):
"""Initializes the runner.
Args:
model_creator (dict -> Model): see tf_trainer.py.
            compile_args_creator (dict -> dict of compile args): see tf_trainer.py.
config (dict): see tf_trainer.py.
verbose (bool): Outputs training data if true.
"""
self.model_creator = model_creator
self.compile_args_creator = compile_args_creator
self.config = {} if config is None else config
self.inter_op_parallelism = self.config.get("inter_op_parallelism", 1)
self.intra_op_parallelism = self.config.get("intra_op_parallelism", 1)
self.epoch = 0
self.verbose = verbose
def setup(self):
import tensorflow as tf
tf.config.threading.set_inter_op_parallelism_threads(self.inter_op_parallelism)
tf.config.threading.set_intra_op_parallelism_threads(self.intra_op_parallelism)
os.environ["KMP_BLOCKING_TIME"] = self.config.get("KMP_BLOCKING_TIME",
os.environ.get("KMP_BLOCKING_TIME", "0"))
def setup_local(self):
"""Initializes the model."""
logger.debug("Creating model")
self.model = self.model_creator(self.config)
self.model.compile(**self.compile_args_creator(self.config))
self.backend = "tf-local"
self.size = 1
self.rank = 0
from tensorflow.python.distribute import distribution_strategy_context as ds_context
self.strategy = ds_context.get_strategy()
def setup_horovod(self):
import horovod.tensorflow.keras as hvd
hvd.init()
self.model = self.model_creator(self.config)
compile_args = self.compile_args_creator(self.config)
compile_args["optimizer"] = hvd.DistributedOptimizer(compile_args["optimizer"])
self.model.compile(**compile_args)
self.backend = "horovod"
self.size = hvd.size()
self.rank = hvd.rank()
from tensorflow.python.distribute import distribution_strategy_context as ds_context
self.strategy = ds_context.get_strategy()
def setup_distributed(self, urls, world_rank, world_size):
"""Sets up TensorFLow distributed environment and initializes the model.
Args:
urls (str): the URLs that each node uses to connect.
world_rank (int): the index of the runner.
world_size (int): the total number of runners.
"""
assert len(urls) == world_size
tf_config = {
"cluster": {
"worker": urls
},
"task": {
"index": world_rank,
"type": "worker"
}
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)
no_proxy = os.environ.get("no_proxy", "")
ips = [url.split(":")[0] for url in urls]
os.environ["no_proxy"] = ",".join(ips) + "," + no_proxy
MultiWorkerMirroredStrategy = _try_import_strategy()
# MultiWorkerMirroredStrategy handles everything for us, from
# sharding the dataset (or even sharding the data itself if the loader
# reads files from disk) to merging the metrics and weight updates
#
# worker 0 is the "chief" worker and will handle the map-reduce
# every worker ends up with the exact same metrics and model
# after model.fit
#
# because of this, we only really ever need to query its state
self.strategy = MultiWorkerMirroredStrategy()
logger.debug("Creating model with MultiWorkerMirroredStrategy")
with self.strategy.scope():
self.model = self.model_creator(self.config)
# For use in model.evaluate()
self.local_model = None
self.backend = "tf-distributed"
self.size = world_size
self.rank = world_rank
def step(self, data_creator, epochs=1, batch_size=32, verbose=1,
callbacks=None, validation_data_creator=None, class_weight=None,
steps_per_epoch=None, validation_steps=None, validation_freq=1,
data_config=None):
"""Runs a training epoch and updates the model parameters."""
config = self.config.copy()
if data_config is not None:
config.update(data_config)
config["batch_size"] = batch_size
with self.strategy.scope():
dataset_handler = DatasetHandler.get_handler(self.backend, self.rank, self.size)
train_dataset, test_dataset = dataset_handler \
.handle_datasets_train(data_creator,
validation_data_creator,
config=config, epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
# process other arguments
if self.backend == "horovod":
import horovod.tensorflow.keras as hvd
hvd_callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0),
hvd.callbacks.MetricAverageCallback()]
if hvd.rank() != 0:
verbose = 0
if callbacks is not None:
callbacks = hvd_callbacks + callbacks
else:
callbacks = hvd_callbacks
elif self.backend == "tf-distributed":
if self.strategy.cluster_resolver.task_id != 0:
verbose = 0
history = self.model.fit(train_dataset,
epochs=self.epoch + epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=test_dataset,
class_weight=class_weight,
initial_epoch=self.epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq)
if history is None:
stats = {}
else:
stats = {"train_" + k: v[-1] for k, v in history.history.items()}
self.epoch += epochs
return [stats]
def validate(self, data_creator, batch_size=32, verbose=1, sample_weight=None,
steps=None, callbacks=None, data_config=None):
"""Evaluates the model on the validation data set."""
config = self.config.copy()
if data_config is not None:
config.update(data_config)
config["batch_size"] = batch_size
with self.strategy.scope():
dataset_handler = DatasetHandler.get_handler(self.backend,
self.rank,
self.size)
dataset = dataset_handler.handle_dataset_validation(data_creator,
config=config,
steps=steps)
if self.backend == "horovod":
import horovod.tensorflow.keras as hvd
if hvd.rank() != 0:
verbose = 0
elif self.backend == "tf-distributed":
if self.strategy.cluster_resolver.task_id != 0:
verbose = 0
params = dict(
verbose=verbose,
sample_weight=sample_weight,
steps=steps,
callbacks=callbacks,
)
results = self.model.evaluate(dataset, **params)
if results is None:
# Using local Model since model.evaluate() returns None
# for MultiWorkerMirroredStrategy
logger.warning("Running a local model to get validation score.")
self.local_model = self.model_creator(self.config)
self.local_model.set_weights(self.model.get_weights())
results = self.local_model.evaluate(dataset, **params)
if isinstance(results, list):
stats = {
"validation_" + k: v
for k, v in zip(self.model.metrics_names, results)
}
else:
stats = {"results": results}
return [stats]
def predict(self, data_creator, batch_size, verbose, steps, callbacks, data_config):
config = self.config.copy()
if data_config is not None:
config.update(data_config)
dataset = data_creator(config, batch_size)
if not isinstance(dataset, ray.ObjectID):
raise ValueError("Only xshards is supported for predict")
partition = ray.get(dataset)
params = dict(
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks,
)
if self.backend == "tf-distributed":
local_model = self.model_creator(self.config)
local_model.set_weights(self.model.get_weights())
else:
local_model = self.model
def predict_fn(shard):
y = local_model.predict(shard["x"], **params)
return {"prediction": y}
new_part = [predict_fn(shard) for shard in partition]
return new_part
def get_state(self):
"""Returns the state of the runner."""
return {
"epoch": self.epoch,
"weights": self.model.get_weights(),
"optimizer_weights": self.model.optimizer.get_weights()
}
def set_state(self, state):
"""Sets the state of the model."""
self.epoch = state["epoch"]
self.model.set_weights(state["weights"])
def shutdown(self):
"""Attempts to shut down the worker."""
del self.model
def get_node_ip(self):
"""Returns the IP address of the current node."""
return ray._private.services.get_node_ip_address()
def find_free_port(self):
"""Finds a free port on the current node."""
return find_free_port()
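# Hypothetical sketch (not part of the original Analytics Zoo file): the docstrings above
# refer to model_creator and compile_args_creator functions that receive the config dict.
# A minimal pair could look like the functions below; the layer sizes, optimizer, loss and
# the "lr" config key are illustrative assumptions only.
def _example_model_creator(config):
    import tensorflow as tf
    return tf.keras.Sequential([
        tf.keras.layers.Dense(10, activation="relu", input_shape=(4,)),
        tf.keras.layers.Dense(1),
    ])


def _example_compile_args_creator(config):
    import tensorflow as tf
    return {
        "optimizer": tf.keras.optimizers.Adam(config.get("lr", 1e-3)),
        "loss": "mse",
        "metrics": ["mae"],
    }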
|
apache-2.0
| 589,034,591,972,608,000 | 38.690229 | 100 | 0.577812 | false |
roebel/py_find_1st
|
setup.py
|
1
|
6624
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from setuptools import setup
#from distutils.core import setup
from distutils.extension import Extension
from distutils.command.build_ext import build_ext
from distutils.command.sdist import sdist
import hashlib
import io
from pkg_resources import parse_version
import numpy as np
import os
import sys
import re
import subprocess
def utf8_to_bytes(ss):
try:
return bytes(ss, encoding="UTF-8")
except TypeError :
return bytes(ss)
def compiler_is_clang(comp) :
print("check for clang compiler ...", end=' ')
try:
cc_output = subprocess.check_output(comp+['--version'],
stderr = subprocess.STDOUT, shell=False)
except OSError as ex:
print("compiler test call failed with error {0:d} msg: {1}".format(ex.errno, ex.strerror))
print("no")
return False
ret = re.search(b'clang', cc_output) is not None
if ret :
print("yes")
else:
print("no")
return ret
class build_ext_subclass( build_ext ):
def build_extensions(self):
#c = self.compiler.compiler_type
#print "compiler attr", self.compiler.__dict__
#print "compiler", self.compiler.compiler
#print "compiler is",c
try:
if compiler_is_clang(self.compiler.compiler):
for e in self.extensions:
#e.extra_compile_args.append('-stdlib=libc++')
e.extra_compile_args.append('-Wno-unused-function')
#for e in self.extensions:
# e.extra_link_args.append('-stdlib=libc++')
except AttributeError:
pass
build_ext.build_extensions(self)
find_1st_ext = Extension("find_1st", ["utils_find_1st/find_1st.cpp"],
include_dirs=[np.get_include()],
language="c++",
define_macros = [("NPY_NO_DEPRECATED_API", "NPY_1_13_API_VERSION") ])
ext_modules=[find_1st_ext]
with open('./requirements.txt') as f:
install_requires = [line.strip('\n') for line in f.readlines()]
# get _pysndfile version number
for line in open("utils_find_1st/__init__.py") :
if "version" in line:
version = re.split('[()]', line)[1].replace(',','.').replace(',','-',1).replace('"','').replace(' ','')
break
if sys.argv[1] == "get_version":
print(parse_version(version))
sys.exit(0)
README_path = os.path.join(os.path.dirname(__file__), 'README.md')
README_cksum_path = os.path.join(os.path.dirname(__file__), 'README.md.cksum')
def write_readme_checksum(rdlen, rdsum):
with open(README_cksum_path, "w") as fi:
print("{} {}".format(rdlen, rdsum), file=fi)
def read_readme_checksum():
try:
with open(README_cksum_path, "r") as fi:
rdlen, rdsum = fi.read().split()
return rdlen, rdsum
except IOError:
return 0, 0
def calc_readme_checksum():
readme = open(README_path).read()
readme_length = len(readme)
readme_sum = hashlib.sha256(utf8_to_bytes(readme)).hexdigest()
return readme_length, readme_sum
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def update_long_descr():
LONG_DESCR_path = os.path.join(os.path.dirname(__file__), 'LONG_DESCR')
crdck = calc_readme_checksum()
rrdck = read_readme_checksum()
if ((not os.path.exists(LONG_DESCR_path)) or rrdck[1] != crdck[1]):
if rrdck[1] != crdck[1]:
print("readme check sum {} does not match readme {}, recalculate LONG_DESCR".format(rrdck[1], crdck[1]))
try :
subprocess.check_call(["pandoc", "-f", "markdown", '-t', 'rst', '--ascii', '-o', LONG_DESCR_path, README_path], shell=False)
# pandoc version before 2.4 seems to write non ascii files even if the ascii flag is given
# fix this to ensure LONG_DESCR is ASCII, use io.open to make this work with python 2.7
with io.open(LONG_DESCR_path, "r", encoding="UTF-8") as fi:
# this creates a byte stream
inp = fi.read()
# replace utf8 characters that are generated by pandoc to ASCII
# and create a byte string
ascii_long_desc = inp.replace(u"’","'").replace(u"–","--").replace(u'“','"').replace(u'”','"')
with open(LONG_DESCR_path, "w") as fw:
fw.write(ascii_long_desc)
except (OSError, subprocess.CalledProcessError) as ex:
print("setup.py::error:: pandoc command failed. Cannot update LONG_DESCR.txt from modified README.md" + str(
ex))
write_readme_checksum(crdck[0], crdck[1])
return open(LONG_DESCR_path).read()
def read_long_descr():
LONG_DESCR_path = os.path.join(os.path.dirname(__file__), 'LONG_DESCR')
return open(LONG_DESCR_path).read()
class sdist_subclass(sdist) :
def run(self):
        # Make sure the generated LONG_DESCR in the distribution is up-to-date
update_long_descr()
sdist.run(self)
setup( name = "py_find_1st",
version = version,
packages = ['utils_find_1st'],
ext_package = 'utils_find_1st',
ext_modules = ext_modules,
author = "A. Roebel",
install_requires= install_requires,
description = "Numpy extension module for efficient search of first array index that compares true",
cmdclass = {'build_ext': build_ext_subclass, "sdist": sdist_subclass },
author_email = "axel.dot.roebel@ircam.dot.fr",
long_description = read_long_descr(),
license = "GPL",
url = "http://github.com/roebel/py_find_1st",
download_url = "https://github.com/roebel/py_find_1st/archive/v{0}.tar.gz".format(version),
keywords = "numpy,extension,find",
classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
],
# don't install as zip file because those cannot be analyzed by pyinstaller
zip_safe = False,
)
|
gpl-3.0
| 6,156,561,315,358,962,000 | 36.168539 | 136 | 0.5987 | false |
aitormf/JdeRobot
|
src/tools/3DVizWeb/test/jderobot/pose3dmotors_ice.py
|
1
|
19332
|
# -*- coding: utf-8 -*-
# **********************************************************************
#
# Copyright (c) 2003-2016 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
#
# Ice version 3.6.3
#
# <auto-generated>
#
# Generated from file `pose3dmotors.ice'
#
# Warning: do not edit this file.
#
# </auto-generated>
#
from sys import version_info as _version_info_
import Ice, IcePy
# Start of module jderobot
_M_jderobot = Ice.openModule('jderobot')
__name__ = 'jderobot'
if 'Time' not in _M_jderobot.__dict__:
_M_jderobot.Time = Ice.createTempClass()
class Time(object):
def __init__(self, seconds=0, useconds=0):
self.seconds = seconds
self.useconds = useconds
def __hash__(self):
_h = 0
_h = 5 * _h + Ice.getHash(self.seconds)
_h = 5 * _h + Ice.getHash(self.useconds)
return _h % 0x7fffffff
def __compare(self, other):
if other is None:
return 1
elif not isinstance(other, _M_jderobot.Time):
return NotImplemented
else:
if self.seconds is None or other.seconds is None:
if self.seconds != other.seconds:
return (-1 if self.seconds is None else 1)
else:
if self.seconds < other.seconds:
return -1
elif self.seconds > other.seconds:
return 1
if self.useconds is None or other.useconds is None:
if self.useconds != other.useconds:
return (-1 if self.useconds is None else 1)
else:
if self.useconds < other.useconds:
return -1
elif self.useconds > other.useconds:
return 1
return 0
def __lt__(self, other):
r = self.__compare(other)
if r is NotImplemented:
return r
else:
return r < 0
def __le__(self, other):
r = self.__compare(other)
if r is NotImplemented:
return r
else:
return r <= 0
def __gt__(self, other):
r = self.__compare(other)
if r is NotImplemented:
return r
else:
return r > 0
def __ge__(self, other):
r = self.__compare(other)
if r is NotImplemented:
return r
else:
return r >= 0
def __eq__(self, other):
r = self.__compare(other)
if r is NotImplemented:
return r
else:
return r == 0
def __ne__(self, other):
r = self.__compare(other)
if r is NotImplemented:
return r
else:
return r != 0
def __str__(self):
return IcePy.stringify(self, _M_jderobot._t_Time)
__repr__ = __str__
_M_jderobot._t_Time = IcePy.defineStruct('::jderobot::Time', Time, (), (
('seconds', (), IcePy._t_long),
('useconds', (), IcePy._t_long)
))
_M_jderobot.Time = Time
del Time
# End of module jderobot
# Start of module jderobot
__name__ = 'jderobot'
if 'JderobotException' not in _M_jderobot.__dict__:
_M_jderobot.JderobotException = Ice.createTempClass()
class JderobotException(Ice.UserException):
def __init__(self, what=''):
self.what = what
def __str__(self):
return IcePy.stringifyException(self)
__repr__ = __str__
_ice_name = 'jderobot::JderobotException'
_M_jderobot._t_JderobotException = IcePy.defineException('::jderobot::JderobotException', JderobotException, (), False, None, (('what', (), IcePy._t_string, False, 0),))
JderobotException._ice_type = _M_jderobot._t_JderobotException
_M_jderobot.JderobotException = JderobotException
del JderobotException
if 'ConfigurationNotExistException' not in _M_jderobot.__dict__:
_M_jderobot.ConfigurationNotExistException = Ice.createTempClass()
class ConfigurationNotExistException(_M_jderobot.JderobotException):
def __init__(self, what=''):
_M_jderobot.JderobotException.__init__(self, what)
def __str__(self):
return IcePy.stringifyException(self)
__repr__ = __str__
_ice_name = 'jderobot::ConfigurationNotExistException'
_M_jderobot._t_ConfigurationNotExistException = IcePy.defineException('::jderobot::ConfigurationNotExistException', ConfigurationNotExistException, (), False, _M_jderobot._t_JderobotException, ())
ConfigurationNotExistException._ice_type = _M_jderobot._t_ConfigurationNotExistException
_M_jderobot.ConfigurationNotExistException = ConfigurationNotExistException
del ConfigurationNotExistException
if 'DataNotExistException' not in _M_jderobot.__dict__:
_M_jderobot.DataNotExistException = Ice.createTempClass()
class DataNotExistException(_M_jderobot.JderobotException):
def __init__(self, what=''):
_M_jderobot.JderobotException.__init__(self, what)
def __str__(self):
return IcePy.stringifyException(self)
__repr__ = __str__
_ice_name = 'jderobot::DataNotExistException'
_M_jderobot._t_DataNotExistException = IcePy.defineException('::jderobot::DataNotExistException', DataNotExistException, (), False, _M_jderobot._t_JderobotException, ())
DataNotExistException._ice_type = _M_jderobot._t_DataNotExistException
_M_jderobot.DataNotExistException = DataNotExistException
del DataNotExistException
if 'HardwareFailedException' not in _M_jderobot.__dict__:
_M_jderobot.HardwareFailedException = Ice.createTempClass()
class HardwareFailedException(_M_jderobot.JderobotException):
def __init__(self, what=''):
_M_jderobot.JderobotException.__init__(self, what)
def __str__(self):
return IcePy.stringifyException(self)
__repr__ = __str__
_ice_name = 'jderobot::HardwareFailedException'
_M_jderobot._t_HardwareFailedException = IcePy.defineException('::jderobot::HardwareFailedException', HardwareFailedException, (), False, _M_jderobot._t_JderobotException, ())
HardwareFailedException._ice_type = _M_jderobot._t_HardwareFailedException
_M_jderobot.HardwareFailedException = HardwareFailedException
del HardwareFailedException
if 'NoTopicException' not in _M_jderobot.__dict__:
_M_jderobot.NoTopicException = Ice.createTempClass()
class NoTopicException(_M_jderobot.JderobotException):
def __init__(self, what=''):
_M_jderobot.JderobotException.__init__(self, what)
def __str__(self):
return IcePy.stringifyException(self)
__repr__ = __str__
_ice_name = 'jderobot::NoTopicException'
_M_jderobot._t_NoTopicException = IcePy.defineException('::jderobot::NoTopicException', NoTopicException, (), False, _M_jderobot._t_JderobotException, ())
NoTopicException._ice_type = _M_jderobot._t_NoTopicException
_M_jderobot.NoTopicException = NoTopicException
del NoTopicException
if 'SubscriptionFailedException' not in _M_jderobot.__dict__:
_M_jderobot.SubscriptionFailedException = Ice.createTempClass()
class SubscriptionFailedException(_M_jderobot.JderobotException):
def __init__(self, what=''):
_M_jderobot.JderobotException.__init__(self, what)
def __str__(self):
return IcePy.stringifyException(self)
__repr__ = __str__
_ice_name = 'jderobot::SubscriptionFailedException'
_M_jderobot._t_SubscriptionFailedException = IcePy.defineException('::jderobot::SubscriptionFailedException', SubscriptionFailedException, (), False, _M_jderobot._t_JderobotException, ())
SubscriptionFailedException._ice_type = _M_jderobot._t_SubscriptionFailedException
_M_jderobot.SubscriptionFailedException = SubscriptionFailedException
del SubscriptionFailedException
if 'SubscriptionPushFailedException' not in _M_jderobot.__dict__:
_M_jderobot.SubscriptionPushFailedException = Ice.createTempClass()
class SubscriptionPushFailedException(_M_jderobot.JderobotException):
def __init__(self, what=''):
_M_jderobot.JderobotException.__init__(self, what)
def __str__(self):
return IcePy.stringifyException(self)
__repr__ = __str__
_ice_name = 'jderobot::SubscriptionPushFailedException'
_M_jderobot._t_SubscriptionPushFailedException = IcePy.defineException('::jderobot::SubscriptionPushFailedException', SubscriptionPushFailedException, (), False, _M_jderobot._t_JderobotException, ())
SubscriptionPushFailedException._ice_type = _M_jderobot._t_SubscriptionPushFailedException
_M_jderobot.SubscriptionPushFailedException = SubscriptionPushFailedException
del SubscriptionPushFailedException
# End of module jderobot
# Start of module jderobot
__name__ = 'jderobot'
if '_t_ByteSeq' not in _M_jderobot.__dict__:
_M_jderobot._t_ByteSeq = IcePy.defineSequence('::jderobot::ByteSeq', (), IcePy._t_byte)
if '_t_IntSeq' not in _M_jderobot.__dict__:
_M_jderobot._t_IntSeq = IcePy.defineSequence('::jderobot::IntSeq', (), IcePy._t_int)
if '_t_seqFloat' not in _M_jderobot.__dict__:
_M_jderobot._t_seqFloat = IcePy.defineSequence('::jderobot::seqFloat', (), IcePy._t_float)
# End of module jderobot
# Start of module jderobot
__name__ = 'jderobot'
# End of module jderobot
# Start of module jderobot
__name__ = 'jderobot'
if 'Pose3DMotorsData' not in _M_jderobot.__dict__:
_M_jderobot.Pose3DMotorsData = Ice.createTempClass()
class Pose3DMotorsData(Ice.Object):
"""
Pose3DMotorsData a class that contains the pantilt data
"""
def __init__(self, x=0.0, y=0.0, z=0.0, pan=0.0, tilt=0.0, roll=0.0, panSpeed=0.0, tiltSpeed=0.0):
self.x = x
self.y = y
self.z = z
self.pan = pan
self.tilt = tilt
self.roll = roll
self.panSpeed = panSpeed
self.tiltSpeed = tiltSpeed
def ice_ids(self, current=None):
return ('::Ice::Object', '::jderobot::Pose3DMotorsData')
def ice_id(self, current=None):
return '::jderobot::Pose3DMotorsData'
def ice_staticId():
return '::jderobot::Pose3DMotorsData'
ice_staticId = staticmethod(ice_staticId)
def __str__(self):
return IcePy.stringify(self, _M_jderobot._t_Pose3DMotorsData)
__repr__ = __str__
_M_jderobot.Pose3DMotorsDataPrx = Ice.createTempClass()
class Pose3DMotorsDataPrx(Ice.ObjectPrx):
def checkedCast(proxy, facetOrCtx=None, _ctx=None):
return _M_jderobot.Pose3DMotorsDataPrx.ice_checkedCast(proxy, '::jderobot::Pose3DMotorsData', facetOrCtx, _ctx)
checkedCast = staticmethod(checkedCast)
def uncheckedCast(proxy, facet=None):
return _M_jderobot.Pose3DMotorsDataPrx.ice_uncheckedCast(proxy, facet)
uncheckedCast = staticmethod(uncheckedCast)
def ice_staticId():
return '::jderobot::Pose3DMotorsData'
ice_staticId = staticmethod(ice_staticId)
_M_jderobot._t_Pose3DMotorsDataPrx = IcePy.defineProxy('::jderobot::Pose3DMotorsData', Pose3DMotorsDataPrx)
_M_jderobot._t_Pose3DMotorsData = IcePy.defineClass('::jderobot::Pose3DMotorsData', Pose3DMotorsData, -1, (), False, False, None, (), (
('x', (), IcePy._t_float, False, 0),
('y', (), IcePy._t_float, False, 0),
('z', (), IcePy._t_float, False, 0),
('pan', (), IcePy._t_float, False, 0),
('tilt', (), IcePy._t_float, False, 0),
('roll', (), IcePy._t_float, False, 0),
('panSpeed', (), IcePy._t_float, False, 0),
('tiltSpeed', (), IcePy._t_float, False, 0)
))
Pose3DMotorsData._ice_type = _M_jderobot._t_Pose3DMotorsData
_M_jderobot.Pose3DMotorsData = Pose3DMotorsData
del Pose3DMotorsData
_M_jderobot.Pose3DMotorsDataPrx = Pose3DMotorsDataPrx
del Pose3DMotorsDataPrx
if 'Pose3DMotorsParams' not in _M_jderobot.__dict__:
_M_jderobot.Pose3DMotorsParams = Ice.createTempClass()
class Pose3DMotorsParams(Ice.Object):
"""
Pose3DMotorsParams a class that contains the motors parametres.
"""
def __init__(self, maxPan=0.0, minPan=0.0, maxTilt=0.0, minTilt=0.0, maxPanSpeed=0.0, maxTiltSpeed=0.0):
self.maxPan = maxPan
self.minPan = minPan
self.maxTilt = maxTilt
self.minTilt = minTilt
self.maxPanSpeed = maxPanSpeed
self.maxTiltSpeed = maxTiltSpeed
def ice_ids(self, current=None):
return ('::Ice::Object', '::jderobot::Pose3DMotorsParams')
def ice_id(self, current=None):
return '::jderobot::Pose3DMotorsParams'
def ice_staticId():
return '::jderobot::Pose3DMotorsParams'
ice_staticId = staticmethod(ice_staticId)
def __str__(self):
return IcePy.stringify(self, _M_jderobot._t_Pose3DMotorsParams)
__repr__ = __str__
_M_jderobot.Pose3DMotorsParamsPrx = Ice.createTempClass()
class Pose3DMotorsParamsPrx(Ice.ObjectPrx):
def checkedCast(proxy, facetOrCtx=None, _ctx=None):
return _M_jderobot.Pose3DMotorsParamsPrx.ice_checkedCast(proxy, '::jderobot::Pose3DMotorsParams', facetOrCtx, _ctx)
checkedCast = staticmethod(checkedCast)
def uncheckedCast(proxy, facet=None):
return _M_jderobot.Pose3DMotorsParamsPrx.ice_uncheckedCast(proxy, facet)
uncheckedCast = staticmethod(uncheckedCast)
def ice_staticId():
return '::jderobot::Pose3DMotorsParams'
ice_staticId = staticmethod(ice_staticId)
_M_jderobot._t_Pose3DMotorsParamsPrx = IcePy.defineProxy('::jderobot::Pose3DMotorsParams', Pose3DMotorsParamsPrx)
_M_jderobot._t_Pose3DMotorsParams = IcePy.defineClass('::jderobot::Pose3DMotorsParams', Pose3DMotorsParams, -1, (), False, False, None, (), (
('maxPan', (), IcePy._t_float, False, 0),
('minPan', (), IcePy._t_float, False, 0),
('maxTilt', (), IcePy._t_float, False, 0),
('minTilt', (), IcePy._t_float, False, 0),
('maxPanSpeed', (), IcePy._t_float, False, 0),
('maxTiltSpeed', (), IcePy._t_float, False, 0)
))
Pose3DMotorsParams._ice_type = _M_jderobot._t_Pose3DMotorsParams
_M_jderobot.Pose3DMotorsParams = Pose3DMotorsParams
del Pose3DMotorsParams
_M_jderobot.Pose3DMotorsParamsPrx = Pose3DMotorsParamsPrx
del Pose3DMotorsParamsPrx
if 'Pose3DMotors' not in _M_jderobot.__dict__:
_M_jderobot.Pose3DMotors = Ice.createTempClass()
class Pose3DMotors(Ice.Object):
"""
Interface to the Pose3DMotors Actuators interaction.
"""
def __init__(self):
if Ice.getType(self) == _M_jderobot.Pose3DMotors:
raise RuntimeError('jderobot.Pose3DMotors is an abstract class')
def ice_ids(self, current=None):
return ('::Ice::Object', '::jderobot::Pose3DMotors')
def ice_id(self, current=None):
return '::jderobot::Pose3DMotors'
def ice_staticId():
return '::jderobot::Pose3DMotors'
ice_staticId = staticmethod(ice_staticId)
def setPose3DMotorsData(self, data, current=None):
pass
def getPose3DMotorsData(self, current=None):
pass
def getPose3DMotorsParams(self, current=None):
pass
def __str__(self):
return IcePy.stringify(self, _M_jderobot._t_Pose3DMotors)
__repr__ = __str__
_M_jderobot.Pose3DMotorsPrx = Ice.createTempClass()
class Pose3DMotorsPrx(Ice.ObjectPrx):
def setPose3DMotorsData(self, data, _ctx=None):
return _M_jderobot.Pose3DMotors._op_setPose3DMotorsData.invoke(self, ((data, ), _ctx))
def begin_setPose3DMotorsData(self, data, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_jderobot.Pose3DMotors._op_setPose3DMotorsData.begin(self, ((data, ), _response, _ex, _sent, _ctx))
def end_setPose3DMotorsData(self, _r):
return _M_jderobot.Pose3DMotors._op_setPose3DMotorsData.end(self, _r)
def getPose3DMotorsData(self, _ctx=None):
return _M_jderobot.Pose3DMotors._op_getPose3DMotorsData.invoke(self, ((), _ctx))
def begin_getPose3DMotorsData(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_jderobot.Pose3DMotors._op_getPose3DMotorsData.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getPose3DMotorsData(self, _r):
return _M_jderobot.Pose3DMotors._op_getPose3DMotorsData.end(self, _r)
def getPose3DMotorsParams(self, _ctx=None):
return _M_jderobot.Pose3DMotors._op_getPose3DMotorsParams.invoke(self, ((), _ctx))
def begin_getPose3DMotorsParams(self, _response=None, _ex=None, _sent=None, _ctx=None):
return _M_jderobot.Pose3DMotors._op_getPose3DMotorsParams.begin(self, ((), _response, _ex, _sent, _ctx))
def end_getPose3DMotorsParams(self, _r):
return _M_jderobot.Pose3DMotors._op_getPose3DMotorsParams.end(self, _r)
def checkedCast(proxy, facetOrCtx=None, _ctx=None):
return _M_jderobot.Pose3DMotorsPrx.ice_checkedCast(proxy, '::jderobot::Pose3DMotors', facetOrCtx, _ctx)
checkedCast = staticmethod(checkedCast)
def uncheckedCast(proxy, facet=None):
return _M_jderobot.Pose3DMotorsPrx.ice_uncheckedCast(proxy, facet)
uncheckedCast = staticmethod(uncheckedCast)
def ice_staticId():
return '::jderobot::Pose3DMotors'
ice_staticId = staticmethod(ice_staticId)
_M_jderobot._t_Pose3DMotorsPrx = IcePy.defineProxy('::jderobot::Pose3DMotors', Pose3DMotorsPrx)
_M_jderobot._t_Pose3DMotors = IcePy.defineClass('::jderobot::Pose3DMotors', Pose3DMotors, -1, (), True, False, None, (), ())
Pose3DMotors._ice_type = _M_jderobot._t_Pose3DMotors
Pose3DMotors._op_setPose3DMotorsData = IcePy.Operation('setPose3DMotorsData', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (((), _M_jderobot._t_Pose3DMotorsData, False, 0),), (), ((), IcePy._t_int, False, 0), ())
Pose3DMotors._op_getPose3DMotorsData = IcePy.Operation('getPose3DMotorsData', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, False, None, (), (), (), ((), _M_jderobot._t_Pose3DMotorsData, False, 0), ())
Pose3DMotors._op_getPose3DMotorsParams = IcePy.Operation('getPose3DMotorsParams', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, False, None, (), (), (), ((), _M_jderobot._t_Pose3DMotorsParams, False, 0), ())
_M_jderobot.Pose3DMotors = Pose3DMotors
del Pose3DMotors
_M_jderobot.Pose3DMotorsPrx = Pose3DMotorsPrx
del Pose3DMotorsPrx
# End of module jderobot
|
gpl-3.0
| -2,707,966,393,643,148,300 | 37.50996 | 240 | 0.626371 | false |
basimkhajwal/RaspberryPi
|
utils/ReadResistance.py
|
1
|
3314
|
import RPi.GPIO as GPIO, time, os
#Constants
NUM_TESTS = 3
NUM_TESTS_PER_VALUE = 5
def readResistance(RCpin):
'''
Read the resistance with the pin stated in the input, returns an integer with the number
of clock cycles passed
'''
#Discharge the pins and capacitor
GPIO.setup(RCpin, GPIO.OUT)
GPIO.output(RCpin, GPIO.LOW)
time.sleep(0.1)
#Get the time from before
reading = 0 #Set the initial reading to 0
#Start the count down
GPIO.setup(RCpin, GPIO.IN)
# This takes about 1 millisecond per loop cycle
while (GPIO.input(RCpin) == GPIO.LOW):
reading += 1
#Return the results
return reading
#Test code if the file is actually being run
if __name__ == "__main__":
try:
GPIO.setmode(GPIO.BCM)
#Get the user input
pin = input("Which GPIO pin are you using? ")
capValue = input("How many microFarads is the capacitor? ")
#Input was in microFarads so divide accordingly
capValue /= 1000000.0
print "\nWe will first run some resistor tests to have a more accurate reading. Connect a resistor with a known resistance to the circuit and follow the instructions"
print "Test atleast one value but then just press enter to quit at any time"
#Set the initial ratio, needs to be changed
ratio = 0
num = 0
for test in range(NUM_TESTS):
try:
resValue = input("\nTest " + str(test + 1) + ": resistor size (ohms): ")
except Exception:
if ratio == 0:
continue
break
values = []
average = 0.0
print "Calculating..."
#Read some values
for i in range(NUM_TESTS_PER_VALUE):
values.append(readResistance(pin))
average += values[i]
time.sleep(0.1)
#Take the average
average /= NUM_TESTS_PER_VALUE
print "Average No. of Clock Cycles: %f" % (average)
#This is the time it should take for the
#capacitor to charge in an RC circuit
exactTime = resValue * capValue
#Add the ratio of the time found and the clock cycles
ratio += (exactTime / average)
num += 1
#Take the average of the ratios
ratio /= num
print "\nTests completed\n"
#Get the sleep time limit
        timeLimit = min(max(0.2, input("How often to update resistance (seconds, 0.2 < s < 5): ")), 5)
#Loop while user is running
while True:
#Get the number of cycles
numCycles = readResistance(pin)
#Predict the resistance in ohms
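            #ratio was calibrated above as seconds per clock cycle, so
            #numCycles * ratio is roughly the charge time t; for an RC circuit
            #t ~ R*C, hence R ~ t / C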
resistance = (numCycles * ratio) / capValue
#Print the results
print "Number Of Clock Cycles: %d" % (numCycles)
print "Predicted Resistance: %f Ohms\n" % (resistance)
#Sleep for the desired time
time.sleep(timeLimit)
except KeyboardInterrupt:
GPIO.cleanup()
|
mit
| -3,765,451,516,040,908,300 | 29.685185 | 174 | 0.538624 | false |
zinic/netbox_api
|
netbox_api/model/rack.py
|
1
|
3137
|
from netbox_api.model.common import CustomFields
from enum import Enum
class RackWidthConstant(Enum):
WIDTH_19_INCHES = 19
WIDTH_23_INCHES = 23
class RackTypeConstant(Enum):
FRAME_2_POST = 100
FRAME_4_POST = 200
CABINET_4_POST = 300
FRAME_WALL_MOUNTED = 1000
CABINET_WALL_MOUNTED = 1100
class RackSite(object):
def __init__(self, id=None, url=None, name=None, slug=None):
self.id = id
self.url = url
self.name = name
self.slug = slug
@classmethod
def from_dict(cls, contents):
if contents is None:
return cls()
return cls(**contents)
class RackGroup(object):
def __init__(self, id=None, url=None, name=None, slug=None):
self.id = id
self.url = url
self.name = name
self.slug = slug
@classmethod
def from_dict(cls, contents):
if contents is None:
return cls()
return cls(**contents)
class RackTenant(object):
def __init__(self, id=None, url=None, name=None, slug=None):
self.id = id
self.url = url
self.name = name
self.slug = slug
@classmethod
def from_dict(cls, contents):
if contents is None:
return cls()
return cls(**contents)
class RackRole(object):
def __init__(self, id=None, url=None, name=None, slug=None):
self.id = id
self.url = url
self.name = name
self.slug = slug
@classmethod
def from_dict(cls, contents):
if contents is None:
return cls()
return cls(**contents)
class RackType(object):
def __init__(self, value=None, label=None):
self.value = value
self.label = label
@classmethod
def from_dict(cls, contents):
if contents is None:
return cls()
return cls(**contents)
class RackWidth(object):
def __init__(self, value=None, label=None):
self.value = value
self.label = label
@classmethod
def from_dict(cls, contents):
if contents is None:
return cls()
return cls(**contents)
class Rack(object):
def __init__(self, site=None, group=None, tenant=None, role=None, type=None, width=None, custom_fields=None,
id=None, name=None, facility_id=None, display_name=None, u_height=None, desc_units=None,
comments=None):
self.site = RackSite.from_dict(site)
self.group = RackGroup.from_dict(group)
self.tenant = RackTenant.from_dict(tenant)
self.role = RackRole.from_dict(role)
self.type = RackType.from_dict(type)
self.width = RackWidth.from_dict(width)
self.custom_fields = CustomFields.from_dict(custom_fields)
self.id = id
self.name = name
self.facility_id = facility_id
self.display_name = display_name
self.u_height = u_height
self.desc_units = desc_units
self.comments = comments
@classmethod
def from_dict(cls, contents):
if contents is None:
return cls()
return cls(**contents)
|
mit
| 2,123,164,056,186,972,000 | 23.507813 | 112 | 0.586548 | false |
linuxmuster/linuxmuster-base7
|
share/firewall/opnsense/create-keytab.py
|
1
|
2968
|
#!/usr/bin/python3
#
# create web proxy sso keytab
# thomas@linuxmuster.net
# 20200311
#
import constants
import getopt
import os
import sys
from functions import datetime
from functions import firewallApi
from functions import getSetupValue
from functions import printScript
from functions import readTextfile
# check first if firewall is skipped by setup
skipfw = getSetupValue('skipfw')
if skipfw == 'True':
printScript('Firewall is skipped by setup!')
sys.exit(0)
def usage():
print('Usage: create-keytab.py [options]')
    print('Creates opnsense web proxy sso keytab.')
print('If adminpw is omitted saved administrator credentials are used.')
print(' [options] may be:')
print(' -a <adminpw>, --adminpw=<adminpw>: global-admin password (optional)')
print(' -c, --check : check only the presence of keytable file')
print(' -v, --verbose : be more verbose')
print(' -h, --help : print this help')
# get cli args
try:
opts, args = getopt.getopt(sys.argv[1:], "a:chv", ["adminpw=", "check", "help", "verbose"])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
verbose = False
adminpw = None
adminlogin = 'global-admin'
check = False
# evaluate options
for o, a in opts:
if o in ("-v", "--verbose"):
verbose = True
elif o in ("-a", "--adminpw"):
adminpw = a
elif o in ("-c", "--check"):
check = True
elif o in ("-h", "--help"):
usage()
sys.exit()
else:
assert False, "unhandled option"
now = str(datetime.datetime.now()).split('.')[0]
printScript('create-keytab.py ' + now)
if not check:
# get firewall ip from setupini
firewallip = getSetupValue('firewallip')
# get administrator credentials if global-admin password was not provided
if adminpw is None:
rc, adminpw = readTextfile(constants.ADADMINSECRET)
adminlogin = 'administrator'
# reload relevant services
sshconnect = 'ssh -q -oBatchmode=yes -oStrictHostKeyChecking=accept-new ' + firewallip
for item in ['unbound', 'squid']:
printScript('Restarting ' + item)
sshcmd = sshconnect + ' pluginctl -s ' + item + ' restart'
rc = os.system(sshcmd)
if rc != 0:
sys.exit(1)
# create keytab
payload = '{"admin_login": "' + adminlogin + '", "admin_password": "' + adminpw + '"}'
apipath = '/proxysso/service/createkeytab'
res = firewallApi('post', apipath, payload)
if verbose:
print(res)
# check success
keytabtest = 'No keytab'
apipath = '/proxysso/service/showkeytab'
res = firewallApi('get', apipath)
if verbose:
print(res)
if keytabtest in str(res):
rc = 1
printScript('Keytab is not present :-(')
else:
rc = 0
printScript('Keytab is present :-)')
sys.exit(rc)
|
gpl-3.0
| -4,581,221,292,962,990,000 | 25.738739 | 95 | 0.632075 | false |
taolei87/rcnn
|
code/rationale/rationale.py
|
1
|
24504
|
import os, sys, gzip
import time
import math
import json
import cPickle as pickle
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from nn import create_optimization_updates, get_activation_by_name, sigmoid, linear
from nn import EmbeddingLayer, Layer, LSTM, RCNN, apply_dropout, default_rng
from utils import say
import myio
import options
from extended_layers import ExtRCNN, ExtLSTM
class Generator(object):
def __init__(self, args, embedding_layer, nclasses, encoder):
self.args = args
self.embedding_layer = embedding_layer
self.nclasses = nclasses
self.encoder = encoder
def ready(self):
encoder = self.encoder
embedding_layer = self.embedding_layer
args = self.args
padding_id = embedding_layer.vocab_map["<padding>"]
dropout = self.dropout = encoder.dropout
# len*batch
x = self.x = encoder.x
z = self.z = encoder.z
n_d = args.hidden_dimension
n_e = embedding_layer.n_d
activation = get_activation_by_name(args.activation)
layers = self.layers = [ ]
layer_type = args.layer.lower()
for i in xrange(2):
if layer_type == "rcnn":
l = RCNN(
n_in = n_e,# if i == 0 else n_d,
n_out = n_d,
activation = activation,
order = args.order
)
elif layer_type == "lstm":
l = LSTM(
n_in = n_e,# if i == 0 else n_d,
n_out = n_d,
activation = activation
)
layers.append(l)
# len * batch
#masks = T.cast(T.neq(x, padding_id), theano.config.floatX)
masks = T.cast(T.neq(x, padding_id), "int8").dimshuffle((0,1,"x"))
# (len*batch)*n_e
embs = embedding_layer.forward(x.ravel())
# len*batch*n_e
embs = embs.reshape((x.shape[0], x.shape[1], n_e))
embs = apply_dropout(embs, dropout)
flipped_embs = embs[::-1]
# len*bacth*n_d
h1 = layers[0].forward_all(embs)
h2 = layers[1].forward_all(flipped_embs)
h_final = T.concatenate([h1, h2[::-1]], axis=2)
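        # h1 reads the sequence forward and h2 backward (computed on the flipped
        # embeddings, then re-reversed), so h_final is a bidirectional encoding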
h_final = apply_dropout(h_final, dropout)
size = n_d * 2
output_layer = self.output_layer = Layer(
n_in = size,
n_out = 1,
activation = sigmoid
)
# len*batch*1
probs = output_layer.forward(h_final)
# len*batch
probs2 = probs.reshape(x.shape)
self.MRG_rng = MRG_RandomStreams()
z_pred = self.z_pred = T.cast(self.MRG_rng.binomial(size=probs2.shape, p=probs2), "int8")
# we are computing approximated gradient by sampling z;
# so should mark sampled z not part of the gradient propagation path
#
self.z_pred = theano.gradient.disconnected_grad(z_pred)
z2 = z.dimshuffle((0,1,"x"))
logpz = - T.nnet.binary_crossentropy(probs, z2) * masks
logpz = self.logpz = logpz.reshape(x.shape)
probs = self.probs = probs.reshape(x.shape)
# batch
zsum = T.sum(z, axis=0, dtype=theano.config.floatX)
zdiff = T.sum(T.abs_(z[1:]-z[:-1]), axis=0, dtype=theano.config.floatX)
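        # zsum counts how many positions are selected per sample; zdiff counts
        # 0/1 transitions, so penalizing it favors short, contiguous rationales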
loss_mat = encoder.loss_mat
if args.aspect < 0:
loss_vec = T.mean(loss_mat, axis=1)
else:
assert args.aspect < self.nclasses
loss_vec = loss_mat[:,args.aspect]
self.loss_vec = loss_vec
coherent_factor = args.sparsity * args.coherent
loss = self.loss = T.mean(loss_vec)
sparsity_cost = self.sparsity_cost = T.mean(zsum) * args.sparsity + \
T.mean(zdiff) * coherent_factor
cost_vec = loss_vec + zsum * args.sparsity + zdiff * coherent_factor
cost_logpz = T.mean(cost_vec * T.sum(logpz, axis=0))
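        # weighting the log-probability of the sampled z by its cost gives a
        # REINFORCE-style (score-function) gradient estimate for the generator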
self.obj = T.mean(cost_vec)
params = self.params = [ ]
for l in layers + [ output_layer ]:
for p in l.params:
params.append(p)
nparams = sum(len(x.get_value(borrow=True).ravel()) \
for x in params)
say("total # parameters: {}\n".format(nparams))
l2_cost = None
for p in params:
if l2_cost is None:
l2_cost = T.sum(p**2)
else:
l2_cost = l2_cost + T.sum(p**2)
l2_cost = l2_cost * args.l2_reg
cost = self.cost = cost_logpz * 10 + l2_cost
print "cost.dtype", cost.dtype
self.cost_e = loss * 10 + encoder.l2_cost
class Encoder(object):
def __init__(self, args, embedding_layer, nclasses):
self.args = args
self.embedding_layer = embedding_layer
self.nclasses = nclasses
def ready(self):
embedding_layer = self.embedding_layer
args = self.args
padding_id = embedding_layer.vocab_map["<padding>"]
dropout = self.dropout = theano.shared(
np.float64(args.dropout).astype(theano.config.floatX)
)
# len*batch
x = self.x = T.imatrix()
z = self.z = T.bmatrix()
z = z.dimshuffle((0,1,"x"))
# batch*nclasses
y = self.y = T.fmatrix()
n_d = args.hidden_dimension
n_e = embedding_layer.n_d
activation = get_activation_by_name(args.activation)
layers = self.layers = [ ]
depth = args.depth
layer_type = args.layer.lower()
for i in xrange(depth):
if layer_type == "rcnn":
l = ExtRCNN(
n_in = n_e if i == 0 else n_d,
n_out = n_d,
activation = activation,
order = args.order
)
elif layer_type == "lstm":
l = ExtLSTM(
n_in = n_e if i == 0 else n_d,
n_out = n_d,
activation = activation
)
layers.append(l)
# len * batch * 1
masks = T.cast(T.neq(x, padding_id).dimshuffle((0,1,"x")) * z, theano.config.floatX)
# batch * 1
cnt_non_padding = T.sum(masks, axis=0) + 1e-8
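        # the 1e-8 term avoids division by zero in the mean pooling below when a
        # sample has no selected, non-padding positions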
# (len*batch)*n_e
embs = embedding_layer.forward(x.ravel())
# len*batch*n_e
embs = embs.reshape((x.shape[0], x.shape[1], n_e))
embs = apply_dropout(embs, dropout)
pooling = args.pooling
lst_states = [ ]
h_prev = embs
for l in layers:
# len*batch*n_d
h_next = l.forward_all(h_prev, z)
if pooling:
# batch * n_d
masked_sum = T.sum(h_next * masks, axis=0)
lst_states.append(masked_sum/cnt_non_padding) # mean pooling
else:
lst_states.append(h_next[-1]) # last state
h_prev = apply_dropout(h_next, dropout)
if args.use_all:
size = depth * n_d
# batch * size (i.e. n_d*depth)
h_final = T.concatenate(lst_states, axis=1)
else:
size = n_d
h_final = lst_states[-1]
h_final = apply_dropout(h_final, dropout)
output_layer = self.output_layer = Layer(
n_in = size,
n_out = self.nclasses,
activation = sigmoid
)
# batch * nclasses
preds = self.preds = output_layer.forward(h_final)
# batch
loss_mat = self.loss_mat = (preds-y)**2
loss = self.loss = T.mean(loss_mat)
pred_diff = self.pred_diff = T.mean(T.max(preds, axis=1) - T.min(preds, axis=1))
params = self.params = [ ]
for l in layers + [ output_layer ]:
for p in l.params:
params.append(p)
nparams = sum(len(x.get_value(borrow=True).ravel()) \
for x in params)
say("total # parameters: {}\n".format(nparams))
l2_cost = None
for p in params:
if l2_cost is None:
l2_cost = T.sum(p**2)
else:
l2_cost = l2_cost + T.sum(p**2)
l2_cost = l2_cost * args.l2_reg
self.l2_cost = l2_cost
cost = self.cost = loss * 10 + l2_cost
class Model(object):
def __init__(self, args, embedding_layer, nclasses):
self.args = args
self.embedding_layer = embedding_layer
self.nclasses = nclasses
def ready(self):
args, embedding_layer, nclasses = self.args, self.embedding_layer, self.nclasses
self.encoder = Encoder(args, embedding_layer, nclasses)
self.generator = Generator(args, embedding_layer, nclasses, self.encoder)
self.encoder.ready()
self.generator.ready()
self.dropout = self.encoder.dropout
self.x = self.encoder.x
self.y = self.encoder.y
self.z = self.encoder.z
self.z_pred = self.generator.z_pred
def save_model(self, path, args):
# append file suffix
if not path.endswith(".pkl.gz"):
if path.endswith(".pkl"):
path += ".gz"
else:
path += ".pkl.gz"
# output to path
with gzip.open(path, "wb") as fout:
pickle.dump(
([ x.get_value() for x in self.encoder.params ], # encoder
[ x.get_value() for x in self.generator.params ], # generator
self.nclasses,
args # training configuration
),
fout,
protocol = pickle.HIGHEST_PROTOCOL
)
def load_model(self, path):
if not os.path.exists(path):
if path.endswith(".pkl"):
path += ".gz"
else:
path += ".pkl.gz"
with gzip.open(path, "rb") as fin:
eparams, gparams, nclasses, args = pickle.load(fin)
# construct model/network using saved configuration
self.args = args
self.nclasses = nclasses
self.ready()
for x,v in zip(self.encoder.params, eparams):
x.set_value(v)
for x,v in zip(self.generator.params, gparams):
x.set_value(v)
def train(self, train, dev, test, rationale_data):
args = self.args
dropout = self.dropout
padding_id = self.embedding_layer.vocab_map["<padding>"]
if dev is not None:
dev_batches_x, dev_batches_y = myio.create_batches(
dev[0], dev[1], args.batch, padding_id
)
if test is not None:
test_batches_x, test_batches_y = myio.create_batches(
test[0], test[1], args.batch, padding_id
)
if rationale_data is not None:
valid_batches_x, valid_batches_y = myio.create_batches(
[ u["xids"] for u in rationale_data ],
[ u["y"] for u in rationale_data ],
args.batch,
padding_id,
sort = False
)
start_time = time.time()
train_batches_x, train_batches_y = myio.create_batches(
train[0], train[1], args.batch, padding_id
)
say("{:.2f}s to create training batches\n\n".format(
time.time()-start_time
))
updates_e, lr_e, gnorm_e = create_optimization_updates(
cost = self.generator.cost_e,
params = self.encoder.params,
method = args.learning,
lr = args.learning_rate
)[:3]
updates_g, lr_g, gnorm_g = create_optimization_updates(
cost = self.generator.cost,
params = self.generator.params,
method = args.learning,
lr = args.learning_rate
)[:3]
sample_generator = theano.function(
inputs = [ self.x ],
outputs = self.z_pred,
#updates = self.generator.sample_updates
#allow_input_downcast = True
)
get_loss_and_pred = theano.function(
inputs = [ self.x, self.z, self.y ],
outputs = [ self.generator.loss_vec, self.encoder.preds ]
)
eval_generator = theano.function(
inputs = [ self.x, self.y ],
outputs = [ self.z, self.generator.obj, self.generator.loss,
self.encoder.pred_diff ],
givens = {
self.z : self.generator.z_pred
},
#updates = self.generator.sample_updates,
#no_default_updates = True
)
train_generator = theano.function(
inputs = [ self.x, self.y ],
outputs = [ self.generator.obj, self.generator.loss, \
self.generator.sparsity_cost, self.z, gnorm_g, gnorm_e ],
givens = {
self.z : self.generator.z_pred
},
#updates = updates_g,
updates = updates_g.items() + updates_e.items() #+ self.generator.sample_updates,
#no_default_updates = True
)
eval_period = args.eval_period
unchanged = 0
best_dev = 1e+2
best_dev_e = 1e+2
dropout_prob = np.float64(args.dropout).astype(theano.config.floatX)
for epoch in xrange(args.max_epochs):
unchanged += 1
if unchanged > 10: return
train_batches_x, train_batches_y = myio.create_batches(
train[0], train[1], args.batch, padding_id
)
processed = 0
train_cost = 0.0
train_loss = 0.0
train_sparsity_cost = 0.0
p1 = 0.0
start_time = time.time()
N = len(train_batches_x)
for i in xrange(N):
if (i+1) % 100 == 0:
say("\r{}/{} ".format(i+1,N))
bx, by = train_batches_x[i], train_batches_y[i]
mask = bx != padding_id
cost, loss, sparsity_cost, bz, gl2_g, gl2_e = train_generator(bx, by)
k = len(by)
processed += k
train_cost += cost
train_loss += loss
train_sparsity_cost += sparsity_cost
p1 += np.sum(bz*mask) / (np.sum(mask)+1e-8)
if (i == N-1) or (eval_period > 0 and processed/eval_period >
(processed-k)/eval_period):
say("\n")
say(("Generator Epoch {:.2f} costg={:.4f} scost={:.4f} lossg={:.4f} " +
"p[1]={:.2f} |g|={:.4f} {:.4f}\t[{:.2f}m / {:.2f}m]\n").format(
epoch+(i+1.0)/N,
train_cost / (i+1),
train_sparsity_cost / (i+1),
train_loss / (i+1),
p1 / (i+1),
float(gl2_g),
float(gl2_e),
(time.time()-start_time)/60.0,
(time.time()-start_time)/60.0/(i+1)*N
))
say("\t"+str([ "{:.1f}".format(np.linalg.norm(x.get_value(borrow=True))) \
for x in self.encoder.params ])+"\n")
say("\t"+str([ "{:.1f}".format(np.linalg.norm(x.get_value(borrow=True))) \
for x in self.generator.params ])+"\n")
if dev:
self.dropout.set_value(0.0)
dev_obj, dev_loss, dev_diff, dev_p1 = self.evaluate_data(
dev_batches_x, dev_batches_y, eval_generator, sampling=True)
if dev_obj < best_dev:
best_dev = dev_obj
unchanged = 0
if args.dump and rationale_data:
self.dump_rationales(args.dump, valid_batches_x, valid_batches_y,
get_loss_and_pred, sample_generator)
if args.save_model:
self.save_model(args.save_model, args)
say(("\tsampling devg={:.4f} mseg={:.4f} avg_diffg={:.4f}" +
" p[1]g={:.2f} best_dev={:.4f}\n").format(
dev_obj,
dev_loss,
dev_diff,
dev_p1,
best_dev
))
if rationale_data is not None:
r_mse, r_p1, r_prec1, r_prec2 = self.evaluate_rationale(
rationale_data, valid_batches_x,
valid_batches_y, eval_generator)
say(("\trationale mser={:.4f} p[1]r={:.2f} prec1={:.4f}" +
" prec2={:.4f}\n").format(
r_mse,
r_p1,
r_prec1,
r_prec2
))
self.dropout.set_value(dropout_prob)
def evaluate_data(self, batches_x, batches_y, eval_func, sampling=False):
padding_id = self.embedding_layer.vocab_map["<padding>"]
tot_obj, tot_mse, tot_diff, p1 = 0.0, 0.0, 0.0, 0.0
for bx, by in zip(batches_x, batches_y):
if not sampling:
e, d = eval_func(bx, by)
else:
mask = bx != padding_id
bz, o, e, d = eval_func(bx, by)
p1 += np.sum(bz*mask) / (np.sum(mask) + 1e-8)
tot_obj += o
tot_mse += e
tot_diff += d
n = len(batches_x)
if not sampling:
return tot_mse/n, tot_diff/n
return tot_obj/n, tot_mse/n, tot_diff/n, p1/n
def evaluate_rationale(self, reviews, batches_x, batches_y, eval_func):
args = self.args
assert args.aspect >= 0
padding_id = self.embedding_layer.vocab_map["<padding>"]
aspect = str(args.aspect)
p1, tot_mse, tot_prec1, tot_prec2 = 0.0, 0.0, 0.0, 0.0
tot_z, tot_n = 1e-10, 1e-10
cnt = 0
for bx, by in zip(batches_x, batches_y):
mask = bx != padding_id
bz, o, e, d = eval_func(bx, by)
tot_mse += e
p1 += np.sum(bz*mask)/(np.sum(mask) + 1e-8)
for z,m in zip(bz.T, mask.T):
z = [ vz for vz,vm in zip(z,m) if vm ]
assert len(z) == len(reviews[cnt]["xids"])
truez_intvals = reviews[cnt][aspect]
prec = sum( 1 for i, zi in enumerate(z) if zi>0 and \
any(i>=u[0] and i<u[1] for u in truez_intvals) )
nz = sum(z)
if nz > 0:
tot_prec1 += prec/(nz+0.0)
tot_n += 1
tot_prec2 += prec
tot_z += nz
cnt += 1
assert cnt == len(reviews)
n = len(batches_x)
return tot_mse/n, p1/n, tot_prec1/tot_n, tot_prec2/tot_z
def dump_rationales(self, path, batches_x, batches_y, eval_func, sample_func):
embedding_layer = self.embedding_layer
padding_id = self.embedding_layer.vocab_map["<padding>"]
lst = [ ]
for bx, by in zip(batches_x, batches_y):
bz = np.ones(bx.shape, dtype="int8")
loss_vec_t, preds_t = eval_func(bx, bz, by)
bz = sample_func(bx)
loss_vec_r, preds_r = eval_func(bx, bz, by)
assert len(loss_vec_r) == bx.shape[1]
for loss_t, p_t, loss_r, p_r, x,y,z in zip(loss_vec_t, preds_t, \
loss_vec_r, preds_r, bx.T, by, bz.T):
loss_t, loss_r = float(loss_t), float(loss_r)
p_t, p_r, x, y, z = p_t.tolist(), p_r.tolist(), x.tolist(), y.tolist(), z.tolist()
w = embedding_layer.map_to_words(x)
r = [ u if v == 1 else "__" for u,v in zip(w,z) ]
diff = max(y)-min(y)
lst.append((diff, loss_t, loss_r, r, w, x, y, z, p_t, p_r))
#lst = sorted(lst, key=lambda x: (len(x[3]), x[2]))
with open(path,"w") as fout:
for diff, loss_t, loss_r, r, w, x, y, z, p_t, p_r in lst:
fout.write( json.dumps( { "diff": diff,
"loss_t": loss_t,
"loss_r": loss_r,
"rationale": " ".join(r),
"text": " ".join(w),
"x": x,
"z": z,
"y": y,
"p_t": p_t,
"p_r": p_r } ) + "\n" )
def main():
print args
assert args.embedding, "Pre-trained word embeddings required."
embedding_layer = myio.create_embedding_layer(
args.embedding
)
max_len = args.max_len
if args.train:
train_x, train_y = myio.read_annotations(args.train)
train_x = [ embedding_layer.map_to_ids(x)[:max_len] for x in train_x ]
if args.dev:
dev_x, dev_y = myio.read_annotations(args.dev)
dev_x = [ embedding_layer.map_to_ids(x)[:max_len] for x in dev_x ]
if args.load_rationale:
rationale_data = myio.read_rationales(args.load_rationale)
for x in rationale_data:
x["xids"] = embedding_layer.map_to_ids(x["x"])
if args.train:
model = Model(
args = args,
embedding_layer = embedding_layer,
nclasses = len(train_y[0])
)
model.ready()
model.train(
(train_x, train_y),
(dev_x, dev_y) if args.dev else None,
None, #(test_x, test_y),
rationale_data if args.load_rationale else None
)
if args.load_model and args.dev and not args.train:
model = Model(
args = None,
embedding_layer = embedding_layer,
nclasses = -1
)
model.load_model(args.load_model)
say("model loaded successfully.\n")
# compile an evaluation function
eval_func = theano.function(
inputs = [ model.x, model.y ],
outputs = [ model.z, model.generator.obj, model.generator.loss,
model.encoder.pred_diff ],
givens = {
model.z : model.generator.z_pred
},
)
# compile a predictor function
pred_func = theano.function(
inputs = [ model.x ],
outputs = [ model.z, model.encoder.preds ],
givens = {
model.z : model.generator.z_pred
},
)
# batching data
padding_id = embedding_layer.vocab_map["<padding>"]
dev_batches_x, dev_batches_y = myio.create_batches(
dev_x, dev_y, args.batch, padding_id
)
# disable dropout
model.dropout.set_value(0.0)
dev_obj, dev_loss, dev_diff, dev_p1 = model.evaluate_data(
dev_batches_x, dev_batches_y, eval_func, sampling=True)
say("{} {} {} {}\n".format(dev_obj, dev_loss, dev_diff, dev_p1))
if __name__=="__main__":
args = options.load_arguments()
main()
|
apache-2.0
| -6,567,655,606,038,944,000 | 35.959276 | 98 | 0.461843 | false |
UoK-Psychology/rmas-oe-adapter
|
rmas_oe_adapter/settings.py
|
1
|
2184
|
'''
This is the main settings module for your adapter.
It will be imported and available to your adapter package:::
from rmas_adapter.conf import settings
poll_interval = settings.POLL_INTERVAL
You will need to have some code somewhere (usually in your runner.py) that sets
the location of this settings module, as you won't use this module directly, instead
it is imported into the rmas_adapter.conf.settings module.::
os.environ.setdefault("RMAS_ADAPTER_SETTINGS", "rmas_oe_adapter.settings")
This is so that your settings are all in the same predictable location (as there are settings that the RMAS Adapter
framework relies on). This pattern is borrowed from the way that Django manages its settings.
There are a couple of required settings:
* **RMAS_BUS_WSDL** : This is the url for the RMAS bus (specifically the wsdl file for this soap service)
* **POLL_INTERVAL** : This is the duration in milliseconds that the adapter will pause between polling the bus for messages
* **EVENTS**: This is a list of tuples describing the RMAS Events to listen for and the event handler that
should be called when one of these events occurs.
The tuple should look like:::
('name of rmas event', 'location to the handler module')
The rest of the settings in this module are specific to the RMAS-to-Openethics adapter
'''
import os
import pika
basepath = os.path.dirname(globals()["__file__"])
dirname = os.path.abspath(os.path.join(basepath, ".."))
RMAS_BUS_WSDL='http://localhost:7789/?wsdl'
POLL_INTERVAL=5000
EVENTS=[('proposal-created', 'handlers.proposal_created'),]
TEMPLATE_DIR=os.path.abspath(os.path.join(dirname,'templates'))
OE_API_BASE_URL = 'http://127.0.0.1:8000/'
OE_API_AUTH_KEY = 'ApiKey admin:1234567890'
OE_API_USER_ENDPOINT = OE_API_BASE_URL+ 'api/v1/user/'
OE_API_APPLICATION_ENDPOINT = OE_API_BASE_URL + 'api/v1/application/'
AMQP_CONNECTION_PARAMETERS= pika.ConnectionParameters()
AMQP_EXCHANGE_NAME='openethics_events'
AMQP_EXCHANGE_TYPE='fanout'
AMQP_QUEUE_NAME='adapter'
AMQP_QUEUE_ROUTING_KEY=''
|
mit
| -5,019,034,746,536,723,000 | 38.017857 | 127 | 0.712912 | false |
kannon92/psi4
|
psi4/driver/util/filesystem.py
|
1
|
1284
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
def join_path(prefix, *args):
path = str(prefix)
for elt in args:
path = os.path.join(path, str(elt))
return path
def ancestor(dir, n=1):
"""Get the nth ancestor of a directory."""
parent = os.path.abspath(dir)
for i in range(n):
parent = os.path.dirname(parent)
return parent
|
gpl-2.0
| 7,767,472,610,928,574,000 | 28.860465 | 73 | 0.720405 | false |
calatre/epidemics_network
|
treat/excel_clipper.py
|
1
|
1352
|
# Universidade de Aveiro - Physics Department
# 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 28/6/2017
# Selecting Data from an excel file to another
#import numpy as np
import pandas as pd
from openpyxl import load_workbook
#r = [0, 301, 302, 303, 304, 305, 306]
#desired = ['S_Avg', 'I_Avg', 'R_Avg', 'S_StD', 'I_StD', 'R_StD']
cvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.25, 0.5, 0.75, 1]
rvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.25, 0.5, 0.75, 1]
book = load_workbook('data/ns_shift.xlsx')
writer = pd.ExcelWriter('data/nd_shift.xlsx', engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
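# attaching the loaded workbook and its existing sheets to the writer lets
# to_excel() add the new per-(c, r) sheets without discarding the ones already
# present when the file is saved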
for cvar in cvalues:
for rvar in rvalues:
print('retrieving...')
tblnm = 'c='+str(cvar)+'|r='+ str(rvar)
data = pd.read_excel('data/ns_shift.xlsx',
sheetname = tblnm, index_col = 0)
print('...retrieved')
#data.drop(data.columns[r], axis = 1, inplace= True)
sel = data[:1000]
print('copying...............................'+str(tblnm))
sel.to_excel(writer,'c='+str(cvar)+'|r='+ str(rvar))
print('copied!')
writer.save()
|
apache-2.0
| 6,151,415,062,032,155,000 | 39.969697 | 70 | 0.54068 | false |
d9pouces/StarterPyth
|
starterpyth/cliforms.py
|
1
|
8391
|
#coding=utf-8
import os
import re
from six import PY2
from starterpyth.log import display, RED
from starterpyth.translation import ugettext as _
__author__ = 'flanker'
# noinspection PyUnresolvedReferences
input_ = raw_input if PY2 else input
class InvalidValue(ValueError):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class Input(object):
__ORDER = 0
def __init__(self, label='', initial=None, show=None):
self.show = show
self.label = label
self.initial = initial
self.order = Input.__ORDER
Input.__ORDER += 1
def read(self, initial=None):
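        # prompt the user until to_python() accepts the answer; an empty answer
        # falls back to the initial value, when one is available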
if initial is None:
initial = self.initial
raw_value = input_(self.widget(initial=initial))
if not raw_value and initial is not None:
raw_value = self.to_str(initial)
while True:
try:
valid_value = self.to_python(raw_value)
break
except InvalidValue as e:
display(_('Invalid value: %(value)s (%(msg)s)') % {'value': raw_value, 'msg': str(e)}, color=RED,
bold=True)
raw_value = input_(self.widget())
if not raw_value and initial is not None:
raw_value = self.to_str(initial)
# noinspection PyUnboundLocalVariable
return valid_value
def widget(self, initial=None):
raise NotImplementedError
def to_str(self, value):
raise NotImplementedError
def to_python(self, value):
raise NotImplementedError
class CharInput(Input):
def __init__(self, label='', initial=None, max_length=None, min_length=None, show=None):
super(CharInput, self).__init__(label=label, initial=initial, show=show)
self.max_length = max_length
self.min_length = min_length
def widget(self, initial=None):
if initial:
return _('%(label)s [%(init)s]: ') % {'label': self.label, 'init': self.to_str(initial)}
else:
return _('%(label)s: ') % {'label': self.label}
def to_str(self, value):
return str(value)
def to_python(self, value):
if self.min_length is not None and self.min_length > len(value):
raise InvalidValue(_('Value must be at least %(l)d character long') % {'l': self.min_length})
if self.max_length is not None and self.max_length < len(value):
raise InvalidValue(_('Value must be at most %(l)d character long') % {'l': self.max_length})
return value
class RegexpInput(CharInput):
def __init__(self, regexp, label='', initial=None, show=None):
super(RegexpInput, self).__init__(label=label, initial=initial, show=show)
self.regexp = regexp
def to_python(self, value):
if not self.regexp.match(value):
raise InvalidValue(_('Value must match %(l)s regexp') % {'l': self.regexp.pattern})
return value
class IntegerInput(RegexpInput):
def __init__(self, min_value=None, max_value=None, label='', initial=None, required=True, show=None):
super(IntegerInput, self).__init__(re.compile('\d+'), label=label, initial=initial, show=show)
self.max_value = max_value
self.min_value = min_value
self.required = required
def to_python(self, value):
if not self.required and not value:
return None
if not self.regexp.match(value):
raise InvalidValue(_('Value must be a integer'))
value = int(value)
if self.min_value is not None and self.min_value > value:
raise InvalidValue(_('Value must be greater than %(l)d ') % {'l': self.min_value})
if self.max_value is not None and self.max_value < value:
raise InvalidValue(_('Value must be less than %(l)d') % {'l': self.max_value})
return value
class BooleanInput(CharInput):
true_values = [_('yes'), _('y')]
false_values = [_('no'), _('n')]
def to_python(self, value):
value = value.lower()
if value in self.true_values:
return True
elif value in self.false_values:
return False
raise InvalidValue(_('Value must be one of %(l)s') % {'l': ', '.join(self.true_values + self.false_values)})
def to_str(self, value):
if value:
return self.true_values[0]
return self.false_values[0]
def widget(self, initial=None):
if initial is None:
choices = _('%s/%s') % (self.true_values[0], self.false_values[0])
elif initial:
choices = _('%s/%s') % (self.true_values[0].upper(), self.false_values[0])
else:
choices = _('%s/%s') % (self.true_values[0], self.false_values[0].upper())
return _('%(label)s [%(choices)s]: ') % {'label': self.label, 'choices': choices}
class PathInput(CharInput):
def __init__(self, cwd=None, label='', initial=None, required=True, show=None):
super(PathInput, self).__init__(label=label, initial=initial, show=show)
self.cwd = cwd
self.required = required
def to_python(self, value):
if not value and self.required:
raise InvalidValue(_('Please enter a valid path'))
elif not value:
return None
if self.cwd:
value = os.path.join(self.cwd, value)
if not os.path.exists(value):
raise InvalidValue(_('%(l)s is not a valid path') % {'l': value})
return value
class ChoiceInput(CharInput):
int_re = re.compile(r'\d+')
def __init__(self, choices, label='', initial=None, required=True, show=None):
super(ChoiceInput, self).__init__(label=label, initial=initial, show=show)
if hasattr(choices, '__call__'):
choices = choices()
self.choices = choices
self.required = required
def to_python(self, value):
if not value and self.required:
raise InvalidValue(_('Please enter a valid choice'))
elif not value:
return None
if not self.int_re.match(value) or not (1 <= int(value) <= len(self.choices)):
raise InvalidValue(_('Please enter a number between 1 and %(max)d') % {'max': len(self.choices)})
return self.choices[int(value) - 1][0]
def display(self, value):
return self.choices[int(value) - 1][1]
def widget(self, initial=None):
def l(i, x):
if initial is not None and x[0] == initial:
return _(' [%d] %s') % (i + 1, x[1])
return _(' %d %s') % (i + 1, x[1])
choices = _('\n').join([l(i, x) for (i, x) in enumerate(self.choices)])
return _('%(label)s:\n%(choices)s\n ') % {'label': self.label, 'choices': choices}
def to_str(self, value):
for i, (k, v) in enumerate(self.choices):
if value == k:
return str(i + 1)
return ''
class BaseForm(object):
def __init__(self, initial=None, extra_env=None):
super(BaseForm, self).__init__()
self.extra_env = {} if extra_env is None else extra_env
self.initial = {} if initial is None else initial
def read(self, interactive=True):
fields = []
for key, field in self.__class__.__dict__.items():
if isinstance(field, Input):
fields.append((key, field))
fields.sort(key=lambda f_: f_[1].order)
values = {}
extra_env = {}
extra_env.update(self.extra_env)
for key, field in fields:
kwargs = {}
show = field.show
init_value = self.initial.get(key, field.initial)
extra_env.update(values)
if hasattr(init_value, '__call__') and not isinstance(init_value, type):
init_value = init_value(**extra_env)
if show is not None:
if hasattr(show, '__call__'):
show = show(**extra_env)
if not show:
values[key] = init_value
continue
kwargs['initial'] = init_value
if interactive:
values[key] = field.read(**kwargs)
else:
values[key] = kwargs.get('initial', field.initial)
return values
if __name__ == '__main__':
import doctest
doctest.testmod()
|
gpl-2.0
| 341,928,209,486,795,200 | 33.677686 | 116 | 0.563223 | false |
develersrl/rooms
|
editor/roomeditor.py
|
1
|
10771
|
#!/usr/bin/env python
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from structdata import g_project
from structdata import Area
from structdata import Event
from utils import g_ptransform
import os
from arearesize import AreaResize
class EditorButton(QToolButton):
"""
    Base class for the room editing buttons.
    The constructor takes the path of the icon to display and the room
    that is currently being edited.
"""
attr = None
def sizeHint(self):
return QSize(30, 30)
def __init__(self, icon_path, room, parent=None):
super(EditorButton, self).__init__(parent)
self.icon_path = icon_path
self.room = room
self.icon = QPixmap(self.icon_path).scaled(30, 30,
Qt.KeepAspectRatio,
Qt.SmoothTransformation)
def paintEvent(self, event=None):
super(EditorButton, self).paintEvent(event)
p = QPainter(self)
p.setOpacity(self.getOpacity())
p.drawPixmap(QPoint(0, 0), self.icon)
def getOpacity(self):
"""
        Return the opacity value for the image drawn on the button.
        If the room associated with the button has the attribute
        corresponding to this button set, return 1.0, otherwise 0.5.
"""
if self.room is not None and getattr(self.room, self.attr):
return 1.
return 0.5
def setRoom(self, room):
"""
        Set the room associated with the button; to be used when the room
        being edited changes.
"""
self.room = room
class ChangeBGMButton(EditorButton):
attr = "bgm"
class ChangeBGButton(EditorButton):
attr = "bg"
class ChangeRoomName(QLineEdit):
pass
class RoomEditor(QWidget):
"""
    Room editing widget. It allows changing the room background, its
    background music and its name, and adding new areas. It also displays
    all previously created areas.
"""
def __init__(self, room=None, parent=None):
super(RoomEditor, self).__init__(parent)
self.room = room
if self.room is not None:
self.room_bg = QPixmap(g_ptransform.relativeToAbsolute(room.bg))
self.setMinimumSize(self.room_bg.width(), self.room_bg.height())
else:
self.room_bg = None
self.setMinimumSize(int(g_project.data['world'].width),
int(g_project.data['world'].height))
self.setSizePolicy(QSizePolicy(QSizePolicy.Preferred,
QSizePolicy.Preferred))
self.area_drag_start = None
self.area_drag_curr = None
self.resize_areas = []
self.change_room_name = ChangeRoomName()
self.change_room_name.setAlignment(Qt.AlignCenter)
self.change_room_bgm = ChangeBGMButton("image/musical_note.png", self.room)
self.change_room_bg = ChangeBGButton("image/PageTurn.jpg", self.room)
self.setRoom(self.room)
self.setMouseTracking(True)
horizontal_button_layout = QHBoxLayout()
horizontal_button_layout.addWidget(self.change_room_bgm)
horizontal_button_layout.addStretch()
horizontal_button_layout.addWidget(self.change_room_bg)
vertical_layout = QVBoxLayout()
vertical_layout.addLayout(horizontal_button_layout)
vertical_layout.addStretch()
horizontal = QHBoxLayout()
horizontal.setAlignment(Qt.AlignCenter)
horizontal.addStretch()
horizontal.addWidget(self.change_room_name)
horizontal.addStretch()
vertical_layout.addLayout(horizontal)
self.setLayout(vertical_layout)
self.createAllAreaResize()
self.connect(self.change_room_name,
SIGNAL("textEdited(const QString &)"),
self.updateRoomName)
self.connect(self.change_room_bg, SIGNAL("clicked()"), self.setRoomBg)
self.connect(self.change_room_bgm, SIGNAL("clicked()"), self.setRoomBgm)
def createAllAreaResize(self):
"""
        Show every area of the current room through an AreaResize widget.
"""
self.resize_areas = []
if self.room is not None:
for area in self.room.areas:
self.createAreaResize(area)
def createAreaResize(self, area):
"""
        Create an AreaResize object associated with the area passed as a
        parameter.
"""
area_resize = AreaResize(area, self.room_bg.width(),
self.room_bg.height(), self)
area_resize.move(float(area.x), float(area.y))
self.resize_areas.append(area_resize)
area_resize.show()
def setRoomBg(self):
"""funzione per settare il background della room"""
file_open = QFileDialog()
path_file = file_open.getOpenFileName(filter="*.png *.jpg")
if path_file:
room_bg = os.path.relpath(unicode(path_file))
g_project.changeImage(self.room.bg, room_bg)
self.room.bg = room_bg
g_project.notify()
def setRoomBgm(self):
"""funzione per settare la background music della room"""
file_open = QFileDialog()
path_file = file_open.getOpenFileName(filter="*.mp3")
if path_file:
self.room.bgm = os.path.relpath(unicode(path_file))
g_project.notify()
def createArea(self, x_start, y_start, x_stop, y_stop, event_name):
"""
        Create a new area and show the associated AreaResize.
        The function takes the coordinates of the top-left corner, the
        bottom-right point and the name of the event associated with the
        area.
"""
area = Area.create(self.room, max(0, x_start), max(0, y_start),
min(x_stop, self.room_bg.width()),
min(y_stop, self.room_bg.height()),
event_name)
self.createAreaResize(area)
def mousePressEvent(self, event=None):
if 0 <= event.pos().x() <= self.room_bg.width() and\
0 <= event.pos().y() <= self.room_bg.height():
self.area_drag_start = event.pos()
self.area_drag_curr = event.pos()
self.update()
def keyPressEvent(self, event=None):
if event.key() == Qt.Key_Escape:
self.area_drag_start = None
self.area_drag_curr = None
self.update()
def mouseMoveEvent(self, e):
if self.area_drag_start is None:
return
        # make sure we stay inside the background image area
x = self.clamp(e.x(), self.room_bg.width())
y = self.clamp(e.y(), self.room_bg.height())
self.area_drag_curr = QPoint(x, y)
self.update()
    def clamp(self, value, maximum):
        # keep value within [0, maximum]
        return min(value, maximum) if value > 0 else max(0, value)
def mouseReleaseEvent(self, e):
if self.area_drag_curr is None:
return
x = min(self.area_drag_start.x(), self.area_drag_curr.x())
y = min(self.area_drag_start.y(), self.area_drag_curr.y())
width = abs(self.area_drag_curr.x() - self.area_drag_start.x())
height = abs(self.area_drag_curr.y() - self.area_drag_start.y())
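        # every new area is bound to a freshly created event, registered in the
        # project before the area itself is created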
event = Event.create()
g_project.data['events'][event.id] = event
self.createArea(x, y, width, height, event.id)
g_project.notify()
self.area_drag_start = None
self.area_drag_curr = None
self.update()
def paintEvent(self, e):
QWidget.paintEvent(self, e)
p = QPainter(self)
if self.room is not None:
p.drawPixmap(QPoint(0, 0), self.room_bg)
p.setPen(Qt.blue)
# Draw currently painted area
if self.area_drag_start is not None and self.area_drag_curr is not None:
p.drawRect(QRect(self.area_drag_start.x(),
self.area_drag_start.y(),
self.area_drag_curr.x() - self.area_drag_start.x(),
self.area_drag_curr.y() - self.area_drag_start.y()))
def updateRoomName(self, name):
"""funzione per il cambio del nome della room"""
new_room_name = unicode(name)
self.emit(SIGNAL("currentRoomNameChanged(QString)"),
new_room_name)
g_project.changeRoomName(self.room.id, new_room_name)
def setRoom(self, room):
if room:
self.room = room
self.change_room_name.setText(self.room.id)
if os.path.exists(self.room.bg):
self.room_bg = QPixmap(self.room.bg)
else:
self.room_bg = QPixmap(g_ptransform.relativeToAbsolute(self.room.bg))
def changeCurrentRoom(self, room_id):
"""
funzione per cambiare la room visualizzata, prende in ingresso
l'id della room che deve essere visualizzata
"""
self.room = g_project.data['rooms'][unicode(room_id)]
if self.room:
if os.path.exists(self.room.bg):
self.room_bg = QPixmap(self.room.bg)
else:
self.room_bg = QPixmap(g_ptransform.relativeToAbsolute(self.room.bg))
self.change_room_name.setText(self.room.id)
for resize_area in self.resize_areas:
resize_area.setParent(None)
self.resize_areas = []
self.createAllAreaResize()
self.change_room_bg.setRoom(self.room)
self.change_room_bgm.setRoom(self.room)
self.setEnableEditor(True)
self.update()
def updateData(self):
if self.room is not None and self.room.id in g_project.data['rooms'].keys():
self.setRoom(self.room)
self.setMinimumSize(int(g_project.data['world'].width),
int(g_project.data['world'].height))
for resize_area in self.resize_areas:
resize_area.setParent(None)
self.resize_areas = []
self.createAllAreaResize()
self.setEnableEditor(True)
else:
self.change_room_name.setText("")
self.setEnableEditor(False)
def setEnableEditor(self, value):
self.change_room_bg.setEnabled(value)
self.change_room_bgm.setEnabled(value)
self.change_room_name.setEnabled(value)
if __name__ == "__main__":
app = QApplication(sys.argv)
rm = RoomEditor(None)
rm.show()
app.exec_()
|
mit
| 5,918,950,865,908,647,000 | 35.265993 | 85 | 0.593074 | false |
sassoftware/conary
|
conary/repository/shimclient.py
|
1
|
10116
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from conary.lib import util
from conary.repository import calllog, changeset, filecontents, netclient
from conary.repository.netrepos import netserver
import gzip
import os
import tempfile
import time
# this returns the same server for any server name or label
# requested; because a shim can only refer to one server.
class FakeServerCache(netclient.ServerCache):
def __init__(self, server, cfg):
self._server = server
netclient.ServerCache.__init__(self, cfg=cfg)
def __getitem__(self, item):
serverName = self._getServerName(item)
# return the proxy object for anything that matches the
# serverNames on this repository
if serverName in self._server._server.serverNameList:
return self._server
# otherwise get a real repository client
return netclient.ServerCache.__getitem__(self, item)
class NetworkRepositoryServer(netserver.NetworkRepositoryServer):
@netserver.accessReadOnly
def getFileContents(self, *args, **kwargs):
location = netserver.NetworkRepositoryServer.getFileContents(self,
*args, **kwargs)[0]
path = os.path.join(self.tmpPath,location.split('?')[1] + '-out')
paths = open(path).readlines()
os.unlink(path)
return [ x.split(" ")[0] for x in paths ]
@netserver.accessReadOnly
def getFileContentsFromTrove(self, *args, **kwargs):
location, sizes = netserver.NetworkRepositoryServer.getFileContentsFromTrove(
self, *args, **kwargs)
path = os.path.join(self.tmpPath,location.split('?')[1] + '-out')
paths = open(path).readlines()
os.unlink(path)
return [ x.split(" ")[0] for x in paths ]
def getChangeSet(self, authToken, clientVersion, chgSetList, recurse,
withFiles, withFileContents, excludeAutoSource):
csList = []
def _cvtTroveList(l):
new = []
for (name, (oldV, oldF), (newV, newF), absolute) in l:
if oldV:
oldV = self.fromVersion(oldV)
oldF = self.fromFlavor(oldF)
else:
oldV = 0
oldF = 0
if newV:
newV = self.fromVersion(newV)
newF = self.fromFlavor(newF)
else:
# this happens when a distributed group has a trove
# on a remote repository disappear
newV = 0
newF = 0
new.append((name, (oldV, oldF), (newV, newF), absolute))
return new
for (name, (old, oldFlavor), (new, newFlavor), absolute) in chgSetList:
if old == 0:
l = (name, (None, None),
(self.toVersion(new), self.toFlavor(newFlavor)),
absolute)
else:
l = (name, (self.toVersion(old), self.toFlavor(oldFlavor)),
(self.toVersion(new), self.toFlavor(newFlavor)),
absolute)
csList.append(l)
ret = self.repos.createChangeSet(csList,
recurse = recurse,
withFiles = withFiles,
withFileContents = withFileContents,
excludeAutoSource = excludeAutoSource)
(cs, trovesNeeded, filesNeeded, removedTroveList) = ret
assert(not filesNeeded)
assert(not removedTroveList)
# FIXME: we need a way to remove these temporary
# files when we're done with them.
fd, tmpFile = tempfile.mkstemp(suffix = '.ccs')
os.close(fd)
cs.writeToFile(tmpFile)
size = os.stat(tmpFile).st_size
return (tmpFile, [size], _cvtTroveList(trovesNeeded), [], [])
class ShimNetClient(netclient.NetworkRepositoryClient):
"""
A subclass of NetworkRepositoryClient which can take a
shimclient.NetworkRepositoryServer instance (plus a few other
pieces of information) and expose the netclient interface without
the overhead of XMLRPC.
If 'server' is a regular netserver.NetworkRepositoryServer
instance, the shim won't be able to return changesets. If 'server'
is a shimclient.NetworkRepositoryServer, it will.
NOTE: Conary proxies are only used for "real" netclients
outside this repository's serverNameList.
"""
def getFileContentsObjects(self, server, fileList, callback, outF,
compressed):
if not isinstance(self.c[server], ShimServerProxy):
return netclient.NetworkRepositoryClient.getFileContentsObjects(
self, server, fileList, callback, outF, compressed)
filePaths = self.c[server].getFileContents(fileList)
fileObjList = []
for path in filePaths:
if compressed:
fileObjList.append(
filecontents.FromFilesystem(path, compressed = True))
else:
f = gzip.GzipFile(path, "r")
fileObjList.append(filecontents.FromFile(f))
return fileObjList
def getFileContentsFromTrove(self, n, v, f, pathList,
callback = None, compressed = False):
server = v.trailingLabel().getHost()
if not isinstance(self.c[server], ShimServerProxy):
return netclient.NetworkRepositoryClient.getFileContentsFromTrove(
self, n, v, f, pathList, callback = callback,
compressed = compressed)
pathList = [self.fromPath(x) for x in pathList]
v = self.fromVersion(v)
f = self.fromFlavor(f)
filePaths = self.c[server].getFileContentsFromTrove(n,v,f,
pathList)
fileObjList = []
for path in filePaths:
if compressed:
fileObjList.append(
filecontents.FromFilesystem(path, compressed = True))
else:
f = gzip.GzipFile(path, "r")
fileObjList.append(filecontents.FromFile(f))
return fileObjList
def commitChangeSet(self, chgSet, callback = None, mirror = False,
hidden = False):
trvCs = chgSet.iterNewTroveList().next()
newLabel = trvCs.getNewVersion().trailingLabel()
if not isinstance(self.c[newLabel], ShimServerProxy):
            return netclient.NetworkRepositoryClient.commitChangeSet(self,
                    chgSet, callback = callback, mirror = mirror, hidden = hidden)
(fd, path) = tempfile.mkstemp(dir = self.c[newLabel]._server.tmpPath,
suffix = '.ccs-in')
os.close(fd)
chgSet.writeToFile(path)
base = os.path.basename(path)[:-3]
url = util.normurl(self.c[newLabel]._server.basicUrl) + "?" + base
self.c[newLabel].commitChangeSet(url, mirror = mirror,
hidden = hidden)
def commitChangeSetFile(self, fName, mirror = False, callback = None,
hidden = False):
# this could be more efficient. it rewrites the trove every time,
# but it doesn't seem to be heavily used
cs = changeset.ChangeSetFromFile(fName)
self.commitChangeSet(cs, callback = callback, mirror = mirror,
hidden = hidden)
def __init__(self, server, protocol, port, authToken, cfg):
if type(authToken[2]) is not list:
# old-style [single entitlement] authToken
authToken = (authToken[0], authToken[1],
[ ( authToken[2], authToken[3]) ], None )
elif len(authToken) == 3:
authToken = authToken + (None,)
netclient.NetworkRepositoryClient.__init__(self, cfg=cfg)
proxy = ShimServerProxy(server, protocol, port, authToken,
systemId=self.c.systemId)
self.c = FakeServerCache(proxy, cfg=cfg)
class ShimServerProxy(netclient.ServerProxy):
def __init__(self, server, protocol, port, authToken, systemId=None):
self._authToken = authToken
self._server = server
self._protocol = protocol
self._port = port
self._systemId = systemId
self._protocolVersion = netclient.CLIENT_VERSIONS[-1]
if 'CONARY_CLIENT_LOG' in os.environ:
self._callLog = calllog.ClientCallLogger(
os.environ['CONARY_CLIENT_LOG'])
else:
self._callLog = None
def __repr__(self):
return '<ShimServerProxy for %r>' % (self._server,)
def setAbortCheck(self, *args):
pass
def getChangeSetObj(self, *args):
return self._server._getChangeSetObj(self._authToken, *args)
def usedProxy(self, *args):
return False
def _request(self, method, args, kwargs):
args = [self._protocolVersion] + list(args)
start = time.time()
result = self._server.callWrapper(self._protocol, self._port, method,
self._authToken, args, kwargs, systemId=self._systemId)
if self._callLog:
self._callLog.log("shim-" + self._server.repos.serverNameList[0],
[], method, result, args,
latency = time.time() - start)
return result
|
apache-2.0
| -142,238,687,992,929,890 | 38.98419 | 85 | 0.584618 | false |
againer/supercda
|
clinics/models.py
|
1
|
3815
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "Alex Gainer (superrawr@gmail.com)"
__copyright__ = "Copyright 2014, Health Records For Everyone (HR4E)"
import cPickle as pickle
import datetime
from lxml import etree
from lxml import objectify
import os
from django.core.urlresolvers import reverse
from django.db import models
class Clinic(models.Model):
"""Model for creating a clinic document."""
confidentiality_code = models.CharField(max_length=28)
confidentiality_display_name = models.CharField(max_length=128)
date_created = models.DateTimeField(auto_now=True)
id_authority = models.CharField(max_length=28)
id_extension = models.CharField(max_length=28)
id_root = models.CharField(max_length=28)
name = models.CharField(max_length=128)
title = models.CharField(max_length=255)
slug = models.SlugField()
def create_document_xml(self):
"""Creates XML representation of the clinic document."""
document_header = self._create_document_header()
document_body = self._create_document_body()
def _create_document_body(self):
"""Creates the clinical document header (CCD style)."""
clinical_document_compoents = self._get_document_components()
return ''
def _create_document_header(self):
"""Creates CDA compliant document header in XML."""
return ''
def _get_document_components(self):
"""Gets a list of all Clinic document components."""
return []
class DocumentComponentBaseClass(models.Model):
"""Class for document components, such as vital sign components."""
clinic = models.ForeignKey(Clinic)
def to_xml(self):
"""Takes self.__dict__ and interpolates with xml representation."""
raise NotImplementedError('DocumentComponent.to_xml')
class PlanOfCareComponent(DocumentComponentBaseClass):
"""Document component for indicating clinic care plans per patient.
<component>
<section>
<templateId root="2.16.840.1.113883.10.20.22.2.10" />
<code code="18776-5" codeSystem="2.16.840.1.113883.6.1"
codeSystemName="LOINC" displayName="Treatment plan" />
<title>Plan of Care</title>
<text>
<paragraph>{text}</paragraph>
</text>
</section>
</component>"""
_CODE = {
        'code': '18776-5',
'codeSystem': '2.16.840.1.113883.6.1',
'codeSystemName': 'LOINC',
'displayName': 'Treatment plan'
}
_TEMPLATE_ID = {'root': '2.16.840.1.113883.10.20.22.2.10'}
_TITLE = 'Plan of Care'
text = models.TextField()
def to_xml(self):
root = etree.Element('component')
section = etree.SubElement(root, 'section')
template_id = etree.SubElement(
section,
'templateId',
**self._TEMPLATE_ID)
code = etree.SubElement(
section,
'code',
**self._CODE)
        title = etree.SubElement(section, 'title')
        title.text = self._TITLE
        text = etree.SubElement(section, 'text')
        paragraph = etree.SubElement(text, 'paragraph')
        paragraph.text = self.text or 'NA'
return etree.tostring(root, pretty_print=True)
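# A minimal usage sketch (field values are illustrative; assumes the Django app
# is set up and migrated):
#
#   clinic = Clinic.objects.create(
#       name='Demo Clinic', title='Demo', slug='demo',
#       id_root='1.2.3', id_extension='42', id_authority='HR4E',
#       confidentiality_code='N', confidentiality_display_name='Normal')
#   plan = PlanOfCareComponent(clinic=clinic, text='Follow up in two weeks')
#   print plan.to_xml()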
|
apache-2.0
| 896,341,156,927,606,900 | 32.761062 | 75 | 0.640891 | false |
bobbyrward/fr0st
|
scripts/random_flame.py
|
1
|
1374
|
from fr0stlib.functions import randrange2
from fr0stlib import Flame, Xform
def GenRandomBatch(numrand,*a, **k):
lst = []
if len(a)==0:
raise ValueError, "number of xform config specs must be > 0"
if 'numbasic' in k and k['numbasic']>0 and len(a)>1:
print "more than one xform config spec specified for basic mode, using only first one"
for i in range(numrand):
f = GenRandomFlame(*a, **k)
f.name = "random_flame_%03d" % i
lst.append(f)
return lst
def GenRandomFlame(*a,**k):
if 'numbasic' in k:
nb = k['numbasic']
nxforms=randrange2(2,nb+1,int=int)
else:
nb=0
nxforms = len(a)
f = Flame()
for i in range(nxforms):
if (nb>0):
Xform.random(f,col=float(i)/(nxforms-1),**a[0])
else:
Xform.random(f,col=float(i)/(nxforms-1),**a[i])
f.reframe()
f.gradient.random(hue=(0, 1),saturation=(0, 1),value=(.25, 1),nodes=(4, 6))
return f
if __name__ == "__main__":
randopt = [ { 'xv':range(1,6), 'n':2, 'xw':0},
{ 'xv':range(20,26), 'n':2, 'xw':0},
{ 'xv':range(6,9), 'n':1, 'fx':.5} ]
batchsize=20
lst = GenRandomBatch(batchsize,*randopt,numbasic=3)
save_flames("parameters/random_batch.flame",*lst)
|
gpl-3.0
| -4,427,127,534,052,115,000 | 24.444444 | 94 | 0.525473 | false |
ugoertz/igelgrafik
|
resources/pygments-patch/javascript.py
|
1
|
47743
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.javascript
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for JavaScript and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, iteritems
import pygments.unistring as uni
__all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer',
'TypeScriptLexer', 'LassoLexer', 'ObjectiveJLexer',
'CoffeeScriptLexer', 'MaskLexer']
JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') +
']|\\\\u[a-fA-F0-9]{4})')
JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Mn', 'Mc', 'Nd', 'Pc') +
u'\u200c\u200d]|\\\\u[a-fA-F0-9]{4})')
JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*'
class JavascriptLexer(RegexLexer):
"""
For JavaScript source code.
"""
name = 'JavaScript'
aliases = ['js', 'javascript']
filenames = ['*.js', ]
mimetypes = ['application/javascript', 'application/x-javascript',
'text/x-javascript', 'text/javascript', ]
flags = re.DOTALL | re.UNICODE | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'\A#! ?/.*?\n', Comment), # shebang lines are recognized by node.js
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(JS_IDENT, Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class KalLexer(RegexLexer):
"""
For `Kal`_ source code.
.. _Kal: http://rzimmerman.github.io/kal
.. versionadded:: 2.0
"""
name = 'Kal'
aliases = ['kal']
filenames = ['*.kal']
mimetypes = ['text/kal', 'application/kal']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'###[^#].*?###', Comment.Multiline),
(r'#(?!##[^#]).*?\n', Comment.Single),
],
'functiondef': [
(r'[$a-zA-Z_][\w$]*\s*', Name.Function, '#pop'),
include('commentsandwhitespace'),
],
'classdef': [
(r'\binherits\s+from\b', Keyword),
(r'[$a-zA-Z_][\w$]*\s*\n', Name.Class, '#pop'),
(r'[$a-zA-Z_][\w$]*\s*', Name.Class),
include('commentsandwhitespace'),
],
'listcomprehension': [
(r'\]', Punctuation, '#pop'),
(r'\b(property|value)\b', Keyword),
include('root'),
],
'waitfor': [
(r'\n', Punctuation, '#pop'),
(r'\bfrom\b', Keyword),
include('root'),
],
'root': [
include('commentsandwhitespace'),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex),
(r'\?|:|_(?=\n)|==?|!=|-(?!>)|[<>+*/-]=?',
Operator),
(r'\b(and|or|isnt|is|not|but|bitwise|mod|\^|xor|exists|'
r'doesnt\s+exist)\b', Operator.Word),
(r'(?:\([^()]+\))?\s*>', Name.Function),
(r'[{(]', Punctuation),
(r'\[', Punctuation, 'listcomprehension'),
(r'[})\].,]', Punctuation),
(r'\b(function|method|task)\b', Keyword.Declaration, 'functiondef'),
(r'\bclass\b', Keyword.Declaration, 'classdef'),
(r'\b(safe\s+)?wait\s+for\b', Keyword, 'waitfor'),
(r'\b(me|this)(\.[$a-zA-Z_][\w.$]*)?\b', Name.Variable.Instance),
(r'(?<![.$])(for(\s+(parallel|series))?|in|of|while|until|'
r'break|return|continue|'
r'when|if|unless|else|otherwise|except\s+when|'
r'throw|raise|fail\s+with|try|catch|finally|new|delete|'
r'typeof|instanceof|super|run\s+in\s+parallel|'
r'inherits\s+from)\b', Keyword),
(r'(?<![.$])(true|false|yes|no|on|off|null|nothing|none|'
r'NaN|Infinity|undefined)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window|'
r'print)\b',
Name.Builtin),
(r'[$a-zA-Z_][\w.$]*\s*(:|[+\-*/]?\=)?\b', Name.Variable),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all kal strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
(r'\\.|\'', String), # double-quoted string don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class LiveScriptLexer(RegexLexer):
"""
For `LiveScript`_ source code.
.. _LiveScript: http://gkz.github.com/LiveScript/
New in Pygments 1.6.
"""
name = 'LiveScript'
aliases = ['live-script', 'livescript']
filenames = ['*.ls']
mimetypes = ['text/livescript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'/\*.*?\*/', Comment.Multiline),
(r'#.*?\n', Comment.Single),
],
'multilineregex': [
include('commentsandwhitespace'),
(r'//([gim]+\b|\B)', String.Regex, '#pop'),
(r'/', String.Regex),
(r'[^/#]+', String.Regex)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'//', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
default('#pop'),
],
'root': [
# this next expr leads to infinite loops root -> slashstartsregex
# (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
(r'\+\+|&&|(?<![.$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
r'[+*`%&|^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![.$])(for|own|in|of|while|until|loop|break|'
r'return|continue|switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'extends|this|class|by|const|var|to|til)\b', Keyword,
'slashstartsregex'),
(r'(?<![.$])(true|false|yes|no|on|off|'
r'null|NaN|Infinity|undefined|void)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
Name.Builtin),
(r'[$a-zA-Z_][\w.\-:$]*\s*[:=]\s', Name.Variable,
'slashstartsregex'),
(r'@[$a-zA-Z_][\w.\-:$]*\s*[:=]\s', Name.Variable.Instance,
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-Z_][\w-]*', Name.Other, 'slashstartsregex'),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
(r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
(r'\\\S+', String),
(r'<\[.*?\]>', String),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all coffee script strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
(r'\\.|\'', String), # double-quoted string don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class DartLexer(RegexLexer):
"""
For `Dart <http://dartlang.org/>`_ source code.
.. versionadded:: 1.5
"""
name = 'Dart'
aliases = ['dart']
filenames = ['*.dart']
mimetypes = ['text/x-dart']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
include('string_literal'),
(r'#!(.*?)$', Comment.Preproc),
(r'\b(import|export)\b', Keyword, 'import_decl'),
(r'\b(library|source|part of|part)\b', Keyword),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'\b(class)\b(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'\b(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|in|is|new|return|super|switch|this|throw|try|while)\b',
Keyword),
(r'\b(abstract|const|extends|factory|final|get|implements|'
r'native|operator|set|static|typedef|var)\b', Keyword.Declaration),
(r'\b(bool|double|Dynamic|int|num|Object|String|void)\b', Keyword.Type),
(r'\b(false|null|true)\b', Keyword.Constant),
(r'[~!%^&*+=|?:<>/-]|as\b', Operator),
(r'[a-zA-Z_$]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[(){}\[\],.;]', Punctuation),
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# DIGIT+ (‘.’ DIGIT*)? EXPONENT?
(r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
(r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT?
(r'\n', Text)
# pseudo-keyword negate intentionally left out
],
'class': [
(r'[a-zA-Z_$]\w*', Name.Class, '#pop')
],
'import_decl': [
include('string_literal'),
(r'\s+', Text),
(r'\b(as|show|hide)\b', Keyword),
(r'[a-zA-Z_$]\w*', Name),
(r'\,', Punctuation),
(r'\;', Punctuation, '#pop')
],
'string_literal': [
# Raw strings.
(r'r"""([\w\W]*?)"""', String.Double),
(r"r'''([\w\W]*?)'''", String.Single),
(r'r"(.*?)"', String.Double),
(r"r'(.*?)'", String.Single),
# Normal Strings.
(r'"""', String.Double, 'string_double_multiline'),
(r"'''", String.Single, 'string_single_multiline'),
(r'"', String.Double, 'string_double'),
(r"'", String.Single, 'string_single')
],
'string_common': [
(r"\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|u\{[0-9A-Fa-f]*\}|[a-z'\"$\\])",
String.Escape),
(r'(\$)([a-zA-Z_]\w*)', bygroups(String.Interpol, Name)),
(r'(\$\{)(.*?)(\})',
bygroups(String.Interpol, using(this), String.Interpol))
],
'string_double': [
(r'"', String.Double, '#pop'),
(r'[^"$\\\n]+', String.Double),
include('string_common'),
(r'\$+', String.Double)
],
'string_double_multiline': [
(r'"""', String.Double, '#pop'),
(r'[^"$\\]+', String.Double),
include('string_common'),
(r'(\$|\")+', String.Double)
],
'string_single': [
(r"'", String.Single, '#pop'),
(r"[^'$\\\n]+", String.Single),
include('string_common'),
(r'\$+', String.Single)
],
'string_single_multiline': [
(r"'''", String.Single, '#pop'),
(r'[^\'$\\]+', String.Single),
include('string_common'),
(r'(\$|\')+', String.Single)
]
}
class TypeScriptLexer(RegexLexer):
"""
For `TypeScript <http://typescriptlang.org/>`_ source code.
.. versionadded:: 1.6
"""
name = 'TypeScript'
aliases = ['ts']
filenames = ['*.ts']
mimetypes = ['text/x-typescript']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
# Match stuff like: module name {...}
(r'\b(module)(\s*)(\s*[\w?.$][\w?.$]*)(\s*)',
bygroups(Keyword.Reserved, Text, Name.Other, Text), 'slashstartsregex'),
# Match variable type keywords
(r'\b(string|bool|number)\b', Keyword.Type),
# Match stuff like: constructor
(r'\b(constructor|declare|interface|as|AS)\b', Keyword.Reserved),
# Match stuff like: super(argument, list)
(r'(super)(\s*)(\([\w,?.$\s]+\s*\))',
bygroups(Keyword.Reserved, Text), 'slashstartsregex'),
# Match stuff like: function() {...}
(r'([a-zA-Z_?.$][\w?.$]*)\(\) \{', Name.Other, 'slashstartsregex'),
# Match stuff like: (function: return type)
(r'([\w?.$][\w?.$]*)(\s*:\s*)([\w?.$][\w?.$]*)',
bygroups(Name.Other, Text, Keyword.Type)),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class LassoLexer(RegexLexer):
"""
For `Lasso <http://www.lassosoft.com/>`_ source code, covering both Lasso 9
syntax and LassoScript for Lasso 8.6 and earlier. For Lasso embedded in
HTML, use the `LassoHtmlLexer`.
Additional options accepted:
`builtinshighlighting`
If given and ``True``, highlight builtin types, traits, methods, and
members (default: ``True``).
`requiredelimiters`
If given and ``True``, only highlight code between delimiters as Lasso
(default: ``False``).
.. versionadded:: 1.6
"""
name = 'Lasso'
aliases = ['lasso', 'lassoscript']
filenames = ['*.lasso', '*.lasso[89]']
alias_filenames = ['*.incl', '*.inc', '*.las']
mimetypes = ['text/x-lasso']
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'^#!.+lasso9\b', Comment.Preproc, 'lasso'),
(r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
(r'\[noprocess\]', Comment.Preproc, ('delimiters', 'noprocess')),
(r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc,
('delimiters', 'anglebrackets')),
(r'<(!--.*?-->)?', Other, 'delimiters'),
(r'\s+', Other),
default(('delimiters', 'lassofile')),
],
'delimiters': [
(r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
(r'\[noprocess\]', Comment.Preproc, 'noprocess'),
(r'\[', Comment.Preproc, 'squarebrackets'),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
(r'<(!--.*?-->)?', Other),
(r'[^[<]+', Other),
],
'nosquarebrackets': [
(r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
(r'<', Other),
(r'[^<]+', Other),
],
'noprocess': [
(r'\[/noprocess\]', Comment.Preproc, '#pop'),
(r'\[', Other),
(r'[^[]', Other),
],
'squarebrackets': [
(r'\]', Comment.Preproc, '#pop'),
include('lasso'),
],
'anglebrackets': [
(r'\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'lassofile': [
(r'\]|\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'whitespacecomments': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*\*!.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
],
'lasso': [
# whitespace/comments
include('whitespacecomments'),
# literals
(r'\d*\.\d+(e[+-]?\d+)?', Number.Float),
(r'0x[\da-f]+', Number.Hex),
(r'\d+', Number.Integer),
(r'([+-]?)(infinity|NaN)\b', bygroups(Operator, Number)),
(r"'", String.Single, 'singlestring'),
(r'"', String.Double, 'doublestring'),
(r'`[^`]*`', String.Backtick),
# names
(r'\$[a-z_][\w.]*', Name.Variable),
(r'#([a-z_][\w.]*|\d+)', Name.Variable.Instance),
(r"(\.)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Name.Variable.Class)),
(r"(self)(\s*->\s*)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)),
(r'(\.\.?)([a-z_][\w.]*(=(?!=))?)',
bygroups(Name.Builtin.Pseudo, Name.Other.Member)),
(r'(->\\?\s*|&\s*)([a-z_][\w.]*(=(?!=))?)',
bygroups(Operator, Name.Other.Member)),
(r'(self|inherited)\b', Name.Builtin.Pseudo),
(r'-[a-z_][\w.]*', Name.Attribute),
(r'::\s*[a-z_][\w.]*', Name.Label),
(r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
r'Error_InvalidDatabase|Error_InvalidPassword|'
r'Error_InvalidUsername|Error_ModuleNotFound|'
r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
r'Error_UpdateError)\b', Name.Exception),
# definitions
(r'(define)(\s+)([a-z_][\w.]*)(\s*=>\s*)(type|trait|thread)\b',
bygroups(Keyword.Declaration, Text, Name.Class, Operator, Keyword)),
(r'(define)(\s+)([a-z_][\w.]*)(\s*->\s*)([a-z_][\w.]*=?|[-+*/%])',
bygroups(Keyword.Declaration, Text, Name.Class, Operator,
Name.Function), 'signature'),
(r'(define)(\s+)([a-z_][\w.]*)',
bygroups(Keyword.Declaration, Text, Name.Function), 'signature'),
(r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|[-+*/%])'
r'(?=\s*\())', bygroups(Keyword, Text, Name.Function),
'signature'),
(r'(public|protected|private|provide)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Text, Name.Function)),
# keywords
(r'(true|false|none|minimal|full|all|void)\b', Keyword.Constant),
(r'(local|var|variable|global|data(?=\s))\b', Keyword.Declaration),
(r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|'
r'null|bytes|list|queue|set|stack|staticarray|tie)\b', Keyword.Type),
(r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Text, Keyword)),
(r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Text, Name)),
(r'require\b', Keyword, 'requiresection'),
(r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)),
(r'(/?)(Cache|Database_Names|Database_SchemaNames|'
r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|'
r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|'
r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|'
r'NoProcess|Output_None|Portal|Private|Protect|Records|Referer|'
r'Referrer|Repeating|ResultSet|Rows|Search_Args|Search_Arguments|'
r'Select|Sort_Args|Sort_Arguments|Thread_Atomic|Value_List|While|'
r'Abort|Case|Else|If_Empty|If_False|If_Null|If_True|Loop_Abort|'
r'Loop_Continue|Loop_Count|Params|Params_Up|Return|Return_Value|'
r'Run_Children|SOAP_DefineTag|SOAP_LastRequest|SOAP_LastResponse|'
r'Tag_Name|ascending|average|by|define|descending|do|equals|'
r'frozen|group|handle_failure|import|in|into|join|let|match|max|'
r'min|on|order|parent|protected|provide|public|require|returnhome|'
r'skip|split_thread|sum|take|thread|to|trait|type|where|with|'
r'yield|yieldhome)\b',
bygroups(Punctuation, Keyword)),
# other
(r',', Punctuation, 'commamember'),
(r'(and|or|not)\b', Operator.Word),
(r'([a-z_][\w.]*)(\s*::\s*[a-z_][\w.]*)?(\s*=(?!=))',
bygroups(Name, Name.Label, Operator)),
(r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)),
(r'(=)(n?bw|n?ew|n?cn|lte?|gte?|n?eq|n?rx|ft)\b',
bygroups(Operator, Operator.Word)),
(r':=|[-+*/%=<>&|!?\\]+', Operator),
(r'[{}():;,@^]', Punctuation),
],
'singlestring': [
(r"'", String.Single, '#pop'),
(r"[^'\\]+", String.Single),
include('escape'),
(r"\\", String.Single),
],
'doublestring': [
(r'"', String.Double, '#pop'),
(r'[^"\\]+', String.Double),
include('escape'),
(r'\\', String.Double),
],
'escape': [
(r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|'
r'[abefnrtv?"\'\\]|$)', String.Escape),
],
'signature': [
(r'=>', Operator, '#pop'),
(r'\)', Punctuation, '#pop'),
(r'[(,]', Punctuation, 'parameter'),
include('lasso'),
],
'parameter': [
(r'\)', Punctuation, '#pop'),
(r'-?[a-z_][\w.]*', Name.Attribute, '#pop'),
(r'\.\.\.', Name.Builtin.Pseudo),
include('lasso'),
],
'requiresection': [
(r'(([a-z_][\w.]*=?|[-+*/%])(?=\s*\())', Name, 'requiresignature'),
(r'(([a-z_][\w.]*=?|[-+*/%])(?=(\s*::\s*[\w.]+)?\s*,))', Name),
(r'[a-z_][\w.]*=?|[-+*/%]', Name, '#pop'),
(r'::\s*[a-z_][\w.]*', Name.Label),
(r',', Punctuation),
include('whitespacecomments'),
],
'requiresignature': [
(r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'),
(r'\)', Punctuation, '#pop:2'),
(r'-?[a-z_][\w.]*', Name.Attribute),
(r'::\s*[a-z_][\w.]*', Name.Label),
(r'\.\.\.', Name.Builtin.Pseudo),
(r'[(,]', Punctuation),
include('whitespacecomments'),
],
'commamember': [
(r'(([a-z_][\w.]*=?|[-+*/%])'
r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))',
Name.Function, 'signature'),
include('whitespacecomments'),
default('#pop'),
],
}
def __init__(self, **options):
self.builtinshighlighting = get_bool_opt(
options, 'builtinshighlighting', True)
self.requiredelimiters = get_bool_opt(
options, 'requiredelimiters', False)
self._builtins = set()
self._members = set()
if self.builtinshighlighting:
from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS
for key, value in iteritems(BUILTINS):
self._builtins.update(value)
for key, value in iteritems(MEMBERS):
self._members.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.requiredelimiters:
stack.append('delimiters')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if (token is Name.Other and value.lower() in self._builtins or
token is Name.Other.Member and
value.lower().rstrip('=') in self._members):
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if 'bin/lasso9' in text:
rv += 0.8
if re.search(r'<\?lasso', text, re.I):
rv += 0.4
if re.search(r'local\(', text, re.I):
rv += 0.4
return rv
class ObjectiveJLexer(RegexLexer):
"""
For Objective-J source code with preprocessor directives.
.. versionadded:: 1.3
"""
name = 'Objective-J'
aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
filenames = ['*.j']
mimetypes = ['text/x-objective-j']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
# function definition
(r'^(' + _ws + r'[+-]' + _ws + r')([(a-zA-Z_].*?[^(])(' + _ws + r'\{)',
bygroups(using(this), using(this, state='function_signature'),
using(this))),
# class definition
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
'classname'),
(r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
'forward_classname'),
(r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),
include('statements'),
('[{()}]', Punctuation),
(';', Punctuation),
],
'whitespace': [
(r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'#if\s+0', Comment.Preproc, 'if0'),
(r'#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'<!--', Comment),
],
'slashstartsregex': [
include('whitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop'),
],
'badregex': [
(r'\n', Text, '#pop'),
],
'statements': [
(r'(L|@)?"', String, 'string'),
(r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|'
r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),
(r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(@selector|@private|@protected|@public|@encode|'
r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void|'
r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
Keyword.Type),
(r'(self|super)\b', Name.Builtin),
(r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
r'SQRT2)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'([$a-zA-Z_]\w*)(' + _ws + r')(?=\()',
bygroups(Name.Function, using(this))),
(r'[$a-zA-Z_]\w*', Name),
],
'classname': [
# interface definition that inherits
(r'([a-zA-Z_]\w*)(' + _ws + r':' + _ws +
r')([a-zA-Z_]\w*)?',
bygroups(Name.Class, using(this), Name.Class), '#pop'),
# interface definition for a category
(r'([a-zA-Z_]\w*)(' + _ws + r'\()([a-zA-Z_]\w*)(\))',
bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
# simple interface / implementation
(r'([a-zA-Z_]\w*)', Name.Class, '#pop'),
],
'forward_classname': [
(r'([a-zA-Z_]\w*)(\s*,\s*)',
bygroups(Name.Class, Text), '#push'),
(r'([a-zA-Z_]\w*)(\s*;?)',
bygroups(Name.Class, Text), '#pop'),
],
'function_signature': [
include('whitespace'),
# start of a selector w/ parameters
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_]\w+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), 'function_parameters'),
# no-param function
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_]\w+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), "#pop"),
# no return type given, start of a selector w/ parameters
(r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
bygroups(Name.Function), 'function_parameters'),
# no return type given, no-param function
(r'([$a-zA-Z_]\w+)', # function name
bygroups(Name.Function), "#pop"),
default('#pop'),
],
'function_parameters': [
include('whitespace'),
# parameters
(r'(\(' + _ws + ')' # open paren
r'([^)]+)' # type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+)', # param name
bygroups(using(this), Keyword.Type, using(this), Text)),
# one piece of a selector name
(r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
Name.Function),
# smallest possible selector piece
(r'(:)', Name.Function),
# var args
(r'(,' + _ws + r'\.\.\.)', using(this)),
# param name
(r'([$a-zA-Z_]\w+)', Text),
],
'expression': [
(r'([$a-zA-Z_]\w*)(\()', bygroups(Name.Function,
Punctuation)),
(r'(\))', Punctuation, "#pop"),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
def analyse_text(text):
if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
# special directive found in most Objective-J files
return True
return False
class CoffeeScriptLexer(RegexLexer):
"""
For `CoffeeScript`_ source code.
.. _CoffeeScript: http://coffeescript.org
.. versionadded:: 1.3
"""
name = 'CoffeeScript'
aliases = ['coffee-script', 'coffeescript', 'coffee']
filenames = ['*.coffee']
mimetypes = ['text/coffeescript']
flags = re.DOTALL | re.UNICODE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'###[^#].*?###', Comment.Multiline),
(r'#(?!##[^#]).*?\n', Comment.Single),
],
'multilineregex': [
(r'[^/#]+', String.Regex),
(r'///([gim]+\b|\B)', String.Regex, '#pop'),
(r'#\{', String.Interpol, 'interpoling_string'),
(r'[/#]', String.Regex),
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'///', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
default('#pop'),
],
'root': [
# this next expr leads to infinite loops root -> slashstartsregex
# (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
r'\|\||\\(?=\n)|'
r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&|^/])=?',
Operator, 'slashstartsregex'),
(r'(?:\([^()]*\))?\s*[=-]>', Name.Function),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![.$])(for|own|in|of|while|until|'
r'loop|break|return|continue|'
r'switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'wenn|dann|sonst|während|zurückgeben|für|'
r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
(r'(?<![\.\$])(true|false|yes|no|on|off|null|wahr|falsch|an|aus|ja|nein|'
r'NaN|Infinity|undefined)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
Name.Builtin),
(r'[$a-zA-ZäöüÄÖÜß_][a-zA-Z0-9äöüÄÖÜß_\.:\$]*\s*[:=]\s', Name.Variable,
'slashstartsregex'),
(r'@[$a-zA-ZäöüÄÖÜß_][a-zA-Z0-9äöüÄÖÜß_\.:\$]*\s*[:=]\s', Name.Variable.Instance,
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-ZäöüÄÖÜß_][äöüAÄÖÜßa-zA-Z0-9_\$]*', Name.Other, 'slashstartsregex'),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all coffee script strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
(r'\\.|\'', String), # double-quoted string don't need ' escapes
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#\{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class MaskLexer(RegexLexer):
"""
For `Mask <http://github.com/atmajs/MaskJS>`__ markup.
.. versionadded:: 2.0
"""
name = 'Mask'
aliases = ['mask']
filenames = ['*.mask']
mimetypes = ['text/x-mask']
flags = re.MULTILINE | re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'[{};>]', Punctuation),
(r"'''", String, 'string-trpl-single'),
(r'"""', String, 'string-trpl-double'),
(r"'", String, 'string-single'),
(r'"', String, 'string-double'),
(r'([\w-]+)', Name.Tag, 'node'),
(r'([^.#;{>\s]+)', Name.Class, 'node'),
(r'(#[\w-]+)', Name.Function, 'node'),
(r'(\.[\w-]+)', Name.Variable.Class, 'node')
],
'string-base': [
(r'\\.', String.Escape),
(r'~\[', String.Interpol, 'interpolation'),
(r'.', String.Single),
],
'string-single': [
(r"'", String.Single, '#pop'),
include('string-base')
],
'string-double': [
(r'"', String.Single, '#pop'),
include('string-base')
],
'string-trpl-single': [
(r"'''", String.Single, '#pop'),
include('string-base')
],
'string-trpl-double': [
(r'"""', String.Single, '#pop'),
include('string-base')
],
'interpolation': [
(r'\]', String.Interpol, '#pop'),
(r'\s*:', String.Interpol, 'expression'),
(r'\s*\w+:', Name.Other),
(r'[^\]]+', String.Interpol)
],
'expression': [
(r'[^\]]+', using(JavascriptLexer), '#pop')
],
'node': [
(r'\s+', Text),
(r'\.', Name.Variable.Class, 'node-class'),
(r'\#', Name.Function, 'node-id'),
(r'style[ \t]*=', Name.Attribute, 'node-attr-style-value'),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'node-attr-value'),
(r'[\w:-]+', Name.Attribute),
(r'[>{;]', Punctuation, '#pop')
],
'node-class': [
(r'[\w-]+', Name.Variable.Class),
(r'~\[', String.Interpol, 'interpolation'),
default('#pop')
],
'node-id': [
(r'[\w-]+', Name.Function),
(r'~\[', String.Interpol, 'interpolation'),
default('#pop')
],
'node-attr-value': [
(r'\s+', Text),
(r'\w+', Name.Variable, '#pop'),
(r"'", String, 'string-single-pop2'),
(r'"', String, 'string-double-pop2'),
default('#pop')
],
'node-attr-style-value': [
(r'\s+', Text),
(r"'", String.Single, 'css-single-end'),
(r'"', String.Single, 'css-double-end'),
include('node-attr-value')
],
'css-base': [
(r'\s+', Text),
(r";", Punctuation),
(r"[\w\-]+\s*:", Name.Builtin)
],
'css-single-end': [
include('css-base'),
(r"'", String.Single, '#pop:2'),
(r"[^;']+", Name.Entity)
],
'css-double-end': [
include('css-base'),
(r'"', String.Single, '#pop:2'),
(r'[^;"]+', Name.Entity)
],
'string-single-pop2': [
(r"'", String.Single, '#pop:2'),
include('string-base')
],
'string-double-pop2': [
(r'"', String.Single, '#pop:2'),
include('string-base')
],
}
|
bsd-3-clause
| 440,068,771,751,930,940 | 38.741667 | 93 | 0.450052 | false |
JohnGriffiths/LabNotebook
|
site_source/pelicanconf.py
|
1
|
3500
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
AUTHOR = u'John D. Griffiths'
SITENAME = u"JDG Lab Notebook"
SITEURL = 'https://johngriffiths.github.io/LabNotebook'
#SITESUBTITLE = u'Open Notebook Science.'
TIMEZONE = 'US/Pacific'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
TAG_FEED_ATOM = "feeds/tag_%s.atom.xml"
MENUITEMS = [('About', 'https://johngriffiths.github.io/LabNotebook/about-the-notebook.html'),
('Archives', 'https://johngriffiths.github.io/LabNotebook/archives.html'),
('Github repo', 'https://github.com/JohnGriffiths/LabNotebook.html'),
('Open Notebook Science', 'http://en.wikipedia.org/wiki/Open_notebook_science')]
#('Home Page', 'http://www.astro.washington.edu/users/vanderplas')]
NEWEST_FIRST_ARCHIVES = False
# Blogroll
#LINKS = (('Lab notebook github repo', 'https://github.com/JohnGriffiths/LabNotebook'),
# ('JDG Website', 'http://johndavidgriffiths.strikingly.com'),
# ('JDG Blog', 'https://johngriffiths.github.io/neurodidact'),
# ('Open Notebook Science', 'http://en.wikipedia.org/wiki/Open_notebook_science'))
# ('Python.org', 'http://python.org/'),
# ('Jinja2', 'http://jinja.pocoo.org/'),
# ('You can modify those links in your config file', '#'),)
# Social widget
#SOCIAL = (('You can add links in your config file', '#'),
# ('Another social link', '#'),)
DEFAULT_PAGINATION = 1
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
#THEME='pelican-bootstrap3'
CODE_DIR = '../../resources/code' #'downloads/code'
NOTEBOOK_DIR = '../../resources/notebooks' # 'downloads/notebooks'
# Theme and plugins
#THEME = '../pelican-themes/cebong' # 'waterspill-en' # 'pelican-octopress-theme/'
#THEME = 'pelican-octopress-theme'
THEME = 'theme/labnotebook-pelican-octopress-theme'
PLUGIN_PATHS = ['plugins/pelican-plugins']
# Sharing
#TWITTER_USER = 'neurodidact'
#GOOGLE_PLUS_USER = 'j.davidgriffiths'
#GOOGLE_PLUS_ONE = True
#GOOGLE_PLUS_HIDDEN = False
#FACEBOOK_LIKE = False
#TWITTER_TWEET_BUTTON = True
#TWITTER_LATEST_TWEETS = True
#TWITTER_FOLLOW_BUTTON = True
#TWITTER_TWEET_COUNT = 3
#TWITTER_SHOW_REPLIES = 'false'
#TWITTER_SHOW_FOLLOWER_COUNT = 'true'
# This requires Pelican 3.3+
STATIC_PATHS = ['images', 'figures', 'downloads', 'favicon.png']
#CODE_DIR = '../../resources/code' # downloads/code
#NOTEBOOK_DIR = '../../resources/notebooks' # downloads/notebooks @'
#PLUGIN_PATH = 'pelican-plugins'
#PLUGIN_PATH='/media/sf_SharedFolder/Code/git_repos_of_others/PythonicPerambulations/pelican-plugins'
PLUGINS = ['summary', 'liquid_tags.img', 'liquid_tags.video',
'liquid_tags.include_code', 'liquid_tags.notebook',
'liquid_tags.literal',
'encrypt_content',
'render_math']
DISPLAY_PAGES_ON_MENU = False
# The theme file should be updated so that the base header contains the line:
#
# {% if EXTRA_HEADER %}
# {{ EXTRA_HEADER }}
# {% endif %}
#
# This header file is automatically generated by the notebook plugin
if not os.path.exists('_nb_header.html'):
import warnings
warnings.warn("_nb_header.html not found. "
"Rerun make html to finalize build.")
else:
EXTRA_HEADER = open('_nb_header.html').read().decode('utf-8')
# Search
SEARCH_BOX = True
|
gpl-2.0
| 3,857,400,785,375,380,500 | 27.688525 | 101 | 0.675143 | false |
navinpai/LMTAS
|
server/app.py
|
1
|
9818
|
from werkzeug.utils import secure_filename
from flask import Flask, request, render_template
import cognitive_face as CF
import pymysql.cursors
from PIL import Image
import requests
import string
import random
import json
import os
import pyfcm
from pyfcm import FCMNotification
import kairos_face
import constants
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'static/uploads/'
def get_db_connection():
return pymysql.connect(host='localhost', \
user=constants.MYSQL_USERNAME, \
password=constants.MYSQL_PASSWORD, \
db='gohack', \
charset='utf8mb4', \
cursorclass=pymysql.cursors.DictCursor)
def kairos_identify(img_file):
kairos_face.settings.app_id = constants.KAIROS_APPID
kairos_face.settings.app_key = constants.KAIROS_APPKEY
recognized_faces = kairos_face.recognize_face('actors', file=img_file)
return recognized_faces
def make_db_entries(identified_people, userName, amount, img_link):
individual_share = amount * 1.0 / len(identified_people)
connection = get_db_connection()
try:
with connection.cursor() as cursor:
for person in identified_people:
if person != userName:
sql = "INSERT into `txns` (`payer`, `payee`, `img`, `amount`) values (%s, %s, %s, %s)"
cursor.execute(sql, (userName, person, img_link, str(individual_share)))
connection.commit()
finally:
connection.close()
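# A quick sketch of the even split above (names and amounts are hypothetical):
#
#   make_db_entries(['arch', 'navin', 'ganesh'], 'arch', 900.0, 'AB12CD.jpg')
#   # -> inserts two rows of 300.0 each (payer 'arch', payees 'navin' and
#   #    'ganesh'); the payer's own share is never written to the txns table.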
def firebase_notify(identified_users, user):
push_service = FCMNotification(api_key=constants.FIREBASE_API_KEY)
message_title = "Update: " + user + " has added a new bill with you!"
message_body = "Update: " + user + " has added a new bill with you!"
#for user in identified_users:
result = push_service.notify_topic_subscribers(topic_name="updates", message_title=message_title, message_body=message_body)
def recognize_faces(img_file, faceCoords):
imgMain = Image.open(img_file)
identified_faces = set()
for face in faceCoords:
faceRect = face['faceRectangle']
cropped = imgMain.crop((faceRect['left'] - 10, faceRect['top'] - 10, faceRect['left'] + faceRect['width'] + 10, faceRect['top'] + faceRect['height'] + 10))
tempImg = os.path.join(app.config['UPLOAD_FOLDER'], 'tempFace.jpg')
cropped.save(tempImg)
fallback = False
try:
result = kairos_identify(tempImg)
except:
fallback = True
result = kairos_identify(img_file)
if len(result) > 0:
identified_faces.add(result[0].subject)
return list(identified_faces), len(faceCoords), fallback
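# Sketch of the return value (hypothetical data): for a photo with three detected
# faces of which Kairos recognises two enrolled subjects,
#
#   recognize_faces('static/uploads/AB12CD.jpg', result)
#   # -> (['navin', 'arch'], 3, False)
#
# i.e. the de-duplicated subject names, the number of detected faces, and a flag
# telling whether the whole-image fallback was used.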
def getUserTxnDetails(user):
connection = get_db_connection()
result = []
try:
with connection.cursor() as cursor:
sql = 'SELECT * from `txns` where `payee`= %s or `payer`=%s order by id DESC'
cursor.execute(sql, (user,user))
result = cursor.fetchall();
finally:
connection.close()
txnStrings = []
balances = {}
total = 0.0
for txn in result:
if(txn["payer"] == user):
total = total + txn['amount']
if(txn['payee'] in balances):
balances[txn['payee']] = balances[txn['payee']] + txn['amount']
else:
balances[txn['payee']] = txn['amount']
txnPT = "You lent Rs." + str(txn["amount"]) +" to "+ txn["payee"].title()
else:
total = total - txn['amount']
if(txn['payer'] in balances):
balances[txn['payer']] = balances[txn['payer']] - txn['amount']
else:
                balances[txn['payer']] = -txn['amount']
txnPT = txn["payer"].title() + " lent you Rs." + str(txn["amount"])
txnImg = txn["img"]
txnStrings.append({'txnPT': txnPT, 'txnImg': txnImg})
return (balances, txnStrings, total)
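# Illustrative sketch (hypothetical data, not in the original file) of the
# shapes returned above:
#
#   balances, txns, total = getUserTxnDetails('arch')
#   # balances -> {'bob': 300.0, 'carol': -150.0}   (positive: they owe you)
#   # txns     -> [{'txnPT': 'You lent Rs.300.0 to Bob', 'txnImg': 'ABC123.jpg'}, ...]
#   # total    -> net position across all transactions, e.g. 150.0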
@app.route('/home')
def home():
(balances, txns, total) = getUserTxnDetails("Arch")
return render_template('index.html', balances=balances, txns=txns, total=total)
@app.route('/add')
def add():
return render_template('add_new.html')
@app.route('/enroll')
def enroll():
return render_template('enroll.html')
@app.route('/addToKairos', methods=['POST'])
def addToKairos():
(balances, txns, total) = getUserTxnDetails("Arch")
return render_template('index.html', balances=balances, txns=txns, total=total)
@app.route('/upload', methods=['POST'])
def upload():
success = False
imgData = request.form['file']
amount = request.form['amount']
user = request.form['userName']
img_title = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + '.jpg'
if imgData:
filename = secure_filename(img_title)
with open(os.path.join(app.config['UPLOAD_FOLDER'], filename), 'wb') as fh:
fh.write(imgData.decode('base64'))
KEY = constants.MS_OXFORD_KEY
CF.Key.set(KEY)
img_file = os.path.join(app.config['UPLOAD_FOLDER'], img_title)
result = CF.face.detect(img_file)
(identified_people, num_of_faces, fallback) = recognize_faces(img_file, result)
if num_of_faces < 1:
message = 'Could not make out any faces! Try with better light or crisper photos'
else:
success = True
if(len(identified_people) == num_of_faces):
message = 'Success! Get Back to the party!'
make_db_entries(identified_people, user, float(amount), img_title)
else:
if(user not in identified_people):
make_db_entries(identified_people + [user], user, float(amount), img_title)
message = 'Was some work, but got it done!'
else:
message = 'Couldn\'t recognize all faces . Manual intervention required! :('
else:
message = 'Dafuq? No Image Data Sent!'
return json.dumps({"success": success, "message": message})
@app.route('/addNew', methods=['POST'])
def addNew():
    success = False
    img = request.files['file']
    user = request.form['person']
    # the code below references `amount`, which this handler never read in the
    # original; take it from the form like upload() does, defaulting to 0 (assumption)
    amount = request.form.get('amount', 0)
    img_title = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + '.jpg'
if img:
filename = secure_filename(img_title)
img.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
KEY = constants.MS_OXFORD_KEY
CF.Key.set(KEY)
img_file = os.path.join(app.config['UPLOAD_FOLDER'], img_title)
result = CF.face.detect(img_file)
(identified_people, num_of_faces, fallback) = recognize_faces(img_file, result)
if num_of_faces < 1:
message = 'Could not make out any faces! Try with better light or crisper photos'
else:
success = True
if(len(identified_people) == num_of_faces):
message = 'Success! Get Back to the party!'
            firebase_notify(identified_people, user)
make_db_entries(identified_people, user, float(amount), img_title)
else:
if(user not in identified_people):
                firebase_notify(identified_people, user)
make_db_entries(identified_people + [user], user, float(amount), img_title)
message = 'Was some work, but got it done!'
else:
message = 'Couldn\'t recognize all faces . Manual intervention required! :('
else:
message = 'Dafuq? No Image Data Sent!'
return json.dumps({"success": success, "message": message})
@app.route('/enrollNew', methods=['POST'])
def enrollNew():
img=request.files['file']
img_title = "ENR" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) + '.jpg'
if img:
filename = secure_filename(img_title)
img.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
kairos_face.settings.app_id = constants.KAIROS_APPID
kairos_face.settings.app_key = constants.KAIROS_APPKEY
subject_id, success = kairos_face.enroll_face(request.form['person'], 'actors', file=os.path.join(app.config['UPLOAD_FOLDER'], filename))
if success:
return "Successfully enrolled " + subject_id
else:
return "Failed to enroll. Try a different photo?"
@app.route('/view/<img>')
def show_image(img):
    # balances/txns/total were undefined here; fetch them the same way the
    # other views do (assumption: same hard-coded user as home(), and that
    # view.html accepts the image name)
    (balances, txns, total) = getUserTxnDetails("Arch")
    return render_template('view.html', img=img, balances=balances, txns=txns, total=total)
@app.route("/test")
def getMys():
'''
connection = pymysql.connect(host='localhost', \
user=constants.MYSQL_USERNAME, \
password=constants.MYSQL_PASSWORD, \
db='gohack', \
charset='utf8mb4', \
cursorclass=pymysql.cursors.DictCursor)
#try:
with connection.cursor() as cursor:
# Read a single record
sql = "SELECT `id`FROM `txns`" # WHERE `email`=%s"
cursor.execute(sql) #, ('webmaster@python.org',))
result = cursor.fetchone()
return json.dumps(result)
#finally:
connection.close()
return "Some error"
'''
KEY = constants.MS_OXFORD_KEY
CF.Key.set(KEY)
img_file = os.path.join(app.config['UPLOAD_FOLDER'], 'AAAAAA.jpg')
result = CF.face.detect(img_file)
faces_detected = len(result)
recognize_faces(img_file, result)
return json.dumps(result)
@app.route('/getDetails')
def getDetails():
(balances, txns, total) = getUserTxnDetails("navin")
response = {"lastTransactions": txns, "balances": balances, "total": total}
return json.dumps(response)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=1337)
|
mit
| -6,674,061,538,280,654,000 | 38.119522 | 163 | 0.600733 | false |
wesleybowman/karsten
|
project/rawADCPclass.py
|
1
|
4107
|
from __future__ import division
import numpy as np
import sys
sys.path.append('/home/wesley/github/UTide/')
from utide import ut_solv, ut_reconstr
#from shortest_element_path import shortest_element_path
#import matplotlib.pyplot as plt
#import matplotlib.tri as Tri
#import matplotlib.ticker as ticker
#import seaborn
import scipy.io as sio
import h5py
from os import path
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
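# Minimal usage sketch (not in the original file): Struct simply exposes a
# dict's keys as attributes.
#
#   s = Struct(lat=44.2605, lon=-66.3354)
#   s.lat   # -> 44.2605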
class rawADCP:
def __init__(self, filename):
self.QC = ['raw data']
self.load(filename)
self.Params_Stn4_SWNSreport(filename)
self.load_rbrdata()
## set options
self.options = {}
self.options['showPA'] = 1
self.options['showRBRavg'] = 1
## save a flow file in BPformat
#save_FlowFile_BPFormat(fileinfo,adcp,rbr,saveparams,options)
def load(self, filename):
try:
self.mat = sio.loadmat(filename,
struct_as_record=False, squeeze_me=True)
self.adcp = self.mat['adcp']
except NotImplementedError:
self.mat = h5py.File(filename)
self.adcp = self.mat['adcp']
#self.adcp = Struct(**self.mat['adcp'])
def Params_Stn4_SWNSreport(self, filename):
fname = filename.split('/')
filebase = fname[-1].split('_')[0]
self.fileinfo = {}
self.fileinfo['datadir'] = path.join(*fname[:-1]) + '/'
self.fileinfo['ADCP'] = filebase + '_raw'
self.fileinfo['outdir'] = path.join(*fname[:-1]) + '/'
self.fileinfo['flowfile'] = filebase + '_Flow'
self.fileinfo['rbr']= 'station4_grandPassageII_RBRSN_011857.mat'
self.fileinfo['paramfile']= 'Params_Stn4_SWNSreport'
#%% ADCP parameters
self.saveparams = {}
self.saveparams['tmin'] = 209
self.saveparams['tmax'] = 240
self.saveparams['zmin'] = 0
self.saveparams['zmax'] = 20
self.saveparams['approxdepth'] = 15.5
self.saveparams['flooddir'] = 0
self.saveparams['declination'] = -17.25
self.saveparams['lat'] = 44.2605
self.saveparams['lon'] = -66.3354
self.saveparams['dabADCP'] = 0.5
self.saveparams['dabPS'] = -0.6
self.saveparams['rbr_hr_offset'] = 3
def load_rbrdata(self):
rbrFile = self.fileinfo['datadir'] + self.fileinfo['rbr']
try:
rbrMat = sio.loadmat(rbrFile,
struct_as_record=False, squeeze_me=True)
except NotImplementedError:
rbrMat = h5py.File(rbrFile)
rbr = rbrMat['rbr']
rbrout = {}
rbrout['mtime'] = rbr.yd
rbrout['temp'] = rbr.temperature
rbrout['pres'] = rbr.pressure
rbrout['depth'] = rbr.depth
rbrout['mtime'] = rbr.yd
self.rbr = rbrout
if __name__ == '__main__':
#filename = 'GP-120726-BPd_raw.mat'
filename = '140703-EcoEII_database/data/GP-120726-BPd_raw.mat'
data = rawADCP(filename)
#stn = 'GP-120726-BPd';
#%% File information
#fileinfo.datadir = '../data/'; %path to raw data files
#fileinfo.ADCP = [stn '_raw']; %name of ADCP file
#fileinfo.outdir = '../data/'; %path to output directory
#fileinfo.flowfile = [stn,'_Flow']; %name of output file with Flow data
#fileinfo.rbr = ['station4_grandPassageII_RBRSN_011857.mat'];
#fileinfo.paramfile = mfilename;
#
#%% ADCP parameters
#saveparams.tmin = 209; %tmin (year day)
#saveparams.tmax = 240; %tmax (year day)
#saveparams.zmin = 0; %minimum z to include in saves file
#saveparams.zmax = 20;
#saveparams.approxdepth = 15.5; %Approximate depth
#saveparams.flooddir= 0; %Flood direction (relative to true north, CW is positive)
#saveparams.declination = -17.25;%Declination angle
#saveparams.lat = 44.2605; %latitude
#saveparams.lon = -66.3354; %longitude
#saveparams.dabADCP = 0.5; %depth above bottom of ADCP
#saveparams.dabPS = -0.6; %depth above bottom of pressure sensor
#saveparams.rbr_hr_offset = 3; % hour offset to convert rbr time to UTC
|
mit
| 7,166,734,428,525,729,000 | 31.338583 | 84 | 0.612126 | false |
giometti/beaglebone_home_automation_blueprints
|
chapter_04/wfrog/bbb_habp.py
|
1
|
3558
|
## Copyright 2010 Rodolfo Giometti <giometti@hce-engineering.com>
## derived from ws23xx by Laurent Bovet
##
## This file is part of wfrog
##
## wfrog is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import logging
from wfcommon import units
class BBBhabpStation(object):
'''
Station driver for BeagleBone Black Home Automation Blueprints.
[Properties]
period [numeric] (optional):
Polling interval in seconds. Defaults to 60.
'''
period=60
logger = logging.getLogger('station.bbb_habp')
name = 'BeagleBone Home Automation Blueprints weather station'
def get_press(self):
f = open("/sys/bus/iio/devices/iio:device1/in_pressure_input", "r")
v = f.read()
f.close()
return float(v) * 10.0
def get_temp(self):
f = open("/sys/class/hwmon/hwmon0/device/temp1_input", "r")
v = f.read()
f.close()
return int(v) / 1000.0
def get_hum(self):
f = open("/sys/class/hwmon/hwmon0/device/humidity1_input", "r")
v = f.read()
f.close()
return int(v) / 1000.0
def run(self, generate_event, send_event, context={}):
while True:
try:
e = generate_event('press')
e.value = self.get_press()
send_event(e)
self.logger.debug("press=%fhPa" % e.value)
except Exception, e:
self.logger.error(e)
try:
e = generate_event('temp')
e.sensor = 0
e.value = self.get_temp()
send_event(e)
self.logger.debug("temp=%fC" % e.value)
except Exception, e:
self.logger.error(e)
try:
e = generate_event('hum')
e.sensor = 0
e.value = self.get_hum()
send_event(e)
self.logger.debug("hum=%f%%RH" % e.value)
except Exception, e:
self.logger.error(e)
try:
e = generate_event('temp')
e.sensor = 1
e.value = self.get_temp()
send_event(e)
self.logger.debug("temp=%fC" % e.value)
except Exception, e:
self.logger.error(e)
try:
e = generate_event('hum')
e.sensor = 1
e.value = self.get_hum()
send_event(e)
self.logger.debug("hum=%f%%RH" % e.value)
except Exception, e:
self.logger.error(e)
# pause until next update time
next_update = self.period - (time.time() % self.period)
time.sleep(next_update)
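# Worked example of the polling alignment above (illustrative, not in the
# original file): with period = 60 and time.time() = 1000000123.4,
# time.time() % 60 = 43.4, so next_update = 60 - 43.4 = 16.6 s and the loop
# wakes on the next minute boundary instead of slowly drifting.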
|
gpl-2.0
| 4,234,070,682,089,319,000 | 30.210526 | 75 | 0.513491 | false |
openstack/rally
|
rally/cli/commands/task.py
|
1
|
37125
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Rally command: task"""
import itertools
import json
import os
import sys
import webbrowser
from rally.cli import cliutils
from rally.cli import envutils
from rally.cli import task_results_loader
from rally.cli import yamlutils as yaml
from rally.common import logging
from rally.common import utils as rutils
from rally.common import version
from rally import consts
from rally import exceptions
from rally import plugins
from rally.task import atomic
from rally.task.processing import charts
from rally.utils import strutils
LOG = logging.getLogger(__name__)
class FailedToLoadTask(exceptions.RallyException):
error_code = 117
msg_fmt = "Invalid %(source)s passed:\n\n\t %(msg)s"
class TaskCommands(object):
"""Set of commands that allow you to manage tasks and results.
"""
def _load_and_validate_task(self, api, task_file, args_file=None,
raw_args=None):
"""Load, render and validate tasks template from file with passed args.
:param task_file: Path to file with input task
:param raw_args: JSON or YAML representation of dict with args that
will be used to render input task with jinja2
:param args_file: Path to file with JSON or YAML representation
of dict, that will be used to render input with jinja2. If both
specified task_args and task_args_file they will be merged.
raw_args has bigger priority so it will update values
from args_file.
:returns: Str with loaded and rendered task
"""
print(cliutils.make_header("Preparing input task"))
try:
with open(task_file) as f:
input_task = f.read()
except IOError as err:
raise FailedToLoadTask(
source="--task",
msg="Error reading %s: %s" % (task_file, err))
task_dir = os.path.expanduser(os.path.dirname(task_file)) or "./"
task_args = {}
if args_file:
try:
with open(args_file) as f:
args_data = f.read()
except IOError as err:
raise FailedToLoadTask(
source="--task-args-file",
msg="Error reading %s: %s" % (args_file, err))
try:
task_args.update(yaml.safe_load(args_data))
except yaml.ParserError as e:
raise FailedToLoadTask(
source="--task-args-file",
msg="File '%s' has to be YAML or JSON. Details:\n\n%s"
% (args_file, e))
if raw_args:
try:
data = yaml.safe_load(raw_args)
if isinstance(data, str):
raise yaml.ParserError("String '%s' doesn't look like a "
"dictionary." % raw_args)
task_args.update(data)
except yaml.ParserError as e:
args = [keypair.split("=", 1)
for keypair in raw_args.split(",")]
if len([a for a in args if len(a) != 1]) != len(args):
raise FailedToLoadTask(
source="--task-args",
msg="Value has to be YAML or JSON. Details:\n\n%s" % e)
else:
task_args.update(dict(args))
try:
rendered_task = api.task.render_template(task_template=input_task,
template_dir=task_dir,
**task_args)
except Exception as e:
raise FailedToLoadTask(
source="--task",
msg="Failed to render task template.\n\n%s" % e)
print("Task is:\n%s\n" % rendered_task.strip())
try:
parsed_task = yaml.safe_load(rendered_task)
except Exception as e:
raise FailedToLoadTask(
source="--task",
msg="Wrong format of rendered input task. It should be YAML or"
" JSON. Details:\n\n%s" % e)
print("Task syntax is correct :)")
return parsed_task
@cliutils.args("--deployment", dest="deployment", type=str,
metavar="<uuid>", required=False,
help="UUID or name of a deployment.")
@cliutils.args("--task", "--filename", metavar="<path>",
dest="task_file",
help="Path to the input task file.")
@cliutils.args("--task-args", metavar="<json>", dest="task_args",
help="Input task args (JSON dict). These args are used "
"to render the Jinja2 template in the input task.")
@cliutils.args("--task-args-file", metavar="<path>", dest="task_args_file",
help="Path to the file with input task args (dict in "
"JSON/YAML). These args are used "
"to render the Jinja2 template in the input task.")
@envutils.with_default_deployment(cli_arg_name="deployment")
@plugins.ensure_plugins_are_loaded
def validate(self, api, task_file, deployment=None, task_args=None,
task_args_file=None):
"""Validate a task configuration file.
This will check that task configuration file has valid syntax and
all required options of scenarios, contexts, SLA and runners are set.
If both task_args and task_args_file are specified, they will
be merged. task_args has a higher priority so it will override
values from task_args_file.
"""
task = self._load_and_validate_task(api, task_file, raw_args=task_args,
args_file=task_args_file)
api.task.validate(deployment=deployment, config=task)
print("Input Task is valid :)")
@cliutils.args("--deployment", dest="deployment", type=str,
metavar="<uuid>", required=False,
help="UUID or name of a deployment.")
@cliutils.args("--task", "--filename", metavar="<path>",
dest="task_file",
help="Path to the input task file.")
@cliutils.args("--task-args", dest="task_args", metavar="<json>",
help="Input task args (JSON dict). These args are used "
"to render the Jinja2 template in the input task.")
@cliutils.args("--task-args-file", dest="task_args_file", metavar="<path>",
help="Path to the file with input task args (dict in "
"JSON/YAML). These args are used "
"to render the Jinja2 template in the input task.")
@cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False,
help="Mark the task with a tag or a few tags.")
@cliutils.args("--no-use", action="store_false", dest="do_use",
help="Don't set new task as default for future operations.")
@cliutils.args("--abort-on-sla-failure", action="store_true",
dest="abort_on_sla_failure",
help="Abort the execution of a task when any SLA check "
"for it fails for subtask or workload.")
@envutils.with_default_deployment(cli_arg_name="deployment")
@plugins.ensure_plugins_are_loaded
def start(self, api, task_file, deployment=None, task_args=None,
task_args_file=None, tags=None, do_use=False,
abort_on_sla_failure=False):
"""Run task.
If both task_args and task_args_file are specified, they are going to
be merged. task_args has a higher priority so it overrides
values from task_args_file.
There are 3 kinds of return codes, 0: no error, 1: running error,
2: sla check failed.
"""
input_task = self._load_and_validate_task(api, task_file,
raw_args=task_args,
args_file=task_args_file)
print("Running Rally version", version.version_string())
return self._start_task(api, deployment, task_config=input_task,
tags=tags, do_use=do_use,
abort_on_sla_failure=abort_on_sla_failure)
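    # Illustrative CLI usage (hypothetical values, not part of the original
    # module); the process exit code mirrors the return value documented
    # above (0 ok, 1 running error, 2 SLA check failed):
    #
    #   rally task start --task my-task.yaml --tag smoke nightly \
    #       --abort-on-sla-failure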
def _start_task(self, api, deployment, task_config, tags=None,
do_use=False, abort_on_sla_failure=False):
try:
task_instance = api.task.create(deployment=deployment, tags=tags)
tags = "[tags: '%s']" % "', '".join(tags) if tags else ""
print(cliutils.make_header(
"Task %(tags)s %(uuid)s: started"
% {"uuid": task_instance["uuid"], "tags": tags}))
print("Running Task... This can take a while...\n")
print("To track task status use:\n")
print("\trally task status\n\tor\n\trally task detailed\n")
if do_use:
self.use(api, task_instance["uuid"])
api.task.start(deployment=deployment, config=task_config,
task=task_instance["uuid"],
abort_on_sla_failure=abort_on_sla_failure)
except exceptions.DeploymentNotFinishedStatus as e:
print("Cannot start a task on unfinished deployment: %s" % e)
return 1
if self._detailed(api, task_id=task_instance["uuid"]):
return 2
return 0
@cliutils.args("--deployment", dest="deployment", type=str,
metavar="<uuid>", required=False,
help="UUID or name of a deployment.")
@cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.")
@cliutils.args("--scenario", type=str, dest="scenarios", nargs="+",
help="scenario name of workload")
@cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False,
help="Mark the task with a tag or a few tags.")
@cliutils.args("--no-use", action="store_false", dest="do_use",
help="Don't set new task as default for future operations.")
@cliutils.args("--abort-on-sla-failure", action="store_true",
dest="abort_on_sla_failure",
help="Abort the execution of a task when any SLA check "
"for it fails for subtask or workload.")
@envutils.with_default_deployment(cli_arg_name="deployment")
@envutils.with_default_task_id
@plugins.ensure_plugins_are_loaded
def restart(self, api, deployment=None, task_id=None, scenarios=None,
tags=None, do_use=False, abort_on_sla_failure=False):
"""Restart a task or some scenarios in workloads of task."""
if scenarios is not None:
scenarios = (isinstance(scenarios, list) and scenarios
or [scenarios])
task = api.task.get(task_id=task_id, detailed=True)
if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
consts.TaskStatus.VALIDATION_FAILED):
print("-" * 80)
print("\nUnable to restart task.")
validation = task["validation_result"]
if logging.is_debug():
print(yaml.safe_load(validation["trace"]))
else:
print(validation["etype"])
print(validation["msg"])
print("\nFor more details run:\nrally -d task detailed %s"
% task["uuid"])
return 1
retask = {"version": 2, "title": task["title"],
"description": task["description"],
"tags": task["tags"], "subtasks": []}
for subtask in task["subtasks"]:
workloads = []
for workload in subtask["workloads"]:
if scenarios is None or workload["name"] in scenarios:
workloads.append({
"scenario": {workload["name"]: workload["args"]},
"contexts": workload["contexts"],
"runner": {
workload["runner_type"]: workload["runner"]},
"hooks": workload["hooks"],
"sla": workload["sla"]
})
if workloads:
retask["subtasks"].append({
"title": subtask["title"],
"description": subtask["description"],
"workloads": workloads})
if retask["subtasks"]:
return self._start_task(api, deployment, retask, tags=tags,
do_use=do_use,
abort_on_sla_failure=abort_on_sla_failure)
else:
print("Not Found matched scenario.")
return 1
@cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.")
@envutils.with_default_task_id
@cliutils.args(
"--soft", action="store_true",
help="Abort task after current scenario finishes execution.")
def abort(self, api, task_id=None, soft=False):
"""Abort a running task."""
if soft:
print("INFO: please be informed that soft abort won't stop "
"a running workload, but will prevent new ones from "
"starting. If you are running task with only one "
"scenario, soft abort will not help at all.")
api.task.abort(task_uuid=task_id, soft=soft, wait=True)
print("Task %s successfully stopped." % task_id)
@cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task")
@envutils.with_default_task_id
def status(self, api, task_id=None):
"""Display the current status of a task."""
task = api.task.get(task_id=task_id)
print("Task %(task_id)s: %(status)s"
% {"task_id": task_id, "status": task["status"]})
@cliutils.args("--uuid", type=str, dest="task_id",
help=("UUID of task. If --uuid is \"last\" the results of "
" the most recently created task will be displayed."))
@cliutils.args("--iterations-data", dest="iterations_data",
action="store_true",
help="Print detailed results for each iteration.")
@cliutils.args("--filter-by", dest="filters", nargs="+", type=str,
help="Filter the displayed workloads."
"<sla-failures>: only display the failed workloads.\n"
"<scenarios>: filter the workloads by scenarios.,"
"scenarios=scenario_name1[,scenario_name2]...")
@envutils.with_default_task_id
def detailed(self, api, task_id=None, iterations_data=False,
filters=None):
self._detailed(api, task_id, iterations_data, filters)
def _detailed(self, api, task_id=None, iterations_data=False,
filters=None):
"""Print detailed information about given task."""
scenarios_filter = []
only_sla_failures = False
for filter in filters or []:
if filter.startswith("scenario="):
filter_value = filter.split("=")[1]
scenarios_filter = filter_value.split(",")
if filter == "sla-failures":
only_sla_failures = True
task = api.task.get(task_id=task_id, detailed=True)
print()
print("-" * 80)
print("Task %(task_id)s: %(status)s"
% {"task_id": task_id, "status": task["status"]})
if task["status"] == consts.TaskStatus.CRASHED or task["status"] == (
consts.TaskStatus.VALIDATION_FAILED):
print("-" * 80)
validation = task["validation_result"]
if logging.is_debug():
print(yaml.safe_load(validation["trace"]))
else:
print(validation["etype"])
print(validation["msg"])
print("\nFor more details run:\nrally -d task detailed %s"
% task["uuid"])
return 0
elif task["status"] not in [consts.TaskStatus.FINISHED,
consts.TaskStatus.ABORTED]:
print("-" * 80)
print("\nThe task %s marked as '%s'. Results "
"available when it is '%s'."
% (task_id, task["status"], consts.TaskStatus.FINISHED))
return 0
for workload in itertools.chain(
*[s["workloads"] for s in task["subtasks"]]):
if scenarios_filter and workload["name"] not in scenarios_filter:
continue
if only_sla_failures and workload["pass_sla"]:
continue
print("-" * 80)
print()
print("test scenario %s" % workload["name"])
print("args position %s" % workload["position"])
print("args values:")
print(json.dumps(
{"args": workload["args"],
"runner": workload["runner"],
"contexts": workload["contexts"],
"sla": workload["sla"],
"hooks": [r["config"] for r in workload["hooks"]]},
indent=2))
print()
duration_stats = workload["statistics"]["durations"]
iterations = []
iterations_headers = ["iteration", "duration"]
iterations_actions = []
output = []
task_errors = []
if iterations_data:
atomic_names = [a["display_name"]
for a in duration_stats["atomics"]]
for i, atomic_name in enumerate(atomic_names, 1):
action = "%i. %s" % (i, atomic_name)
iterations_headers.append(action)
iterations_actions.append((atomic_name, action))
for idx, itr in enumerate(workload["data"], 1):
if iterations_data:
row = {"iteration": idx, "duration": itr["duration"]}
for name, action in iterations_actions:
atomic_actions = atomic.merge_atomic_actions(
itr["atomic_actions"])
row[action] = atomic_actions.get(name, {}).get(
"duration", 0)
iterations.append(row)
if "output" in itr:
iteration_output = itr["output"]
else:
iteration_output = {"additive": [], "complete": []}
for idx, additive in enumerate(iteration_output["additive"]):
if len(output) <= idx + 1:
output_table = charts.OutputStatsTable(
workload, title=additive["title"])
output.append(output_table)
output[idx].add_iteration(additive["data"])
if itr.get("error"):
task_errors.append(TaskCommands._format_task_error(itr))
self._print_task_errors(task_id, task_errors)
cols = charts.MainStatsTable.columns
formatters = {
"Action": lambda x: x["display_name"],
"Min (sec)": lambda x: x["data"]["min"],
"Median (sec)": lambda x: x["data"]["median"],
"90%ile (sec)": lambda x: x["data"]["90%ile"],
"95%ile (sec)": lambda x: x["data"]["95%ile"],
"Max (sec)": lambda x: x["data"]["max"],
"Avg (sec)": lambda x: x["data"]["avg"],
"Success": lambda x: x["data"]["success"],
"Count": lambda x: x["data"]["iteration_count"]
}
rows = []
def make_flat(r, depth=0):
if depth > 0:
r["display_name"] = (" %s> %s" % ("-" * depth,
r["display_name"]))
rows.append(r)
for children in r["children"]:
make_flat(children, depth + 1)
for row in itertools.chain(duration_stats["atomics"],
[duration_stats["total"]]):
make_flat(row)
cliutils.print_list(rows,
fields=cols,
formatters=formatters,
normalize_field_names=True,
table_label="Response Times (sec)",
sortby_index=None)
print()
if iterations_data:
formatters = dict(zip(iterations_headers[1:],
[cliutils.pretty_float_formatter(col, 3)
for col in iterations_headers[1:]]))
cliutils.print_list(iterations,
fields=iterations_headers,
table_label="Atomics per iteration",
formatters=formatters)
print()
if output:
cols = charts.OutputStatsTable.columns
float_cols = cols[1:7]
formatters = dict(zip(float_cols,
[cliutils.pretty_float_formatter(col, 3)
for col in float_cols]))
for out in output:
data = out.render()
rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
if rows:
# NOTE(amaretskiy): print title explicitly because
# prettytable fails if title length is too long
print(data["title"])
cliutils.print_list(rows, fields=cols,
formatters=formatters)
print()
print("Load duration: %s"
% strutils.format_float_to_str(workload["load_duration"]))
print("Full duration: %s"
% strutils.format_float_to_str(workload["full_duration"]))
print("\nHINTS:")
print("* To plot HTML graphics with this data, run:")
print("\trally task report %s --out output.html\n" % task["uuid"])
print("* To generate a JUnit report, run:")
print("\trally task export %s --type junit-xml --to output.xml\n" %
task["uuid"])
print("* To get raw JSON output of task results, run:")
print("\trally task report %s --json --out output.json\n" %
task["uuid"])
if not task["pass_sla"]:
print("At least one workload did not pass SLA criteria.\n")
return 1
@cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.")
@envutils.with_default_task_id
@cliutils.suppress_warnings
def results(self, api, task_id=None):
"""DEPRECATED since Rally 3.0.0."""
LOG.warning("CLI method `rally task results` is deprecated since "
"Rally 3.0.0 and will be removed soon. "
"Use `rally task report --json` instead.")
try:
self.export(api, tasks=[task_id], output_type="old-json-results")
except exceptions.RallyException as e:
print(e.format_message())
return 1
@cliutils.args("--deployment", dest="deployment", type=str,
metavar="<uuid>", required=False,
help="UUID or name of a deployment.")
@cliutils.args("--all-deployments", action="store_true",
dest="all_deployments",
help="List tasks from all deployments.")
@cliutils.args("--status", type=str, dest="status",
help="List tasks with specified status."
" Available statuses: %s" % ", ".join(consts.TaskStatus))
@cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False,
help="Tags to filter tasks by.")
@cliutils.args("--uuids-only", action="store_true",
dest="uuids_only", help="List task UUIDs only.")
@envutils.with_default_deployment(cli_arg_name="deployment")
def list(self, api, deployment=None, all_deployments=False, status=None,
tags=None, uuids_only=False):
"""List tasks, started and finished.
Displayed tasks can be filtered by status or deployment. By
default 'rally task list' will display tasks from the active
deployment without filtering by status.
"""
filters = {}
headers = ["UUID", "Deployment name", "Created at", "Load duration",
"Status", "Tag(s)"]
if status in consts.TaskStatus:
filters["status"] = status
elif status:
print("Error: Invalid task status '%s'.\nAvailable statuses: %s"
% (status, ", ".join(consts.TaskStatus)),
file=sys.stderr)
return(1)
if not all_deployments:
filters["deployment"] = deployment
if tags:
filters["tags"] = tags
task_list = api.task.list(**filters)
if uuids_only:
if task_list:
print("\n".join([t["uuid"] for t in task_list]))
elif task_list:
def tags_formatter(t):
if not t["tags"]:
return ""
return "'%s'" % "', '".join(t["tags"])
formatters = {
"Tag(s)": tags_formatter,
"Load duration": cliutils.pretty_float_formatter(
"task_duration", 3),
"Created at": lambda t: t["created_at"].replace("T", " ")
}
cliutils.print_list(
task_list, fields=headers, normalize_field_names=True,
sortby_index=headers.index("Created at"),
formatters=formatters)
else:
if status:
print("There are no tasks in '%s' status. "
"To run a new task, use:\n\trally task start"
% status)
else:
print("There are no tasks. To run a new task, use:\n"
"\trally task start")
@cliutils.args("--out", metavar="<path>",
type=str, dest="out", required=False,
help="Path to output file.")
@cliutils.args("--open", dest="open_it", action="store_true",
help="Open the output in a browser.")
@cliutils.args("--tasks", dest="tasks", nargs="+",
help="UUIDs of tasks, or JSON files with task results")
@cliutils.args("--html-static", dest="out_format",
action="store_const", const="trends-html-static")
@cliutils.suppress_warnings
def trends(self, api, *args, **kwargs):
"""Generate workloads trends HTML report."""
tasks = kwargs.get("tasks", []) or list(args)
if not tasks:
print("ERROR: At least one task must be specified",
file=sys.stderr)
return 1
self.export(api, tasks=tasks,
output_type=kwargs.get("out_format", "trends-html"),
output_dest=kwargs.get("out"),
open_it=kwargs.get("open_it", False))
@cliutils.args("--out", metavar="<path>",
type=str, dest="out", required=False,
help="Report destination. Can be a path to a file (in case"
" of HTML, HTML-STATIC, etc. types) to save the"
" report to or a connection string.")
@cliutils.args("--open", dest="open_it", action="store_true",
help="Open the output in a browser.")
@cliutils.args("--html", dest="out_format",
action="store_const", const="html")
@cliutils.args("--html-static", dest="out_format",
action="store_const", const="html-static")
@cliutils.args("--json", dest="out_format",
action="store_const", const="json")
@cliutils.args("--uuid", dest="tasks", nargs="+", type=str,
help="UUIDs of tasks or json reports of tasks")
@cliutils.args("--deployment", dest="deployment", type=str,
help="Report all tasks with defined deployment",
required=False)
@envutils.default_from_global("tasks", envutils.ENV_TASK, "uuid")
@cliutils.suppress_warnings
def report(self, api, tasks=None, out=None,
open_it=False, out_format="html", deployment=None):
"""Generate a report for the specified task(s)."""
self.export(api, tasks=tasks,
output_type=out_format,
output_dest=out,
open_it=open_it,
deployment=deployment)
@cliutils.args("--force", action="store_true", help="force delete")
@cliutils.args("--uuid", type=str, dest="task_id", nargs="*",
metavar="<task-id>",
help="UUID of task or a list of task UUIDs.")
@envutils.with_default_task_id
def delete(self, api, task_id=None, force=False):
"""Delete task and its results."""
def _delete_single_task(tid, force):
try:
api.task.delete(task_uuid=tid, force=force)
print("Successfully deleted task `%s`" % tid)
except exceptions.DBConflict as e:
print(e)
print("Use '--force' option to delete the task with vague "
"state.")
if isinstance(task_id, list):
for tid in task_id:
_delete_single_task(tid, force)
else:
_delete_single_task(task_id, force)
@cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.")
@cliutils.args("--json", dest="tojson",
action="store_true",
help="Output in JSON format.")
@envutils.with_default_task_id
def sla_check(self, api, task_id=None, tojson=False):
"""Display SLA check results table."""
task = api.task.get(task_id=task_id, detailed=True)
failed_criteria = 0
data = []
STATUS_PASS = "PASS"
STATUS_FAIL = "FAIL"
for workload in itertools.chain(
*[s["workloads"] for s in task["subtasks"]]):
for sla in sorted(workload["sla_results"].get("sla", []),
key=lambda x: x["criterion"]):
success = sla.pop("success")
sla["status"] = success and STATUS_PASS or STATUS_FAIL
sla["benchmark"] = workload["name"]
sla["pos"] = workload["position"]
failed_criteria += int(not success)
data.append(sla if tojson else rutils.Struct(**sla))
if tojson:
print(json.dumps(data, sort_keys=False))
else:
cliutils.print_list(data, ("benchmark", "pos", "criterion",
"status", "detail"))
if not data:
return 2
return failed_criteria
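    # Illustrative sketch (hypothetical UUID and `api` handle, not part of the
    # original module): the return value doubles as an exit code, so callers
    # can do
    #
    #   failed = TaskCommands().sla_check(api, task_id="<task-uuid>")
    #   # 0 -> every criterion passed, 2 -> no SLA data found,
    #   # anything else -> number of failed criteria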
@cliutils.args("--uuid", type=str, dest="task_id",
help="UUID of the task")
def use(self, api, task_id):
"""Set active task."""
print("Using task: %s" % task_id)
api.task.get(task_id=task_id)
envutils.update_globals_file("RALLY_TASK", task_id)
@cliutils.args("--uuid", dest="tasks", nargs="+", type=str,
help="UUIDs of tasks or json reports of tasks")
@cliutils.args("--type", dest="output_type", type=str,
required=True,
help="Report type. Out-of-the-box "
"types: JSON, HTML, HTML-Static, Elastic, JUnit-XML. "
"HINT: You can list all types, executing "
"`rally plugin list --plugin-base TaskExporter` "
"command.")
@cliutils.args("--to", dest="output_dest", type=str,
metavar="<dest>", required=False,
help="Report destination. Can be a path to a file (in case"
" of JSON, HTML, HTML-Static, JUnit-XML, Elastic etc. "
"types) to save the report to or a connection string."
" It depends on the report type."
)
@cliutils.args("--deployment", dest="deployment", type=str,
help="Report all tasks with defined deployment",
required=False)
@envutils.default_from_global("tasks", envutils.ENV_TASK, "uuid")
@plugins.ensure_plugins_are_loaded
def export(self, api, tasks=None, output_type=None, output_dest=None,
open_it=False, deployment=None):
"""Export task results to the custom task's exporting system."""
if deployment is not None:
tasks = api.task.list(deployment=deployment, uuids_only=True)
tasks = [task["uuid"] for task in tasks]
else:
tasks = isinstance(tasks, list) and tasks or [tasks]
exported_tasks = []
for task_file_or_uuid in tasks:
if os.path.exists(os.path.expanduser(task_file_or_uuid)):
exported_tasks.extend(
task_results_loader.load(task_file_or_uuid)
)
else:
exported_tasks.append(task_file_or_uuid)
report = api.task.export(tasks=exported_tasks,
output_type=output_type,
output_dest=output_dest)
if "files" in report:
for path in report["files"]:
output_file = os.path.expanduser(path)
with open(output_file, "w+") as f:
f.write(report["files"][path])
if open_it:
if "open" in report:
webbrowser.open_new_tab(report["open"])
if "print" in report:
print(report["print"])
@staticmethod
def _print_task_errors(task_id, task_errors):
print(cliutils.make_header("Task %s has %d error(s)" %
(task_id, len(task_errors))))
for err_data in task_errors:
print(*err_data, sep="\n")
print("-" * 80)
@staticmethod
def _format_task_error(data):
error_type = "Unknown type"
error_message = "Rally hasn't caught anything yet"
error_traceback = "No traceback available."
try:
error_type = data["error"][0]
error_message = data["error"][1]
error_traceback = data["error"][2]
except IndexError:
pass
return ("%(error_type)s: %(error_message)s\n" %
{"error_type": error_type, "error_message": error_message},
error_traceback)
@cliutils.args("--file", dest="task_file", type=str, metavar="<path>",
required=True, help="JSON file with task results")
@cliutils.args("--deployment", dest="deployment", type=str,
metavar="<uuid>", required=False,
help="UUID or name of a deployment.")
@cliutils.args("--tag", nargs="+", dest="tags", type=str, required=False,
help="Mark the task with a tag or a few tags.")
@envutils.with_default_deployment(cli_arg_name="deployment")
@cliutils.alias("import")
@cliutils.suppress_warnings
def import_results(self, api, deployment=None, task_file=None, tags=None):
"""Import json results of a test into rally database"""
if os.path.exists(os.path.expanduser(task_file)):
tasks_results = task_results_loader.load(task_file)
for task_results in tasks_results:
task = api.task.import_results(deployment=deployment,
task_results=task_results,
tags=tags)
print("Task UUID: %s." % task["uuid"])
else:
print("ERROR: Invalid file name passed: %s" % task_file,
file=sys.stderr)
return 1
|
apache-2.0
| -3,849,538,224,684,864,500 | 43.301909 | 79 | 0.522478 | false |
qbuat/rootpy
|
rootpy/stats/tests/test_plottable.py
|
1
|
2365
|
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from nose.plugins.skip import SkipTest
from rootpy.utils.silence import silence_sout
try:
with silence_sout():
import ROOT
from ROOT import (RooFit, RooRealVar, RooGaussian, RooArgusBG,
RooAddPdf, RooArgList, RooArgSet, RooAbsData)
from rootpy.stats import mute_roostats; mute_roostats()
from rootpy import asrootpy
except ImportError:
raise SkipTest("ROOT is not compiled with RooFit and RooStats enabled")
from rootpy.io import TemporaryFile
from nose.tools import assert_true
def test_plottable():
# construct pdf and toy data following example at
# http://root.cern.ch/drupal/content/roofit
# --- Observable ---
mes = RooRealVar("mes", "m_{ES} (GeV)", 5.20, 5.30)
# --- Parameters ---
sigmean = RooRealVar("sigmean", "B^{#pm} mass", 5.28, 5.20, 5.30)
sigwidth = RooRealVar("sigwidth", "B^{#pm} width", 0.0027, 0.001, 1.)
# --- Build Gaussian PDF ---
signal = RooGaussian("signal", "signal PDF", mes, sigmean, sigwidth)
# --- Build Argus background PDF ---
argpar = RooRealVar("argpar", "argus shape parameter", -20.0, -100., -1.)
background = RooArgusBG("background", "Argus PDF",
mes, RooFit.RooConst(5.291), argpar)
# --- Construct signal+background PDF ---
nsig = RooRealVar("nsig", "#signal events", 200, 0., 10000)
nbkg = RooRealVar("nbkg", "#background events", 800, 0., 10000)
model = RooAddPdf("model", "g+a",
RooArgList(signal, background),
RooArgList(nsig, nbkg))
# --- Generate a toyMC sample from composite PDF ---
data = model.generate(RooArgSet(mes), 2000)
# --- Perform extended ML fit of composite PDF to toy data ---
fitresult = model.fitTo(data, RooFit.Save(), RooFit.PrintLevel(-1))
# --- Plot toy data and composite PDF overlaid ---
mesframe = asrootpy(mes.frame())
type(mesframe)
data.plotOn(mesframe)
model.plotOn(mesframe)
for obj in mesframe.objects:
assert_true(obj)
for curve in mesframe.curves:
assert_true(curve)
for hist in mesframe.data_hists:
assert_true(hist)
assert_true(mesframe.plotvar)
with TemporaryFile():
mesframe.Write()
|
gpl-3.0
| -3,899,456,638,177,250,000 | 33.779412 | 77 | 0.640169 | false |
rackerlabs/marconi
|
marconi/queues/transport/wsgi/v1_0/__init__.py
|
1
|
2932
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from marconi.queues.transport.wsgi.v1_0 import claims
from marconi.queues.transport.wsgi.v1_0 import health
from marconi.queues.transport.wsgi.v1_0 import homedoc
from marconi.queues.transport.wsgi.v1_0 import messages
from marconi.queues.transport.wsgi.v1_0 import metadata
from marconi.queues.transport.wsgi.v1_0 import queues
from marconi.queues.transport.wsgi.v1_0 import shards
from marconi.queues.transport.wsgi.v1_0 import stats
def public_endpoints(driver):
queue_controller = driver._storage.queue_controller
message_controller = driver._storage.message_controller
claim_controller = driver._storage.claim_controller
return [
# Home
('/',
homedoc.Resource()),
# Queues Endpoints
('/queues',
queues.CollectionResource(driver._validate,
queue_controller)),
('/queues/{queue_name}',
queues.ItemResource(queue_controller,
message_controller)),
('/queues/{queue_name}/stats',
stats.Resource(queue_controller)),
('/queues/{queue_name}/metadata',
metadata.Resource(driver._wsgi_conf, driver._validate,
queue_controller)),
# Messages Endpoints
('/queues/{queue_name}/messages',
messages.CollectionResource(driver._wsgi_conf,
driver._validate,
message_controller)),
('/queues/{queue_name}/messages/{message_id}',
messages.ItemResource(message_controller)),
# Claims Endpoints
('/queues/{queue_name}/claims',
claims.CollectionResource(driver._wsgi_conf,
driver._validate,
claim_controller)),
('/queues/{queue_name}/claims/{claim_id}',
claims.ItemResource(driver._wsgi_conf,
driver._validate,
claim_controller)),
# Health
('/health',
health.Resource(driver._storage))
]
def private_endpoints(driver):
shards_controller = driver._control.shards_controller
return [
('/shards',
shards.Listing(shards_controller)),
('/shards/{shard}',
shards.Resource(shards_controller)),
]
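# Illustrative sketch (assumption, not part of the original module): the
# (route template, resource) pairs above are meant to be mounted on the
# falcon application owned by the WSGI driver, roughly like
#
#   for route, resource in public_endpoints(driver):
#       app.add_route(version_prefix + route, resource)
#
# where `app` and `version_prefix` (e.g. '/v1') are hypothetical names for
# the driver's falcon API object and URL prefix.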
|
apache-2.0
| -5,074,667,331,260,138,000 | 35.65 | 79 | 0.620737 | false |
mrkurt/proxy-mixer
|
proxyjs/javascript.py
|
1
|
2093
|
import re
import os
import spidermonkey
class Extractor:
__mixer_js_lib = open(os.path.dirname(__file__) + '/../javascript/mixer.js', 'r').read()
__js_block = re.compile(r"(<script.+?runat=\"proxy\".*?>)(.*?)(</script>)", re.S)
__script_start = re.compile(r"<script.+?runat=\"proxy\".*?>")
__script_src = re.compile(r"src=\"([^\"]+)\"")
__script_end = "</script>"
def __init__(self, raw):
self.data = Extractor.__js_block.split(raw)
def js(self, skip_mixer_helpers = False):
js = ''
js_instances = 0
in_script = False
for i, t in enumerate(self.data):
if t == Extractor.__script_end:
in_script = False
elif t.startswith("<script") and Extractor.__script_start.match(t):
in_script = True
js_instances += 1
js += self.__extract_js_includes(t)
elif in_script:
js += t + '\n'
else:
js += 'Mixer.append_raw_from_array(%s);\n' % i
if not skip_mixer_helpers:
js = 'var window = {};\n' + Extractor.__mixer_js_lib + '\n' + js
js += '\nMixer.output;'
return js
def __extract_js_includes(self, chunk):
js = ''
for s in Extractor.__script_src.findall(chunk):
            # append a newline so the next generated JS line is not swallowed by this comment
            js += '// Including: ' + s + '\n'
return js
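# Illustrative sketch (hypothetical markup, not in the original file):
#
#   raw = '<h1>Hi</h1><script runat="proxy">/* proxy-side JS */</script>'
#   js_source = Extractor(raw).js(skip_mixer_helpers=True)
#
# plain HTML chunks become Mixer.append_raw_from_array(<index>) calls, the
# proxy script body is inlined verbatim, and the generated source always
# ends with 'Mixer.output;' so evaluating it yields the command list.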
class Runner:
__runtime = spidermonkey.Runtime()
__result_handlers = {
'raw' : lambda c, d : d[c.index],
'include' : lambda c, d : '<p><strong>Including: %s</strong></p>' % c.src
}
def __init__(self, raw = None):
if raw:
self.__extractor = Extractor(raw)
def set_extractor(self, extractor):
self.__extractor = extractor
def run_js(self, js = None):
if not js and self.__extractor:
js = self.__extractor.js()
ctx = Runner.__runtime.new_context()
return ctx.execute(js)
def assemble_result(self, raw = None):
if not self.__extractor and not raw:
            raise Exception, "No source to work with"  # `Error` was undefined (NameError)
elif raw:
self.__extractor = Extractor(raw)
result = self.run_js()
data = self.__extractor.data
output = ''
handlers = Runner.__result_handlers
for i in range(len(result)):
command = result[i]
if command:
output += handlers[command.type](command, data)
return output
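# Minimal usage sketch (hypothetical markup, not in the original file):
#
#   raw = '<p>before</p><script runat="proxy">/* proxy-side JS */</script>'
#   html = Runner(raw).assemble_result()
#
# run_js() evaluates the extracted source in a SpiderMonkey context and
# assemble_result() maps each returned command back onto the untouched HTML
# chunks via __result_handlers.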
|
mit
| 255,322,132,879,189,540 | 25.846154 | 89 | 0.609651 | false |
trosa/forca
|
gluon/contrib/memdb.py
|
1
|
28147
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu> and
Robin B <robi123@gmail.com>.
License: GPL v2
"""
__all__ = ['MEMDB', 'Field']
import re
import sys
import os
import types
import datetime
import thread
import cStringIO
import csv
import copy
import gluon.validators as validators
from gluon.storage import Storage
import random
SQL_DIALECTS = {'memcache': {
'boolean': bool,
'string': unicode,
'text': unicode,
'password': unicode,
'blob': unicode,
'upload': unicode,
'integer': long,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': int,
'reference': int,
'lower': None,
'upper': None,
'is null': 'IS NULL',
'is not null': 'IS NOT NULL',
'extract': None,
'left join': None,
}}
def cleanup(text):
if re.compile('[^0-9a-zA-Z_]').findall(text):
raise SyntaxError, \
'Can\'t cleanup \'%s\': only [0-9a-zA-Z_] allowed in table and field names' % text
return text
def assert_filter_fields(*fields):
for field in fields:
if isinstance(field, (Field, Expression)) and field.type\
in ['text', 'blob']:
raise SyntaxError, 'AppEngine does not index by: %s'\
% field.type
def dateobj_to_datetime(object):
# convert dates,times to datetimes for AppEngine
if isinstance(object, datetime.date):
object = datetime.datetime(object.year, object.month,
object.day)
if isinstance(object, datetime.time):
object = datetime.datetime(
1970,
1,
1,
object.hour,
object.minute,
object.second,
object.microsecond,
)
return object
def sqlhtml_validators(field_type, length):
v = {
'boolean': [],
'string': validators.IS_LENGTH(length),
'text': [],
'password': validators.IS_LENGTH(length),
'blob': [],
'upload': [],
'double': validators.IS_FLOAT_IN_RANGE(-1e100, 1e100),
'integer': validators.IS_INT_IN_RANGE(-1e100, 1e100),
'date': validators.IS_DATE(),
'time': validators.IS_TIME(),
'datetime': validators.IS_DATETIME(),
'reference': validators.IS_INT_IN_RANGE(0, 1e100),
}
try:
return v[field_type[:9]]
except KeyError:
return []
class DALStorage(dict):
"""
a dictionary that let you do d['a'] as well as d.a
"""
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if key in self:
raise SyntaxError, 'Object \'%s\'exists and cannot be redefined' % key
self[key] = value
def __repr__(self):
return '<DALStorage ' + dict.__repr__(self) + '>'
class SQLCallableList(list):
def __call__(self):
return copy.copy(self)
class MEMDB(DALStorage):
"""
an instance of this class represents a database connection
Example::
db=MEMDB(Client())
db.define_table('tablename',Field('fieldname1'),
Field('fieldname2'))
"""
def __init__(self, client):
self._dbname = 'memdb'
self['_lastsql'] = ''
self.tables = SQLCallableList()
self._translator = SQL_DIALECTS['memcache']
self.client = client
def define_table(
self,
tablename,
*fields,
**args
):
tablename = cleanup(tablename)
if tablename in dir(self) or tablename[0] == '_':
raise SyntaxError, 'invalid table name: %s' % tablename
if not tablename in self.tables:
self.tables.append(tablename)
else:
raise SyntaxError, 'table already defined: %s' % tablename
t = self[tablename] = Table(self, tablename, *fields)
t._create()
return t
def __call__(self, where=''):
return Set(self, where)
class SQLALL(object):
def __init__(self, table):
self.table = table
class Table(DALStorage):
"""
an instance of this class represents a database table
Example::
db=MEMDB(Client())
db.define_table('users',Field('name'))
db.users.insert(name='me')
"""
def __init__(
self,
db,
tablename,
*fields
):
self._db = db
self._tablename = tablename
self.fields = SQLCallableList()
self._referenced_by = []
fields = list(fields)
fields.insert(0, Field('id', 'id'))
for field in fields:
self.fields.append(field.name)
self[field.name] = field
field._tablename = self._tablename
field._table = self
field._db = self._db
self.ALL = SQLALL(self)
def _create(self):
fields = []
myfields = {}
for k in self.fields:
field = self[k]
attr = {}
if not field.type[:9] in ['id', 'reference']:
if field.notnull:
attr = dict(required=True)
if field.type[:2] == 'id':
continue
if field.type[:9] == 'reference':
referenced = field.type[10:].strip()
if not referenced:
raise SyntaxError, \
'Table %s: reference \'%s\' to nothing!' % (self._tablename, k)
if not referenced in self._db:
raise SyntaxError, \
'Table: table %s does not exist' % referenced
referee = self._db[referenced]
ftype = \
self._db._translator[field.type[:9]](
self._db[referenced]._tableobj)
if self._tablename in referee.fields: # ## THIS IS OK
raise SyntaxError, \
'Field: table \'%s\' has same name as a field ' \
'in referenced table \'%s\'' % (self._tablename, referenced)
self._db[referenced]._referenced_by.append((self._tablename,
field.name))
elif not field.type in self._db._translator\
or not self._db._translator[field.type]:
raise SyntaxError, 'Field: unkown field type %s' % field.type
self._tableobj = self._db.client
return None
def create(self):
# nothing to do, here for backward compatility
pass
def drop(self):
# nothing to do, here for backward compatibility
self._db(self.id > 0).delete()
def insert(self, **fields):
id = self._create_id()
if self.update(id, **fields):
return long(id)
else:
return None
def get(self, id):
val = self._tableobj.get(self._id_to_key(id))
if val:
return Storage(val)
else:
return None
def update(self, id, **fields):
for field in fields:
if not field in fields and self[field].default\
!= None:
fields[field] = self[field].default
if field in fields:
fields[field] = obj_represent(fields[field],
self[field].type, self._db)
return self._tableobj.set(self._id_to_key(id), fields)
def delete(self, id):
return self._tableobj.delete(self._id_to_key(id))
def _shard_key(self, shard):
return self._id_to_key('s/%s' % shard)
def _id_to_key(self, id):
return '__memdb__/t/%s/k/%s' % (self._tablename, str(id))
def _create_id(self):
shard = random.randint(10, 99)
shard_id = self._shard_key(shard)
id = self._tableobj.incr(shard_id)
if not id:
if self._tableobj.set(shard_id, '0'):
id = 0
else:
raise Exception, 'cannot set memcache'
return long(str(shard) + str(id))
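    # Worked example of the shard-based id scheme above (illustrative, not in
    # the original file): for a table 'users', a random shard of 42 uses the
    # counter key '__memdb__/t/users/k/s/42'; if memcache increments that
    # counter to 7, the returned row id is long('42' + '7') == 427 and the
    # row itself is stored under '__memdb__/t/users/k/427'.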
def __str__(self):
return self._tablename
class Expression(object):
def __init__(
self,
name,
type='string',
db=None,
):
(self.name, self.type, self._db) = (name, type, db)
def __str__(self):
return self.name
def __or__(self, other): # for use in sortby
assert_filter_fields(self, other)
return Expression(self.name + '|' + other.name, None, None)
def __invert__(self):
assert_filter_fields(self)
return Expression('-' + self.name, self.type, None)
# for use in Query
def __eq__(self, value):
return Query(self, '=', value)
def __ne__(self, value):
return Query(self, '!=', value)
def __lt__(self, value):
return Query(self, '<', value)
def __le__(self, value):
return Query(self, '<=', value)
def __gt__(self, value):
return Query(self, '>', value)
def __ge__(self, value):
return Query(self, '>=', value)
# def like(self,value): return Query(self,' LIKE ',value)
# def belongs(self,value): return Query(self,' IN ',value)
# for use in both Query and sortby
def __add__(self, other):
return Expression('%s+%s' % (self, other), 'float', None)
def __sub__(self, other):
return Expression('%s-%s' % (self, other), 'float', None)
def __mul__(self, other):
return Expression('%s*%s' % (self, other), 'float', None)
def __div__(self, other):
return Expression('%s/%s' % (self, other), 'float', None)
class Field(Expression):
"""
an instance of this class represents a database field
example::
a = Field(name, 'string', length=32, required=False,
default=None, requires=IS_NOT_EMPTY(), notnull=False,
unique=False, uploadfield=True)
to be used as argument of GQLDB.define_table
allowed field types:
string, boolean, integer, double, text, blob,
date, time, datetime, upload, password
strings must have a length or 512 by default.
fields should have a default or they will be required in SQLFORMs
the requires argument are used to validate the field input in SQLFORMs
"""
def __init__(
self,
fieldname,
type='string',
length=None,
default=None,
required=False,
requires=sqlhtml_validators,
ondelete='CASCADE',
notnull=False,
unique=False,
uploadfield=True,
):
self.name = cleanup(fieldname)
if fieldname in dir(Table) or fieldname[0] == '_':
raise SyntaxError, 'Field: invalid field name: %s' % fieldname
if isinstance(type, Table):
type = 'reference ' + type._tablename
if not length:
length = 512
self.type = type # 'string', 'integer'
self.length = length # the length of the string
self.default = default # default value for field
self.required = required # is this field required
self.ondelete = ondelete.upper() # this is for reference fields only
self.notnull = notnull
self.unique = unique
self.uploadfield = uploadfield
if requires == sqlhtml_validators:
requires = sqlhtml_validators(type, length)
elif requires is None:
requires = []
self.requires = requires # list of validators
def formatter(self, value):
if value is None or not self.requires:
return value
if not isinstance(self.requires, (list, tuple)):
requires = [self.requires]
else:
requires = copy.copy(self.requires)
requires.reverse()
for item in requires:
if hasattr(item, 'formatter'):
value = item.formatter(value)
return value
def __str__(self):
return '%s.%s' % (self._tablename, self.name)
MEMDB.Field = Field # ## required by gluon/globals.py session.connect
def obj_represent(object, fieldtype, db):
if object != None:
if fieldtype == 'date' and not isinstance(object,
datetime.date):
(y, m, d) = [int(x) for x in str(object).strip().split('-')]
object = datetime.date(y, m, d)
elif fieldtype == 'time' and not isinstance(object, datetime.time):
time_items = [int(x) for x in str(object).strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
object = datetime.time(h, mi, s)
elif fieldtype == 'datetime' and not isinstance(object,
datetime.datetime):
(y, m, d) = [int(x) for x in
str(object)[:10].strip().split('-')]
time_items = [int(x) for x in
str(object)[11:].strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
object = datetime.datetime(
y,
m,
d,
h,
mi,
s,
)
elif fieldtype == 'integer' and not isinstance(object, long):
object = long(object)
return object
class QueryException:
def __init__(self, **a):
self.__dict__ = a
class Query(object):
"""
A query object necessary to define a set.
It can be stored or can be passed to GQLDB.__call__() to obtain a Set
Example:
query=db.users.name=='Max'
set=db(query)
records=set.select()
"""
def __init__(
self,
left,
op=None,
right=None,
):
if isinstance(right, (Field, Expression)):
raise SyntaxError, \
'Query: right side of filter must be a value or entity'
if isinstance(left, Field) and left.name == 'id':
if op == '=':
self.get_one = \
QueryException(tablename=left._tablename,
id=long(right))
return
else:
raise SyntaxError, 'only equality by id is supported'
raise SyntaxError, 'not supported'
def __str__(self):
return str(self.left)
class Set(object):
"""
As Set represents a set of records in the database,
the records are identified by the where=Query(...) object.
normally the Set is generated by GQLDB.__call__(Query(...))
given a set, for example
set=db(db.users.name=='Max')
you can:
set.update(db.users.name='Massimo')
set.delete() # all elements in the set
set.select(orderby=db.users.id,groupby=db.users.name,limitby=(0,10))
and take subsets:
subset=set(db.users.id<5)
"""
def __init__(self, db, where=None):
self._db = db
self._tables = []
self.filters = []
if hasattr(where, 'get_all'):
self.where = where
self._tables.insert(0, where.get_all)
elif hasattr(where, 'get_one') and isinstance(where.get_one,
QueryException):
self.where = where.get_one
else:
# find out which tables are involved
if isinstance(where, Query):
self.filters = where.left
self.where = where
self._tables = [field._tablename for (field, op, val) in
self.filters]
def __call__(self, where):
if isinstance(self.where, QueryException) or isinstance(where,
QueryException):
raise SyntaxError, \
'neither self.where nor where can be a QueryException instance'
if self.where:
return Set(self._db, self.where & where)
else:
return Set(self._db, where)
def _get_table_or_raise(self):
tablenames = list(set(self._tables)) # unique
if len(tablenames) < 1:
raise SyntaxError, 'Set: no tables selected'
if len(tablenames) > 1:
raise SyntaxError, 'Set: no join in appengine'
return self._db[tablenames[0]]._tableobj
def _getitem_exception(self):
(tablename, id) = (self.where.tablename, self.where.id)
fields = self._db[tablename].fields
self.colnames = ['%s.%s' % (tablename, t) for t in fields]
item = self._db[tablename].get(id)
return (item, fields, tablename, id)
def _select_except(self):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return []
new_item = []
for t in fields:
if t == 'id':
new_item.append(long(id))
else:
new_item.append(getattr(item, t))
r = [new_item]
return Rows(self._db, r, *self.colnames)
def select(self, *fields, **attributes):
"""
Always returns a Rows object, even if it may be empty
"""
if isinstance(self.where, QueryException):
return self._select_except()
else:
raise SyntaxError, 'select arguments not supported'
def count(self):
return len(self.select())
def delete(self):
if isinstance(self.where, QueryException):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return
self._db[tablename].delete(id)
else:
raise Exception, 'deletion not implemented'
def update(self, **update_fields):
if isinstance(self.where, QueryException):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return
for (key, value) in update_fields.items():
setattr(item, key, value)
self._db[tablename].update(id, **item)
else:
raise Exception, 'update not implemented'
def update_record(
t,
s,
id,
a,
):
item = s.get(id)
for (key, value) in a.items():
t[key] = value
setattr(item, key, value)
s.update(id, **item)
class Rows(object):
"""
A wrapper for the return value of a select. It basically represents a table.
It has an iterator and each row is represented as a dictionary.
"""
# ## this class still needs some work to care for ID/OID
def __init__(
self,
db,
response,
*colnames
):
self._db = db
self.colnames = colnames
self.response = response
def __len__(self):
return len(self.response)
def __getitem__(self, i):
if i >= len(self.response) or i < 0:
raise SyntaxError, 'Rows: no such row: %i' % i
if len(self.response[0]) != len(self.colnames):
raise SyntaxError, 'Rows: internal error'
row = DALStorage()
for j in xrange(len(self.colnames)):
value = self.response[i][j]
if isinstance(value, unicode):
value = value.encode('utf-8')
packed = self.colnames[j].split('.')
try:
(tablename, fieldname) = packed
except:
if not '_extra' in row:
row['_extra'] = DALStorage()
row['_extra'][self.colnames[j]] = value
continue
table = self._db[tablename]
field = table[fieldname]
if not tablename in row:
row[tablename] = DALStorage()
if field.type[:9] == 'reference':
referee = field.type[10:].strip()
rid = value
row[tablename][fieldname] = rid
elif field.type == 'boolean' and value != None:
# row[tablename][fieldname]=Set(self._db[referee].id==rid)
if value == True or value == 'T':
row[tablename][fieldname] = True
else:
row[tablename][fieldname] = False
elif field.type == 'date' and value != None\
and not isinstance(value, datetime.date):
(y, m, d) = [int(x) for x in
str(value).strip().split('-')]
row[tablename][fieldname] = datetime.date(y, m, d)
elif field.type == 'time' and value != None\
and not isinstance(value, datetime.time):
time_items = [int(x) for x in
str(value).strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
row[tablename][fieldname] = datetime.time(h, mi, s)
elif field.type == 'datetime' and value != None\
and not isinstance(value, datetime.datetime):
(y, m, d) = [int(x) for x in
str(value)[:10].strip().split('-')]
time_items = [int(x) for x in
str(value)[11:].strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
row[tablename][fieldname] = datetime.datetime(
y,
m,
d,
h,
mi,
s,
)
else:
row[tablename][fieldname] = value
if fieldname == 'id':
id = row[tablename].id
row[tablename].update_record = lambda t = row[tablename], \
s = self._db[tablename], id = id, **a: update_record(t,
s, id, a)
for (referee_table, referee_name) in \
table._referenced_by:
s = self._db[referee_table][referee_name]
row[tablename][referee_table] = Set(self._db, s
== id)
if len(row.keys()) == 1:
return row[row.keys()[0]]
return row
def __iter__(self):
"""
iterator over records
"""
for i in xrange(len(self)):
yield self[i]
def __str__(self):
"""
serializes the table into a csv file
"""
s = cStringIO.StringIO()
writer = csv.writer(s)
writer.writerow(self.colnames)
c = len(self.colnames)
for i in xrange(len(self)):
row = [self.response[i][j] for j in xrange(c)]
for k in xrange(c):
if isinstance(row[k], unicode):
row[k] = row[k].encode('utf-8')
writer.writerow(row)
return s.getvalue()
def xml(self):
"""
serializes the table using sqlhtml.SQLTABLE (if present)
"""
return sqlhtml.SQLTABLE(self).xml()
def test_all():
"""
How to run from web2py dir:
export PYTHONPATH=.:YOUR_PLATFORMS_APPENGINE_PATH
python gluon/contrib/memdb.py
Setup the UTC timezone and database stubs
>>> import os
>>> os.environ['TZ'] = 'UTC'
>>> import time
>>> if hasattr(time, 'tzset'):
... time.tzset()
>>>
>>> from google.appengine.api import apiproxy_stub_map
>>> from google.appengine.api.memcache import memcache_stub
>>> apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
>>> apiproxy_stub_map.apiproxy.RegisterStub('memcache', memcache_stub.MemcacheServiceStub())
Create a table with all possible field types
>>> from google.appengine.api.memcache import Client
>>> db=MEMDB(Client())
>>> tmp=db.define_table('users', Field('stringf','string',length=32,required=True), Field('booleanf','boolean',default=False), Field('passwordf','password',notnull=True), Field('blobf','blob'), Field('uploadf','upload'), Field('integerf','integer',unique=True), Field('doublef','double',unique=True,notnull=True), Field('datef','date',default=datetime.date.today()), Field('timef','time'), Field('datetimef','datetime'), migrate='test_user.table')
Insert a field
>>> user_id = db.users.insert(stringf='a',booleanf=True,passwordf='p',blobf='0A', uploadf=None, integerf=5,doublef=3.14, datef=datetime.date(2001,1,1), timef=datetime.time(12,30,15), datetimef=datetime.datetime(2002,2,2,12,30,15))
>>> user_id != None
True
Select all
# >>> all = db().select(db.users.ALL)
Drop the table
# >>> db.users.drop()
Select many entities
>>> tmp = db.define_table(\"posts\", Field('body','text'), Field('total','integer'), Field('created_at','datetime'))
>>> many = 20 #2010 # more than 1000 single fetch limit (it can be slow)
>>> few = 5
>>> most = many - few
>>> 0 < few < most < many
True
>>> for i in range(many):
... f=db.posts.insert(body='', total=i,created_at=datetime.datetime(2008, 7, 6, 14, 15, 42, i))
>>>
# test timezones
>>> class TZOffset(datetime.tzinfo):
... def __init__(self,offset=0):
... self.offset = offset
... def utcoffset(self, dt): return datetime.timedelta(hours=self.offset)
... def dst(self, dt): return datetime.timedelta(0)
... def tzname(self, dt): return 'UTC' + str(self.offset)
...
>>> SERVER_OFFSET = -8
>>>
>>> stamp = datetime.datetime(2008, 7, 6, 14, 15, 42, 828201)
>>> post_id = db.posts.insert(created_at=stamp,body='body1')
>>> naive_stamp = db(db.posts.id==post_id).select()[0].created_at
>>> utc_stamp=naive_stamp.replace(tzinfo=TZOffset())
>>> server_stamp = utc_stamp.astimezone(TZOffset(SERVER_OFFSET))
>>> stamp == naive_stamp
True
>>> utc_stamp == server_stamp
True
>>> rows = db(db.posts.id==post_id).select()
>>> len(rows) == 1
True
>>> rows[0].body == 'body1'
True
>>> db(db.posts.id==post_id).delete()
>>> rows = db(db.posts.id==post_id).select()
>>> len(rows) == 0
True
>>> id = db.posts.insert(total='0') # coerce str to integer
>>> rows = db(db.posts.id==id).select()
>>> len(rows) == 1
True
>>> rows[0].total == 0
True
Examples of insert, select, update, delete
>>> tmp=db.define_table('person', Field('name'), Field('birth','date'), migrate='test_person.table')
>>> marco_id=db.person.insert(name=\"Marco\",birth='2005-06-22')
>>> person_id=db.person.insert(name=\"Massimo\",birth='1971-12-21')
>>> me=db(db.person.id==person_id).select()[0] # test select
>>> me.name
'Massimo'
>>> db(db.person.id==person_id).update(name='massimo') # test update
>>> me = db(db.person.id==person_id).select()[0]
>>> me.name
'massimo'
>>> str(me.birth)
'1971-12-21'
# resave date to ensure it comes back the same
>>> me=db(db.person.id==person_id).update(birth=me.birth) # test update
>>> me = db(db.person.id==person_id).select()[0]
>>> me.birth
datetime.date(1971, 12, 21)
>>> db(db.person.id==marco_id).delete() # test delete
>>> len(db(db.person.id==marco_id).select())
0
Update a single record
>>> me.update_record(name=\"Max\")
>>> me.name
'Max'
>>> me = db(db.person.id == person_id).select()[0]
>>> me.name
'Max'
"""
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = DALStorage
if __name__ == '__main__':
import doctest
doctest.testmod()
|
gpl-2.0
| -1,616,034,788,288,685,800 | 30.033076 | 594 | 0.5251 | false |
JianGoForIt/YellowFin_Pytorch
|
word_language_model/model.py
|
1
|
2562
|
import torch.nn as nn
from torch.autograd import Variable
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
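# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the upstream file). It assumes the old
# Variable-based PyTorch API used above; the vocabulary size, embedding and
# hidden sizes, sequence length and batch size below are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch
    # 2-layer LSTM language model over a 10000-token vocabulary
    model = RNNModel('LSTM', ntoken=10000, ninp=200, nhid=200, nlayers=2)
    hidden = model.init_hidden(bsz=20)
    # fake batch: sequence length 35, batch size 20, random token ids
    data = Variable(torch.LongTensor(35, 20).random_(0, 10000))
    output, hidden = model(data, hidden)
    print(output.size())  # expected: torch.Size([35, 20, 10000])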
|
apache-2.0
| 393,465,775,236,503,040 | 42.423729 | 110 | 0.599922 | false |
couwbat/couwbatns3
|
ns-allinone-3.23/bake/bake/ModuleBuild.py
|
1
|
33519
|
###############################################################################
# Copyright (c) 2013 INRIA
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Daniel Camara <daniel.camara@inria.fr>
# Mathieu Lacage <mathieu.lacage@sophia.inria.fr>
###############################################################################
'''
ModuleBuild.py
This file stores the real build implementation for each one of the handled
tools. These classes define how a build with, for example, make is done and
how it differs from the build done with cmake.
'''
import bake.Utils
import os
import platform
import commands
import re
import sys
import shlex
from bake.Utils import ModuleAttributeBase
from bake.Exceptions import NotImplemented
from bake.Exceptions import TaskError
class ModuleBuild(ModuleAttributeBase):
""" Generic build, to be extended by the specialized classes,
one for each handled kind of tool.
"""
def __init__(self):
""" Default values for the generic attributes."""
ModuleAttributeBase.__init__(self)
# self._libpaths = []
self.add_attribute('objdir', 'no', 'Module supports objdir != srcdir.', mandatory=False)
self.add_attribute('patch', '', 'code to patch before build', mandatory=False)
self.add_attribute('v_PATH', '', 'Directory, or directories separated'
' by a \";\", to append to PATH environment variable', mandatory=False)
self.add_attribute('v_LD_LIBRARY', '', 'Directory, or directories'
' separated by a \";\", to append LD_LIBRARY'
' environment variable', mandatory=False)
self.add_attribute('v_PKG_CONFIG', '', 'Directory, or directories'
' separated by a \";\", to append to PKG_CONFIG'
' environment variable', mandatory=False)
self.add_attribute('post_installation', '', 'UNIX Command to run'
' after the installation', mandatory=False)
self.add_attribute('no_installation', '', 'Does not call the installation'
' by default', mandatory=False)
self.add_attribute('pre_installation', '', 'UNIX Command to run'
' before the installation', mandatory=False)
self.add_attribute('supported_os', '', 'List of supported Operating'
' Systems for the module', mandatory=False)
self.add_attribute('ignore_predefined_flags', 'False', 'True if the'
' build should ignore the predefined flag settings')
self.add_attribute('new_variable', '', 'Appends the value to the'
' system variable on the format VARIABLE1=value1'
';VARIABLE2=value2', mandatory=False)
# self.add_attribute('condition_to_build', '', 'Condition that, if '
# 'existent, should be true for allowing the instalation')
@classmethod
def subclasses(self):
return ModuleBuild.__subclasses__()
@classmethod
def create(cls, name):
""" Instantiates the Build class."""
for subclass in ModuleBuild.subclasses():
if subclass.name() == name:
instance = subclass()
return instance
return None
@property
def supports_objdir(self):
return self.attribute('objdir').value == 'yes'
def build(self, env, jobs):
raise NotImplemented()
def clean(self, env):
raise NotImplemented()
def check_version(self, env):
raise NotImplemented()
def check_os(self, supportedOs) :
""" Verifies the minimum OS requirements."""
osName = platform.system().lower()
if len(supportedOs) == 0:
elements = []
else :
elements = supportedOs.strip().split(';')
supportedOS = False
for element in elements :
especification = element.strip().split(' ')
# if the main version is correct, e.g. linux/darwin/windows
if(osName.startswith(especification[0].lower())):
# if we need to go into a distribution of the OS, e.g. Debian/Fedora
if(len(especification)>1):
(distname,version,id)=platform.linux_distribution()
for providedName in especification:
if distname.lower() == providedName.lower():
supportedOS = True
else :
supportedOS = True
return supportedOS
def perform_pre_installation(self, env):
""" Executes a list of Linux commands BEFORE calling the build process."""
if self.attribute('pre_installation').value != '':
commandList = env.replace_variables(self.attribute('pre_installation').value).split(' or ')
for comandToExecute in commandList :
try:
env._logger.commands.write(" > " +env.replace_variables(comandToExecute)+ '\n');
resultStatus = commands.getstatusoutput(env.replace_variables(comandToExecute))
if(resultStatus[0] == 0) :
return True
except Exception as e:
print (" > Error executing pre installation : " + e + "\n")
return False
def perform_post_installation(self, env):
""" Executes a list of Linux commands AFTER the build is finished """
if self.attribute('post_installation').value != '':
try:
env._logger.commands.write(" > " + env.replace_variables(self.attribute('post_installation').value))
var = commands.getoutput(env.replace_variables(self.attribute('post_installation').value))
if env.debug:
print(" -> " + var)
except Exception as e:
print (" > Error executing post installation : " + e )
# applies a patch if available
def threat_patch(self, env, patchStr):
""" Applies a patch, or a series of patches, over the source code. """
hasPatch = env.check_program('patch')
if hasPatch == False:
raise TaskError('Patch tool is not present and it is required for'
' applying: %s, in: %s'
% (self.attribute('patch').value, env._module_name))
vectorPath = env.replace_variables(patchStr).split(';')
for item in vectorPath:
if not env.exist_file(item) :
raise TaskError('Patch file is not present! missing file:'
' %s, in: %s' % (item, env._module_name))
try:
env._logger.commands.write('cd ' + env.srcdir + '; patch -p1 < ' + item + '\n')
status = commands.getstatusoutput('cd ' + env.srcdir + '; patch -p1 < ' + item)
except:
raise TaskError('Patch error: %s, in: %s' % (item, env._module_name))
# if there were an error
if status[0] != 0:
if status[0] == 256:
env._logger.commands.write(' > Patch problem: Ignoring'
' patch, either the patch file'
' does not exist or it was '
'already applied!\n')
else:
raise TaskError('Patch error %s: %s, in: %s' %
(status[0], item, env._module_name))
# Handles the parameter variables
def threat_variables(self, env):
""" Append the defined variables to the internal environment. """
elements = []
if self.attribute('v_PATH').value != '':
elements = env.replace_variables(self.attribute('v_PATH').value).split(";")
env.add_libpaths(elements)
env.add_binpaths(elements)
if self.attribute('v_LD_LIBRARY').value != '':
elements = env.replace_variables(self.attribute('v_LD_LIBRARY').value).split(";")
env.add_libpaths(elements)
if self.attribute('v_PKG_CONFIG').value != '':
elements = env.replace_variables(self.attribute('v_PKG_CONFIG').value).split(";")
env.add_pkgpaths(elements)
if self.attribute('new_variable').value != '':
elements = env.replace_variables(self.attribute('new_variable').value).split(";")
env.add_variables(elements)
def _flags(self):
""" Adds the defined flags as a default for the build. """
variables = []
if self.attribute('ignore_predefined_flags').value == 'True':
return variables
if self.attribute('CFLAGS').value != '':
variables.append('CFLAGS=%s'% (self.attribute('CFLAGS').value))
if self.attribute('CXXFLAGS').value != '':
variables.append('CXXFLAGS=%s'% (self.attribute('CXXFLAGS').value))
return variables
class NoneModuleBuild(ModuleBuild):
""" Class defined for the modules that do not need a build mechanism,
e.g. system dependencies.
"""
def __init__(self):
ModuleBuild.__init__(self)
@classmethod
def name(cls):
return 'none'
def build(self, env, jobs):
pass
def clean(self, env):
pass
def check_version(self, env):
return True
class InlineModuleBuild(ModuleBuild):
""" Class defined for the modules that will use a Python code to be
installed. The build may be programmed in Python using all the Bake
functionalities.
"""
def __init__(self):
ModuleBuild.__init__(self)
@classmethod
def name(cls):
return 'inline'
@classmethod
def className(self, code):
if code :
myre = re.compile(".*class (?P<class_name>[a-zA-Z0-9_-]*)\(.*")
m = myre.match(code)
if m :
return m.groupdict()['class_name']
return self.__class__.__name__
def check_version(self, env):
return True
class PythonModuleBuild(ModuleBuild):
""" Performs the build for python based projects."""
def __init__(self):
ModuleBuild.__init__(self)
@classmethod
def name(cls):
""" Specific build type identifier."""
return 'python'
def build(self, env, jobs):
""" Specific build implementation method. Basically call the setup.py
program passed as parameter."""
# if self.attribute('patch').value != '':
# self.threat_patch(env)
# TODO: Add the options, there is no space for the configure_arguments
env.run(['python', os.path.join(env.srcdir, 'setup.py'), 'build',
'--build-base=' + env.objdir], directory=env.srcdir)
if self.attribute('no_installation').value != True:
sudoOp=[]
if(env.sudoEnabled):
sudoOp = ['sudo']
env.run(sudoOp + ['python', os.path.join(env.srcdir, 'setup.py'), 'install',
'--install-base=' + env.installdir,
'--install-purelib=' + env.installdir + '/lib',
# --install-platlib=' + env.installdir + '/lib.$PLAT,
'--install-scripts=' + env.installdir + '/scripts',
'--install-headers=' + env.installdir + '/include',
'--install-data=' + env.installdir + '/data',
],
directory=env.srcdir)
def clean(self, env):
""" Call the code with the setup.py with the clean option,
to remove the older code.
"""
env.run(['python', os.path.join(env.srcdir, 'setup.py'), 'clean',
'--build-base=' + env.objdir],
directory=env.srcdir)
def distclean(self, env):
""" Call the code with the setup.py with the distclean option,
to remove the older code.
"""
env.run(['python', os.path.join(env.srcdir, 'setup.py'), 'distclean'],
directory=env.srcdir)
def check_version(self, env):
"""Verifies only if python exists in the machine."""
try:
env.run(['python', '--version'])
except TaskError as e:
return False
return True
class WafModuleBuild(ModuleBuild):
""" Performs the build for Waf based projects."""
def __init__(self):
""" Instantiate the list of specific attributes for the waf build."""
ModuleBuild.__init__(self)
self.add_attribute('CC', '', 'C compiler to use')
self.add_attribute('CXX', '', 'C++ compiler to use')
self.add_attribute('CFLAGS', '', 'Flags to use for C compiler')
self.add_attribute('CXXFLAGS', '', 'Flags to use for C++ compiler')
self.add_attribute('LDFLAGS', '', 'Flags to use for Linker')
self.add_attribute('configure_arguments', '', 'Arguments to pass to'
' "waf configure"')
self.add_attribute('build_arguments', '', 'Arguments to pass to "waf"')
self.add_attribute('install_arguments', '', 'Command-line arguments'
' to pass to waf install')
@classmethod
def name(cls):
""" Specific build type identifier."""
return 'waf'
def _binary(self, srcdir):
""" Searches for the waf program."""
if os.path.isfile(os.path.join(srcdir, 'waf')):
waf_binary = os.path.join(srcdir, 'waf')
else:
waf_binary = 'waf'
return waf_binary
def _env(self, objdir):
""" Verifies if the main environment variables where defined and
sets them accordingly.
"""
env = dict()
for a, b in [['CC', 'CC'],
['CXX', 'CXX'],
['CFLAGS', 'CFLAGS'],
['CXXFLAGS', 'CXXFLAGS'],
['LDFLAGS', 'LINKFLAGS']]:
if self.attribute(a).value != '':
env[b] = self.attribute(a).value
# todo: Evaluate the situations where a waf lock may be required, and if so
# implement something on this line
# env['WAFLOCK'] = '.lock-waf_%s_build'%sys.platform #'.lock-%s' % os.path.basename(objdir)
return env
def _is_1_6_x(self, env):
""" Searches for the waf version, it should be bigger than 1.6.0."""
return env.check_program(self._binary(env.srcdir), version_arg='--version',
version_regexp='(\d+)\.(\d+)\.(\d+)',
version_required=(1, 6, 0))
def build(self, env, jobs):
""" Specific build implementation method. In order:
1. Apply possible patches,
2. Call waf configuration, if the configuration is set,
3. Call waf with the set build arguments,
4. Call waf with the install parameter.
"""
# if self.attribute('patch').value != '':
# self.threat_patch(env)
extra_configure_options = []
if self.attribute('configure_arguments').value != '':
extra_configure_options = [env.replace_variables(tmp) for tmp in
bake.Utils.split_args(env.replace_variables(self.attribute('configure_arguments').value))]
if self._is_1_6_x(env):
env.run([self._binary(env.srcdir)] + extra_configure_options,
directory=env.srcdir,
env=self._env(env.objdir))
else:
env.run([self._binary(env.srcdir)] + extra_configure_options,
directory=env.srcdir,
env=self._env(env.objdir))
extra_build_options = []
if self.attribute('build_arguments').value != '':
extra_build_options = [env.replace_variables(tmp) for tmp in
bake.Utils.split_args(env.replace_variables(self.attribute('build_arguments').value))]
jobsrt=[]
if not jobs == -1:
jobsrt = ['-j', str(jobs)]
env.run([self._binary(env.srcdir)] + extra_build_options + jobsrt,
directory=env.srcdir,
env=self._env(env.objdir))
if self.attribute('no_installation').value != True:
sudoOp=[]
if(env.sudoEnabled):
sudoOp = ['sudo']
try :
options = bake.Utils.split_args(env.replace_variables(self.attribute('install_arguments').value))
env.run(sudoOp + [self._binary(env.srcdir), 'install'] + options,
directory=env.srcdir,
env=self._env(env.objdir))
except TaskError as e:
print(' Could not install, probably you do not have permission to'
' install %s: Verify if you have the required rights. Original'
' message: %s' % (env._module_name, e._reason))
def clean(self, env):
""" Call waf with the clean option to remove the results of the
last build.
"""
env.run([self._binary(env.srcdir), '-k', 'clean'],
directory=env.srcdir,
env=self._env(env.objdir))
def distclean(self, env):
""" Call waf with the distclean option to remove the results of the
last build.
"""
env.run([self._binary(env.srcdir), '-k', 'distclean'],
directory=env.srcdir,
env=self._env(env.objdir))
def check_version(self, env):
""" Verifies the waf version."""
for path in [os.path.join(env.srcdir, 'waf'), 'waf']:
if env.check_program(path, version_arg='--version',
version_regexp='(\d+)\.(\d+)\.(\d+)',
version_required=(1, 5, 9)):
return True
return False
class Cmake(ModuleBuild):
""" Performs the build for CMake based projects."""
def __init__(self):
""" Instantiate the list of specific attributes for the CMake build."""
ModuleBuild.__init__(self)
self.add_attribute('CC', '', 'C compiler to use')
self.add_attribute('CXX', '', 'C++ compiler to use')
self.add_attribute('CFLAGS', '', 'Flags to use for C compiler')
self.add_attribute('CXXFLAGS', '', 'Flags to use for C++ compiler')
self.add_attribute('LDFLAGS', '', 'Flags to use for Linker')
self.add_attribute('build_arguments', '', 'Targets to make before'
' install')
self.add_attribute('cmake_arguments', '', 'Command-line arguments'
' to pass to cmake')
self.add_attribute('configure_arguments', '', 'Command-line arguments'
' to pass to cmake')
self.add_attribute('install_arguments', '', 'Command-line arguments'
' to pass to make install')
@classmethod
def name(cls):
""" Specific build type identifier."""
return 'cmake'
def _variables(self):
""" Verifies if the main environment variables where defined and
sets them accordingly.
"""
variables = []
for a, b in [['CC', 'C_COMPILER'],
['CXX', 'CXX_COMPILER'],
['CFLAGS', 'CFLAGS'],
['CXXFLAGS', 'CXXFLAGS'],
['LDFLAGS', 'EXE_LINKER_FLAGS']]:
if self.attribute(a).value != '':
variables.append('-DCMAKE_%s=%s' % (b, self.attribute(a).value))
return variables
def build(self, env, jobs):
""" Specific build implementation method. In order:
1. Call cmake to create the make files
2. Apply possible patches,
3. Call make to build the code,
4. Call make with the set build arguments
5. Call make with the install parameters.
"""
# if self.attribute('patch').value != '':
# self.threat_patch(env)
options = []
if self.attribute('cmake_arguments').value != '':
options = bake.Utils.split_args(
env.replace_variables(self.attribute('cmake_arguments').value))
# if the object directory does not exist, create it, to
# avoid a build error, since cmake does not create the directory;
# it also makes this orthogonal to waf, which creates the target object dir
try:
env.run(['mkdir', env.objdir],
directory=env.srcdir)
except TaskError as e:
# assume that if an error is thrown it is because the directory already
# exists, otherwise re-propagate the error
if not "error 1" in e._reason :
raise TaskError(e._reason)
jobsrt=[]
if not jobs == -1:
jobsrt = ['-j', str(jobs)]
env.run(['cmake', env.srcdir, '-DCMAKE_INSTALL_PREFIX:PATH=' + env.installdir] +
self._variables() + options,
directory=env.objdir)
env.run(['make']+ jobsrt, directory=env.objdir)
if self.attribute('build_arguments').value != '':
env.run(['make'] + bake.Utils.split_args(env.replace_variables(self.attribute('build_arguments').value)),
directory=env.objdir)
if self.attribute('no_installation').value != True:
sudoOp=[]
if(env.sudoEnabled):
sudoOp = ['sudo']
try:
options = bake.Utils.split_args(env.replace_variables(self.attribute('install_arguments').value))
env.run(sudoOp + ['make', 'install'] + options, directory=env.objdir)
except TaskError as e:
print(' Could not install, probably you do not have permission to'
' install %s: Verify if you have the required rights. Original'
' message: %s' % (env._module_name, e._reason))
def clean(self, env):
""" Call make clean to remove the results of the last build."""
if not os.path.isfile(os.path.join(env.objdir, 'Makefile')):
return
env.run(['make','-i', 'clean'], directory=env.objdir)
def distclean(self, env):
""" Call make distclean to remove the results of the last build."""
if not os.path.isfile(os.path.join(env.objdir, 'Makefile')):
return
env.run(['make','-i', 'distclean'], directory=env.objdir)
def check_version(self, env):
""" Verifies if CMake and Make are available and their versions."""
if not env.check_program('cmake', version_arg='--version',
version_regexp='(\d+)\.(\d+)\.(\d+)',
version_required=(2, 8, 2)):
return False
if not env.check_program('make', version_arg='--version',
version_regexp='(\d+)\.(\d+)',
version_required=(3, 80)):
return False
return True
# Class to handle the make build tool
class Make(ModuleBuild):
def __init__(self):
""" Instantiate the list of specific attributes for the make build."""
ModuleBuild.__init__(self)
self.add_attribute('CFLAGS', '', 'Flags to use for C compiler')
self.add_attribute('CXXFLAGS', '', 'Flags to use for C++ compiler')
self.add_attribute('LDFLAGS', '', 'Flags to use for Linker')
self.add_attribute('build_arguments', '', 'Targets to make before install')
self.add_attribute('configure_arguments', '', 'Command-line arguments'
' to pass to make')
self.add_attribute('install_arguments', '', 'Command-line arguments'
' to pass to make install')
@classmethod
def name(cls):
""" Specific build type identifier."""
return 'make'
def build(self, env, jobs):
""" Specific build implementation method. In order:
1. Apply possible patches,
2. Call make configure, if the configurations are available,
3. Call make with the set build arguments
4. Call make with the install arguments.
"""
# if self.attribute('patch').value != '':
# self.threat_patch(env)
# if the object directory does not exist, create it, to
# avoid a build error, since make does not create the directory;
# it also makes this orthogonal to waf, which creates the target object dir
try:
env.run(['mkdir', env.objdir],
directory=env.srcdir)
except TaskError as e:
# assume that if an error is thrown it is because the directory already
# exists, otherwise re-propagate the error
if not "error 1" in e._reason :
raise TaskError(e._reason)
# Configures make, if there is a configuration argument that was passed as parameter
options = []
if self.attribute('configure_arguments').value != '':
options = bake.Utils.split_args(env.replace_variables(self.attribute('configure_arguments').value))
env.run(['make'] + self._flags() + options, directory=env.srcdir)
jobsrt=[]
if not jobs == -1:
jobsrt = ['-j', str(jobs)]
options = bake.Utils.split_args(env.replace_variables(self.attribute('build_arguments').value))
env.run(['make']+jobsrt + self._flags() + options, directory=env.srcdir)
if self.attribute('no_installation').value != str(True):
sudoOp=[]
if(env.sudoEnabled):
sudoOp = ['sudo']
try:
options = bake.Utils.split_args(env.replace_variables(self.attribute('install_arguments').value))
env.run(sudoOp + ['make', 'install'] + self._flags() + options, directory=env.srcdir)
except TaskError as e:
raise TaskError(' Could not install, probably you do not have permission to'
' install %s: Verify if you have the required rights. Original'
' message: %s' % (env._module_name, e._reason))
def clean(self, env):
""" Call make clean to remove the results of the last build ."""
if not os.path.isfile(os.path.join(env.objdir, 'Makefile')):
return
env.run(['make', 'clean'], directory=env.objdir)
def distclean(self, env):
""" Call make distclean to remove the results of the last build."""
if not os.path.isfile(os.path.join(env.objdir, 'Makefile')):
return
env.run(['make', '-i', 'distclean'], directory=env.objdir)
def check_version(self, env):
""" Verifies if Make are available and its versions."""
if platform.system() == 'FreeBSD':
# FreeBSD make program does not identify version the same way
# as does GNU make, so we just check for availability of make
if not env.check_program('make'):
return False
else:
return True
if not env.check_program('make', version_arg='--version',
version_regexp='(\d+)\.(\d+)',
version_required=(3, 80)):
return False
return True
class Autotools(ModuleBuild):
def __init__(self):
""" Instantiate the list of specific attributes for the Autotools build."""
ModuleBuild.__init__(self)
self.add_attribute('CC', '', 'C compiler to use')
self.add_attribute('CXX', '', 'C++ compiler to use')
self.add_attribute('CFLAGS', '', 'Flags to use for C compiler')
self.add_attribute('CXXFLAGS', '', 'Flags to use for C++ compiler')
self.add_attribute('LDFLAGS', '', 'Flags to use for Linker')
self.add_attribute('maintainer', 'no', 'Maintainer mode ?')
self.add_attribute('configure_arguments', '', 'Command-line arguments'
' to pass to configure')
self.add_attribute('install_arguments', '', 'Command-line arguments'
' to pass to make install')
@classmethod
def name(cls):
""" Specific build type identifier."""
return 'autotools'
def _variables(self):
""" Verifies if the main environment variables where defined and
sets them accordingly."""
variables = []
for tmp in ['CC', 'CXX', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS']:
if self.attribute(tmp).value != '':
variables.append('%s=%s' % (tmp, self.attribute(tmp).value))
return variables
def build(self, env, jobs):
""" Specific build implementation method. In order:
1. Apply possible patches,
2. Call autoreconf, if in maintainer mode,
3. Call configure, if the configure arguments are available,
4. Call make to perform the build,
5. Call make with the install arguments.
"""
# if self.attribute('patch').value != '':
# self.threat_patch(env)
if self.attribute('maintainer').value != 'no':
env.run(['autoreconf', '--install'],
directory=env.srcdir)
options = []
if self.attribute('configure_arguments').value != '':
command= (env.replace_variables(env.replace_variables(self.attribute('configure_arguments').value)))
if not "--prefix" in command:
command = command + ' --prefix=' + env.objdir
command = shlex.split(command)
env.run(command, directory=env.objdir)
jobsrt=[]
if not jobs == -1:
jobsrt = ['-j', str(jobs)]
env.run(['make']+jobsrt, directory=env.objdir)
if self.attribute('no_installation').value != True:
sudoOp=[]
if(env.sudoEnabled):
sudoOp = ['sudo']
try :
options = bake.Utils.split_args(env.replace_variables(self.attribute('install_arguments').value))
env.run(sudoOp + ['make', 'install'] + options, directory=env.objdir)
except TaskError as e:
print(' Could not install, probably you do not have permission to'
' install %s: Verify if you have the required rights. Original'
' message: %s' % (env._module_name, e._reason))
def clean(self, env):
""" Call make maintainerclean or distclean to remove the results of
the last build.
"""
if not os.path.isfile(os.path.join(env.objdir, 'Makefile')):
return
if self.attribute('maintainer').value != 'no':
env.run(['make', '-k', 'maintainerclean'], directory=env.objdir)
else:
env.run(['make', '-k', 'distclean'], directory=env.objdir)
try:
os.remove(os.path.join(env.objdir, 'config.cache'))
except OSError:
pass
def distclean(self, env):
""" Call make distclean to remove the results of the last build."""
Autotools.clean(self, env)
def check_version(self, env):
""" Verifies if Autoreconf and Make are available and their versions."""
if not env.check_program('autoreconf', version_arg='--version',
version_regexp='(\d+)\.(\d+)',
version_required=(2, 13)):
return False
if not env.check_program('make', version_arg='--version',
version_regexp='(\d+)\.(\d+)',
version_required=(3, 80)):
return False
return True
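# ---------------------------------------------------------------------------
# Editor's sketch (not part of the upstream file): support for a new build
# tool is added by subclassing ModuleBuild, returning a unique name() and
# implementing build/clean/check_version, as the classes above do. The
# 'scons' command lines below are illustrative assumptions, not part of bake.
# ---------------------------------------------------------------------------
class SconsModuleBuild(ModuleBuild):
    """ Minimal, illustrative handler for scons-based projects."""
    def __init__(self):
        ModuleBuild.__init__(self)
    @classmethod
    def name(cls):
        """ Specific build type identifier."""
        return 'scons'
    def build(self, env, jobs):
        jobsrt = [] if jobs == -1 else ['-j', str(jobs)]
        env.run(['scons'] + jobsrt, directory=env.srcdir)
    def clean(self, env):
        env.run(['scons', '-c'], directory=env.srcdir)
    def check_version(self, env):
        return env.check_program('scons')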
|
gpl-2.0
| 606,902,162,483,470,800 | 39.142515 | 129 | 0.535965 | false |
talkincode/toughlib
|
toughlib/utils.py
|
1
|
10450
|
#!/usr/bin/env python
#coding:utf-8
import decimal
import datetime
from Crypto.Cipher import AES
from Crypto import Random
import hashlib
import binascii
import hashlib
import base64
import calendar
import random
import os
import time
import uuid
import json
import functools
import logging
import urlparse
random_generator = random.SystemRandom()
decimal.getcontext().prec = 32
decimal.getcontext().rounding = decimal.ROUND_UP
_base_id = 0
_CurrentID = random_generator.randrange(1, 1024)
def CurrentID():
global _CurrentID
_CurrentID = (_CurrentID + 1) % 1024
return str(_CurrentID)
class AESCipher:
def __init__(self,key=None):
if key:self.setup(key)
def is_pwd_encrypt(self):
return os.environ.get("CLOSE_PASSWORD_ENCRYPTION")
def setup(self, key):
self.bs = 32
self.ori_key = key
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self, raw):
is_encrypt = self.is_pwd_encrypt()
if is_encrypt:
return raw
raw = safestr(raw)
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw))
def decrypt(self, enc):
is_encrypt = self.is_pwd_encrypt()
if is_encrypt:
return enc
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return safeunicode(self._unpad(cipher.decrypt(enc[AES.block_size:])))
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
def _unpad(self,s):
return s[:-ord(s[len(s)-1:])]
aescipher = AESCipher()
encrypt = aescipher.encrypt
decrypt = aescipher.decrypt
def update_tz(tz_val,default_val="CST-8"):
try:
os.environ["TZ"] = tz_val or default_val
time.tzset()
except:
pass
def check_ssl(config):
use_ssl = False
privatekey = None
certificate = None
if config.has_option('DEFAULT','ssl') and config.getboolean('DEFAULT','ssl'):
privatekey = config.get('DEFAULT','privatekey')
certificate = config.get('DEFAULT','certificate')
if os.path.exists(privatekey) and os.path.exists(certificate):
use_ssl = True
return use_ssl,privatekey,certificate
def get_uuid():
return uuid.uuid1().hex.upper()
def bps2mbps(bps):
_bps = decimal.Decimal(bps or 0)
_mbps = _bps / decimal.Decimal(1024*1024)
return str(_mbps.quantize(decimal.Decimal('1.000')))
def mbps2bps(mbps):
_mbps = decimal.Decimal(mbps or 0)
_kbps = _mbps * decimal.Decimal(1024*1024)
return int(_kbps.to_integral_value())
def bb2mb(ik):
_kb = decimal.Decimal(ik or 0)
_mb = _kb / decimal.Decimal(1024*1024)
return str(_mb.quantize(decimal.Decimal('1.00')))
def bbgb2mb(bb,gb):
bl = decimal.Decimal(bb or 0)/decimal.Decimal(1024*1024)
gl = decimal.Decimal(gb or 0)*decimal.Decimal(4*1024*1024*1024)
tl = bl + gl
return str(tl.quantize(decimal.Decimal('1.00')))
def kb2mb(ik,fmt='1.00'):
_kb = decimal.Decimal(ik or 0)
_mb = _kb / decimal.Decimal(1024)
return str(_mb.quantize(decimal.Decimal(fmt)))
def mb2kb(im=0):
_mb = decimal.Decimal(im or 0)
_kb = _mb * decimal.Decimal(1024)
return int(_kb.to_integral_value())
def kb2gb(ik,fmt='1.00'):
_kb = decimal.Decimal(ik or 0)
_mb = _kb / decimal.Decimal(1024*1024)
return str(_mb.quantize(decimal.Decimal(fmt)))
def gb2kb(im=0):
_mb = decimal.Decimal(im or 0)
_kb = _mb * decimal.Decimal(1024*1024)
return int(_kb.to_integral_value())
def hour2sec(hor=0):
_hor = decimal.Decimal(hor or 0)
_sec = _hor * decimal.Decimal(3600)
return int(_sec.to_integral_value())
def sec2hour(sec=0):
_sec = decimal.Decimal(sec or 0)
_hor = _sec / decimal.Decimal(3600)
return str(_hor.quantize(decimal.Decimal('1.00')))
def fen2yuan(fen=0):
f = decimal.Decimal(fen or 0)
y = f / decimal.Decimal(100)
return str(y.quantize(decimal.Decimal('1.00')))
def yuan2fen(yuan=0):
y = decimal.Decimal(yuan or 0)
f = y * decimal.Decimal(100)
return int(f.to_integral_value())
def get_currtime():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def get_currdate():
return datetime.datetime.now().strftime("%Y-%m-%d")
def get_datetime(second):
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(second))
def datetime2msec(dtime_str):
_datetime = datetime.datetime.strptime(dtime_str,"%Y-%m-%d %H:%M:%S")
return int(time.mktime(_datetime.timetuple()))
def gen_backup_id():
global _base_id
if _base_id >= 9999:_base_id=0
_base_id += 1
_num = str(_base_id).zfill(4)
return datetime.datetime.now().strftime("%Y%m%d_%H%M%S_") + _num
gen_backep_id = gen_backup_id
def gen_order_id():
global _base_id
if _base_id >= 9999:_base_id=0
_base_id += 1
_num = str(_base_id).zfill(4)
return datetime.datetime.now().strftime("%Y%m%d%H%M%S") + _num
def fmt_second(time_total):
"""
>>> fmt_second(100)
'00:01:40'
"""
def _ck(t):
return t < 10 and "0%s" % t or t
times = int(time_total)
h = times / 3600
m = times % 3600 / 60
s = times % 3600 % 60
return "%s:%s:%s" % (_ck(h), _ck(m), _ck(s))
def is_expire(dstr):
if not dstr:
return False
try:
expire_date = datetime.datetime.strptime("%s 23:59:59" % dstr, "%Y-%m-%d %H:%M:%S")
now = datetime.datetime.now()
return expire_date < now
except:
import traceback
traceback.print_exc()
return False
def fmt_online_time(ctime):
if not ctime:
return ''
cdate = datetime.datetime.strptime(ctime, '%Y-%m-%d %H:%M:%S')
nowdate = datetime.datetime.now()
dt = nowdate - cdate
times = dt.total_seconds()
if times <= 60:
return u"%s秒"%int(times)
d = times / (3600 * 24)
h = times % (3600 * 24) / 3600
m = times % (3600 * 24) % 3600 / 60
s = times % (3600 * 24) % 3600 % 60
if int(d) > 0:
return u"%s天%s小时%s分钟%s秒" % (int(d), int(h), int(m),int(s))
elif int(d) == 0 and int(h) > 0:
return u"%s小时%s分钟%s秒" % (int(h), int(m), int(s))
elif int(d) == 0 and int(h) == 0 and int(m) > 0:
return u"%s分钟%s秒" % (int(m),int(s))
def add_months(dt,months, days=0):
month = dt.month - 1 + months
year = dt.year + month / 12
month = month % 12 + 1
day = min(dt.day,calendar.monthrange(year,month)[1])
dt = dt.replace(year=year, month=month, day=day)
return dt + datetime.timedelta(days=days)
def is_connect(timestr, period=600):
if not timestr:
return False
try:
last_ping = datetime.datetime.strptime(timestr, "%Y-%m-%d %H:%M:%S")
now = datetime.datetime.now()
tt = now - last_ping
return tt.seconds < period
except:
return False
def serial_model(mdl):
if not mdl:return
if not hasattr(mdl,'__table__'):return
data = {}
for c in mdl.__table__.columns:
data[c.name] = getattr(mdl, c.name)
return json.dumps(data,ensure_ascii=False)
def safestr(val):
if val is None:
return ''
if isinstance(val, unicode):
try:
return val.encode('utf-8')
except:
return val.encode('gb2312')
elif isinstance(val, str):
return val
elif isinstance(val, int):
return str(val)
elif isinstance(val, float):
return str(val)
elif isinstance(val, (dict,list)):
return json.dumps(val, ensure_ascii=False)
else:
try:
return str(val)
except:
return val
return val
def safeunicode(val):
if val is None:
return u''
if isinstance(val, str):
try:
return val.decode('utf-8')
except:
try:
return val.decode('gb2312')
except:
return val
elif isinstance(val, unicode):
return val
elif isinstance(val, int):
return str(val).decode('utf-8')
elif isinstance(val, float):
return str(val).decode('utf-8')
elif isinstance(val, (dict,list)):
return json.dumps(val)
else:
try:
return str(val).decode('utf-8')
except:
return val
return val
def gen_secret(clen=32):
rg = random.SystemRandom()
r = list('1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
return ''.join([rg.choice(r) for _ in range(clen)])
def timecast(func):
from twisted.python import log
@functools.wraps(func)
def warp(*args,**kargs):
_start = time.clock()
result = func(*args,**kargs)
log.msg("%s cast %.6f second"%(func.__name__,time.clock()-_start))
return result
return warp
def split_mline(src,wd=32,rstr='\r\n'):
_idx = 0
ss = []
for c in src:
if _idx > 0 and _idx%wd == 0:
ss.append(rstr)
ss.append(c)
_idx += 1
return ''.join(ss)
def get_cron_interval(cron_time):
if cron_time:
cron_time = "%s:00"%cron_time
date_now = datetime.datetime.now()
_now_hm = date_now.strftime("%H:%M:%S")
_ymd = get_currdate()
if _now_hm > cron_time:
_ymd = (date_now + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
_interval = datetime.datetime.strptime("%s %s"%(_ymd,cron_time),"%Y-%m-%d %H:%M:%S") - date_now
_itimes = int(_interval.total_seconds())
return _itimes if _itimes > 0 else 86400
else:
return 120
if __name__ == '__main__':
aes = AESCipher("LpWE9AtfDPQ3ufXBS6gJ37WW8TnSF920")
# aa = aes.encrypt(u"中文".encode('utf-8'))
# print aa
# cc = aes.decrypt(aa)
# print cc.encode('utf-8')
# aa = aes.decrypt("+//J9HPYQ+5PccoBZml6ngcLLu1/XQh2KyWakfcExJeb0wyq1C9+okztyaFbspYZ")
# print aa
# print get_cron_interval('09:32')
now = datetime.datetime.now()
mon = now.month + 1
mon = mon if mon <= 12 else 1
timestr = "%s-%s-1 01:00:00" % (now.year,mon)
_date = datetime.datetime.strptime(timestr, "%Y-%m-%d %H:%M:%S")
tt = (time.mktime(_date.timetuple()) - time.time()) /86400
print _date,tt
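# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the upstream file). Doctest-style only;
# it reuses the key from the demo above, assumes CLOSE_PASSWORD_ENCRYPTION is
# unset, and the numeric examples follow the converters defined earlier.
# ---------------------------------------------------------------------------
# >>> aescipher.setup('LpWE9AtfDPQ3ufXBS6gJ37WW8TnSF920')
# >>> aescipher.decrypt(aescipher.encrypt('secret'))
# u'secret'
# >>> kb2mb(2048)
# '2.00'
# >>> fen2yuan(1250)
# '12.50'
# >>> yuan2fen('12.50')
# 1250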
|
mit
| 374,508,963,388,416,800 | 25.984456 | 103 | 0.587462 | false |
F5Networks/f5-common-python
|
f5/bigip/tm/sys/ntp.py
|
1
|
2225
|
# coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® system ntp module
REST URI
``http://localhost/mgmt/tm/sys/ntp``
GUI Path
``System --> Configuration --> Device --> NTP``
REST Kind
``tm:sys:ntp:*``
"""
from f5.bigip.resource import Collection
from f5.bigip.resource import Resource
from f5.bigip.resource import UnnamedResource
class Ntp(UnnamedResource):
"""BIG-IP® system NTP unnamed resource
.. note::
This is an unnamed resource so it does not have the ~Partition~Name pattern
at the end of its URI.
"""
def __init__(self, sys):
super(Ntp, self).__init__(sys)
self._meta_data['required_load_parameters'] = set()
self._meta_data['required_json_kind'] = 'tm:sys:ntp:ntpstate'
self._meta_data['attribute_registry'] = {
'tm:sys:ntp:restrict:restrictcollectionstate': Restricts
}
self._meta_data['allowed_lazy_attributes'] = [Restricts]
class Restricts(Collection):
"""BIG-IP® system NTP restrict sub-collection"""
def __init__(self, ntp):
super(Restricts, self).__init__(ntp)
self._meta_data['allowed_lazy_attributes'] = [Restrict]
self._meta_data['required_json_kind'] =\
'tm:sys:ntp:restrict:restrictcollectionstate'
self._meta_data['attribute_registry'] =\
{'tm:sys:ntp:restrict:restrictstate': Restrict}
class Restrict(Resource):
"""BIG-IP® system NTP restrict sub-collection resource"""
def __init__(self, restricts):
super(Restrict, self).__init__(restricts)
self._meta_data['required_json_kind'] =\
'tm:sys:ntp:restrict:restrictstate'
|
apache-2.0
| 849,571,690,387,145,700 | 31.188406 | 76 | 0.659163 | false |
luboslenco/lowpolyfactory
|
populate.py
|
1
|
2844
|
import bpy
import bmesh
from random import seed, uniform
import math
import mathutils
from mathutils import *
from add_mesh_LowPolyFactory import LowPolyFactory
from add_mesh_LowPolyFactory.createRockObject import *
from add_mesh_LowPolyFactory.createTreeObject import *
from add_mesh_LowPolyFactory.createBushObject import *
from add_mesh_LowPolyFactory.createGrassObject import *
from add_mesh_LowPolyFactory.createCloudObject import *
from add_mesh_LowPolyFactory.createTerrainObject import *
from add_mesh_LowPolyFactory.createWaterObject import *
from add_mesh_LowPolyFactory.drop import *
def populate(self, context, ground=None, spherical=False,
merge=False, num_oaks=0, num_pines=0, num_palms=0,
num_rocks=0, num_bushes=0, num_grass=0):
# ground = create_terrain_object(
# self,
# context, 90, 90, size,
# strength=(40,40), scale=(20,20),
# weight1=(1,1), weight2=(2,2), weight3=(1,1))
if ground is None:
if len(bpy.context.selected_objects) > 0:
ground = bpy.context.selected_objects[0]
else:
return
obs = []
# Trees
tree_options = LowPolyFactory.add_mesh_tree.get_options(context)
tree_type = tree_options.lp_Tree_Type
tree_options.lp_Tree_Type = 'lp_Tree_Oak'
for i in range(0, num_oaks):
o = create_tree_object(self, context, tree_options)
obs.append(o)
tree_options.lp_Tree_Type = 'lp_Tree_Pine'
for i in range(0, num_pines):
o = create_tree_object(self, context, tree_options)
obs.append(o)
tree_options.lp_Tree_Type = 'lp_Tree_Palm'
for i in range(0, num_palms):
o = create_tree_object(self, context, tree_options)
o.rotation_euler = (0, 0, uniform(0, math.pi * 2))
obs.append(o)
tree_options.lp_Tree_Type = tree_type
# Rocks
rock_options = LowPolyFactory.add_mesh_rock.get_options(context)
for i in range(0, num_rocks):
o = create_rock_object(self, context, rock_options)
obs.append(o)
# Bushes
bushes_options = LowPolyFactory.add_mesh_bush.get_options(context)
for i in range(0, num_bushes):
o = create_bush_object(self, context, bushes_options)
obs.append(o)
# Grass
grass_options = LowPolyFactory.add_mesh_grass.get_options(context)
for i in range(0, num_grass):
o = create_grass_object(self, context, grass_options)
o.rotation_euler = (0, 0, uniform(0, math.pi * 2))
obs.append(o)
drop_objects(self, context, ground, obs, spherical)
if merge:
bpy.ops.object.select_all(action='DESELECT')
for o in obs:
o.select = True
bpy.context.scene.objects.active = obs[0]
obs[0].name = 'Population'
obs[0].data.name = 'Population'
bpy.ops.object.join()
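# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the upstream file). Meant to be called
# from a Blender operator with a terrain object active; the object counts
# below are illustrative.
# ---------------------------------------------------------------------------
# populate(self, bpy.context,
#          ground=bpy.context.active_object,
#          spherical=False, merge=True,
#          num_oaks=10, num_pines=5, num_palms=2,
#          num_rocks=8, num_bushes=6, num_grass=20)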
|
gpl-2.0
| 7,375,870,862,560,782,000 | 29.913043 | 70 | 0.65225 | false |
maas/maas
|
src/maasserver/migrations/maasserver/0008_use_new_arrayfield.py
|
1
|
2693
|
# -*- coding: utf-8 -*-
import django.contrib.postgres.fields
from django.db import migrations, models
import maasserver.fields
class Migration(migrations.Migration):
dependencies = [("maasserver", "0007_create_node_proxy_models")]
operations = [
migrations.AlterField(
model_name="blockdevice",
name="tags",
field=django.contrib.postgres.fields.ArrayField(
size=None,
base_field=models.TextField(),
null=True,
blank=True,
default=list,
),
),
migrations.AlterField(
model_name="bootsourceselection",
name="arches",
field=django.contrib.postgres.fields.ArrayField(
size=None,
base_field=models.TextField(),
null=True,
blank=True,
default=list,
),
),
migrations.AlterField(
model_name="bootsourceselection",
name="labels",
field=django.contrib.postgres.fields.ArrayField(
size=None,
base_field=models.TextField(),
null=True,
blank=True,
default=list,
),
),
migrations.AlterField(
model_name="bootsourceselection",
name="subarches",
field=django.contrib.postgres.fields.ArrayField(
size=None,
base_field=models.TextField(),
null=True,
blank=True,
default=list,
),
),
migrations.AlterField(
model_name="interface",
name="tags",
field=django.contrib.postgres.fields.ArrayField(
size=None,
base_field=models.TextField(),
null=True,
blank=True,
default=list,
),
),
migrations.AlterField(
model_name="node",
name="routers",
field=django.contrib.postgres.fields.ArrayField(
size=None,
base_field=maasserver.fields.MACAddressField(),
null=True,
blank=True,
default=list,
),
),
migrations.AlterField(
model_name="subnet",
name="dns_servers",
field=django.contrib.postgres.fields.ArrayField(
size=None,
base_field=models.TextField(),
null=True,
blank=True,
default=list,
),
),
]
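# ---------------------------------------------------------------------------
# Editor's sketch (not part of the upstream file): the model declaration that
# corresponds to one of the altered columns would look roughly like this.
# Field and model names are taken from the migration above; the rest is an
# assumption for illustration.
# ---------------------------------------------------------------------------
# class BlockDevice(models.Model):
#     tags = django.contrib.postgres.fields.ArrayField(
#         models.TextField(), blank=True, null=True, default=list)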
|
agpl-3.0
| -2,146,013,334,715,708,400 | 28.593407 | 68 | 0.47642 | false |
benkirk/mpi_playground
|
mpi4py/tests/test_file.py
|
2
|
6343
|
from mpi4py import MPI
import mpiunittest as unittest
import sys, os, tempfile
class BaseTestFile(object):
COMM = MPI.COMM_NULL
FILE = MPI.FILE_NULL
prefix = 'mpi4py'
def setUp(self):
fd, self.fname = tempfile.mkstemp(prefix=self.prefix)
os.close(fd)
self.amode = MPI.MODE_RDWR | MPI.MODE_CREATE
#self.amode |= MPI.MODE_DELETE_ON_CLOSE
try:
self.FILE = MPI.File.Open(self.COMM,
self.fname, self.amode,
MPI.INFO_NULL)
#self.fname=None
except Exception:
os.remove(self.fname)
raise
def tearDown(self):
if self.FILE == MPI.FILE_NULL: return
amode = self.FILE.amode
self.FILE.Close()
if not (amode & MPI.MODE_DELETE_ON_CLOSE):
MPI.File.Delete(self.fname, MPI.INFO_NULL)
@unittest.skipMPI('openmpi(==2.0.0)')
@unittest.skipMPI('MPICH2(<1.1.0)')
def testPreallocate(self):
size = self.FILE.Get_size()
self.assertEqual(size, 0)
self.FILE.Preallocate(1)
size = self.FILE.Get_size()
self.assertEqual(size, 1)
self.FILE.Preallocate(100)
size = self.FILE.Get_size()
self.assertEqual(size, 100)
self.FILE.Preallocate(10)
size = self.FILE.Get_size()
self.assertEqual(size, 100)
self.FILE.Preallocate(200)
size = self.FILE.Get_size()
self.assertEqual(size, 200)
def testGetSetSize(self):
size = self.FILE.Get_size()
self.assertEqual(size, 0)
size = self.FILE.size
self.assertEqual(size, 0)
self.FILE.Set_size(100)
size = self.FILE.Get_size()
self.assertEqual(size, 100)
size = self.FILE.size
self.assertEqual(size, 100)
def testGetGroup(self):
fgroup = self.FILE.Get_group()
cgroup = self.COMM.Get_group()
gcomp = MPI.Group.Compare(fgroup, cgroup)
self.assertEqual(gcomp, MPI.IDENT)
fgroup.Free()
cgroup.Free()
def testGetAmode(self):
amode = self.FILE.Get_amode()
self.assertEqual(self.amode, amode)
self.assertEqual(self.FILE.amode, self.amode)
def testGetSetInfo(self):
#info = MPI.INFO_NULL
#self.FILE.Set_info(info)
info = MPI.Info.Create()
self.FILE.Set_info(info)
info.Free()
info = self.FILE.Get_info()
self.FILE.Set_info(info)
info.Free()
def testGetSetView(self):
fsize = 100 * MPI.DOUBLE.size
self.FILE.Set_size(fsize)
displacements = range(100)
datatypes = [MPI.SHORT, MPI.INT, MPI.LONG, MPI.FLOAT, MPI.DOUBLE]
datareps = ['native'] #['native', 'internal', 'external32']
for disp in displacements:
for dtype in datatypes:
for datarep in datareps:
etype, ftype = dtype, dtype
self.FILE.Set_view(disp, etype, ftype,
datarep, MPI.INFO_NULL)
of, et, ft, dr = self.FILE.Get_view()
self.assertEqual(disp, of)
self.assertEqual(etype.Get_extent(), et.Get_extent())
self.assertEqual(ftype.Get_extent(), ft.Get_extent())
self.assertEqual(datarep, dr)
try:
if not et.is_predefined: et.Free()
except NotImplementedError:
if et != etype: et.Free()
try:
if not ft.is_predefined: ft.Free()
except NotImplementedError:
if ft != ftype: ft.Free()
def testGetSetAtomicity(self):
atom = self.FILE.Get_atomicity()
self.assertFalse(atom)
for atomicity in [True, False] * 4:
self.FILE.Set_atomicity(atomicity)
atom = self.FILE.Get_atomicity()
self.assertEqual(atom, atomicity)
def testSync(self):
self.FILE.Sync()
def testSeekGetPosition(self):
offset = 0
self.FILE.Seek(offset, MPI.SEEK_END)
self.FILE.Seek(offset, MPI.SEEK_CUR)
self.FILE.Seek(offset, MPI.SEEK_SET)
pos = self.FILE.Get_position()
self.assertEqual(pos, offset)
def testSeekGetPositionShared(self):
offset = 0
self.FILE.Seek_shared(offset, MPI.SEEK_END)
self.FILE.Seek_shared(offset, MPI.SEEK_CUR)
self.FILE.Seek_shared(offset, MPI.SEEK_SET)
pos = self.FILE.Get_position_shared()
self.assertEqual(pos, offset)
@unittest.skipMPI('openmpi(==2.0.0)')
def testGetByteOffset(self):
for offset in range(10):
disp = self.FILE.Get_byte_offset(offset)
self.assertEqual(disp, offset)
def testGetTypeExtent(self):
extent = self.FILE.Get_type_extent(MPI.BYTE)
self.assertEqual(extent, 1)
def testGetErrhandler(self):
eh = self.FILE.Get_errhandler()
self.assertEqual(eh, MPI.ERRORS_RETURN)
eh.Free()
class TestFileNull(unittest.TestCase):
def setUp(self):
self.eh_save = MPI.FILE_NULL.Get_errhandler()
def tearDown(self):
MPI.FILE_NULL.Set_errhandler(self.eh_save)
self.eh_save.Free()
def testGetSetErrhandler(self):
eh = MPI.FILE_NULL.Get_errhandler()
self.assertEqual(eh, MPI.ERRORS_RETURN)
eh.Free()
MPI.FILE_NULL.Set_errhandler(MPI.ERRORS_ARE_FATAL)
eh = MPI.FILE_NULL.Get_errhandler()
self.assertEqual(eh, MPI.ERRORS_ARE_FATAL)
eh.Free()
MPI.FILE_NULL.Set_errhandler(MPI.ERRORS_RETURN)
eh = MPI.FILE_NULL.Get_errhandler()
self.assertEqual(eh, MPI.ERRORS_RETURN)
eh.Free()
class TestFileSelf(BaseTestFile, unittest.TestCase):
COMM = MPI.COMM_SELF
prefix = BaseTestFile.prefix + ('-%d' % MPI.COMM_WORLD.Get_rank())
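# Hedged illustration, not part of the original suite: BaseTestFile is a plain
# mixin parameterized only by COMM and prefix, so a variant bound to another
# communicator would follow the same shape as TestFileSelf above. The class
# name below is hypothetical.
#
#   class TestFileWorld(BaseTestFile, unittest.TestCase):
#       COMM = MPI.COMM_WORLD
#       prefix = BaseTestFile.prefix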
def have_feature():
case = BaseTestFile()
case.COMM = TestFileSelf.COMM
case.prefix = TestFileSelf.prefix
case.setUp()
case.tearDown()
try:
have_feature()
except NotImplementedError:
unittest.disable(BaseTestFile, 'mpi-file')
unittest.disable(TestFileNull, 'mpi-file')
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| -5,663,708,502,370,594,000 | 31.362245 | 73 | 0.575122 | false |
lukacu/foldersync
|
foldersync/storage/ssh.py
|
1
|
3894
|
# -*- Mode: python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
"""
Friendly Python SSH2 interface.
Copied from http://media.commandline.org.uk//code/ssh.txt
Modified by James Yoneda to include command-line arguments.
"""
import getopt
import os
import time
import paramiko
import sys
import tempfile
from foldersync.storage import Status
class SSHStorage(object):
"""Connects and logs into the specified hostname.
Arguments that are not given are guessed from the environment."""
def __init__(self,
host,
username = None,
private_key = None,
password = None,
port = 22,
):
    if port is None:
      port = 22
    else:
      port = int(port)
self._sftp_live = False
self._sftp = None
if not username:
username = os.environ['LOGNAME']
# Log to a temporary file.
templog = tempfile.mkstemp('.txt', 'con-')[1]
paramiko.util.log_to_file(templog)
# Begin the SSH transport.
self._transport = paramiko.Transport((host, port))
self._tranport_live = True
# Authenticate the transport.
if password:
# Using Password.
self._transport.connect(username = username, password = password)
else:
## Use Private Key.
#if not private_key:
# # Try to use default key.
# if os.path.exists(os.path.expanduser('~/.con/id_rsa')):
# private_key = '~/.con/id_rsa'
# elif os.path.exists(os.path.expanduser('~/.con/id_dsa')):
# private_key = '~/.con/id_dsa'
# else:
# raise TypeError, "You have not specified a password or key."
private_key_file = os.path.expanduser(private_key)
rsa_key = paramiko.RSAKey.from_private_key_file(private_key_file)
self._transport.connect(username = username, pkey = rsa_key)
self._sftp_connect()
self._time_offset = 0
try:
remote_time = int(self._execute("date +%s")[0].strip())
self._time_offset = time.time() - remote_time
except:
pass
def _sftp_connect(self):
"""Establish a SFTP connection."""
if not self._sftp_live:
self._sftp = paramiko.SFTPClient.from_transport(self._transport)
self._sftp_live = True
def put(self, localpath, remotepath = None):
"""Copies a file between the local host and the remote host."""
if not remotepath:
remotepath = os.path.split(localpath)[1]
if not os.path.exists(localpath):
return
self._sftp_connect()
if os.path.isdir(localpath):
try:
self._sftp.mkdir(remotepath)
except IOError:
pass
else:
self._sftp.put(localpath, remotepath)
def stat(self, remotepath):
"""Provides information about the remote file."""
self._sftp_connect()
try:
status = self._sftp.stat(remotepath)
return Status(status.st_mtime + self._time_offset, status.st_size)
except IOError:
return None
def _execute(self, command):
"""Execute a given command on a remote machine."""
channel = self._transport.open_session()
channel.exec_command(command)
output = channel.makefile('rb', -1).readlines()
if output:
return output
else:
return channel.makefile_stderr('rb', -1).readlines()
def _get(self, remotepath, localpath = None):
"""Copies a file between the remote host and the local host."""
if not localpath:
localpath = os.path.split(remotepath)[1]
self._sftp_connect()
self._sftp.get(remotepath, localpath)
def close(self):
"""Closes the connection and cleans up."""
# Close SFTP Connection.
if self._sftp_live:
self._sftp.close()
self._sftp_live = False
# Close the SSH Transport.
if self._tranport_live:
self._transport.close()
self._tranport_live = False
def __del__(self):
"""Attempt to clean up if not explicitly closed."""
self.close()
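# Hedged usage sketch, not part of the original module: the host name, user
# name and key path below are placeholders, not values defined anywhere above.
if __name__ == '__main__':
  storage = SSHStorage('example.com', username='deploy',
                       private_key='~/.ssh/id_rsa')
  storage.put('report.txt', 'report.txt')    # upload a single file
  print(storage.stat('report.txt'))          # a Status object, or None if missing
  storage.close()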
|
gpl-3.0
| -3,656,374,407,454,393,300 | 27.423358 | 78 | 0.628403 | false |
bitonic/troppotardi
|
troppotardi/config/routing.py
|
1
|
1701
|
"""Routes configuration
The more specific and detailed routes should be defined first so they
may take precedent over the more generic routes. For more information
refer to the routes manual at http://routes.groovie.org/docs/
"""
from routes import Mapper
def make_map(config):
"""Create, configure and return the routes Mapper"""
map = Mapper(directory=config['pylons.paths']['controllers'],
always_scan=config['debug'])
map.minimization = False
map.explicit = False
# The ErrorController route (handles 404/500 error pages); it should
# likely stay at the top, ensuring it can always be resolved
map.connect('/error/{action}', controller='error')
map.connect('/error/{action}/{id}', controller='error')
# CUSTOM ROUTES HERE
map.connect('index', '/', controller='pages', action='index', page='home')
map.connect('home', '/home', controller='pages', action='index', page='home')
map.connect('months', '/months/{year}/{month}', controller='images', action='months')
map.connect('show_image', '/photo/{day}', controller='images', action='show')
map.connect('last', '/photos/last', controller='images', action='last')
map.connect('page', '/pages/{page}', controller='pages', action='index')
map.connect('feed', '/feed.atom', controller='feed', action='index')
# legacy redirect
map.connect('/image/{day}', controller='images', action='show_redir')
map.connect('/images/last', controller='images', action='last_redir')
map.connect('admin_home', '/admin/', controller='admin', action='index')
map.connect('/{controller}/{action}')
map.connect('/{controller}/{action}/{id}')
return map
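# Hedged sketch, not part of the original file: the same connect/match pattern
# on a throwaway Mapper, showing how the 'show_image' route above resolves.
# The URL value is a placeholder.
if __name__ == '__main__':
    demo = Mapper()
    demo.connect('show_image', '/photo/{day}', controller='images', action='show')
    print(demo.match('/photo/2011-05-01'))
    # roughly -> {'controller': 'images', 'action': 'show', 'day': '2011-05-01'}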
|
gpl-2.0
| -996,590,310,799,494,400 | 42.615385 | 89 | 0.670194 | false |
uclouvain/OSIS-Louvain
|
base/migrations/0046_scoresencoding.py
|
1
|
1993
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-02 15:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0045_update_justification_values'),
]
operations = [
migrations.RunSQL(
"""
DROP VIEW IF EXISTS app_scores_encoding;
CREATE OR REPLACE VIEW app_scores_encoding AS
SELECT row_number() OVER () as id,
base_programmanager.id as program_manager_id,
program_manager_person.id as pgm_manager_person_id,
base_offeryear.id as offer_year_id,
base_learningunityear.id as learning_unit_year_id,
count(base_examenrollment.id) as total_exam_enrollments,
sum(case when base_examenrollment.score_final is not null or base_examenrollment.justification_final is not null then 1 else 0 end) exam_enrollments_encoded
from base_examenrollment
join base_sessionexam on base_sessionexam.id = base_examenrollment.session_exam_id
join base_learningunityear on base_learningunityear.id = base_sessionexam.learning_unit_year_id
join base_offeryearcalendar on base_offeryearcalendar.id = base_sessionexam.offer_year_calendar_id
join base_offeryear on base_offeryear.id = base_offeryearcalendar.offer_year_id
join base_programmanager on base_programmanager.offer_year_id = base_offeryear.id
join base_person program_manager_person on program_manager_person.id = base_programmanager.person_id
where base_offeryearcalendar.start_date < CURRENT_TIMESTAMP
and base_offeryearcalendar.end_date > CURRENT_TIMESTAMP
group by
base_programmanager.id,
program_manager_person.id,
base_offeryear.id,
base_learningunityear.id
;
"""
),
]
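# Hedged note, not part of the original migration: RunSQL also accepts reverse
# SQL, so an invertible variant of the operation above could be sketched as
#
#   migrations.RunSQL(
#       sql="CREATE OR REPLACE VIEW app_scores_encoding AS ...",
#       reverse_sql="DROP VIEW IF EXISTS app_scores_encoding;",
#   )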
|
agpl-3.0
| 1,676,644,467,102,433,000 | 38.078431 | 172 | 0.644757 | false |
npp/npp-api
|
data/migrations/0029_auto__del_field_usaspendingassistanceraw_cfda_program__add_field_usasp.py
|
1
|
162121
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'UsaspendingAssistanceRaw.cfda_program'
db.delete_column('data_usaspendingassistanceraw', 'cfda_program')
# Adding field 'UsaspendingAssistanceRaw.cfda_program_num'
db.add_column('data_usaspendingassistanceraw', 'cfda_program_num',
self.gf('django.db.models.fields.CharField')(default='missing', max_length=100),
keep_default=False)
def backwards(self, orm):
# Adding field 'UsaspendingAssistanceRaw.cfda_program'
db.add_column('data_usaspendingassistanceraw', 'cfda_program',
self.gf('django.db.models.fields.CharField')(default='missing', max_length=100),
keep_default=False)
# Deleting field 'UsaspendingAssistanceRaw.cfda_program_num'
db.delete_column('data_usaspendingassistanceraw', 'cfda_program_num')
models = {
'data.agegroup': {
'Meta': {'object_name': 'AgeGroup'},
'age_group_desc': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'age_group_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.alternativefuelvehicles': {
'Meta': {'object_name': 'AlternativeFuelVehicles'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.annualstateenergyexpenditures': {
'Meta': {'object_name': 'AnnualStateEnergyExpenditures'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ansicountystate': {
'Meta': {'object_name': 'AnsiCountyState'},
'ansi_class': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'ansi_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.ansistate': {
'Meta': {'object_name': 'AnsiState'},
'ansi_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'gnisid': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'data.atcodes': {
'Meta': {'object_name': 'AtCodes'},
'assistance_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'data.averageteachersalary': {
'Meta': {'object_name': 'AverageTeacherSalary'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.bilingualedspending': {
'Meta': {'object_name': 'BilingualEdSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.budgetcategorysubfunctions': {
'Meta': {'object_name': 'BudgetCategorySubfunctions'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'npp_budget_category': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'subfunction': ('django.db.models.fields.TextField', [], {'max_length': '3'})
},
'data.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'data.cffr': {
'Meta': {'unique_together': "(('year', 'state', 'county', 'cffrprogram'),)", 'object_name': 'Cffr'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'cffrprogram': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CffrProgram']"}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffragency': {
'Meta': {'object_name': 'CffrAgency'},
'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrgeo': {
'Meta': {'object_name': 'CffrGeo'},
'congress_district': ('django.db.models.fields.CharField', [], {'max_length': '34', 'null': 'True'}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'place_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'place_name': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'split_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_gu': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'type_gu': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrindividualcounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'CffrIndividualCounty'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrindividualstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'CffrIndividualState'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrobjectcode': {
'Meta': {'object_name': 'CffrObjectCode'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.cffrprogram': {
'Meta': {'unique_together': "(('year', 'program_code'),)", 'object_name': 'CffrProgram'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'program_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'program_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrprogramraw': {
'Meta': {'object_name': 'CffrProgramRaw'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program_id_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'program_name': ('django.db.models.fields.CharField', [], {'max_length': '74'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrraw': {
'Meta': {'object_name': 'CffrRaw'},
'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_adjusted': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'congress_district': ('django.db.models.fields.CharField', [], {'max_length': '34', 'null': 'True'}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}),
'funding_sign': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'place_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'place_name': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'program_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_postal': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrstate': {
'Meta': {'unique_together': "(('year', 'state', 'cffrprogram'),)", 'object_name': 'CffrState'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'cffrprogram': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CffrProgram']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.childrenpovertystate': {
'Meta': {'object_name': 'ChildrenPovertyState'},
'children_poverty': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_moe': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_total': ('django.db.models.fields.IntegerField', [], {}),
'children_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.childrenpovertystateraw': {
'Meta': {'object_name': 'ChildrenPovertyStateRaw'},
'children_poverty': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_moe': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_total': ('django.db.models.fields.IntegerField', [], {}),
'children_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.county': {
'Meta': {'unique_together': "(('state', 'county_ansi'),)", 'object_name': 'County'},
'county_abbr': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'county_ansi': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.diplomarecipienttotal': {
'Meta': {'object_name': 'DiplomaRecipientTotal'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.dropoutsrace': {
'Meta': {'object_name': 'DropoutsRace'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.drugfreeschoolspending': {
'Meta': {'object_name': 'DrugFreeSchoolSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.educationalattainment': {
'Meta': {'object_name': 'EducationalAttainment'},
'category': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'gender': ('django.db.models.fields.TextField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_type': ('django.db.models.fields.TextField', [], {'max_length': '16'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.electricemissionsstate': {
'Meta': {'unique_together': "(('year', 'state', 'producer_type', 'energy_source'),)", 'object_name': 'ElectricEmissionsState'},
'co2': ('django.db.models.fields.BigIntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'energy_source': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nox': ('django.db.models.fields.BigIntegerField', [], {}),
'producer_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'so2': ('django.db.models.fields.BigIntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.electricemissionsstateraw': {
'Meta': {'object_name': 'ElectricEmissionsStateRaw'},
'co2': ('django.db.models.fields.BigIntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'energy_source': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nox': ('django.db.models.fields.BigIntegerField', [], {}),
'producer_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'so2': ('django.db.models.fields.BigIntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ellstudentsdistrict': {
'Meta': {'object_name': 'EllStudentsDistrict'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.employment': {
'Meta': {'object_name': 'Employment'},
'black_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'hispanic_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'hispanic_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyconsumptionstate': {
'Meta': {'unique_together': "(('year', 'state', 'msn'),)", 'object_name': 'EnergyConsumptionState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Msn']"}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyconsumptionstateraw': {
'Meta': {'object_name': 'EnergyConsumptionStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyproductionstate': {
'Meta': {'unique_together': "(('year', 'state', 'msn'),)", 'object_name': 'EnergyProductionState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Msn']"}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyproductionstateraw': {
'Meta': {'object_name': 'EnergyProductionStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.enrolledstudentsdistrict': {
'Meta': {'object_name': 'EnrolledStudentsDistrict'},
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
'data.enrollmentrace': {
'Meta': {'object_name': 'EnrollmentRace'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ethnicity': {
'Meta': {'object_name': 'Ethnicity'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ethnicity_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '5'}),
'ethnicity_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'ethnicity_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.expenditureperpupil': {
'Meta': {'object_name': 'ExpenditurePerPupil'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.familiespovertystate': {
'Meta': {'object_name': 'FamiliesPovertyState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'families_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_total': ('django.db.models.fields.IntegerField', [], {}),
'families_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.familiespovertystateraw': {
'Meta': {'object_name': 'FamiliesPovertyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'families_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_total': ('django.db.models.fields.IntegerField', [], {}),
'families_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.fcnaspending': {
'Meta': {'object_name': 'FcnaSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.federalimpactaid': {
'Meta': {'object_name': 'FederalImpactAid'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.federaltaxcollectionstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'FederalTaxCollectionState'},
'business_income': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'estate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'estate_trust_income': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'excise': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'gift': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'individual_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'notwitheld_income_and_seca': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'railroad_retirement': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'unemployment_insurance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'witheld_income_and_fica': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.federaltaxcollectionstateraw': {
'Meta': {'object_name': 'FederalTaxCollectionStateRaw'},
'business_income_taxes': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'estate_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'estate_trust_income_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'excise_taxes': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'gift_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'income_employment_estate_trust_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'individual_notwitheld_seca': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'individual_witheld_fica': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'railroad_retirement': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'total_collections': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'unemployment_insurance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.fipscountycongressdistrict': {
'Meta': {'object_name': 'FipsCountyCongressDistrict'},
'congress': ('django.db.models.fields.IntegerField', [], {}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'district_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.fipsstate': {
'Meta': {'object_name': 'FipsState'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.foodsecuritystate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'FoodSecurityState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'food_insecure': ('django.db.models.fields.IntegerField', [], {}),
'food_insecure_percent': ('django.db.models.fields.FloatField', [], {}),
'food_secure': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_high': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_high_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'food_secure_low': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_low_percent': ('django.db.models.fields.FloatField', [], {}),
'food_secure_marginal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_marginal_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'food_secure_percent': ('django.db.models.fields.FloatField', [], {}),
'food_secure_very_low': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_very_low_percent': ('django.db.models.fields.FloatField', [], {}),
'household_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_response': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.foodsecuritystateraw': {
'Meta': {'object_name': 'FoodSecurityStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'food_secure': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_high': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_low': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_marginal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_very_low': ('django.db.models.fields.IntegerField', [], {}),
'household_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_response': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.freeluncheligible': {
'Meta': {'object_name': 'FreeLunchEligible'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.freereducedluncheligible': {
'Meta': {'object_name': 'FreeReducedLunchEligible'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.freereducedluncheligiblecounty': {
'Meta': {'object_name': 'FreeReducedLunchEligibleCounty'},
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.gender': {
'Meta': {'object_name': 'Gender'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'gender_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1'}),
'gender_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.halfpints': {
'Meta': {'object_name': 'HalfPints'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.headstartenrollment': {
'Meta': {'object_name': 'HeadStartEnrollment'},
'enrollment': ('django.db.models.fields.IntegerField', [], {}),
'funding': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.healthinsurance': {
'Meta': {'object_name': 'HealthInsurance'},
'all_people': ('django.db.models.fields.IntegerField', [], {}),
'covered': ('django.db.models.fields.IntegerField', [], {}),
'covered_pct': ('django.db.models.fields.FloatField', [], {}),
'covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase': ('django.db.models.fields.IntegerField', [], {}),
'direct_purchase_pct': ('django.db.models.fields.FloatField', [], {}),
'direct_purchase_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt': ('django.db.models.fields.IntegerField', [], {}),
'govt_pct': ('django.db.models.fields.FloatField', [], {}),
'govt_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medicaid': ('django.db.models.fields.IntegerField', [], {}),
'medicaid_pct': ('django.db.models.fields.FloatField', [], {}),
'medicaid_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicaid_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare': ('django.db.models.fields.IntegerField', [], {}),
'medicare_pct': ('django.db.models.fields.FloatField', [], {}),
'medicare_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military': ('django.db.models.fields.IntegerField', [], {}),
'military_pct': ('django.db.models.fields.FloatField', [], {}),
'military_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered': ('django.db.models.fields.IntegerField', [], {}),
'not_covered_pct': ('django.db.models.fields.FloatField', [], {}),
'not_covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private': ('django.db.models.fields.IntegerField', [], {}),
'private_employment': ('django.db.models.fields.IntegerField', [], {}),
'private_employment_pct': ('django.db.models.fields.FloatField', [], {}),
'private_employment_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_employment_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_pct': ('django.db.models.fields.FloatField', [], {}),
'private_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.healthinsurancestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'HealthInsuranceState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pop': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_18_34_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_18_34_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_18_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_18_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_35_64_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_35_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_over_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_over_64_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_over_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_under_18_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'pop_under_18_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.healthinsurancestateraw': {
'Meta': {'object_name': 'HealthInsuranceStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'geoid': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pop': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_18_34_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_35_64_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_over_64_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_private_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_public': ('django.db.models.fields.IntegerField', [], {}),
'pop_under_18_public_moe': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.highschooldropouts': {
'Meta': {'object_name': 'HighSchoolDropouts'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.highschoolother': {
'Meta': {'object_name': 'HighSchoolOther'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.housingoccupancystate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'HousingOccupancyState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'occupied_units': ('django.db.models.fields.IntegerField', [], {}),
'occupied_units_moe': ('django.db.models.fields.IntegerField', [], {}),
'occupied_units_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'occupied_units_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_occupied': ('django.db.models.fields.IntegerField', [], {}),
'owner_occupied_moe': ('django.db.models.fields.IntegerField', [], {}),
'owner_occupied_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_occupied_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_vacancy_rate_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_occupied': ('django.db.models.fields.IntegerField', [], {}),
'renter_occupied_moe': ('django.db.models.fields.IntegerField', [], {}),
'renter_occupied_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_occupied_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_vacancy_rate_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total_units': ('django.db.models.fields.IntegerField', [], {}),
'total_units_moe': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vacant_units': ('django.db.models.fields.IntegerField', [], {}),
'vacant_units_moe': ('django.db.models.fields.IntegerField', [], {}),
'vacant_units_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'vacant_units_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.housingoccupancystateraw': {
'Meta': {'object_name': 'HousingOccupancyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'occupied_units': ('django.db.models.fields.IntegerField', [], {}),
'occupied_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'occupied_units_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'occupied_units_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'owner_occupied': ('django.db.models.fields.IntegerField', [], {}),
'owner_occupied_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'owner_occupied_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'owner_occupied_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'owner_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_vacancy_rate_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'renter_occupied': ('django.db.models.fields.IntegerField', [], {}),
'renter_occupied_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'renter_occupied_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'renter_occupied_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'renter_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_vacancy_rate_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'total_units': ('django.db.models.fields.IntegerField', [], {}),
'total_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vacant_units': ('django.db.models.fields.IntegerField', [], {}),
'vacant_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vacant_units_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'vacant_units_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.individualeducationprograms': {
'Meta': {'object_name': 'IndividualEducationPrograms'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.kidshealthinsurance': {
'Meta': {'object_name': 'KidsHealthInsurance'},
'all_people': ('django.db.models.fields.IntegerField', [], {}),
'covered': ('django.db.models.fields.IntegerField', [], {}),
'covered_pct': ('django.db.models.fields.FloatField', [], {}),
'covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase': ('django.db.models.fields.IntegerField', [], {}),
'direct_purchase_pct': ('django.db.models.fields.FloatField', [], {}),
'direct_purchase_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt': ('django.db.models.fields.IntegerField', [], {}),
'govt_pct': ('django.db.models.fields.FloatField', [], {}),
'govt_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medicaid': ('django.db.models.fields.IntegerField', [], {}),
'medicaid_pct': ('django.db.models.fields.FloatField', [], {}),
'medicaid_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicaid_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare': ('django.db.models.fields.IntegerField', [], {}),
'medicare_pct': ('django.db.models.fields.FloatField', [], {}),
'medicare_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military': ('django.db.models.fields.IntegerField', [], {}),
'military_pct': ('django.db.models.fields.FloatField', [], {}),
'military_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered': ('django.db.models.fields.IntegerField', [], {}),
'not_covered_pct': ('django.db.models.fields.FloatField', [], {}),
'not_covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private': ('django.db.models.fields.IntegerField', [], {}),
'private_employment': ('django.db.models.fields.IntegerField', [], {}),
'private_employment_pct': ('django.db.models.fields.FloatField', [], {}),
'private_employment_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_employment_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_pct': ('django.db.models.fields.FloatField', [], {}),
'private_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcecounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'LaborForceCounty'},
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employment_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'laus_code': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'unemployment_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcecountyraw': {
'Meta': {'object_name': 'LaborForceCountyRaw'},
'county_fips': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'laus_code': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'unemployed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'LaborForceState'},
'civilian_noninstitutional_pop': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employment_pop_rate': ('django.db.models.fields.FloatField', [], {}),
'employment_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force_participation_rate': ('django.db.models.fields.FloatField', [], {}),
'labor_force_total': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {}),
'unemployment_total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcestateraw': {
'Meta': {'object_name': 'LaborForceStateRaw'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'area_fips': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'civilian_noninstitutional_pop': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employment_pop_rate': ('django.db.models.fields.FloatField', [], {}),
'employment_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force_participation_rate': ('django.db.models.fields.FloatField', [], {}),
'labor_force_total': ('django.db.models.fields.IntegerField', [], {}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {}),
'unemployment_total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborunderutilizationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'LaborUnderutilizationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'u1': ('django.db.models.fields.FloatField', [], {}),
'u2': ('django.db.models.fields.FloatField', [], {}),
'u3': ('django.db.models.fields.FloatField', [], {}),
'u4': ('django.db.models.fields.FloatField', [], {}),
'u5': ('django.db.models.fields.FloatField', [], {}),
'u6': ('django.db.models.fields.FloatField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborunderutilizationstateraw': {
'Meta': {'object_name': 'LaborUnderutilizationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'u1': ('django.db.models.fields.FloatField', [], {}),
'u2': ('django.db.models.fields.FloatField', [], {}),
'u3': ('django.db.models.fields.FloatField', [], {}),
'u4': ('django.db.models.fields.FloatField', [], {}),
'u5': ('django.db.models.fields.FloatField', [], {}),
'u6': ('django.db.models.fields.FloatField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.mathsciencespending': {
'Meta': {'object_name': 'MathScienceSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medianhouseholdincomestateraw': {
'Meta': {'object_name': 'MedianHouseholdIncomeStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'median_household_income': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'median_household_income_moe': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medianincomestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'MedianIncomeState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'median_household_income': ('django.db.models.fields.FloatField', [], {}),
'median_household_income_moe': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medicaidparticipation': {
'Meta': {'object_name': 'MedicaidParticipation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medicareenrollment': {
'Meta': {'object_name': 'MedicareEnrollment'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.migrantstudents': {
'Meta': {'object_name': 'MigrantStudents'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.militarypersonnel': {
'Meta': {'object_name': 'MilitaryPersonnel'},
'civilian_personnel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'military_personnel': ('django.db.models.fields.IntegerField', [], {}),
'reserve_national_guard_personnel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.msn': {
'Meta': {'object_name': 'Msn'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'msn_desc': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msn_unit': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.nativeedspending': {
'Meta': {'object_name': 'NativeEdSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ncesschooldistrict': {
'Meta': {'object_name': 'NcesSchoolDistrict'},
'congress_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'district_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'district_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.newaidscases': {
'Meta': {'object_name': 'NewAidsCases'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.otherfederalrevenue': {
'Meta': {'object_name': 'OtherFederalRevenue'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'data.peoplepovertystate': {
'Meta': {'object_name': 'PeoplePovertyState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'percent_standard_error': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total_population': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'value_standard_error': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.peoplepovertystateraw': {
'Meta': {'object_name': 'PeoplePovertyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'percent_standard_error': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total_population': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'value_standard_error': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationagecounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationAgeCounty'},
'age_0_19': ('django.db.models.fields.IntegerField', [], {}),
'age_0_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_4': ('django.db.models.fields.IntegerField', [], {}),
'age_0_4_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_10_14': ('django.db.models.fields.IntegerField', [], {}),
'age_10_14_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_15_19': ('django.db.models.fields.IntegerField', [], {}),
'age_15_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_20_24': ('django.db.models.fields.IntegerField', [], {}),
'age_20_24_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_25_29': ('django.db.models.fields.IntegerField', [], {}),
'age_25_29_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_30_34': ('django.db.models.fields.IntegerField', [], {}),
'age_30_34_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_35_39': ('django.db.models.fields.IntegerField', [], {}),
'age_35_39_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_40_44': ('django.db.models.fields.IntegerField', [], {}),
'age_40_44_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_45_49': ('django.db.models.fields.IntegerField', [], {}),
'age_45_49_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_50_54': ('django.db.models.fields.IntegerField', [], {}),
'age_50_54_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_55_59': ('django.db.models.fields.IntegerField', [], {}),
'age_55_59_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_9': ('django.db.models.fields.IntegerField', [], {}),
'age_5_9_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_60_64': ('django.db.models.fields.IntegerField', [], {}),
'age_60_64_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_69': ('django.db.models.fields.IntegerField', [], {}),
'age_65_69_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_over': ('django.db.models.fields.IntegerField', [], {}),
'age_65_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_70_74': ('django.db.models.fields.IntegerField', [], {}),
'age_70_74_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_75_79': ('django.db.models.fields.IntegerField', [], {}),
'age_75_79_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_80_84': ('django.db.models.fields.IntegerField', [], {}),
'age_80_84_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_85_over': ('django.db.models.fields.IntegerField', [], {}),
'age_85_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationagestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationAgeState'},
'age_0_19': ('django.db.models.fields.IntegerField', [], {}),
'age_0_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_4': ('django.db.models.fields.IntegerField', [], {}),
'age_0_4_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_10_14': ('django.db.models.fields.IntegerField', [], {}),
'age_10_14_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_15_19': ('django.db.models.fields.IntegerField', [], {}),
'age_15_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_20_24': ('django.db.models.fields.IntegerField', [], {}),
'age_20_24_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_25_29': ('django.db.models.fields.IntegerField', [], {}),
'age_25_29_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_30_34': ('django.db.models.fields.IntegerField', [], {}),
'age_30_34_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_35_39': ('django.db.models.fields.IntegerField', [], {}),
'age_35_39_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_40_44': ('django.db.models.fields.IntegerField', [], {}),
'age_40_44_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_45_49': ('django.db.models.fields.IntegerField', [], {}),
'age_45_49_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_50_54': ('django.db.models.fields.IntegerField', [], {}),
'age_50_54_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_55_59': ('django.db.models.fields.IntegerField', [], {}),
'age_55_59_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_9': ('django.db.models.fields.IntegerField', [], {}),
'age_5_9_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_60_64': ('django.db.models.fields.IntegerField', [], {}),
'age_60_64_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_69': ('django.db.models.fields.IntegerField', [], {}),
'age_65_69_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_over': ('django.db.models.fields.IntegerField', [], {}),
'age_65_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_70_74': ('django.db.models.fields.IntegerField', [], {}),
'age_70_74_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_75_79': ('django.db.models.fields.IntegerField', [], {}),
'age_75_79_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_80_84': ('django.db.models.fields.IntegerField', [], {}),
'age_80_84_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_85_over': ('django.db.models.fields.IntegerField', [], {}),
'age_85_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationcongressionaldistrict': {
'Meta': {'object_name': 'PopulationCongressionalDistrict'},
'american_indian_alaskan_alone': ('django.db.models.fields.IntegerField', [], {}),
'asian_alone': ('django.db.models.fields.IntegerField', [], {}),
'black_alone': ('django.db.models.fields.IntegerField', [], {}),
'district': ('django.db.models.fields.IntegerField', [], {}),
'hawaiian_pacific_island_alone': ('django.db.models.fields.IntegerField', [], {}),
'households': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'other_alone': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'two_or_more_races': ('django.db.models.fields.IntegerField', [], {}),
'white_alone': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationest00raw': {
'Meta': {'unique_together': "(('state', 'county', 'gender', 'ethnic_origin', 'race'),)", 'object_name': 'PopulationEst00Raw'},
'census2010pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctyname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'estimatesbase2000': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popestimate2000': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2001': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2002': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2003': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2004': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2005': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2006': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2007': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2008': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2009': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'race': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'stname': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'sumlev': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.populationest10raw': {
'Meta': {'unique_together': "(('state', 'county', 'gender', 'ethnic_origin', 'race'),)", 'object_name': 'PopulationEst10Raw'},
'census2010pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'census2020pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctyname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'estimatesbase2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popestimate2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2011': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2012': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2013': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2014': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2015': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2016': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2017': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2018': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2019': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2020': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'race': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'stname': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'sumlev': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.populationest90raw': {
'Meta': {'unique_together': "(('year', 'state', 'county', 'agegrp', 'race_gender', 'ethnic_origin'),)", 'object_name': 'PopulationEst90Raw'},
'agegrp': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'create_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'race_gender': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.populationfamilies': {
'Meta': {'object_name': 'PopulationFamilies'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationgendercounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationGenderCounty'},
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'female': ('django.db.models.fields.IntegerField', [], {}),
'female_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.IntegerField', [], {}),
'male_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationgenderstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationGenderState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'female': ('django.db.models.fields.IntegerField', [], {}),
'female_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.IntegerField', [], {}),
'male_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationracecounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationRaceCounty'},
'asian_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone': ('django.db.models.fields.IntegerField', [], {}),
'black_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'black_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple_race': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'white_alone': ('django.db.models.fields.IntegerField', [], {}),
'white_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'white_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationracestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationRaceState'},
'asian_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone': ('django.db.models.fields.IntegerField', [], {}),
'black_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'black_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple_race': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'white_alone': ('django.db.models.fields.IntegerField', [], {}),
'white_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'white_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.presidentsbudget': {
'Meta': {'object_name': 'PresidentsBudget'},
'account_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'agency_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'bea_category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'budget_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'bureau_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'bureau_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'grant_non_grant': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on_off_budget': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'source_category_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'source_category_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'source_subcategory_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'source_subcategory_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'subfunction_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'subfunction_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'treasury_agency_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'data.presidentsbudgetyear': {
'Meta': {'object_name': 'PresidentsBudgetYear'},
'budget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'years'", 'to': "orm['data.PresidentsBudget']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
},
'data.pupilteacherdistrict': {
'Meta': {'object_name': 'PupilTeacherDistrict'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.pupilteacherstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PupilTeacherState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ratio': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.pupilteacherstateraw': {
'Meta': {'object_name': 'PupilTeacherStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ratio': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.race': {
'Meta': {'object_name': 'Race'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'race_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'race_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.racecombo': {
'Meta': {'object_name': 'RaceCombo'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race_combo_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'race_combo_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.retireddisablednilf': {
'Meta': {'object_name': 'RetiredDisabledNilf'},
'disabled_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}),
'employed_absent': ('django.db.models.fields.IntegerField', [], {}),
'employed_at_work': ('django.db.models.fields.IntegerField', [], {}),
'employed_on_layoff': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'other_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}),
'retired_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'unemployed_looking': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.saipecountystate': {
'Meta': {'object_name': 'SaipeCountyState'},
'age_0_17_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_17_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_17_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_17_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_17_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_17_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_5_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_5_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_5_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_5_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_5_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_5_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_17_related_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_5_17_related_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_5_17_related_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_5_17_related_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_17_related_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_17_related_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'all_age_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_age_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_age_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_age_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'all_age_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'all_age_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'file_tag': ('django.db.models.fields.CharField', [], {'max_length': '22'}),
'fips_county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'fips_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'median_household_income': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'median_household_income_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'median_household_income_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state_county_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'state_postal_abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.saipeschool': {
'Meta': {'object_name': 'SaipeSchool'},
'ccd_district_id': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'district_name': ('django.db.models.fields.CharField', [], {'max_length': '65'}),
'file_stamp': ('django.db.models.fields.CharField', [], {'max_length': '21'}),
'fips_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {}),
'relevant_population': ('django.db.models.fields.IntegerField', [], {}),
'relevant_population_poverty': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schipenrollmentstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchipEnrollmentState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schipenrollmentstateraw': {
'Meta': {'object_name': 'SchipEnrollmentStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoolbreakfastparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchoolBreakfastParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoolbreakfastparticipationstateraw': {
'Meta': {'object_name': 'SchoolBreakfastParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoollunchparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchoolLunchParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoollunchparticipationstateraw': {
'Meta': {'object_name': 'SchoolLunchParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.shelterpopulation': {
'Meta': {'object_name': 'ShelterPopulation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapbenefitsrecipients': {
'Meta': {'object_name': 'SnapBenefitsRecipients'},
'county_fips': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapmonthlybenefitspersonstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapMonthlyBenefitsPersonState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapmonthlybenefitspersonstateraw': {
'Meta': {'object_name': 'SnapMonthlyBenefitsPersonStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationhouseholdsstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapParticipationHouseholdsState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationhouseholdsstateraw': {
'Meta': {'object_name': 'SnapParticipationHouseholdsStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationpeoplestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapParticipationPeopleState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationpeoplestateraw': {
'Meta': {'object_name': 'SnapParticipationPeopleStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.source': {
'Meta': {'object_name': 'Source'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'string_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'data.specialedfunding': {
'Meta': {'object_name': 'SpecialEdFunding'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.state': {
'Meta': {'object_name': 'State'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'state_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'state_ansi': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'state_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'state_gnisid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'state_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.statecompletionrate': {
'Meta': {'object_name': 'StateCompletionRate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.stategdp': {
'Meta': {'object_name': 'StateGdp'},
'component': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'component_code': ('django.db.models.fields.IntegerField', [], {}),
'fips': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'industry_code': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.stategdppre97': {
'Meta': {'object_name': 'StateGdpPre97'},
'component': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'component_code': ('django.db.models.fields.IntegerField', [], {}),
'fips': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'industry_code': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.statepostalcodes': {
'Meta': {'object_name': 'StatePostalCodes'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'data.staterenewableenergy': {
'Meta': {'object_name': 'StateRenewableEnergy'},
'fossil_coal': ('django.db.models.fields.FloatField', [], {}),
'fossil_gas': ('django.db.models.fields.FloatField', [], {}),
'fossil_oil': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nuclear_electric': ('django.db.models.fields.FloatField', [], {}),
'renewable_biofuels': ('django.db.models.fields.FloatField', [], {}),
'renewable_other': ('django.db.models.fields.FloatField', [], {}),
'renewable_total': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total': ('django.db.models.fields.FloatField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.subfunctionscffr': {
'Meta': {'object_name': 'SubfunctionsCffr'},
'at_code_1': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_2': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_3': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_4': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_5': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_6': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_7': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_8': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'cfda_program_code': ('django.db.models.fields.TextField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program_name': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'subfunction_name': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'subfunction_number': ('django.db.models.fields.TextField', [], {'max_length': '3'})
},
'data.summerlunchparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SummerLunchParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.summerlunchparticipationstateraw': {
'Meta': {'object_name': 'SummerLunchParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.tanffamilystateraw': {
'Meta': {'object_name': 'TanfFamilyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.tanfparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'TanfParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.tanfparticipationstateraw': {
'Meta': {'object_name': 'TanfParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.titleifunding': {
'Meta': {'object_name': 'TitleIFunding'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.totalstudents': {
'Meta': {'object_name': 'TotalStudents'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.usaspendingassistanceraw': {
'Meta': {'object_name': 'UsaspendingAssistanceRaw'},
'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'assistance_type': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'assistance_type_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'asst_cat_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'cfda_program_num': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'cfda_program_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'face_loan_guran': ('django.db.models.fields.BigIntegerField', [], {}),
'fed_funding_amount': ('django.db.models.fields.BigIntegerField', [], {}),
'fiscal_year': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'non_fed_funding_amount': ('django.db.models.fields.BigIntegerField', [], {}),
'orig_sub_guran': ('django.db.models.fields.BigIntegerField', [], {}),
'recip_cat_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'recip_cat_type_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'recipient_country_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'recipient_county_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'recipient_state_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'total_funding_amount': ('django.db.models.fields.BigIntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.vehicleregistrations': {
'Meta': {'object_name': 'VehicleRegistrations'},
'all_private': ('django.db.models.fields.IntegerField', [], {}),
'all_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_total': ('django.db.models.fields.IntegerField', [], {}),
'auto_private': ('django.db.models.fields.IntegerField', [], {}),
'auto_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'auto_total': ('django.db.models.fields.IntegerField', [], {}),
'buses_private': ('django.db.models.fields.IntegerField', [], {}),
'buses_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'buses_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'motorcycle_private': ('django.db.models.fields.IntegerField', [], {}),
'motorcycle_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'private_commercial_per_capita': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'trucks_private': ('django.db.models.fields.IntegerField', [], {}),
'trucks_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'trucks_total': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.vocationaledspending': {
'Meta': {'object_name': 'VocationalEdSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicbenefitsstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'WicBenefitsState'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicbenefitsstateraw': {
'Meta': {'object_name': 'WicBenefitsStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'WicParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicparticipationstateraw': {
'Meta': {'object_name': 'WicParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['data']
|
mit
| 735,446,432,755,450,200 | 79.738048 | 158 | 0.548886 | false |
amlyj/pythonStudy
|
2.7/monitor/rrdtools/createrrds.py
|
1
|
3160
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
import os
import rrdtool
import threading
import time
import fetchrrd
import updaterrd
rrd_db_base_path = os.path.dirname(
os.path.dirname(os.path.dirname(__file__))) + '/render/static/rrd/'
task_dick = {}
ipaddr_list = []
bools = True
def create_rrd(ip):
if ip == '':
return
global bools, ipaddr_list, task_dick
os.system("mkdir -p " + rrd_db_base_path + ip + "/")
create_cpu(ip)
create_disk(ip)
create_ech0(ip)
create_mem(ip)
    if ip != '' and ip in task_dick.keys():
task_dick[ip] = int(time.time())
    if ip != '' and ip not in ipaddr_list:
fetchrrd.fetch_rrd(ip)
ipaddr_list.append(ip)
task_dick[ip] = int(time.time())
if bools:
        threading.Thread(target=run).start()
        threading.Thread(target=default_timer).start()
bools = False
def run():
global ipaddr_list
while True:
for ip in ipaddr_list:
            threading.Thread(target=dowork, kwargs={'ip': ip}).start()
        time.sleep(1)
def default_timer():
global task_dick
while True:
nowTime = int(time.time())
for ip in task_dick.keys():
if nowTime - task_dick[ip] > 60 * 30:
remove_monitors(ip)
time.sleep(60 * 5)
def dowork(**kwargs):
updaterrd.update_rrd(kwargs.get('ip'))
fetchrrd.fetch_rrd(kwargs.get('ip'))
def create_cpu(ip):
cur_time = str(int(time.time()))
rrdpath = rrd_db_base_path + str(ip) + r'/cpu.rrd'
    rrd = None
    try:
        rrd = rrdtool.create(rrdpath, '--step', '1', '--start', cur_time,
                             'DS:cpu:GAUGE:2:0:U',
                             'RRA:AVERAGE:0.5:1:60')
    except Exception, e:
        # report the failure instead of swallowing it
        print e
    if rrd:
        print rrdtool.error()
def create_disk(ip):
cur_time = str(int(time.time()))
rrdpath = rrd_db_base_path + str(ip) + r'/disk.rrd'
rrd = rrdtool.create(rrdpath, '--step', '1', '--start', cur_time,
'DS:disk_t:GAUGE:2:0:U',
'DS:disk_a:GAUGE:2:0:U',
'RRA:AVERAGE:0.5:1:60')
if rrd:
print rrdtool.error()
def create_ech0(ip):
cur_time = str(int(time.time()))
rrdpath = rrd_db_base_path + str(ip) + r'/ech0.rrd'
rrd = rrdtool.create(rrdpath, '--step', '1', '--start', cur_time,
'DS:eth0_in:COUNTER:2:0:U',
'DS:eth0_out:COUNTER:2:0:U',
'RRA:AVERAGE:0.5:1:60')
if rrd:
print rrdtool.error()
def create_mem(ip):
cur_time = str(int(time.time()))
rrdpath = rrd_db_base_path + str(ip) + r'/memory.rrd'
rrd = rrdtool.create(rrdpath, '--step', '1', '--start', cur_time,
'DS:memory_t:GAUGE:2:0:U',
'DS:memory_a:GAUGE:2:0:U',
'RRA:AVERAGE:0.5:1:60')
if rrd:
print rrdtool.error()
def remove_monitors(ip):
if ip == '':
return
global ipaddr_list, task_dick
if ip in ipaddr_list:
ipaddr_list.remove(ip)
if ip in task_dick.keys():
task_dick.pop(ip)
|
mit
| 403,738,827,919,875,300 | 26.241379 | 73 | 0.531329 | false |
jreese/seinfeld
|
views/main.py
|
1
|
1344
|
# Copyright 2014 John Reese
# Licensed under the MIT license
from flask import abort
from jinja2.filters import do_capitalize
from core import app, context, get, template
from models import Quote, Passage
@get('/', 'Seinfeld Quote')
@template('index.html')
def index():
#passage = Passage(uid=37592)
passage = Passage(34663)
return {
'passage': passage,
}
@get('/about', 'About')
@template('about.html')
def about():
return {}
@get('/search', 'Search Quotes')
@template('search.html')
def search(subject=None, speaker=None):
return {
'speaker': speaker,
'subject': subject,
}
@get('/quote/<int:uid>', 'Passage')
@template('quote.html')
def quote(uid):
try:
return {
'passage': Passage(uid),
}
except (KeyError, ValueError) as e:
abort(404)
@get('/random', cache=False)
@get('/random/<subject>', cache=False)
@get('/random/subject/<subject>', cache=False)
@get('/random/speaker/<speaker>', cache=False)
@get('/random/speaker/<speaker>/<subject>', cache=False)
@template('random.html')
def random(subject=None, speaker=None):
passage = Passage.random(subject=subject, speaker=speaker)
return {
'title': speaker or subject or 'Random',
'passage': passage,
'speaker': speaker,
'subject': subject,
}
|
mit
| -254,260,297,995,363,300 | 22.578947 | 62 | 0.633185 | false |
RyanSkraba/beam
|
sdks/python/apache_beam/runners/interactive/display/pipeline_graph_renderer.py
|
1
|
3987
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""For rendering pipeline graph in HTML-compatible format.
This module is experimental. No backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import subprocess
from typing import TYPE_CHECKING
from typing import Optional
from typing import Type
from future.utils import with_metaclass
from apache_beam.utils.plugin import BeamPlugin
if TYPE_CHECKING:
from apache_beam.runners.interactive.display.pipeline_graph import PipelineGraph
class PipelineGraphRenderer(with_metaclass(abc.ABCMeta, BeamPlugin)): # type: ignore[misc]
"""Abstract class for renderers, who decide how pipeline graphs are rendered.
"""
@classmethod
@abc.abstractmethod
def option(cls):
# type: () -> str
"""The corresponding rendering option for the renderer.
"""
raise NotImplementedError
@abc.abstractmethod
def render_pipeline_graph(self, pipeline_graph):
# type: (PipelineGraph) -> str
"""Renders the pipeline graph in HTML-compatible format.
Args:
      pipeline_graph: (pipeline_graph.PipelineGraph) the graph to be rendered.
Returns:
unicode, str or bytes that can be expressed as HTML.
"""
raise NotImplementedError
class MuteRenderer(PipelineGraphRenderer):
"""Use this renderer to mute the pipeline display.
"""
@classmethod
def option(cls):
# type: () -> str
return 'mute'
def render_pipeline_graph(self, pipeline_graph):
# type: (PipelineGraph) -> str
return ''
class TextRenderer(PipelineGraphRenderer):
"""This renderer simply returns the dot representation in text format.
"""
@classmethod
def option(cls):
# type: () -> str
return 'text'
def render_pipeline_graph(self, pipeline_graph):
# type: (PipelineGraph) -> str
return pipeline_graph.get_dot()
class PydotRenderer(PipelineGraphRenderer):
"""This renderer renders the graph using pydot.
It depends on
1. The software Graphviz: https://www.graphviz.org/
2. The python module pydot: https://pypi.org/project/pydot/
"""
@classmethod
def option(cls):
# type: () -> str
return 'graph'
def render_pipeline_graph(self, pipeline_graph):
# type: (PipelineGraph) -> str
return pipeline_graph._get_graph().create_svg().decode("utf-8") # pylint: disable=protected-access
def get_renderer(option=None):
# type: (Optional[str]) -> Type[PipelineGraphRenderer]
"""Get an instance of PipelineGraphRenderer given rendering option.
Args:
option: (str) the rendering option.
Returns:
(PipelineGraphRenderer)
"""
if option is None:
if os.name == 'nt':
exists = subprocess.call(['where', 'dot.exe']) == 0
else:
exists = subprocess.call(['which', 'dot']) == 0
if exists:
option = 'graph'
else:
option = 'text'
renderer = [r for r in PipelineGraphRenderer.get_all_subclasses()
if option == r.option()]
if len(renderer) == 0:
raise ValueError()
elif len(renderer) == 1:
return renderer[0]()
else:
    raise ValueError('Found more than one renderer for option: %s' %
                     option)
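# A minimal usage sketch (illustrative only, not part of the module): pick a
# renderer explicitly and render an already-constructed pipeline graph. The
# variable `graph` is assumed to be a pipeline_graph.PipelineGraph built by
# the caller.
#
#   renderer = get_renderer('text')
#   dot_text = renderer.render_pipeline_graph(graph)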
|
apache-2.0
| -1,365,545,391,829,221,000 | 26.881119 | 103 | 0.702533 | false |
franklingu/leetcode-solutions
|
questions/utf-8-validation/Solution.py
|
1
|
2536
|
"""
A character in UTF8 can be from 1 to 4 bytes long, subjected to the following rules:
For a 1-byte character, the first bit is a 0, followed by its unicode code.
For an n-byte character, the first n bits are all ones, the (n+1)-th bit is 0, followed by n-1 bytes whose most significant 2 bits are 10.
This is how the UTF-8 encoding would work:
Char. number range | UTF-8 octet sequence
(hexadecimal) | (binary)
--------------------+---------------------------------------------
0000 0000-0000 007F | 0xxxxxxx
0000 0080-0000 07FF | 110xxxxx 10xxxxxx
0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
Given an array of integers representing the data, return whether it is a valid utf-8 encoding.
Note:
The input is an array of integers. Only the least significant 8 bits of each integer is used to store the data. This means each integer represents only 1 byte of data.
Example 1:
data = [197, 130, 1], which represents the octet sequence: 11000101 10000010 00000001.
Return true.
It is a valid utf-8 encoding for a 2-bytes character followed by a 1-byte character.
Example 2:
data = [235, 140, 4], which represents the octet sequence: 11101011 10001100 00000100.
Return false.
The first 3 bits are all one's and the 4th bit is 0 means it is a 3-bytes character.
The next byte is a continuation byte which starts with 10 and that's correct.
But the second continuation byte does not start with 10, so it is invalid.
"""
class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
num_follow = 0
for n in data:
s = bin(n)[2:].rjust(8, '0')
if s.startswith('0'):
if num_follow > 0:
return False
continue
elif s.startswith('110'):
if num_follow > 0:
return False
num_follow = 1
elif s.startswith('1110'):
if num_follow > 0:
return False
num_follow = 2
elif s.startswith('11110'):
if num_follow > 0:
return False
num_follow = 3
elif s.startswith('10'):
if num_follow == 0:
return False
num_follow -= 1
else:
return False
return num_follow == 0
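# A minimal usage sketch (not part of the original solution), exercising the
# two octet sequences from the problem statement above.
if __name__ == '__main__':
    solution = Solution()
    print(solution.validUtf8([197, 130, 1]))   # expected True
    print(solution.validUtf8([235, 140, 4]))   # expected False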
|
mit
| -9,106,393,001,833,455,000 | 31.230769 | 167 | 0.575315 | false |
runt18/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/colors.py
|
1
|
31702
|
"""
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
for k, v in cnames.items():
if k.find('gray')>=0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given a len 3 rgb tuple of 0-1 floats, return the hex string'
    return '#{0:02x}{1:02x}{2:02x}'.format(*tuple([int(round(val*255)) for val in rgb]))
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, basestring):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "{0!s}"'.format(s))
return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
class ColorConverter:
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b' : (0.0, 0.0, 1.0),
'g' : (0.0, 0.5, 0.0),
'r' : (1.0, 0.0, 0.0),
'c' : (0.0, 0.75, 0.75),
'm' : (0.75, 0, 0.75),
'y' : (0.75, 0.75, 0),
'k' : (0.0, 0.0, 0.0),
'w' : (1.0, 1.0, 1.0),
}
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a float, like '0.4', indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
try: return self.cache[arg]
except KeyError: pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try: return self.cache[arg]
except KeyError: pass
except TypeError:
raise ValueError(
'to_rgb: arg "{0!s}" is unhashable even inside a tuple'.format(str(arg)))
try:
if cbook.is_string_like(arg):
color = self.colors.get(arg, None)
if color is None:
str1 = cnames.get(arg, arg)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(arg)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = tuple([fl]*3)
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is {0:d}; must be 3 or 4'.format(len(arg)))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
raise ValueError('cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError), exc:
raise ValueError('to_rgb: Invalid rgb arg "{0!s}"\n{1!s}'.format(str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if [x for x in arg if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                        raise ValueError('number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], arg[3] * alpha
r,g,b = arg[:3]
if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
r,g,b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r,g,b,alpha
except (TypeError, ValueError), exc:
raise ValueError('to_rgba: Invalid rgba arg "{0!s}"\n{1!s}'.format(str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
if c.lower() == 'none':
return np.zeros((0,4), dtype=np.float_)
except AttributeError:
pass
if len(c) == 0:
return np.zeros((0,4), dtype=np.float_)
try:
result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
result = np.zeros((len(c), 4))
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha) # change in place
return np.asarray(result, np.float_)
colorConverter = ColorConverter()
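# A minimal usage sketch (illustrative only): converting the color
# specifications described in the module docstring with the shared
# colorConverter instance.
#
#   colorConverter.to_rgb('r')            # -> (1.0, 0.0, 0.0)
#   colorConverter.to_rgb('0.75')         # -> (0.75, 0.75, 0.75)
#   colorConverter.to_rgb('#eeefff')      # -> (0.933..., 0.937..., 1.0)
#   colorConverter.to_rgba('aqua', 0.5)   # -> (0.0, 1.0, 1.0, 0.5)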
def makeMappingArray(N, data):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
sawtooth) where y0 represents the value of y for values of x
<= to that given, and y1 is the value to be used for x > than
that given). The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
try:
adata = np.array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:,0]
y0 = adata[:,1]
y1 = adata[:,2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x)-x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N-1)
lut = np.zeros((N,), np.float)
xind = np.arange(float(N))
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
* (y0[ind] - y1[ind-1]) + y1[ind-1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = np.clip(lut, 0.0, 1.0)
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
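# A small worked example (illustrative only, not part of the original module):
# a single linear ramp from 0 to 1 sampled at N=5 points gives evenly spaced
# values, since intermediate entries are linearly interpolated.
#
#   makeMappingArray(5, [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)])
#   # -> array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ])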
class Colormap:
"""Base class for all scalar to rgb mappings
Important methods:
* :meth:`set_bad`
* :meth:`set_under`
* :meth:`set_over`
"""
def __init__(self, name, N=256):
"""
Public class attributes:
:attr:`N` : number of rgb quantization levels
:attr:`name` : name of colormap
"""
self.name = name
self.N = N
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = N
self._i_over = N+1
self._i_bad = N+2
self._isinit = False
def __call__(self, X, alpha=1.0, bytes=False):
"""
*X* is either a scalar or an array (of any dimension).
If scalar, a tuple of rgba values is returned, otherwise
an array with the new shape = oldshape+(4,). If the X-values
are integers, then they are used as indices into the array.
If they are floating point, then they must be in the
interval (0.0, 1.0).
Alpha must be a scalar.
If bytes is False, the rgba values will be floats on a
0-1 scale; if True, they will be uint8, 0-255.
"""
if not self._isinit: self._init()
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
self._lut[:-3, -1] = alpha
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.asarray(X)
xa = xma.filled(0)
mask_bad = ma.getmask(xma)
if xa.dtype.char in np.typecodes['Float']:
np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
if NP_CLIP_OUT:
np.clip(xa * self.N, -1, self.N, out=xa)
else:
xa = np.clip(xa * self.N, -1, self.N)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
np.putmask(xa, xa>self.N-1, self._i_over)
np.putmask(xa, xa<0, self._i_under)
if mask_bad is not None and mask_bad.shape == xa.shape:
np.putmask(xa, mask_bad, self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut
rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0,:])
return rgba
def set_bad(self, color = 'k', alpha = 1.0):
'''Set color to be used for masked values.
'''
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_under(self, color = 'k', alpha = 1.0):
'''Set color to be used for low out-of-range values.
Requires norm.clip = False
'''
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_over(self, color = 'k', alpha = 1.0):
'''Set color to be used for high out-of-range values.
Requires norm.clip = False
'''
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N-1]
self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
        '''Generate the lookup table, self._lut; must be overridden by subclasses.'''
        raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit: self._init()
return (np.alltrue(self._lut[:,0] == self._lut[:,1])
and np.alltrue(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:func:`makeMappingArray`
"""
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
self._isinit = True
self._set_extremes()
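# Illustrative sketch (not part of the original module): build the colormap
# from the cdict example in LinearSegmentedColormap.__init__'s docstring and
# evaluate it at the two ends and the midpoint.
def _example_linear_segmented_colormap():
    cdict = {'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
             'green': [(0.0, 0.0, 0.0), (0.25, 0.0, 0.0),
                       (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)],
             'blue':  [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)]}
    cmap = LinearSegmentedColormap('example', cdict, N=256)
    # Scalars map to single rgba tuples; values near 0 are dark, near 1 white.
    return cmap(0.0), cmap(0.5), cmap(1.0)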
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name = 'from_list', N = None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 floating point array (*N* rgb values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try: gray = float(self.colors)
except TypeError: pass
else: self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgb = np.array([colorConverter.to_rgb(c)
for c in self.colors], np.float)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3, :-1] = rgb
self._lut[:-3, -1] = 1
self._isinit = True
self._set_extremes()
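# Illustrative sketch (not part of the original module): a three-entry listed
# colormap; integer inputs index directly into the lookup table, which is the
# "indexing directly into a colormap" use mentioned in the class docstring.
def _example_listed_colormap():
    cmap = ListedColormap(['r', 'g', 'b'], name='rgb3')
    return cmap(0), cmap(1), cmap(2)   # red, green, blue rgba tuples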
class Normalize:
"""
Normalize a given value to the 0-1 range
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
self.vmin = ma.minimum(A)
self.vmax = ma.maximum(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None: self.vmin = ma.minimum(A)
if self.vmax is None: self.vmax = ma.maximum(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
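# Illustrative sketch (not part of the original module): Normalize maps data
# linearly onto the 0-1 interval; with vmin/vmax left as None they are taken
# from the data by autoscale_None on the first call.
def _example_normalize():
    norm = Normalize()
    scaled = norm([0.0, 5.0, 10.0])   # masked array [0.0, 0.5, 1.0]
    original = norm.inverse(0.5)      # 5.0
    return scaled, original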
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin<=0:
raise ValueError("values must all be positive")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax/vmin), val)
else:
return vmin * pow((vmax/vmin), value)
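# Illustrative sketch (not part of the original module): LogNorm maps values
# logarithmically, so each decade between vmin and vmax occupies an equal
# share of the 0-1 interval.
def _example_lognorm():
    norm = LogNorm(vmin=1.0, vmax=1000.0)
    return norm([1.0, 10.0, 100.0, 1000.0])   # [0.0, 1/3, 2/3, 1.0]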
class BoundaryNorm(Normalize):
'''
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
'''
def __init__(self, boundaries, ncolors, clip=False):
'''
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
'''
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N-1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax+1)
if clip:
            xx = np.clip(xx, self.vmin, self.vmax)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx>=b] = i
if self._interp:
iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
iret[xx<self.vmin] = -1
iret[xx>=self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
        raise ValueError("BoundaryNorm is not invertible")
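# Illustrative sketch (not part of the original module): BoundaryNorm maps
# values to integer colormap indices; with len(boundaries)-1 == ncolors each
# interval gets its own index, and values at or above the last boundary map
# to ncolors (handled later by Colormap.__call__ as the "over" color).
def _example_boundary_norm():
    norm = BoundaryNorm([0.0, 1.0, 2.0, 4.0], ncolors=3)
    return norm([0.5, 1.5, 3.0, 5.0])   # masked array [0, 1, 2, 3]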
class NoNorm(Normalize):
'''
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
'''
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 | 257,668,737,263,501,540 | 35.065984 | 95 | 0.518106 | false |