repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
gpanders/Janitor | src/pushbroom/console.py | 1 | 4525 |
"""
Pushbroom entry point
"""
import argparse
import configparser
import fnmatch
import logging
import os
import re
import sys
from pathlib import Path
from typing import Dict
from pushbroom import __version__, sweep
def run() -> None:
"""Main entry point"""
args = parse_args()
setup_logging(args)
config = read_config(args.config)
pushbroom(config, args.dry_run)
def pushbroom(config: configparser.ConfigParser, dry_run: bool = False) -> None:
"""Run pushbroom"""
logging.info("Starting pushbroom")
for section in config.sections():
path = config.get(section, "path")
fullpath = Path(path).expanduser().absolute()
if not fullpath.is_dir():
logging.error("No such directory: %s", fullpath)
else:
opts = parse_opts(config, section)
sweep(section, fullpath, opts, dry_run)
def parse_args() -> argparse.Namespace:
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description="Clean up your filesystem.")
parser.add_argument("-c", "--config", type=str, help="path to config file")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose output")
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s {version}".format(version=__version__),
)
parser.add_argument(
"-n",
"--dry-run",
action="store_true",
help="show what would be done without actually doing anything",
)
return parser.parse_args()
def setup_logging(args: argparse.Namespace) -> None:
"""Set up logging"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter("%(message)s"))
stream_handler.setLevel(logging.ERROR)
if not args.dry_run:
# If not doing a dry run, log to a file
log_file = (
Path(os.environ.get("XDG_CACHE_HOME", Path("~/.cache").expanduser()))
.joinpath("pushbroom")
.joinpath("pushbroom.log")
)
log_file.parent.mkdir(parents=True, exist_ok=True)
file_handler = logging.FileHandler(str(log_file))
fmt = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
file_handler.setFormatter(fmt)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
if args.verbose or args.dry_run:
# If verbose or doing a dry run print info to console
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
def read_config(conf_file: Path = None) -> configparser.ConfigParser:
"""Find and read configuration file"""
if not conf_file:
# Look under XDG_CONFIG_HOME first, then for /etc/pushbroom/pushbroom.conf
conf_file = (
Path(os.environ.get("XDG_CONFIG_HOME", Path("~/.config").expanduser()))
.joinpath("pushbroom")
.joinpath("config")
)
if not conf_file.exists():
conf_file = Path("/etc/pushbroom/pushbroom.conf")
config = configparser.ConfigParser()
try:
with conf_file.open() as fil:
config.read_file(fil)
except FileNotFoundError:
logging.error("Configuration file %s not found", conf_file)
sys.exit(1)
return config
def parse_opts(config: configparser.ConfigParser, section: str) -> Dict:
num_days = config.getint(section, "numdays")
trash_dir = config.get(section, "trash", fallback=None)
ignore = config.get(section, "ignore", fallback="").split(",")
ignore_re = re.compile("|".join([fnmatch.translate(x) for x in ignore]))
match = config.get(section, "match", fallback="*").split(",")
match_re = re.compile("|".join([fnmatch.translate(x) for x in match]))
shred = config.getboolean(section, "shred", fallback=False)
remove_empty = config.getboolean(section, "removeempty", fallback=True)
trash = None
if trash_dir:
if shred:
logging.warning("Ignoring 'Shred' option while 'Trash' is set")
shred = False
trash = Path(trash_dir).expanduser().absolute()
if not trash.is_dir():
logging.warning("Creating directory %s", trash)
trash.mkdir(parents=True)
return {
"num_days": num_days,
"ignore": ignore_re,
"match": match_re,
"trash": trash,
"shred": shred,
"remove_empty": remove_empty,
}
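# For orientation, a minimal config sketch consistent with parse_opts above
# (option names come from the code; the section name and values are illustrative):
#
#   [Downloads]
#   Path = ~/Downloads
#   NumDays = 30
#   Trash = ~/.local/share/Trash/files
#   Ignore = *.part,*.crdownload
#   RemoveEmpty = false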
| mit | -6,263,718,687,116,045,000 | 31.553957 | 86 | 0.62232 | false |
tombstone/models | research/skip_thoughts/skip_thoughts/vocabulary_expansion.py | 1 | 7375 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute an expanded vocabulary of embeddings using a word2vec model.
This script loads the word embeddings from a trained skip-thoughts model and
from a trained word2vec model (typically with a larger vocabulary). It trains a
linear regression model without regularization to learn a linear mapping from
the word2vec embedding space to the skip-thoughts embedding space. The model is
then applied to all words in the word2vec vocabulary, yielding vectors in the
skip-thoughts word embedding space for the union of the two vocabularies.
The linear regression task is to learn a parameter matrix W to minimize
|| X - Y * W ||^2,
where X is a matrix of skip-thoughts embeddings of shape [num_words, dim1],
Y is a matrix of word2vec embeddings of shape [num_words, dim2], and W is a
matrix of shape [dim2, dim1].
This is based on the "Translation Matrix" method from the paper:
"Exploiting Similarities among Languages for Machine Translation"
Tomas Mikolov, Quoc V. Le, Ilya Sutskever
https://arxiv.org/abs/1309.4168
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import gensim.models
import numpy as np
import sklearn.linear_model
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("skip_thoughts_model", None,
"Checkpoint file or directory containing a checkpoint "
"file.")
tf.flags.DEFINE_string("skip_thoughts_vocab", None,
"Path to vocabulary file containing a list of newline-"
"separated words where the word id is the "
"corresponding 0-based index in the file.")
tf.flags.DEFINE_string("word2vec_model", None,
"File containing a word2vec model in binary format.")
tf.flags.DEFINE_string("output_dir", None, "Output directory.")
tf.logging.set_verbosity(tf.logging.INFO)
def _load_skip_thoughts_embeddings(checkpoint_path):
"""Loads the embedding matrix from a skip-thoughts model checkpoint.
Args:
checkpoint_path: Model checkpoint file or directory containing a checkpoint
file.
Returns:
word_embedding: A numpy array of shape [vocab_size, embedding_dim].
Raises:
ValueError: If no checkpoint file matches checkpoint_path.
"""
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_file = tf.train.latest_checkpoint(checkpoint_path)
if not checkpoint_file:
raise ValueError("No checkpoint file found in %s" % checkpoint_path)
else:
checkpoint_file = checkpoint_path
tf.logging.info("Loading skip-thoughts embedding matrix from %s",
checkpoint_file)
reader = tf.train.NewCheckpointReader(checkpoint_file)
word_embedding = reader.get_tensor("word_embedding")
tf.logging.info("Loaded skip-thoughts embedding matrix of shape %s",
word_embedding.shape)
return word_embedding
def _load_vocabulary(filename):
"""Loads a vocabulary file.
Args:
filename: Path to text file containing newline-separated words.
Returns:
vocab: A dictionary mapping word to word id.
"""
tf.logging.info("Reading vocabulary from %s", filename)
vocab = collections.OrderedDict()
with tf.gfile.GFile(filename, mode="rb") as f:
for i, line in enumerate(f):
word = line.decode("utf-8").strip()
assert word not in vocab, "Attempting to add word twice: %s" % word
vocab[word] = i
tf.logging.info("Read vocabulary of size %d", len(vocab))
return vocab
def _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab, word2vec):
"""Runs vocabulary expansion on a skip-thoughts model using a word2vec model.
Args:
skip_thoughts_emb: A numpy array of shape [skip_thoughts_vocab_size,
skip_thoughts_embedding_dim].
skip_thoughts_vocab: A dictionary of word to id.
word2vec: An instance of gensim.models.Word2Vec.
Returns:
combined_emb: A dictionary mapping words to embedding vectors.
"""
# Find words shared between the two vocabularies.
tf.logging.info("Finding shared words")
shared_words = [w for w in word2vec.vocab if w in skip_thoughts_vocab]
# Select embedding vectors for shared words.
tf.logging.info("Selecting embeddings for %d shared words", len(shared_words))
shared_st_emb = skip_thoughts_emb[[
skip_thoughts_vocab[w] for w in shared_words
]]
shared_w2v_emb = word2vec[shared_words]
# Train a linear regression model on the shared embedding vectors.
tf.logging.info("Training linear regression model")
model = sklearn.linear_model.LinearRegression()
model.fit(shared_w2v_emb, shared_st_emb)
# Create the expanded vocabulary.
tf.logging.info("Creating embeddings for expanded vocabuary")
combined_emb = collections.OrderedDict()
for w in word2vec.vocab:
# Ignore words with underscores (spaces).
if "_" not in w:
w_emb = model.predict(word2vec[w].reshape(1, -1))
combined_emb[w] = w_emb.reshape(-1)
for w in skip_thoughts_vocab:
combined_emb[w] = skip_thoughts_emb[skip_thoughts_vocab[w]]
tf.logging.info("Created expanded vocabulary of %d words", len(combined_emb))
return combined_emb
def main(unused_argv):
if not FLAGS.skip_thoughts_model:
raise ValueError("--skip_thoughts_model is required.")
if not FLAGS.skip_thoughts_vocab:
raise ValueError("--skip_thoughts_vocab is required.")
if not FLAGS.word2vec_model:
raise ValueError("--word2vec_model is required.")
if not FLAGS.output_dir:
raise ValueError("--output_dir is required.")
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
# Load the skip-thoughts embeddings and vocabulary.
skip_thoughts_emb = _load_skip_thoughts_embeddings(FLAGS.skip_thoughts_model)
skip_thoughts_vocab = _load_vocabulary(FLAGS.skip_thoughts_vocab)
# Load the Word2Vec model.
word2vec = gensim.models.KeyedVectors.load_word2vec_format(
FLAGS.word2vec_model, binary=True)
# Run vocabulary expansion.
embedding_map = _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab,
word2vec)
# Save the output.
vocab = embedding_map.keys()
vocab_file = os.path.join(FLAGS.output_dir, "vocab.txt")
with tf.gfile.GFile(vocab_file, "w") as f:
f.write("\n".join(vocab))
tf.logging.info("Wrote vocabulary file to %s", vocab_file)
embeddings = np.array(list(embedding_map.values()))  # list() keeps this correct on Python 3
embeddings_file = os.path.join(FLAGS.output_dir, "embeddings.npy")
np.save(embeddings_file, embeddings)
tf.logging.info("Wrote embeddings file to %s", embeddings_file)
if __name__ == "__main__":
tf.app.run()
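# An illustrative invocation of this script (all paths are made up; the flag
# names are the ones defined above):
#
#   python vocabulary_expansion.py \
#     --skip_thoughts_model=/tmp/skip_thoughts/model.ckpt-500008 \
#     --skip_thoughts_vocab=/tmp/skip_thoughts/vocab.txt \
#     --word2vec_model=/tmp/word2vec/GoogleNews-vectors-negative300.bin \
#     --output_dir=/tmp/skip_thoughts/expanded_vocab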
| apache-2.0 | -7,316,208,268,084,766,000 | 35.330049 | 80 | 0.70739 | false |
liosha2007/temporary-groupdocs-python-sdk | groupdocs/models/AddCollaboratorResponse.py | 1 | 1165 |
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class AddCollaboratorResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'SetCollaboratorsResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'long'
}
self.result = None # SetCollaboratorsResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # long
| apache-2.0 | -8,842,829,589,295,494,000 | 29.657895 | 77 | 0.649785 | false |
shoyer/xray | xarray/core/utils.py | 1 | 18865 |
"""Internal utilties; not for external use
"""
import contextlib
import functools
import itertools
import os.path
import re
import warnings
from collections import OrderedDict
from typing import (
AbstractSet, Any, Callable, Container, Dict, Hashable, Iterable, Iterator,
Mapping, MutableMapping, MutableSet, Optional, Sequence, Tuple, TypeVar,
cast)
import numpy as np
import pandas as pd
from .pycompat import dask_array_type
try: # Fix typed collections in Python 3.5.0~3.5.2
from .pycompat import Mapping, MutableMapping, MutableSet # noqa: F811
except ImportError:
pass
K = TypeVar('K')
V = TypeVar('V')
T = TypeVar('T')
def _check_inplace(inplace: Optional[bool], default: bool = False) -> bool:
if inplace is None:
inplace = default
else:
warnings.warn('The inplace argument has been deprecated and will be '
'removed in a future version of xarray.',
FutureWarning, stacklevel=3)
return inplace
def alias_message(old_name: str, new_name: str) -> str:
return '%s has been deprecated. Use %s instead.' % (old_name, new_name)
def alias_warning(old_name: str, new_name: str, stacklevel: int = 3) -> None:
warnings.warn(alias_message(old_name, new_name), FutureWarning,
stacklevel=stacklevel)
def alias(obj: Callable[..., T], old_name: str) -> Callable[..., T]:
assert isinstance(old_name, str)
@functools.wraps(obj)
def wrapper(*args, **kwargs):
alias_warning(old_name, obj.__name__)
return obj(*args, **kwargs)
wrapper.__doc__ = alias_message(old_name, obj.__name__)
return wrapper
def _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index:
from ..coding.cftimeindex import CFTimeIndex
if len(index) > 0 and index.dtype == 'O':
try:
return CFTimeIndex(index)
except (ImportError, TypeError):
return index
else:
return index
def safe_cast_to_index(array: Any) -> pd.Index:
"""Given an array, safely cast it to a pandas.Index.
If it is already a pandas.Index, return it unchanged.
Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,
this function will not attempt to do automatic type conversion but will
always return an index with dtype=object.
"""
if isinstance(array, pd.Index):
index = array
elif hasattr(array, 'to_index'):
index = array.to_index()
else:
kwargs = {}
if hasattr(array, 'dtype') and array.dtype.kind == 'O':
kwargs['dtype'] = object
index = pd.Index(np.asarray(array), **kwargs)
return _maybe_cast_to_cftimeindex(index)
def multiindex_from_product_levels(levels: Sequence[pd.Index],
names: Optional[Sequence[str]] = None
) -> pd.MultiIndex:
"""Creating a MultiIndex from a product without refactorizing levels.
Keeping levels the same gives back the original labels when we unstack.
Parameters
----------
levels : sequence of pd.Index
Values for each MultiIndex level.
names : optional sequence of objects
Names for each level.
Returns
-------
pandas.MultiIndex
"""
if any(not isinstance(lev, pd.Index) for lev in levels):
raise TypeError('levels must be a list of pd.Index objects')
split_labels, levels = zip(*[lev.factorize() for lev in levels])
labels_mesh = np.meshgrid(*split_labels, indexing='ij')
labels = [x.ravel() for x in labels_mesh]
return pd.MultiIndex(levels, labels, sortorder=0, names=names)
def maybe_wrap_array(original, new_array):
"""Wrap a transformed array with __array_wrap__ is it can be done safely.
This lets us treat arbitrary functions that take and return ndarray objects
like ufuncs, as long as they return an array with the same shape.
"""
# in case func lost array's metadata
if isinstance(new_array, np.ndarray) and new_array.shape == original.shape:
return original.__array_wrap__(new_array)
else:
return new_array
def equivalent(first: T, second: T) -> bool:
"""Compare two objects for equivalence (identity or equality), using
array_equiv if either object is an ndarray
"""
# TODO: refactor to avoid circular import
from . import duck_array_ops
if isinstance(first, np.ndarray) or isinstance(second, np.ndarray):
return duck_array_ops.array_equiv(first, second)
else:
return ((first is second) or
(first == second) or
(pd.isnull(first) and pd.isnull(second)))
def peek_at(iterable: Iterable[T]) -> Tuple[T, Iterator[T]]:
"""Returns the first value from iterable, as well as a new iterator with
the same content as the original iterable
"""
gen = iter(iterable)
peek = next(gen)
return peek, itertools.chain([peek], gen)
def update_safety_check(first_dict: MutableMapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent) -> None:
"""Check the safety of updating one dictionary with another.
Raises ValueError if dictionaries have non-compatible values for any key,
where compatibility is determined by identity (they are the same item) or
the `compat` function.
Parameters
----------
first_dict, second_dict : dict-like
All items in the second dictionary are checked for conflicts
against items in the first dictionary.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k, v in second_dict.items():
if k in first_dict and not compat(v, first_dict[k]):
raise ValueError('unsafe to merge dictionaries without '
'overriding values; conflicting key %r' % k)
def remove_incompatible_items(first_dict: MutableMapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent
) -> None:
"""Remove incompatible items from the first dictionary in-place.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k in list(first_dict):
if k not in second_dict or not compat(first_dict[k], second_dict[k]):
del first_dict[k]
def is_dict_like(value: Any) -> bool:
return hasattr(value, 'keys') and hasattr(value, '__getitem__')
def is_full_slice(value: Any) -> bool:
return isinstance(value, slice) and value == slice(None)
def either_dict_or_kwargs(pos_kwargs: Optional[Mapping[Hashable, T]],
kw_kwargs: Mapping[str, T],
func_name: str
) -> Mapping[Hashable, T]:
if pos_kwargs is not None:
if not is_dict_like(pos_kwargs):
raise ValueError('the first argument to .%s must be a dictionary'
% func_name)
if kw_kwargs:
raise ValueError('cannot specify both keyword and positional '
'arguments to .%s' % func_name)
return pos_kwargs
else:
# Need an explicit cast to appease mypy due to invariance; see
# https://github.com/python/mypy/issues/6228
return cast(Mapping[Hashable, T], kw_kwargs)
def is_scalar(value: Any) -> bool:
"""Whether to treat a value as a scalar.
Any non-iterable, string, or 0-D array
"""
return (
getattr(value, 'ndim', None) == 0 or
isinstance(value, (str, bytes)) or not
isinstance(value, (Iterable, ) + dask_array_type))
def is_valid_numpy_dtype(dtype: Any) -> bool:
try:
np.dtype(dtype)
except (TypeError, ValueError):
return False
else:
return True
def to_0d_object_array(value: Any) -> np.ndarray:
"""Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.
"""
result = np.empty((), dtype=object)
result[()] = value
return result
def to_0d_array(value: Any) -> np.ndarray:
"""Given a value, wrap it in a 0-D numpy.ndarray.
"""
if np.isscalar(value) or (isinstance(value, np.ndarray) and
value.ndim == 0):
return np.array(value)
else:
return to_0d_object_array(value)
def dict_equiv(first: Mapping[K, V], second: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent) -> bool:
"""Test equivalence of two dict-like objects. If any of the values are
numpy arrays, compare them correctly.
Parameters
----------
first, second : dict-like
Dictionaries to compare for equality
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
equals : bool
True if the dictionaries are equal
"""
for k in first:
if k not in second or not compat(first[k], second[k]):
return False
for k in second:
if k not in first:
return False
return True
def ordered_dict_intersection(first_dict: Mapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent
) -> MutableMapping[K, V]:
"""Return the intersection of two dictionaries as a new OrderedDict.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
intersection : OrderedDict
Intersection of the contents.
"""
new_dict = OrderedDict(first_dict)
remove_incompatible_items(new_dict, second_dict, compat)
return new_dict
class SingleSlotPickleMixin:
"""Mixin class to add the ability to pickle objects whose state is defined
by a single __slots__ attribute. Only necessary under Python 2.
"""
def __getstate__(self):
return getattr(self, self.__slots__[0])
def __setstate__(self, state):
setattr(self, self.__slots__[0], state)
class Frozen(Mapping[K, V], SingleSlotPickleMixin):
"""Wrapper around an object implementing the mapping interface to make it
immutable. If you really want to modify the mapping, the mutable version is
saved under the `mapping` attribute.
"""
__slots__ = ['mapping']
def __init__(self, mapping: Mapping[K, V]):
self.mapping = mapping
def __getitem__(self, key: K) -> V:
return self.mapping[key]
def __iter__(self) -> Iterator[K]:
return iter(self.mapping)
def __len__(self) -> int:
return len(self.mapping)
def __contains__(self, key: object) -> bool:
return key in self.mapping
def __repr__(self) -> str:
return '%s(%r)' % (type(self).__name__, self.mapping)
def FrozenOrderedDict(*args, **kwargs) -> Frozen:
return Frozen(OrderedDict(*args, **kwargs))
class SortedKeysDict(MutableMapping[K, V], SingleSlotPickleMixin):
"""An wrapper for dictionary-like objects that always iterates over its
items in sorted order by key but is otherwise equivalent to the underlying
mapping.
"""
__slots__ = ['mapping']
def __init__(self, mapping: Optional[MutableMapping[K, V]] = None):
self.mapping = {} if mapping is None else mapping
def __getitem__(self, key: K) -> V:
return self.mapping[key]
def __setitem__(self, key: K, value: V) -> None:
self.mapping[key] = value
def __delitem__(self, key: K) -> None:
del self.mapping[key]
def __iter__(self) -> Iterator[K]:
return iter(sorted(self.mapping))
def __len__(self) -> int:
return len(self.mapping)
def __contains__(self, key: object) -> bool:
return key in self.mapping
def __repr__(self) -> str:
return '%s(%r)' % (type(self).__name__, self.mapping)
class OrderedSet(MutableSet[T]):
"""A simple ordered set.
The API matches the builtin set, but it preserves insertion order of
elements, like an OrderedDict.
"""
def __init__(self, values: Optional[AbstractSet[T]] = None):
self._ordered_dict = OrderedDict() # type: MutableMapping[T, None]
if values is not None:
# Disable type checking - both mypy and PyCharm believes that
# we're altering the type of self in place (see signature of
# MutableSet.__ior__)
self |= values # type: ignore
# Required methods for MutableSet
def __contains__(self, value: object) -> bool:
return value in self._ordered_dict
def __iter__(self) -> Iterator[T]:
return iter(self._ordered_dict)
def __len__(self) -> int:
return len(self._ordered_dict)
def add(self, value: T) -> None:
self._ordered_dict[value] = None
def discard(self, value: T) -> None:
del self._ordered_dict[value]
# Additional methods
def update(self, values: AbstractSet[T]) -> None:
# See comment on __init__ re. type checking
self |= values # type: ignore
def __repr__(self) -> str:
return '%s(%r)' % (type(self).__name__, list(self))
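# A quick illustrative check of OrderedSet's insertion-order semantics (not part
# of the original module):
#
#   s = OrderedSet()
#   s.add('b'); s.add('a'); s.add('b')
#   list(s)  # -> ['b', 'a']; duplicates are ignored, first-seen order is kept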
class NdimSizeLenMixin:
"""Mixin class that extends a class that defines a ``shape`` property to
one that also defines ``ndim``, ``size`` and ``__len__``.
"""
@property
def ndim(self: Any) -> int:
return len(self.shape)
@property
def size(self: Any) -> int:
# cast to int so that shape = () gives size = 1
return int(np.prod(self.shape))
def __len__(self: Any) -> int:
try:
return self.shape[0]
except IndexError:
raise TypeError('len() of unsized object')
class NDArrayMixin(NdimSizeLenMixin):
"""Mixin class for making wrappers of N-dimensional arrays that conform to
the ndarray interface required for the data argument to Variable objects.
A subclass should set the `array` property and override one or more of
`dtype`, `shape` and `__getitem__`.
"""
@property
def dtype(self: Any) -> np.dtype:
return self.array.dtype
@property
def shape(self: Any) -> Tuple[int]:
return self.array.shape
def __getitem__(self: Any, key):
return self.array[key]
def __repr__(self: Any) -> str:
return '%s(array=%r)' % (type(self).__name__, self.array)
class ReprObject:
"""Object that prints as the given value, for use with sentinel values.
"""
def __init__(self, value: str):
self._value = value
def __repr__(self) -> str:
return self._value
@contextlib.contextmanager
def close_on_error(f):
"""Context manager to ensure that a file opened by xarray is closed if an
exception is raised before the user sees the file object.
"""
try:
yield
except Exception:
f.close()
raise
def is_remote_uri(path: str) -> bool:
return bool(re.search(r'^https?\://', path))
def is_grib_path(path: str) -> bool:
_, ext = os.path.splitext(path)
return ext in ['.grib', '.grb', '.grib2', '.grb2']
def is_uniform_spaced(arr, **kwargs) -> bool:
"""Return True if values of an array are uniformly spaced and sorted.
>>> is_uniform_spaced(range(5))
True
>>> is_uniform_spaced([-4, 0, 100])
False
kwargs are additional arguments to ``np.isclose``
"""
arr = np.array(arr, dtype=float)
diffs = np.diff(arr)
return bool(np.isclose(diffs.min(), diffs.max(), **kwargs))
def hashable(v: Any) -> bool:
"""Determine whether `v` can be hashed.
"""
try:
hash(v)
except TypeError:
return False
return True
def not_implemented(*args, **kwargs):
return NotImplemented
def decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]:
"""Convert attribute values from numpy objects to native Python objects,
for use in to_dict
"""
attrs = dict(attrs)
for k, v in attrs.items():
if isinstance(v, np.ndarray):
attrs[k] = v.tolist()
elif isinstance(v, np.generic):
attrs[k] = v.item()
return attrs
def ensure_us_time_resolution(val):
"""Convert val out of numpy time, for use in to_dict.
Needed because of numpy bug GH#7619"""
if np.issubdtype(val.dtype, np.datetime64):
val = val.astype('datetime64[us]')
elif np.issubdtype(val.dtype, np.timedelta64):
val = val.astype('timedelta64[us]')
return val
class HiddenKeyDict(MutableMapping[K, V]):
"""Acts like a normal dictionary, but hides certain keys.
"""
# ``__init__`` method required to create instance from class.
def __init__(self, data: MutableMapping[K, V], hidden_keys: Iterable[K]):
self._data = data
self._hidden_keys = frozenset(hidden_keys)
def _raise_if_hidden(self, key: K) -> None:
if key in self._hidden_keys:
raise KeyError('Key `%r` is hidden.' % key)
# The next five methods are requirements of the ABC.
def __setitem__(self, key: K, value: V) -> None:
self._raise_if_hidden(key)
self._data[key] = value
def __getitem__(self, key: K) -> V:
self._raise_if_hidden(key)
return self._data[key]
def __delitem__(self, key: K) -> None:
self._raise_if_hidden(key)
del self._data[key]
def __iter__(self) -> Iterator[K]:
for k in self._data:
if k not in self._hidden_keys:
yield k
def __len__(self) -> int:
num_hidden = len(self._hidden_keys & self._data.keys())
return len(self._data) - num_hidden
def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable:
""" Get an new dimension name based on new_dim, that is not used in dims.
If the same name exists, we add an underscore(s) in the head.
Example1:
dims: ['a', 'b', 'c']
new_dim: ['_rolling']
-> ['_rolling']
Example2:
dims: ['a', 'b', 'c', '_rolling']
new_dim: ['_rolling']
-> ['__rolling']
"""
while new_dim in dims:
new_dim = '_' + str(new_dim)
return new_dim
| apache-2.0 | -7,918,667,577,346,718,000 | 29.825163 | 79 | 0.608958 | false |
cyrta/sandbox | ml/pytorch/two_layer_net_custom_function.py | 1 | 2350 |
# Code in file autograd/two_layer_net_custom_function.py
import torch
from torch.autograd import Variable
class MyReLU(torch.autograd.Function):
"""
We can implement our own custom autograd Functions by subclassing
torch.autograd.Function and implementing the forward and backward passes
which operate on Tensors.
"""
def forward(self, input):
"""
In the forward pass we receive a Tensor containing the input and return a
Tensor containing the output. You can cache arbitrary Tensors for use in the
backward pass using the save_for_backward method.
"""
self.save_for_backward(input)
return input.clamp(min=0)
def backward(self, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
input, = self.saved_tensors
grad_input = grad_output.clone()
grad_input[input < 0] = 0
return grad_input
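# For reference only: newer PyTorch releases (>= 0.4) write the same custom op
# with static methods and a ctx object, and call it via ModernReLU.apply(x). A
# minimal sketch of that style (the legacy instance-method API is kept below):
#
# class ModernReLU(torch.autograd.Function):
#     @staticmethod
#     def forward(ctx, input):
#         ctx.save_for_backward(input)
#         return input.clamp(min=0)
#
#     @staticmethod
#     def backward(ctx, grad_output):
#         input, = ctx.saved_tensors
#         grad_input = grad_output.clone()
#         grad_input[input < 0] = 0
#         return grad_input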
dtype = torch.FloatTensor
# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold input and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)
y = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)
# Create random Tensors for weights, and wrap them in Variables.
w1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True)
w2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)
learning_rate = 1e-6
for t in range(500):
# Construct an instance of our MyReLU class to use in our network
relu = MyReLU()
# Forward pass: compute predicted y using operations on Variables; we compute
# ReLU using our custom autograd operation.
y_pred = relu(x.mm(w1)).mm(w2)
# Compute and print loss
loss = (y_pred - y).pow(2).sum()
print(t, loss.data[0])
# Manually zero the gradients before running the backward pass
w1.grad.data.zero_()
w2.grad.data.zero_()
# Use autograd to compute the backward pass.
loss.backward()
# Update weights using gradient descent
w1.data -= learning_rate * w1.grad.data
w2.data -= learning_rate * w2.grad.data
| unlicense | -2,534,284,823,519,713,300 | 33.057971 | 80 | 0.712766 | false |
boegel/easybuild-easyblocks | easybuild/easyblocks/e/esmf.py | 1 | 4996 |
##
# Copyright 2013-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ESMF, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Damian Alvarez (Forschungszentrum Juelich GmbH)
"""
import os
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_ESMF(ConfigureMake):
"""Support for building/installing ESMF."""
def configure_step(self):
"""Custom configuration procedure for ESMF through environment variables."""
env.setvar('ESMF_DIR', self.cfg['start_dir'])
env.setvar('ESMF_INSTALL_PREFIX', self.installdir)
env.setvar('ESMF_INSTALL_BINDIR', 'bin')
env.setvar('ESMF_INSTALL_LIBDIR', 'lib')
env.setvar('ESMF_INSTALL_MODDIR', 'mod')
# specify compiler
comp_family = self.toolchain.comp_family()
if comp_family in [toolchain.GCC]:
compiler = 'gfortran'
else:
compiler = comp_family.lower()
env.setvar('ESMF_COMPILER', compiler)
# specify MPI communications library
comm = None
mpi_family = self.toolchain.mpi_family()
if mpi_family in [toolchain.MPICH, toolchain.QLOGICMPI]:
# MPICH family for MPICH v3.x, which is MPICH2 compatible
comm = 'mpich2'
else:
comm = mpi_family.lower()
env.setvar('ESMF_COMM', comm)
# specify decent LAPACK lib
env.setvar('ESMF_LAPACK', 'user')
ldflags = os.getenv('LDFLAGS')
liblapack = os.getenv('LIBLAPACK_MT') or os.getenv('LIBLAPACK')
if liblapack is None:
raise EasyBuildError("$LIBLAPACK(_MT) not defined, no BLAS/LAPACK in %s toolchain?", self.toolchain.name)
else:
env.setvar('ESMF_LAPACK_LIBS', ldflags + ' ' + liblapack)
# specify netCDF
netcdf = get_software_root('netCDF')
if netcdf:
if LooseVersion(self.version) >= LooseVersion('7.1.0'):
env.setvar('ESMF_NETCDF', 'nc-config')
else:
env.setvar('ESMF_NETCDF', 'user')
netcdf_libs = ['-L%s/lib' % netcdf, '-lnetcdf']
# Fortran
netcdff = get_software_root('netCDF-Fortran')
if netcdff:
netcdf_libs = ["-L%s/lib" % netcdff] + netcdf_libs + ["-lnetcdff"]
else:
netcdf_libs.append('-lnetcdff')
# C++
netcdfcxx = get_software_root('netCDF-C++')
if netcdfcxx:
netcdf_libs = ["-L%s/lib" % netcdfcxx] + netcdf_libs + ["-lnetcdf_c++"]
else:
netcdfcxx = get_software_root('netCDF-C++4')
if netcdfcxx:
netcdf_libs = ["-L%s/lib" % netcdfcxx] + netcdf_libs + ["-lnetcdf_c++4"]
else:
netcdf_libs.append('-lnetcdf_c++')
env.setvar('ESMF_NETCDF_LIBS', ' '.join(netcdf_libs))
# 'make info' provides useful debug info
cmd = "make info"
run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def sanity_check_step(self):
"""Custom sanity check for ESMF."""
binaries = ['ESMF_Info', 'ESMF_InfoC', 'ESMF_RegridWeightGen', 'ESMF_WebServController']
libs = ['libesmf.a', 'libesmf.%s' % get_shared_lib_ext()]
custom_paths = {
'files': [os.path.join('bin', x) for x in binaries] + [os.path.join('lib', x) for x in libs],
'dirs': ['include', 'mod'],
}
super(EB_ESMF, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 | -1,994,086,380,646,885,400 | 39.290323 | 117 | 0.619295 | false |
korepwx/tfsnippet | tfsnippet/examples/utils/jsonutils.py | 1 | 5598 |
import json
from base64 import b64encode, b64decode
from datetime import datetime
import numpy as np
import six
__all__ = [
'JsonBinary', 'JsonEncoder', 'JsonDecoder',
]
class JsonBinary(object):
"""
Wrapper class for binary objects.
In Python2, ordinary strings are binary strings, thus we cannot encode
the binary strings into base64 strings directly. In this case, one
may explicitly wrap such a binary string in this class to inform the
encoder.
Args:
value (bytes): The wrapped binary object.
"""
def __init__(self, value):
if not isinstance(value, six.binary_type):
raise TypeError('`value` is not a binary object.')
self.value = value
def __repr__(self):
return 'JsonBinary(%r)' % (self.value,)
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return isinstance(other, JsonBinary) and self.value == other.value
def __ne__(self, other):
return isinstance(other, JsonBinary) and self.value != other.value
def __lt__(self, other):
return isinstance(other, JsonBinary) and self.value < other.value
def __le__(self, other):
return isinstance(other, JsonBinary) and self.value <= other.value
def __gt__(self, other):
return isinstance(other, JsonBinary) and self.value > other.value
def __ge__(self, other):
return isinstance(other, JsonBinary) and self.value >= other.value
class JsonEncoder(json.JSONEncoder):
"""
Extended JSON encoder with support of the following types:
* bytes | JsonBinary ->
{'__type__': 'binary', 'data': base64 encoded}
* numpy.ndarray ->
{'__type__': 'ndarray', 'data': o.tolist(), 'dtype': o.dtype}
Besides, if the same (customized) object is referenced multiple
times and `object_ref` is set to True, it will be serialized
only at its first occurrence. All later occurrences will be saved as:
{'__type__': 'ObjectRef', 'id': ...}.
Args:
object_ref (bool): Whether or not to allow serializing same object as
references? (default :obj:`True`)
"""
NO_REF_TYPES = six.integer_types + (float, bool, datetime,)
def __init__(self, object_ref=True, **kwargs):
super(JsonEncoder, self).__init__(**kwargs)
self.object_ref = object_ref
self._ref_dict = {}
def _default_object_handler(self, o):
if isinstance(o, JsonBinary):
cnt = b64encode(o.value).decode('utf-8')
yield {'__type__': 'binary', 'data': cnt}
elif isinstance(o, (np.integer, np.int, np.uint,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64)):
yield int(o)
elif isinstance(o, (np.float, np.float16, np.float32, np.float64)):
yield float(o)
elif isinstance(o, np.ndarray):
yield {
'__type__': 'ndarray',
'data': o.tolist(),
'dtype': str(o.dtype)
}
#: List of object serialization handlers
OBJECT_HANDLERS = [_default_object_handler]
def clear_object_ref(self):
"""Clear all serialized object references."""
self._ref_dict.clear()
def default(self, o):
o_id = id(o)
if self.object_ref:
if o_id in self._ref_dict:
return {'__type__': 'ObjectRef', '__id__': self._ref_dict[o_id]}
for handler in self.OBJECT_HANDLERS:
for obj in handler(self, o):
if self.object_ref and isinstance(obj, dict) and \
not isinstance(o, self.NO_REF_TYPES):
self._ref_dict[o_id] = len(self._ref_dict)
obj['__id__'] = self._ref_dict[o_id]
return obj
return super(JsonEncoder, self).default(o)
def encode(self, o):
self.clear_object_ref()
return super(JsonEncoder, self).encode(o)
class JsonDecoder(json.JSONDecoder):
"""
Extended JSON decoder coupled with :class:`JsonEncoder`.
Note that a `JsonDecoder` instance is designed to be used only once.
"""
def __init__(self, **kwargs):
self._object_hook = kwargs.get('object_hook', None)
self._ref_dict = {}
kwargs['object_hook'] = self._injected_object_hook
kwargs.setdefault('object_hook', self._injected_object_hook)
super(JsonDecoder, self).__init__(**kwargs)
def _default_object_handler(self, v):
v_type = v['__type__']
if v_type == 'binary':
yield JsonBinary(b64decode(v['data']))
elif v_type == 'ndarray':
yield np.asarray(v['data'], dtype=v['dtype'])
#: List of object deserialization handlers
OBJECT_HANDLERS = [_default_object_handler]
def _injected_object_hook(self, v):
v_type = v.get('__type__', None)
if v_type == 'ObjectRef':
v_id = v['__id__']
if v_id not in self._ref_dict:
raise KeyError('Object reference %r is not defined.' % (v_id,))
return self._ref_dict[v_id]
elif v_type is not None:
for handler in self.OBJECT_HANDLERS:
for o in handler(self, v):
v_id = v.get('__id__', None)
if v_id is not None:
self._ref_dict[v_id] = o
return o
if self._object_hook is not None:
v = self._object_hook(v)
return v
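# Illustrative round trip using the classes above (assumed usage, not part of
# the original module):
#
#   data = {'weights': np.arange(3), 'blob': JsonBinary(b'\x00\x01')}
#   text = json.dumps(data, cls=JsonEncoder)
#   restored = json.loads(text, cls=JsonDecoder)
#
# The '__type__' tags written by JsonEncoder let JsonDecoder restore
# restored['weights'] as an ndarray and restored['blob'] as a JsonBinary.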
| mit | -2,111,988,705,628,866,800 | 32.722892 | 80 | 0.566274 | false |
utcoupe/coupe18 | ros_ws/src/processing_belt_interpreter/src/belt_parser.py | 1 | 2839 |
#!/usr/bin/env python
import xml.etree.ElementTree as ET
import rospy
class BeltParser(object):
"""Class used to parse the definition XML"""
def __init__(self, file):
super(BeltParser, self).__init__()
rospy.logdebug("Parsing belt definition...")
root = ET.parse(file).getroot()
required = ["max_range", "angle", "precision", "scale_responsive"]
# parse params
if required and root.find("params") is None:
msg = "Can't parse belt definition file: a 'params' element is required. Shutting down."
rospy.logfatal(msg)
raise rospy.ROSInitException(msg)
self.Params = { p.attrib["type"] : {c.tag: float(c.text) for c in p} for p in root.find("params")}
rospy.loginfo(self.Params)
for p in required:
for s in self.Params:
if p not in self.Params[s]:
msg = "Can't parse belt definition: a '{}' element is required in the parameters. Shutting down."\
.format(p)
rospy.logfatal(msg)
raise rospy.ROSInitException(msg)
# parse sensors
if root.find("sensors") is None:
msg = "Can't parse belt definition: a 'sensors' element is required. Shutting down."
rospy.logfatal(msg)
raise rospy.ROSInitException(msg)
sensors = []
for sensor in root.find("sensors"):
if "id" not in sensor.attrib:
rospy.logerr("Can't parse sensor definition: a 'id' attribute is required. Skipping this sensor.")
continue
required = ["x", "y", "a"]
for p in required:
if sensor.find(p) is None:
rospy.logerr("Can't parse sensor definition: a '{}' element is required. Skipping this sensor.".format(p))
if "type" not in sensor.attrib:
rospy.logerr("Can't parse sensor definition: a 'type' attribute is required. Skipping this sensor.")
continue
if sensor.attrib["type"] not in self.Params:
rospy.logerr("Can't parse sensor definition: {} sensor type is not defined. Skipping this sensor."
.format(sensor.attrib["type"]))
continue
sensors.append({
"id": sensor.attrib["id"],
"x": float(sensor.find("x").text),
"y": float(sensor.find("y").text),
"a": float(sensor.find("a").text),
"type": sensor.attrib["type"]
})
if not sensors:
rospy.logwarn("No sensor found in belt definition.")
rospy.logdebug("{} sensors found in belt definition".format(len(sensors)))
self.Sensors = {s["id"]: s for s in sensors}
| gpl-3.0 | 5,276,671,905,392,511,000 | 36.853333 | 126 | 0.549489 | false |
spencerpomme/coconuts-on-fire | DSA/ArrayQueue.py | 1 | 2810 |
# python implemented queue ADT
# A new exception class defined for handling empty errors
class Empty(Exception):
'''
Error attempting to access an element from an empty container.
'''
pass
class ArrayQueue:
'''FIFO queue implementation using a python list as underlying storage.'''
DEFAULT_CAPACITY = 10 # moderate capacity for all
# new queues
def __init__(self):
'''Create an empty queue.'''
self._data = [None] * ArrayQueue.DEFAULT_CAPACITY
self._size = 0
self._front = 0
def __len__(self):
'''Return the number of elements in the queue.'''
return self._size
def is_empty(self):
'''Return True if the queue is empty.'''
return self._size == 0
def first(self):
'''Return (but do not remove) the element at the front of the queue.
Raise Empty exception if the queue is empty.'''
if self.is_empty():
raise Empty('Queue is empty')
return self._data[self._front]
def dequeue(self):
'''Remove and return the first element of the queue(i.e.,FIFO).
Raise Empty exception if the queue is empty.'''
if self.is_empty():
raise Empty('Queue is empty')
answer = self._data[self._front]
self._data[self._front] = None
self._front = (self._front + 1) % len(self._data) # wrap around the circular storage
self._size -= 1
if 0 < self._size < len(self._data)//4:
self._resize(len(self._data)//2) # shrink list by 1/2 after
return answer # dequeue if shorter than 1/4
def enqueue(self, e):
'''Add an element to the back of queue.'''
if self._size == len(self._data):
self._resize(2 * len(self._data)) # double the array size
avail = (self._front +self._size) % len(self._data)
# self._size is current element
# number (before enqueue)
self._data[avail] = e
self._size += 1 # new size after enqueue
def _resize(self, cap):
'''Resize to a new list of capacity >= len(self)'''
old = self._data
self._data = [None] * cap
walk = self._front
for k in range(self._size): # self._size here is old size
self._data[k] = old[walk]
walk = (1 + walk) % len(old)
self._front = 0 # self._front realigned
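# Quick illustrative usage (not part of the original file):
#
#   q = ArrayQueue()
#   for i in range(3):
#       q.enqueue(i)
#   q.first()    # -> 0 (peek; the queue is unchanged)
#   q.dequeue()  # -> 0
#   len(q)       # -> 2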
| apache-2.0 | 7,368,616,952,017,347,000 | 38.027778 | 91 | 0.484698 | false |
renatosamperio/context_task_queue | Tools/create_service.py | 1 | 21855 |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import sys, os
from optparse import OptionParser, OptionGroup
import xml.etree.ElementTree as ET
## Importing Utils from parent path
file_path = os.path.dirname(__file__)
join_path = os.path.join(file_path, '../Utils')
abs_path = os.path.abspath(join_path)
sys.path.append(abs_path)
from Utils import Utilities
from Utils.XMLParser import ParseXml2Dict
class AutoCodeError(Exception):
def __init__(self, message, reason):
self.message = message
self.reason = reason
class AutoCode(object):
'''
Service instance is the type of the created service, as given by the
'instance' parameter of the task service in the configuration file.
'''
ServiceType = 'TestServiceType'
'''
Task file name is for defining specific operations from task class.
It will be imported from created directory and used to instance a
task class.
'''
TaskFile = 'TestTaskFile'
'''
Name of the autogenerated task class. It should have the logic for
producing a service. It is called by the service and imported by file name.
'''
TaskClass = 'TestTaskClass'
'''
Required for logging and identifying task operations.
'''
TaskDescription = 'TestTaskDescription'
'''
Logging context name. It is used in 'TaskLogName' in the configuration file.
'''
ContextName = 'TestContextName'
'''
Defines a service name for the identifying service process messages.
It is called in process configuration configuration file.
'''
ServiceName = 'TestServiceName'
'''
IP address of server endpoint. It is used in 'FrontEndEndpoint' and 'BackendEndpoint'
in the configuration file.
'''
ServerIP = 'TestServerIP'
'''
Front end port for subscriber and back end binding ports. It is used in 'FrontEndEndpoint'
and 'BackendBind' in the configuration file.
'''
SubPort = 'TestSubPort'
'''
Back end port for subscriber and front end binding ports. It is used in 'BackendEndpoint'
and 'FrontBind' in the configuration file.
'''
PubPort = 'TestPubPort'
'''
Task service ID identifier. It is used as parameter 'id' in 'TaskService' label in the
configuration file.
'''
TaskID = 'TestTaskID'
'''
Task device action used for message identification. It is used as 'device_action'
of the content configuration of the task service in the configuration file.
'''
DeviceAction = 'TestDeviceAction'
'''
Defines entry action to be executed upon entry to a state associated with
other states. It is not to transitions and it is called regardless of how a
state is resulted. This fixture is related to UML statechart.
'''
EntryAction = 'TestEntryAction'
'''
If it exists, defines the type of task template to use: "Single" or "Looped".
The "Single" process is executed from start to finish. The "Looped" is
a process that continuously executes itself.
'''
TaskType = 'TestTaskType'
'''
User-defined identifier for context. It should be a unique alpha-numeric
identifier.
'''
ContextID = 'TestContextID'
'''
Used to refer the absolute path location of non-system services.
'''
ModuleLocation = 'TestModuleLocation'
def __init__(self, options):
''' Class constructor'''
try:
self.ServicePath = None
self.HomePath = None
self.ServiceType = None
self.TaskFile = None
self.TaskClass = None
self.TaskDescription = None
self.ServiceName = None
self.ServerIP = None
self.SubPort = None
self.PubPort = None
self.ContextName = None
self.TaskID = None
self.DeviceAction = None
self.EntryAction = None
self.TaskType = 'Looped'
self.ModuleLocation = None
self.ContextID = None
self.StateConf = []
self.log = True
## Service configuration location
self.ServicePath = options['service_path']
self.HomePath = options['home_path']
## Service generation stub variables
self.ServiceType = options['task_service']
self.TaskFile = options['task_service']
self.TaskClass = options['task_service']
self.TaskDescription= options['task_desc']
## Service XML configuration options
self.ServiceName = options['service_name']
self.ServerIP = options['server_ip']
self.SubPort = options['sub_port']
self.PubPort = options['pub_port']
self.ContextName = options['context_name']
self.TaskID = options['task_id']
self.DeviceAction = options['device_action']
self.EntryAction = options['entry_action']
self.StateConf = options['state']
self.ContextID = options['context_id']
self.ModuleLocation= options['location']
## Setting logger
if 'log_on' in options.keys():
self.log = options['log_on']
# Validating state values whether they would be incomplete
if 'task_type' in options.keys():
self.TaskType = options['task_type']
if len(self.StateConf) != 4:
raise AutoCodeError('Failure in constructor', 'State transitions are not complete')
reason = "Analysing... ["+self.ServicePath+"]"
self.PrintLog("- "+reason)
servicesPath = self.ServicePath+'/Services'
if not os.path.exists(servicesPath):
self.PrintLog("- Context service root path not found, creating [Services] directory")
os.makedirs(servicesPath)
else:
self.PrintLog(" Nothing to do")
except Exception as inst:
Utilities.ParseException(inst)
def PrintLog(self, msg):
''' Internal logger method'''
if self.log:
print msg
def CreateInit(self):
'''Create init file for service'''
try:
servicePath = self.ServicePath+'/Services/'+self.ServiceType
if not os.path.exists(servicePath):
message = "Warning:"
reason = "Root path is not valid"
print message+" : "+reason
return servicePath
## Open init template
#$TaskFile import $TaskClass
self.PrintLog("+ Generating init template file")
initName = '__init__.'
initPath = self.HomePath+'/Tools/Templates/'+initName+'tmpl'
with open(initPath, 'r') as openedFile:
data=openedFile.read()
data = data.replace('$TaskFile', self.TaskFile)
data = data.replace('$TaskClass', self.TaskClass)
initOutput = servicePath+'/'+initName+'py'
#print "==>", initOutput
with open(initOutput, "w") as init_file:
init_file.write(data)
except Exception as inst:
Utilities.ParseException(inst)
finally:
return servicePath
def CreateDirectory(self):
'''Create directoy with service name '''
try:
servicePath = self.ServicePath+'/Services/'+self.ServiceType
if os.path.exists(servicePath):
message = "Warning: Couldn't create service path"
reason = "Path already exists ["+servicePath+"]"
self.PrintLog(message+" "+reason)
return servicePath
## If directory does not exists, create it
self.PrintLog("+ Creating service directory [%s]"%servicePath)
os.makedirs(servicePath)
except Exception as inst:
Utilities.ParseException(inst)
finally:
return servicePath
def CreateServiceStub(self):
'''Create service task stub file'''
try:
servicePath = self.ServicePath+'/Services/'+self.ServiceType
if not os.path.exists(servicePath):
message = "Warning:"
reason = "Root path is not valid"
print message+" : "+reason
return servicePath
## Open service task template
#$TaskFile import $TaskClass
self.PrintLog("+ Loading task service stub template file")
fileName = 'Service'+self.ServiceType+'.py'
filePath = self.HomePath+'/Tools/Templates/ServiceTask.tmpl'
with open(filePath, 'r') as openedFile:
data=openedFile.read()
data = data.replace('$ServiceType', self.ServiceType)
data = data.replace('$TaskFile', self.TaskFile)
data = data.replace('$TaskDescription', self.TaskDescription)
data = data.replace('$ServiceName', self.ServiceName)
data = data.replace('$TaskClass', self.TaskClass)
fileOutput = servicePath+'/'+fileName
with open(fileOutput, "w") as init_file:
init_file.write(data)
except Exception as inst:
Utilities.ParseException(inst)
def CreateTaskStub(self):
'''Create strategy task stub file'''
try:
servicePath = self.ServicePath+'/Services/'+self.ServiceType
if not os.path.exists(servicePath):
message = "Warning:"
reason = "Root path is not valid"
print message+" : "+reason
##TODO: Should create an exception
sys.exit(0)
## Open service task template
self.PrintLog("+ Loading task template file")
fileName = self.TaskFile+'.py'
## Defining the type of task
if self.TaskType == 'Looped':
filePath = self.HomePath+'/Tools/Templates/Task.tmpl'
elif self.TaskType == 'Single':
filePath = self.HomePath+'/Tools/Templates/TaskSingle.tmpl'
else:
message = "Warning:"
reason = "Invalid type of task template"
print message+" : "+reason
##TODO: Should create an exception
sys.exit(0)
self.PrintLog("+ Loading a task for [%s]"%self.TaskType)
with open(filePath, 'r') as openedFile:
data=openedFile.read()
data = data.replace('$TaskClass', self.TaskClass)
fileOutput = servicePath+'/'+fileName
with open(fileOutput, "w") as init_file:
init_file.write(data)
except Exception as inst:
Utilities.ParseException(inst)
def AdaptConfFile(self):
'''Create strategy task stub file'''
try:
servicePath = self.ServicePath+'/Services/'+self.ServiceType
if not os.path.exists(servicePath):
message = "Warning:"
reason = "Root path is not valid"
print message+" : "+reason
return servicePath
## Open configuration template file
self.PrintLog("+ Adapting configuration file")
fileName = 'Context-'+self.ContextName+'.xml'
filePath = self.HomePath+'/Tools/Templates/Context.tmpl'
## Creating conf path
confFilePath = self.HomePath+'/Conf/'
if not os.path.exists(confFilePath):
self.PrintLog("- Creating configuration directory")
os.makedirs(confFilePath)
confFileName = confFilePath+fileName
if os.path.isfile(confFileName):
''' '''
## File already exists,
self.PrintLog("+ Updating configuration file [%s]"%fileName)
with open(confFileName, 'r') as openedFile:
''''''
## Loading file content as XML
data=openedFile.read()
root = ET.fromstring(data)
## Checking for already defined configuration processes
nodes = root.findall("./TaskService")
for child in nodes:
#print "==>1 ", child.tag, child.attrib
attribs = child.attrib
## Checking if process is already defined
if attribs['instance']==self.ServiceType or attribs['id']==self.TaskID:
self.PrintLog("+ Process is already defined")
return
## Merging both XML's
self.PrintLog("+ Verifying for exisiting content")
## Opening template file to get task service model
with open(filePath, 'r') as openedFile:
templateData = openedFile.read()
templateData = self.SetValues(templateData)
templateRoot = ET.fromstring(templateData)
## Removing non required items for merging
templateRoot.remove(templateRoot.find('FrontEndEndpoint'))
templateRoot.remove(templateRoot.find('BackendEndpoint'))
templateRoot.remove(templateRoot.find('FrontBind'))
templateRoot.remove(templateRoot.find('BackendBind'))
templateRoot.remove(templateRoot.find('TaskLogName'))
templateRoot.remove(templateRoot.find('ContextID'))
## Merging XML trees and obtaining merged XML file
self.PrintLog("+ Merging XML processes")
root.append(templateRoot[0])
mergedXML = ET.tostring(root, encoding='utf8', method='xml')
## Writing new appended file
self.PrintLog("+ Writing on merged file: [%s]"%confFilePath)
with open(confFileName, "w") as init_file:
init_file.write(mergedXML)
else:
## Generating a new file
self.PrintLog("+ Opening task template file")
with open(filePath, 'r') as openedFile:
data = openedFile.read()
data = self.SetValues(data)
self.PrintLog("+ Creating a new [%s] configuration file"%fileName)
with open(confFileName, "w") as init_file:
init_file.write(data)
## TODO: Add extended configuration if it exists
except Exception as inst:
Utilities.ParseException(inst)
def SetValues(self, data):
'''Setting values to template '''
data = data.replace('$ServerIP', self.ServerIP)
data = data.replace('$SubPort', self.SubPort)
data = data.replace('$PubPort', self.PubPort)
data = data.replace('$ContextName', self.ContextName)
data = data.replace('$ContextID', self.ContextID)
data = data.replace('$TaskID', self.TaskID)
data = data.replace('$DeviceAction',self.DeviceAction)
data = data.replace('$TaskDescription',self.TaskDescription)
data = data.replace('$ServiceName', self.ServiceName)
data = data.replace('$ServiceType', self.ServiceType)
data = data.replace('$EntryAction', self.EntryAction)
data = data.replace('$ModuleLocation',self.ModuleLocation)
## Replacing state information
confSize = len(self.StateConf)
for i in range(confSize):
confData = self.StateConf[i]
indexDoc = str(i+1)
self.PrintLog("+ [%s] Setting up data for triggering [%s]"%(indexDoc, confData['trigger']))
## Replacing state information: trigger, action and state ID
data = data.replace('$Trigger'+indexDoc, confData['trigger'])
data = data.replace('$Action'+indexDoc , confData['action'])
data = data.replace('$State'+indexDoc , confData['state_id'])
return data
def CreateFiles(self):
''' Generate code for:
1) Create service directory
2) __init__.py
3) Service<NAME>.py stub file
4) Strategy file stub
5) Create or update configuration file
'''
try:
## 1) Create service directory
## TODO: Change to a dynamic path in context services
servicesPath = self.CreateDirectory()
## 2) Creating __init__.py
self.CreateInit()
## 3) Service<NAME>.py stub file
self.CreateServiceStub()
## 4) Strategy file stub
self.CreateTaskStub()
## 5) Create or update configuration file
self.AdaptConfFile()
except AutoCodeError as e:
      print(e.message+" : "+e.reason)
except Exception as inst:
Utilities.ParseException(inst)
sUsage = "usage:\n"\
" For sending a message to an annotator service\n"\
"\t python Tools/create_service.py \n"\
"\t\t--service_path='/abs/path/unix/style' \n"\
"\t\t--home_path='/abs/path/unix/style' \n"\
"\t\t--task_service='instance_type' \n"\
"\t\t--service_name='service_name' \n"\
"\t\t--task_desc='task_description' \n"\
"\t\t--server_ip='127.0.0.1' \n"\
"\t\t--sub_port='XXXX' \n"\
"\t\t--pub_port='YYYY' \n"\
"\t\t--context_name='context_test_name' \n"\
"\t\t--task_id='task_ID' \n"\
"\t\t--device_action='device_action_id' \n"
if __name__ == '__main__':
try:
available_entry_actions = ['on_exit', 'on_fail', 'on_start', 'on_update']
usage = sUsage
parser = OptionParser(usage=usage)
systemOpts = OptionGroup(parser, "Service configuration location")
systemOpts.add_option('--service_path',
metavar="PATH",
default=None,
help="Absolute root path where context services are located")
    systemOpts.add_option('--xml_file',
                          metavar="PATH XML FILE",
                          default=None,
                          help="Absolute root path where the xml configuration file is located")
    ## Also required by the non-XML code path below, which reads options.home_path
    systemOpts.add_option('--home_path',
                          metavar="PATH",
                          default=None,
                          help="Absolute home path under which the Conf/ directory with "
                               "generated configuration files is created")
contextOpts= OptionGroup(parser, "Service generation stub variables")
contextOpts.add_option('--task_service',
metavar="SERVICE",
default=None,
help="Service instance is the type of created service as defined "
"in task service parameters in the configuration file")
contextOpts.add_option('--task_desc',
metavar="TASK_DESCRIPTION",
default=None,
help="Required for logging and identifying task operations.")
xmltOpts= OptionGroup(parser, "Service XML configuration options")
xmltOpts.add_option('--context_name',
metavar="CONTEXTNAME",
default=None,
help="Logging context name. It is used in 'TaskLogName' in the "
"configuration file.")
xmltOpts.add_option('--service_name',
metavar="SERVICE_NAME",
default=None,
help="Defines a service name for the identifying service process "
"messages. It is called in process configuration configuration file")
xmltOpts.add_option('--server_ip',
metavar="SERVERIP",
default=None,
help="IP address of server endpoint. It is used in "
"'FrontEndEndpoint' and 'BackendEndpoint' in the "
"configuration file.")
xmltOpts.add_option('--sub_port',
metavar="SUBPORT",
default=None,
help="Front end port for subscriber and back end binding ports. "
"It is used in 'FrontEndEndpoint' and 'BackendBind' in the "
"configuration file.")
xmltOpts.add_option('--pub_port',
metavar="PUBPORT",
default=None,
help="Back end port for subscriber and front end binding ports. "
"It is used in 'BackendEndpoint' and 'FrontBind' in the "
"configuration file.")
xmltOpts.add_option('--task_id',
metavar="TASKID",
default=None,
help="Task service ID identifier. It is used as parameter"
"'id' in 'TaskService' label in the configuration file")
xmltOpts.add_option('--device_action',
metavar="DEVICEACTION",
default=None,
help="Task device action used for message identification. messages."
"It is called in process configuration configuration file It is "
"used as 'device_action of the content configuration of the task "
"service in the configuration file.")
xmltOpts.add_option('--entry_action',
type='choice',
action='store',
dest='entry_action',
choices=available_entry_actions,
default=None,
help="Defines entry action to be executed upon entry to a state associated with "
"other states. It is not to transitions and it is called regardless of how a "
"state is resulted. This fixture is related to UML statechart from the following "
"choices: "+str(available_entry_actions))
parser.add_option_group(systemOpts)
parser.add_option_group(contextOpts)
parser.add_option_group(xmltOpts)
(options, args) = parser.parse_args()
if options.xml_file is None and options.service_path is None:
parser.error("Missing required option: service_path or xml_file")
parser.print_help()
if options.xml_file is None and options.home_path is None:
parser.error("Missing required option: home_path or xml_file")
parser.print_help()
if options.xml_file is not None:
''' '''
## Use if many services are generated at the same time
services = ParseXml2Dict(options.xml_file, 'MetaServiceConf')
if type(services['Service']) is not type([]):
services['Service'] = [services['Service']]
for service in services['Service']:
service.update({'context_name': services['context_name']})
service.update({'server_ip': services['server_ip']})
service.update({'sub_port': services['sub_port']})
service.update({'pub_port': services['pub_port']})
service.update({'service_path': services['service_path']})
if 'home_path' not in services.keys():
service.update({'home_path': '/opt/zmicroservices/'})
else:
service.update({'home_path': services['home_path']})
service.update({'context_id': services['context_id']})
if 'log_on' in services.keys():
service.update({'log_on': bool(int(services['log_on']))})
if 'location' not in services.keys():
service.update({'location': services['service_path']+'/Services'})
else:
service.update({'location': services['location']})
service.update({'context_id': services['context_id']})
#pprint.pprint(service)
## Checking if there is a type of task
taskType = 'Looped'
if 'task_type' in service.keys():
taskType = service['task_type']
service.update({'taskType': taskType})
## Calling code autogenerator
autogenerator = AutoCode(service)
autogenerator.CreateFiles()
else:
''' Checking argument values '''
if options.task_service is None:
parser.error("Missing required option: task_service")
parser.print_help()
if options.task_desc is None:
parser.error("Missing required option: task_desc")
parser.print_help()
if options.context_name is None:
parser.error("Missing required option: context_name")
parser.print_help()
if options.service_name is None:
parser.error("Missing required option: service_name")
parser.print_help()
if options.server_ip is None:
parser.error("Missing required option: server_ip")
parser.print_help()
if options.sub_port is None:
parser.error("Missing required option: sub_port")
parser.print_help()
if options.pub_port is None:
parser.error("Missing required option: pub_port")
parser.print_help()
if options.task_id is None:
parser.error("Missing required option: task_id")
parser.print_help()
if options.device_action is None:
parser.error("Missing required option: device_action")
parser.print_help()
if options.entry_action is None:
parser.error("Missing required option: entry_action")
parser.print_help()
## Calling code autogenerator
options_dict = vars(options)
autogenerator = AutoCode(options_dict)
autogenerator.CreateFiles()
except AutoCodeError as e:
print('Error: %s, %s'%(e.message, e.reason))
|
gpl-3.0
| -4,830,159,946,343,961,000 | 33.418898 | 101 | 0.666941 | false |
rojinva/Email-classifier
|
Text classification with probability.py
|
1
|
2058
|
__author__="rojin.varghese"
__date__ ="$Nov 8, 2013 8:48:18 PM$"
import os
from xlrd import open_workbook
import re
import xlwt
j = os.path.join
def train(text):
c = {}
lastword = ""
line = re.sub('[\-#*>]', '', text)
line = re.sub('[\n]', '', line)
for word in line.split():
word = word.lower()
        if lastword in c:
            inner = c[lastword]
            if word in inner:
inner[word] += 1
else:
inner[word] = 1
else:
c[lastword] = {word: 1}
lastword = word
return c
def probability_of(dict, lastword, word):
word = word.lower()
    if lastword in dict:
        inner = dict[lastword]
        sumvalues = sum([v for v in inner.values()])
        if word in inner:
return inner[word] / (sumvalues * 1.0)
return 0
def classify(text, dict):
lastword = ""
probabilities = 0
line = re.sub('[\-#*>]', '', text)
line = re.sub('[\n]', '', line)
for word in line.split():
probabilities += probability_of(dict, lastword, word)
lastword = word
return probabilities / (len(text.split()) * 1.0)
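# Hedged usage sketch (illustrative strings only, not part of the original script):
#   trained = train("please reset my password please reset")
#   score = classify("reset my password", trained)
# `score` is the average transition probability over consecutive word pairs, so a
# higher value means the text reads more like the training text.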
if __name__ == "__main__":
ranking = []
book = open_workbook('C:/Documents and Settings/rojin.varghese/Desktop/Test_mail.xls')
sheet1 = book.sheet_by_index(0)
book1 = xlwt.Workbook()
sh = book1.add_sheet("sheet")
for i in range(sheet1.nrows):
line = sheet1.cell_value(i,1)
line = re.sub('[\-*>]', '', line)
line = re.sub('[\n]', '', line)
for file in os.listdir("C:/Documents and Settings/rojin.varghese/Desktop/ICICI_emails"):
trained = train(open(j("C:/Documents and Settings/rojin.varghese/Desktop/ICICI_emails", file)).read())
value = classify(line, trained)
ranking.append((value, file))
sh.write(i, 0, ranking[-1][1])
sh.write(i, 1, ranking[-2][1])
book1.save("C:/Documents and Settings/rojin.varghese/Desktop/Results/ProbabilityResult.xls")
|
unlicense
| -6,348,697,365,323,838,000 | 26.44 | 112 | 0.556365 | false |
mark-up/django-autoslug
|
doc/conf.py
|
1
|
6508
|
# -*- coding: utf-8 -*-
#
# Django Autoslug documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 21 22:35:51 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
VERSION = '1.3.4'
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from django.conf import settings
settings.configure()
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Autoslug'
copyright = u'2009, Andy Mikhailenko'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(VERSION.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoAutoslugdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoAutoslug.tex', u'Django Autoslug Documentation',
u'Andy Mikhailenko', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
lgpl-3.0
| 2,734,622,607,506,859,500 | 31.703518 | 80 | 0.714352 | false |
huertatipografica/huertatipografica-fl-scripts
|
AT_Compatibility/AT_Compatible.py
|
1
|
1585
|
#FLM: AT Compatibility check
#Check compatibility of selected glyphs between the current font and the declared fontCompare (Postscript Full Name)
# By Andres Torresi
# http://www.huertatipografica.com
#fullName of font you want to compare with current font
fontCompare='ABBvoice Bold';
colorCompatible=150
colorIncompatible=255
#--------FUNCTIONS------------------
def chequear(g,f2,f1):
f1name=f1.info.postscriptFullName
f2name=f2.info.postscriptFullName
if f1.has_key(g.name):
compatible=f1[g.name].isCompatible(f2[g.name], True)
if compatible[0]==False:
print "No es compatible el glifo "+g.name+" en "+f1name
print compatible
g.mark=colorIncompatible
f1[g.name].mark=colorIncompatible
else:
print "No existe el glifo "+g.name+" en "+f1name
###########
from robofab.world import AllFonts,CurrentFont,CurrentGlyph
from robofab.interface.all.dialogs import Message, ProgressBar
fonts=AllFonts()
for f in fonts:
if f.info.postscriptFullName==fontCompare:
f1=f
f2=CurrentFont()
glyph=CurrentGlyph()
fl.SetUndo()
error=0
print f1,f2
if len(fonts)<2:
error="Debe abrir dos fuentes para comparar"
elif f1.path==f2.path:
error="Origen y destino son la misma fuente."
if error==0:
tickCount = len(f2.selection)
  bar = ProgressBar('Checking...', tickCount)
tick = 0
for g in f2.keys():
if f2[g].selected or g.index == fl.iglyph or g == glyph.name:
chequear(f2[g],f2,f1)
bar.tick(tick)
tick = tick+1
bar.close()
f1.update()
f2.update()
else:
print error
|
apache-2.0
| -4,392,702,551,218,769,000 | 23.384615 | 110 | 0.681388 | false |
vortex-ape/scikit-learn
|
conftest.py
|
2
|
2347
|
# Even if empty this file is useful so that when running from the root folder
# ./sklearn is added to sys.path by pytest. See
# https://docs.pytest.org/en/latest/pythonpath.html for more details. For
# example, this allows to build extensions in place and run pytest
# doc/modules/clustering.rst and use sklearn from the local folder rather than
# the one from site-packages.
import platform
from distutils.version import LooseVersion
import pytest
from _pytest.doctest import DoctestItem
from sklearn.utils.fixes import PY3_OR_LATER
PYTEST_MIN_VERSION = '3.3.0'
if LooseVersion(pytest.__version__) < PYTEST_MIN_VERSION:
    raise ImportError('Your version of pytest is too old, you should have at '
                      'least pytest >= {} installed.'.format(PYTEST_MIN_VERSION))
def pytest_addoption(parser):
parser.addoption("--skip-network", action="store_true", default=False,
help="skip network tests")
def pytest_collection_modifyitems(config, items):
# FeatureHasher is not compatible with PyPy
if platform.python_implementation() == 'PyPy':
skip_marker = pytest.mark.skip(
reason='FeatureHasher is not compatible with PyPy')
for item in items:
if item.name == 'sklearn.feature_extraction.hashing.FeatureHasher':
item.add_marker(skip_marker)
# Skip tests which require internet if the flag is provided
if config.getoption("--skip-network"):
skip_network = pytest.mark.skip(
reason="test requires internet connectivity")
for item in items:
if "network" in item.keywords:
item.add_marker(skip_network)
# numpy changed the str/repr formatting of numpy arrays in 1.14. We want to
# run doctests only for numpy >= 1.14. We want to skip the doctest for
# python 2 due to unicode.
skip_doctests = False
if not PY3_OR_LATER:
skip_doctests = True
try:
import numpy as np
if LooseVersion(np.__version__) < LooseVersion('1.14'):
skip_doctests = True
except ImportError:
pass
if skip_doctests:
skip_marker = pytest.mark.skip(
reason='doctests are only run for numpy >= 1.14 and python >= 3')
for item in items:
if isinstance(item, DoctestItem):
item.add_marker(skip_marker)
|
bsd-3-clause
| -6,816,286,950,562,045,000 | 35.107692 | 79 | 0.663826 | false |
jzahedieh/django-tennis-ladder
|
tennis/settings.py
|
1
|
6946
|
# Django settings for tennis project.
import os
SETTINGS_DIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = os.environ.get("DEBUG_VALUE") == 'True'
ADMINS = (
('Admin User', 'admin@highgate-ladder.co.uk'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get("SQL_DATABASE", "tennis"),
'USER': os.environ.get("SQL_USER", "root"),
'PASSWORD': os.environ.get("SQL_PASSWORD", ""),
'HOST': os.environ.get("SQL_HOST", ""),
'PORT': '',
'OPTIONS': {
'autocommit': True,
},
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.contrib.auth.context_processors.auth',
'ladder.context_processors.navigation',
],
},
},
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 100
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = '*'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-uk'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.abspath(os.path.join(SETTINGS_DIR, '..', 'static'))
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# ManifestStaticFilesStorage hashes assets for versioning
# which ensures all users see the same content
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get("SECRET_KEY", "this_should_be_kept_a_secret")
MIDDLEWARE = (
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
LOGIN_REDIRECT_URL = '/result/entry/'
LOGOUT_REDIRECT_URL = '/'
ROOT_URLCONF = 'tennis.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tennis.wsgi.application'
INSTALLED_APPS = (
'debug_toolbar',
'rest_framework',
'ladder',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
EMAIL_PORT = os.environ.get('EMAIL_PORT')
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
DEFAULT_FROM_EMAIL = os.environ.get('EMAIL_DEFAULT_FROM')
SERVER_EMAIL = os.environ.get('EMAIL_SERVER_FROM')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
INTERNAL_IPS = (
'127.0.0.1',
'0.0.0.0',
'172.18.0.1',
'172.21.0.1',
'172.23.0.1'
)
|
mit
| 8,503,064,163,933,158,000 | 32.080952 | 85 | 0.670746 | false |
blakerohde/sidewalk
|
setup.py
|
1
|
1124
|
import os
from distutils.core import setup
import sidewalk
setup(
name='sidewalk',
version=sidewalk.__version__,
url= 'http://www.blakerohde.com/projects/sidewalk',
author='Blake Rohde',
author_email='blake@blakerohde.com',
description='The Simple Activity Aggregator.',
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
download_url='https://github.com/blakerohde/sidewalk/tarball/master',
scripts=[
'sidewalk/bin/sidewalk',
],
packages=[
'sidewalk',
'sidewalk.bin',
'sidewalk.conf',
'sidewalk.core',
'sidewalk.test',
],
package_data={
'' : [
'LICENSE',
],
},
license=open('LICENSE').read(),
classifiers=(
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Topic :: System :: Logging',
'Topic :: System :: Monitoring',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
),
)
|
isc
| -2,525,365,965,505,527,000 | 23.977778 | 70 | 0.630783 | false |
drewokane/xray
|
xarray/core/groupby.py
|
1
|
20086
|
import functools
import numpy as np
import pandas as pd
from . import ops
from .combine import concat
from .common import (
ImplementsArrayReduce, ImplementsDatasetReduce, _maybe_promote,
)
from .pycompat import zip
from .utils import peek_at, maybe_wrap_array, safe_cast_to_index
from .variable import as_variable, Variable, Coordinate
def unique_value_groups(ar):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
inverse, values = pd.factorize(ar, sort=True)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
if g >= 0:
# pandas uses -1 to mark NaN, but doesn't include them in values
groups[g].append(n)
return values, groups
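# Illustrative example (assumed input, not from the original module):
#   unique_value_groups(np.array(['b', 'a', 'b']))
#   -> (array(['a', 'b'], dtype=object), [[1], [0, 2]])
# i.e. the sorted unique values plus, for each value, the positions at which it
# occurs in ``ar``.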
def _get_fill_value(dtype):
"""Return a fill value that appropriately promotes types when used with
np.concatenate
"""
dtype, fill_value = _maybe_promote(dtype)
return fill_value
def _dummy_copy(xarray_obj):
from .dataset import Dataset
from .dataarray import DataArray
if isinstance(xarray_obj, Dataset):
res = Dataset(dict((k, _get_fill_value(v.dtype))
for k, v in xarray_obj.data_vars.items()),
dict((k, _get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
xarray_obj.attrs)
elif isinstance(xarray_obj, DataArray):
res = DataArray(_get_fill_value(xarray_obj.dtype),
dict((k, _get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
name=xarray_obj.name,
attrs=xarray_obj.attrs)
else: # pragma: no cover
raise AssertionError
return res
class GroupBy(object):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
groupby object are with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
Dataset.groupby
DataArray.groupby
"""
def __init__(self, obj, group, squeeze=False, grouper=None):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group : DataArray or Coordinate
1-dimensional array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
grouper : pd.Grouper, optional
Used for grouping values along the `group` array.
"""
from .dataset import as_dataset
if group.ndim != 1:
# TODO: remove this limitation?
raise ValueError('`group` must be 1 dimensional')
if getattr(group, 'name', None) is None:
raise ValueError('`group` must have a name')
if not hasattr(group, 'dims'):
raise ValueError("`group` must have a 'dims' attribute")
group_dim, = group.dims
try:
expected_size = obj.dims[group_dim]
except TypeError:
expected_size = obj.shape[obj.get_axis_num(group_dim)]
if group.size != expected_size:
raise ValueError('the group variable\'s length does not '
'match the length of this variable along its '
'dimension')
full_index = None
if grouper is not None:
# time-series resampling
index = safe_cast_to_index(group)
if not index.is_monotonic:
# TODO: sort instead of raising an error
raise ValueError('index must be monotonic for resampling')
s = pd.Series(np.arange(index.size), index)
first_items = s.groupby(grouper).first()
if first_items.isnull().any():
full_index = first_items.index
first_items = first_items.dropna()
bins = first_items.values.astype(np.int64)
group_indices = ([slice(i, j) for i, j in zip(bins[:-1], bins[1:])] +
[slice(bins[-1], None)])
unique_coord = Coordinate(group.name, first_items.index)
elif group.name in obj.dims:
# assume that group already has sorted, unique values
if group.dims != (group.name,):
raise ValueError('`group` is required to be a coordinate if '
'`group.name` is a dimension in `obj`')
group_indices = np.arange(group.size)
if not squeeze:
# group_indices = group_indices.reshape(-1, 1)
# use slices to do views instead of fancy indexing
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group
else:
# look through group to find the unique values
unique_values, group_indices = unique_value_groups(group)
unique_coord = Coordinate(group.name, unique_values)
self.obj = obj
self.group = group
self.group_dim = group_dim
self.group_indices = group_indices
self.unique_coord = unique_coord
self._groups = None
self._full_index = full_index
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self.unique_coord.values,
self.group_indices))
return self._groups
def __len__(self):
return self.unique_coord.size
def __iter__(self):
return zip(self.unique_coord.values, self._iter_grouped())
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self.group_indices:
yield self.obj.isel(**{self.group_dim: indices})
def _infer_concat_args(self, applied_example):
if self.group_dim in applied_example.dims:
concat_dim = self.group
positions = self.group_indices
else:
concat_dim = self.unique_coord
positions = None
return concat_dim, positions
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
g = f if not reflexive else lambda x, y: f(y, x)
applied = self._yield_binary_applied(g, other)
combined = self._concat(applied)
return combined
return func
def _yield_binary_applied(self, func, other):
dummy = None
for group_value, obj in self:
try:
other_sel = other.sel(**{self.group.name: group_value})
except AttributeError:
raise TypeError('GroupBy objects only support binary ops '
'when the other argument is a Dataset or '
'DataArray')
except KeyError:
if self.group.name not in other.dims:
raise ValueError('incompatible dimensions for a grouped '
'binary operation: the group variable %r '
'is not a dimension on the other argument'
% self.group.name)
if dummy is None:
dummy = _dummy_copy(other)
other_sel = dummy
result = func(obj, other_sel)
yield result
def _maybe_restore_empty_groups(self, combined):
"""Our index contained empty groups (e.g., from a resampling). If we
reduced on that dimension, we want to restore the full index.
"""
if (self._full_index is not None and self.group.name in combined.dims):
indexers = {self.group.name: self._full_index}
combined = combined.reindex(**indexers)
return combined
def fillna(self, value):
"""Fill missing values in this object by group.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : valid type for the grouped object's fillna method
Used to fill all matching missing values by group.
Returns
-------
same type as the grouped object
See also
--------
Dataset.fillna
DataArray.fillna
"""
return self._fillna(value)
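    # Hypothetical sketch (the variable and group names are assumptions):
    #   ds.groupby('letters').fillna(ds.groupby('letters').mean())
    # fills each missing value with its own group's mean, aligned back to the
    # original object as described above.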
def where(self, cond):
"""Return an object of the same shape with all entries where cond is
True and all other entries masked.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic.
Parameters
----------
cond : DataArray or Dataset
Returns
-------
same type as the grouped object
See also
--------
Dataset.where
"""
return self._where(cond)
def _first_or_last(self, op, skipna, keep_attrs):
if isinstance(self.group_indices[0], (int, np.integer)):
# NB. this is currently only used for reductions along an existing
# dimension
return self.obj
return self.reduce(op, self.group_dim, skipna=skipna,
keep_attrs=keep_attrs, allow_lazy=True)
def first(self, skipna=None, keep_attrs=True):
"""Return the first element of each group along the group dimension
"""
return self._first_or_last(ops.first, skipna, keep_attrs)
def last(self, skipna=None, keep_attrs=True):
"""Return the last element of each group along the group dimension
"""
return self._first_or_last(ops.last, skipna, keep_attrs)
def assign_coords(self, **kwargs):
"""Assign coordinates by group.
See also
--------
Dataset.assign_coords
"""
return self.apply(lambda ds: ds.assign_coords(**kwargs))
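# Rough usage sketch of the split-apply-combine flow (assumed data, not part of
# the original module):
#   arr = DataArray([1, 2, 3, 4], dims='x', coords={'letters': ('x', list('abab'))})
#   arr.groupby('letters').mean()
# splits ``arr`` by the unique values of 'letters', applies the reduction to each
# group, and concatenates the per-group results back together.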
class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
"""
var = self.obj.variable
for indices in self.group_indices:
yield var[{self.group_dim: indices}]
def _concat_shortcut(self, applied, concat_dim, positions):
# nb. don't worry too much about maintaining this method -- it does
# speed things up, but it's not very interpretable and there are much
# faster alternatives (e.g., doing the grouped aggregation in a
# compiled language)
stacked = Variable.concat(
applied, concat_dim, positions, shortcut=True)
stacked.attrs.update(self.obj.attrs)
result = self.obj._replace_maybe_drop_dims(stacked)
result._coords[concat_dim.name] = as_variable(concat_dim, copy=True)
return result
def _restore_dim_order(self, stacked):
def lookup_order(dimension):
if dimension == self.group.name:
dimension, = self.group.dims
if dimension in self.obj.dims:
axis = self.obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dims, key=lookup_order)
return stacked.transpose(*new_order)
def apply(self, func, shortcut=False, **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, **kwargs)) for arr in grouped)
combined = self._concat(applied, shortcut=shortcut)
result = self._maybe_restore_empty_groups(combined)
return result
def _concat(self, applied, shortcut=False):
# peek at applied to determine which coordinate to stack over
applied_example, applied = peek_at(applied)
concat_dim, positions = self._infer_concat_args(applied_example)
if shortcut:
combined = self._concat_shortcut(applied, concat_dim, positions)
else:
combined = concat(applied, concat_dim, positions=positions)
if isinstance(combined, type(self.obj)):
combined = self._restore_dim_order(combined)
return combined
def reduce(self, func, dim=None, axis=None, keep_attrs=False,
shortcut=True, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither are supplied, then
`func` is calculated over all dimension for each group item.
keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_array(ar):
return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)
return self.apply(reduce_array, shortcut=shortcut)
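# Hedged example (assumed names): ``arr.groupby('letters').reduce(np.mean, dim='x')``
# collapses each group with the given numpy function; the reduce methods injected
# below (e.g. ``.mean()``) are convenience wrappers around this same code path.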
ops.inject_reduce_methods(DataArrayGroupBy)
ops.inject_binary_ops(DataArrayGroupBy)
class DatasetGroupBy(GroupBy, ImplementsDatasetReduce):
def apply(self, func, **kwargs):
"""Apply a function over each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
**kwargs
            Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset
The result of splitting, applying and combining this dataset.
"""
kwargs.pop('shortcut', None) # ignore shortcut if set (for now)
applied = (func(ds, **kwargs) for ds in self._iter_grouped())
combined = self._concat(applied)
result = self._maybe_restore_empty_groups(combined)
return result
def _concat(self, applied):
applied_example, applied = peek_at(applied)
concat_dim, positions = self._infer_concat_args(applied_example)
combined = concat(applied, concat_dim, positions=positions)
return combined
def reduce(self, func, dim=None, keep_attrs=False, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither are supplied, then
`func` is calculated over all dimension for each group item.
keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_dataset(ds):
return ds.reduce(func, dim, keep_attrs, **kwargs)
return self.apply(reduce_dataset)
def assign(self, **kwargs):
"""Assign data variables by group.
See also
--------
Dataset.assign
"""
return self.apply(lambda ds: ds.assign(**kwargs))
ops.inject_reduce_methods(DatasetGroupBy)
ops.inject_binary_ops(DatasetGroupBy)
|
apache-2.0
| 8,235,435,103,712,422,000 | 37.478927 | 81 | 0.590959 | false |
forkbong/qutebrowser
|
qutebrowser/config/configexc.py
|
1
|
5505
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Exceptions related to config parsing."""
import dataclasses
from typing import Any, Mapping, Optional, Sequence, Union
from qutebrowser.utils import usertypes, log
class Error(Exception):
"""Base exception for config-related errors."""
class NoAutoconfigError(Error):
"""Raised when this option can't be set in autoconfig.yml."""
def __init__(self, name: str) -> None:
super().__init__("The {} setting can only be set in config.py!"
.format(name))
class BackendError(Error):
"""Raised when this setting is unavailable with the current backend."""
def __init__(
self, name: str,
backend: usertypes.Backend,
raw_backends: Optional[Mapping[str, bool]]
) -> None:
if raw_backends is None or not raw_backends[backend.name]:
msg = ("The {} setting is not available with the {} backend!"
.format(name, backend.name))
else:
msg = ("The {} setting needs {} with the {} backend!"
.format(name, raw_backends[backend.name], backend.name))
super().__init__(msg)
class NoPatternError(Error):
"""Raised when the given setting does not support URL patterns."""
def __init__(self, name: str) -> None:
super().__init__("The {} setting does not support URL patterns!"
.format(name))
class ValidationError(Error):
"""Raised when a value for a config type was invalid.
Attributes:
value: Config value that triggered the error.
msg: Additional error message.
"""
def __init__(self, value: Any, msg: Union[str, Exception]) -> None:
super().__init__("Invalid value '{}' - {}".format(value, msg))
self.option = None
class KeybindingError(Error):
"""Raised for issues with keybindings."""
class NoOptionError(Error):
"""Raised when an option was not found."""
def __init__(self, option: str, *,
deleted: bool = False,
renamed: str = None) -> None:
if deleted:
assert renamed is None
suffix = ' (this option was removed from qutebrowser)'
elif renamed is not None:
suffix = ' (this option was renamed to {!r})'.format(renamed)
else:
suffix = ''
super().__init__("No option {!r}{}".format(option, suffix))
self.option = option
@dataclasses.dataclass
class ConfigErrorDesc:
"""A description of an error happening while reading the config.
Attributes:
text: The text to show.
exception: The exception which happened.
traceback: The formatted traceback of the exception.
"""
text: str
exception: Union[str, Exception]
traceback: Optional[str] = None
def __str__(self) -> str:
if self.traceback:
return '{} - {}: {}'.format(self.text,
self.exception.__class__.__name__,
self.exception)
return '{}: {}'.format(self.text, self.exception)
def with_text(self, text: str) -> 'ConfigErrorDesc':
"""Get a new ConfigErrorDesc with the given text appended."""
return self.__class__(text='{} ({})'.format(self.text, text),
exception=self.exception,
traceback=self.traceback)
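# Illustrative sketch (hypothetical values, not taken from qutebrowser itself):
#   desc = ConfigErrorDesc("While parsing 'bindings.commands'", ValueError("bad key"))
#   str(desc)                      ->  "While parsing 'bindings.commands': bad key"
#   desc.with_text("line 3").text  ->  "While parsing 'bindings.commands' (line 3)"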
class ConfigFileErrors(Error):
"""Raised when multiple errors occurred inside the config."""
def __init__(self,
basename: str,
errors: Sequence[ConfigErrorDesc], *,
fatal: bool = False) -> None:
super().__init__("Errors occurred while reading {}:\n{}".format(
basename, '\n'.join(' {}'.format(e) for e in errors)))
self.basename = basename
self.errors = errors
self.fatal = fatal
for err in errors:
if err.traceback:
log.config.info(err.traceback)
def to_html(self) -> str:
"""Get the error texts as a HTML snippet."""
from qutebrowser.utils import jinja # circular import
template = jinja.environment.from_string("""
Errors occurred while reading {{ basename }}:
<ul>
{% for error in errors %}
<li>
<b>{{ error.text }}</b>: {{ error.exception }}
{% if error.traceback != none %}
<pre>
""".rstrip() + "\n{{ error.traceback }}" + """
</pre>
{% endif %}
</li>
{% endfor %}
</ul>
""")
return template.render(basename=self.basename, errors=self.errors)
|
gpl-3.0
| -2,327,407,455,753,032,700 | 30.820809 | 75 | 0.581471 | false |
panda4life/idpserver
|
mysite/idp/plotting.py
|
1
|
3702
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 30 16:43:00 2014
@author: jahad
"""
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import os
def phasePlot(fp,fm,seqname,saveAs):
if(os.path.exists(saveAs)):
os.remove(saveAs)
for x,y,label in zip(fp,fm,seqname):
plt.scatter(x,y,marker='.',color='Black')
plt.annotate(label,xy=(x+.01,y+.01))
reg1, = plt.fill([0,0,.25],[0,.25,0],color = 'Chartreuse',alpha=.75)
reg2, = plt.fill([0,0,.35,.25],[.25,.35,0,0],color = 'MediumSeaGreen',alpha=.75)
reg3, = plt.fill([0,.35,.65,.35],[.35,.65,.35,0],color = 'DarkGreen',alpha=.75)
reg4, = plt.fill([0,0,.35],[.35,1,.65],color = 'Red',alpha=.75)
reg5, = plt.fill([.35,.65,1],[0,.35,0],color = 'Blue',alpha=.75)
plt.ylim([0,1])
plt.xlim([0,1])
plt.xlabel('f+')
plt.ylabel('f-')
plt.title('Phase Diagram')
fontP = FontProperties()
fontP.set_size('x-small')
plt.legend([reg1,reg2,reg3,reg4,reg5],
['Weak Polyampholytes & Polyelectrolytes:\nGlobules & Tadpoles',
'Boundary Region',
'Strong Polyampholytes:\nCoils, Hairpins, Chimeras',
'Negatively Charged Strong Polyelectrolytes:\nSwollen Coils',
'Positively Charged Strong Polyelectrolytes:\nSwollen Coils'],
prop = fontP)
plt.savefig(saveAs,dpi=200)
plt.close()
return plt
def testPhasePlot():
graph = phasePlot([.65,.32,.15],[.34,.21,.42],['derp1','harro','nyan'],'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\test.png')
def testPhasePlotNull():
graph = phasePlot([],[],[],'/work/jahad/IDP_patterning/idpserver/mysite/output/test.png')
import computation as comp
def NCPRPlot(sequence, bloblen, saveAs):
if(not sequence is None):
data = sequence.NCPRdist(bloblen)
plt.plot(data[0,:], data[1,:])
else:
plt.plot([],[])
plt.xlim([0,50])
plt.title('NCPR Distribution')
plt.xlabel('Blob Index')
plt.ylabel('NCPR')
plt.ylim([-1.1,1.1])
plt.savefig(saveAs, dpi=200)
plt.close()
return plt
def testNCPRPlot():
graph = NCPRPlot(comp.Sequence('EEEEEEKKKKEKEKEKEKEKEEEEEEEKKKKKKEKEKEKEKEKEKEKGGGGGGKEKEKE'),5, 'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\testNCPR.png')
def SigmaPlot(sequence, bloblen, saveAs):
if(not sequence is None):
data = sequence.Sigmadist(bloblen)
plt.plot(data[0,:], data[1,:])
else:
plt.plot([],[])
plt.xlim([0,50])
plt.title('Sigma Distribution')
plt.xlabel('Blob Index')
plt.ylabel('Sigma')
plt.ylim([-.1,1.1])
plt.savefig(saveAs, dpi=200)
plt.close()
return plt
def testSigmaPlot():
graph = SigmaPlot(comp.Sequence('EEEEEEKKKKEKEKEKEKEKEEEEEEEKKKKKKEKEKEKEKEKEKEKGGGGGGKEKEKE'),5, 'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\testSigma.png')
def HydroPlot(sequence, bloblen, saveAs):
if(not sequence is None):
data = sequence.Hydrodist(bloblen)
plt.plot(data[0,:], data[1,:])
else:
plt.plot([],[])
plt.xlim([0,50])
plt.title('Hydropathy Distribution')
plt.xlabel('Blob Index')
plt.ylabel('Hydropathy')
plt.savefig(saveAs, dpi=200)
plt.close()
return plt
def testHydroPlot():
graph = HydroPlot(comp.Sequence('EEEEEEKKKKEKEKEKEKEKEEEEEEEKKKKKKEKEKEKEKEKEKEKGGGGGGKEKEKE'),5, 'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\testHydro.png')
testNCPRPlot()
testSigmaPlot()
testHydroPlot()
|
gpl-3.0
| 7,079,705,620,649,945,000 | 34.605769 | 187 | 0.615613 | false |
mzszym/oedes
|
oedes/__init__.py
|
1
|
1135
|
# -*- coding: utf-8; -*-
#
# oedes - organic electronic device simulator
# Copyright (C) 2017-2018 Marek Zdzislaw Szymanski (marek@marekszymanski.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from sparsegrad.impl import multipledispatch
from . import ad
from . import functions
from . import fvm
from . model import *
from . solver import *
from . context import *
from . param import *
from . sensitivity import *
from . import models
from . import testing
from . import optical
from . import logs
from ._version import *
from numpy.testing import Tester
test = Tester().test
|
agpl-3.0
| -7,962,736,429,399,198,000 | 31.428571 | 77 | 0.755066 | false |
iut-ibk/P8-WSC-GUI
|
3dparty/Editra/tests/unittests/testSyntaxDataBase.py
|
1
|
2697
|
###############################################################################
# Name: testSyntaxDataBase.py #
# Purpose: Unit tests for syntax.syndata Base Class #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2010 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""Unittest cases for testing syndata.SyntaxDataBase class"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: testSyntaxDataBase.py 63836 2010-04-03 06:16:55Z CJP $"
__revision__ = "$Revision: 63836 $"
#-----------------------------------------------------------------------------#
# Imports
import unittest
import wx
import wx.stc
# Module to test
import syntax.syndata as syndata
#-----------------------------------------------------------------------------#
# Test Class
class SyntaxDataBaseTest(unittest.TestCase):
def setUp(self):
self.data = syndata.SyntaxDataBase()
def tearDown(self):
pass
#---- Test Cases ----#
def testCommentPattern(self):
self.assertTrue(isinstance(self.data.CommentPattern, list))
self.assertTrue(self.data.CommentPattern == self.data.GetCommentPattern())
def testKeywords(self):
self.assertTrue(isinstance(self.data.Keywords, list))
self.assertTrue(self.data.Keywords == self.data.GetKeywords())
def testLangId(self):
self.assertTrue(isinstance(self.data.LangId, int))
self.assertTrue(self.data.LangId == self.data.GetLangId())
def testLexer(self):
self.assertTrue(isinstance(self.data.Lexer, int))
lexers = [ getattr(wx.stc, item) for item in dir(wx.stc)
if item.startswith('STC_LEX') ]
self.assertTrue(self.data.Lexer in lexers)
def testProperties(self):
self.assertTrue(isinstance(self.data.Properties, list))
self.assertTrue(self.data.Properties == self.data.GetProperties())
def testSyntaxSpec(self):
self.assertRaises(NotImplementedError, self.data.GetSyntaxSpec)
def testRegisterFeature(self):
def foo():
pass
self.data.RegisterFeature('foo', foo)
self.assertTrue(self.data.GetFeature('foo') is foo)
def testSetLangId(self):
self.data.SetLangId(10)
self.assertEquals(10, self.data.LangId)
def testSetLexer(self):
self.data.SetLexer(wx.stc.STC_LEX_CPP)
self.assertEquals(wx.stc.STC_LEX_CPP, self.data.GetLexer())
|
gpl-2.0
| 5,417,375,356,736,835,000 | 35.445946 | 82 | 0.54987 | false |
likit/dementia
|
webapp/manage.py
|
1
|
2123
|
#!/usr/bin/env python
import os
from app import create_app, db
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
from werkzeug.security import generate_password_hash
from datetime import datetime
if os.environ.get('FLASK_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
def make_shell_context():
return dict(app=app, db=db)
manager.add_command("shell", Shell(make_context=make_shell_context))
@manager.command
def test(coverage=False):
"""Run the unit tests."""
if coverage and not os.environ.get('FLASK_COVERAGE'):
import sys
os.environ['FLASK_COVERAGE'] = '1'
os.execvp(sys.executable, [sys.executable] + sys.argv)
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('Coverage summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
@manager.command
def initdb():
"""Init the database."""
db.create_all()
# db.drop_collection('users')
# db.drop_collection('form1')
# password = generate_password_hash('testpass')
# admin_doc = {
# 'username': 'admin',
# 'email': 'admin@example.com',
# 'password': password,
# 'name': 'Likit',
# 'lastname': 'Preeyanon',
# 'organization': 'MUMT',
# 'role': 'admin',
# 'province': 'all',
# 'verified': True,
# 'create_date_time': datetime.today(),
# }
# db.users.insert(admin_doc, safe=True)
if __name__ == '__main__':
manager.run()
|
gpl-2.0
| -6,631,071,479,762,149,000 | 28.901408 | 68 | 0.597739 | false |
robwarm/gpaw-symm
|
doc/tutorials/fxc_correlation/gs_diamond.py
|
1
|
1096
|
from ase import *
from ase.lattice import bulk
from ase.dft import monkhorst_pack
from ase.parallel import paropen
from gpaw import *
from gpaw.wavefunctions.pw import PW
from gpaw.xc.exx import EXX
# Monkhorst-Pack grid shifted to be gamma centered
k = 8
kpts = monkhorst_pack([k, k, k])
kpts += [1. / (2 * k), 1. / ( 2 * k), 1. / (2 * k)]
cell = bulk('C', 'fcc', a=3.553).get_cell()
a = Atoms('C2', cell=cell, pbc=True,
scaled_positions=((0, 0, 0), (0.25, 0.25, 0.25)))
calc = GPAW(mode=PW(600),
xc='PBE',
occupations=FermiDirac(width=0.01),
convergence={'density': 1.e-6},
kpts=kpts,
txt='diamond_pbe.txt',
)
a.set_calculator(calc)
E_pbe = a.get_potential_energy()
exx = EXX(calc, txt='diamond_exx.txt')
exx.calculate()
E_hf = exx.get_total_energy()
import numpy as np
E_C = np.loadtxt('PBE_HF_C.dat')
f = paropen('PBE_HF_diamond.dat', 'w')
print >> f, 'PBE: ', E_pbe / 2 - E_C[0]
print >> f, 'HF: ', E_hf / 2 - E_C[1]
f.close()
calc.diagonalize_full_hamiltonian()
calc.write('diamond.gpw', mode='all')
|
gpl-3.0
| 2,567,397,908,081,199,600 | 25.095238 | 59 | 0.601277 | false |
hdemeyer/king-phisher
|
king_phisher/version.py
|
1
|
3552
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/version.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import os
import subprocess
import smoke_zephyr.utilities
def get_revision():
"""
Retrieve the current git revision identifier. If the git binary can not be
found or the repository information is unavailable, None will be returned.
:return: The git revision tag if it's available.
:rtype: str
"""
git_bin = smoke_zephyr.utilities.which('git')
if not git_bin:
return None
proc_h = subprocess.Popen(
(git_bin, 'rev-parse', 'HEAD'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=os.path.dirname(os.path.abspath(__file__))
)
rev = proc_h.stdout.read().strip()
proc_h.wait()
if not len(rev):
return None
return rev.decode('utf-8')
revision = get_revision()
"""The git revision identifying the latest commit if available."""
version_info = collections.namedtuple('version_info', ('major', 'minor', 'micro'))(1, 8, 0)
"""A tuple representing the version information in the format ('major', 'minor', 'micro')"""
version_label = 'beta'
"""A version label such as alpha or beta."""
version = "{0}.{1}.{2}".format(version_info.major, version_info.minor, version_info.micro)
"""A string representing the full version information."""
# distutils_version is compatible with distutils.version classes
distutils_version = version
"""A string suitable for being parsed by :py:mod:`distutils.version` classes."""
if version_label:
version += '-' + version_label
if revision:
version += " (rev: {0})".format(revision[:12])
distutils_version += version_label[0]
if version_label[-1].isdigit():
distutils_version += version_label[-1]
else:
distutils_version += '0'
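# Illustrative note (editor's addition): with the values above, version_info
# (1, 8, 0) and version_label 'beta' produce version strings such as
# '1.8.0-beta' (optionally suffixed with ' (rev: ...)') and a distutils
# compatible string of '1.8.0b0'.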
rpc_api_version = collections.namedtuple('rpc_api_version', ('major', 'minor'))(5, 5)
"""
A tuple representing the local version of the RPC API for use with compatibility
checks. The major version is incremented when backwards incompatible changes are
made and the minor version is incremented when backwards compatible changes are
made.
"""
|
bsd-3-clause
| 2,827,822,948,980,709,400 | 36.389474 | 92 | 0.739583 | false |
Vito2015/tcc3-portal
|
tcc3portal/api/users.py
|
1
|
1150
|
# coding:utf-8
"""
tcc3portal.api.users
~~~~~~~~~~~~~~~~~~~~
User endpoints.
:copyright: (c) 2015 by Vito.
:license: GNU, see LICENSE for more details.
"""
from flask import Blueprint, current_app
from flask_login import login_required
# from flask import json
# from . import route
from ..tcc_core import UserProfile, Q
bp = Blueprint('users', __name__, url_prefix='/users')
@bp.route('/')
@login_required
def show_list():
"""Return the user instance of the currently authenticated user."""
print(current_app)
# a = current_user._get_current_object()
# ret = mongo.db.users.find().sort([("_id", 1)])
ret = UserProfile.objects().all().order_by('+nick_name') # '-name' or 'name'
# ret = list(map(lambda x: x, ret))
# return JSONEncoder().default(ret)
return ret.to_json()
# @route(bp, '/<user_name>')
@bp.route('/<user_name>')
def show_user(user_name):
"""Return a user instance."""
# ret = mongo.db.users.find_one_or_404({"name": user_name})
ret = UserProfile.objects(Q(nick_name=user_name) | Q(email=user_name))
# return JSONEncoder().default(ret)
return ret.to_json()
|
gpl-2.0
| 2,390,627,513,436,331,000 | 27.75 | 81 | 0.629565 | false |
momahajogami/Prime-Numbers
|
practical_sieve.py
|
1
|
1165
|
def primes_in_block(small_primes, L, R, B):
# Algorithm 3.2.1 in Prime Numbers, A Computational Perspective.
# p. 122
# 0. Choose R>L even,
# L > P := int_sqrt(R),
# B|R-L
primes_in_range = []
# 1. Initialize offsets.
primes_with_offsets = []
for p in small_primes:
        q = -(L+1+p)/2 % p
primes_with_offsets.append((p,q))
# 2. Process Blocks
T = L
while (T < R):
block = [1]*B
next_pwo = []
for (p,q) in primes_with_offsets:
j = q
while j<B:
block[j] = 0
j = j + p
# This is the way to move the offset over one block:
q = (q-B)%p
next_pwo.append((p,q))
for j in range(B):
if block[j] == 1:
primes_in_range.append(T + 2*j + 1)
T = T+2*B
primes_with_offsets = next_pwo
return(primes_in_range)
odd_p_under_100 = [3,5,7]+primes_in_block([3,5,7],10,100,10)
odd_p_under_10000 = primes_in_block(odd_p_under_100,100,10000,100)
primes_under_100 = [2]+odd_p_under_100
primes_under_10000 = [2] + odd_p_under_10000
|
mit
| 5,651,048,751,521,574,000 | 28.125 | 68 | 0.499571 | false |
cpcloud/ibis
|
ibis/pandas/tests/test_core.py
|
1
|
4872
|
from typing import Any
import pandas as pd
import pandas.util.testing as tm
import pytest
from multipledispatch.conflict import ambiguities
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
from ibis.pandas.client import PandasClient
from ibis.pandas.core import is_computable_input
from ibis.pandas.dispatch import execute_node, post_execute, pre_execute
pytestmark = pytest.mark.pandas
@pytest.fixture
def dataframe():
return pd.DataFrame(
{
'plain_int64': list(range(1, 4)),
'plain_strings': list('abc'),
'dup_strings': list('dad'),
}
)
@pytest.fixture
def core_client(dataframe):
return ibis.pandas.connect({'df': dataframe})
@pytest.fixture
def ibis_table(core_client):
return core_client.table('df')
@pytest.mark.parametrize('func', [execute_node, pre_execute, post_execute])
def test_no_execute_ambiguities(func):
assert not ambiguities(func.funcs)
def test_from_dataframe(dataframe, ibis_table, core_client):
t = ibis.pandas.from_dataframe(dataframe)
result = t.execute()
expected = ibis_table.execute()
tm.assert_frame_equal(result, expected)
t = ibis.pandas.from_dataframe(dataframe, name='foo')
expected = ibis_table.execute()
tm.assert_frame_equal(result, expected)
client = core_client
t = ibis.pandas.from_dataframe(dataframe, name='foo', client=client)
expected = ibis_table.execute()
tm.assert_frame_equal(result, expected)
def test_pre_execute_basic():
"""
Test that pre_execute has intercepted execution and provided its own
scope dict
"""
@pre_execute.register(ops.Add)
def pre_execute_test(op, *clients, scope=None, **kwargs):
return {op: 4}
one = ibis.literal(1)
expr = one + one
result = ibis.pandas.execute(expr)
assert result == 4
del pre_execute.funcs[(ops.Add,)]
pre_execute.reorder()
pre_execute._cache.clear()
def test_execute_parameter_only():
param = ibis.param('int64')
result = ibis.pandas.execute(param, params={param: 42})
assert result == 42
def test_missing_data_sources():
t = ibis.table([('a', 'string')])
expr = t.a.length()
with pytest.raises(com.UnboundExpressionError):
ibis.pandas.execute(expr)
def test_missing_data_on_custom_client():
class MyClient(PandasClient):
def table(self, name):
return ops.DatabaseTable(
name, ibis.schema([('a', 'int64')]), self
).to_expr()
con = MyClient({})
t = con.table('t')
with pytest.raises(
NotImplementedError,
match=(
'Could not find signature for execute_node: '
'<DatabaseTable, MyClient>'
),
):
con.execute(t)
def test_post_execute_called_on_joins(dataframe, core_client, ibis_table):
count = [0]
@post_execute.register(ops.InnerJoin, pd.DataFrame)
def tmp_left_join_exe(op, lhs, **kwargs):
count[0] += 1
return lhs
left = ibis_table
right = left.view()
join = left.join(right, 'plain_strings')[left.plain_int64]
result = join.execute()
assert result is not None
assert not result.empty
assert count[0] == 1
def test_is_computable_input():
class MyObject:
def __init__(self, value: float) -> None:
self.value = value
def __getattr__(self, name: str) -> Any:
return getattr(self.value, name)
def __hash__(self) -> int:
return hash((type(self), self.value))
def __eq__(self, other):
return (
isinstance(other, type(self))
and isinstance(self, type(other))
and self.value == other.value
)
def __float__(self) -> float:
return self.value
@execute_node.register(ops.Add, int, MyObject)
def add_int_my_object(op, left, right, **kwargs):
return left + right.value
# This multimethod must be implemented to play nicely with other value
# types like columns and literals. In other words, for a custom
# non-expression object to play nicely it must somehow map to one of the
# types in ibis/expr/datatypes.py
@dt.infer.register(MyObject)
def infer_my_object(_, **kwargs):
return dt.float64
@is_computable_input.register(MyObject)
def is_computable_input_my_object(_):
return True
one = ibis.literal(1)
two = MyObject(2.0)
assert is_computable_input(two)
three = one + two
four = three + 1
result = ibis.pandas.execute(four)
assert result == 4.0
del execute_node.funcs[ops.Add, int, MyObject]
execute_node.reorder()
execute_node._cache.clear()
del dt.infer.funcs[(MyObject,)]
dt.infer.reorder()
dt.infer._cache.clear()
|
apache-2.0
| 1,048,607,088,477,634,700 | 25.769231 | 76 | 0.634442 | false |
k4cg/nichtparasoup
|
examples/nichtparasoup-imagecrawler-plugin/tests/test_10_nichtparasoup_placeholders/test_dummyimage.py
|
1
|
4465
|
import unittest
from typing import Type
from nichtparasoup_placeholders import DummyImage
from nichtparasoup.imagecrawler import BaseImageCrawler
from nichtparasoup.testing.imagecrawler import ImageCrawlerLoaderTest
_DUMMYIMAGE_RIGHT_CONFIG = {'width': 800, 'height': 600}
class DummyImageConfigCorrect(unittest.TestCase):
def test__check_config_right_value(self) -> None:
# arrange
config_in = _DUMMYIMAGE_RIGHT_CONFIG.copy()
# act
config_out = DummyImage.check_config(config_in)
# assert
self.assertDictEqual(config_in, config_out)
class DummyImageConfigWidthTest(unittest.TestCase):
def setUp(self) -> None:
self._dummyimage_right_config_wo_width = _DUMMYIMAGE_RIGHT_CONFIG.copy()
del self._dummyimage_right_config_wo_width["width"]
def tearDown(self) -> None:
del self._dummyimage_right_config_wo_width
def test__check_config_missing_value(self) -> None:
# assert
with self.assertRaises(KeyError):
DummyImage.check_config(self._dummyimage_right_config_wo_width)
def test__check_config_wrong_type(self) -> None:
wrong_types = [None, True, "", [], (), {}, self] # type: ignore
for wrong_type in wrong_types:
# arrange
config_in = self._dummyimage_right_config_wo_width
config_in["width"] = wrong_type # type: ignore
# assert
with self.assertRaises(TypeError, msg=repr(config_in)):
DummyImage.check_config(config_in)
def test__check_config_wrong_value(self) -> None:
wrong_values = [0, -1]
for wrong_value in wrong_values:
# arrange
config_in = self._dummyimage_right_config_wo_width
config_in["width"] = wrong_value
# assert
with self.assertRaises(ValueError, msg=repr(config_in)):
DummyImage.check_config(config_in)
class DummyImageConfigHeightTest(unittest.TestCase):
def setUp(self) -> None:
self._dummyimage_right_config_wo_height = _DUMMYIMAGE_RIGHT_CONFIG.copy()
del self._dummyimage_right_config_wo_height["height"]
def tearDown(self) -> None:
del self._dummyimage_right_config_wo_height
def test__check_config_missing_value(self) -> None:
# assert
with self.assertRaises(KeyError):
DummyImage.check_config(self._dummyimage_right_config_wo_height)
def test__check_config_wrong_type(self) -> None:
wrong_types = [None, True, "", [], (), {}, self] # type: ignore
for wrong_type in wrong_types:
# arrange
config_in = self._dummyimage_right_config_wo_height
config_in["height"] = wrong_type # type: ignore
# assert
with self.assertRaises(TypeError, msg=repr(config_in)):
DummyImage.check_config(config_in)
def test__check_config_wrong_value(self) -> None:
wrong_values = [0, -1]
for wrong_value in wrong_values:
# arrange
config_in = self._dummyimage_right_config_wo_height
config_in["height"] = wrong_value
# assert
with self.assertRaises(ValueError, msg=repr(config_in)):
DummyImage.check_config(config_in)
class DummyImageCrawlTest(unittest.TestCase):
def test_crawl(self) -> None:
# arrange
crawler = DummyImage(**_DUMMYIMAGE_RIGHT_CONFIG)
# act
images_crawled = crawler.crawl()
images_crawled_len = len(images_crawled)
image_crawled = images_crawled.pop() if images_crawled_len else None
# assert
self.assertEqual(images_crawled_len, crawler._BUNCH, "crawler did not finish")
if image_crawled:
self.assertTrue(image_crawled.is_generic, 'this is not generic')
class DummyImageDescriptionTest(unittest.TestCase):
def test_description_config(self) -> None:
# act
description = DummyImage.info()
# assert
assert isinstance(description.config, dict)
for config_key in _DUMMYIMAGE_RIGHT_CONFIG.keys():
self.assertIn(config_key, description.config)
class DummyImageLoaderTest(ImageCrawlerLoaderTest):
@property
def ic_name(self) -> str:
return "DummyImage"
@property
def ic_class(self) -> Type[BaseImageCrawler]:
return DummyImage
def test_loader(self) -> None:
self.check()
|
mit
| -6,405,216,303,146,802,000 | 33.346154 | 86 | 0.632027 | false |
porridge/apt-forktracer
|
lib/apt_forktracer/tests/test_reporter.py
|
1
|
2589
|
#!/usr/bin/python3
# apt-forktracer - a utility for managing package versions
# Copyright (C) 2008,2010,2019 Marcin Owsiany <porridge@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import unittest
from apt_forktracer.testlib import test_helper
from apt_forktracer.reporter import Reporter
from apt_forktracer.status import Status
class TestReporter(test_helper.MoxTestCase):
def setUp(self):
super(TestReporter, self).setUp()
self.reporter = Reporter()
def testFormattingNoCandidateVersion(self):
v1 = self.struct()
v1.string = '1.2'
status = Status('apackage', v1, None)
report = self.reporter.format(status)
self.assertContains(report, '1.2->)')
self.assertNotContains(report, '[')
def testFormattingNoCurrentVersion(self):
v1 = self.struct()
v1.string = '1.2'
status = Status('apackage', None, v1)
report = self.reporter.format(status)
self.assertContains(report, '(->1.2')
self.assertNotContains(report, '[')
def testFormatting(self):
v1 = self.struct()
v1.string = '1.2'
v2 = self.struct()
v2.string = '1.3'
v3, v4 = self.struct(), self.struct()
v3.string = '1.2.3'
v4.string = 'x.y.z'
status = Status('apackage', v1, v2, {'Debian': [v3, v4], 'another origin': [v3, v4]})
report = self.reporter.format(status)
self.assertContains(report, 'apackage (1.2->1.3) ')
self.assertContains(report, ' [Debian: 1.2.3 x.y.z]')
self.assertContains(report, ' [another origin: 1.2.3 x.y.z]')
def testFormattingSameVersion(self):
v = self.struct()
v.string = '1.2'
status = Status('apackage', v, v)
report = self.reporter.format(status)
self.assertNotContains(report, '[')
self.assertContains(report, 'apackage (1.2)')
def testReporting(self):
"""Since this should print to stdout, we don't call it, just check the method's there."""
mock_status = self.struct()
self.assertTrue(self.reporter.report)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 3,654,835,441,928,508,000 | 34.465753 | 91 | 0.713403 | false |
rafaellehmkuhl/OpenCV-Python-GUI
|
CvPyGui/PlotContainer.py
|
1
|
2407
|
import pandas as pd
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QWidget, QLabel, QHBoxLayout,
QVBoxLayout, QPushButton, QSlider,
QComboBox)
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from .FilterCvQtContainer import Filter
import random
class SinglePlotContainer(QWidget):
num_plots = 0
def __init__(self, parent=None):
super().__init__()
self.num_plots += 1
self.variable_df = pd.DataFrame()
self.figure = Figure() # don't use matplotlib.pyplot at all!
self.canvas = FigureCanvas(self.figure)
self.hLayout = QHBoxLayout(self)
self.dataConfigColumn = QVBoxLayout()
self.filtersColumn = QVBoxLayout()
self.hLayout.addLayout(self.dataConfigColumn)
self.hLayout.addWidget(self.canvas)
self.hLayout.addLayout(self.filtersColumn)
self.comboLoadVariable = QComboBox()
self.dataConfigColumn.addWidget(self.comboLoadVariable)
self.filter1 = Filter('Moving Average', 3, 30, 5, 1)
self.filtersColumn.addWidget(self.filter1)
# drawEvent = self.figure.canvas.mpl_connect('draw', self.updatePlot)
self.plotRandom()
def connectButtons(self):
self.comboLoadVariable.activated[str].connect(self.loadVariable)
def loadVariable(self, variable):
self.variable_df = self.parent().parent().original_df[variable]
self.plot()
def plot(self):
if self.num_plots != 0:
self.axes = self.figure.add_subplot(111, sharex=self.parent().parent().plots[0].axes)
else:
self.axes = self.figure.add_subplot(111)
self.axes.clear()
self.axes.plot(self.variable_df, '-')
self.canvas.draw()
def updatePlot(self):
ymax,ymin = self.axes.get_ylim()
self.axes.clear()
self.axes.set_ylim(ymax,ymin)
self.axes.plot(self.variable_df, '-')
self.canvas.draw()
def plotRandom(self):
''' plot some random stuff '''
data = [random.random() for i in range(10)]
self.axes = self.figure.add_subplot(111)
self.axes.clear()
self.axes.plot(data, '-')
self.canvas.draw()
|
mit
| -3,096,640,997,540,342,000 | 30.671053 | 97 | 0.641047 | false |
perey/python-gsl
|
tests/test_gsl.py
|
1
|
1597
|
#!/usr/bin/env python3
"""Tests for package-level objects in python-gsl."""
# Copyright © 2016 Timothy Pederick.
#
# Based on the GNU Scientific Library (GSL):
# Copyright © 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
# 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 The GSL Team.
#
# This file is part of python-gsl.
#
# python-gsl is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-gsl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-gsl. If not, see <http://www.gnu.org/licenses/>.
# Standard library imports.
import unittest
# Library to be tested.
import gsl
# Test cases.
class TestComplex(unittest.TestCase):
"""Test the complex number handling in python-gsl."""
def test_tonative(self):
"""Test conversion from Python complex numbers to C."""
a = gsl.gsl_complex((3.0, 0.5))
b = gsl.gsl_complex.from_complex(3.0 + 0.5j)
self.assertEqual(a, b)
def test_fromnative(self):
"""Test conversion from the GSL complex type to Python."""
a = gsl.gsl_complex((3.0, 0.5))
b = 3.0 + 0.5j
self.assertEqual(complex(a), b)
|
gpl-3.0
| -2,551,407,467,596,602,400 | 34.444444 | 78 | 0.688401 | false |
coolharsh55/hdd-indexer
|
hdd_indexer/views.py
|
1
|
19442
|
"""Views for hdd-indexer
/ - homepage
/crawler [GET/POST] - crawler interactions
/loader [GET/POST] - loader interactions
/settings [POST] - settings
/setup [POST] - setup
"""
# TODO: refactor validations into separate methods
# TODO: make validation a method of class
# HDDRoot.validate(path): True on success, False otherwise
# if True, assigns the path to HDDRoot.path
# then we need to call HDDRoot.save() to save the path
# TODO: check if crawler/loader active and send status to homepage
# TODO: documentation for export
# TODO: documentation for organize
import json
from os import path
import re
import logging
log = logging.getLogger('server')
log.info(72 * '-')
log.info('server views loaded')
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from hdd_settings.models import RegistrationKey
from hdd_settings.models import HDDName
from hdd_settings.models import HDDRoot
from hdd_settings.models import MovieFolder
from hdd_settings.models import TMDbKey
from hdd_settings.models import OpenSubKey
from setup import internet_on
from movie_metadata.crawl import crawler_status
from movie_metadata.crawl import start_crawler
from movie_metadata.crawl import stop_crawler
from movie_metadata.load import loader_status
from movie_metadata.load import start_loader
from movie_metadata.load import stop_loader
from movie_metadata.models import Movie
from movie_metadata.export import export as get_export_content
from movie_metadata.organize import organizer_status
from movie_metadata.organize import start_organizer
from movie_metadata.organize import stop_organizer
from movie_metadata.compare import compare
@csrf_exempt
def crawler(request):
"""crawler interactions on /crawler for hdd-indexer
Interactions with crawler using GET and POST
GET:
status(str): status of crawler ON/OFF
POST:
start(str): start the crawler
stop(str): stop the crawler
Args:
request(RequestContext) - passed by Django
Returns:
        response(HttpResponse) - response to GET/POST request
Raises:
None
"""
def response(e_m=None):
"""Response for GET/POST methods on crawler
returns a HTTPResponse with content type json
Args:
e_m(str): error message if any
e_m(None): if no error message
"""
if e_m:
e = True
else:
e = False
payload = {
'status': crawler_status(),
'files_evaluated': crawler_status('FILES_EVALUATED'),
'movies_found': crawler_status('MOVIES_FOUND'),
'movies_added': crawler_status('MOVIES_ADDED'),
'error': e,
'error_message': e_m,
}
log.debug('crawler status: %s' % payload)
return HttpResponse(
json.dumps(payload),
content_type='application/json'
)
# if request is not POST, return error
if request.method == 'GET':
# print 'GET', request.GET
if request.GET.get('status', None):
log.debug('GET: crawler status')
return response()
elif request.method == 'POST':
# print 'POST', request.POST
if request.POST.get('start', None):
log.info('POST: start crawler')
err_msg = start_crawler()
if err_msg:
log.error('start crawler: %s' % err_msg)
return response(err_msg)
return response()
elif request.POST.get('stop', None):
log.info('POST: stop crawler')
err_msg = stop_crawler()
if err_msg:
log.error('stop crawler: %s' % err_msg)
return response(err_msg)
return response()
# 405: Method not allowed
log.error('405: method not allowed')
return HttpResponse(status=405)
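# Editor's sketch (not part of the original module): one way a client could
# drive the /crawler endpoints documented above. The host, port and use of the
# `requests` package are assumptions for illustration; the parameter names
# mirror the GET/POST keys handled by crawler().
def _example_crawler_client(base_url='http://127.0.0.1:8000'):
    import requests
    requests.post(base_url + '/crawler', data={'start': '1'})
    status = requests.get(base_url + '/crawler', params={'status': '1'}).json()
    requests.post(base_url + '/crawler', data={'stop': '1'})
    return status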
@csrf_exempt
def help(request):
"""Help for HDD-indexer
Shows the help page containing help on modules and settings
Args:
request(HttpRequest): request to server
Returns:
render(HttpResponse): shows the help page
Raises:
None
"""
log.debug('help.html served with welcome=False')
return render(
request,
'hdd_indexer/help.html',
{
'welcome': False,
}
)
@csrf_exempt
def loader(request):
"""crawler interactions on /loader for hdd-indexer
Interactions with loader using GET and POST
GET:
status(str): status of loader ON/OFF
POST:
start(str): start the loader
stop(str): stop the loader
Args:
request(RequestContext) - passed by Django
Returns:
        response(HttpResponse) - response to GET/POST request
Raises:
None
"""
def response(e_m=None):
"""Response for GET/POST methods on loader
returns a HTTPResponse with content type json
Args:
e_m(str): error message if any
e_m(None): if no error message
"""
if e_m:
e = True
else:
e = False
# print loader_status('SKIPPED_LIST')
payload = {
'status': loader_status(),
'movies_evaluated': loader_status('MOVIES_EVALUATED'),
'metadata_downloaded': loader_status('METADATA_DOWNLOADED'),
'movies_skipped': loader_status('MOVIES_SKIPPED'),
'skipped_list': loader_status('SKIPPED_LIST'),
'error': e,
'error_message': e_m,
}
log.debug('loader status: %s' % payload)
# print payload
return HttpResponse(
json.dumps(payload),
content_type='application/json'
)
# if request is not POST, return error
if request.method == 'GET':
# print 'GET', request.GET
if request.GET.get('status', None):
log.debug('GET: loader status')
return response()
elif request.method == 'POST':
# print 'POST', request.POST
if request.POST.get('start', None):
log.info('POST: start loader')
if not internet_on():
log.warning('Start Loader: No Internet Connectivity')
return response('Please check your Internet connection!!!')
# print 'starting loader'
err_msg = start_loader()
if err_msg:
log.error('start loader: %s' % err_msg)
return response(err_msg)
# print 'started loader'
return response()
elif request.POST.get('stop', None):
log.info('POST: stop loader')
err_msg = stop_loader()
if err_msg:
log.error('stop loader: %s' % err_msg)
return response(err_msg)
return response()
# 405: Method not allowed
log.error('405: Method not allowed')
return HttpResponse(status=405)
def homepage(request):
"""homepage view for / on hdd-indexer
Serves the homepage at root (/) with index.html
Passes hdd_name, hdd_root, movie_folder, crawler_status
Args:
request(RequestContext) - passed by Django
Returns:
response(Response) - file template to serve
Raises:
None
"""
log.info('served homepage')
return render(
request,
'hdd_indexer/index.html',
{
'hdd_name': HDDName.get_solo(),
'hdd_root': HDDRoot.get_solo(),
'movie_folder': path.join(
HDDRoot.get_solo().path,
MovieFolder.get_solo().relpath,
),
'crawler_status': crawler_status(),
}
)
@csrf_exempt
def settings(request):
"""settings view for / on hdd-indexer
Validates settings sent using POST
POST:
hdd_name(str)
hdd_root(str)
movie_folder(str)
Args:
request(RequestContext) - passed by Django
Returns:
        response(HttpResponse) - response to POST request
Raises:
None
"""
def response(d=True, v=True):
"""Response for POST methods
returns a HTTPResponse with content type json
Args:
d(bool): POST success (JQuery done)
v(bool): POST validation
"""
payload = {
'done': d,
'validation': v,
}
log.debug('settings validation: %s' % payload)
return HttpResponse(
json.dumps(payload),
content_type='application/json'
)
# if request is not POST, return error
if request.method != 'POST':
# 405: Method not allowed
log.error('405: Method not allowed')
return HttpResponse(status=405)
# request for HDD Name
if request.POST.get('hdd_name', None):
hdd_name = request.POST['hdd_name']
log.info('POST: hdd_name: %s' % hdd_name)
pattern = re.compile(r'^[0-9a-zA-z_-]+$')
if pattern.match(hdd_name):
try:
hdd_name_db = HDDName.get_solo()
hdd_name_db.name = hdd_name
hdd_name_db.save()
log.info('hdd_name = %s saved to db' % hdd_name)
return response()
except ValueError:
return response(d=False, v=True)
except TypeError:
return response(d=False, v=False)
except Exception as e:
print e
return response(d=False, v=False)
else:
log.error('%s is a not a valid hdd_name' % hdd_name)
return response(d=False, v=True)
# request for HDD Root
elif request.POST.get('hdd_root', None):
hdd_root = request.POST['hdd_root']
log.info('POST: hdd_root = %s' % hdd_root)
if path.isdir(hdd_root):
hdd_root_db = HDDRoot.get_solo()
hdd_root_db.path = hdd_root
hdd_root_db.save()
log.info('hdd_root = %s saved to db' % hdd_root)
return response()
else:
log.error('%s is not a valid path' % hdd_root)
return response(d=False, v=True)
# request for Movie Folder
elif request.POST.get('movie_folder', None):
movie_folder = request.POST['movie_folder']
log.info('POST: movie_folder = %s' % movie_folder)
hdd_root = HDDRoot.get_solo().path
if not movie_folder.startswith(hdd_root):
log.error('movie_folder does not start with hdd_root')
return response(d=False, v=True)
if not path.isdir(movie_folder):
log.error('movie_folder is not a valid path')
return response(d=False, v=True)
movie_folder = path.relpath(movie_folder, hdd_root)
movie_folder_db = MovieFolder.get_solo()
movie_folder_db.relpath = movie_folder
movie_folder_db.save()
log.info('movie_folder = %s saved to db' % movie_folder)
return response(d=True)
log.error('405: Method not allowed')
return HttpResponse(status=405)
@csrf_exempt
def setup(request):
"""Setup for first-use
"""
if not request.POST:
log.info('served setup page')
return render(
request,
'hdd_indexer/setup.html',
{
'RegistrationKey': RegistrationKey.get_solo().key,
'hdd_name': HDDName.get_solo().name,
'hdd_root': HDDRoot.get_solo().path,
'movie_folder': MovieFolder.get_solo().relpath,
'opensub_id': OpenSubKey.get_solo().uid,
'opensub_key': OpenSubKey.get_solo().key,
'tmdb_key': TMDbKey.get_solo().key,
'error': False,
'err_msg': '',
}
)
error = False
err_msg = 'Validation errors have been found: '
log.info('POST: preferences and settings in setup')
# validations
# registration key
registration_key = request.POST.get('ID', '')
if registration_key:
# make sure that it is valid registration key
registration_key_db = RegistrationKey.get_solo()
registration_key_db.key = registration_key
registration_key_db.save()
log.info('registration key = %s saved to db' % registration_key)
else:
pass
# hdd name
hdd_name = request.POST.get('HDDName', '')
pattern = re.compile(r'^[0-9a-zA-z_-]+$')
if pattern.match(hdd_name):
hdd_name_db = HDDName.get_solo()
hdd_name_db.name = hdd_name
hdd_name_db.save()
log.info('hdd_name: %s saved to db' % hdd_name)
else:
error = True
err_msg = ' '.join(((err_msg, 'HDD Name,')))
log.error('%s is not a valid hdd_name' % hdd_name)
# hdd root
hdd_root = request.POST.get('HDDRoot', '')
if path.exists(hdd_root):
hdd_root_db = HDDRoot.get_solo()
hdd_root_db.path = hdd_root
hdd_root_db.save()
log.info('hdd_root = %s saved to db' % hdd_root)
else:
error = True
err_msg = ' '.join(((err_msg, 'HDD Root,')))
log.error('%s is not a valid path' % hdd_root)
# movie folder
movie_folder = request.POST.get('MovieFolder', '')
log.info('POST: movie_folder = %s' % movie_folder)
if path.exists(movie_folder):
movie_folder_db = MovieFolder.get_solo()
movie_folder_db.relpath = movie_folder
movie_folder_db.save()
log.info('movie_folder = %s saved to db' % movie_folder)
else:
error = True
err_msg = ' '.join((err_msg, 'Movie Folder,'))
log.error('%s is not a valid path' % movie_folder)
# tmdb key
# TODO: check tmdb key is valid
tmdb_key = request.POST.get('TMDB_KEY', '')
log.info('POST: tmdb_key = %s' % tmdb_key)
if len(tmdb_key) >= 5:
tmdb_db = TMDbKey.get_solo()
tmdb_db.key = tmdb_key
tmdb_db.save()
log.info('tmdb_key = %s saved to db' % tmdb_key)
else:
error = True
err_msg = ' '.join(((err_msg, 'TMDb Key,')))
log.error('%s is not a valid tmdb_key' % tmdb_key)
# opensub
# TODO: check opensub key is valid
opensub_id = request.POST.get('OpenSubID', '')
opensub_key = request.POST.get('OpenSubKey', '')
log.info('opensub id:%s key:%s' % (opensub_id, opensub_key))
if opensub_id and opensub_key:
if len(opensub_id) >= 5 and len(opensub_key) >= 5:
opensub_db = OpenSubKey.get_solo()
opensub_db.uid = opensub_id
opensub_db.key = opensub_key
opensub_db.save()
log.info('opensub id:%s key:%s saved to db' % (
opensub_id, opensub_key
))
else:
error = True
err_msg = ' '.join((err_msg, 'OpenSubtitles ID and Key,'))
log.info('opensub id:%s key:%s are not valid' % (
opensub_id, opensub_key
))
if error is False:
log.info('setup complete, redirected to welcome page')
return render(
request,
'hdd_indexer/help.html',
{
'welcome': True,
}
)
log.error('setup input has errors, redirect to setup page')
return render(
request,
'hdd_indexer/setup.html',
{
'RegistrationKey': RegistrationKey,
'hdd_name': hdd_name,
'hdd_root': hdd_root,
'movie_folder': movie_folder,
'opensub_id': opensub_id,
'opensub_key': opensub_key,
'tmdb_key': tmdb_key,
'error': error,
'err_msg': err_msg,
}
)
def export(request):
"""export movies to file and serve it as a downloaded file
"""
if request.method == 'GET':
fields = []
filter = []
order = []
file_format = 'txt'
for key in request.GET.keys():
if key.startswith('content-'):
fields.append(key[8:])
elif key.startswith('filter-'):
filter.append(key[7:])
elif key.startswith('order'):
order.append(request.GET[key][6:])
elif key.startswith('file-format'):
file_format = request.GET[key][12:]
log.info('export request movie.%s with fields=%s, '
'filter=%s, ordering=%s' %
(file_format, fields, filter, order))
content = get_export_content(
Movie, fields=fields, order=order, file_format=file_format)
if content is None:
log.error('export error Http400')
return HttpResponse(status=400)
filename = 'movies.' + file_format
response = HttpResponse(content, content_type='application/zip')
response['Content-Disposition'] = 'inline; filename=%s' % filename
log.info('export request completed: served %s' % filename)
return response
return HttpResponse(status=405)
@csrf_exempt
def organizer(request):
"""
"""
def response(e_m=None):
"""Response for GET/POST methods on loader
returns a HTTPResponse with content type json
Args:
e_m(str): error message if any
e_m(None): if no error message
"""
if e_m:
e = True
else:
e = False
# print loader_status('SKIPPED_LIST')
payload = {
'status': organizer_status(),
'files_evaluated': organizer_status('FILES_EVALUATED'),
'error': e,
'error_message': e_m,
}
log.debug('organizer status: %s' % payload)
# print payload
return HttpResponse(
json.dumps(payload),
content_type='application/json'
)
# if request is not POST, return error
if request.method == 'GET':
# print 'GET', request.GET
if request.GET.get('status', None):
log.debug('GET: organizer status')
return response()
elif request.method == 'POST':
# print 'POST', request.POST
if request.POST.get('start', None):
log.info('POST: start organizer')
# print 'starting loader'
err_msg = start_organizer()
if err_msg:
log.error('start organizer: %s' % err_msg)
return response(err_msg)
# print 'started loader'
return response()
elif request.POST.get('stop', None):
log.info('POST: stop organizer')
err_msg = stop_organizer()
if err_msg:
log.error('stop organizer: %s' % err_msg)
return response(err_msg)
return response()
# 405: Method not allowed
log.error('405: Method not allowed')
return HttpResponse(status=405)
@csrf_exempt
def comparator(request):
"""compare movies across disks
"""
if request.method == 'POST':
if request.FILES.get('c-file', None):
take_list, give_list = compare(request.FILES['c-file'])
return render(
request,
'hdd_indexer/compare.html',
{
'take': take_list,
'give': give_list,
})
log.error('405: Method not allowed')
return HttpResponse(status=405)
|
mit
| 745,216,335,649,311,700 | 29.81141 | 75 | 0.562031 | false |
google/report2bq
|
application/classes/sa360_report_validation/visit.py
|
1
|
1861
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = ['davidharcombe@google.com (David Harcombe)']
from googleapiclient.discovery import Resource
from classes.sa360_report_validation.sa360_field_validator import SA360Validator
class Visit(SA360Validator):
def __init__(self,
sa360_service: Resource = None,
agency: int = None,
advertiser: int = None) -> None:
super().__init__(sa360_service, agency, advertiser)
self.fields = [
"status",
"deviceSegment",
"agency",
"agencyId",
"advertiser",
"advertiserId",
"account",
"accountId",
"accountEngineId",
"accountType",
"campaign",
"campaignId",
"campaignStatus",
"adGroup",
"adGroupId",
"adGroupStatus",
"keywordId",
"keywordMatchType",
"keywordText",
"productTargetId",
"productGroupId",
"ad",
"adId",
"isUnattributedAd",
"inventoryAccountId",
"productId",
"productCountry",
"productLanguage",
"productStoreId",
"productChannel",
"visitId",
"visitSearchQuery",
"visitDate",
"visitTimestamp",
"visitNetworkType",
"visitReferrer",
"visitExternalClickId",
"feedItemId",
"feedId",
"feedType",
]
|
apache-2.0
| -4,312,721,375,928,980,500 | 25.225352 | 80 | 0.634605 | false |
sudhirmishra/edx-dl
|
edx_dl/common.py
|
1
|
4327
|
# -*- coding: utf-8 -*-
"""
Common type definitions and constants for edx-dl
The classes in this module represent the structure of courses in edX. The
structure is:
* A Course contains Sections
* Each Section contains Subsections
* Each Subsection contains Units
Notice that we don't represent the full tree structure for both performance
and UX reasons:
Course -> [Section] -> [SubSection] -> [Unit] -> [Video]
In the script the data structures used are:
1. The data structures to represent the course information:
Course, Section->[SubSection]
2. The data structures to represent the chosen courses and sections:
selections = {Course, [Section]}
3. The data structure of all the downloadable resources, which represents each
   subsection via its URL and the resources that can be extracted from the
   Units it contains:
all_units = {Subsection.url: [Unit]}
4. The units can contain multiple videos:
Unit -> [Video]
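(See the illustrative sketch at the end of this module for how these
structures fit together.)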
"""
class Course(object):
"""
Course class represents course information.
"""
def __init__(self, id, name, url, state):
"""
        @param id: The id of a course in edX is composed of the path
{organization}/{course_number}/{course_run}
@type id: str or None
@param name: Name of the course. The name is taken from course page
h3 header.
@type name: str
@param url: URL of the course.
@type url: str or None
@param state: State of the course. One of the following values:
* 'Not yet'
* 'Started'
@type state: str
"""
self.id = id
self.name = name
self.url = url
self.state = state
class Section(object):
"""
Representation of a section of the course.
"""
def __init__(self, position, name, url, subsections):
"""
@param position: Integer position of the section in the list of
sections. Starts at 1.
@type position: int
@param name: Name of the section.
@type name: str
@param url: URL of the section. None when section contains no
subsections.
@type url: str or None
@param subsections: List of subsections.
@type subsections: [SubSection]
"""
self.position = position
self.name = name
self.url = url
self.subsections = subsections
class SubSection(object):
"""
Representation of a subsection in a section.
"""
def __init__(self, position, name, url):
"""
@param position: Integer position of the subsection in the subsection
list. Starts at 1.
@type position: int
@param name: Name of the subsection.
@type name: str
@param url: URL of the subsection.
@type url: str
"""
self.position = position
self.name = name
self.url = url
class Unit(object):
"""
Representation of a single unit of the course.
"""
def __init__(self, videos, resources_urls):
"""
@param videos: List of videos present in the unit.
@type videos: [Video]
@param resources_urls: List of additional resources that are come along
with the unit. Resources include files with certain extensions
and youtube links.
@type resources_urls: [str]
"""
self.videos = videos
self.resources_urls = resources_urls
class Video(object):
"""
Representation of a single video.
"""
def __init__(self, video_youtube_url, available_subs_url,
sub_template_url, mp4_urls):
"""
@param video_youtube_url: Youtube link (if any).
@type video_youtube_url: str or None
@param available_subs_url: URL to the available subtitles.
@type available_subs_url: str
@param sub_template_url: ???
@type sub_template_url: str
@param mp4_urls: List of URLs to mp4 video files.
@type mp4_urls: [str]
"""
self.video_youtube_url = video_youtube_url
self.available_subs_url = available_subs_url
self.sub_template_url = sub_template_url
self.mp4_urls = mp4_urls
YOUTUBE_DL_CMD = ['youtube-dl', '--ignore-config']
DEFAULT_CACHE_FILENAME = 'edx-dl.cache'
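# Editor's sketch (not part of the original module): a minimal, hedged example
# of how the structures documented at the top of this module fit together.
# All names, URLs and values below are invented purely for illustration.
def _example_course_tree():
    video = Video(video_youtube_url='http://example.com/watch?v=abc123',
                  available_subs_url=None,
                  sub_template_url=None,
                  mp4_urls=['http://example.com/unit1.mp4'])
    unit = Unit(videos=[video], resources_urls=['http://example.com/slides.pdf'])
    subsection = SubSection(position=1, name='Welcome', url='http://example.com/sub1')
    section = Section(position=1, name='Week 1', url='http://example.com/sec1',
                      subsections=[subsection])
    course = Course(id='org/course/run', name='Demo course',
                    url='http://example.com/course', state='Started')
    # The selections and all_units shapes described in the module docstring:
    selections = {course: [section]}
    all_units = {subsection.url: [unit]}
    return selections, all_units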
|
lgpl-3.0
| 7,304,929,588,988,307,000 | 26.916129 | 79 | 0.610354 | false |
uc-cdis/cdis-python-utils
|
cdispyutils/profiling/profiler.py
|
1
|
8064
|
__all__ = ["Profiler"]
from collections import defaultdict
import cProfile
import datetime
import errno
import functools
import os
import time
from werkzeug.contrib.profiler import ProfilerMiddleware
def profile(category, *profiler_args, **profiler_kwargs):
"""
Decorate a function to run a profiler on the execution of that function.
Arguments are passed through to the ``Profiler`` initialization. Most relevant one
would be ``output_style`` which can be set to either "detailed" or "simple". With
"detailed" the profiler saves the complete ``.prof`` file, with "simple" it saves
only a file with the execution time saved as text.
"""
profiler = Profiler(name=_make_timestamp(), *profiler_args, **profiler_kwargs)
def decorator(f):
@functools.wraps(f)
def wrapper(*f_args, **f_kwargs):
return profiler.call(category, f, args=f_args, kwargs=f_kwargs)
return wrapper
return decorator
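# Editor's sketch (not part of the original module): a minimal, hedged example
# of the decorator above. The category and function names are invented; output
# is only written when profiling is enabled (enable=True or the
# ENABLE_PYTHON_PROFILING environment variable set to "true").
def _example_profiled_call():
    @profile("examples")
    def slow_sum(n):
        return sum(range(n))
    # When enabled, this dumps examples/<module>.slow_sum-1.prof under
    # profile/<timestamp>/; when disabled it is just a normal function call.
    return slow_sum(100000)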
class Profiler(object):
"""
Output profiling information for specified function calls and Flask requests.
Enable profiling either by passing ``enable=True`` to the profiler, or setting the
environment variable ``ENABLE_PYTHON_PROFILING`` to ``True``. The profiler is
intended to be used everywhere that profiling *might* be desirable; if enabled it
will actually do the profiling and save the results, and otherwise it will just pass
through function calls at no additional runtime cost (aside from its method call).
The profiler singleton for a flask application saves profiling files into the
directory specified at initialization. All files use the standard format for python
profiling; use ``pstats`` to tabulate the information from one or more files, or a
visualization tool like ``snakeviz``.
Some example usage for a generic flask app, including profiling a couple setup
functions, as well as the application's endpoints:
def app_init(app):
profiler = Profiler(logger=app.logger)
init_functions = [app_register_blueprints, db_init]
for f in init_functions:
profiler.call("init", f, app)
profiler.profile_app(app)
The output for this Flask application might look like this:
profile/
2018-11-30T15:15:36.14/
init/
app_register_blueprints-1.prof
db_init-1.prof
run/
traverse-1.prof
traverse-2.prof
traverse-3.prof
wsgi/
GET.root.000003ms.1543612537.prof
GET._status.000019ms.1543612539.prof
In this example the ``directory`` argument is ``"profile"``, and the ``name`` was
``None`` so it defaults to just a timestamp.
"""
def __init__(
self,
name=None,
logger=None,
enable=False,
output_style="detailed",
directory="profile",
):
name = name or _make_timestamp()
self.directory = os.path.join(directory, name)
self.logger = logger
self.output_style = output_style
self._enable = enable
self._function_counts = defaultdict(lambda: defaultdict(int))
if self.enabled:
if not os.path.isdir(self.directory):
if os.path.isfile(self.directory):
raise EnvironmentError(
"can't save profile output; file already exists: {}".format(
self.directory
)
)
os.makedirs(self.directory, mode=0o744)
if self.logger:
self.logger.info("profiling enabled")
@property
def enabled(self):
"""
Return boolean indicating if the profiler should actually profile, or just pass
through results from any calls it's asked to handle.
"""
return (
self._enable
or os.environ.get("ENABLE_PYTHON_PROFILING", "").lower() == "true"
)
def call(self, category, f, args=None, kwargs=None, output_style=None):
"""
Do a function call and (if the profiler is enabled) save profiling results to
the directory for this category.
Args:
category (str): category to save the result under
f (Callable): function to call
args (Optional[List]): arguments to pass to f call
kwargs (Optional[Dict]): keyword arguments to pass to f call
output_style (Optional[str]):
whether to save complete profile files ("detailed") or only the
execution time ("simple"); defaults to detailed
Return:
exactly the return from calling ``f(*args, **kwargs)``
"""
args = args or []
kwargs = kwargs or {}
if not self.enabled:
return f(*args, **kwargs)
# count the number of times this function is executed in this category, so the
# filenames are kept unique
function_name = "{}.{}".format(f.__module__, f.__name__)
self._function_counts[category][function_name] += 1
output_style = output_style or self.output_style or "detailed"
if self.output_style == "detailed":
profiler = cProfile.Profile()
profiler.enable()
result = f(*args, **kwargs)
profiler.disable()
self._make_profile_category(category)
filename = "{}-{}.prof".format(
function_name, str(self._function_counts[category][function_name])
)
path = os.path.join(self.directory, category, filename)
profiler.dump_stats(path)
return result
elif self.output_style == "simple":
start = time.time()
result = f(*args, **kwargs)
execution_time = time.time() - start
filename = "{}-{}.time".format(
function_name, str(self._function_counts[category][function_name])
)
path = os.path.join(self.directory, category, filename)
# if the file exists already (say we gave the Profiler a directory that
# already exists, and re-ran the same function as the previous run), then
# tick up the counter until we're writing out new files
while os.path.exists(path):
self._function_counts[category][function_name] += 1
filename = "{}-{}.prof".format(
function_name, str(self._function_counts[category][function_name])
)
path = os.path.join(self.directory, category, filename)
with open(path, "w") as output_file:
output_file.write(str(execution_time))
return result
def profile_app(self, app):
"""
Enable WSGI's built-in profiler and include the output in the configured
profiling directory.
"""
if self.enabled:
path = self._make_profile_category("wsgi")
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, profile_dir=path)
def _make_profile_category(self, name):
"""
Add a directory under the profiling directory given at initialization, for
saving a category of results into.
"""
path = os.path.join(self.directory, name)
try:
_mkdir_p(path)
except OSError:
raise EnvironmentError(
"can't save profile output; file already exists: {}".format(path)
)
return path
def _mkdir_p(directory, mode=0o774):
try:
os.makedirs(directory, mode=mode)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(directory):
raise
def _make_timestamp():
"""
Return a timestamp to identify this profiling run.
Output format is: ``2018-11-30T14:51:55.95``.
(Truncate to hundredths of a second.)
"""
return datetime.datetime.now().isoformat()[:-4]
|
apache-2.0
| 6,428,691,702,124,679,000 | 36.16129 | 88 | 0.598586 | false |
jordanpotti/AWSBucketDump
|
AWSBucketDump.py
|
1
|
7239
|
#!/usr/bin/env python
# AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
# It's similar to a subdomain bruteforcer but is made specifically to S3
# buckets and also has some extra features that allow you to grep for
# delicous files as well as download interesting files if you're not
# afraid to quickly fill up your hard drive.
# by Jordan Potti
# @ok_bye_now
from argparse import ArgumentParser
import requests
import xmltodict
import sys
import os
import shutil
import traceback
from queue import Queue
from threading import Thread, Lock
bucket_q = Queue()
download_q = Queue()
grep_list = None
arguments = None
def fetch(url):
print('Fetching ' + url + '...')
response = requests.get(url)
if response.status_code == 403 or response.status_code == 404:
status403(url)
if response.status_code == 200:
if "Content" in response.text:
status200(response, grep_list, url)
def bucket_worker():
while True:
item = bucket_q.get()
try:
fetch(item)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
bucket_q.task_done()
def downloadWorker():
print('Download worker running...')
while True:
item = download_q.get()
try:
downloadFile(item)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
download_q.task_done()
directory_lock = Lock()
def get_directory_lock():
directory_lock.acquire()
def release_directory_lock():
directory_lock.release()
def get_make_directory_return_filename_path(url):
global arguments
bits = url.split('/')
directory = arguments.savedir
for i in range(2,len(bits)-1):
directory = os.path.join(directory, bits[i])
try:
get_directory_lock()
if not os.path.isdir(directory):
os.makedirs(directory)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
finally:
release_directory_lock()
return os.path.join(directory, bits[-1]).rstrip()
interesting_file_lock = Lock()
def get_interesting_file_lock():
interesting_file_lock.acquire()
def release_interesting_file_lock():
interesting_file_lock.release()
def write_interesting_file(filepath):
try:
get_interesting_file_lock()
with open('interesting_file.txt', 'ab+') as interesting_file:
interesting_file.write(filepath.encode('utf-8'))
interesting_file.write('\n'.encode('utf-8'))
finally:
release_interesting_file_lock()
def downloadFile(filename):
global arguments
print('Downloading {}'.format(filename) + '...')
local_path = get_make_directory_return_filename_path(filename)
local_filename = (filename.split('/')[-1]).rstrip()
print('local {}'.format(local_path))
if local_filename =="":
print("Directory..\n")
else:
r = requests.get(filename.rstrip(), stream=True)
if 'Content-Length' in r.headers:
if int(r.headers['Content-Length']) > arguments.maxsize:
print("This file is greater than the specified max size... skipping...\n")
else:
with open(local_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
r.close()
def print_banner():
print('''\nDescription:
AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
It's similar to a subdomain bruteforcer but is made specifically to S3
buckets and also has some extra features that allow you to grep for
delicous files as well as download interesting files if you're not
afraid to quickly fill up your hard drive.
by Jordan Potti
@ok_bye_now''')
def cleanUp():
print("Cleaning up files...")
def status403(line):
print(line.rstrip() + " is not accessible.")
def queue_up_download(filepath):
download_q.put(filepath)
print('Collectable: {}'.format(filepath))
write_interesting_file(filepath)
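# Editor's note: an S3 bucket listing is XML shaped roughly like
#   <ListBucketResult><Contents><Key>path/to/object</Key></Contents>...</ListBucketResult>
# xmltodict turns that into nested dicts/lists, which is why status200() below
# reads ['ListBucketResult']['Contents'] and collects each child's 'Key'.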
def status200(response, grep_list, line):
print("Pilfering "+line.rstrip() + '...')
objects = xmltodict.parse(response.text)
Keys = []
try:
contents = objects['ListBucketResult']['Contents']
if not isinstance(contents, list):
contents = [contents]
for child in contents:
Keys.append(child['Key'])
except KeyError:
pass
for words in Keys:
words = (str(words)).rstrip()
collectable = line+'/'+words
if grep_list is not None and len(grep_list) > 0:
for grep_line in grep_list:
grep_line = (str(grep_line)).rstrip()
if grep_line in words:
queue_up_download(collectable)
break
else:
queue_up_download(collectable)
def main():
global arguments
global grep_list
parser = ArgumentParser()
parser.add_argument("-D", dest="download", required=False, action="store_true", default=False, help="Download files. This requires significant disk space.")
parser.add_argument("-d", dest="savedir", required=False, default='', help="If -D, then -d 1 to create save directories for each bucket with results.")
parser.add_argument("-l", dest="hostlist", required=True, help="")
parser.add_argument("-g", dest="grepwords", required=False, help="Provide a wordlist to grep for.")
parser.add_argument("-m", dest="maxsize", type=int, required=False, default=1024, help="Maximum file size to download.")
parser.add_argument("-t", dest="threads", type=int, required=False, default=1, help="Number of threads.")
if len(sys.argv) == 1:
print_banner()
parser.error("No arguments given.")
parser.print_usage()
sys.exit()
# output parsed arguments into a usable object
arguments = parser.parse_args()
# specify primary variables
if arguments.grepwords:
with open(arguments.grepwords, "r") as grep_file:
grep_content = grep_file.readlines()
grep_list = [g.strip() for g in grep_content]
if arguments.download and arguments.savedir:
print("Downloads enabled (-D), save directories (-d) for each host will be created/used.")
elif arguments.download and not arguments.savedir:
print("Downloads enabled (-D), will be saved to current directory.")
else:
print("Downloads were not enabled (-D), not saving results locally.")
# start up bucket workers
for _ in range(0, arguments.threads):
print('Starting thread...')
t = Thread(target=bucket_worker)
t.daemon = True
t.start()
# start download workers
for _ in range(0, arguments.threads - 1):
t = Thread(target=downloadWorker)
t.daemon = True
t.start()
with open(arguments.hostlist) as f:
for line in f:
bucket = 'http://'+line.rstrip()+'.s3.amazonaws.com'
print('Queuing {}'.format(bucket) + '...')
bucket_q.put(bucket)
bucket_q.join()
if arguments.download:
download_q.join()
cleanUp()
if __name__ == "__main__":
main()
|
mit
| 4,622,916,681,330,770,000 | 29.544304 | 160 | 0.629231 | false |
pzia/keepmydatas
|
src/images_merge_datetree.py
|
1
|
4138
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Compare all photos within a tree with those stored in a date tree (yyyy/mm/dd/*) and remove already existing pictures"""
import KmdImages
import KmdCmd
import PIL.Image as Image
import os, sys
import logging
class KmdImagesMergeDateTree(KmdCmd.KmdCommand):
def extendParser(self):
super(KmdImagesMergeDateTree, self).extendParser()
#Extend parser
self.parser.add_argument('srctree', metavar='</path/to/srctree>', nargs=1, help='Path to images to merges')
self.parser.add_argument('datetree', metavar='</path/to/datetree>', nargs=1, help='Root of a date tree /aaaa/mm/dd')
#FIXME : we could/should add option to move photo file not matching the datetree.
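    # Editor's note (assumed invocation, for illustration only):
    #   python images_merge_datetree.py /path/to/srctree /path/to/datetree
    # The --doit flag checked in run() below appears to come from the KmdCmd
    # base class parser extended above.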
def run(self):
ymd = {} #store path to ymd => list of potential matching files
        # Note: caching the PIL.Images in a dictionary looks like a good idea but is actually a bad one.
        # Keep it simple: just load images when needed, and rely on the file system cache for performance.
for root, _, files in os.walk(self.args.srctree[0]):
#For each folder in the src tree
for name in files:
#for each file in the folder
p = os.path.join(root, name)
                # Is it an image with metadata?
meta = KmdImages.readExivMetadata(p)
if meta:
if "Exif.Photo.DateTimeOriginal" in meta.exif_keys :
d = meta["Exif.Photo.DateTimeOriginal"]
elif "Exif.Photo.DateTime" in meta.exif_keys :
d = meta["Exif.Photo.DateTime"]
else :
#FIXME : we could use the fstat modification time here
continue
#FIXME : Should we test the final date ?
try :
pathtest = os.path.join(self.args.datetree[0], d.value.strftime("%Y/%m/%d"))
except :
logging.warning("Wrong datetime value %s in %s", d.value, p)
continue
if pathtest not in ymd :
ymd[pathtest] = []
ymd[pathtest].append(p)
else :
logging.warning("%s has no metadatas, doesn't seem to be a photo !", p)
logging.info("%d differents dates found", len(ymd))
for pathtest in ymd :
#KmdImage compare with folder in datetree
if not os.path.exists(pathtest):
logging.warning("%s does not exist", pathtest)
continue
for p in ymd[pathtest]:
flist = os.listdir(pathtest)
#We should at least first try to find an image with the same filename before crawling the whole directory
_, tail = os.path.split(p)
if tail in flist :
#In case both name matches !
logging.debug("%s found in flist", tail)
flist.insert(0, tail)
elif tail.upper() in flist :
#In case both name matches !
logging.debug("%s found in flist", tail.upper())
flist.insert(0, tail.upper())
elif tail.lower() in flist :
#In case both name matches !
logging.debug("%s found in flist", tail.lower())
flist.insert(0, tail.lower())
for image in flist :
fullimage = os.path.join(pathtest, image)
logging.debug("Testing %s against %s", p, fullimage)
if KmdImages.compareImagesFiles(p, fullimage) :
logging.info("Image found in %s, removing %s", pathtest, p)
if self.args.doit :
os.remove(p)
break
if __name__ == "__main__":
cmd = KmdImagesMergeDateTree(__doc__)
cmd.run()
|
mit
| -7,093,201,711,377,779,000 | 42.104167 | 124 | 0.518366 | false |
cameronmaske/skipper
|
tests/test_creds.py
|
1
|
1769
|
import pytest
import mock
from skipper.creds import Creds, JSONStorage
@pytest.fixture()
def creds():
storage = mock.MagicMock()
storage.retrieve.return_value = {}
creds = Creds(storage=storage)
return creds
def test_creds(creds):
assert creds == {}
creds['foo'] = 'bar'
assert creds == {'foo': 'bar'}
creds.storage.save.assert_called_with({'foo': 'bar'})
def test_creds_get(creds):
creds['foo'] = 'bar'
assert creds['foo'] == 'bar'
assert creds.get('foo') == 'bar'
def test_creds_del(creds):
creds['foo'] = 'bar'
del creds['foo']
assert creds == {}
creds.storage.save.assert_called_with({})
def test_nested_creds(creds):
assert creds['foo']['bar'] == {}
creds['foo']['bar'] = 'buzz'
assert creds['foo']['bar'] == 'buzz'
creds.storage.save.assert_called_once_with({
'foo': {
'bar': 'buzz'
}})
def test_config_retrieve():
storage = mock.MagicMock()
existing = {
'foo': 'bar',
'foo2': {
'bar2': 'buzz'
}
}
storage.retrieve.return_value = existing
creds = Creds(storage=storage)
assert creds == existing
assert storage.retrieve.called
def test_storage_retrieve(tmpdir):
storage = JSONStorage(path=tmpdir)
tmpdir.join('.skippercfg').write('{"foo": "bar"}')
assert storage.retrieve() == {'foo': 'bar'}
def test_storage_retrieve_invalid_json(tmpdir):
storage = JSONStorage(path=tmpdir)
tmpdir.join('.skippercfg').write('["boo"}')
assert storage.retrieve() == {}
def test_storage_save(tmpdir):
storage = JSONStorage(path=tmpdir)
storage.save({'foo': 'bar'})
skippercfg = tmpdir.join('.skippercfg').read()
assert skippercfg == '{\n "foo": "bar"\n}'
|
bsd-2-clause
| -2,971,130,151,624,621,000 | 22.586667 | 57 | 0.598643 | false |
greyshell/Exploit-Dev
|
find_safe_address/find_safe_address.py
|
1
|
3098
|
#!/usr/bin/env python3
# author: greyshell
# description: find safe return address
import argparse
import sys
from colorama import Fore
# global constant variable
PROGRAM_LOGO = """
___ __ __ ___ ___ __ __ __ ___ __ __
|__ | |\ | | \ /__` /\ |__ |__ /\ | \ | \ |__) |__ /__` /__`
| | | \| |__/ .__/ /~~\ | |___ /~~\ |__/ |__/ | \ |___ .__/ .__/
"""
flag = 0
count = 0
class UserInput:
def __init__(self):
self.parser = argparse.ArgumentParser(
description="find safe/ good return address")
self.parser.add_argument("-r", "--return_address", metavar="",
help="provide a text file containing all return addresses",
required=True)
self.parser.add_argument("-g", "--good_characters", metavar="",
help="provide a text file containing all good characters",
required=True)
def is_safe(test_char, safe_chars):
"""
test each characters safe or not
:param test_char: char
:param safe_chars: string
:return: None
"""
global flag
global count
    result = safe_chars.find(test_char)
    # str.find() returns -1 when not found; index 0 is a valid match, so only a negative result means unsafe
    if result < 0:
        flag = 0
    else:
        flag = 1
        count += 1
def find_safe_address(return_address, good_characters):
"""
find safe address
:param return_address: string
:param good_characters: string
:return: None
"""
global flag
global count
# processing good_chars.txt
with open(good_characters) as f:
for safe_chars in f: # good_chars.txt has only one line, load all chars into the safe_chars variable
pass
# processing return_address.txt / seh.txt
with open(return_address) as f:
        flag = 0
        index = 0
        not_found_flag = True  # set to False as soon as one safe address is found
for line in f:
            # parse the line and get the address, e.g. "0x5f4c9a10 ..." -> addr = "0x5f4c9a10"
            addr = line[0:10]
            # split the address into its four bytes (two hex digits each) and test each one
            first_char = addr[2:4]
is_safe(first_char, safe_chars)
second_char = addr[4:6]
is_safe(second_char, safe_chars)
third_char = addr[6:8]
is_safe(third_char, safe_chars)
fourth_char = addr[8:10]
is_safe(fourth_char, safe_chars)
if count == 4:
print(Fore.GREEN, f"{index}: {line}")
index += 1
not_found_flag = False
count = 0
    if not_found_flag:
        print(Fore.RED, "[+] no safe address found")
return
def main():
my_input = UserInput()
args = my_input.parser.parse_args()
if len(sys.argv) == 1:
my_input.parser.print_help(sys.stderr)
sys.exit(1)
if args.return_address and args.good_characters:
# display program logo
print(Fore.CYAN, f"{PROGRAM_LOGO}")
find_safe_address(args.return_address, args.good_characters)
else:
my_input.parser.print_help(sys.stderr)
if __name__ == "__main__":
main()
|
mit
| -1,507,579,297,391,570,400 | 25.478632 | 109 | 0.517431 | false |
T-002/python-project-template
|
tests/__init__.py
|
1
|
1370
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016-2017 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Initializes the test environment."""
import os
import sys
sys.path.append("%s/.." % os.path.dirname(os.path.abspath(__file__)))
sys.path.append("%s/../package" % os.path.dirname(os.path.abspath(__file__)))
|
mit
| -4,959,437,430,437,986,000 | 46.241379 | 77 | 0.748905 | false |
jokey2k/sentry
|
src/sentry/api/base.py
|
1
|
5659
|
from __future__ import absolute_import
__all__ = ['DocSection', 'Endpoint', 'StatsMixin']
import logging
from datetime import datetime, timedelta
from django.utils.http import urlquote
from django.views.decorators.csrf import csrf_exempt
from enum import Enum
from pytz import utc
from rest_framework.authentication import SessionAuthentication
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from sentry.app import tsdb
from sentry.utils.cursors import Cursor
from .authentication import ApiKeyAuthentication, ProjectKeyAuthentication
from .paginator import Paginator
from .permissions import NoPermission
ONE_MINUTE = 60
ONE_HOUR = ONE_MINUTE * 60
ONE_DAY = ONE_HOUR * 24
LINK_HEADER = '<{uri}&cursor={cursor}>; rel="{name}"; results="{has_results}"; cursor="{cursor}"'
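# Illustrative note (not in the original source): one entry rendered from LINK_HEADER
# by build_cursor_link() below would look roughly like
#   </api/0/projects/?query=foo&cursor=1420837590:0:1>; rel="next"; results="true"; cursor="1420837590:0:1"
# where the URI and the cursor value are made-up placeholders.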
DEFAULT_AUTHENTICATION = (
ApiKeyAuthentication,
ProjectKeyAuthentication,
SessionAuthentication
)
class DocSection(Enum):
ACCOUNTS = 'Accounts'
EVENTS = 'Events'
ORGANIZATIONS = 'Organizations'
PROJECTS = 'Projects'
RELEASES = 'Releases'
TEAMS = 'Teams'
class Endpoint(APIView):
authentication_classes = DEFAULT_AUTHENTICATION
renderer_classes = (JSONRenderer,)
parser_classes = (JSONParser,)
permission_classes = (NoPermission,)
def build_cursor_link(self, request, name, cursor):
querystring = u'&'.join(
u'{0}={1}'.format(urlquote(k), urlquote(v))
for k, v in request.GET.iteritems()
if k != 'cursor'
)
base_url = request.build_absolute_uri(request.path)
if querystring:
base_url = '{0}?{1}'.format(base_url, querystring)
else:
base_url = base_url + '?'
return LINK_HEADER.format(
uri=base_url,
cursor=str(cursor),
name=name,
has_results='true' if bool(cursor) else 'false',
)
def convert_args(self, request, *args, **kwargs):
return (args, kwargs)
def handle_exception(self, exc):
try:
return super(Endpoint, self).handle_exception(exc)
except Exception as exc:
logging.exception(unicode(exc))
return Response({'detail': 'Internal Error'}, status=500)
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
"""
Identical to rest framework's dispatch except we add the ability
to convert arguments (for common URL params).
"""
self.args = args
self.kwargs = kwargs
request = self.initialize_request(request, *args, **kwargs)
self.request = request
self.headers = self.default_response_headers # deprecate?
try:
self.initial(request, *args, **kwargs)
# Get the appropriate handler method
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(),
self.http_method_not_allowed)
(args, kwargs) = self.convert_args(request, *args, **kwargs)
self.args = args
self.kwargs = kwargs
else:
handler = self.http_method_not_allowed
response = handler(request, *args, **kwargs)
except Exception as exc:
response = self.handle_exception(exc)
self.response = self.finalize_response(request, response, *args, **kwargs)
return self.response
def paginate(self, request, on_results=None, paginator_cls=Paginator,
**kwargs):
per_page = int(request.GET.get('per_page', 100))
input_cursor = request.GET.get('cursor')
if input_cursor:
input_cursor = Cursor.from_string(input_cursor)
assert per_page <= 100
paginator = paginator_cls(**kwargs)
cursor_result = paginator.get_result(
limit=per_page,
cursor=input_cursor,
)
        # map results based on callback; fall back to the raw results so that
        # results is always defined
        if on_results:
            results = on_results(cursor_result.results)
        else:
            results = cursor_result.results
headers = {}
headers['Link'] = ', '.join([
self.build_cursor_link(request, 'previous', cursor_result.prev),
self.build_cursor_link(request, 'next', cursor_result.next),
])
return Response(results, headers=headers)
class StatsMixin(object):
def _parse_args(self, request):
resolution = request.GET.get('resolution')
if resolution:
resolution = self._parse_resolution(resolution)
assert any(r for r in tsdb.rollups if r[0] == resolution)
end = request.GET.get('until')
if end:
end = datetime.fromtimestamp(float(end)).replace(tzinfo=utc)
else:
end = datetime.utcnow().replace(tzinfo=utc)
start = request.GET.get('since')
if start:
start = datetime.fromtimestamp(float(start)).replace(tzinfo=utc)
else:
start = end - timedelta(days=1, seconds=-1)
return {
'start': start,
'end': end,
'rollup': resolution,
}
def _parse_resolution(self, value):
if value.endswith('h'):
return int(value[:-1]) * ONE_HOUR
elif value.endswith('d'):
return int(value[:-1]) * ONE_DAY
elif value.endswith('m'):
return int(value[:-1]) * ONE_MINUTE
elif value.endswith('s'):
return int(value[:-1])
else:
raise ValueError(value)
|
bsd-3-clause
| -3,169,921,191,962,351,000 | 30.265193 | 97 | 0.604877 | false |
foobarbazblarg/stayclean
|
stayclean-2019-december/serve-signups-with-flask.py
|
1
|
8591
|
#!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ 'e0zlc9', 'e1ggmu', 'e1wx3j', 'e2e88a', 'e2yb14', 'e3gyth', 'e3wdz9' ]
flaskport = 8951
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
# submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
submissions = [redditSession.submission(id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
commentForest = submission.comments
comments += [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
mit
| -5,756,191,314,231,812,000 | 41.741294 | 129 | 0.67722 | false |
symbolicdata/code
|
src/sdeval/classes/computationproblems/GB_Z_lp.py
|
1
|
4721
|
from ComputationProblem import ComputationProblem
import os
class GB_Z_lp(ComputationProblem):
"""
This computation problem represents the computation of a Groebner basis of a given polynomial system
(commutative) with integer coefficients and the lexicographical ordering.
.. moduleauthor:: Albert Heinle <albert.heinle@rwth-aachen.de>
"""
__associatedTables = ["IntPS"]
"""
The tables containing problem instances that can be used as input for this computation problem
"""
__name = "GB_Z_lp"
"""
The name of the computation problem in the comp folder of symbolic data
"""
__possibleCASs = ["Singular",
"Magma",
"Maple",
"GAP"]
def __init__(self):
"""
        The constructor of the problem instance. It checks the templates folder to determine
        which computer algebra systems can be used for this computation problem.
"""
sdEvalPath = os.path.split(os.path.realpath(os.path.dirname(__file__)))[0]
self.__possibleCASs = filter(lambda x: os.path.isdir(os.path.join(sdEvalPath,"templates","comp",self.__name,x)) ,
os.listdir(os.path.join(sdEvalPath,"templates","comp",self.__name)))
def getPossibleComputerAlgebraSystems(self):
"""
        Overloading of the function given in ComputationProblem. It lists all computer algebra
        systems that provide algorithms to solve this computation problem.
:returns: A list of names of computer algebra systems.
:rtype: list
"""
return self.__possibleCASs
def getAssociatedTables(self):
"""
Overloading the function given in ComputationProblem. It lists all SD-Tables that
contain problem instances which can be used as input for algorithms solving this computation
problem.
:returns: All SD-Tables with suitable problem instances
:rtype: list
"""
return self.__associatedTables
def getName(self):
"""
Returns the name of this computation problem as it is given in the comp-table.
:returns: The name of the computation problem.
:rtype: string
"""
return self.__name
def addToAssociatedTables(self,associatedTable):
"""
This method adds a SymbolicData-Table to the list of associated tables to this
problem. It should contain problem instances that can be used as input for that specific
computation.
        This function is meant to be used for prototyping new problem instances for SDEval and, if they can be used,
        they will be permanently added to the class variable representing the list of associated tables.
:param associatedTable: The name of the table the user wants to add
:type associatedTable: string[nonempty][valid file name]
:returns: True if associatedTable could be appended or was already there,
False otherwise
"""
if not associatedTable in self.__associatedTables:
if not self._checkIfValidFileName(associatedTable):
#Validity check of input file name
return False
#If we reach this line, everything was okay with the code.
self.__associatedTables.append(associatedTable)
return True
else:
#In this case, the associated table was already in the list.
return True
def addToComputerAlgebraSystems(self,cas):
"""
This method adds a computer algebra system to the list of computer algebra systems that
provide algorithms to solve this specific computation problem.
        This function is meant to be used for checking new computer algebra systems for that computation problem and, if they can be used
        in a stable way, they will be permanently added to the class variable representing the list of possible computer algebra systems.
:param cas: The name of the computer algebra system
:type cas: string[non-empty][valid file name]
:returns: True, if computer algebra system had a valid name and could be appended,
False, otherwise
"""
if not cas in self.__possibleCASs:
if not self._checkIfValidFileName(cas):
#CAS name was empty or just whitespace characters.
#Don't change anything and return false
return False
#If we reach this line, everything was okay with the code.
self.__possibleCASs.append(cas)
return True
else:
return True
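# Illustrative usage sketch (not part of the original module). It only exercises the
# public methods defined above and assumes the SDEval templates folder exists in the
# layout the constructor expects; the "ModPS" table name is a made-up example.
if __name__ == "__main__":
    problem = GB_Z_lp()
    print(problem.getName())                            # -> "GB_Z_lp"
    print(problem.getAssociatedTables())                # -> ["IntPS", ...]
    print(problem.getPossibleComputerAlgebraSystems())  # -> subset of ["Singular", "Magma", "Maple", "GAP"]
    # Prototype an additional input table; returns False for an invalid file name
    print(problem.addToAssociatedTables("ModPS"))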
|
gpl-3.0
| -7,154,972,848,801,620,000 | 41.918182 | 136 | 0.640119 | false |
pombredanne/kitchen-1
|
kitchen2/tests/test_i18n.py
|
1
|
40979
|
# -*- coding: utf-8 -*-
#
import unittest
from nose import tools
import os
import types
from kitchen import i18n
import base_classes
class TestI18N_UTF8(unittest.TestCase, base_classes.UnicodeTestData):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.utf8'
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
def test_easy_gettext_setup(self):
'''Test that the easy_gettext_setup function works
'''
_, N_ = i18n.easy_gettext_setup('foo', localedirs=
['%s/data/locale/' % os.path.dirname(__file__)])
tools.assert_true(isinstance(_, types.MethodType))
tools.assert_true(isinstance(N_, types.MethodType))
tools.eq_(_.__name__, '_ugettext')
tools.eq_(N_.__name__, '_ungettext')
tools.eq_(_(self.utf8_spanish), self.u_spanish)
tools.eq_(_(self.u_spanish), self.u_spanish)
tools.eq_(N_(self.utf8_limao, self.utf8_limoes, 1), self.u_limao)
tools.eq_(N_(self.utf8_limao, self.utf8_limoes, 2), self.u_limoes)
tools.eq_(N_(self.u_limao, self.u_limoes, 1), self.u_limao)
tools.eq_(N_(self.u_limao, self.u_limoes, 2), self.u_limoes)
def test_easy_gettext_setup_non_unicode(self):
        '''Test that the easy_gettext_setup function works with use_unicode=False
        '''
b_, bN_ = i18n.easy_gettext_setup('foo', localedirs=
['%s/data/locale/' % os.path.dirname(__file__)],
use_unicode=False)
tools.assert_true(isinstance(b_, types.MethodType))
tools.assert_true(isinstance(bN_, types.MethodType))
tools.eq_(b_.__name__, '_lgettext')
tools.eq_(bN_.__name__, '_lngettext')
tools.eq_(b_(self.utf8_spanish), self.utf8_spanish)
tools.eq_(b_(self.u_spanish), self.utf8_spanish)
tools.eq_(bN_(self.utf8_limao, self.utf8_limoes, 1), self.utf8_limao)
tools.eq_(bN_(self.utf8_limao, self.utf8_limoes, 2), self.utf8_limoes)
tools.eq_(bN_(self.u_limao, self.u_limoes, 1), self.utf8_limao)
tools.eq_(bN_(self.u_limao, self.u_limoes, 2), self.utf8_limoes)
def test_get_translation_object(self):
'''Test that the get_translation_object function works
'''
translations = i18n.get_translation_object('foo', ['%s/data/locale/' % os.path.dirname(__file__)])
tools.eq_(translations.__class__, i18n.DummyTranslations)
tools.assert_raises(IOError, i18n.get_translation_object, 'foo', ['%s/data/locale/' % os.path.dirname(__file__)], fallback=False)
translations = i18n.get_translation_object('test', ['%s/data/locale/' % os.path.dirname(__file__)])
tools.eq_(translations.__class__, i18n.NewGNUTranslations)
def test_get_translation_object_create_fallback(self):
'''Test get_translation_object creates fallbacks for additional catalogs'''
translations = i18n.get_translation_object('test',
['%s/data/locale' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)])
tools.eq_(translations.__class__, i18n.NewGNUTranslations)
tools.eq_(translations._fallback.__class__, i18n.NewGNUTranslations)
def test_get_translation_object_copy(self):
'''Test get_translation_object shallow copies the message catalog'''
translations = i18n.get_translation_object('test',
['%s/data/locale' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)], codeset='utf-8')
translations.input_charset = 'utf-8'
translations2 = i18n.get_translation_object('test',
['%s/data/locale' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)], codeset='latin-1')
translations2.input_charset = 'latin-1'
# Test that portions of the translation objects are the same and other
# portions are different (which is a space optimization so that the
# translation data isn't in memory multiple times)
tools.assert_not_equal(id(translations._fallback), id(translations2._fallback))
tools.assert_not_equal(id(translations.output_charset()), id(translations2.output_charset()))
tools.assert_not_equal(id(translations.input_charset), id(translations2.input_charset))
tools.assert_not_equal(id(translations.input_charset), id(translations2.input_charset))
tools.eq_(id(translations._catalog), id(translations2._catalog))
def test_get_translation_object_optional_params(self):
'''Smoketest leaving out optional parameters'''
translations = i18n.get_translation_object('test')
tools.assert_true(translations.__class__ in (i18n.NewGNUTranslations, i18n.DummyTranslations))
def test_get_translation_object_python2_api_default(self):
'''Smoketest that python2_api default value yields the python2 functions'''
# Default
translations = i18n.get_translation_object('test',
['%s/data/locale' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)], codeset='utf-8')
translations.input_charset = 'utf-8'
tools.eq_(translations.gettext.__name__, '_gettext')
tools.eq_(translations.lgettext.__name__, '_lgettext')
tools.eq_(translations.ugettext.__name__, '_ugettext')
tools.eq_(translations.ngettext.__name__, '_ngettext')
tools.eq_(translations.lngettext.__name__, '_lngettext')
tools.eq_(translations.ungettext.__name__, '_ungettext')
def test_get_translation_object_python2_api_true(self):
'''Smoketest that setting python2_api true yields the python2 functions'''
# Default
translations = i18n.get_translation_object('test',
['%s/data/locale' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)], codeset='utf-8',
python2_api=True)
translations.input_charset = 'utf-8'
tools.eq_(translations.gettext.__name__, '_gettext')
tools.eq_(translations.lgettext.__name__, '_lgettext')
tools.eq_(translations.ugettext.__name__, '_ugettext')
tools.eq_(translations.ngettext.__name__, '_ngettext')
tools.eq_(translations.lngettext.__name__, '_lngettext')
tools.eq_(translations.ungettext.__name__, '_ungettext')
def test_get_translation_object_python2_api_false(self):
'''Smoketest that setting python2_api false yields the python3 functions'''
# Default
translations = i18n.get_translation_object('test',
['%s/data/locale' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)], codeset='utf-8',
python2_api=False)
translations.input_charset = 'utf-8'
tools.eq_(translations.gettext.__name__, '_ugettext')
tools.eq_(translations.lgettext.__name__, '_lgettext')
tools.eq_(translations.ngettext.__name__, '_ungettext')
tools.eq_(translations.lngettext.__name__, '_lngettext')
tools.assert_raises(AttributeError, translations.ugettext, 'message')
tools.assert_raises(AttributeError, translations.ungettext, 'message1', 'message2')
def test_dummy_translation(self):
'''Test that we can create a DummyTranslation object
'''
tools.assert_true(isinstance(i18n.DummyTranslations(), i18n.DummyTranslations))
# Note: Using nose's generator tests for this so we can't subclass
# unittest.TestCase
class TestDummyTranslations(base_classes.UnicodeTestData):
def __init__(self):
self.test_data = {'bytes': (( # First set is with default charset (utf8)
(self.u_ascii, self.b_ascii),
(self.u_spanish, self.utf8_spanish),
(self.u_japanese, self.utf8_japanese),
(self.b_ascii, self.b_ascii),
(self.utf8_spanish, self.utf8_spanish),
(self.latin1_spanish, self.utf8_mangled_spanish_latin1_as_utf8),
(self.utf8_japanese, self.utf8_japanese),
),
( # Second set is with output_charset of latin1 (ISO-8859-1)
(self.u_ascii, self.b_ascii),
(self.u_spanish, self.latin1_spanish),
(self.u_japanese, self.latin1_mangled_japanese_replace_as_latin1),
(self.b_ascii, self.b_ascii),
(self.utf8_spanish, self.utf8_spanish),
(self.latin1_spanish, self.latin1_spanish),
(self.utf8_japanese, self.utf8_japanese),
),
( # Third set is with output_charset of C
(self.u_ascii, self.b_ascii),
(self.u_spanish, self.ascii_mangled_spanish_as_ascii),
(self.u_japanese, self.ascii_mangled_japanese_replace_as_latin1),
(self.b_ascii, self.b_ascii),
(self.utf8_spanish, self.ascii_mangled_spanish_as_ascii),
(self.latin1_spanish, self.ascii_twice_mangled_spanish_latin1_as_utf8_as_ascii),
(self.utf8_japanese, self.ascii_mangled_japanese_replace_as_latin1),
),
),
'unicode': (( # First set is with the default charset (utf8)
(self.u_ascii, self.u_ascii),
(self.u_spanish, self.u_spanish),
(self.u_japanese, self.u_japanese),
(self.b_ascii, self.u_ascii),
(self.utf8_spanish, self.u_spanish),
(self.latin1_spanish, self.u_mangled_spanish_latin1_as_utf8), # String is mangled but no exception
(self.utf8_japanese, self.u_japanese),
),
( # Second set is with _charset of latin1 (ISO-8859-1)
(self.u_ascii, self.u_ascii),
(self.u_spanish, self.u_spanish),
(self.u_japanese, self.u_japanese),
(self.b_ascii, self.u_ascii),
(self.utf8_spanish, self.u_mangled_spanish_utf8_as_latin1), # String mangled but no exception
(self.latin1_spanish, self.u_spanish),
(self.utf8_japanese, self.u_mangled_japanese_utf8_as_latin1), # String mangled but no exception
),
( # Third set is with _charset of C
(self.u_ascii, self.u_ascii),
(self.u_spanish, self.u_spanish),
(self.u_japanese, self.u_japanese),
(self.b_ascii, self.u_ascii),
(self.utf8_spanish, self.u_mangled_spanish_utf8_as_ascii), # String mangled but no exception
(self.latin1_spanish, self.u_mangled_spanish_latin1_as_ascii), # String mangled but no exception
(self.utf8_japanese, self.u_mangled_japanese_utf8_as_ascii), # String mangled but no exception
),
)
}
def setUp(self):
self.translations = i18n.DummyTranslations()
def check_gettext(self, message, value, charset=None):
self.translations.set_output_charset(charset)
tools.eq_(self.translations.gettext(message), value,
msg='gettext(%s): trans: %s != val: %s (charset=%s)'
% (repr(message), repr(self.translations.gettext(message)),
repr(value), charset))
def check_lgettext(self, message, value, charset=None,
locale='en_US.UTF-8'):
os.environ['LC_ALL'] = locale
self.translations.set_output_charset(charset)
tools.eq_(self.translations.lgettext(message), value,
msg='lgettext(%s): trans: %s != val: %s (charset=%s, locale=%s)'
% (repr(message), repr(self.translations.lgettext(message)),
repr(value), charset, locale))
# Note: charset has a default value because nose isn't invoking setUp and
# tearDown each time check_* is run.
def check_ugettext(self, message, value, charset='utf-8'):
'''ugettext method with default values'''
self.translations.input_charset = charset
tools.eq_(self.translations.ugettext(message), value,
msg='ugettext(%s): trans: %s != val: %s (charset=%s)'
% (repr(message), repr(self.translations.ugettext(message)),
repr(value), charset))
def check_ngettext(self, message, value, charset=None):
self.translations.set_output_charset(charset)
tools.eq_(self.translations.ngettext(message, 'blank', 1), value)
tools.eq_(self.translations.ngettext('blank', message, 2), value)
tools.assert_not_equal(self.translations.ngettext(message, 'blank', 2), value)
tools.assert_not_equal(self.translations.ngettext('blank', message, 1), value)
def check_lngettext(self, message, value, charset=None, locale='en_US.UTF-8'):
os.environ['LC_ALL'] = locale
self.translations.set_output_charset(charset)
tools.eq_(self.translations.lngettext(message, 'blank', 1), value,
msg='lngettext(%s, "blank", 1): trans: %s != val: %s (charset=%s, locale=%s)'
% (repr(message), repr(self.translations.lngettext(message,
'blank', 1)), repr(value), charset, locale))
tools.eq_(self.translations.lngettext('blank', message, 2), value,
msg='lngettext("blank", %s, 2): trans: %s != val: %s (charset=%s, locale=%s)'
% (repr(message), repr(self.translations.lngettext('blank',
message, 2)), repr(value), charset, locale))
tools.assert_not_equal(self.translations.lngettext(message, 'blank', 2), value,
msg='lngettext(%s, "blank", 2): trans: %s, val: %s (charset=%s, locale=%s)'
% (repr(message), repr(self.translations.lngettext(message,
'blank', 2)), repr(value), charset, locale))
tools.assert_not_equal(self.translations.lngettext('blank', message, 1), value,
msg='lngettext("blank", %s, 1): trans: %s != val: %s (charset=%s, locale=%s)'
% (repr(message), repr(self.translations.lngettext('blank',
message, 1)), repr(value), charset, locale))
# Note: charset has a default value because nose isn't invoking setUp and
# tearDown each time check_* is run.
def check_ungettext(self, message, value, charset='utf-8'):
self.translations.input_charset = charset
tools.eq_(self.translations.ungettext(message, 'blank', 1), value)
tools.eq_(self.translations.ungettext('blank', message, 2), value)
tools.assert_not_equal(self.translations.ungettext(message, 'blank', 2), value)
tools.assert_not_equal(self.translations.ungettext('blank', message, 1), value)
def test_gettext(self):
'''gettext method with default values'''
for message, value in self.test_data['bytes'][0]:
yield self.check_gettext, message, value
def test_gettext_output_charset(self):
'''gettext method after output_charset is set'''
for message, value in self.test_data['bytes'][1]:
yield self.check_gettext, message, value, 'latin1'
def test_ngettext(self):
for message, value in self.test_data['bytes'][0]:
yield self.check_ngettext, message, value
def test_ngettext_output_charset(self):
for message, value in self.test_data['bytes'][1]:
yield self.check_ngettext, message, value, 'latin1'
def test_lgettext(self):
'''lgettext method with default values on a utf8 locale'''
for message, value in self.test_data['bytes'][0]:
yield self.check_lgettext, message, value
def test_lgettext_output_charset(self):
'''lgettext method after output_charset is set'''
for message, value in self.test_data['bytes'][1]:
yield self.check_lgettext, message, value, 'latin1'
def test_lgettext_output_charset_and_locale(self):
'''lgettext method after output_charset is set in C locale
output_charset should take precedence
'''
for message, value in self.test_data['bytes'][1]:
yield self.check_lgettext, message, value, 'latin1', 'C'
def test_lgettext_locale_C(self):
'''lgettext method in a C locale'''
for message, value in self.test_data['bytes'][2]:
yield self.check_lgettext, message, value, None, 'C'
def test_lngettext(self):
'''lngettext method with default values on a utf8 locale'''
for message, value in self.test_data['bytes'][0]:
yield self.check_lngettext, message, value
def test_lngettext_output_charset(self):
'''lngettext method after output_charset is set'''
for message, value in self.test_data['bytes'][1]:
yield self.check_lngettext, message, value, 'latin1'
def test_lngettext_output_charset_and_locale(self):
'''lngettext method after output_charset is set in C locale
output_charset should take precedence
'''
for message, value in self.test_data['bytes'][1]:
yield self.check_lngettext, message, value, 'latin1', 'C'
def test_lngettext_locale_C(self):
'''lngettext method in a C locale'''
for message, value in self.test_data['bytes'][2]:
yield self.check_lngettext, message, value, None, 'C'
def test_ugettext(self):
for message, value in self.test_data['unicode'][0]:
yield self.check_ugettext, message, value
def test_ugettext_charset_latin1(self):
for message, value in self.test_data['unicode'][1]:
yield self.check_ugettext, message, value, 'latin1'
def test_ugettext_charset_ascii(self):
for message, value in self.test_data['unicode'][2]:
yield self.check_ugettext, message, value, 'ascii'
def test_ungettext(self):
for message, value in self.test_data['unicode'][0]:
yield self.check_ungettext, message, value
def test_ungettext_charset_latin1(self):
for message, value in self.test_data['unicode'][1]:
yield self.check_ungettext, message, value, 'latin1'
def test_ungettext_charset_ascii(self):
for message, value in self.test_data['unicode'][2]:
yield self.check_ungettext, message, value, 'ascii'
def test_nonbasestring(self):
tools.eq_(self.translations.gettext(dict(hi='there')), self.b_empty_string)
tools.eq_(self.translations.ngettext(dict(hi='there'), dict(hi='two'), 1), self.b_empty_string)
tools.eq_(self.translations.lgettext(dict(hi='there')), self.b_empty_string)
tools.eq_(self.translations.lngettext(dict(hi='there'), dict(hi='two'), 1), self.b_empty_string)
tools.eq_(self.translations.ugettext(dict(hi='there')), self.u_empty_string)
tools.eq_(self.translations.ungettext(dict(hi='there'), dict(hi='two'), 1), self.u_empty_string)
class TestI18N_Latin1(unittest.TestCase, base_classes.UnicodeTestData):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.iso88591'
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
def test_easy_gettext_setup_non_unicode(self):
        '''Test that the easy_gettext_setup function works with use_unicode=False
        '''
b_, bN_ = i18n.easy_gettext_setup('foo', localedirs=
['%s/data/locale/' % os.path.dirname(__file__)],
use_unicode=False)
tools.eq_(b_(self.utf8_spanish), self.utf8_spanish)
tools.eq_(b_(self.u_spanish), self.latin1_spanish)
tools.eq_(bN_(self.utf8_limao, self.utf8_limoes, 1), self.utf8_limao)
tools.eq_(bN_(self.utf8_limao, self.utf8_limoes, 2), self.utf8_limoes)
tools.eq_(bN_(self.u_limao, self.u_limoes, 1), self.latin1_limao)
tools.eq_(bN_(self.u_limao, self.u_limoes, 2), self.latin1_limoes)
class TestNewGNUTranslationsNoMatch(TestDummyTranslations):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.utf8'
self.translations = i18n.get_translation_object('test', ['%s/data/locale/' % os.path.dirname(__file__)])
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
class TestNewGNURealTranslations_UTF8(unittest.TestCase, base_classes.UnicodeTestData):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.utf8'
self.translations = i18n.get_translation_object('test', ['%s/data/locale/' % os.path.dirname(__file__)])
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
def test_gettext(self):
_ = self.translations.gettext
tools.eq_(_(self.utf8_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.utf8_ja_kuratomi), self.utf8_kuratomi)
tools.eq_(_(self.utf8_kuratomi), self.utf8_ja_kuratomi)
# This is not translated to utf8_yes_in_fallback because this test is
# without the fallback message catalog
tools.eq_(_(self.utf8_in_fallback), self.utf8_in_fallback)
tools.eq_(_(self.utf8_not_in_catalog), self.utf8_not_in_catalog)
tools.eq_(_(self.u_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.u_ja_kuratomi), self.utf8_kuratomi)
tools.eq_(_(self.u_kuratomi), self.utf8_ja_kuratomi)
# This is not translated to utf8_yes_in_fallback because this test is
# without the fallback message catalog
tools.eq_(_(self.u_in_fallback), self.utf8_in_fallback)
tools.eq_(_(self.u_not_in_catalog), self.utf8_not_in_catalog)
def test_ngettext(self):
_ = self.translations.ngettext
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 1), self.utf8_limao)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 1), self.utf8_lemon)
tools.eq_(_(self.u_lemon, self.u_lemons, 1), self.utf8_limao)
tools.eq_(_(self.u_limao, self.u_limoes, 1), self.utf8_lemon)
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 2), self.utf8_limoes)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 2), self.utf8_lemons)
tools.eq_(_(self.u_lemon, self.u_lemons, 2), self.utf8_limoes)
tools.eq_(_(self.u_limao, self.u_limoes, 2), self.utf8_lemons)
tools.eq_(_(self.utf8_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
tools.eq_(_(self.u_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
def test_lgettext(self):
_ = self.translations.lgettext
tools.eq_(_(self.utf8_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.utf8_ja_kuratomi), self.utf8_kuratomi)
tools.eq_(_(self.utf8_kuratomi), self.utf8_ja_kuratomi)
# This is not translated to utf8_yes_in_fallback because this test is
# without the fallback message catalog
tools.eq_(_(self.utf8_in_fallback), self.utf8_in_fallback)
tools.eq_(_(self.utf8_not_in_catalog), self.utf8_not_in_catalog)
tools.eq_(_(self.u_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.u_ja_kuratomi), self.utf8_kuratomi)
tools.eq_(_(self.u_kuratomi), self.utf8_ja_kuratomi)
# This is not translated to utf8_yes_in_fallback because this test is
# without the fallback message catalog
tools.eq_(_(self.u_in_fallback), self.utf8_in_fallback)
tools.eq_(_(self.u_not_in_catalog), self.utf8_not_in_catalog)
def test_lngettext(self):
_ = self.translations.lngettext
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 1), self.utf8_limao)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 1), self.utf8_lemon)
tools.eq_(_(self.u_lemon, self.u_lemons, 1), self.utf8_limao)
tools.eq_(_(self.u_limao, self.u_limoes, 1), self.utf8_lemon)
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 2), self.utf8_limoes)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 2), self.utf8_lemons)
tools.eq_(_(self.u_lemon, self.u_lemons, 2), self.utf8_limoes)
tools.eq_(_(self.u_limao, self.u_limoes, 2), self.utf8_lemons)
tools.eq_(_(self.utf8_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
tools.eq_(_(self.u_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
def test_ugettext(self):
_ = self.translations.ugettext
tools.eq_(_(self.utf8_kitchen), self.u_pt_kitchen)
tools.eq_(_(self.utf8_ja_kuratomi), self.u_kuratomi)
tools.eq_(_(self.utf8_kuratomi), self.u_ja_kuratomi)
# This is not translated to utf8_yes_in_fallback because this test is
# without the fallback message catalog
tools.eq_(_(self.utf8_in_fallback), self.u_in_fallback)
tools.eq_(_(self.utf8_not_in_catalog), self.u_not_in_catalog)
tools.eq_(_(self.u_kitchen), self.u_pt_kitchen)
tools.eq_(_(self.u_ja_kuratomi), self.u_kuratomi)
tools.eq_(_(self.u_kuratomi), self.u_ja_kuratomi)
# This is not translated to utf8_yes_in_fallback because this test is
# without the fallback message catalog
tools.eq_(_(self.u_in_fallback), self.u_in_fallback)
tools.eq_(_(self.u_not_in_catalog), self.u_not_in_catalog)
def test_ungettext(self):
_ = self.translations.ungettext
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 1), self.u_limao)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 1), self.u_lemon)
tools.eq_(_(self.u_lemon, self.u_lemons, 1), self.u_limao)
tools.eq_(_(self.u_limao, self.u_limoes, 1), self.u_lemon)
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 2), self.u_limoes)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 2), self.u_lemons)
tools.eq_(_(self.u_lemon, self.u_lemons, 2), self.u_limoes)
tools.eq_(_(self.u_limao, self.u_limoes, 2), self.u_lemons)
tools.eq_(_(self.utf8_not_in_catalog, 'throwaway', 1), self.u_not_in_catalog)
tools.eq_(_(self.u_not_in_catalog, 'throwaway', 1), self.u_not_in_catalog)
class TestNewGNURealTranslations_Latin1(TestNewGNURealTranslations_UTF8):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.iso88591'
self.translations = i18n.get_translation_object('test', ['%s/data/locale/' % os.path.dirname(__file__)])
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
def test_lgettext(self):
_ = self.translations.lgettext
tools.eq_(_(self.utf8_kitchen), self.latin1_pt_kitchen)
tools.eq_(_(self.utf8_ja_kuratomi), self.latin1_kuratomi)
tools.eq_(_(self.utf8_kuratomi), self.latin1_ja_kuratomi)
# Neither of the following two tests encode to proper latin-1 because:
# any byte is valid in latin-1 so there's no way to know that what
# we're given in the string is really utf-8
#
# This is not translated to latin1_yes_in_fallback because this test
# is without the fallback message catalog
tools.eq_(_(self.utf8_in_fallback), self.utf8_in_fallback)
tools.eq_(_(self.utf8_not_in_catalog), self.utf8_not_in_catalog)
tools.eq_(_(self.u_kitchen), self.latin1_pt_kitchen)
tools.eq_(_(self.u_ja_kuratomi), self.latin1_kuratomi)
tools.eq_(_(self.u_kuratomi), self.latin1_ja_kuratomi)
# This is not translated to latin1_yes_in_fallback because this test
# is without the fallback message catalog
tools.eq_(_(self.u_in_fallback), self.latin1_in_fallback)
tools.eq_(_(self.u_not_in_catalog), self.latin1_not_in_catalog)
def test_lngettext(self):
_ = self.translations.lngettext
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 1), self.latin1_limao)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 1), self.latin1_lemon)
tools.eq_(_(self.u_lemon, self.u_lemons, 1), self.latin1_limao)
tools.eq_(_(self.u_limao, self.u_limoes, 1), self.latin1_lemon)
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 2), self.latin1_limoes)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 2), self.latin1_lemons)
tools.eq_(_(self.u_lemon, self.u_lemons, 2), self.latin1_limoes)
tools.eq_(_(self.u_limao, self.u_limoes, 2), self.latin1_lemons)
# This unfortunately does not encode to proper latin-1 because:
# any byte is valid in latin-1 so there's no way to know that what
# we're given in the string is really utf-8
tools.eq_(_(self.utf8_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
tools.eq_(_(self.u_not_in_catalog, 'throwaway', 1), self.latin1_not_in_catalog)
class TestFallbackNewGNUTranslationsNoMatch(TestDummyTranslations):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.utf8'
self.translations = i18n.get_translation_object('test',
['%s/data/locale/' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)])
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
class TestFallbackNewGNURealTranslations_UTF8(unittest.TestCase, base_classes.UnicodeTestData):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.utf8'
self.translations = i18n.get_translation_object('test',
['%s/data/locale/' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)])
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
def test_gettext(self):
_ = self.translations.gettext
tools.eq_(_(self.utf8_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.utf8_ja_kuratomi), self.utf8_kuratomi)
tools.eq_(_(self.utf8_kuratomi), self.utf8_ja_kuratomi)
tools.eq_(_(self.utf8_in_fallback), self.utf8_yes_in_fallback)
tools.eq_(_(self.utf8_not_in_catalog), self.utf8_not_in_catalog)
tools.eq_(_(self.u_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.u_ja_kuratomi), self.utf8_kuratomi)
tools.eq_(_(self.u_kuratomi), self.utf8_ja_kuratomi)
tools.eq_(_(self.u_in_fallback), self.utf8_yes_in_fallback)
tools.eq_(_(self.u_not_in_catalog), self.utf8_not_in_catalog)
def test_ngettext(self):
_ = self.translations.ngettext
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 1), self.utf8_limao)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 1), self.utf8_lemon)
tools.eq_(_(self.u_lemon, self.u_lemons, 1), self.utf8_limao)
tools.eq_(_(self.u_limao, self.u_limoes, 1), self.utf8_lemon)
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 2), self.utf8_limoes)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 2), self.utf8_lemons)
tools.eq_(_(self.u_lemon, self.u_lemons, 2), self.utf8_limoes)
tools.eq_(_(self.u_limao, self.u_limoes, 2), self.utf8_lemons)
tools.eq_(_(self.utf8_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
tools.eq_(_(self.u_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
def test_lgettext(self):
_ = self.translations.lgettext
tools.eq_(_(self.utf8_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.utf8_ja_kuratomi), self.utf8_kuratomi)
tools.eq_(_(self.utf8_kuratomi), self.utf8_ja_kuratomi)
tools.eq_(_(self.utf8_in_fallback), self.utf8_yes_in_fallback)
tools.eq_(_(self.utf8_not_in_catalog), self.utf8_not_in_catalog)
tools.eq_(_(self.u_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.u_ja_kuratomi), self.utf8_kuratomi)
tools.eq_(_(self.u_kuratomi), self.utf8_ja_kuratomi)
tools.eq_(_(self.u_in_fallback), self.utf8_yes_in_fallback)
tools.eq_(_(self.u_not_in_catalog), self.utf8_not_in_catalog)
def test_lngettext(self):
_ = self.translations.lngettext
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 1), self.utf8_limao)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 1), self.utf8_lemon)
tools.eq_(_(self.u_lemon, self.u_lemons, 1), self.utf8_limao)
tools.eq_(_(self.u_limao, self.u_limoes, 1), self.utf8_lemon)
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 2), self.utf8_limoes)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 2), self.utf8_lemons)
tools.eq_(_(self.u_lemon, self.u_lemons, 2), self.utf8_limoes)
tools.eq_(_(self.u_limao, self.u_limoes, 2), self.utf8_lemons)
tools.eq_(_(self.utf8_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
tools.eq_(_(self.u_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
def test_ugettext(self):
_ = self.translations.ugettext
tools.eq_(_(self.utf8_kitchen), self.u_pt_kitchen)
tools.eq_(_(self.utf8_ja_kuratomi), self.u_kuratomi)
tools.eq_(_(self.utf8_kuratomi), self.u_ja_kuratomi)
tools.eq_(_(self.utf8_in_fallback), self.u_yes_in_fallback)
tools.eq_(_(self.utf8_not_in_catalog), self.u_not_in_catalog)
tools.eq_(_(self.u_kitchen), self.u_pt_kitchen)
tools.eq_(_(self.u_ja_kuratomi), self.u_kuratomi)
tools.eq_(_(self.u_kuratomi), self.u_ja_kuratomi)
tools.eq_(_(self.u_in_fallback), self.u_yes_in_fallback)
tools.eq_(_(self.u_not_in_catalog), self.u_not_in_catalog)
def test_ungettext(self):
_ = self.translations.ungettext
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 1), self.u_limao)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 1), self.u_lemon)
tools.eq_(_(self.u_lemon, self.u_lemons, 1), self.u_limao)
tools.eq_(_(self.u_limao, self.u_limoes, 1), self.u_lemon)
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 2), self.u_limoes)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 2), self.u_lemons)
tools.eq_(_(self.u_lemon, self.u_lemons, 2), self.u_limoes)
tools.eq_(_(self.u_limao, self.u_limoes, 2), self.u_lemons)
tools.eq_(_(self.utf8_not_in_catalog, 'throwaway', 1), self.u_not_in_catalog)
tools.eq_(_(self.u_not_in_catalog, 'throwaway', 1), self.u_not_in_catalog)
class TestFallbackNewGNURealTranslations_Latin1(TestFallbackNewGNURealTranslations_UTF8):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.iso88591'
self.translations = i18n.get_translation_object('test',
['%s/data/locale/' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)])
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
def test_lgettext(self):
_ = self.translations.lgettext
tools.eq_(_(self.utf8_kitchen), self.latin1_pt_kitchen)
tools.eq_(_(self.utf8_ja_kuratomi), self.latin1_kuratomi)
tools.eq_(_(self.utf8_kuratomi), self.latin1_ja_kuratomi)
tools.eq_(_(self.utf8_in_fallback), self.latin1_yes_in_fallback)
# This unfortunately does not encode to proper latin-1 because:
# any byte is valid in latin-1 so there's no way to know that what
# we're given in the string is really utf-8
tools.eq_(_(self.utf8_not_in_catalog), self.utf8_not_in_catalog)
tools.eq_(_(self.u_kitchen), self.latin1_pt_kitchen)
tools.eq_(_(self.u_ja_kuratomi), self.latin1_kuratomi)
tools.eq_(_(self.u_kuratomi), self.latin1_ja_kuratomi)
tools.eq_(_(self.u_in_fallback), self.latin1_yes_in_fallback)
tools.eq_(_(self.u_not_in_catalog), self.latin1_not_in_catalog)
def test_lngettext(self):
_ = self.translations.lngettext
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 1), self.latin1_limao)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 1), self.latin1_lemon)
tools.eq_(_(self.u_lemon, self.u_lemons, 1), self.latin1_limao)
tools.eq_(_(self.u_limao, self.u_limoes, 1), self.latin1_lemon)
tools.eq_(_(self.utf8_lemon, self.utf8_lemons, 2), self.latin1_limoes)
tools.eq_(_(self.utf8_limao, self.utf8_limoes, 2), self.latin1_lemons)
tools.eq_(_(self.u_lemon, self.u_lemons, 2), self.latin1_limoes)
tools.eq_(_(self.u_limao, self.u_limoes, 2), self.latin1_lemons)
# This unfortunately does not encode to proper latin-1 because:
# any byte is valid in latin-1 so there's no way to know that what
# we're given in the string is really utf-8
tools.eq_(_(self.utf8_not_in_catalog, 'throwaway', 1), self.utf8_not_in_catalog)
tools.eq_(_(self.u_not_in_catalog, 'throwaway', 1), self.latin1_not_in_catalog)
class TestFallback(unittest.TestCase, base_classes.UnicodeTestData):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.iso88591'
self.gtranslations = i18n.get_translation_object('test',
['%s/data/locale/' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)])
self.gtranslations.add_fallback(object())
self.dtranslations = i18n.get_translation_object('nonexistent',
['%s/data/locale/' % os.path.dirname(__file__),
'%s/data/locale-old' % os.path.dirname(__file__)])
self.dtranslations.add_fallback(object())
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
def test_invalid_fallback_no_raise(self):
'''Test when we have an invalid fallback that it does not raise.'''
tools.eq_(self.gtranslations.gettext(self.u_spanish), self.utf8_spanish)
tools.eq_(self.gtranslations.ugettext(self.u_spanish), self.u_spanish)
tools.eq_(self.gtranslations.lgettext(self.u_spanish), self.latin1_spanish)
tools.eq_(self.gtranslations.ngettext(self.u_spanish, 'cde', 1), self.utf8_spanish)
tools.eq_(self.gtranslations.ungettext(self.u_spanish, 'cde', 1), self.u_spanish)
tools.eq_(self.gtranslations.lngettext(self.u_spanish, 'cde', 1), self.latin1_spanish)
tools.eq_(self.dtranslations.gettext(self.u_spanish), self.utf8_spanish)
tools.eq_(self.dtranslations.ugettext(self.u_spanish), self.u_spanish)
tools.eq_(self.dtranslations.lgettext(self.u_spanish), self.latin1_spanish)
tools.eq_(self.dtranslations.ngettext(self.u_spanish, 'cde', 1), self.utf8_spanish)
tools.eq_(self.dtranslations.ungettext(self.u_spanish, 'cde', 1), self.u_spanish)
tools.eq_(self.dtranslations.lngettext(self.u_spanish, 'cde', 1), self.latin1_spanish)
class TestDefaultLocaleDir(unittest.TestCase, base_classes.UnicodeTestData):
def setUp(self):
self.old_LC_ALL = os.environ.get('LC_ALL', None)
os.environ['LC_ALL'] = 'pt_BR.utf8'
self.old_DEFAULT_LOCALEDIRS = i18n._DEFAULT_LOCALEDIR
i18n._DEFAULT_LOCALEDIR = '%s/data/locale/' % os.path.dirname(__file__)
self.translations = i18n.get_translation_object('test')
def tearDown(self):
if self.old_LC_ALL:
os.environ['LC_ALL'] = self.old_LC_ALL
else:
del(os.environ['LC_ALL'])
if self.old_DEFAULT_LOCALEDIRS:
i18n._DEFAULT_LOCALEDIR = self.old_DEFAULT_LOCALEDIRS
def test_gettext(self):
_ = self.translations.gettext
tools.eq_(_(self.utf8_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.utf8_kuratomi), self.utf8_ja_kuratomi)
tools.eq_(_(self.utf8_ja_kuratomi), self.utf8_kuratomi)
# Returns msgid because the string is in a fallback catalog which we
# haven't setup
tools.eq_(_(self.utf8_in_fallback), self.utf8_in_fallback)
tools.eq_(_(self.u_kitchen), self.utf8_pt_kitchen)
tools.eq_(_(self.u_kuratomi), self.utf8_ja_kuratomi)
tools.eq_(_(self.u_ja_kuratomi), self.utf8_kuratomi)
# Returns msgid because the string is in a fallback catalog which we
# haven't setup
tools.eq_(_(self.u_in_fallback), self.utf8_in_fallback)
|
gpl-2.0
| -4,151,479,420,773,446,700 | 48.97439 | 137 | 0.617389 | false |
mholtrop/Phys605
|
Python/RTC/ReadClock.py
|
1
|
1380
|
#!/usr/bin/env python
#
# The wiringPi library requires you to run as sudo.
# Sudo is needed for access to /dev/mem, which we don't actually use here,
# but there is no way around it, so this script must be started as root.
#
#
import wiringpi as wp
#
# Define a function to decode BCD encoded numbers.
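# In BCD each nibble stores one decimal digit, e.g. 0x59 decodes to 5*10 + 9 = 59.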
def decBCD(num):
return( (num/16)*10 + (num%16) )
#
# Open the RTC
#
fc= wp.wiringPiI2CSetup(0x68)
#
# We read the registers one at a time.
secs = wp.wiringPiI2CReadReg8(fc,0x00)
mins = wp.wiringPiI2CReadReg8(fc,0x01)
hour = wp.wiringPiI2CReadReg8(fc,0x02)
day = wp.wiringPiI2CReadReg8(fc,0x03)
dat = wp.wiringPiI2CReadReg8(fc,0x04)
mon = wp.wiringPiI2CReadReg8(fc,0x05)
yr = wp.wiringPiI2CReadReg8(fc,0x06)
cent = wp.wiringPiI2CReadReg8(fc,0x07)
temp1 = wp.wiringPiI2CReadReg8(fc,0x11)
temp2 = wp.wiringPiI2CReadReg8(fc,0x12)
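# Registers 0x11/0x12 hold the temperature (MSB/LSB) on a DS3231-style RTC;
# they are read here but not used further below.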
year = decBCD(yr)
month = decBCD(mon & 0x7f)
date = decBCD(dat)
if (mon & 0x80)>0:
year+=2100
else:
year+=2000
if (hour&0x40)>0: # Test for 12 or 24 hour clock. 1=12 hour 0=24 hour
hours = decBCD(hour&0x1F)
if (hour&0x20)>0:
ampm = "PM"
else:
ampm = "AM"
print "{2}:{1:02d}:{0:02d} {3} ({4}) {5}-{6}-{7}".format(decBCD(secs),decBCD(mins),hours,ampm,day,year,month,date)
else:
hours = decBCD(hour&0x3F)
print "{2}:{1:02d}:{0:02d} ({3}) {4}-{5}-{6}".format(decBCD(secs),decBCD(mins),hours,day,year,month,date)
|
gpl-3.0
| -5,986,468,438,403,868,000 | 27.75 | 118 | 0.675362 | false |
hachreak/invenio-oauthclient
|
invenio_oauthclient/contrib/orcid.py
|
1
|
7449
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015, 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Pre-configured remote application for enabling sign in/up with ORCID.
1. Edit your configuration and add:
.. code-block:: python
from invenio_oauthclient.contrib import orcid
OAUTHCLIENT_REMOTE_APPS = dict(
orcid=orcid.REMOTE_APP,
)
ORCID_APP_CREDENTIALS = dict(
consumer_key="changeme",
consumer_secret="changeme",
)
Note, if you want to use the ORCID Member API, use
``orcid.REMOTE_MEMBER_APP`` instead of ``orcid.REMOTE_APP``.
In case you want to use the sandbox:
To use the ORCID Public API sandbox, use ``orcid.REMOTE_SANDBOX_APP``
instead of ``orcid.REMOTE_APP``.
To use the ORCID Member API sandbox, use ``orcid.REMOTE_SANDBOX_MEMBER_APP``.
2. Register a new application with ORCID. When registering the
application ensure that the *Redirect URI* points to:
``CFG_SITE_URL/oauth/authorized/orcid/`` (note, ORCID does not
allow localhost to be used, thus testing on development machines is
somewhat complicated by this).
3. Grab the *Client ID* and *Client Secret* after registering the application
and add them to your instance configuration (``invenio.cfg``):
.. code-block:: python
ORCID_APP_CREDENTIALS = dict(
consumer_key="<CLIENT ID>",
consumer_secret="<CLIENT SECRET>",
)
4. Now go to ``CFG_SITE_URL/oauth/login/orcid/`` (e.g.
http://localhost:4000/oauth/login/orcid/)
5. Also, you should see ORCID listed under Linked accounts:
http://localhost:4000/account/settings/linkedaccounts/
By default the ORCID module will first check whether a link already exists
between an ORCID account and a user. If no link is found, the user is asked
to provide an email address to sign-up.
In templates you can add a sign in/up link:
.. code-block:: jinja
<a href="{{url_for('invenio_oauthclient.login', remote_app='orcid')}}">
Sign in with ORCID
</a>
For more details you can play with a :doc:`working example <examplesapp>`.
"""
import copy
from flask import current_app, redirect, url_for
from flask_login import current_user
from invenio_db import db
from invenio_oauthclient.models import RemoteAccount
from invenio_oauthclient.utils import oauth_link_external_id, \
oauth_unlink_external_id
REMOTE_APP = dict(
title='ORCID',
description='Connecting Research and Researchers.',
icon='',
authorized_handler='invenio_oauthclient.handlers'
':authorized_signup_handler',
disconnect_handler='invenio_oauthclient.contrib.orcid'
':disconnect_handler',
signup_handler=dict(
info='invenio_oauthclient.contrib.orcid:account_info',
setup='invenio_oauthclient.contrib.orcid:account_setup',
view='invenio_oauthclient.handlers:signup_handler',
),
params=dict(
request_token_params={'scope': '/authenticate',
'show_login': 'true'},
base_url='https://pub.orcid.org/v1.2/',
request_token_url=None,
access_token_url='https://pub.orcid.org/oauth/token',
access_token_method='POST',
authorize_url='https://orcid.org/oauth/authorize',
app_key='ORCID_APP_CREDENTIALS',
content_type='application/json',
)
)
"""ORCID Remote Application."""
REMOTE_MEMBER_APP = copy.deepcopy(REMOTE_APP)
"""ORCID Remote Application with member API."""
REMOTE_MEMBER_APP['params'].update(dict(
base_url='https://api.orcid.org/',
access_token_url='https://api.orcid.org/oauth/token',
))
"""ORCID sandbox member API."""
REMOTE_SANDBOX_MEMBER_APP = copy.deepcopy(REMOTE_APP)
"""ORCID Sandbox Remote Application with member API."""
REMOTE_SANDBOX_MEMBER_APP['params'].update(dict(
base_url='https://api.sandbox.orcid.org/',
access_token_url='https://api.sandbox.orcid.org/oauth/token',
authorize_url='https://sandbox.orcid.org/oauth/authorize#show_login',
))
"""ORCID sandbox member API."""
REMOTE_SANDBOX_APP = copy.deepcopy(REMOTE_APP)
"""ORCID Sandbox Remote Application with public API."""
REMOTE_SANDBOX_APP['params'].update(dict(
base_url='https://pub.sandbox.orcid.org/',
access_token_url='https://pub.sandbox.orcid.org/oauth/token',
authorize_url='https://sandbox.orcid.org/oauth/authorize#show_login',
))
"""ORCID sandbox public API."""
def account_info(remote, resp):
"""Retrieve remote account information used to find local user.
It returns a dictionary with the following structure:
.. code-block:: python
{
'user': {
'profile': {
'full_name': 'Full Name',
},
},
            'external_id': 'orcid-unique-identifier',
            'external_method': 'orcid',
}
:param remote: The remote application.
:param resp: The response.
:returns: A dictionary with the user information.
"""
orcid = resp.get('orcid')
return {
'external_id': orcid,
'external_method': 'orcid',
'user': {
'profile': {
'full_name': resp.get('name'),
},
},
}
def disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account.
:param remote: The remote application.
"""
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
account = RemoteAccount.get(user_id=current_user.get_id(),
client_id=remote.consumer_key)
orcid = account.extra_data.get('orcid')
if orcid:
oauth_unlink_external_id({'id': orcid, 'method': 'orcid'})
if account:
with db.session.begin_nested():
account.delete()
return redirect(url_for('invenio_oauthclient_settings.index'))
def account_setup(remote, token, resp):
"""Perform additional setup after user have been logged in.
:param remote: The remote application.
:param token: The token value.
:param resp: The response.
"""
with db.session.begin_nested():
# Retrieve ORCID from response.
orcid = resp.get('orcid')
full_name = resp.get('name')
# Set ORCID in extra_data.
token.remote_account.extra_data = {
'orcid': orcid,
'full_name': full_name,
}
user = token.remote_account.user
# Create user <-> external id link.
oauth_link_external_id(user, {'id': orcid, 'method': 'orcid'})
|
gpl-2.0
| 46,624,260,302,116,430 | 30.969957 | 79 | 0.657404 | false |
PyCQA/isort
|
tests/unit/test_place.py
|
1
|
2623
|
"""Tests for the isort import placement module"""
from functools import partial
from isort import place, sections
from isort.settings import Config
def test_module(src_path):
place_tester = partial(place.module, config=Config(src_paths=[src_path]))
assert place_tester("isort") == sections.FIRSTPARTY
assert place_tester("os") == sections.STDLIB
assert place_tester(".deprecated") == sections.LOCALFOLDER
assert place_tester("__future__") == sections.FUTURE
assert place_tester("hug") == sections.THIRDPARTY
def test_extra_standard_library(src_path):
place_tester = partial(
place.module, config=Config(src_paths=[src_path], extra_standard_library=["hug"])
)
assert place_tester("os") == sections.STDLIB
assert place_tester("hug") == sections.STDLIB
def test_no_standard_library_placement():
assert place.module_with_reason(
"pathlib", config=Config(sections=["THIRDPARTY"], default_section="THIRDPARTY")
) == ("THIRDPARTY", "Default option in Config or universal default.")
assert place.module("pathlib") == "STDLIB"
def test_namespace_package_placement(examples_path):
namespace_examples = examples_path / "namespaces"
implicit = namespace_examples / "implicit"
pkg_resource = namespace_examples / "pkg_resource"
pkgutil = namespace_examples / "pkgutil"
for namespace_test in (implicit, pkg_resource, pkgutil):
print(namespace_test)
config = Config(settings_path=namespace_test)
no_namespaces = Config(settings_path=namespace_test, auto_identify_namespace_packages=False)
namespace_override = Config(settings_path=namespace_test, known_firstparty=["root.name"])
assert place.module("root.name", config=config) == "THIRDPARTY"
assert place.module("root.nested", config=config) == "FIRSTPARTY"
assert place.module("root.name", config=no_namespaces) == "FIRSTPARTY"
assert place.module("root.name", config=namespace_override) == "FIRSTPARTY"
no_namespace = namespace_examples / "none"
almost_implicit = namespace_examples / "almost-implicit"
for lacks_namespace in (no_namespace, almost_implicit):
config = Config(settings_path=lacks_namespace)
manual_namespace = Config(settings_path=lacks_namespace, namespace_packages=["root"])
assert place.module("root.name", config=config) == "FIRSTPARTY"
assert place.module("root.nested", config=config) == "FIRSTPARTY"
assert place.module("root.name", config=manual_namespace) == "THIRDPARTY"
assert place.module("root.nested", config=config) == "FIRSTPARTY"
|
mit
| 7,563,002,035,906,282,000 | 45.839286 | 100 | 0.700724 | false |
Aravinthu/odoo
|
addons/maintenance/models/maintenance.py
|
1
|
20858
|
# -*- coding: utf-8 -*-
from datetime import date, datetime, timedelta
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.exceptions import UserError
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
class MaintenanceStage(models.Model):
""" Model for case stages. This models the main stages of a Maintenance Request management flow. """
_name = 'maintenance.stage'
_description = 'Maintenance Stage'
_order = 'sequence, id'
name = fields.Char('Name', required=True, translate=True)
sequence = fields.Integer('Sequence', default=20)
fold = fields.Boolean('Folded in Maintenance Pipe')
done = fields.Boolean('Request Done')
class MaintenanceEquipmentCategory(models.Model):
_name = 'maintenance.equipment.category'
_inherit = ['mail.alias.mixin', 'mail.thread']
_description = 'Asset Category'
@api.one
@api.depends('equipment_ids')
def _compute_fold(self):
self.fold = False if self.equipment_count else True
name = fields.Char('Category Name', required=True, translate=True)
technician_user_id = fields.Many2one('res.users', 'Responsible', track_visibility='onchange', default=lambda self: self.env.uid, oldname='user_id')
color = fields.Integer('Color Index')
note = fields.Text('Comments', translate=True)
equipment_ids = fields.One2many('maintenance.equipment', 'category_id', string='Equipments', copy=False)
equipment_count = fields.Integer(string="Equipment", compute='_compute_equipment_count')
maintenance_ids = fields.One2many('maintenance.request', 'category_id', copy=False)
maintenance_count = fields.Integer(string="Maintenance", compute='_compute_maintenance_count')
alias_id = fields.Many2one(
'mail.alias', 'Alias', ondelete='restrict', required=True,
help="Email alias for this equipment category. New emails will automatically "
"create new maintenance request for this equipment category.")
fold = fields.Boolean(string='Folded in Maintenance Pipe', compute='_compute_fold', store=True)
@api.multi
def _compute_equipment_count(self):
equipment_data = self.env['maintenance.equipment'].read_group([('category_id', 'in', self.ids)], ['category_id'], ['category_id'])
mapped_data = dict([(m['category_id'][0], m['category_id_count']) for m in equipment_data])
for category in self:
category.equipment_count = mapped_data.get(category.id, 0)
@api.multi
def _compute_maintenance_count(self):
maintenance_data = self.env['maintenance.request'].read_group([('category_id', 'in', self.ids)], ['category_id'], ['category_id'])
mapped_data = dict([(m['category_id'][0], m['category_id_count']) for m in maintenance_data])
for category in self:
category.maintenance_count = mapped_data.get(category.id, 0)
@api.model
def create(self, vals):
self = self.with_context(alias_model_name='maintenance.request', alias_parent_model_name=self._name)
if not vals.get('alias_name'):
vals['alias_name'] = vals.get('name')
category_id = super(MaintenanceEquipmentCategory, self).create(vals)
category_id.alias_id.write({'alias_parent_thread_id': category_id.id, 'alias_defaults': {'category_id': category_id.id}})
return category_id
@api.multi
def unlink(self):
MailAlias = self.env['mail.alias']
for category in self:
if category.equipment_ids or category.maintenance_ids:
raise UserError(_("You cannot delete an equipment category containing equipments or maintenance requests."))
MailAlias += category.alias_id
res = super(MaintenanceEquipmentCategory, self).unlink()
MailAlias.unlink()
return res
def get_alias_model_name(self, vals):
return vals.get('alias_model', 'maintenance.equipment')
def get_alias_values(self):
values = super(MaintenanceEquipmentCategory, self).get_alias_values()
values['alias_defaults'] = {'category_id': self.id}
return values
class MaintenanceEquipment(models.Model):
_name = 'maintenance.equipment'
_inherit = ['mail.thread', 'mail.activity.mixin']
_description = 'Equipment'
@api.multi
def _track_subtype(self, init_values):
self.ensure_one()
if 'owner_user_id' in init_values and self.owner_user_id:
return 'maintenance.mt_mat_assign'
return super(MaintenanceEquipment, self)._track_subtype(init_values)
@api.multi
def name_get(self):
result = []
for record in self:
if record.name and record.serial_no:
result.append((record.id, record.name + '/' + record.serial_no))
if record.name and not record.serial_no:
result.append((record.id, record.name))
return result
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
recs = self.browse()
if name:
recs = self.search([('name', '=', name)] + args, limit=limit)
if not recs:
recs = self.search([('name', operator, name)] + args, limit=limit)
return recs.name_get()
name = fields.Char('Equipment Name', required=True, translate=True)
active = fields.Boolean(default=True)
technician_user_id = fields.Many2one('res.users', string='Technician', track_visibility='onchange', oldname='user_id')
owner_user_id = fields.Many2one('res.users', string='Owner', track_visibility='onchange')
category_id = fields.Many2one('maintenance.equipment.category', string='Equipment Category',
track_visibility='onchange', group_expand='_read_group_category_ids')
partner_id = fields.Many2one('res.partner', string='Vendor', domain="[('supplier', '=', 1)]")
partner_ref = fields.Char('Vendor Reference')
location = fields.Char('Location')
model = fields.Char('Model')
serial_no = fields.Char('Serial Number', copy=False)
assign_date = fields.Date('Assigned Date', track_visibility='onchange')
cost = fields.Float('Cost')
note = fields.Text('Note')
warranty = fields.Date('Warranty')
color = fields.Integer('Color Index')
scrap_date = fields.Date('Scrap Date')
maintenance_ids = fields.One2many('maintenance.request', 'equipment_id')
maintenance_count = fields.Integer(compute='_compute_maintenance_count', string="Maintenance", store=True)
maintenance_open_count = fields.Integer(compute='_compute_maintenance_count', string="Current Maintenance", store=True)
period = fields.Integer('Days between each preventive maintenance')
next_action_date = fields.Date(compute='_compute_next_maintenance', string='Date of the next preventive maintenance', store=True)
maintenance_team_id = fields.Many2one('maintenance.team', string='Maintenance Team')
maintenance_duration = fields.Float(help="Maintenance Duration in hours.")
@api.depends('period', 'maintenance_ids.request_date', 'maintenance_ids.close_date')
def _compute_next_maintenance(self):
date_now = fields.Date.context_today(self)
for equipment in self.filtered(lambda x: x.period > 0):
next_maintenance_todo = self.env['maintenance.request'].search([
('equipment_id', '=', equipment.id),
('maintenance_type', '=', 'preventive'),
('stage_id.done', '!=', True),
('close_date', '=', False)], order="request_date asc", limit=1)
last_maintenance_done = self.env['maintenance.request'].search([
('equipment_id', '=', equipment.id),
('maintenance_type', '=', 'preventive'),
('stage_id.done', '=', True),
('close_date', '!=', False)], order="close_date desc", limit=1)
if next_maintenance_todo and last_maintenance_done:
next_date = next_maintenance_todo.request_date
date_gap = fields.Date.from_string(next_maintenance_todo.request_date) - fields.Date.from_string(last_maintenance_done.close_date)
                # If the gap between the last_maintenance_done and the next_maintenance_todo is bigger than 2 times the period and the next request is in the future.
                # We use 2 times the period to avoid creating a new request too close to one that was created manually.
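                # e.g. with a 30-day period, a last request closed on Jan 1st and the next one
                # requested for Apr 1st (90-day gap > 60 days), the next action date is pulled
                # back to Jan 31st, or to today if Jan 31st is already in the past.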
if date_gap > timedelta(0) and date_gap > timedelta(days=equipment.period) * 2 and fields.Date.from_string(next_maintenance_todo.request_date) > fields.Date.from_string(date_now):
# If the new date still in the past, we set it for today
if fields.Date.from_string(last_maintenance_done.close_date) + timedelta(days=equipment.period) < fields.Date.from_string(date_now):
next_date = date_now
else:
next_date = fields.Date.to_string(fields.Date.from_string(last_maintenance_done.close_date) + timedelta(days=equipment.period))
elif next_maintenance_todo:
next_date = next_maintenance_todo.request_date
date_gap = fields.Date.from_string(next_maintenance_todo.request_date) - fields.Date.from_string(date_now)
                # If the next maintenance to do is in the future, and more than 2 times the period away, we insert a new request.
                # We use 2 times the period to avoid creating a new request too close to one that was created manually.
if date_gap > timedelta(0) and date_gap > timedelta(days=equipment.period) * 2:
next_date = fields.Date.to_string(fields.Date.from_string(date_now)+timedelta(days=equipment.period))
elif last_maintenance_done:
next_date = fields.Date.from_string(last_maintenance_done.close_date)+timedelta(days=equipment.period)
                # If adding the period to the last maintenance done still leaves us in the past, we plan it for today.
if next_date < fields.Date.from_string(date_now):
next_date = date_now
else:
next_date = fields.Date.to_string(fields.Date.from_string(date_now) + timedelta(days=equipment.period))
equipment.next_action_date = next_date
@api.one
@api.depends('maintenance_ids.stage_id.done')
def _compute_maintenance_count(self):
self.maintenance_count = len(self.maintenance_ids)
self.maintenance_open_count = len(self.maintenance_ids.filtered(lambda x: not x.stage_id.done))
@api.onchange('category_id')
def _onchange_category_id(self):
self.technician_user_id = self.category_id.technician_user_id
_sql_constraints = [
('serial_no', 'unique(serial_no)', "Another asset already exists with this serial number!"),
]
@api.model
def create(self, vals):
equipment = super(MaintenanceEquipment, self).create(vals)
if equipment.owner_user_id:
equipment.message_subscribe_users(user_ids=[equipment.owner_user_id.id])
return equipment
@api.multi
def write(self, vals):
if vals.get('owner_user_id'):
self.message_subscribe_users(user_ids=[vals['owner_user_id']])
return super(MaintenanceEquipment, self).write(vals)
@api.model
def _read_group_category_ids(self, categories, domain, order):
""" Read group customization in order to display all the categories in
the kanban view, even if they are empty.
"""
category_ids = categories._search([], order=order, access_rights_uid=SUPERUSER_ID)
return categories.browse(category_ids)
def _create_new_request(self, date):
self.ensure_one()
self.env['maintenance.request'].create({
'name': _('Preventive Maintenance - %s') % self.name,
'request_date': date,
'schedule_date': date,
'category_id': self.category_id.id,
'equipment_id': self.id,
'maintenance_type': 'preventive',
'owner_user_id': self.owner_user_id.id,
'technician_user_id': self.technician_user_id.id,
'maintenance_team_id': self.maintenance_team_id.id,
'duration': self.maintenance_duration,
})
@api.model
def _cron_generate_requests(self):
"""
Generates maintenance request on the next_action_date or today if none exists
"""
for equipment in self.search([('period', '>', 0)]):
next_requests = self.env['maintenance.request'].search([('stage_id.done', '=', False),
('equipment_id', '=', equipment.id),
('maintenance_type', '=', 'preventive'),
('request_date', '=', equipment.next_action_date)])
if not next_requests:
equipment._create_new_request(equipment.next_action_date)
class MaintenanceRequest(models.Model):
_name = 'maintenance.request'
_inherit = ['mail.thread', 'mail.activity.mixin']
_description = 'Maintenance Requests'
_order = "id desc"
@api.returns('self')
def _default_stage(self):
return self.env['maintenance.stage'].search([], limit=1)
@api.multi
def _track_subtype(self, init_values):
self.ensure_one()
if 'stage_id' in init_values and self.stage_id.sequence <= 1:
return 'maintenance.mt_req_created'
elif 'stage_id' in init_values and self.stage_id.sequence > 1:
return 'maintenance.mt_req_status'
return super(MaintenanceRequest, self)._track_subtype(init_values)
def _get_default_team_id(self):
return self.env.ref('maintenance.equipment_team_maintenance', raise_if_not_found=False)
name = fields.Char('Subjects', required=True)
description = fields.Text('Description')
request_date = fields.Date('Request Date', track_visibility='onchange', default=fields.Date.context_today,
help="Date requested for the maintenance to happen")
owner_user_id = fields.Many2one('res.users', string='Created by', default=lambda s: s.env.uid)
category_id = fields.Many2one('maintenance.equipment.category', related='equipment_id.category_id', string='Category', store=True, readonly=True)
equipment_id = fields.Many2one('maintenance.equipment', string='Equipment', index=True)
technician_user_id = fields.Many2one('res.users', string='Owner', track_visibility='onchange', oldname='user_id')
stage_id = fields.Many2one('maintenance.stage', string='Stage', track_visibility='onchange',
group_expand='_read_group_stage_ids', default=_default_stage)
priority = fields.Selection([('0', 'Very Low'), ('1', 'Low'), ('2', 'Normal'), ('3', 'High')], string='Priority')
color = fields.Integer('Color Index')
close_date = fields.Date('Close Date', help="Date the maintenance was finished. ")
kanban_state = fields.Selection([('normal', 'In Progress'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')],
string='Kanban State', required=True, default='normal', track_visibility='onchange')
# active = fields.Boolean(default=True, help="Set active to false to hide the maintenance request without deleting it.")
archive = fields.Boolean(default=False, help="Set archive to true to hide the maintenance request without deleting it.")
maintenance_type = fields.Selection([('corrective', 'Corrective'), ('preventive', 'Preventive')], string='Maintenance Type', default="corrective")
schedule_date = fields.Datetime('Scheduled Date', help="Date the maintenance team plans the maintenance. It should not differ much from the Request Date. ")
maintenance_team_id = fields.Many2one('maintenance.team', string='Team', required=True, default=_get_default_team_id)
duration = fields.Float(help="Duration in minutes and seconds.")
@api.multi
def archive_equipment_request(self):
self.write({'archive': True})
@api.multi
def reset_equipment_request(self):
""" Reinsert the maintenance request into the maintenance pipe in the first stage"""
first_stage_obj = self.env['maintenance.stage'].search([], order="sequence asc", limit=1)
# self.write({'active': True, 'stage_id': first_stage_obj.id})
self.write({'archive': False, 'stage_id': first_stage_obj.id})
@api.onchange('equipment_id')
def onchange_equipment_id(self):
if self.equipment_id:
self.technician_user_id = self.equipment_id.technician_user_id if self.equipment_id.technician_user_id else self.equipment_id.category_id.technician_user_id
self.category_id = self.equipment_id.category_id
if self.equipment_id.maintenance_team_id:
self.maintenance_team_id = self.equipment_id.maintenance_team_id.id
@api.onchange('category_id')
def onchange_category_id(self):
if not self.technician_user_id or not self.equipment_id or (self.technician_user_id and not self.equipment_id.technician_user_id):
self.technician_user_id = self.category_id.technician_user_id
@api.model
def create(self, vals):
# context: no_log, because subtype already handle this
self = self.with_context(mail_create_nolog=True)
request = super(MaintenanceRequest, self).create(vals)
if request.owner_user_id or request.technician_user_id:
request._add_followers()
if request.equipment_id and not request.maintenance_team_id:
request.maintenance_team_id = request.equipment_id.maintenance_team_id
return request
@api.multi
def write(self, vals):
# Overridden to reset the kanban_state to normal whenever
# the stage (stage_id) of the Maintenance Request changes.
if vals and 'kanban_state' not in vals and 'stage_id' in vals:
vals['kanban_state'] = 'normal'
res = super(MaintenanceRequest, self).write(vals)
if vals.get('owner_user_id') or vals.get('technician_user_id'):
self._add_followers()
if self.stage_id.done and 'stage_id' in vals:
self.write({'close_date': fields.Date.today()})
return res
def _add_followers(self):
for request in self:
user_ids = (request.owner_user_id + request.technician_user_id).ids
request.message_subscribe_users(user_ids=user_ids)
@api.model
def _read_group_stage_ids(self, stages, domain, order):
""" Read group customization in order to display all the stages in the
kanban view, even if they are empty
"""
stage_ids = stages._search([], order=order, access_rights_uid=SUPERUSER_ID)
return stages.browse(stage_ids)
class MaintenanceTeam(models.Model):
_name = 'maintenance.team'
_description = 'Maintenance Teams'
name = fields.Char(required=True)
member_ids = fields.Many2many('res.users', 'maintenance_team_users_rel', string="Team Members")
color = fields.Integer("Color Index", default=0)
request_ids = fields.One2many('maintenance.request', 'maintenance_team_id', copy=False)
equipment_ids = fields.One2many('maintenance.equipment', 'maintenance_team_id', copy=False)
# For the dashboard only
todo_request_ids = fields.One2many('maintenance.request', copy=False, compute='_compute_todo_requests')
todo_request_count = fields.Integer(compute='_compute_todo_requests')
todo_request_count_date = fields.Integer(compute='_compute_todo_requests')
todo_request_count_high_priority = fields.Integer(compute='_compute_todo_requests')
todo_request_count_block = fields.Integer(compute='_compute_todo_requests')
todo_request_count_unscheduled = fields.Integer(compute='_compute_todo_requests')
@api.one
@api.depends('request_ids.stage_id.done')
def _compute_todo_requests(self):
self.todo_request_ids = self.request_ids.filtered(lambda e: e.stage_id.done==False)
self.todo_request_count = len(self.todo_request_ids)
self.todo_request_count_date = len(self.todo_request_ids.filtered(lambda e: e.schedule_date != False))
self.todo_request_count_high_priority = len(self.todo_request_ids.filtered(lambda e: e.priority == '3'))
self.todo_request_count_block = len(self.todo_request_ids.filtered(lambda e: e.kanban_state == 'blocked'))
self.todo_request_count_unscheduled = len(self.todo_request_ids.filtered(lambda e: not e.schedule_date))
@api.one
@api.depends('equipment_ids')
def _compute_equipment(self):
self.equipment_count = len(self.equipment_ids)
|
agpl-3.0
| 8,363,381,834,550,002,000 | 52.619537 | 195 | 0.651788 | false |
lbarahona/UdacityProject4
|
main.py
|
1
|
2446
|
#!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
from google.appengine.api import memcache
from google.appengine.ext import ndb
from models import Session
from conference import MEMCACHE_FEATURED_SPEAKER_KEY
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()), # from
self.request.get('email'), # to
'You created a new Conference!', # subj
'Hi, you have created a following ' # body
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
# - - - Task 4: Add a Task - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# The task checks whether there is more than one session by this speaker at this
# conference and, if so, adds a new Memcache entry featuring the speaker and session names.
class CheckFeaturedSpeakerHandler(webapp2.RequestHandler):
def post(self):
"""set memcache entry if speaker has more than one session"""
        # Derive the conference key from the session key passed to the task
        # (sessions are assumed to be ndb children of their conference).
        conf_key = ndb.Key(urlsafe=self.request.get('sessionKey')).parent()
        sessions = Session.query(ancestor=conf_key).filter(
            Session.speakerKey == self.request.get('speakerKey'))
# Add one if the session key just added can not yet be found in the queried sessions
#not_found = not any(s.key.urlsafe() == self.request.get('sessionKey') for s in sessions)
if sessions.count() > 1:
memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY,
'%s is our latest Featured Speaker' % self.request.get(
'speakerDisplayName'))
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/check_featuredSpeaker', CheckFeaturedSpeakerHandler),
], debug=True)
|
apache-2.0
| -2,527,707,062,007,526,000 | 37.825397 | 109 | 0.657809 | false |
JohnSnowLabs/spark-nlp
|
python/tensorflow/lib/ner/ner_model.py
|
1
|
21854
|
import numpy as np
import tensorflow as tf
import random
import math
import sys
from sentence_grouper import SentenceGrouper
class NerModel:
# If session is not defined than default session will be used
def __init__(self, session=None, dummy_tags=None, use_contrib=True, use_gpu_device=0):
        tf.compat.v1.disable_v2_behavior()
self.word_repr = None
self.word_embeddings = None
self.session = session
self.session_created = False
self.dummy_tags = dummy_tags or []
self.use_contrib = use_contrib
self.use_gpu_device = use_gpu_device
if self.session is None:
self.session_created = True
self.session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True))
with tf.compat.v1.device('/gpu:{}'.format(self.use_gpu_device)):
with tf.compat.v1.variable_scope("char_repr") as scope:
# shape = (batch size, sentence, word)
self.char_ids = tf.compat.v1.placeholder(tf.int32, shape=[None, None, None], name="char_ids")
# shape = (batch_size, sentence)
self.word_lengths = tf.compat.v1.placeholder(tf.int32, shape=[None, None], name="word_lengths")
with tf.compat.v1.variable_scope("word_repr") as scope:
# shape = (batch size)
self.sentence_lengths = tf.compat.v1.placeholder(tf.int32, shape=[None], name="sentence_lengths")
with tf.compat.v1.variable_scope("training", reuse=None) as scope:
# shape = (batch, sentence)
self.labels = tf.compat.v1.placeholder(tf.int32, shape=[None, None], name="labels")
self.lr = tf.compat.v1.placeholder_with_default(0.005, shape=(), name="lr")
self.dropout = tf.compat.v1.placeholder(tf.float32, shape=(), name="dropout")
self._char_bilstm_added = False
self._char_cnn_added = False
self._word_embeddings_added = False
self._context_added = False
self._encode_added = False
def add_bilstm_char_repr(self, nchars=101, dim=25, hidden=25):
self._char_bilstm_added = True
with tf.compat.v1.device('/gpu:{}'.format(self.use_gpu_device)):
with tf.compat.v1.variable_scope("char_repr_lstm") as scope:
# 1. Lookup for character embeddings
char_range = math.sqrt(3 / dim)
embeddings = tf.compat.v1.get_variable(name="char_embeddings",
dtype=tf.float32,
shape=[nchars, dim],
initializer=tf.compat.v1.random_uniform_initializer(
-char_range,
char_range
),
use_resource=False)
# shape = (batch, sentence, word, char embeddings dim)
char_embeddings = tf.nn.embedding_lookup(params=embeddings, ids=self.char_ids)
# char_embeddings = tf.nn.dropout(char_embeddings, self.dropout)
s = tf.shape(input=char_embeddings)
# shape = (batch x sentence, word, char embeddings dim)
char_embeddings_seq = tf.reshape(char_embeddings, shape=[-1, s[-2], dim])
# shape = (batch x sentence)
word_lengths_seq = tf.reshape(self.word_lengths, shape=[-1])
# 2. Add Bidirectional LSTM
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
layer=tf.keras.layers.LSTM(hidden, return_sequences=False),
merge_mode="concat"
)
])
inputs = char_embeddings_seq
mask = tf.expand_dims(tf.sequence_mask(word_lengths_seq, dtype=tf.float32), axis=-1)
# shape = (batch x sentence, 2 x hidden)
output = model(inputs, mask=mask)
# shape = (batch, sentence, 2 x hidden)
char_repr = tf.reshape(output, shape=[-1, s[1], 2*hidden])
if self.word_repr is not None:
self.word_repr = tf.concat([self.word_repr, char_repr], axis=-1)
else:
self.word_repr = char_repr
def add_cnn_char_repr(self, nchars=101, dim=25, nfilters=25, pad=2):
self._char_cnn_added = True
with tf.compat.v1.device('/gpu:{}'.format(self.use_gpu_device)):
with tf.compat.v1.variable_scope("char_repr_cnn") as scope:
# 1. Lookup for character embeddings
char_range = math.sqrt(3 / dim)
embeddings = tf.compat.v1.get_variable(name="char_embeddings", dtype=tf.float32,
shape=[nchars, dim],
initializer=tf.compat.v1.random_uniform_initializer(-char_range, char_range),
use_resource=False)
# shape = (batch, sentence, word_len, embeddings dim)
char_embeddings = tf.nn.embedding_lookup(params=embeddings, ids=self.char_ids)
# char_embeddings = tf.nn.dropout(char_embeddings, self.dropout)
s = tf.shape(input=char_embeddings)
# shape = (batch x sentence, word_len, embeddings dim)
char_embeddings = tf.reshape(char_embeddings, shape=[-1, s[-2], dim])
# batch x sentence, word_len, nfilters
conv1d = tf.keras.layers.Conv1D(
filters=nfilters,
kernel_size=[3],
padding='same',
activation=tf.nn.relu
)(char_embeddings)
# Max across each filter, shape = (batch x sentence, nfilters)
char_repr = tf.reduce_max(input_tensor=conv1d, axis=1, keepdims=True)
char_repr = tf.squeeze(char_repr, axis=[1])
# (batch, sentence, nfilters)
char_repr = tf.reshape(char_repr, shape=[s[0], s[1], nfilters])
if self.word_repr is not None:
self.word_repr = tf.concat([self.word_repr, char_repr], axis=-1)
else:
self.word_repr = char_repr
def add_pretrained_word_embeddings(self, dim=100):
self._word_embeddings_added = True
with tf.compat.v1.device('/gpu:{}'.format(self.use_gpu_device)):
with tf.compat.v1.variable_scope("word_repr") as scope:
# shape = (batch size, sentence, dim)
self.word_embeddings = tf.compat.v1.placeholder(tf.float32, shape=[None, None, dim],
name="word_embeddings")
if self.word_repr is not None:
self.word_repr = tf.concat([self.word_repr, self.word_embeddings], axis=-1)
else:
self.word_repr = self.word_embeddings
def _create_lstm_layer(self, inputs, hidden_size, lengths):
with tf.compat.v1.device('/gpu:{}'.format(self.use_gpu_device)):
if not self.use_contrib:
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
layer=tf.keras.layers.LSTM(hidden_size, return_sequences=False),
merge_mode="concat"
)
])
mask = tf.expand_dims(tf.sequence_mask(lengths, dtype=tf.float32), axis=-1)
# shape = (batch x sentence, 2 x hidden)
output = model(inputs, mask=mask)
# inputs shape = (batch, sentence, inp)
batch = tf.shape(input=lengths)[0]
return tf.reshape(output, shape=[batch, -1, 2*hidden_size])
time_based = tf.transpose(a=inputs, perm=[1, 0, 2])
cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(hidden_size, use_peephole=True)
cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(hidden_size, use_peephole=True)
cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(cell_bw)
output_fw, _ = cell_fw(time_based, dtype=tf.float32, sequence_length=lengths)
output_bw, _ = cell_bw(time_based, dtype=tf.float32, sequence_length=lengths)
result = tf.concat([output_fw, output_bw], axis=-1)
return tf.transpose(a=result, perm=[1, 0, 2])
def _multiply_layer(self, source, result_size, activation=tf.nn.relu):
with tf.compat.v1.device('/gpu:{}'.format(self.use_gpu_device)):
ntime_steps = tf.shape(input=source)[1]
source_size = source.shape[2]
W = tf.compat.v1.get_variable("W", shape=[source_size, result_size],
dtype=tf.float32,
initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
use_resource=False)
b = tf.compat.v1.get_variable("b", shape=[result_size], dtype=tf.float32, use_resource=False)
# batch x time, source_size
source = tf.reshape(source, [-1, source_size])
# batch x time, result_size
result = tf.matmul(source, W) + b
result = tf.reshape(result, [-1, ntime_steps, result_size])
if activation:
result = activation(result)
return result
# Adds Bi LSTM with size of each cell hidden_size
def add_context_repr(self, ntags, hidden_size=100, height=1, residual=True):
        # Note: wrapping the condition and message in one pair of parentheses creates
        # a tuple, which is always truthy, so the original assert could never fire.
        assert self._word_embeddings_added or self._char_cnn_added or self._char_bilstm_added, \
            "Add word embeddings by method add_pretrained_word_embeddings " \
            "or add char representation by method add_bilstm_char_repr " \
            "or add_cnn_char_repr before adding context layer"
self._context_added = True
self.ntags = ntags
with tf.compat.v1.device('/gpu:{}'.format(self.use_gpu_device)):
context_repr = self._multiply_layer(self.word_repr, 2*hidden_size)
# Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`
context_repr = tf.nn.dropout(x=context_repr, rate=1-self.dropout)
with tf.compat.v1.variable_scope("context_repr") as scope:
for i in range(height):
with tf.compat.v1.variable_scope('lstm-{}'.format(i)):
new_repr = self._create_lstm_layer(context_repr, hidden_size,
lengths=self.sentence_lengths)
context_repr = new_repr + context_repr if residual else new_repr
context_repr = tf.nn.dropout(x=context_repr, rate=1-self.dropout)
# batch, sentence, ntags
self.scores = self._multiply_layer(context_repr, ntags, activation=None)
tf.identity(self.scores, "scores")
self.predicted_labels = tf.argmax(input=self.scores, axis=-1)
tf.identity(self.predicted_labels, "predicted_labels")
def add_inference_layer(self, crf=False):
        assert self._context_added, \
            "Add context representation layer by method add_context_repr before adding inference layer"
self._inference_added = True
with tf.device('/gpu:{}'.format(self.use_gpu_device)):
with tf.compat.v1.variable_scope("inference", reuse=None) as scope:
self.crf = tf.constant(crf, dtype=tf.bool, name="crf")
if crf:
transition_params = tf.compat.v1.get_variable("transition_params",
shape=[self.ntags, self.ntags],
initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"),
use_resource=False)
# CRF shape = (batch, sentence)
log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
self.scores,
self.labels,
self.sentence_lengths,
transition_params
)
tf.identity(log_likelihood, "log_likelihood")
tf.identity(self.transition_params, "transition_params")
self.loss = tf.reduce_mean(input_tensor=-log_likelihood)
self.prediction, _ = tf.contrib.crf.crf_decode(self.scores, self.transition_params, self.sentence_lengths)
else:
# Softmax
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.scores, labels=self.labels)
# shape = (batch, sentence, ntags)
mask = tf.sequence_mask(self.sentence_lengths)
# apply mask
losses = tf.boolean_mask(tensor=losses, mask=mask)
self.loss = tf.reduce_mean(input_tensor=losses)
self.prediction = tf.math.argmax(input=self.scores, axis=-1)
tf.identity(self.loss, "loss")
# clip_gradient < 0 - no gradient clipping
def add_training_op(self, clip_gradient = 2.0):
        assert self._inference_added, \
            "Add inference layer by method add_inference_layer before adding training layer"
self._training_added = True
with tf.compat.v1.device('/gpu:{}'.format(self.use_gpu_device)):
with tf.compat.v1.variable_scope("training", reuse=None) as scope:
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.lr)
if clip_gradient > 0:
gvs = optimizer.compute_gradients(self.loss)
capped_gvs = [(tf.clip_by_value(grad, -clip_gradient, clip_gradient), var) for grad, var in gvs if grad is not None]
self.train_op = optimizer.apply_gradients(capped_gvs)
else:
self.train_op = optimizer.minimize(self.loss)
self.init_op = tf.compat.v1.variables_initializer(tf.compat.v1.global_variables(), name="init")
@staticmethod
def num_trues(array):
result = 0
for item in array:
if item == True:
result += 1
return result
@staticmethod
def fill(array, l, val):
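        # Pad 'array' with 'val' up to length l, e.g. fill([1, 2], 4, 0) -> [1, 2, 0, 0].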
result = array[:]
for i in range(l - len(array)):
result.append(val)
return result
@staticmethod
def get_sentence_lengths(batch, idx="word_embeddings"):
return [len(row[idx]) for row in batch]
@staticmethod
def get_sentence_token_lengths(batch, idx="tag_ids"):
return [len(row[idx]) for row in batch]
@staticmethod
def get_word_lengths(batch, idx="char_ids"):
max_words = max([len(row[idx]) for row in batch])
return [NerModel.fill([len(chars) for chars in row[idx]], max_words, 0)
for row in batch]
@staticmethod
def get_char_ids(batch, idx="char_ids"):
max_chars = max([max([len(char_ids) for char_ids in sentence[idx]]) for sentence in batch])
max_words = max([len(sentence[idx]) for sentence in batch])
return [
NerModel.fill(
[NerModel.fill(char_ids, max_chars, 0) for char_ids in sentence[idx]],
max_words, [0]*max_chars
)
for sentence in batch]
@staticmethod
def get_from_batch(batch, idx):
k = max([len(row[idx]) for row in batch])
return list([NerModel.fill(row[idx], k, 0) for row in batch])
@staticmethod
def get_tag_ids(batch, idx="tag_ids"):
return NerModel.get_from_batch(batch, idx)
@staticmethod
def get_word_embeddings(batch, idx="word_embeddings"):
embeddings_dim = len(batch[0][idx][0])
max_words = max([len(sentence[idx]) for sentence in batch])
return [
NerModel.fill([word_embedding for word_embedding in sentence[idx]],
max_words, [0]*embeddings_dim
)
for sentence in batch]
@staticmethod
def slice(dataset, batch_size=10):
grouper = SentenceGrouper([5, 10, 20, 50])
return grouper.slice(dataset, batch_size)
def init_variables(self):
self.session.run(self.init_op)
def train(self, train,
epoch_start=0,
epoch_end=100,
batch_size=32,
lr=0.01,
po=0,
dropout=0.65,
init_variables=False
):
        assert self._training_added, "Add training layer by method add_training_op before running training"
if init_variables:
with tf.compat.v1.device('/gpu:{}'.format(self.use_gpu_device)):
self.session.run(tf.compat.v1.global_variables_initializer())
        print('training started')
for epoch in range(epoch_start, epoch_end):
random.shuffle(train)
sum_loss = 0
for batch in NerModel.slice(train, batch_size):
feed_dict = {
self.sentence_lengths: NerModel.get_sentence_lengths(batch),
self.word_embeddings: NerModel.get_word_embeddings(batch),
self.word_lengths: NerModel.get_word_lengths(batch),
self.char_ids: NerModel.get_char_ids(batch),
self.labels: NerModel.get_tag_ids(batch),
self.dropout: dropout,
self.lr: lr / (1 + po * epoch)
}
mean_loss, _ = self.session.run([self.loss, self.train_op], feed_dict=feed_dict)
sum_loss += mean_loss
print("epoch {}".format(epoch))
print("mean loss: {}".format(sum_loss))
print()
sys.stdout.flush()
def measure(self, dataset, batch_size=20, dropout=1.0):
predicted = {}
correct = {}
correct_predicted = {}
for batch in NerModel.slice(dataset, batch_size):
tags_ids = NerModel.get_tag_ids(batch)
sentence_lengths = NerModel.get_sentence_lengths(batch)
feed_dict = {
self.sentence_lengths: sentence_lengths,
self.word_embeddings: NerModel.get_word_embeddings(batch),
self.word_lengths: NerModel.get_word_lengths(batch),
self.char_ids: NerModel.get_char_ids(batch),
self.labels: tags_ids,
self.dropout: dropout
}
prediction = self.session.run(self.prediction, feed_dict=feed_dict)
batch_prediction = np.reshape(prediction, (len(batch), -1))
for i in range(len(batch)):
is_word_start = batch[i]['is_word_start']
for word in range(sentence_lengths[i]):
if not is_word_start[word]:
continue
p = batch_prediction[i][word]
c = tags_ids[i][word]
if c in self.dummy_tags:
continue
predicted[p] = predicted.get(p, 0) + 1
correct[c] = correct.get(c, 0) + 1
if p == c:
correct_predicted[p] = correct_predicted.get(p, 0) + 1
num_correct_predicted = sum([correct_predicted.get(i, 0) for i in range(1, self.ntags)])
num_predicted = sum([predicted.get(i, 0) for i in range(1, self.ntags)])
num_correct = sum([correct.get(i, 0) for i in range(1, self.ntags)])
prec = num_correct_predicted / (num_predicted or 1.)
rec = num_correct_predicted / (num_correct or 1.)
f1 = 2 * prec * rec / (rec + prec)
return prec, rec, f1
@staticmethod
def get_softmax(scores, threshold=None):
exp_scores = np.exp(scores)
for batch in exp_scores:
for sentence in exp_scores:
for i in range(len(sentence)):
probabilities = sentence[i] / np.sum(sentence[i])
sentence[i] = [p if threshold is None or p >= threshold else 0 for p in probabilities]
return exp_scores
def predict(self, sentences, batch_size=20, threshold=None):
result = []
for batch in NerModel.slice(sentences, batch_size):
sentence_lengths = NerModel.get_sentence_lengths(batch)
feed_dict = {
self.sentence_lengths: sentence_lengths,
self.word_embeddings: NerModel.get_word_embeddings(batch),
self.word_lengths: NerModel.get_word_lengths(batch),
self.char_ids: NerModel.get_char_ids(batch),
                self.dropout: 1.0  # keep probability of 1.0, i.e. no dropout at inference
}
prediction = self.session.run(self.prediction, feed_dict=feed_dict)
batch_prediction = np.reshape(prediction, (len(batch), -1))
for i in range(len(batch)):
sentence = []
for word in range(sentence_lengths[i]):
tag = batch_prediction[i][word]
sentence.append(tag)
result.append(sentence)
return result
def close(self):
if self.session_created:
self.session.close()
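# Minimal usage sketch (not part of the original source). 'train_dataset' and
# 'dev_dataset' are placeholder lists of feature dicts carrying the
# 'word_embeddings', 'char_ids', 'tag_ids' and 'is_word_start' keys consumed by
# the static helpers above.
#
#     ner = NerModel()
#     ner.add_cnn_char_repr(nchars=101, dim=25, nfilters=25)
#     ner.add_pretrained_word_embeddings(dim=100)
#     ner.add_context_repr(ntags=10, hidden_size=100)
#     ner.add_inference_layer(crf=False)
#     ner.add_training_op()
#     ner.train(train_dataset, epoch_end=10, init_variables=True)
#     prec, rec, f1 = ner.measure(dev_dataset)
#     ner.close()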
|
apache-2.0
| -6,108,740,250,423,457,000 | 41.352713 | 167 | 0.541182 | false |
vlegoff/tsunami
|
src/primaires/vehicule/force.py
|
1
|
2990
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Force, détaillée plus bas."""
from abstraits.obase import *
from math import sqrt
from .vecteur import Vecteur
class Force(BaseObj):
"""Classe représentant une force.
"""
def __init__(self, subissant=None):
"""Constructeur de la force"""
BaseObj.__init__(self)
self.subissant = subissant
self.desuette = False
def __getnewargs__(self):
return ()
def __str__(self):
return str(self.calcul())
@property
def valeur(self):
return self.calcul()
def calcul(self):
return Vecteur(0, 0, 0)
class Propulsion(Force):
"""Classe représentant une force de propulsion.
"""
def __init__(self, valeur=None):
"""Constructeur de la force"""
Force.__init__(self)
if valeur:
self._valeur = valeur
else:
self._valeur = Vecteur(1, 0, 0)
def calcul(self):
return self._valeur
class Frottement(Force):
"""Classe représentant une force de frottement.
"""
def __init__(self, subissant, coef):
"""Constructeur de la force"""
Force.__init__(self, subissant)
self.coef = coef
def __getnewargs__(self):
return (None, 1)
def calcul(self):
return -self.coef * self.subissant.vitesse
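# Minimal usage sketch (not part of the original source): 'vehicule' stands for
# any object exposing a 'vitesse' Vecteur attribute, as Frottement expects, and
# Vecteur instances are assumed to support addition.
#
#     poussee = Propulsion(Vecteur(2, 0, 0))
#     frottement = Frottement(vehicule, 0.5)
#     resultante = poussee.calcul() + frottement.calcul()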
|
bsd-3-clause
| -9,029,035,095,335,561,000 | 30.09375 | 79 | 0.669012 | false |
KDE/pykde4
|
examples/kdeuiExamples/kaboutapplicationdialog.py
|
1
|
2937
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt4.QtCore import SIGNAL, Qt
from PyQt4.QtGui import QLabel, QSizePolicy
from PyKDE4.kdecore import i18n, ki18n, KAboutData
from PyKDE4.kdeui import KVBox, KHBox, KPushButton, KAboutApplicationDialog
helpText = """The KAboutApplicationDialog is normally displayed from the
application's Help menu.
It requires a KAboutData object to provide the information displayed in
the dialog. This is usually the same KAboutData object constructed when
you start your program, although a different object could be used.
Press the button below to display the dialog.
"""
dialogName = "KAboutApplicationDialog"
appName = "kaboutapplicationdialog.py"
catalog = ""
programName = ki18n ("kaboutapplicationdialog") #ki18n required here
version = "1.0"
description = ki18n ("KAboutApplicationDialog Example") #ki18n required here
license = KAboutData.License_GPL
copyright = ki18n ("(c) 2007 Jim Bublitz") #ki18n required here
text = ki18n ("none") #ki18n required here
homePage = "www.riverbankcomputing.com"
bugEmail = "jbublitz@nwinternet.com"
aboutData = KAboutData (appName, catalog, programName, version, description,
license, copyright, text, homePage, bugEmail)
# ki18n required for first two addAuthor () arguments
aboutData.addAuthor (ki18n ("Troy Melhase"), ki18n ("original concept"))
aboutData.addAuthor (ki18n ("Jim Bublitz"), ki18n ("pykdedocs"))
class MainFrame(KVBox):
"""Example class showing how KAboutApplicationDialog works."""
def __init__(self, parent):
super(MainFrame, self).__init__(parent)
self.help = QLabel (helpText, self)
self.layout ().setAlignment (self.help, Qt.AlignHCenter)
hBox = KHBox (self)
self.button = KPushButton(i18n("Show %s" % dialogName), hBox)
self.button.setMaximumSize (250, 30)
self.button.clicked.connect(self.showDialog)
def showDialog(self):
dlg = KAboutApplicationDialog (aboutData, self.parent ())
dlg.exec_ ()
# This example can be run standalone
if __name__ == '__main__':
import sys
from PyQt4.QtCore import SIGNAL
from PyKDE4.kdecore import KCmdLineArgs, KAboutData, KLocalizedString, ki18n
from PyKDE4.kdeui import KApplication, KMainWindow
class MainWin (KMainWindow):
def __init__ (self, *args):
KMainWindow.__init__ (self)
self.resize (640, 480)
self.setCentralWidget (MainFrame (self))
#-------------------- main ------------------------------------------------
KCmdLineArgs.init (sys.argv, aboutData)
app = KApplication ()
mainWindow = MainWin (None, "main window")
mainWindow.show()
app.lastWindowClosed.connect(app.quit)
app.exec_ ()
|
gpl-2.0
| 8,162,753,217,201,841,000 | 30.580645 | 84 | 0.646578 | false |
caesar0301/omnilab-misc
|
OmniperfTools/perf_aem.py
|
1
|
1492
|
#!/usr/bin/env python
# Evaluating Activity-Entity model
#
# By chenxm
#
import os
import sys
import groundtruth
from PyOmniMisc.model.aem import AEM
def print_usage():
print("Usage: python exHttp.py <omniperf_trace>")
cut_gap = 8 # sec
def modelAEM(etrs):
print("Modeling AEM ...")
# Modeling traffic with AEM
aem = AEM(etrs, cut_gap)
withUA = aem.sessionsWithUA()
withoutUA = aem.sessionsWithoutUA()
forest = {}
for ua in withUA:
ss = withUA[ua]
for el in ss[1:]:
trees = aem.model(el)
if ua not in forest:
forest[ua] = []
forest[ua].extend(trees)
for host in withoutUA:
nss = withoutUA[host]
for el in nss[1:]:
trees = aem.model(el)
if host not in forest:
forest[host] = []
forest[host].extend(trees)
return forest
# Options
if len(sys.argv) < 2:
print_usage()
sys.exit(-1)
# Http log file
input_folder = sys.argv[1]
hl = os.path.join(input_folder, 'http_logs')
if not os.path.exists(hl):
raise Exception("Sry, I do not find *http_logs*.out in given folder.")
# Read http logs
etrs = groundtruth.readHttpEntries(hl)
if len(etrs) == 0:
print("No entries")
sys.exit(-1)
forest = modelAEM(etrs)
forest_gt = groundtruth.modelGT(input_folder)
fms = groundtruth.evaluate(forest, forest_gt)
of = open("perf-aem-c%d.out" % cut_gap, "ab")
for fm in fms:
if fm != 0:
of.write("%.3f\n" % fm)
|
gpl-2.0
| 667,371,110,707,126,800 | 22.328125 | 74 | 0.605228 | false |
plotly/plotly.py
|
packages/python/plotly/plotly/validators/layout/xaxis/_categoryorder.py
|
1
|
1151
|
import _plotly_utils.basevalidators
class CategoryorderValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="categoryorder", parent_name="layout.xaxis", **kwargs
):
super(CategoryorderValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop(
"values",
[
"trace",
"category ascending",
"category descending",
"array",
"total ascending",
"total descending",
"min ascending",
"min descending",
"max ascending",
"max descending",
"sum ascending",
"sum descending",
"mean ascending",
"mean descending",
"median ascending",
"median descending",
],
),
**kwargs
)
|
mit
| 5,048,350,118,352,210,000 | 32.852941 | 79 | 0.435274 | false |
mercycorps/tola-activity
|
htdocs/activitydb/migrations/0035_auto_20151028_0057.py
|
1
|
3568
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('activitydb', '0034_auto_20151027_1851'),
]
operations = [
migrations.AddField(
model_name='projectagreement',
name='cfw_estimate_cost_materials',
field=models.CharField(max_length='255', null=True, verbose_name='Estimated Total Cost of Materials', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='cfw_estimate_female',
field=models.IntegerField(null=True, verbose_name='Estimated # of Female Laborers', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='cfw_estimate_male',
field=models.IntegerField(null=True, verbose_name='Estimated # of Male Laborers', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='cfw_estimate_person_days',
field=models.IntegerField(null=True, verbose_name='Estimated # of Person Days', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='cfw_estimate_project_days',
field=models.IntegerField(null=True, verbose_name='Estimated # of Project Days', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='cfw_estimate_total',
field=models.IntegerField(null=True, verbose_name='Estimated Total # of Laborers', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='cfw_estimate_wages_budgeted',
field=models.CharField(max_length=255, null=True, verbose_name='Estimated Wages Budgeted', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='distribution_estimate',
field=models.CharField(max_length=255, null=True, verbose_name='Estimated # of Items Distributed', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='distribution_type',
field=models.CharField(max_length=255, null=True, verbose_name='Type of Items Distributed', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='distribution_uom',
field=models.CharField(max_length=255, null=True, verbose_name='Unit of Measure', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='estimate_female_trained',
field=models.IntegerField(null=True, verbose_name='Estimated # of Female Trained', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='estimate_male_trained',
field=models.IntegerField(null=True, verbose_name='Estimated # of Male Trained', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='estimate_total_trained',
field=models.IntegerField(null=True, verbose_name='Estimated Total # Trained', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='estimate_trainings',
field=models.IntegerField(null=True, verbose_name='Estimated # of Trainings Conducted', blank=True),
),
]
|
gpl-2.0
| -8,942,267,121,895,962,000 | 41.47619 | 126 | 0.607623 | false |
SolarLune/SolHelp
|
BGE/bghelper/time.py
|
1
|
1469
|
__author__ = 'SolarLune'
from bge import logic
# Time-based mechanics.
class TimerBank():
def __init__(self):
self.time = 0.0
self.timers = {}
def add(self, timer_name):
self.timers[timer_name] = Timer()
def set(self, timer_name, time):
self.timers[timer_name].set(time)
def get_time_left(self, timer_name):
return self.timers[timer_name].get_time_left()
def time_up(self, timer_name):
return self.timers[timer_name].time_up()
def update(self):
for t in self.timers.values():
t.update()
class Timer():
def __init__(self):
self.time = 0.0
self.wait_time = 0.0
self.set_time = 0.0
self.on = True
def set(self, time):
self.set_time = self.time
self.wait_time = time
self.on = True
def add(self, time):
#self.set_time += time
self.wait_time += time
def get_time_left(self):
if self.on:
return self.wait_time - (self.time - self.set_time)
else:
return 0
def time_up(self):
if self.on:
return self.get_time_left() <= 0
else:
return False
def pause(self):
self.on = False
def resume(self):
self.on = True
def update(self, frequency=0):
if self.on:
self.time += (1.0 / logic.getLogicTicRate()) * (frequency + 1)
timer_bank = TimerBank()
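# Minimal usage sketch (illustrative only; the helper function below is an
# assumption, not part of this module). A Timer is armed with set(), polled
# with time_up(), and advanced once per logic tick via update().
def _example_cooldown(timer, cooldown=2.0):
    # Advance the timer by one logic tick, then check whether the cooldown
    # elapsed this tick and re-arm it if so.
    timer.update()
    if timer.time_up():
        timer.set(cooldown)  # re-arm for the next cooldown period
        return True
    return False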
|
mit
| 7,765,912,870,802,807,000 | 16.081395 | 74 | 0.530293 | false |
michael-afanasiev/specfemHelperScripts
|
setup_specfem_lasif.py
|
1
|
11312
|
#!/usr/bin/env python
import os, errno, shutil
import sys, subprocess
import argparse
import xml.etree.ElementTree as ET
class ParameterError(Exception):
pass
class PathError(Exception):
pass
class MesherNotRunError(Exception):
pass
class colours:
ylw = '\033[93m'
blu = '\033[94m'
rst = '\033[0m'
def print_blu(message):
print colours.blu + message + colours.rst
def print_ylw(message):
print colours.ylw + message + colours.rst
def read_parameter_file(filename):
"""
Little function to read the specfem setup parameter file, and returns a
dictionary of parameters.
:filename: path to parameter file.
"""
# List of required parameters.
required = ['compiler_suite', 'project_name', 'scratch_path',
'specfem_root', 'lasif_path', 'iteration_name']
# Read file, skip lines with #.
parameters = {}
file = open(filename, 'r')
for line in file:
if line.startswith('#'):
continue
fields = line.split()
parameters.update({fields[0]:fields[1]})
# Make sure all required parameters are present.
for param in required:
if param not in parameters.keys():
raise ParameterError('Parameter ' + param + \
' not in parameter file.')
# Fix paths.
parameters['scratch_path'] = os.path.abspath(parameters['scratch_path'])
parameters['specfem_root'] = os.path.abspath(parameters['specfem_root'])
parameters['lasif_path'] = os.path.abspath(parameters['lasif_path'])
return parameters
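# Illustrative parameter file accepted by read_parameter_file(): plain
# whitespace-separated "key value" lines, with '#' lines skipped. All values
# below are made-up examples, not defaults of this script.
_EXAMPLE_PARAMETER_FILE = """\
# specfem setup parameters
compiler_suite cray
project_name   my_project
scratch_path   ./scratch
specfem_root   ./specfem3d_globe
lasif_path     ./lasif_project
iteration_name 1
"""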
def safe_copy(source, dest):
"""
Sets up a file copy that won't fail for a stupid reason.
:source: Source file.
:dest: Destination directory.
"""
source = os.path.join (source)
dest = os.path.join (dest)
if (os.path.isdir(source)):
return
if not (os.path.isdir(dest)):
return
try:
shutil.copy(source, dest)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def safe_sym_link(source, dest):
"""
Sets up symbolic links that won't fail for a stupid reason.
:source: Source file.
:dest: Destination file.
"""
source = os.path.join (source)
dest = os.path.join (dest)
if (os.path.isdir(source)):
return
try:
os.symlink(source, dest)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def setup_dir_tree(event_path):
"""
Sets up the simulation directory strucutre for one event.
:event_path: Path the forward simulation directory for a specific event.
"""
mkdir_p(event_path)
mkdir_p(event_path + '/bin')
mkdir_p(event_path + '/DATA')
mkdir_p(event_path + '/OUTPUT_FILES')
mkdir_p(event_path + '/DATABASES_MPI')
mkdir_p(event_path + '/DATA/cemRequest')
def find_event_names(iteration_xml_path):
"""
Quickly parses the iteration xml file and extracts all the event names.
:iteration_xml_path: Path the xml file driving the requested iteration.
"""
# Find event names.
tree = ET.parse (iteration_xml_path)
root = tree.getroot ()
event_list = []
for name in root.findall ('event'):
for event in name.findall ('event_name'):
event_list.append (event.text)
return event_list
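# Illustrative iteration XML fragment parsed by find_event_names(); only the
# <event>/<event_name> nesting matters here, the root tag and names are made up:
#
#   <iteration>
#     <event>
#       <event_name>GCMT_event_EXAMPLE_1</event_name>
#     </event>
#     <event>
#       <event_name>GCMT_event_EXAMPLE_2</event_name>
#     </event>
#   </iteration>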
def mkdir_p(path):
"""
Makes a directory and doesn't fail if the directory already exists.
:path: New directory path.
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def setup_run():
"""
Function does a whole bunch of things to set up a specfem run on daint.
"""
# Find iteration xml file.
iteration_xml_path = os.path.join(p['lasif_path'],
'ITERATIONS/ITERATION_%s.xml' % (p['iteration_name']))
if not os.path.exists(iteration_xml_path):
raise PathError('Your iteration xml file does not exist in the location\
you specified.')
event_list = find_event_names(iteration_xml_path)
# Create the forward modelling directories.
print_ylw('Creating forward modelling directories...')
for event in event_list:
event_path = os.path.join(solver_base_path, event)
setup_dir_tree(event_path)
# Make master mesh directory.
mesh_path = os.path.join(solver_base_path, 'mesh')
setup_dir_tree(mesh_path)
# Copy over input files.
print_ylw('Copying initial files...')
lasif_output = os.path.join(p['lasif_path'], 'OUTPUT')
for dir in os.listdir (lasif_output):
for event in event_list:
if p['iteration_name'] in dir and event in dir:
event_output_dir = os.path.join(lasif_output, dir)
for file in os.listdir(event_output_dir):
source = os.path.join(event_output_dir, file)
dest = os.path.join(solver_base_path, event, 'DATA')
safe_copy(source, dest)
if event == event_list[0]:
source = os.path.join(event_output_dir, file)
dest = os.path.join(solver_base_path, 'mesh', 'DATA')
safe_copy(source, dest)
# Copy one instance of forward files to specfem base directory.
source = os.path.join(p['lasif_path'], 'SUBMISSION', 'Par_file')
dest = os.path.join(p['specfem_root'], 'DATA')
safe_copy(source, dest)
# # Change to specfem root directory and compile.
print_ylw('Compiling...')
os.chdir(p['specfem_root'])
with open('compilation_log.txt', 'w') as output:
proc = subprocess.Popen(['./mk_daint.sh', p['compiler_suite'],
'adjoint'], stdout=output, stderr=output)
proc.communicate()
retcode = proc.wait()
# Copy binaries to all directories.
print_ylw('Copying compiled binaries...')
for event in os.listdir(solver_base_path):
for binary in os.listdir('./bin/'):
source = os.path.join('./bin', binary)
dest = os.path.join(solver_base_path, event, 'bin')
safe_copy(source, dest)
print_ylw('Copying compiled parameter file...')
for event in os.listdir(solver_base_path):
source = os.path.join('./DATA', 'Par_file')
dest = os.path.join(solver_base_path, event, 'DATA')
safe_copy(source, dest)
# Copy jobarray script to base directory.
print_ylw('Copying jobarray sbatch script...')
source = os.path.join(p['lasif_path'], 'SUBMISSION',
'jobArray_solver_daint.sbatch')
dest = solver_root_path
safe_copy(source, dest)
log_directory = os.path.join(solver_root_path, 'logs')
mkdir_p(log_directory)
# Copy topo_bathy to mesh directory.
print_ylw('Copying topography information...')
mesh_data_path = os.path.join(solver_base_path, 'mesh', 'DATA')
mesh_topo_path = os.path.join(mesh_data_path, 'topo_bathy')
master_topo_path = os.path.join('./DATA', 'topo_bathy')
mkdir_p(mesh_topo_path)
for file in os.listdir(master_topo_path):
source = os.path.join(master_topo_path, file)
dest = os.path.join(mesh_topo_path)
safe_copy(source, dest)
# Copy submission script to mesh directory.
source = os.path.join(p['lasif_path'], 'SUBMISSION',
'job_mesher_daint.sbatch')
dest = os.path.join(solver_base_path, 'mesh')
safe_copy(source, dest)
print_blu('Done.')
def prepare_solve():
"""
Sets up symbolic link to generated mesh files.
"""
print 'Preparing solver directories.'
for dir in os.listdir(solver_base_path):
if dir == 'mesh':
continue
print_ylw('Linking ' + dir)
databases_mpi = os.path.join(solver_base_path, 'mesh', 'DATABASES_MPI')
output_files = os.path.join(solver_base_path, 'mesh', 'OUTPUT_FILES')
if not os.listdir(databases_mpi):
raise MesherNotRunError("It doesn't look like the mesher has been \
run. There are no mesh files in the expected mesh directory.")
for file in os.listdir(databases_mpi):
source = os.path.join(databases_mpi, file)
dest = os.path.join(solver_base_path, dir, 'DATABASES_MPI', file)
safe_sym_link(source, dest)
for file in os.listdir(output_files):
source = os.path.join(output_files, file)
dest = os.path.join(solver_base_path, dir, 'OUTPUT_FILES')
safe_copy(source, dest)
print_blu('Done.')
def submit_mesher():
"""
Runs over to the meshing directory, and just submits the job.
"""
mesh_dir = os.path.join(solver_base_path, 'mesh')
os.chdir(mesh_dir)
subprocess.Popen(['sbatch', 'job_mesher_daint.sbatch']).wait()
def submit_solver(first_job, last_job):
"""
Submits the job array script in the solver_root_path directory. Submits
job array indices first_job to last_job.
:first_job: The job array index of the first job to submit (i.e. 0)
:last_job: The job array index of the last job to submit (i.e. n_events-1)
"""
os.chdir(solver_root_path)
subprocess.Popen(['sbatch', '--array=%s-%s' % (first_job, last_job),
'jobArray_solver_daint.sbatch', p['iteration_name']]).wait()
parser = argparse.ArgumentParser(description='Assists in the setup of '
'specfem3d_globe on Piz Daint')
parser.add_argument('-f', type=str, help='Simulation driver parameter file.',
required=True, metavar='parameter_file_name', dest='filename')
parser.add_argument('--setup_run', action='store_true',
help='Setup the directory tree on scratch for one iteration. Requires a \
param file.')
parser.add_argument('--prepare_solve', action='store_true',
help='Symbolically links the mesh files to all forward directories.')
parser.add_argument('--submit_mesher', action='store_true',
help='Runs the mesher in the "mesh" directory.')
parser.add_argument('--submit_solver', action='store_true',
help='Submit the job array script for the current iteration.')
parser.add_argument('-fj', type=str, help='First index in job array to submit',
metavar='first_job', dest='first_job')
parser.add_argument('-lj', type=str, help='Last index in job array to submit',
metavar='last_job', dest='last_job')
args = parser.parse_args()
if args.submit_solver and (args.first_job is None or args.last_job is None):
parser.error('Submitting the solver requires the -fj and -lj arguments.')
p = read_parameter_file(args.filename)
# Construct full run path.
solver_base_path = os.path.join(p['scratch_path'], p['project_name'],
p['iteration_name'])
solver_root_path = os.path.join(p['scratch_path'], p['project_name'])
mkdir_p(solver_base_path)
if args.setup_run:
setup_run()
elif args.prepare_solve:
prepare_solve()
elif args.submit_mesher:
submit_mesher()
elif args.submit_solver:
submit_solver(args.first_job, args.last_job)
|
gpl-2.0
| 1,261,488,015,305,045,000 | 32.368732 | 80 | 0.613419 | false |
Vettejeep/Boulder_County_Home_Prices
|
value_vs_price.py
|
1
|
4101
|
# Simply uses the assessor's estimate to predict price, so we can see how much better the machine learning models are.
# requires data from Assemble_Data.py
# Copyright (C) 2017 Kevin Maher
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Data for this project may be the property of the Boulder County Assessor's office,
# they gave me free access as a student but were not clear about any restrictions regarding
# sharing the URL from which the data was downloaded.
# The data has been pre-processed from xlsx to csv files because OpenOffice had
# problems with the xlsx files.
# Data was pre-processed by a data setup script, Assemble_Data.py which produced the
# file '$working_data_5c.csv'
import pandas as pd
import numpy as np
from math import sqrt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor
from sklearn.linear_model import LinearRegression
# https://stats.stackexchange.com/questions/58391/mean-absolute-percentage-error-mape-in-scikit-learn
def mean_absolute_percentage_error(y_true, y_pred):
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
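# Worked example (made-up numbers): y_true = [100, 200], y_pred = [110, 180]
# gives |10/100| = 0.10 and |20/200| = 0.10, so the MAPE is 10.0 (percent).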
working_df = pd.read_csv('Data\\$working_data_5c.csv')
# eliminate some outliers, homes above an estimated value of $2 million are especially difficult to model
# with the available data
working_df = working_df[working_df['Age_Yrs'] > 0]
working_df = working_df[working_df['totalActualVal'] <= 2000000]
y = working_df['price']
columns = working_df.columns[2:]
X = working_df.drop(columns, axis=1) # , 'totalActualVal'
X = X.drop(labels=['price'], axis=1)
# 70/30 split of data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=245)
# determine metrics
gradient, intercept, r_value, p_value, std_err = stats.linregress(X_test['totalActualVal'], y_test)
print 'Gradient: %.4f' % gradient
print 'R Value: %.4f' % r_value
print 'R-Squared: %.4f' % r_value ** 2
# adjusted R-squared - https://www.easycalculation.com/statistics/learn-adjustedr2.php
r_sq_adj = 1 - ((1 - r_value ** 2) * (len(y_test) - 1) / (len(y_test) - X_train.shape[1] - 1))
print 'R-Squared Adjusted: %.4f' % r_sq_adj
mape = mean_absolute_percentage_error(y_test, X_test['totalActualVal'])
print 'MAPE: %.4f' % mape
# plot with regression lines, one for actual data, one to represent ideal answer
z = np.polyfit(X_test['totalActualVal'], y_test, 1)
print 'z'
print z
y_poly = [z[0] * x + z[1] for x in range(int(intercept), 3100000 + int(intercept), 100000)]
x_poly = [x for x in range(0, 3100000, 100000)]
y_perfect = [x for x in range(0, 3100000, 100000)]
plt.figure(0)
plt.plot(X_test, y_test, ".")
plt.plot(x_poly, y_poly, "-")
plt.plot(x_poly, y_perfect, "-")
plt.xlim(0, 4000000)
plt.ylim(0, 4000000)
plt.xlabel("Est Price")
plt.ylabel("Actual Price")
plt.title("Estimated vs. Actual Sales Price")
plt.show()
plt.close()
# delta_price = pd.Series((X_test['totalActualVal'] / y_test * 100.0) - 100.0)
# delta_price.to_csv('Data\\delta_price_basic.csv', index=False)
print 'min price, actual: %.2f' % np.min(y_test)
print 'min price, assessor estimate: %.2f' % np.min(X_test['totalActualVal'])
|
gpl-3.0
| 4,022,882,354,570,326,500 | 40.71875 | 117 | 0.718118 | false |
stvstnfrd/edx-platform
|
cms/djangoapps/contentstore/management/commands/git_export.py
|
1
|
2578
|
"""
This command exports a course from CMS to a git repository.
It takes as arguments the course id to export (i.e MITx/999/2020 ) and
the repository to commit too. It takes username as an option for identifying
the commit, as well as a directory path to place the git repository.
By default it will use settings.GIT_REPO_EXPORT_DIR/repo_name as the cloned
directory. It is branch aware, but will reset all local changes to the
repository before attempting to export the XML, add, and commit changes if
any have taken place.
This functionality is also available as an export view in studio if the giturl
attribute is set and the FEATURE['ENABLE_EXPORT_GIT'] is set.
"""
import logging
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from six import text_type
import cms.djangoapps.contentstore.git_export_utils as git_export_utils
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Take a course from studio and export it to a git repository.
"""
help = _('Take the specified course and attempt to '
'export it to a git repository\n. Course directory '
'must already be a git repository. Usage: '
' git_export <course_loc> <git_url>')
def add_arguments(self, parser):
parser.add_argument('course_loc')
parser.add_argument('git_url')
parser.add_argument('--username', '-u', dest='user',
help='Specify a username from LMS/Studio to be used as the commit author.')
parser.add_argument('--repo_dir', '-r', dest='repo', help='Specify existing git repo directory.')
def handle(self, *args, **options):
"""
Checks arguments and runs export function if they are good
"""
# Rethrow GitExportError as CommandError for SystemExit
try:
course_key = CourseKey.from_string(options['course_loc'])
except InvalidKeyError:
raise CommandError(text_type(git_export_utils.GitExportError.BAD_COURSE)) # lint-amnesty, pylint: disable=raise-missing-from
try:
git_export_utils.export_to_git(
course_key,
options['git_url'],
options.get('user', ''),
options.get('repo', None)
)
except git_export_utils.GitExportError as ex:
raise CommandError(text_type(ex)) # lint-amnesty, pylint: disable=raise-missing-from
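# Example invocation (illustrative; the course id, repository URL and paths are
# made up, and the exact manage.py wrapper depends on the deployment):
#
#   ./manage.py cms git_export course-v1:MITx+999+2020 \
#       git@github.com:example/course-repo.git \
#       --username staff --repo_dir /edx/var/edxapp/export_course_repos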
|
agpl-3.0
| -4,141,749,198,761,361,000 | 39.28125 | 137 | 0.671063 | false |
CiNC0/Cartier
|
cartier-python-resign-linux/tests/test_public_interface.py
|
1
|
1352
|
from isign_base_test import IsignBaseTest
import os
from os.path import exists
from isign import isign
import logging
log = logging.getLogger(__name__)
class TestPublicInterface(IsignBaseTest):
def _test_signable(self, filename, output_path):
self.resign(filename, output_path=output_path)
assert exists(output_path)
assert os.path.getsize(output_path) > 0
self.unlink(output_path)
def _test_unsignable(self, filename, output_path):
with self.assertRaises(isign.NotSignable):
self.resign(filename, output_path=output_path)
self.unlink(output_path)
def test_app(self):
self._test_signable(self.TEST_APP, self.get_temp_dir())
def test_app_ipa(self):
self._test_signable(self.TEST_IPA, self.get_temp_file())
def test_app_with_frameworks_ipa(self):
self._test_signable(self.TEST_WITH_FRAMEWORKS_IPA, self.get_temp_file())
def test_appzip(self):
self._test_signable(self.TEST_APPZIP, self.get_temp_file())
def test_non_app_txt(self):
self._test_unsignable(self.TEST_NONAPP_TXT, self.get_temp_file())
def test_non_app_ipa(self):
self._test_unsignable(self.TEST_NONAPP_IPA, self.get_temp_file())
def test_simulator_app(self):
self._test_unsignable(self.TEST_SIMULATOR_APP, self.get_temp_file())
|
apache-2.0
| 6,229,180,489,206,329,000 | 31.190476 | 80 | 0.678994 | false |
paulmathews/nova
|
nova/openstack/common/rpc/amqp.py
|
1
|
15162
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implemenations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import inspect
import logging
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common.rpc import common as rpc_common
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug('Pool creating new connection')
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the caller of
create_connection(). This is essentially a wrapper around
Connection that supports 'with'. It can also return a new
Connection, or one from a pool. The function will also catch
when an instance of this class is to be deleted. With that
we can return Connections to the pool on exceptions and so
forth without making the caller be responsible for catching
them. If possible the function makes sure to return a
connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool"""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self"""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance"""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
ending=False):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure)
try:
msg = {'result': reply, 'failure': failure}
except TypeError:
msg = {'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),
'failure': failure}
if ending:
msg['ending'] = True
conn.direct_send(msg_id, msg)
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call"""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None):
if self.msg_id:
msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
ending)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
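# Illustrative round trip (made-up values, not executed here): for a context
# whose to_dict() is {'user_id': 'u1'}, pack_context() turns
#     {'method': 'echo', 'args': {'value': 42}}
# into
#     {'method': 'echo', 'args': {'value': 42}, '_context_user_id': 'u1'}
# and unpack_context() later strips the '_context_' prefix back off on the
# consumer side.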
class ProxyCallback(object):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
self.proxy = proxy
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version', None)
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method, args)
def _process_data(self, ctxt, version, method, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, **args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except Exception as e:
LOG.exception('Exception during message handling')
ctxt.reply(None, sys.exc_info(),
connection_pool=self.connection_pool)
class MulticallWaiter(object):
def __init__(self, conf, connection, timeout):
self._connection = connection
self._iterator = connection.iterconsume(timeout=timeout or
conf.rpc_response_timeout)
self._result = None
self._done = False
self._got_ending = False
self._conf = conf
def done(self):
if self._done:
return
self._done = True
self._iterator.close()
self._iterator = None
self._connection.close()
def __call__(self, data):
"""The consume() callback will call this. Store the result."""
if data['failure']:
failure = data['failure']
self._result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
self._result = data['result']
def __iter__(self):
"""Return a result until we get a 'None' response from consumer"""
if self._done:
raise StopIteration
while True:
try:
self._iterator.next()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
result = self._result
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection"""
return ConnectionContext(conf, connection_pool, pooled=not new)
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
# Can't use 'with' for multicall, as it returns an iterator
# that will continue to use the connection. When it's done,
# connection.close() will get called which will put it back into
# the pool
LOG.debug(_('Making asynchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
pack_context(msg, context)
conn = ConnectionContext(conf, connection_pool)
wait_msg = MulticallWaiter(conf, conn, timeout)
conn.declare_direct_consumer(msg_id, wait_msg)
conn.topic_send(topic, msg)
return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, msg)
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, msg)
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, msg)
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, msg)
def notify(conf, context, topic, msg, connection_pool):
"""Sends a notification event on a topic."""
event_type = msg.get('event_type')
LOG.debug(_('Sending %(event_type)s on %(topic)s'), locals())
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
try:
return conf.control_exchange
except cfg.NoSuchOptError:
return 'openstack'
|
apache-2.0
| -1,366,094,623,838,141,700 | 34.591549 | 79 | 0.618388 | false |
chaddienhart/UdacityP4-GAE-API
|
conference.py
|
1
|
29852
|
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import logging
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.ext import ndb
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from utils import getUserId
from settings import WEB_CLIENT_ID
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import Session
from models import SessionForm
from models import SessionForms
from models import BooleanMessage
from models import ConflictException
from google.appengine.api import memcache
from models import StringMessage
from google.appengine.api import taskqueue
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
SESSION_GET_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1),
)
SESSION_BY_TYPE = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
typeOfSession=messages.StringField(2)
)
SESSION_BY_SPEAKER = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1),
)
SESSION_ADD_WISH_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSessionKey=messages.StringField(1),
)
CONF_BY_TOPIC = endpoints.ResourceContainer(
message_types.VoidMessage,
topic=messages.StringField(1),
)
SESSION_BY_CITY = endpoints.ResourceContainer(
message_types.VoidMessage,
city=messages.StringField(1),
)
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": ["Default", "Topic"],
}
SESSION_DEFAULTS = {
"highlights": ["tbd"],
"speaker": "guest speaker",
"durationHours": 1.0,
"type": "lecture",
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api( name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Conference objects - - - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
# both for data model & outbound Message
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
setattr(request, "seatsAvailable", data["maxAttendees"])
# make Profile Key from user ID
p_key = ndb.Key(Profile, user_id)
# allocate new Conference ID with Profile key as parent
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
# make Conference key from ID
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(), 'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email')
return request
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") \
for conf in conferences]
)
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# make profile key
p_key = ndb.Key(Profile, getUserId(user))
# create ancestor query for this user
conferences = Conference.query(ancestor=p_key)
# get the user profile and display name
prof = p_key.get()
displayName = getattr(prof, 'displayName')
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, displayName) for conf in conferences]
)
@endpoints.method(message_types.VoidMessage, ConferenceForms, path='filterPlayground',
http_method='POST', name='filterPlayground')
def filterPlayground(self, request):
q = Conference.query()
q = q.filter(Conference.city == "London")
q = q.filter(Conference.topics == "Medical Innovations")
q = q.order(Conference.name)
q = q.filter(Conference.maxAttendees > 10)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q])
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
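# Illustrative filter (field and operator names come from FIELDS/OPERATORS
# above; the value is made up): the inbound filter
#     {"field": "CITY", "operator": "EQ", "value": "London"}
# is reformatted to {"field": "city", "operator": "=", "value": "London"},
# which _getQuery() turns into ndb.query.FilterNode("city", "=", "London").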
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
# TODO:
# step 1: get user profile
# step 2: get conferenceKeysToAttend from profile.
# to make a ndb key from websafe key you can use:
# ndb.Key(urlsafe=my_websafe_key_string)
# step 3: fetch conferences from datastore.
# Use get_multi(array_of_keys) to fetch all keys at once.
# Do not fetch them one by one!
# return set of ConferenceForm objects per Conference
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
return ConferenceForms(items=[self._copyConferenceToForm(conf, "") for conf in conferences])
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = '%s %s' % (
'Last chance to attend! The following conferences '
'are nearly sold out:',
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
# TODO 1
# return an existing announcement from Memcache or an empty string.
announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
if not announcement:
announcement = ''
return StringMessage(data=announcement)
####################### Begin Project 4 work ###################
def _copySessionToForm(self, session):
"""Copy relevant fields from Session to SessionForm."""
sf = SessionForm()
logging.debug(type(session))
for field in sf.all_fields():
if hasattr(session, field.name):
# convert Date to date string; just copy others
if field.name.lower().endswith('date'):
setattr(sf, field.name, str(getattr(session, field.name)))
else:
setattr(sf, field.name, getattr(session, field.name))
elif field.name == "websafeSessionKey":
setattr(sf, field.name, session.key.urlsafe())
sf.check_initialized()
return sf
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/sessions/{websafeConferenceKey}',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Given a websaveConferenceKey, return all sessions"""
sessions = Session.query()
sessions = sessions.filter(Session.webSafeConfId == request.websafeConferenceKey)
# return set of SessionForm objects one per Session
return SessionForms(items=[self._copySessionToForm(sn) for sn in sessions])
@endpoints.method(SESSION_BY_TYPE, SessionForms,
path='conference/{websafeConferenceKey}/sessions/{typeOfSession}',
http_method='GET', name='getSessionsByType')
def getSessionsByType(self, request):
"""Given a websaveConferenceKey, return all sessions of a specified type (eg lecture, keynote, workshop)"""
sessions = Session.query()
sessions = sessions.filter(Session.webSafeConfId == request.websafeConferenceKey)
sessions = sessions.filter(Session.type == request.typeOfSession)
# return set of SessionForm objects one per Session
return SessionForms(items=[self._copySessionToForm(sn) for sn in sessions])
@endpoints.method(SESSION_BY_SPEAKER, SessionForms,
path='sessions/{speaker}',
http_method='GET', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Given a speaker, return all sessions given by this particular speaker, across all conferences"""
sessions = Session.query()
sessions = sessions.filter(Session.speaker == request.speaker)
# return set of SessionForm objects one per Session
return SessionForms(items=[self._copySessionToForm(sesn) for sesn in sessions])
@endpoints.method(SESSION_GET_REQUEST, SessionForm,
path='session/{websafeConferenceKey}',
http_method='POST', name='createSession')
def createSession(self, request):
"""Create or update Session object, returning SessionForm/request.
Note: open only to the organizer of the conference"""
if not request.name:
raise endpoints.BadRequestException("Session 'name' field required")
# check for authorization, valid conference key, and that the current user is the conference orgainizer
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
try:
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
except TypeError:
raise endpoints.BadRequestException('Sorry, only string is allowed as websafeConferenceKey input')
except Exception, e:
if e.__class__.__name__ == 'ProtocolBufferDecodeError':
raise endpoints.BadRequestException('Sorry, the websafeConferenceKey string seems to be invalid')
else:
raise
if not conf:
raise endpoints.NotFoundException('No conference found with key: %s' % request.websafeConferenceKey)
if user_id != getattr(conf, 'organizerUserId'):
raise endpoints.UnauthorizedException('Only conference organizer is authorized to add sessions.')
# copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeConferenceKey'] # session does not have a websafeConferenceKey
# add default values for those missing (both data model & outbound Message)
for df in SESSION_DEFAULTS:
if data[df] in (None, []):
data[df] = SESSION_DEFAULTS[df]
setattr(request, df, SESSION_DEFAULTS[df])
# convert dates from strings to Date objects
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
data['webSafeConfId'] = request.websafeConferenceKey
del data['websafeSessionKey'] # this is only in the SessionForm
logging.debug(data)
# creation of Session, record the key to get the item & return (modified) SessionForm
sessionKey = Session(**data).put()
# start the task to update the conference featured speaker if needed
if data['speaker'] != SESSION_DEFAULTS['speaker']:
taskqueue.add(params={'websafeConferenceKey': request.websafeConferenceKey, 'speaker': data['speaker']},
url='/tasks/set_featured_speaker')
return self._copySessionToForm(sessionKey.get())
@endpoints.method(SESSION_ADD_WISH_REQUEST, BooleanMessage,
path='sessions/addwish/{websafeSessionKey}',
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Add a session (using the websaveSessionKey) to the users session wish list.
Returns true if successful, false otherwise."""
retval = None
# make sure user is authorized
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# check if user already registered otherwise add
# could also check to see if the session time conflicts with others already in the list
if request.websafeSessionKey in profile.sessionKeysWishList:
raise ConflictException(
"You have already added this session to your wish list.")
else:
profile.sessionKeysWishList.append(request.websafeSessionKey)
retval = True
profile.put()
return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, SessionForms,
path='sessions/wishlist',
http_method='GET', name='getSessionsInWishlist')
def getSessionsInWishlist(self, request):
"""Return list of Sessions the user has in there wish list."""
# make sure user is authorized
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# look up the sessions based on SessionKey in the profile.sessionKeysWishList
s_keys = [ndb.Key(urlsafe=wsk) for wsk in profile.sessionKeysWishList]
sessions = ndb.get_multi(s_keys)
# return set of SessionForm objects one per Session
return SessionForms(items=[self._copySessionToForm(sesn) for sesn in sessions])
@endpoints.method(CONF_BY_TOPIC, ConferenceForms,
path='getconferencebytopic/{topic}',
http_method='GET', name='getConferenceByTopic')
def getConferenceByTopic(self, request):
"""Given a speaker, return all sessions given by this particular speaker, across all conferences"""
conferences = Conference.query()
conferences = conferences.filter(Conference.topics.IN([request.topic]))
# return individual ConferenceForm object per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, "") for conf in conferences])
@endpoints.method(SESSION_BY_CITY, SessionForms,
path='getsessionbycity/{city}',
http_method='GET', name='getSessionByCity')
def getSessionByCity(self, request):
"""Given a city, return all sessions across all conferences in the city."""
# get all the conferences in the city
c = Conference.query()
c = c.filter(Conference.city == request.city)
conferences = c.fetch()
logging.debug(conferences)
# get all the websafeConferenceKeys for the conferences
confWebKeys = [conf.key.urlsafe() for conf in conferences]
logging.debug(confWebKeys)
# find all the sessions matching any of the conferences that matched the city, across all conferences
sessions = Session.query()
sessions = sessions.filter(Session.webSafeConfId.IN(confWebKeys))
# return set of SessionForm objects one per Session
return SessionForms(items=[self._copySessionToForm(sesn) for sesn in sessions])
@staticmethod
def _cacheFeaturedSpeaker(wsck, speaker):
"""Create the featured speaker announcement & assign to memcache.
used by getFeaturedSpeaker().
The announcement will have the following format:
'Featured Speaker: <speaker>, Sessions: <session1>, <session2>, ...
"""
sessions = Session.query()
sessions = sessions.filter(Session.webSafeConfId == wsck)
sessions = sessions.filter(Session.speaker == speaker)
sessions = sessions.fetch()
logging.debug(speaker)
if(len(sessions) < 2):
return
announcement = "Featured speaker: %s, Sessions: " % speaker
for session in sessions:
announcement += "%s, " % session.name
# might want to check that the websafeConferenceKey is not none
# slice off the trailing ", "
memcache.set(wsck, announcement[:-2])
logging.debug(announcement)
return announcement
@endpoints.method(CONF_GET_REQUEST, StringMessage,
path='conference/{websafeConferenceKey}/featuredspeaker/get',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Return featured speaker for the conference from memcache (if there is one, '' if none)."""
logging.debug(request.websafeConferenceKey)
info = memcache.get(request.websafeConferenceKey)
logging.debug(info)
if not info:
info = ''
return StringMessage(data=info)
# registers API
api = endpoints.api_server([ConferenceApi])
|
apache-2.0
| 2,507,629,036,731,198,000 | 38.696809 | 116 | 0.630946 | false |
csparpa/pyowm
|
tests/integration/agroapi10/test_integration_soil_data.py
|
1
|
1137
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os
from pyowm import owm
from pyowm.agroapi10.polygon import GeoPolygon
from pyowm.agroapi10.soil import Soil
class IntegrationTestsSoilData(unittest.TestCase):
__owm = owm.OWM(os.getenv('OWM_API_KEY', None))
def test_call_soil_data(self):
mgr = self.__owm.agro_manager()
# check if any previous polygon exists on this account
n_old_polygons = len(mgr.get_polygons())
# create pol1
geopol1 = GeoPolygon([[
[-121.1958, 37.6683],
[-121.1779, 37.6687],
[-121.1773, 37.6792],
[-121.1958, 37.6792],
[-121.1958, 37.6683]
]])
test_pol = mgr.create_polygon(geopol1, 'soil_data_test_pol')
soil = mgr.soil_data(test_pol)
self.assertTrue(isinstance(soil, Soil))
self.assertEqual(test_pol.id, soil.polygon_id)
# Delete test polygon
mgr.delete_polygon(test_pol)
polygons = mgr.get_polygons()
self.assertEqual(n_old_polygons, len(polygons))
if __name__ == "__main__":
unittest.main()
|
mit
| -6,533,728,216,020,760,000 | 24.266667 | 68 | 0.598065 | false |
pasquantonio/blackjack_terminal
|
src/blackjack.py
|
1
|
20969
|
#!/usr/bin/env python
import sqlite3
import argparse
import sys
import time
from player_class import Player, Fore, randint
from bot_player_class import BotPlayer
from dealer_class import Dealer
from deck_class import Deck
"""
Global Game Functions
"""
def create_players(p, bots, minimum, maximum):
""" Create and append players and bots and return list of players."""
players = []
for i in range(0, p):
n, b = get_user_info(i)
players.append(Player(n, b, "Human", minimum, maximum))
if not bot_bets:
for i in range(0, bots):
entering = True
while entering:
try:
cash = int(
raw_input(
"Enter starting cash for Bot {num} "
"(20, 50, 100, 200, 500, 1000, 2000): "
.format(num = i+1)))
if cash in [20, 50, 100, 200, 500, 1000, 2000]:
entering = False
else:
print(
"Please enter one of these: (20, 50, 100, 200, 500): "
)
except ValueError, e:
print("Enter a number please")
players.append(BotPlayer("",cash, "Bot", minimum, maximum))
else:
for i in range(0, bots):
players.append(BotPlayer("",bot_bets, "Bot", minimum, maximum))
return players
def get_user_info(i):
""" Get Name and Starting Cash from player and Return."""
buy_in_list = [20,50,100,200,500, 1000, 2000]
name = raw_input("Enter player {number}'s name: ".format(number = i+1))
choosing = True
while choosing:
try:
buy = int(
raw_input("Select Starting Cash: 20, 50, 100, 200, "
"500, 1000, 2000: ")
)
except ValueError, e:
print("Invalid. Choose one of the above")
continue
if buy not in buy_in_list:
print("Invalid. Choose again")
else:
choosing = False
return name, buy
def show_player_info(players, dealer = None):
""" Print each player's information to the console."""
pnd = '#'
for player in players:
player.show_info()
if dealer != None:
dealer.show_card()
print(pnd*50)
def deal(players, shoe):
""" Initial deal. Deal Two cards to each player, check for blackjack."""
dealer_blackjack = False
print("Dealing Cards...")
for player in players:
card = deal_card(shoe)
player.receive_card(card)
for player in players:
card = deal_card(shoe)
player.receive_card(card)
if player.get_score() == 21:
player.blackjack = True
def play(dealer, players, shoe):
""" Container for player/dealer turns. Return value from player_turn."""
busted = player_turn(dealer, players, shoe)
dealer_turn(players, shoe, busted)
return busted
def player_split(player, shoe):
""" Split player's hand, receive user unput for the next action."""
print("Split")
player.split_hand()
card1 = deal_card(shoe)
card2 = deal_card(shoe)
player.split_receive_cards(card1, card2)
player.split_show()
i = 0 # hand counter
for hand in player.hand:
print("{n}: {h}: {c}".format(
n = player.name,
h = [card.display for card in hand],
c = player.get_split_score()[i])
)
deciding = True
while deciding:
try:
action = int(raw_input(
"1: Hit, 2: Stand,"
"3: DoubleDown, 4: Info: ")
)
except ValueError, e:
action = 0
if action == 1:
player.split_hit_count[i] = player.split_hit_count[i] + 1
print("Hit")
card = deal_card(shoe)
hand.append(card)
print("Card: {c}".format(c = card.display))
if check_hand_bust(hand):
print("Hand Busted!")
deciding = False
else:
player.split_show()
elif action == 2:
print("Stand")
deciding = False
continue
elif action == 3:
if player.split_hit_count[i] == 0:
total_bet = 0
for bet in player.split_bet:
total_bet = total_bet + bet
if total_bet + player.split_bet[i] <= player.cash:
print("Double Down")
player.split_bet[i] = player.split_bet[i] * 2
print("Bet for hand {i} is now: {b}"\
.format(i = i+1, b = player.split_bet[i]))
card = deal_card(shoe)
hand.append(card)
print("Card: {c}".format(c = card.display))
if check_hand_bust(hand):
print("Hand {i} Busted!".format(i = i + 1))
else:
player.split_show()
deciding = False
else:
print("Not Enough cash to double down")
else:
print("You can only double down if you haven't hit yet")
elif action == 4:
player.split_show()
else:
print("Invalid. Enter a number 1 - 4")
i = i + 1
def check_hand_bust(hand):
""" Check if a hand has busted and return boolean."""
sum = 0
ace = False
for card in hand:
if card.value == 11:
ace = True
sum = sum + card.value
if sum > 21:
if ace:
sum = sum - 10
ace = False
else:
return True
return False
def player_turn(dealer, players, shoe):
"""Get input from user on what action to take on their hand."""
bust_count = 0
deciding = True
for player in players:
if not player.blackjack:
if player.type != 'Bot':
dealer.show_card()
player.quick_show()
ask = True
while ask:
try:
action = int(
raw_input(
"1: Hit, 2: Stand, 3: Split,"
"4: DoubleDown, 5: Surrender,"
"6: Insurance, 7: Self Info,"
"8: All Info: "
)
)
except ValueError, e:
print("Please type a number")
action = 0
if action == 1: # HIT
player.hit_count = player.hit_count + 1
card = deal_card(shoe)
player.receive_card(card)
print("Card: {c}".format(c = card.display))
if WAIT_TIME > 0:
time.sleep(WAIT_TIME)
if player.check_bust():
print("{n} Busts!".format(n = player.name))
bust_count = bust_count + 1
ask = False
else:
player.quick_show()
elif action == 2: #STAND
ask = False
elif action == 3: #SPLIT
if player.hand[0].value == player.hand[1].value:
if player.bet*2 <= player.cash:
player.split = True
player.split_bet = [player.bet, player.bet]
player_split(player, shoe)
ask = False
else:
print("Not enough cash to do that bet")
else:
print("Cannot do that action")
elif action == 4: #DOUBLE DOWN
if player.hit_count == 0:
if player.bet*2 <= player.cash:
player.double_down()
card = deal_card(shoe)
player.receive_card(card)
print("Card: {c}".format(c = card.display))
if player.check_bust():
print("{n} Busts!".format(n = player.name))
bust_count = bust_count + 1
else:
player.quick_show()
ask = False
else:
print("Not enough cash!")
else:
print("You've already hit, cannot double down.")
elif action == 5: #SURRENDER
if player.hit_count == 0:
print("{n} surrender's hand.".\
format(n = player.name))
tmp = player.bet/2
player.cash = player.cash - tmp
player.surrender = True
ask = False
else:
print("You've already hit, cannot surrender.")
elif action == 6: #INSURANCE
if player.hit_count == 0:
if dealer.hand[0].value == 11:
print("Insurance")
player.insurance = True
player.insurance_bet = player.bet/2
if (player.insurance_bet
+ player.bet) > player.cash:
print("Cannot afford insurance")
player.insurance = False
player.insurance_bet = 0
else:
print("Not allowed")
else:
print("You've already hit, cannot buy insurance.")
elif action == 7: # PLAYER INFO
player.show_info()
elif action == 8:
show_player_info(players, dealer)
else:
print("Invalid. Enter a number 1 - 7")
else:
while 1:
player.quick_show()
if player.hit():
print('{n} hits'.format(n = player.name))
card = deal_card(shoe)
player.receive_card(card)
print("Card: {c}".format(c = card.display))
if WAIT_TIME > 0:
time.sleep(WAIT_TIME)
player.quick_show()
if player.check_bust():
print("{n} Bust!".format(n = player.name))
break
else:
player.quick_show()
print("{n} stands".format(n = player.name))
break
return bust_count
def dealer_turn(players, shoe, bust_count):
""" Dealer action function."""
dealer.quick_show()
deciding = True
while deciding:
if dealer.check_hit():
print('Dealer hit')
card = deal_card(shoe)
dealer.receive_card(card)
print("Card: {c}".format(c = card.display))
if WAIT_TIME > 0:
time.sleep(WAIT_TIME)
dealer.quick_show()
if dealer.check_bust():
print("Dealer Bust!")
deciding = False
else:
dealer.quick_show()
print('Dealer stand')
deciding = False
def win_lose(dealer, players, busted):
""" Check if each player has won, tied, or lost. Payout bets."""
if not dealer.blackjack:
dealer_score = dealer.get_score()
for player in players:
if not player.blackjack:
if not player.surrender:
if not player.split:
if player.check_bust():
player.lose()
elif player.get_score() < dealer_score \
and dealer_score < 22:
player.lose()
elif player.get_score() == dealer_score:
player.tie()
else:
player.win()
else:
for i in range (0, len(player.hand)):
if not player.split_surrender[i]:
if check_hand_bust(player.hand[i]):
player.lose()
elif \
player.get_split_score()[i] < dealer_score \
and dealer_score < 22:
player.lose()
elif\
player.get_split_score()[i] == dealer_score:
player.tie()
else:
player.win()
else:
print("{n} already surrendered this hand"\
.format(n = player.name))
else:
player.blackjack_win()
if player.insurance:
player.insurance_lose()
else:
print("Dealer Blackjack!")
for player in players:
if player.blackjack:
print("{n} Blackjack -> Push".format(n = player.name))
player.tie()
else:
player.lose()
if player.insurance:
player.insurance_win()
def reset(players):
""" Reset each player's attributes to prepare for next deal."""
for player in players:
player.reset()
def intro_msg():
""" Print a welcome message to the console."""
pnd = "#"
print(Fore.BLUE + pnd*50)
print(pnd*50)
print(" Welcome to Blackjack Terminal")
print(pnd*50)
print(pnd*50 + Fore.WHITE)
def show_table_rules(dealer, minimum, maximum):
""" Print the house rules to the console"""
print('+'*25)
print("House Rules:")
print("Dealer play: {p}".format(p = dealer.house_rule_name))
print("Minimum Bet: {m}".format(m = minimum))
print("Maximum Bet: {m}".format(m = maximum))
print('+'*25)
def place_bets(players, dealer, minimum, maximum):
""" Prompt user to input their bet for the next hand."""
for player in players:
if player.type == "Bot":
player.place_bet()
continue
deciding = True
while deciding:
print("Type 'd' or 'done' to cash out.")
print("Type 'i' or 'info' to see your information")
print("Type 's' to show all player information")
print("Type 'h' to see table rules")
try:
bet = raw_input("{n} place your bet: "\
.format(n = player.name))
if 'd' in bet:
out = players.pop(players.index(player))
print("{n} cashed out with: {c}"\
.format(n = out.name, c = out.cash))
deciding = False
continue
elif 'i' in bet:
player.show_info()
continue
elif 's' in bet:
show_player_info(players)
continue
elif 'h' in bet:
show_table_rules(dealer, minimum, maximum)
continue
else:
bet = int(bet)
except ValueError, e:
print("Invalid bet. Retry")
continue
if bet < minimum or bet > maximum:
print('-'*20)
print("Bet out of allowed range.")
print("Table Minimum: {min}, Table Maximum: {max}"\
.format(min = minimum, max = maximum))
print('-'*20)
continue
elif player.cash - bet >= 0 and bet > 0:
player.place_bet(bet)
deciding = False
else:
print("Can't do that bet.")
return players
def out_of_money(players, out):
""" Check if any player's have 0 cash and remove then from the game."""
keep = []
for player in players:
if player.cash > 0:
keep.append(player)
else:
player.out_of_money()
out.append(player)
print("Player out of money. bye {n}.".format(n = player.name))
return keep
def how_many_playing():
""" Prompt user to enter the number of human players."""
deciding = True
while deciding:
try:
number_of_players = int(raw_input(
"How many players? (up to 7): ")
)
			if number_of_players <= 7: # maximum 7 players
deciding = False
else:
print("Too many players")
except ValueError, e:
print("Please enter a number")
return number_of_players
def setup(shoe_size, house, player_count, bots, minimum, maximum):
""" Print welcome info and create players, bots, and dealer."""
intro_msg()
print("Number of decks being used in shoe: {s}".format(s = shoe_size))
#number_of_players = how_many_playing()
players = create_players(player_count, bots, minimum, maximum)
dealer = Dealer("", 0, "Dealer", house)
dealer.greeting()
people = []
for player in players:
print player.name
people.append(player)
people.append(dealer)
return players, dealer, people
def create_shoe(shoe_size):
""" Append each card from each deck to a list. Shuffle and return it."""
decks = []
for i in range(shoe_size):
deck = Deck(i)
deck.shuffle()
decks.append(deck)
shoe = [card for deck in decks for card in deck.cards]
shoe = shuffle(shoe)
return shoe
def shuffle(shoe):
""" Fisher-yates shuffle algorithm. Shuffles the shoe."""
n = len(shoe)
for i in range(n-1,0,-1):
j = randint(0, i)
if j == i:
continue
shoe[i], shoe[j] = shoe[j], shoe[i]
return shoe
def deal_card(shoe):
""" Pops a card from the shoe to 'deal' to a player."""
return shoe.pop(0)
def argument_setup(parser):
""" Parse through terminal args and assign variables."""
args = parser.parse_args()
if args.players:
if args.bots:
if args.players + args.bots > 7:
print("Can only play with at most 7 players and bots")
while 1:
try:
players = int(raw_input("Enter number of players: "))
except ValueError, e:
print("Enter a number")
if players + args.bots > 7:
print("Still too many players & bots.")
else:
break
else:
players = args.players
elif args.players > 7:
print("Can only play with at most 7 players and bots")
while 1:
try:
players = int(raw_input("Enter number of players: "))
except ValueError, e:
print("Enter a number")
if players > 7:
print("Still too many players & bots.")
else:
break
else:
players = args.players
else:
players = 0
if args.shoe:
SHOE_SIZE = args.shoe
else:
SHOE_SIZE = 6
if args.house:
house_rules = args.house
else:
house_rules = 1
if args.bots:
if args.players:
if args.bots + args.players > 7:
print("Can only play with at most 7 players and bots")
while 1:
try:
bots = int(raw_input("Enter number of bots: "))
except ValueError, e:
print("Enter a number")
if bots + players > 7:
print("Still too many players & bots.")
else:
break
else:
bots = args.bots
elif args.bots > 7:
print("Can only play with at most 7 players and bots")
while 1:
try:
bots = int(raw_input("Enter number of players: "))
except ValueError, e:
print("Enter a number")
if bots > 7:
print("Still too many players & bots.")
else:
break
else:
bots = args.bots
else:
bots = 0
global bot_bets
if args.all:
bot_bets = args.all
else:
bot_bets = False
global WAIT_TIME #messy code
if args.time:
WAIT_TIME = args.time
elif args.time == 0:
WAIT_TIME = args.time
else:
if players == 0:
WAIT_TIME = 0
else:
WAIT_TIME = 1.5
if args.minimum:
minimum = args.minimum
else:
print("Minimum table bet must be in [10, 20, 50, 100]")
print("Setting minimum table bet to 10")
minimum = 10
if args.maximum:
maximum = args.maximum
else:
print("Maximum table bet must be in [10, 20, 50, 100, 500, 1000]")
print("Setting maximum table bet to 500")
		maximum = 500
if bots + players == 0:
print("Usage: ./blackjack --players 1 --bots 2")
print("Enter number of players and bots through command line")
sys.exit(1)
return SHOE_SIZE, house_rules, players, bots, minimum, maximum
def connect_to_database():
""" Attempt to connect to sqlite database. Return connection object."""
try:
connection = sqlite3.connect('blackjack_terminal')
print("DB Connected!")
except Exception, e:
print e
sys.exit(1)
return connection
def create_tables(connection):
""" Create database tables if they are not yet created."""
connection.execute(""" CREATE TABLE IF NOT EXISTS GAMES
(GAME_ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
ROUNDS INT);""")
connection.execute(""" CREATE TABLE IF NOT EXISTS ROUNDS
(ROUND_ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
PLAYER_NAME TEXT,
BET INT,
CARD1 TEXT,
						CARD2 TEXT,
OUTCOME TEXT);""")
def insert_game(rounds, connection):
try:
connection.execute(
"""INSERT INTO GAMES (ROUNDS) VALUES ({r});""".format(r = rounds)
)
connection.commit()
except sqlite3.Error as e:
print e
def insert_round(players, connection):
""" Insert each player's hand, bet, and outcome into table ROUNDS."""
for player in players:
if not player.split:
connection.execute(
"""INSERT INTO ROUNDS (PLAYER_NAME,BET,CARD1,CARD2,OUTCOME)"""
"""VALUES ('{n}',{b},'{c1}','{c2}','{o}');"""
.format(
n=player.name,
b=player.bet,
c1=player.hand[0].name,
c2=player.hand[1].name,
o=player.outcome)
)
else:
connection.execute(
"""INSERT INTO ROUNDS (NAME,BET,CARD1,CARD2,OUTCOME)"""
"""VALUES ('{n}',{b},'{c1}','{c2}','{o}');"""
.format(
n=player.name,
b=player.split_bet[0],
c1=player.hand[0][0].name,
c2=player.hand[0][1].name,
o=player.outcome)
)
connection.execute(
"""INSERT INTO ROUNDS (NAME,BET,CARD1,CARD2,OUTCOME)"""
"""VALUES ('{n}',{b},'{c1}','{c2}','{o}');"""
.format(
n=player.name,
b=player.split_bet[1],
c1=player.hand[1][0].name,
c2=player.hand[1][1].name,
o=player.outcome)
)
connection.commit()
# Main Method. Program Starts and Ends Here
if __name__ == "__main__":
""" Game creation, setup and loop contained in here."""
parser = argparse.ArgumentParser(
description="Blackjack Terminal: A game for fun or a simulator"
" for putting strategies to the test"
)
parser.add_argument(
"-p","--players",
help="Number of Human players",
type=int
)
parser.add_argument(
"-s","--shoe",
help="set how many decks used in the shoe",
type=int
)
parser.add_argument(
"--house",
help="1: Dealer stand on all 17, 2: Dealer hit on soft 17",
type=int
)
parser.add_argument(
"-b","--bots",
help="Enter number of bots you want. Up to 7",
type=int
)
parser.add_argument(
"-t","--time",
help="Wait time for actions such as deal cards, hit, stand, etc"
". For simulations do 0, for humans playing do 1.5",
type=int
)
parser.add_argument(
"-a","--all",
help="Give every bot the same starting cash value",
type=int
)
parser.add_argument("--minimum", help="Table Minimum Bet", type=int)
parser.add_argument("--maximum", help="Table Maximum Bet", type=int)
SHOE_SIZE, house_rules, player_count, bots, minimum, maximum = \
argument_setup(parser)
connection = connect_to_database()
create_tables(connection)
players, dealer, people = setup(
SHOE_SIZE, house_rules, player_count,
bots, minimum, maximum
)
DECK_SIZE = 52
TOTAL_CARDS = SHOE_SIZE * DECK_SIZE
shoe = create_shoe(SHOE_SIZE)
####################################
# Game Loop #
####################################
reshuffle_count = 0
end_game = False
round_num = 0
out = []
while not end_game:
round_num = round_num + 1
print("*******************Round {r}**********************"\
.format(r = round_num))
if len(shoe) < TOTAL_CARDS/2:
print("Dealer Reshuffling Shoe!")
if WAIT_TIME > 0:
time.sleep(WAIT_TIME)
shoe = create_shoe(SHOE_SIZE)
reshuffle_count = reshuffle_count + 1
players = place_bets(players, dealer, minimum, maximum)
if players:
deal(people, shoe)
show_player_info(players, dealer)
busted = play(dealer, players, shoe)
win_lose(dealer, players, busted)
insert_round(players, connection)
reset(people)
players = out_of_money(players, out)
else:
print("No players left. Game over.")
print("PostGame Statistics")
print("reshuffle count: {c}".format(c = reshuffle_count))
for player in out:
player.end_game_stats()
end_game = True
insert_game(round_num, connection)
continue
connection.close()
|
mit
| -4,642,748,919,596,337,000 | 25.883333 | 73 | 0.611665 | false |
bellowsj/aiopogo
|
aiopogo/pogoprotos/settings/fort_settings_pb2.py
|
1
|
5612
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/fort_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/settings/fort_settings.proto',
package='pogoprotos.settings',
syntax='proto3',
serialized_pb=_b('\n\'pogoprotos/settings/fort_settings.proto\x12\x13pogoprotos.settings\"\xc7\x02\n\x0c\x46ortSettings\x12 \n\x18interaction_range_meters\x18\x01 \x01(\x01\x12\"\n\x1amax_total_deployed_pokemon\x18\x02 \x01(\x05\x12#\n\x1bmax_player_deployed_pokemon\x18\x03 \x01(\x05\x12!\n\x19\x64\x65ploy_stamina_multiplier\x18\x04 \x01(\x01\x12 \n\x18\x64\x65ploy_attack_multiplier\x18\x05 \x01(\x01\x12$\n\x1c\x66\x61r_interaction_range_meters\x18\x06 \x01(\x01\x12\x14\n\x0c\x64isable_gyms\x18\x07 \x01(\x08\x12 \n\x18max_same_pokemon_at_fort\x18\x08 \x01(\x05\x12)\n!max_player_total_deployed_pokemon\x18\t \x01(\x05\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_FORTSETTINGS = _descriptor.Descriptor(
name='FortSettings',
full_name='pogoprotos.settings.FortSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='interaction_range_meters', full_name='pogoprotos.settings.FortSettings.interaction_range_meters', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_total_deployed_pokemon', full_name='pogoprotos.settings.FortSettings.max_total_deployed_pokemon', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_player_deployed_pokemon', full_name='pogoprotos.settings.FortSettings.max_player_deployed_pokemon', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deploy_stamina_multiplier', full_name='pogoprotos.settings.FortSettings.deploy_stamina_multiplier', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deploy_attack_multiplier', full_name='pogoprotos.settings.FortSettings.deploy_attack_multiplier', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='far_interaction_range_meters', full_name='pogoprotos.settings.FortSettings.far_interaction_range_meters', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='disable_gyms', full_name='pogoprotos.settings.FortSettings.disable_gyms', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_same_pokemon_at_fort', full_name='pogoprotos.settings.FortSettings.max_same_pokemon_at_fort', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_player_total_deployed_pokemon', full_name='pogoprotos.settings.FortSettings.max_player_total_deployed_pokemon', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=392,
)
DESCRIPTOR.message_types_by_name['FortSettings'] = _FORTSETTINGS
FortSettings = _reflection.GeneratedProtocolMessageType('FortSettings', (_message.Message,), dict(
DESCRIPTOR = _FORTSETTINGS,
__module__ = 'pogoprotos.settings.fort_settings_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.settings.FortSettings)
))
_sym_db.RegisterMessage(FortSettings)
# @@protoc_insertion_point(module_scope)
|
mit
| -1,978,980,154,719,400,700 | 43.896 | 641 | 0.722559 | false |
apmichaud/vitess-apm
|
py/zk/zkocc.py
|
1
|
9075
|
import itertools
import json
import logging
import os
import random
import threading
from net import bsonrpc
from net import gorpc
class ZkOccError(Exception):
pass
#
# the ZkNode dict returned by these structures has the following members:
#
# Path string
# Data string
# Stat ZkStat
# Children []string
# Cached bool // the response comes from the zkocc cache
# Stale bool // the response is stale because we're not connected
#
# ZkStat is a dict:
#
# czxid long
# mzxid long
# cTime time.DateTime
# mTime time.DateTime
# version int
# cVersion int
# aVersion int
# ephemeralOwner long
# dataLength int
# numChildren int
# pzxid long
#
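# An abridged, illustrative example of such a response (the path, data and
# stat values below are made up for demonstration, not taken from a real cell):
#
#   {'Path': '/zk/test_nj/vt/ns/test_keyspace',
#    'Data': '{"Shards": []}',
#    'Stat': {'version': 3, 'numChildren': 0, 'dataLength': 14},
#    'Children': [],
#    'Cached': True,
#    'Stale': False}
#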
# A simple, direct connection to a single zkocc server. Doesn't retry.
# You probably want to use ZkOccConnection instead.
class SimpleZkOccConnection(object):
def __init__(self, addr, timeout, user=None, password=None):
self.client = bsonrpc.BsonRpcClient(addr, timeout, user, password)
def dial(self):
self.client.dial()
def close(self):
self.client.close()
def _call(self, method, **kwargs):
req = dict((''.join(w.capitalize() for w in k.split('_')), v)
for k, v in kwargs.items())
try:
return self.client.call(method, req).reply
except gorpc.GoRpcError as e:
raise ZkOccError('%s %s failed' % (method, req), e)
# returns a ZkNode, see header
def get(self, path):
return self._call('ZkReader.Get', path=path)
# returns an array of ZkNode, see header
def getv(self, paths):
return self._call('ZkReader.GetV', paths=paths)
# returns a ZkNode, see header
def children(self, path):
return self._call('ZkReader.Children', path=path)
def get_srv_keyspace_names(self, cell):
return self._call('TopoReader.GetSrvKeyspaceNames', cell=cell)['Entries']
def get_srv_keyspace(self, cell, keyspace):
return self._call('TopoReader.GetSrvKeyspace', cell=cell, keyspace=keyspace)
def get_end_points(self, cell, keyspace, shard, tablet_type):
return self._call('TopoReader.GetEndPoints', cell=cell, keyspace=keyspace, shard=shard, tablet_type=tablet_type)
# A meta-connection that can connect to multiple alternate servers, and will
# retry a couple times. Calling dial before get/getv/children is optional,
# and will only do anything at all if authentication is enabled.
class ZkOccConnection(object):
max_attempts = 2
# addrs is a comma separated list of server:ip pairs.
def __init__(self, addrs, local_cell, timeout, user=None, password=None):
self.timeout = timeout
self._input_addrs = addrs.split(',')
random.shuffle(self._input_addrs)
self.addr_count = len(self._input_addrs)
self.addrs = itertools.cycle(self._input_addrs)
self.local_cell = local_cell
if bool(user) != bool(password):
raise ValueError("You must provide either both or none of user and password.")
self.user = user
self.password = password
self.simple_conn = None
self.lock = threading.Lock()
def _resolve_path(self, zk_path):
# Maps a 'meta-path' to a cell specific path.
# '/zk/local/blah' -> '/zk/vb/blah'
parts = zk_path.split('/')
if len(parts) < 3:
return zk_path
if parts[2] != 'local':
return zk_path
parts[2] = self.local_cell
return '/'.join(parts)
def dial(self):
if self.simple_conn:
self.simple_conn.close()
# try to connect to each server once (this will always work
# if no auth is used, as then no connection is really established here)
for i in xrange(self.addr_count):
self.simple_conn = SimpleZkOccConnection(self.addrs.next(), self.timeout, self.user, self.password)
try:
self.simple_conn.dial()
return
except:
pass
self.simple_conn = None
raise ZkOccError("Cannot dial to any server, tried: %s" %
list(sorted(self._input_addrs)))
def close(self):
if self.simple_conn:
self.simple_conn.close()
self.simple_conn = None
def _call(self, client_method, *args, **kwargs):
with self.lock:
if not self.simple_conn:
self.dial()
attempt = 0
while True:
try:
return getattr(self.simple_conn, client_method)(*args, **kwargs)
except Exception as e:
attempt += 1
logging.warning('zkocc: %s command failed %u times: %s', client_method, attempt, e)
if attempt >= self.max_attempts:
raise ZkOccError('zkocc %s command failed %u times: %s' % (client_method, attempt, e))
# try the next server if there is one, or retry our only server
self.dial()
# New API.
def get_srv_keyspace_names(self, cell):
if cell == 'local':
cell = self.local_cell
return self._call('get_srv_keyspace_names', cell=cell)
def get_srv_keyspace(self, cell, keyspace):
if cell == 'local':
cell = self.local_cell
return self._call('get_srv_keyspace', cell=cell, keyspace=keyspace)
def get_end_points(self, cell, keyspace, shard, tablet_type):
if cell == 'local':
cell = self.local_cell
return self._call('get_end_points', cell=cell, keyspace=keyspace,
shard=shard, tablet_type=tablet_type)
# Old, deprecated API.
# returns a ZkNode, see header
def get(self, path):
return self._call('get', self._resolve_path(path))
# returns an array of ZkNode, see header
def getv(self, paths):
return self._call('getv', [self._resolve_path(p) for p in paths])
# returns a ZkNode, see header
def children(self, path):
return self._call('children', self._resolve_path(path))
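# An illustrative usage sketch for ZkOccConnection (the addresses, cell name,
# keyspace and tablet type below are assumed placeholder values, not real
# servers from this repository):
#
#   conn = ZkOccConnection('zkocc1:14850,zkocc2:14850', 'test_nj', 30)
#   conn.dial()  # optional unless authentication is enabled
#   keyspaces = conn.get_srv_keyspace_names('local')
#   endpoints = conn.get_end_points('local', 'test_keyspace', '0', 'master')
#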
# use this class for faking out a zkocc client. The startup config values
# can be loaded from a json file. After that, they can be mass-altered
# to replace default values with test-specific values, for instance.
class FakeZkOccConnection(object):
def __init__(self, local_cell):
self.data = {}
self.local_cell = local_cell
@classmethod
def from_data_path(cls, local_cell, data_path):
# Returns client with data at given data_path loaded.
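    # The file is expected to hold a single JSON object mapping zookeeper
    # paths to their data, e.g. (hypothetical content):
    #   {"/zk/test_nj/vt/ns/test_keyspace": {"Shards": []}}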
client = cls(local_cell)
with open(data_path) as f:
data = f.read()
for key, value in json.loads(data).iteritems():
client.data[key] = json.dumps(value)
return client
def replace_zk_data(self, before, after):
# Does a string substitution on all zk data.
# This is for testing purpose only.
for key, data in self.data.iteritems():
self.data[key] = data.replace(before, after)
def _resolve_path(self, zk_path):
# Maps a 'meta-path' to a cell specific path.
# '/zk/local/blah' -> '/zk/vb/blah'
parts = zk_path.split('/')
if len(parts) < 3:
return zk_path
if parts[2] != 'local':
return zk_path
parts[2] = self.local_cell
return '/'.join(parts)
def dial(self):
pass
def close(self):
pass
# Old, deprecated API.
def get(self, path):
path = self._resolve_path(path)
if not path in self.data:
raise ZkOccError("FakeZkOccConnection: not found: " + path)
return {
'Data':self.data[path],
'Children':[]
}
def getv(self, paths):
raise ZkOccError("FakeZkOccConnection: not found: " + " ".join(paths))
def children(self, path):
path = self._resolve_path(path)
children = [os.path.basename(node) for node in self.data
if os.path.dirname(node) == path]
if len(children) == 0:
raise ZkOccError("FakeZkOccConnection: not found: " + path)
return {
'Data':'',
'Children':children
}
# New API. For this fake object, it is based on the old API.
def get_srv_keyspace_names(self, cell):
if cell == 'local':
cell = self.local_cell
return self.children('/zk/' + cell + '/vt/ns')['Children']
def get_srv_keyspace(self, cell, keyspace):
keyspace_path = '/zk/' + cell + '/vt/ns/' + keyspace
try:
data = self.get(keyspace_path)['Data']
if not data:
raise ZkOccError("FakeZkOccConnection: empty keyspace: " + keyspace)
result = json.loads(data)
# for convenience, we store the KeyRange as hex, but we need to
# decode it here, as BSON RPC sends it as binary.
if 'Shards' in result:
for shard in result['Shards']:
shard['KeyRange']['Start'] = shard['KeyRange']['Start'].decode('hex')
shard['KeyRange']['End'] = shard['KeyRange']['End'].decode('hex')
return result
except Exception as e:
raise ZkOccError('FakeZkOccConnection: invalid keyspace', keyspace, e)
def get_end_points(self, cell, keyspace, shard, tablet_type):
zk_path = os.path.join('/zk', cell, 'vt', 'ns', keyspace, shard,
tablet_type)
try:
data = self.get(zk_path)['Data']
if not data:
raise ZkOccError("FakeZkOccConnection: empty end point: " + zk_path)
return json.loads(data)
except Exception as e:
raise ZkOccError('FakeZkOccConnection: invalid end point', zk_path, e)
|
bsd-3-clause
| -6,605,538,623,471,355,000 | 29.867347 | 116 | 0.641102 | false |
yourlabs/django-cities-light
|
src/cities_light/tests/base.py
|
1
|
2657
|
"""."""
import os
from unittest import mock
from django import test
from django.core import management
from django.conf import settings
class FixtureDir:
"""Helper class to construct fixture paths."""
def __init__(self, rel_path='', base_dir=None):
"""Class constructor.
params:
rel_path - subdir relative to base dir, e.g. 'aaaa/bbbb/'
base_dir - base fixture directory (settings.FIXTURE_DIR by default)
"""
self.base_dir = base_dir or settings.FIXTURE_DIR
self.rel_path = rel_path
def get_file_path(self, file_name):
"""
Get full fixture path.
Concatenate base_dir, rel_path and file_name.
"""
return os.path.abspath(
os.path.join(self.base_dir, self.rel_path, file_name)
)
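# An illustrative use of FixtureDir (the subdirectory and file name below are
# hypothetical examples, not fixtures guaranteed to ship with this test suite):
#
#   FixtureDir('angola').get_file_path('angola_country.txt')
#   # -> <FIXTURE_DIR>/angola/angola_country.txt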
class TestImportBase(test.TransactionTestCase):
"""Base class for import testcases.
Inherit from this class and use separate
fixture subdirectory for each test_*.py.
"""
maxDiff = 100000
reset_sequences = True
def import_data(self, srcdir, countries, regions, subregions, cities, trans, file_type="txt", **options):
"""Helper method to import Geonames data.
Patch *_SOURCES settings and call 'cities_light' command with
--force-import-all option.
params:
srcdir - source directory represented by FixtureDir object.
countries - values for COUNTRY_SOURCES
regions - values for REGION_SOURCES
subregions - values for SUBREGION_SOURCES
cities - values for CITY_SOURCES
trans - values for TRANSLATION_SOURCES
**options - passed to call_command() as is
"""
def _s2l(param):
return param if isinstance(param, list) else [param]
def _patch(setting, *values):
setting_to_patch = (
'cities_light.management.commands.cities_light.%s_SOURCES' %
setting.upper()
)
return mock.patch(
setting_to_patch,
['file://%s.%s' % (srcdir.get_file_path(v), file_type) for v in values]
)
m_country = _patch('country', *_s2l(countries))
m_region = _patch('region', *_s2l(regions))
m_subregion = _patch('subregion', *_s2l(subregions))
m_city = _patch('city', *_s2l(cities))
m_tr = _patch('translation', *_s2l(trans))
with m_country, m_region, m_subregion, m_city, m_tr:
management.call_command('cities_light', progress=True,
force_import_all=True,
**options)
|
mit
| 8,972,758,104,204,965,000 | 31.402439 | 109 | 0.585247 | false |
marcofucci/mooshell
|
models.py
|
1
|
9791
|
from datetime import date, timedelta, datetime
import os
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.contrib.auth.models import User
from django.conf import settings
from managers import JSDependencyManager, JSLibraryManager, PastieManager, ShellManager
def next_week():
return datetime.now() + timedelta(days=7)
class JSLibraryGroup(models.Model):
"""
Main library to load - MooTools core, jQuery, Prototype, etc.
"""
name = models.CharField('Name', max_length=100, unique=True)
description = models.TextField(blank=True, null=True)
# TODO: check if selected is used at all
selected = models.BooleanField(blank=True, default=False)
def __unicode__(self):
return self.name
class Admin:
pass
class JSLibraryWrap(models.Model):
"""
how to wrap the code in specific library
"""
name = models.CharField(max_length=255)
code_start = models.TextField()
code_end = models.TextField()
def __unicode__(self):
return self.name
class Admin:
pass
class Meta:
verbose_name_plural = "JS Library Code Wrappers"
class JSLibrary(models.Model):
"""
Version of the library - Mootools 1.2.4, etc.
"""
library_group = models.ForeignKey(JSLibraryGroup, related_name="libs")
version = models.CharField(max_length=30, null=True, blank=True)
href = models.CharField('URL to the core library file', max_length=255, unique=True)
selected = models.BooleanField(blank=True, default=False)
wrap_d = models.ForeignKey(JSLibraryWrap, related_name='lib_for_domready')
wrap_l = models.ForeignKey(JSLibraryWrap, related_name='lib_for_load')
active = models.BooleanField(default=True, blank=True)
objects = JSLibraryManager()
def __unicode__(self):
return ' '.join((self.library_group.name, self.version))
class Admin:
pass
class Meta:
verbose_name_plural = "JS Library versions"
ordering = ["version"]
class JSDependency(models.Model):
"""
Additional library file - MooTools more, Scriptaculous, etc.
"""
library = models.ForeignKey(JSLibrary)
name = models.CharField(max_length=150)
url = models.CharField('URL to the library file', max_length=255)
description = models.TextField(blank=True, null=True)
selected = models.BooleanField(blank=True, default=False)
ord = models.IntegerField("Order",default=0, blank=True, null=True)
active = models.BooleanField(default=True, blank=True)
objects = JSDependencyManager()
def __unicode__(self):
return self.name
class Admin:
pass
class Meta:
verbose_name_plural = "JS Dependencies"
# highest number on top
ordering = ["-ord"]
class ExternalResource(models.Model):
url = models.CharField('URL to the resource file', max_length=255, unique=True)
class Admin:
pass
def __unicode__(self):
return self.filename
def __str__(self):
return self.filename
@property
def filename(self):
if not hasattr(self, '_filename'):
self._filename = ExternalResource.get_filename(self.url)
return self._filename
@property
def extension(self):
if not hasattr(self, '_extension'):
self._extension = ExternalResource.get_extension(self.url)
return self._extension
@staticmethod
def get_filename(url):
return url.split('/')[-1]
@staticmethod
def get_extension(url):
return os.path.splitext(ExternalResource.get_filename(url))[1][1:]
WRAPCHOICE = (
('', 'none'),
('d', 'onDomready'),
('l', 'onLoad'),
)
class DocType(models.Model):
"""
	Doctype to choose from
"""
name = models.CharField(max_length=255, unique=True)
code = models.TextField(blank=True, null=True)
type = models.CharField(max_length=100, default='html', blank=True)
selected = models.BooleanField(default=False, blank=True)
def __unicode__(self):
return self.code
class Admin:
pass
class Pastie(models.Model):
"""
default metadata
"""
slug = models.CharField(max_length=255, unique=True, blank=True)
created_at = models.DateTimeField(default=datetime.now)
author = models.ForeignKey(User, null=True, blank=True)
example = models.BooleanField(default=False, blank=True)
favourite = models.ForeignKey('Shell', null=True, blank=True, related_name='favs')
objects = PastieManager()
def set_slug(self):
from random import choice
allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'
check_slug = True
# repeat until the slug will be unique
while check_slug:
self.slug = ''.join([choice(allowed_chars) for i in range(settings.MOOSHELL_SLUG_LENGTH)]) #here some random stuff
try:
check_slug = Pastie.objects.get(slug=self.slug)
except:
check_slug = False
def __unicode__(self):
return self.slug
@models.permalink
def get_absolute_url(self):
return ('pastie',[self.slug])
class Admin:
pass
class Meta:
verbose_name_plural = "Pasties"
def make_slug_on_create(instance, **kwargs):
if kwargs.get('raw',False): return
if not instance.id and not instance.slug:
instance.set_slug()
pre_save.connect(make_slug_on_create, sender=Pastie)
class Shell(models.Model):
"""
Holds shell data
"""
pastie = models.ForeignKey(Pastie)
version = models.IntegerField(default=0, blank=True)
revision = models.IntegerField(default=0, blank=True, null=True)
# authoring
author = models.ForeignKey(User, null=True, blank=True)
private = models.BooleanField(default=False, blank=True)
# meta
title = models.CharField(max_length=255, null=True, blank=True)
description = models.TextField(null=True, blank=True)
# STATISTICS (a bit)
displayed = models.PositiveIntegerField(default=1, null=True, blank=True)
# is the shell private (do not list in search)
	# how long should the shell be held by the system?
valid_until = models.DateTimeField('Valid until', default=None, null=True, blank=True)
# editors
code_css = models.TextField('CSS', null=True, blank=True)
code_html = models.TextField('HTML', null=True, blank=True)
code_js = models.TextField('Javascript', null=True, blank=True)
# filled automatically
created_at = models.DateTimeField(default=datetime.now)
# is it proposed to be an example
proposed_example = models.BooleanField(default=False, blank=True)
# loaded library
js_lib = models.ForeignKey(JSLibrary)
js_lib_option = models.CharField(max_length=255, null=True, blank=True)
js_dependency = models.ManyToManyField(JSDependency, null=True, blank=True)
js_wrap = models.CharField(max_length=1, choices=WRAPCHOICE, default='d', null=True, blank=True)
external_resources = models.ManyToManyField(ExternalResource, null=True, blank=True)
body_tag = models.CharField(max_length=255, null=True, blank=True, default="<body>")
doctype = models.ForeignKey(DocType, blank=True, null=True)
objects = ShellManager()
def is_favourite(self):
return (self.version == 0 and not self.pastie.favourite) or (self.pastie.favourite and self.pastie.favourite_id == self.id)
def __str__(self):
past = ''
if self.id != self.pastie.favourite.id:
past += '-%i' % self.version
if self.code_js:
past += ': %s' % self.code_js[:20]
elif self.code_html:
past += ': %s' % self.code_html[:20]
elif self.code_css:
past += ': %s' % self.code_css[:20]
pre = self.title + ' - ' if self.title else ''
return pre + self.pastie.slug + past
@models.permalink
def get_absolute_url(self):
if self.author:
args = [self.author.username]
rev = 'author_'
else:
args=[]
rev = ''
if not self.revision or self.revision == 0:
if self.is_favourite():
rev += 'pastie'
args.append(self.pastie.slug)
else:
rev += 'shell'
args.extend([self.pastie.slug,self.version])
else:
rev += 'revision'
args.extend([self.pastie.slug,self.version,self.revision])
return (rev, args)
@models.permalink
def get_embedded_url(self):
if self.author:
args = [self.author.username]
rev = 'author_'
else:
args=[]
rev = ''
rev += 'embedded'
if not self.revision or self.revision == 0:
if self.is_favourite():
args.append(self.pastie.slug)
else:
rev += '_with_version'
args.extend([self.pastie.slug,self.version])
else:
rev += '_revision'
args.extend([self.pastie.slug,self.version,self.revision])
return (rev, args)
@models.permalink
def get_show_url(self):
if self.author:
args = [self.author.username]
rev = 'author_'
else:
args=[]
rev = ''
rev += 'pastie_show'
if not self.revision or self.revision == 0:
if self.is_favourite():
args.append(self.pastie.slug)
else:
rev += '_with_version'
args.extend([self.pastie.slug,self.version])
else:
rev += '_revision'
args.extend([self.pastie.slug,self.version,self.revision])
return (rev, args)
def get_next_version(self):
shell_with_highest_version = Shell.objects.filter(pastie=self.pastie).order_by('-version')[0]
return shell_with_highest_version.version + 1
def set_next_version(self):
self.version = self.get_next_version()
class Meta:
ordering = ["-version", "revision"]
unique_together = ['pastie', 'version']
class Admin:
pass
# I think it wasn't called, as a print statement was there before
def increase_version_on_save(instance, **kwargs):
if kwargs.get('raw',False): return
if not instance.id:
# check if any shell exists for the pastie
try:
shells = Shell.objects.filter(pastie__id=instance.pastie.id).order_by('-version')
version = list(shells)[0].version + 1
except:
version = 0
instance.version = version
pre_save.connect(increase_version_on_save, sender=Shell)
def make_first_version_favourite(instance, **kwargs):
if kwargs.get('raw',False): return
if not kwargs.get('created'): return
if instance.version == 0:
instance.pastie.favourite = instance
instance.pastie.save()
post_save.connect(make_first_version_favourite, sender=Shell)
|
mit
| 6,345,273,455,268,990,000 | 26.121884 | 126 | 0.704525 | false |
AusTac/parma
|
b3/pkg_handler.py
|
1
|
4322
|
#
# BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2005 Michael "ThorN" Thornton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# CHANGELOG:
# 05/01/2009 - 1.1.1 - Courgette
# * make PkgResourcesStandIn.version('b3') work with py2exe build
# 20/03/2010 - 1.2 - Courgette
# * make sure to read the version from the PKG-INFO file if found in the b3 module
# even when setup_tools are installed on the system
# 21/10/2010 - 1.2.1 - Courgette
# * fix an issue that broke the b3_run.exe when frozen on a machine that
# have pkg_resources available
__author__ = 'ThorN'
__version__ = '1.2.1'
import os, sys, re
from b3.functions import main_is_frozen
__all__ = ['version', 'resource_directory']
# use this class if pkg_resources is installed
class PkgResources:
def version(self, module):
version = '<unknown>'
try:
if os.path.isfile(os.path.join(self.resource_directory(module), 'PKG-INFO')):
                ## we need this in case the user installed B3 from sources (copying the
                ## b3.egg-info folder) and then updates just the b3 folder but still has
                ## setup_tools installed on his system
version = getVersionFromFile(os.path.join(self.resource_directory(module), 'PKG-INFO'))
else:
version = pkg_resources.get_distribution(module).version
except pkg_resources.DistributionNotFound:
# must not be installed as an egg
pkg_handler = PkgResourcesStandIn()
version = pkg_handler.version(module)
return version
def resource_directory(self, module):
return pkg_resources.resource_filename(module, '')
# use this class if pkg_resources is NOT installed
class PkgResourcesStandIn:
def version(self, module):
# find package info
version = '<unknown>'
searchDirectories = ['PKG-INFO' ,
os.path.join(self.resource_directory(module), 'PKG-INFO'),
os.path.join(self.resource_directory(module), '..', 'PKG-INFO'),
os.path.join(self.resource_directory(module), '..', 'b3.egg-info', 'PKG-INFO')
]
if module == 'b3':
searchDirectories.insert(0, os.path.join(self.getB3Path(), 'PKG-INFO'))
for p in searchDirectories:
if os.path.isfile(p):
version = getVersionFromFile(p)
return version
def resource_directory(self, module):
return os.path.dirname(sys.modules[module].__file__)
def getB3Path(self):
if main_is_frozen():
# which happens when running from the py2exe build
return os.path.dirname(sys.executable)
return self.resource_directory('b3')
def getVersionFromFile(filename):
version = None
if os.path.isfile(filename):
f = file(filename, 'r')
for line in f:
if line.lower().startswith('version:'):
version = re.sub('[^A-Za-z0-9.]+', '-', line.split(':',1)[1].strip().replace(' ','.'))
break
f.close()
return version
pkg_handler = None
if main_is_frozen():
# avoid issues when frozen with py2exe on a windows machine
    # that has pkg_resources importable.
pkg_handler = PkgResourcesStandIn()
else:
try:
import pkg_resources
except ImportError:
        # package tools is not installed
pkg_handler = PkgResourcesStandIn()
else:
# package tools is installed
pkg_handler = PkgResources()
version = pkg_handler.version
resource_directory = pkg_handler.resource_directory
|
gpl-2.0
| 8,140,766,433,042,558,000 | 36.591304 | 103 | 0.63975 | false |
xiedidan/luna-network
|
resnext/createGroundTruth.py
|
1
|
1336
|
# -*- coding:utf-8 -*-
import os
from glob import glob
import numpy as np
try:
import cPickle as pickle
except:
import pickle
# create label for resnext tests
# glob filename from nodules/, write label to groundTruths/
class GroundTruthCreator(object):
def __init__(self, dataPath, phrase = "train"):
self.dataPath = dataPath
self.phrase = phrase
self.phraseSubPath = self.phrase + "/"
np.random.seed()
def createLabel(self):
groundTruthPath = self.dataPath + self.phraseSubPath + "groundTruths/"
if not os.path.isdir(groundTruthPath):
os.makedirs(groundTruthPath)
noduleFileList = glob(self.dataPath + self.phraseSubPath + "nodules/*.npy")
print(noduleFileList)
for noduleFile in noduleFileList:
label = np.array([0, 0])
if np.random.random() > 0.5:
label[0] = 1
else:
label[1] = 1
filename = os.path.basename(noduleFile)
groundTruthFile = groundTruthPath + filename
print(groundTruthFile)
with open(groundTruthFile, "wb") as f:
pickle.dump(label, f)
if __name__ == "__main__":
creator = GroundTruthCreator("d:/project/tianchi/data/experiment/")
creator.createLabel()
|
gpl-3.0
| -5,242,850,325,965,492,000 | 30.069767 | 83 | 0.599551 | false |
langcog/wordbank
|
instruments/schemas/English_American_WG.py
|
1
|
79336
|
from django.db import models
from instruments.base import BaseTable
class English_American_WG(BaseTable):
item_1_choices = [('yes', 'yes'), ('no', 'no')]
item_1 = models.CharField(max_length=3, choices=item_1_choices, null=True)
item_2_choices = [('yes', 'yes'), ('no', 'no')]
item_2 = models.CharField(max_length=3, choices=item_2_choices, null=True)
item_3_choices = [('yes', 'yes'), ('no', 'no')]
item_3 = models.CharField(max_length=3, choices=item_3_choices, null=True)
item_4_choices = [('understands', 'understands')]
item_4 = models.CharField(max_length=11, choices=item_4_choices, null=True)
item_5_choices = [('understands', 'understands')]
item_5 = models.CharField(max_length=11, choices=item_5_choices, null=True)
item_6_choices = [('understands', 'understands')]
item_6 = models.CharField(max_length=11, choices=item_6_choices, null=True)
item_7_choices = [('understands', 'understands')]
item_7 = models.CharField(max_length=11, choices=item_7_choices, null=True)
item_8_choices = [('understands', 'understands')]
item_8 = models.CharField(max_length=11, choices=item_8_choices, null=True)
item_9_choices = [('understands', 'understands')]
item_9 = models.CharField(max_length=11, choices=item_9_choices, null=True)
item_10_choices = [('understands', 'understands')]
item_10 = models.CharField(max_length=11, choices=item_10_choices, null=True)
item_11_choices = [('understands', 'understands')]
item_11 = models.CharField(max_length=11, choices=item_11_choices, null=True)
item_12_choices = [('understands', 'understands')]
item_12 = models.CharField(max_length=11, choices=item_12_choices, null=True)
item_13_choices = [('understands', 'understands')]
item_13 = models.CharField(max_length=11, choices=item_13_choices, null=True)
item_14_choices = [('understands', 'understands')]
item_14 = models.CharField(max_length=11, choices=item_14_choices, null=True)
item_15_choices = [('understands', 'understands')]
item_15 = models.CharField(max_length=11, choices=item_15_choices, null=True)
item_16_choices = [('understands', 'understands')]
item_16 = models.CharField(max_length=11, choices=item_16_choices, null=True)
item_17_choices = [('understands', 'understands')]
item_17 = models.CharField(max_length=11, choices=item_17_choices, null=True)
item_18_choices = [('understands', 'understands')]
item_18 = models.CharField(max_length=11, choices=item_18_choices, null=True)
item_19_choices = [('understands', 'understands')]
item_19 = models.CharField(max_length=11, choices=item_19_choices, null=True)
item_20_choices = [('understands', 'understands')]
item_20 = models.CharField(max_length=11, choices=item_20_choices, null=True)
item_21_choices = [('understands', 'understands')]
item_21 = models.CharField(max_length=11, choices=item_21_choices, null=True)
item_22_choices = [('understands', 'understands')]
item_22 = models.CharField(max_length=11, choices=item_22_choices, null=True)
item_23_choices = [('understands', 'understands')]
item_23 = models.CharField(max_length=11, choices=item_23_choices, null=True)
item_24_choices = [('understands', 'understands')]
item_24 = models.CharField(max_length=11, choices=item_24_choices, null=True)
item_25_choices = [('understands', 'understands')]
item_25 = models.CharField(max_length=11, choices=item_25_choices, null=True)
item_26_choices = [('understands', 'understands')]
item_26 = models.CharField(max_length=11, choices=item_26_choices, null=True)
item_27_choices = [('understands', 'understands')]
item_27 = models.CharField(max_length=11, choices=item_27_choices, null=True)
item_28_choices = [('understands', 'understands')]
item_28 = models.CharField(max_length=11, choices=item_28_choices, null=True)
item_29_choices = [('understands', 'understands')]
item_29 = models.CharField(max_length=11, choices=item_29_choices, null=True)
item_30_choices = [('understands', 'understands')]
item_30 = models.CharField(max_length=11, choices=item_30_choices, null=True)
item_31_choices = [('understands', 'understands')]
item_31 = models.CharField(max_length=11, choices=item_31_choices, null=True)
item_32_choices = [('never', 'never'), ('sometimes', 'sometimes'), ('often', 'often')]
item_32 = models.CharField(max_length=9, choices=item_32_choices, null=True)
item_33_choices = [('never', 'never'), ('sometimes', 'sometimes'), ('often', 'often')]
item_33 = models.CharField(max_length=9, choices=item_33_choices, null=True)
item_34_choices = [('understands', 'understands'), ('produces', 'produces')]
item_34 = models.CharField(max_length=11, choices=item_34_choices, null=True)
item_35_choices = [('understands', 'understands'), ('produces', 'produces')]
item_35 = models.CharField(max_length=11, choices=item_35_choices, null=True)
item_36_choices = [('understands', 'understands'), ('produces', 'produces')]
item_36 = models.CharField(max_length=11, choices=item_36_choices, null=True)
item_37_choices = [('understands', 'understands'), ('produces', 'produces')]
item_37 = models.CharField(max_length=11, choices=item_37_choices, null=True)
item_38_choices = [('understands', 'understands'), ('produces', 'produces')]
item_38 = models.CharField(max_length=11, choices=item_38_choices, null=True)
item_39_choices = [('understands', 'understands'), ('produces', 'produces')]
item_39 = models.CharField(max_length=11, choices=item_39_choices, null=True)
item_40_choices = [('understands', 'understands'), ('produces', 'produces')]
item_40 = models.CharField(max_length=11, choices=item_40_choices, null=True)
item_41_choices = [('understands', 'understands'), ('produces', 'produces')]
item_41 = models.CharField(max_length=11, choices=item_41_choices, null=True)
item_42_choices = [('understands', 'understands'), ('produces', 'produces')]
item_42 = models.CharField(max_length=11, choices=item_42_choices, null=True)
item_43_choices = [('understands', 'understands'), ('produces', 'produces')]
item_43 = models.CharField(max_length=11, choices=item_43_choices, null=True)
item_44_choices = [('understands', 'understands'), ('produces', 'produces')]
item_44 = models.CharField(max_length=11, choices=item_44_choices, null=True)
item_45_choices = [('understands', 'understands'), ('produces', 'produces')]
item_45 = models.CharField(max_length=11, choices=item_45_choices, null=True)
item_46_choices = [('understands', 'understands'), ('produces', 'produces')]
item_46 = models.CharField(max_length=11, choices=item_46_choices, null=True)
item_47_choices = [('understands', 'understands'), ('produces', 'produces')]
item_47 = models.CharField(max_length=11, choices=item_47_choices, null=True)
item_48_choices = [('understands', 'understands'), ('produces', 'produces')]
item_48 = models.CharField(max_length=11, choices=item_48_choices, null=True)
item_49_choices = [('understands', 'understands'), ('produces', 'produces')]
item_49 = models.CharField(max_length=11, choices=item_49_choices, null=True)
item_50_choices = [('understands', 'understands'), ('produces', 'produces')]
item_50 = models.CharField(max_length=11, choices=item_50_choices, null=True)
item_51_choices = [('understands', 'understands'), ('produces', 'produces')]
item_51 = models.CharField(max_length=11, choices=item_51_choices, null=True)
item_52_choices = [('understands', 'understands'), ('produces', 'produces')]
item_52 = models.CharField(max_length=11, choices=item_52_choices, null=True)
item_53_choices = [('understands', 'understands'), ('produces', 'produces')]
item_53 = models.CharField(max_length=11, choices=item_53_choices, null=True)
item_54_choices = [('understands', 'understands'), ('produces', 'produces')]
item_54 = models.CharField(max_length=11, choices=item_54_choices, null=True)
item_55_choices = [('understands', 'understands'), ('produces', 'produces')]
item_55 = models.CharField(max_length=11, choices=item_55_choices, null=True)
item_56_choices = [('understands', 'understands'), ('produces', 'produces')]
item_56 = models.CharField(max_length=11, choices=item_56_choices, null=True)
item_57_choices = [('understands', 'understands'), ('produces', 'produces')]
item_57 = models.CharField(max_length=11, choices=item_57_choices, null=True)
item_58_choices = [('understands', 'understands'), ('produces', 'produces')]
item_58 = models.CharField(max_length=11, choices=item_58_choices, null=True)
item_59_choices = [('understands', 'understands'), ('produces', 'produces')]
item_59 = models.CharField(max_length=11, choices=item_59_choices, null=True)
item_60_choices = [('understands', 'understands'), ('produces', 'produces')]
item_60 = models.CharField(max_length=11, choices=item_60_choices, null=True)
item_61_choices = [('understands', 'understands'), ('produces', 'produces')]
item_61 = models.CharField(max_length=11, choices=item_61_choices, null=True)
item_62_choices = [('understands', 'understands'), ('produces', 'produces')]
item_62 = models.CharField(max_length=11, choices=item_62_choices, null=True)
item_63_choices = [('understands', 'understands'), ('produces', 'produces')]
item_63 = models.CharField(max_length=11, choices=item_63_choices, null=True)
item_64_choices = [('understands', 'understands'), ('produces', 'produces')]
item_64 = models.CharField(max_length=11, choices=item_64_choices, null=True)
item_65_choices = [('understands', 'understands'), ('produces', 'produces')]
item_65 = models.CharField(max_length=11, choices=item_65_choices, null=True)
item_66_choices = [('understands', 'understands'), ('produces', 'produces')]
item_66 = models.CharField(max_length=11, choices=item_66_choices, null=True)
item_67_choices = [('understands', 'understands'), ('produces', 'produces')]
item_67 = models.CharField(max_length=11, choices=item_67_choices, null=True)
item_68_choices = [('understands', 'understands'), ('produces', 'produces')]
item_68 = models.CharField(max_length=11, choices=item_68_choices, null=True)
item_69_choices = [('understands', 'understands'), ('produces', 'produces')]
item_69 = models.CharField(max_length=11, choices=item_69_choices, null=True)
item_70_choices = [('understands', 'understands'), ('produces', 'produces')]
item_70 = models.CharField(max_length=11, choices=item_70_choices, null=True)
item_71_choices = [('understands', 'understands'), ('produces', 'produces')]
item_71 = models.CharField(max_length=11, choices=item_71_choices, null=True)
item_72_choices = [('understands', 'understands'), ('produces', 'produces')]
item_72 = models.CharField(max_length=11, choices=item_72_choices, null=True)
item_73_choices = [('understands', 'understands'), ('produces', 'produces')]
item_73 = models.CharField(max_length=11, choices=item_73_choices, null=True)
item_74_choices = [('understands', 'understands'), ('produces', 'produces')]
item_74 = models.CharField(max_length=11, choices=item_74_choices, null=True)
item_75_choices = [('understands', 'understands'), ('produces', 'produces')]
item_75 = models.CharField(max_length=11, choices=item_75_choices, null=True)
item_76_choices = [('understands', 'understands'), ('produces', 'produces')]
item_76 = models.CharField(max_length=11, choices=item_76_choices, null=True)
item_77_choices = [('understands', 'understands'), ('produces', 'produces')]
item_77 = models.CharField(max_length=11, choices=item_77_choices, null=True)
item_78_choices = [('understands', 'understands'), ('produces', 'produces')]
item_78 = models.CharField(max_length=11, choices=item_78_choices, null=True)
item_79_choices = [('understands', 'understands'), ('produces', 'produces')]
item_79 = models.CharField(max_length=11, choices=item_79_choices, null=True)
item_80_choices = [('understands', 'understands'), ('produces', 'produces')]
item_80 = models.CharField(max_length=11, choices=item_80_choices, null=True)
item_81_choices = [('understands', 'understands'), ('produces', 'produces')]
item_81 = models.CharField(max_length=11, choices=item_81_choices, null=True)
item_82_choices = [('understands', 'understands'), ('produces', 'produces')]
item_82 = models.CharField(max_length=11, choices=item_82_choices, null=True)
item_83_choices = [('understands', 'understands'), ('produces', 'produces')]
item_83 = models.CharField(max_length=11, choices=item_83_choices, null=True)
item_84_choices = [('understands', 'understands'), ('produces', 'produces')]
item_84 = models.CharField(max_length=11, choices=item_84_choices, null=True)
item_85_choices = [('understands', 'understands'), ('produces', 'produces')]
item_85 = models.CharField(max_length=11, choices=item_85_choices, null=True)
item_86_choices = [('understands', 'understands'), ('produces', 'produces')]
item_86 = models.CharField(max_length=11, choices=item_86_choices, null=True)
item_87_choices = [('understands', 'understands'), ('produces', 'produces')]
item_87 = models.CharField(max_length=11, choices=item_87_choices, null=True)
item_88_choices = [('understands', 'understands'), ('produces', 'produces')]
item_88 = models.CharField(max_length=11, choices=item_88_choices, null=True)
item_89_choices = [('understands', 'understands'), ('produces', 'produces')]
item_89 = models.CharField(max_length=11, choices=item_89_choices, null=True)
item_90_choices = [('understands', 'understands'), ('produces', 'produces')]
item_90 = models.CharField(max_length=11, choices=item_90_choices, null=True)
item_91_choices = [('understands', 'understands'), ('produces', 'produces')]
item_91 = models.CharField(max_length=11, choices=item_91_choices, null=True)
item_92_choices = [('understands', 'understands'), ('produces', 'produces')]
item_92 = models.CharField(max_length=11, choices=item_92_choices, null=True)
item_93_choices = [('understands', 'understands'), ('produces', 'produces')]
item_93 = models.CharField(max_length=11, choices=item_93_choices, null=True)
item_94_choices = [('understands', 'understands'), ('produces', 'produces')]
item_94 = models.CharField(max_length=11, choices=item_94_choices, null=True)
item_95_choices = [('understands', 'understands'), ('produces', 'produces')]
item_95 = models.CharField(max_length=11, choices=item_95_choices, null=True)
item_96_choices = [('understands', 'understands'), ('produces', 'produces')]
item_96 = models.CharField(max_length=11, choices=item_96_choices, null=True)
item_97_choices = [('understands', 'understands'), ('produces', 'produces')]
item_97 = models.CharField(max_length=11, choices=item_97_choices, null=True)
item_98_choices = [('understands', 'understands'), ('produces', 'produces')]
item_98 = models.CharField(max_length=11, choices=item_98_choices, null=True)
item_99_choices = [('understands', 'understands'), ('produces', 'produces')]
item_99 = models.CharField(max_length=11, choices=item_99_choices, null=True)
item_100_choices = [('understands', 'understands'), ('produces', 'produces')]
item_100 = models.CharField(max_length=11, choices=item_100_choices, null=True)
item_101_choices = [('understands', 'understands'), ('produces', 'produces')]
item_101 = models.CharField(max_length=11, choices=item_101_choices, null=True)
item_102_choices = [('understands', 'understands'), ('produces', 'produces')]
item_102 = models.CharField(max_length=11, choices=item_102_choices, null=True)
item_103_choices = [('understands', 'understands'), ('produces', 'produces')]
item_103 = models.CharField(max_length=11, choices=item_103_choices, null=True)
item_104_choices = [('understands', 'understands'), ('produces', 'produces')]
item_104 = models.CharField(max_length=11, choices=item_104_choices, null=True)
item_105_choices = [('understands', 'understands'), ('produces', 'produces')]
item_105 = models.CharField(max_length=11, choices=item_105_choices, null=True)
item_106_choices = [('understands', 'understands'), ('produces', 'produces')]
item_106 = models.CharField(max_length=11, choices=item_106_choices, null=True)
item_107_choices = [('understands', 'understands'), ('produces', 'produces')]
item_107 = models.CharField(max_length=11, choices=item_107_choices, null=True)
item_108_choices = [('understands', 'understands'), ('produces', 'produces')]
item_108 = models.CharField(max_length=11, choices=item_108_choices, null=True)
item_109_choices = [('understands', 'understands'), ('produces', 'produces')]
item_109 = models.CharField(max_length=11, choices=item_109_choices, null=True)
item_110_choices = [('understands', 'understands'), ('produces', 'produces')]
item_110 = models.CharField(max_length=11, choices=item_110_choices, null=True)
item_111_choices = [('understands', 'understands'), ('produces', 'produces')]
item_111 = models.CharField(max_length=11, choices=item_111_choices, null=True)
item_112_choices = [('understands', 'understands'), ('produces', 'produces')]
item_112 = models.CharField(max_length=11, choices=item_112_choices, null=True)
item_113_choices = [('understands', 'understands'), ('produces', 'produces')]
item_113 = models.CharField(max_length=11, choices=item_113_choices, null=True)
item_114_choices = [('understands', 'understands'), ('produces', 'produces')]
item_114 = models.CharField(max_length=11, choices=item_114_choices, null=True)
item_115_choices = [('understands', 'understands'), ('produces', 'produces')]
item_115 = models.CharField(max_length=11, choices=item_115_choices, null=True)
item_116_choices = [('understands', 'understands'), ('produces', 'produces')]
item_116 = models.CharField(max_length=11, choices=item_116_choices, null=True)
item_117_choices = [('understands', 'understands'), ('produces', 'produces')]
item_117 = models.CharField(max_length=11, choices=item_117_choices, null=True)
item_118_choices = [('understands', 'understands'), ('produces', 'produces')]
item_118 = models.CharField(max_length=11, choices=item_118_choices, null=True)
item_119_choices = [('understands', 'understands'), ('produces', 'produces')]
item_119 = models.CharField(max_length=11, choices=item_119_choices, null=True)
item_120_choices = [('understands', 'understands'), ('produces', 'produces')]
item_120 = models.CharField(max_length=11, choices=item_120_choices, null=True)
item_121_choices = [('understands', 'understands'), ('produces', 'produces')]
item_121 = models.CharField(max_length=11, choices=item_121_choices, null=True)
item_122_choices = [('understands', 'understands'), ('produces', 'produces')]
item_122 = models.CharField(max_length=11, choices=item_122_choices, null=True)
item_123_choices = [('understands', 'understands'), ('produces', 'produces')]
item_123 = models.CharField(max_length=11, choices=item_123_choices, null=True)
item_124_choices = [('understands', 'understands'), ('produces', 'produces')]
item_124 = models.CharField(max_length=11, choices=item_124_choices, null=True)
item_125_choices = [('understands', 'understands'), ('produces', 'produces')]
item_125 = models.CharField(max_length=11, choices=item_125_choices, null=True)
item_126_choices = [('understands', 'understands'), ('produces', 'produces')]
item_126 = models.CharField(max_length=11, choices=item_126_choices, null=True)
item_127_choices = [('understands', 'understands'), ('produces', 'produces')]
item_127 = models.CharField(max_length=11, choices=item_127_choices, null=True)
item_128_choices = [('understands', 'understands'), ('produces', 'produces')]
item_128 = models.CharField(max_length=11, choices=item_128_choices, null=True)
item_129_choices = [('understands', 'understands'), ('produces', 'produces')]
item_129 = models.CharField(max_length=11, choices=item_129_choices, null=True)
item_130_choices = [('understands', 'understands'), ('produces', 'produces')]
item_130 = models.CharField(max_length=11, choices=item_130_choices, null=True)
item_131_choices = [('understands', 'understands'), ('produces', 'produces')]
item_131 = models.CharField(max_length=11, choices=item_131_choices, null=True)
item_132_choices = [('understands', 'understands'), ('produces', 'produces')]
item_132 = models.CharField(max_length=11, choices=item_132_choices, null=True)
item_133_choices = [('understands', 'understands'), ('produces', 'produces')]
item_133 = models.CharField(max_length=11, choices=item_133_choices, null=True)
item_134_choices = [('understands', 'understands'), ('produces', 'produces')]
item_134 = models.CharField(max_length=11, choices=item_134_choices, null=True)
item_135_choices = [('understands', 'understands'), ('produces', 'produces')]
item_135 = models.CharField(max_length=11, choices=item_135_choices, null=True)
item_136_choices = [('understands', 'understands'), ('produces', 'produces')]
item_136 = models.CharField(max_length=11, choices=item_136_choices, null=True)
item_137_choices = [('understands', 'understands'), ('produces', 'produces')]
item_137 = models.CharField(max_length=11, choices=item_137_choices, null=True)
item_138_choices = [('understands', 'understands'), ('produces', 'produces')]
item_138 = models.CharField(max_length=11, choices=item_138_choices, null=True)
item_139_choices = [('understands', 'understands'), ('produces', 'produces')]
item_139 = models.CharField(max_length=11, choices=item_139_choices, null=True)
item_140_choices = [('understands', 'understands'), ('produces', 'produces')]
item_140 = models.CharField(max_length=11, choices=item_140_choices, null=True)
item_141_choices = [('understands', 'understands'), ('produces', 'produces')]
item_141 = models.CharField(max_length=11, choices=item_141_choices, null=True)
item_142_choices = [('understands', 'understands'), ('produces', 'produces')]
item_142 = models.CharField(max_length=11, choices=item_142_choices, null=True)
item_143_choices = [('understands', 'understands'), ('produces', 'produces')]
item_143 = models.CharField(max_length=11, choices=item_143_choices, null=True)
item_144_choices = [('understands', 'understands'), ('produces', 'produces')]
item_144 = models.CharField(max_length=11, choices=item_144_choices, null=True)
item_145_choices = [('understands', 'understands'), ('produces', 'produces')]
item_145 = models.CharField(max_length=11, choices=item_145_choices, null=True)
item_146_choices = [('understands', 'understands'), ('produces', 'produces')]
item_146 = models.CharField(max_length=11, choices=item_146_choices, null=True)
item_147_choices = [('understands', 'understands'), ('produces', 'produces')]
item_147 = models.CharField(max_length=11, choices=item_147_choices, null=True)
item_148_choices = [('understands', 'understands'), ('produces', 'produces')]
item_148 = models.CharField(max_length=11, choices=item_148_choices, null=True)
item_149_choices = [('understands', 'understands'), ('produces', 'produces')]
item_149 = models.CharField(max_length=11, choices=item_149_choices, null=True)
item_150_choices = [('understands', 'understands'), ('produces', 'produces')]
item_150 = models.CharField(max_length=11, choices=item_150_choices, null=True)
item_151_choices = [('understands', 'understands'), ('produces', 'produces')]
item_151 = models.CharField(max_length=11, choices=item_151_choices, null=True)
item_152_choices = [('understands', 'understands'), ('produces', 'produces')]
item_152 = models.CharField(max_length=11, choices=item_152_choices, null=True)
item_153_choices = [('understands', 'understands'), ('produces', 'produces')]
item_153 = models.CharField(max_length=11, choices=item_153_choices, null=True)
item_154_choices = [('understands', 'understands'), ('produces', 'produces')]
item_154 = models.CharField(max_length=11, choices=item_154_choices, null=True)
item_155_choices = [('understands', 'understands'), ('produces', 'produces')]
item_155 = models.CharField(max_length=11, choices=item_155_choices, null=True)
item_156_choices = [('understands', 'understands'), ('produces', 'produces')]
item_156 = models.CharField(max_length=11, choices=item_156_choices, null=True)
item_157_choices = [('understands', 'understands'), ('produces', 'produces')]
item_157 = models.CharField(max_length=11, choices=item_157_choices, null=True)
item_158_choices = [('understands', 'understands'), ('produces', 'produces')]
item_158 = models.CharField(max_length=11, choices=item_158_choices, null=True)
item_159_choices = [('understands', 'understands'), ('produces', 'produces')]
item_159 = models.CharField(max_length=11, choices=item_159_choices, null=True)
item_160_choices = [('understands', 'understands'), ('produces', 'produces')]
item_160 = models.CharField(max_length=11, choices=item_160_choices, null=True)
item_161_choices = [('understands', 'understands'), ('produces', 'produces')]
item_161 = models.CharField(max_length=11, choices=item_161_choices, null=True)
item_162_choices = [('understands', 'understands'), ('produces', 'produces')]
item_162 = models.CharField(max_length=11, choices=item_162_choices, null=True)
item_163_choices = [('understands', 'understands'), ('produces', 'produces')]
item_163 = models.CharField(max_length=11, choices=item_163_choices, null=True)
item_164_choices = [('understands', 'understands'), ('produces', 'produces')]
item_164 = models.CharField(max_length=11, choices=item_164_choices, null=True)
item_165_choices = [('understands', 'understands'), ('produces', 'produces')]
item_165 = models.CharField(max_length=11, choices=item_165_choices, null=True)
item_166_choices = [('understands', 'understands'), ('produces', 'produces')]
item_166 = models.CharField(max_length=11, choices=item_166_choices, null=True)
item_167_choices = [('understands', 'understands'), ('produces', 'produces')]
item_167 = models.CharField(max_length=11, choices=item_167_choices, null=True)
item_168_choices = [('understands', 'understands'), ('produces', 'produces')]
item_168 = models.CharField(max_length=11, choices=item_168_choices, null=True)
item_169_choices = [('understands', 'understands'), ('produces', 'produces')]
item_169 = models.CharField(max_length=11, choices=item_169_choices, null=True)
item_170_choices = [('understands', 'understands'), ('produces', 'produces')]
item_170 = models.CharField(max_length=11, choices=item_170_choices, null=True)
item_171_choices = [('understands', 'understands'), ('produces', 'produces')]
item_171 = models.CharField(max_length=11, choices=item_171_choices, null=True)
item_172_choices = [('understands', 'understands'), ('produces', 'produces')]
item_172 = models.CharField(max_length=11, choices=item_172_choices, null=True)
item_173_choices = [('understands', 'understands'), ('produces', 'produces')]
item_173 = models.CharField(max_length=11, choices=item_173_choices, null=True)
item_174_choices = [('understands', 'understands'), ('produces', 'produces')]
item_174 = models.CharField(max_length=11, choices=item_174_choices, null=True)
item_175_choices = [('understands', 'understands'), ('produces', 'produces')]
item_175 = models.CharField(max_length=11, choices=item_175_choices, null=True)
item_176_choices = [('understands', 'understands'), ('produces', 'produces')]
item_176 = models.CharField(max_length=11, choices=item_176_choices, null=True)
item_177_choices = [('understands', 'understands'), ('produces', 'produces')]
item_177 = models.CharField(max_length=11, choices=item_177_choices, null=True)
item_178_choices = [('understands', 'understands'), ('produces', 'produces')]
item_178 = models.CharField(max_length=11, choices=item_178_choices, null=True)
item_179_choices = [('understands', 'understands'), ('produces', 'produces')]
item_179 = models.CharField(max_length=11, choices=item_179_choices, null=True)
item_180_choices = [('understands', 'understands'), ('produces', 'produces')]
item_180 = models.CharField(max_length=11, choices=item_180_choices, null=True)
item_181_choices = [('understands', 'understands'), ('produces', 'produces')]
item_181 = models.CharField(max_length=11, choices=item_181_choices, null=True)
item_182_choices = [('understands', 'understands'), ('produces', 'produces')]
item_182 = models.CharField(max_length=11, choices=item_182_choices, null=True)
item_183_choices = [('understands', 'understands'), ('produces', 'produces')]
item_183 = models.CharField(max_length=11, choices=item_183_choices, null=True)
item_184_choices = [('understands', 'understands'), ('produces', 'produces')]
item_184 = models.CharField(max_length=11, choices=item_184_choices, null=True)
item_185_choices = [('understands', 'understands'), ('produces', 'produces')]
item_185 = models.CharField(max_length=11, choices=item_185_choices, null=True)
item_186_choices = [('understands', 'understands'), ('produces', 'produces')]
item_186 = models.CharField(max_length=11, choices=item_186_choices, null=True)
item_187_choices = [('understands', 'understands'), ('produces', 'produces')]
item_187 = models.CharField(max_length=11, choices=item_187_choices, null=True)
item_188_choices = [('understands', 'understands'), ('produces', 'produces')]
item_188 = models.CharField(max_length=11, choices=item_188_choices, null=True)
item_189_choices = [('understands', 'understands'), ('produces', 'produces')]
item_189 = models.CharField(max_length=11, choices=item_189_choices, null=True)
item_190_choices = [('understands', 'understands'), ('produces', 'produces')]
item_190 = models.CharField(max_length=11, choices=item_190_choices, null=True)
item_191_choices = [('understands', 'understands'), ('produces', 'produces')]
item_191 = models.CharField(max_length=11, choices=item_191_choices, null=True)
item_192_choices = [('understands', 'understands'), ('produces', 'produces')]
item_192 = models.CharField(max_length=11, choices=item_192_choices, null=True)
item_193_choices = [('understands', 'understands'), ('produces', 'produces')]
item_193 = models.CharField(max_length=11, choices=item_193_choices, null=True)
item_194_choices = [('understands', 'understands'), ('produces', 'produces')]
item_194 = models.CharField(max_length=11, choices=item_194_choices, null=True)
item_195_choices = [('understands', 'understands'), ('produces', 'produces')]
item_195 = models.CharField(max_length=11, choices=item_195_choices, null=True)
item_196_choices = [('understands', 'understands'), ('produces', 'produces')]
item_196 = models.CharField(max_length=11, choices=item_196_choices, null=True)
item_197_choices = [('understands', 'understands'), ('produces', 'produces')]
item_197 = models.CharField(max_length=11, choices=item_197_choices, null=True)
item_198_choices = [('understands', 'understands'), ('produces', 'produces')]
item_198 = models.CharField(max_length=11, choices=item_198_choices, null=True)
item_199_choices = [('understands', 'understands'), ('produces', 'produces')]
item_199 = models.CharField(max_length=11, choices=item_199_choices, null=True)
item_200_choices = [('understands', 'understands'), ('produces', 'produces')]
item_200 = models.CharField(max_length=11, choices=item_200_choices, null=True)
item_201_choices = [('understands', 'understands'), ('produces', 'produces')]
item_201 = models.CharField(max_length=11, choices=item_201_choices, null=True)
item_202_choices = [('understands', 'understands'), ('produces', 'produces')]
item_202 = models.CharField(max_length=11, choices=item_202_choices, null=True)
item_203_choices = [('understands', 'understands'), ('produces', 'produces')]
item_203 = models.CharField(max_length=11, choices=item_203_choices, null=True)
item_204_choices = [('understands', 'understands'), ('produces', 'produces')]
item_204 = models.CharField(max_length=11, choices=item_204_choices, null=True)
item_205_choices = [('understands', 'understands'), ('produces', 'produces')]
item_205 = models.CharField(max_length=11, choices=item_205_choices, null=True)
item_206_choices = [('understands', 'understands'), ('produces', 'produces')]
item_206 = models.CharField(max_length=11, choices=item_206_choices, null=True)
item_207_choices = [('understands', 'understands'), ('produces', 'produces')]
item_207 = models.CharField(max_length=11, choices=item_207_choices, null=True)
item_208_choices = [('understands', 'understands'), ('produces', 'produces')]
item_208 = models.CharField(max_length=11, choices=item_208_choices, null=True)
item_209_choices = [('understands', 'understands'), ('produces', 'produces')]
item_209 = models.CharField(max_length=11, choices=item_209_choices, null=True)
item_210_choices = [('understands', 'understands'), ('produces', 'produces')]
item_210 = models.CharField(max_length=11, choices=item_210_choices, null=True)
item_211_choices = [('understands', 'understands'), ('produces', 'produces')]
item_211 = models.CharField(max_length=11, choices=item_211_choices, null=True)
item_212_choices = [('understands', 'understands'), ('produces', 'produces')]
item_212 = models.CharField(max_length=11, choices=item_212_choices, null=True)
item_213_choices = [('understands', 'understands'), ('produces', 'produces')]
item_213 = models.CharField(max_length=11, choices=item_213_choices, null=True)
item_214_choices = [('understands', 'understands'), ('produces', 'produces')]
item_214 = models.CharField(max_length=11, choices=item_214_choices, null=True)
item_215_choices = [('understands', 'understands'), ('produces', 'produces')]
item_215 = models.CharField(max_length=11, choices=item_215_choices, null=True)
item_216_choices = [('understands', 'understands'), ('produces', 'produces')]
item_216 = models.CharField(max_length=11, choices=item_216_choices, null=True)
item_217_choices = [('understands', 'understands'), ('produces', 'produces')]
item_217 = models.CharField(max_length=11, choices=item_217_choices, null=True)
item_218_choices = [('understands', 'understands'), ('produces', 'produces')]
item_218 = models.CharField(max_length=11, choices=item_218_choices, null=True)
item_219_choices = [('understands', 'understands'), ('produces', 'produces')]
item_219 = models.CharField(max_length=11, choices=item_219_choices, null=True)
item_220_choices = [('understands', 'understands'), ('produces', 'produces')]
item_220 = models.CharField(max_length=11, choices=item_220_choices, null=True)
item_221_choices = [('understands', 'understands'), ('produces', 'produces')]
item_221 = models.CharField(max_length=11, choices=item_221_choices, null=True)
item_222_choices = [('understands', 'understands'), ('produces', 'produces')]
item_222 = models.CharField(max_length=11, choices=item_222_choices, null=True)
item_223_choices = [('understands', 'understands'), ('produces', 'produces')]
item_223 = models.CharField(max_length=11, choices=item_223_choices, null=True)
item_224_choices = [('understands', 'understands'), ('produces', 'produces')]
item_224 = models.CharField(max_length=11, choices=item_224_choices, null=True)
item_225_choices = [('understands', 'understands'), ('produces', 'produces')]
item_225 = models.CharField(max_length=11, choices=item_225_choices, null=True)
item_226_choices = [('understands', 'understands'), ('produces', 'produces')]
item_226 = models.CharField(max_length=11, choices=item_226_choices, null=True)
item_227_choices = [('understands', 'understands'), ('produces', 'produces')]
item_227 = models.CharField(max_length=11, choices=item_227_choices, null=True)
item_228_choices = [('understands', 'understands'), ('produces', 'produces')]
item_228 = models.CharField(max_length=11, choices=item_228_choices, null=True)
item_229_choices = [('understands', 'understands'), ('produces', 'produces')]
item_229 = models.CharField(max_length=11, choices=item_229_choices, null=True)
item_230_choices = [('understands', 'understands'), ('produces', 'produces')]
item_230 = models.CharField(max_length=11, choices=item_230_choices, null=True)
item_231_choices = [('understands', 'understands'), ('produces', 'produces')]
item_231 = models.CharField(max_length=11, choices=item_231_choices, null=True)
item_232_choices = [('understands', 'understands'), ('produces', 'produces')]
item_232 = models.CharField(max_length=11, choices=item_232_choices, null=True)
item_233_choices = [('understands', 'understands'), ('produces', 'produces')]
item_233 = models.CharField(max_length=11, choices=item_233_choices, null=True)
item_234_choices = [('understands', 'understands'), ('produces', 'produces')]
item_234 = models.CharField(max_length=11, choices=item_234_choices, null=True)
item_235_choices = [('understands', 'understands'), ('produces', 'produces')]
item_235 = models.CharField(max_length=11, choices=item_235_choices, null=True)
item_236_choices = [('understands', 'understands'), ('produces', 'produces')]
item_236 = models.CharField(max_length=11, choices=item_236_choices, null=True)
item_237_choices = [('understands', 'understands'), ('produces', 'produces')]
item_237 = models.CharField(max_length=11, choices=item_237_choices, null=True)
item_238_choices = [('understands', 'understands'), ('produces', 'produces')]
item_238 = models.CharField(max_length=11, choices=item_238_choices, null=True)
item_239_choices = [('understands', 'understands'), ('produces', 'produces')]
item_239 = models.CharField(max_length=11, choices=item_239_choices, null=True)
item_240_choices = [('understands', 'understands'), ('produces', 'produces')]
item_240 = models.CharField(max_length=11, choices=item_240_choices, null=True)
item_241_choices = [('understands', 'understands'), ('produces', 'produces')]
item_241 = models.CharField(max_length=11, choices=item_241_choices, null=True)
item_242_choices = [('understands', 'understands'), ('produces', 'produces')]
item_242 = models.CharField(max_length=11, choices=item_242_choices, null=True)
item_243_choices = [('understands', 'understands'), ('produces', 'produces')]
item_243 = models.CharField(max_length=11, choices=item_243_choices, null=True)
item_244_choices = [('understands', 'understands'), ('produces', 'produces')]
item_244 = models.CharField(max_length=11, choices=item_244_choices, null=True)
item_245_choices = [('understands', 'understands'), ('produces', 'produces')]
item_245 = models.CharField(max_length=11, choices=item_245_choices, null=True)
item_246_choices = [('understands', 'understands'), ('produces', 'produces')]
item_246 = models.CharField(max_length=11, choices=item_246_choices, null=True)
item_247_choices = [('understands', 'understands'), ('produces', 'produces')]
item_247 = models.CharField(max_length=11, choices=item_247_choices, null=True)
item_248_choices = [('understands', 'understands'), ('produces', 'produces')]
item_248 = models.CharField(max_length=11, choices=item_248_choices, null=True)
item_249_choices = [('understands', 'understands'), ('produces', 'produces')]
item_249 = models.CharField(max_length=11, choices=item_249_choices, null=True)
item_250_choices = [('understands', 'understands'), ('produces', 'produces')]
item_250 = models.CharField(max_length=11, choices=item_250_choices, null=True)
item_251_choices = [('understands', 'understands'), ('produces', 'produces')]
item_251 = models.CharField(max_length=11, choices=item_251_choices, null=True)
item_252_choices = [('understands', 'understands'), ('produces', 'produces')]
item_252 = models.CharField(max_length=11, choices=item_252_choices, null=True)
item_253_choices = [('understands', 'understands'), ('produces', 'produces')]
item_253 = models.CharField(max_length=11, choices=item_253_choices, null=True)
item_254_choices = [('understands', 'understands'), ('produces', 'produces')]
item_254 = models.CharField(max_length=11, choices=item_254_choices, null=True)
item_255_choices = [('understands', 'understands'), ('produces', 'produces')]
item_255 = models.CharField(max_length=11, choices=item_255_choices, null=True)
item_256_choices = [('understands', 'understands'), ('produces', 'produces')]
item_256 = models.CharField(max_length=11, choices=item_256_choices, null=True)
item_257_choices = [('understands', 'understands'), ('produces', 'produces')]
item_257 = models.CharField(max_length=11, choices=item_257_choices, null=True)
item_258_choices = [('understands', 'understands'), ('produces', 'produces')]
item_258 = models.CharField(max_length=11, choices=item_258_choices, null=True)
item_259_choices = [('understands', 'understands'), ('produces', 'produces')]
item_259 = models.CharField(max_length=11, choices=item_259_choices, null=True)
item_260_choices = [('understands', 'understands'), ('produces', 'produces')]
item_260 = models.CharField(max_length=11, choices=item_260_choices, null=True)
item_261_choices = [('understands', 'understands'), ('produces', 'produces')]
item_261 = models.CharField(max_length=11, choices=item_261_choices, null=True)
item_262_choices = [('understands', 'understands'), ('produces', 'produces')]
item_262 = models.CharField(max_length=11, choices=item_262_choices, null=True)
item_263_choices = [('understands', 'understands'), ('produces', 'produces')]
item_263 = models.CharField(max_length=11, choices=item_263_choices, null=True)
item_264_choices = [('understands', 'understands'), ('produces', 'produces')]
item_264 = models.CharField(max_length=11, choices=item_264_choices, null=True)
item_265_choices = [('understands', 'understands'), ('produces', 'produces')]
item_265 = models.CharField(max_length=11, choices=item_265_choices, null=True)
item_266_choices = [('understands', 'understands'), ('produces', 'produces')]
item_266 = models.CharField(max_length=11, choices=item_266_choices, null=True)
item_267_choices = [('understands', 'understands'), ('produces', 'produces')]
item_267 = models.CharField(max_length=11, choices=item_267_choices, null=True)
item_268_choices = [('understands', 'understands'), ('produces', 'produces')]
item_268 = models.CharField(max_length=11, choices=item_268_choices, null=True)
item_269_choices = [('understands', 'understands'), ('produces', 'produces')]
item_269 = models.CharField(max_length=11, choices=item_269_choices, null=True)
item_270_choices = [('understands', 'understands'), ('produces', 'produces')]
item_270 = models.CharField(max_length=11, choices=item_270_choices, null=True)
item_271_choices = [('understands', 'understands'), ('produces', 'produces')]
item_271 = models.CharField(max_length=11, choices=item_271_choices, null=True)
item_272_choices = [('understands', 'understands'), ('produces', 'produces')]
item_272 = models.CharField(max_length=11, choices=item_272_choices, null=True)
item_273_choices = [('understands', 'understands'), ('produces', 'produces')]
item_273 = models.CharField(max_length=11, choices=item_273_choices, null=True)
item_274_choices = [('understands', 'understands'), ('produces', 'produces')]
item_274 = models.CharField(max_length=11, choices=item_274_choices, null=True)
item_275_choices = [('understands', 'understands'), ('produces', 'produces')]
item_275 = models.CharField(max_length=11, choices=item_275_choices, null=True)
item_276_choices = [('understands', 'understands'), ('produces', 'produces')]
item_276 = models.CharField(max_length=11, choices=item_276_choices, null=True)
item_277_choices = [('understands', 'understands'), ('produces', 'produces')]
item_277 = models.CharField(max_length=11, choices=item_277_choices, null=True)
item_278_choices = [('understands', 'understands'), ('produces', 'produces')]
item_278 = models.CharField(max_length=11, choices=item_278_choices, null=True)
item_279_choices = [('understands', 'understands'), ('produces', 'produces')]
item_279 = models.CharField(max_length=11, choices=item_279_choices, null=True)
item_280_choices = [('understands', 'understands'), ('produces', 'produces')]
item_280 = models.CharField(max_length=11, choices=item_280_choices, null=True)
item_281_choices = [('understands', 'understands'), ('produces', 'produces')]
item_281 = models.CharField(max_length=11, choices=item_281_choices, null=True)
item_282_choices = [('understands', 'understands'), ('produces', 'produces')]
item_282 = models.CharField(max_length=11, choices=item_282_choices, null=True)
item_283_choices = [('understands', 'understands'), ('produces', 'produces')]
item_283 = models.CharField(max_length=11, choices=item_283_choices, null=True)
item_284_choices = [('understands', 'understands'), ('produces', 'produces')]
item_284 = models.CharField(max_length=11, choices=item_284_choices, null=True)
item_285_choices = [('understands', 'understands'), ('produces', 'produces')]
item_285 = models.CharField(max_length=11, choices=item_285_choices, null=True)
item_286_choices = [('understands', 'understands'), ('produces', 'produces')]
item_286 = models.CharField(max_length=11, choices=item_286_choices, null=True)
item_287_choices = [('understands', 'understands'), ('produces', 'produces')]
item_287 = models.CharField(max_length=11, choices=item_287_choices, null=True)
item_288_choices = [('understands', 'understands'), ('produces', 'produces')]
item_288 = models.CharField(max_length=11, choices=item_288_choices, null=True)
item_289_choices = [('understands', 'understands'), ('produces', 'produces')]
item_289 = models.CharField(max_length=11, choices=item_289_choices, null=True)
item_290_choices = [('understands', 'understands'), ('produces', 'produces')]
item_290 = models.CharField(max_length=11, choices=item_290_choices, null=True)
item_291_choices = [('understands', 'understands'), ('produces', 'produces')]
item_291 = models.CharField(max_length=11, choices=item_291_choices, null=True)
item_292_choices = [('understands', 'understands'), ('produces', 'produces')]
item_292 = models.CharField(max_length=11, choices=item_292_choices, null=True)
item_293_choices = [('understands', 'understands'), ('produces', 'produces')]
item_293 = models.CharField(max_length=11, choices=item_293_choices, null=True)
item_294_choices = [('understands', 'understands'), ('produces', 'produces')]
item_294 = models.CharField(max_length=11, choices=item_294_choices, null=True)
item_295_choices = [('understands', 'understands'), ('produces', 'produces')]
item_295 = models.CharField(max_length=11, choices=item_295_choices, null=True)
item_296_choices = [('understands', 'understands'), ('produces', 'produces')]
item_296 = models.CharField(max_length=11, choices=item_296_choices, null=True)
item_297_choices = [('understands', 'understands'), ('produces', 'produces')]
item_297 = models.CharField(max_length=11, choices=item_297_choices, null=True)
item_298_choices = [('understands', 'understands'), ('produces', 'produces')]
item_298 = models.CharField(max_length=11, choices=item_298_choices, null=True)
item_299_choices = [('understands', 'understands'), ('produces', 'produces')]
item_299 = models.CharField(max_length=11, choices=item_299_choices, null=True)
item_300_choices = [('understands', 'understands'), ('produces', 'produces')]
item_300 = models.CharField(max_length=11, choices=item_300_choices, null=True)
item_301_choices = [('understands', 'understands'), ('produces', 'produces')]
item_301 = models.CharField(max_length=11, choices=item_301_choices, null=True)
item_302_choices = [('understands', 'understands'), ('produces', 'produces')]
item_302 = models.CharField(max_length=11, choices=item_302_choices, null=True)
item_303_choices = [('understands', 'understands'), ('produces', 'produces')]
item_303 = models.CharField(max_length=11, choices=item_303_choices, null=True)
item_304_choices = [('understands', 'understands'), ('produces', 'produces')]
item_304 = models.CharField(max_length=11, choices=item_304_choices, null=True)
item_305_choices = [('understands', 'understands'), ('produces', 'produces')]
item_305 = models.CharField(max_length=11, choices=item_305_choices, null=True)
item_306_choices = [('understands', 'understands'), ('produces', 'produces')]
item_306 = models.CharField(max_length=11, choices=item_306_choices, null=True)
item_307_choices = [('understands', 'understands'), ('produces', 'produces')]
item_307 = models.CharField(max_length=11, choices=item_307_choices, null=True)
item_308_choices = [('understands', 'understands'), ('produces', 'produces')]
item_308 = models.CharField(max_length=11, choices=item_308_choices, null=True)
item_309_choices = [('understands', 'understands'), ('produces', 'produces')]
item_309 = models.CharField(max_length=11, choices=item_309_choices, null=True)
item_310_choices = [('understands', 'understands'), ('produces', 'produces')]
item_310 = models.CharField(max_length=11, choices=item_310_choices, null=True)
item_311_choices = [('understands', 'understands'), ('produces', 'produces')]
item_311 = models.CharField(max_length=11, choices=item_311_choices, null=True)
item_312_choices = [('understands', 'understands'), ('produces', 'produces')]
item_312 = models.CharField(max_length=11, choices=item_312_choices, null=True)
item_313_choices = [('understands', 'understands'), ('produces', 'produces')]
item_313 = models.CharField(max_length=11, choices=item_313_choices, null=True)
item_314_choices = [('understands', 'understands'), ('produces', 'produces')]
item_314 = models.CharField(max_length=11, choices=item_314_choices, null=True)
item_315_choices = [('understands', 'understands'), ('produces', 'produces')]
item_315 = models.CharField(max_length=11, choices=item_315_choices, null=True)
item_316_choices = [('understands', 'understands'), ('produces', 'produces')]
item_316 = models.CharField(max_length=11, choices=item_316_choices, null=True)
item_317_choices = [('understands', 'understands'), ('produces', 'produces')]
item_317 = models.CharField(max_length=11, choices=item_317_choices, null=True)
item_318_choices = [('understands', 'understands'), ('produces', 'produces')]
item_318 = models.CharField(max_length=11, choices=item_318_choices, null=True)
item_319_choices = [('understands', 'understands'), ('produces', 'produces')]
item_319 = models.CharField(max_length=11, choices=item_319_choices, null=True)
item_320_choices = [('understands', 'understands'), ('produces', 'produces')]
item_320 = models.CharField(max_length=11, choices=item_320_choices, null=True)
item_321_choices = [('understands', 'understands'), ('produces', 'produces')]
item_321 = models.CharField(max_length=11, choices=item_321_choices, null=True)
item_322_choices = [('understands', 'understands'), ('produces', 'produces')]
item_322 = models.CharField(max_length=11, choices=item_322_choices, null=True)
item_323_choices = [('understands', 'understands'), ('produces', 'produces')]
item_323 = models.CharField(max_length=11, choices=item_323_choices, null=True)
item_324_choices = [('understands', 'understands'), ('produces', 'produces')]
item_324 = models.CharField(max_length=11, choices=item_324_choices, null=True)
item_325_choices = [('understands', 'understands'), ('produces', 'produces')]
item_325 = models.CharField(max_length=11, choices=item_325_choices, null=True)
item_326_choices = [('understands', 'understands'), ('produces', 'produces')]
item_326 = models.CharField(max_length=11, choices=item_326_choices, null=True)
item_327_choices = [('understands', 'understands'), ('produces', 'produces')]
item_327 = models.CharField(max_length=11, choices=item_327_choices, null=True)
item_328_choices = [('understands', 'understands'), ('produces', 'produces')]
item_328 = models.CharField(max_length=11, choices=item_328_choices, null=True)
item_329_choices = [('understands', 'understands'), ('produces', 'produces')]
item_329 = models.CharField(max_length=11, choices=item_329_choices, null=True)
item_330_choices = [('understands', 'understands'), ('produces', 'produces')]
item_330 = models.CharField(max_length=11, choices=item_330_choices, null=True)
item_331_choices = [('understands', 'understands'), ('produces', 'produces')]
item_331 = models.CharField(max_length=11, choices=item_331_choices, null=True)
item_332_choices = [('understands', 'understands'), ('produces', 'produces')]
item_332 = models.CharField(max_length=11, choices=item_332_choices, null=True)
item_333_choices = [('understands', 'understands'), ('produces', 'produces')]
item_333 = models.CharField(max_length=11, choices=item_333_choices, null=True)
item_334_choices = [('understands', 'understands'), ('produces', 'produces')]
item_334 = models.CharField(max_length=11, choices=item_334_choices, null=True)
item_335_choices = [('understands', 'understands'), ('produces', 'produces')]
item_335 = models.CharField(max_length=11, choices=item_335_choices, null=True)
item_336_choices = [('understands', 'understands'), ('produces', 'produces')]
item_336 = models.CharField(max_length=11, choices=item_336_choices, null=True)
item_337_choices = [('understands', 'understands'), ('produces', 'produces')]
item_337 = models.CharField(max_length=11, choices=item_337_choices, null=True)
item_338_choices = [('understands', 'understands'), ('produces', 'produces')]
item_338 = models.CharField(max_length=11, choices=item_338_choices, null=True)
item_339_choices = [('understands', 'understands'), ('produces', 'produces')]
item_339 = models.CharField(max_length=11, choices=item_339_choices, null=True)
item_340_choices = [('understands', 'understands'), ('produces', 'produces')]
item_340 = models.CharField(max_length=11, choices=item_340_choices, null=True)
item_341_choices = [('understands', 'understands'), ('produces', 'produces')]
item_341 = models.CharField(max_length=11, choices=item_341_choices, null=True)
item_342_choices = [('understands', 'understands'), ('produces', 'produces')]
item_342 = models.CharField(max_length=11, choices=item_342_choices, null=True)
item_343_choices = [('understands', 'understands'), ('produces', 'produces')]
item_343 = models.CharField(max_length=11, choices=item_343_choices, null=True)
item_344_choices = [('understands', 'understands'), ('produces', 'produces')]
item_344 = models.CharField(max_length=11, choices=item_344_choices, null=True)
item_345_choices = [('understands', 'understands'), ('produces', 'produces')]
item_345 = models.CharField(max_length=11, choices=item_345_choices, null=True)
item_346_choices = [('understands', 'understands'), ('produces', 'produces')]
item_346 = models.CharField(max_length=11, choices=item_346_choices, null=True)
item_347_choices = [('understands', 'understands'), ('produces', 'produces')]
item_347 = models.CharField(max_length=11, choices=item_347_choices, null=True)
item_348_choices = [('understands', 'understands'), ('produces', 'produces')]
item_348 = models.CharField(max_length=11, choices=item_348_choices, null=True)
item_349_choices = [('understands', 'understands'), ('produces', 'produces')]
item_349 = models.CharField(max_length=11, choices=item_349_choices, null=True)
item_350_choices = [('understands', 'understands'), ('produces', 'produces')]
item_350 = models.CharField(max_length=11, choices=item_350_choices, null=True)
item_351_choices = [('understands', 'understands'), ('produces', 'produces')]
item_351 = models.CharField(max_length=11, choices=item_351_choices, null=True)
item_352_choices = [('understands', 'understands'), ('produces', 'produces')]
item_352 = models.CharField(max_length=11, choices=item_352_choices, null=True)
item_353_choices = [('understands', 'understands'), ('produces', 'produces')]
item_353 = models.CharField(max_length=11, choices=item_353_choices, null=True)
item_354_choices = [('understands', 'understands'), ('produces', 'produces')]
item_354 = models.CharField(max_length=11, choices=item_354_choices, null=True)
item_355_choices = [('understands', 'understands'), ('produces', 'produces')]
item_355 = models.CharField(max_length=11, choices=item_355_choices, null=True)
item_356_choices = [('understands', 'understands'), ('produces', 'produces')]
item_356 = models.CharField(max_length=11, choices=item_356_choices, null=True)
item_357_choices = [('understands', 'understands'), ('produces', 'produces')]
item_357 = models.CharField(max_length=11, choices=item_357_choices, null=True)
item_358_choices = [('understands', 'understands'), ('produces', 'produces')]
item_358 = models.CharField(max_length=11, choices=item_358_choices, null=True)
item_359_choices = [('understands', 'understands'), ('produces', 'produces')]
item_359 = models.CharField(max_length=11, choices=item_359_choices, null=True)
item_360_choices = [('understands', 'understands'), ('produces', 'produces')]
item_360 = models.CharField(max_length=11, choices=item_360_choices, null=True)
item_361_choices = [('understands', 'understands'), ('produces', 'produces')]
item_361 = models.CharField(max_length=11, choices=item_361_choices, null=True)
item_362_choices = [('understands', 'understands'), ('produces', 'produces')]
item_362 = models.CharField(max_length=11, choices=item_362_choices, null=True)
item_363_choices = [('understands', 'understands'), ('produces', 'produces')]
item_363 = models.CharField(max_length=11, choices=item_363_choices, null=True)
item_364_choices = [('understands', 'understands'), ('produces', 'produces')]
item_364 = models.CharField(max_length=11, choices=item_364_choices, null=True)
item_365_choices = [('understands', 'understands'), ('produces', 'produces')]
item_365 = models.CharField(max_length=11, choices=item_365_choices, null=True)
item_366_choices = [('understands', 'understands'), ('produces', 'produces')]
item_366 = models.CharField(max_length=11, choices=item_366_choices, null=True)
item_367_choices = [('understands', 'understands'), ('produces', 'produces')]
item_367 = models.CharField(max_length=11, choices=item_367_choices, null=True)
item_368_choices = [('understands', 'understands'), ('produces', 'produces')]
item_368 = models.CharField(max_length=11, choices=item_368_choices, null=True)
item_369_choices = [('understands', 'understands'), ('produces', 'produces')]
item_369 = models.CharField(max_length=11, choices=item_369_choices, null=True)
item_370_choices = [('understands', 'understands'), ('produces', 'produces')]
item_370 = models.CharField(max_length=11, choices=item_370_choices, null=True)
item_371_choices = [('understands', 'understands'), ('produces', 'produces')]
item_371 = models.CharField(max_length=11, choices=item_371_choices, null=True)
item_372_choices = [('understands', 'understands'), ('produces', 'produces')]
item_372 = models.CharField(max_length=11, choices=item_372_choices, null=True)
item_373_choices = [('understands', 'understands'), ('produces', 'produces')]
item_373 = models.CharField(max_length=11, choices=item_373_choices, null=True)
item_374_choices = [('understands', 'understands'), ('produces', 'produces')]
item_374 = models.CharField(max_length=11, choices=item_374_choices, null=True)
item_375_choices = [('understands', 'understands'), ('produces', 'produces')]
item_375 = models.CharField(max_length=11, choices=item_375_choices, null=True)
item_376_choices = [('understands', 'understands'), ('produces', 'produces')]
item_376 = models.CharField(max_length=11, choices=item_376_choices, null=True)
item_377_choices = [('understands', 'understands'), ('produces', 'produces')]
item_377 = models.CharField(max_length=11, choices=item_377_choices, null=True)
item_378_choices = [('understands', 'understands'), ('produces', 'produces')]
item_378 = models.CharField(max_length=11, choices=item_378_choices, null=True)
item_379_choices = [('understands', 'understands'), ('produces', 'produces')]
item_379 = models.CharField(max_length=11, choices=item_379_choices, null=True)
item_380_choices = [('understands', 'understands'), ('produces', 'produces')]
item_380 = models.CharField(max_length=11, choices=item_380_choices, null=True)
item_381_choices = [('understands', 'understands'), ('produces', 'produces')]
item_381 = models.CharField(max_length=11, choices=item_381_choices, null=True)
item_382_choices = [('understands', 'understands'), ('produces', 'produces')]
item_382 = models.CharField(max_length=11, choices=item_382_choices, null=True)
item_383_choices = [('understands', 'understands'), ('produces', 'produces')]
item_383 = models.CharField(max_length=11, choices=item_383_choices, null=True)
item_384_choices = [('understands', 'understands'), ('produces', 'produces')]
item_384 = models.CharField(max_length=11, choices=item_384_choices, null=True)
item_385_choices = [('understands', 'understands'), ('produces', 'produces')]
item_385 = models.CharField(max_length=11, choices=item_385_choices, null=True)
item_386_choices = [('understands', 'understands'), ('produces', 'produces')]
item_386 = models.CharField(max_length=11, choices=item_386_choices, null=True)
item_387_choices = [('understands', 'understands'), ('produces', 'produces')]
item_387 = models.CharField(max_length=11, choices=item_387_choices, null=True)
item_388_choices = [('understands', 'understands'), ('produces', 'produces')]
item_388 = models.CharField(max_length=11, choices=item_388_choices, null=True)
item_389_choices = [('understands', 'understands'), ('produces', 'produces')]
item_389 = models.CharField(max_length=11, choices=item_389_choices, null=True)
item_390_choices = [('understands', 'understands'), ('produces', 'produces')]
item_390 = models.CharField(max_length=11, choices=item_390_choices, null=True)
item_391_choices = [('understands', 'understands'), ('produces', 'produces')]
item_391 = models.CharField(max_length=11, choices=item_391_choices, null=True)
item_392_choices = [('understands', 'understands'), ('produces', 'produces')]
item_392 = models.CharField(max_length=11, choices=item_392_choices, null=True)
item_393_choices = [('understands', 'understands'), ('produces', 'produces')]
item_393 = models.CharField(max_length=11, choices=item_393_choices, null=True)
item_394_choices = [('understands', 'understands'), ('produces', 'produces')]
item_394 = models.CharField(max_length=11, choices=item_394_choices, null=True)
item_395_choices = [('understands', 'understands'), ('produces', 'produces')]
item_395 = models.CharField(max_length=11, choices=item_395_choices, null=True)
item_396_choices = [('understands', 'understands'), ('produces', 'produces')]
item_396 = models.CharField(max_length=11, choices=item_396_choices, null=True)
item_397_choices = [('understands', 'understands'), ('produces', 'produces')]
item_397 = models.CharField(max_length=11, choices=item_397_choices, null=True)
item_398_choices = [('understands', 'understands'), ('produces', 'produces')]
item_398 = models.CharField(max_length=11, choices=item_398_choices, null=True)
item_399_choices = [('understands', 'understands'), ('produces', 'produces')]
item_399 = models.CharField(max_length=11, choices=item_399_choices, null=True)
item_400_choices = [('understands', 'understands'), ('produces', 'produces')]
item_400 = models.CharField(max_length=11, choices=item_400_choices, null=True)
item_401_choices = [('understands', 'understands'), ('produces', 'produces')]
item_401 = models.CharField(max_length=11, choices=item_401_choices, null=True)
item_402_choices = [('understands', 'understands'), ('produces', 'produces')]
item_402 = models.CharField(max_length=11, choices=item_402_choices, null=True)
item_403_choices = [('understands', 'understands'), ('produces', 'produces')]
item_403 = models.CharField(max_length=11, choices=item_403_choices, null=True)
item_404_choices = [('understands', 'understands'), ('produces', 'produces')]
item_404 = models.CharField(max_length=11, choices=item_404_choices, null=True)
item_405_choices = [('understands', 'understands'), ('produces', 'produces')]
item_405 = models.CharField(max_length=11, choices=item_405_choices, null=True)
item_406_choices = [('understands', 'understands'), ('produces', 'produces')]
item_406 = models.CharField(max_length=11, choices=item_406_choices, null=True)
item_407_choices = [('understands', 'understands'), ('produces', 'produces')]
item_407 = models.CharField(max_length=11, choices=item_407_choices, null=True)
item_408_choices = [('understands', 'understands'), ('produces', 'produces')]
item_408 = models.CharField(max_length=11, choices=item_408_choices, null=True)
item_409_choices = [('understands', 'understands'), ('produces', 'produces')]
item_409 = models.CharField(max_length=11, choices=item_409_choices, null=True)
item_410_choices = [('understands', 'understands'), ('produces', 'produces')]
item_410 = models.CharField(max_length=11, choices=item_410_choices, null=True)
item_411_choices = [('understands', 'understands'), ('produces', 'produces')]
item_411 = models.CharField(max_length=11, choices=item_411_choices, null=True)
item_412_choices = [('understands', 'understands'), ('produces', 'produces')]
item_412 = models.CharField(max_length=11, choices=item_412_choices, null=True)
item_413_choices = [('understands', 'understands'), ('produces', 'produces')]
item_413 = models.CharField(max_length=11, choices=item_413_choices, null=True)
item_414_choices = [('understands', 'understands'), ('produces', 'produces')]
item_414 = models.CharField(max_length=11, choices=item_414_choices, null=True)
item_415_choices = [('understands', 'understands'), ('produces', 'produces')]
item_415 = models.CharField(max_length=11, choices=item_415_choices, null=True)
item_416_choices = [('understands', 'understands'), ('produces', 'produces')]
item_416 = models.CharField(max_length=11, choices=item_416_choices, null=True)
item_417_choices = [('understands', 'understands'), ('produces', 'produces')]
item_417 = models.CharField(max_length=11, choices=item_417_choices, null=True)
item_418_choices = [('understands', 'understands'), ('produces', 'produces')]
item_418 = models.CharField(max_length=11, choices=item_418_choices, null=True)
item_419_choices = [('understands', 'understands'), ('produces', 'produces')]
item_419 = models.CharField(max_length=11, choices=item_419_choices, null=True)
item_420_choices = [('understands', 'understands'), ('produces', 'produces')]
item_420 = models.CharField(max_length=11, choices=item_420_choices, null=True)
item_421_choices = [('understands', 'understands'), ('produces', 'produces')]
item_421 = models.CharField(max_length=11, choices=item_421_choices, null=True)
item_422_choices = [('understands', 'understands'), ('produces', 'produces')]
item_422 = models.CharField(max_length=11, choices=item_422_choices, null=True)
item_423_choices = [('understands', 'understands'), ('produces', 'produces')]
item_423 = models.CharField(max_length=11, choices=item_423_choices, null=True)
item_424_choices = [('understands', 'understands'), ('produces', 'produces')]
item_424 = models.CharField(max_length=11, choices=item_424_choices, null=True)
item_425_choices = [('understands', 'understands'), ('produces', 'produces')]
item_425 = models.CharField(max_length=11, choices=item_425_choices, null=True)
item_426_choices = [('understands', 'understands'), ('produces', 'produces')]
item_426 = models.CharField(max_length=11, choices=item_426_choices, null=True)
item_427_choices = [('understands', 'understands'), ('produces', 'produces')]
item_427 = models.CharField(max_length=11, choices=item_427_choices, null=True)
item_428_choices = [('understands', 'understands'), ('produces', 'produces')]
item_428 = models.CharField(max_length=11, choices=item_428_choices, null=True)
item_429_choices = [('understands', 'understands'), ('produces', 'produces')]
item_429 = models.CharField(max_length=11, choices=item_429_choices, null=True)
item_430_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_430 = models.CharField(max_length=9, choices=item_430_choices, null=True)
item_431_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_431 = models.CharField(max_length=9, choices=item_431_choices, null=True)
item_432_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_432 = models.CharField(max_length=9, choices=item_432_choices, null=True)
item_433_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_433 = models.CharField(max_length=9, choices=item_433_choices, null=True)
item_434_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_434 = models.CharField(max_length=9, choices=item_434_choices, null=True)
item_435_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_435 = models.CharField(max_length=9, choices=item_435_choices, null=True)
item_436_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_436 = models.CharField(max_length=9, choices=item_436_choices, null=True)
item_437_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_437 = models.CharField(max_length=9, choices=item_437_choices, null=True)
item_438_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_438 = models.CharField(max_length=9, choices=item_438_choices, null=True)
item_439_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_439 = models.CharField(max_length=9, choices=item_439_choices, null=True)
item_440_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_440 = models.CharField(max_length=9, choices=item_440_choices, null=True)
item_441_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_441 = models.CharField(max_length=9, choices=item_441_choices, null=True)
item_442_choices = [('yes', 'yes'), ('no', 'no')]
item_442 = models.CharField(max_length=3, choices=item_442_choices, null=True)
item_443_choices = [('yes', 'yes'), ('no', 'no')]
item_443 = models.CharField(max_length=3, choices=item_443_choices, null=True)
item_444_choices = [('yes', 'yes'), ('no', 'no')]
item_444 = models.CharField(max_length=3, choices=item_444_choices, null=True)
item_445_choices = [('yes', 'yes'), ('no', 'no')]
item_445 = models.CharField(max_length=3, choices=item_445_choices, null=True)
item_446_choices = [('yes', 'yes'), ('no', 'no')]
item_446 = models.CharField(max_length=3, choices=item_446_choices, null=True)
item_447_choices = [('yes', 'yes'), ('no', 'no')]
item_447 = models.CharField(max_length=3, choices=item_447_choices, null=True)
item_448_choices = [('yes', 'yes'), ('no', 'no')]
item_448 = models.CharField(max_length=3, choices=item_448_choices, null=True)
item_449_choices = [('yes', 'yes'), ('no', 'no')]
item_449 = models.CharField(max_length=3, choices=item_449_choices, null=True)
item_450_choices = [('yes', 'yes'), ('no', 'no')]
item_450 = models.CharField(max_length=3, choices=item_450_choices, null=True)
item_451_choices = [('yes', 'yes'), ('no', 'no')]
item_451 = models.CharField(max_length=3, choices=item_451_choices, null=True)
item_452_choices = [('yes', 'yes'), ('no', 'no')]
item_452 = models.CharField(max_length=3, choices=item_452_choices, null=True)
item_453_choices = [('yes', 'yes'), ('no', 'no')]
item_453 = models.CharField(max_length=3, choices=item_453_choices, null=True)
item_454_choices = [('yes', 'yes'), ('no', 'no')]
item_454 = models.CharField(max_length=3, choices=item_454_choices, null=True)
item_455_choices = [('yes', 'yes'), ('no', 'no')]
item_455 = models.CharField(max_length=3, choices=item_455_choices, null=True)
item_456_choices = [('yes', 'yes'), ('no', 'no')]
item_456 = models.CharField(max_length=3, choices=item_456_choices, null=True)
item_457_choices = [('yes', 'yes'), ('no', 'no')]
item_457 = models.CharField(max_length=3, choices=item_457_choices, null=True)
item_458_choices = [('yes', 'yes'), ('no', 'no')]
item_458 = models.CharField(max_length=3, choices=item_458_choices, null=True)
item_459_choices = [('yes', 'yes'), ('no', 'no')]
item_459 = models.CharField(max_length=3, choices=item_459_choices, null=True)
item_460_choices = [('yes', 'yes'), ('no', 'no')]
item_460 = models.CharField(max_length=3, choices=item_460_choices, null=True)
item_461_choices = [('yes', 'yes'), ('no', 'no')]
item_461 = models.CharField(max_length=3, choices=item_461_choices, null=True)
item_462_choices = [('yes', 'yes'), ('no', 'no')]
item_462 = models.CharField(max_length=3, choices=item_462_choices, null=True)
item_463_choices = [('yes', 'yes'), ('no', 'no')]
item_463 = models.CharField(max_length=3, choices=item_463_choices, null=True)
item_464_choices = [('yes', 'yes'), ('no', 'no')]
item_464 = models.CharField(max_length=3, choices=item_464_choices, null=True)
item_465_choices = [('yes', 'yes'), ('no', 'no')]
item_465 = models.CharField(max_length=3, choices=item_465_choices, null=True)
item_466_choices = [('yes', 'yes'), ('no', 'no')]
item_466 = models.CharField(max_length=3, choices=item_466_choices, null=True)
item_467_choices = [('yes', 'yes'), ('no', 'no')]
item_467 = models.CharField(max_length=3, choices=item_467_choices, null=True)
item_468_choices = [('yes', 'yes'), ('no', 'no')]
item_468 = models.CharField(max_length=3, choices=item_468_choices, null=True)
item_469_choices = [('yes', 'yes'), ('no', 'no')]
item_469 = models.CharField(max_length=3, choices=item_469_choices, null=True)
item_470_choices = [('yes', 'yes'), ('no', 'no')]
item_470 = models.CharField(max_length=3, choices=item_470_choices, null=True)
item_471_choices = [('yes', 'yes'), ('no', 'no')]
item_471 = models.CharField(max_length=3, choices=item_471_choices, null=True)
item_472_choices = [('yes', 'yes'), ('no', 'no')]
item_472 = models.CharField(max_length=3, choices=item_472_choices, null=True)
item_473_choices = [('yes', 'yes'), ('no', 'no')]
item_473 = models.CharField(max_length=3, choices=item_473_choices, null=True)
item_474_choices = [('yes', 'yes'), ('no', 'no')]
item_474 = models.CharField(max_length=3, choices=item_474_choices, null=True)
item_475_choices = [('yes', 'yes'), ('no', 'no')]
item_475 = models.CharField(max_length=3, choices=item_475_choices, null=True)
item_476_choices = [('yes', 'yes'), ('no', 'no')]
item_476 = models.CharField(max_length=3, choices=item_476_choices, null=True)
item_477_choices = [('yes', 'yes'), ('no', 'no')]
item_477 = models.CharField(max_length=3, choices=item_477_choices, null=True)
item_478_choices = [('yes', 'yes'), ('no', 'no')]
item_478 = models.CharField(max_length=3, choices=item_478_choices, null=True)
item_479_choices = [('yes', 'yes'), ('no', 'no')]
item_479 = models.CharField(max_length=3, choices=item_479_choices, null=True)
item_480_choices = [('yes', 'yes'), ('no', 'no')]
item_480 = models.CharField(max_length=3, choices=item_480_choices, null=True)
item_481_choices = [('yes', 'yes'), ('no', 'no')]
item_481 = models.CharField(max_length=3, choices=item_481_choices, null=True)
item_482_choices = [('yes', 'yes'), ('no', 'no')]
item_482 = models.CharField(max_length=3, choices=item_482_choices, null=True)
item_483_choices = [('yes', 'yes'), ('no', 'no')]
item_483 = models.CharField(max_length=3, choices=item_483_choices, null=True)
item_484_choices = [('yes', 'yes'), ('no', 'no')]
item_484 = models.CharField(max_length=3, choices=item_484_choices, null=True)
item_485_choices = [('yes', 'yes'), ('no', 'no')]
item_485 = models.CharField(max_length=3, choices=item_485_choices, null=True)
item_486_choices = [('yes', 'yes'), ('no', 'no')]
item_486 = models.CharField(max_length=3, choices=item_486_choices, null=True)
item_487_choices = [('yes', 'yes'), ('no', 'no')]
item_487 = models.CharField(max_length=3, choices=item_487_choices, null=True)
item_488_choices = [('yes', 'yes'), ('no', 'no')]
item_488 = models.CharField(max_length=3, choices=item_488_choices, null=True)
item_489_choices = [('yes', 'yes'), ('no', 'no')]
item_489 = models.CharField(max_length=3, choices=item_489_choices, null=True)
item_490_choices = [('yes', 'yes'), ('no', 'no')]
item_490 = models.CharField(max_length=3, choices=item_490_choices, null=True)
item_491_choices = [('yes', 'yes'), ('no', 'no')]
item_491 = models.CharField(max_length=3, choices=item_491_choices, null=True)
item_492_choices = [('yes', 'yes'), ('no', 'no')]
item_492 = models.CharField(max_length=3, choices=item_492_choices, null=True)
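    # Note: the blocks of near-identical fields above could also be attached
    # programmatically after the class definition. A minimal sketch, assuming
    # Django's Model.add_to_class helper; `ChecklistResponse` is a placeholder
    # name for whatever model these fields belong to, and the range is illustrative:
    #
    #     YES_NO = [('yes', 'yes'), ('no', 'no')]
    #     for n in range(442, 493):
    #         ChecklistResponse.add_to_class(
    #             'item_%d' % n,
    #             models.CharField(max_length=3, choices=YES_NO, null=True),
    #         )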
|
gpl-2.0
| 4,795,566,595,396,682,000 | 79.218402 | 95 | 0.678746 | false |
cinemapub/bright-response
|
scripts/lib/tweepy/tweepy/models.py
|
1
|
12175
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from tweepy.error import TweepError
from tweepy.utils import parse_datetime, parse_html_value, parse_a_href
class ResultSet(list):
    """A list-like object that holds results from a Twitter API query."""
def __init__(self, max_id=None, since_id=None):
super(ResultSet, self).__init__()
self._max_id = max_id
self._since_id = since_id
@property
def max_id(self):
if self._max_id:
return self._max_id
ids = self.ids()
return max(ids) if ids else None
@property
def since_id(self):
if self._since_id:
return self._since_id
ids = self.ids()
return min(ids) if ids else None
def ids(self):
return [item.id for item in self if hasattr(item, 'id')]
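# Illustrative use (not part of the original module): parsers return ResultSet
# instances, so callers can inspect the id bounds of a page of results. Here
# `api` stands for a hypothetical tweepy.API instance:
#     statuses = api.user_timeline()        # a ResultSet of Status objects
#     statuses.max_id, statuses.since_id    # highest / lowest ids in this page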
class Model(object):
def __init__(self, api=None):
self._api = api
def __getstate__(self):
# pickle
pickle = dict(self.__dict__)
try:
del pickle['_api'] # do not pickle the API reference
except KeyError:
pass
return pickle
@classmethod
def parse(cls, api, json):
"""Parse a JSON object into a model instance."""
raise NotImplementedError
@classmethod
def parse_list(cls, api, json_list):
"""Parse a list of JSON objects into a result set of model instances."""
results = ResultSet()
for obj in json_list:
if obj:
results.append(cls.parse(api, obj))
return results
class Status(Model):
@classmethod
def parse(cls, api, json):
status = cls(api)
for k, v in json.items():
if k == 'user':
user_model = getattr(api.parser.model_factory, 'user')
user = user_model.parse(api, v)
setattr(status, 'author', user)
                setattr(status, 'user', user)  # DEPRECATED
elif k == 'created_at':
setattr(status, k, parse_datetime(v))
elif k == 'source':
if '<' in v:
setattr(status, k, parse_html_value(v))
setattr(status, 'source_url', parse_a_href(v))
else:
setattr(status, k, v)
setattr(status, 'source_url', None)
elif k == 'retweeted_status':
setattr(status, k, Status.parse(api, v))
elif k == 'place':
if v is not None:
setattr(status, k, Place.parse(api, v))
else:
setattr(status, k, None)
else:
setattr(status, k, v)
return status
def destroy(self):
return self._api.destroy_status(self.id)
def retweet(self):
return self._api.retweet(self.id)
def retweets(self):
return self._api.retweets(self.id)
def favorite(self):
return self._api.create_favorite(self.id)
class User(Model):
@classmethod
def parse(cls, api, json):
user = cls(api)
for k, v in json.items():
if k == 'created_at':
setattr(user, k, parse_datetime(v))
elif k == 'status':
setattr(user, k, Status.parse(api, v))
elif k == 'following':
# twitter sets this to null if it is false
if v is True:
setattr(user, k, True)
else:
setattr(user, k, False)
else:
setattr(user, k, v)
return user
@classmethod
def parse_list(cls, api, json_list):
if isinstance(json_list, list):
item_list = json_list
else:
item_list = json_list['users']
results = ResultSet()
for obj in item_list:
results.append(cls.parse(api, obj))
return results
def timeline(self, **kargs):
return self._api.user_timeline(user_id=self.id, **kargs)
def friends(self, **kargs):
return self._api.friends(user_id=self.id, **kargs)
def followers(self, **kargs):
return self._api.followers(user_id=self.id, **kargs)
def follow(self):
self._api.create_friendship(user_id=self.id)
self.following = True
def unfollow(self):
self._api.destroy_friendship(user_id=self.id)
self.following = False
def lists_memberships(self, *args, **kargs):
return self._api.lists_memberships(user=self.screen_name, *args, **kargs)
def lists_subscriptions(self, *args, **kargs):
return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs)
def lists(self, *args, **kargs):
return self._api.lists(user=self.screen_name, *args, **kargs)
def followers_ids(self, *args, **kargs):
return self._api.followers_ids(user_id=self.id, *args, **kargs)
class DirectMessage(Model):
@classmethod
def parse(cls, api, json):
dm = cls(api)
for k, v in json.items():
if k == 'sender' or k == 'recipient':
setattr(dm, k, User.parse(api, v))
elif k == 'created_at':
setattr(dm, k, parse_datetime(v))
else:
setattr(dm, k, v)
return dm
def destroy(self):
return self._api.destroy_direct_message(self.id)
class Friendship(Model):
@classmethod
def parse(cls, api, json):
relationship = json['relationship']
# parse source
source = cls(api)
for k, v in relationship['source'].items():
setattr(source, k, v)
# parse target
target = cls(api)
for k, v in relationship['target'].items():
setattr(target, k, v)
return source, target
class Category(Model):
@classmethod
def parse(cls, api, json):
category = cls(api)
for k, v in json.items():
setattr(category, k, v)
return category
class SavedSearch(Model):
@classmethod
def parse(cls, api, json):
ss = cls(api)
for k, v in json.items():
if k == 'created_at':
setattr(ss, k, parse_datetime(v))
else:
setattr(ss, k, v)
return ss
def destroy(self):
return self._api.destroy_saved_search(self.id)
class SearchResults(ResultSet):
@classmethod
def parse(cls, api, json):
metadata = json['search_metadata']
results = SearchResults(metadata.get('max_id'), metadata.get('since_id'))
results.refresh_url = metadata.get('refresh_url')
results.completed_in = metadata.get('completed_in')
results.query = metadata.get('query')
for status in json['statuses']:
results.append(Status.parse(api, status))
return results
class List(Model):
@classmethod
def parse(cls, api, json):
lst = List(api)
for k,v in json.items():
if k == 'user':
setattr(lst, k, User.parse(api, v))
elif k == 'created_at':
setattr(lst, k, parse_datetime(v))
else:
setattr(lst, k, v)
return lst
@classmethod
def parse_list(cls, api, json_list, result_set=None):
results = ResultSet()
if isinstance(json_list, dict):
json_list = json_list['lists']
for obj in json_list:
results.append(cls.parse(api, obj))
return results
def update(self, **kargs):
return self._api.update_list(self.slug, **kargs)
def destroy(self):
return self._api.destroy_list(self.slug)
def timeline(self, **kargs):
return self._api.list_timeline(self.user.screen_name, self.slug, **kargs)
def add_member(self, id):
return self._api.add_list_member(self.slug, id)
def remove_member(self, id):
return self._api.remove_list_member(self.slug, id)
def members(self, **kargs):
return self._api.list_members(self.user.screen_name, self.slug, **kargs)
def is_member(self, id):
return self._api.is_list_member(self.user.screen_name, self.slug, id)
def subscribe(self):
return self._api.subscribe_list(self.user.screen_name, self.slug)
def unsubscribe(self):
return self._api.unsubscribe_list(self.user.screen_name, self.slug)
def subscribers(self, **kargs):
return self._api.list_subscribers(self.user.screen_name, self.slug, **kargs)
def is_subscribed(self, id):
return self._api.is_subscribed_list(self.user.screen_name, self.slug, id)
class Relation(Model):
@classmethod
def parse(cls, api, json):
result = cls(api)
for k,v in json.items():
if k == 'value' and json['kind'] in ['Tweet', 'LookedupStatus']:
setattr(result, k, Status.parse(api, v))
elif k == 'results':
setattr(result, k, Relation.parse_list(api, v))
else:
setattr(result, k, v)
return result
class Relationship(Model):
@classmethod
def parse(cls, api, json):
result = cls(api)
for k,v in json.items():
if k == 'connections':
setattr(result, 'is_following', 'following' in v)
setattr(result, 'is_followed_by', 'followed_by' in v)
else:
setattr(result, k, v)
return result
class JSONModel(Model):
@classmethod
def parse(cls, api, json):
return json
class IDModel(Model):
@classmethod
def parse(cls, api, json):
if isinstance(json, list):
return json
else:
return json['ids']
class BoundingBox(Model):
@classmethod
def parse(cls, api, json):
result = cls(api)
if json is not None:
for k, v in json.items():
setattr(result, k, v)
return result
def origin(self):
"""
Return longitude, latitude of southwest (bottom, left) corner of
bounding box, as a tuple.
This assumes that bounding box is always a rectangle, which
appears to be the case at present.
"""
return tuple(self.coordinates[0][0])
def corner(self):
"""
Return longitude, latitude of northeast (top, right) corner of
bounding box, as a tuple.
This assumes that bounding box is always a rectangle, which
appears to be the case at present.
"""
return tuple(self.coordinates[0][2])
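    # Illustrative example (assuming the usual corner ordering described above):
    # for a box with coordinates [[[-122.5, 37.7], [-122.35, 37.7],
    # [-122.35, 37.8], [-122.5, 37.8]]], origin() returns (-122.5, 37.7)
    # and corner() returns (-122.35, 37.8).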
class Place(Model):
@classmethod
def parse(cls, api, json):
place = cls(api)
for k, v in json.items():
if k == 'bounding_box':
                # bounding_box value may be null (None).
# Example: "United States" (id=96683cc9126741d1)
if v is not None:
t = BoundingBox.parse(api, v)
else:
t = v
setattr(place, k, t)
elif k == 'contained_within':
# contained_within is a list of Places.
setattr(place, k, Place.parse_list(api, v))
else:
setattr(place, k, v)
return place
@classmethod
def parse_list(cls, api, json_list):
if isinstance(json_list, list):
item_list = json_list
else:
item_list = json_list['result']['places']
results = ResultSet()
for obj in item_list:
results.append(cls.parse(api, obj))
return results
class ModelFactory(object):
"""
Used by parsers for creating instances
of models. You may subclass this factory
to add your own extended models.
"""
status = Status
user = User
direct_message = DirectMessage
friendship = Friendship
saved_search = SavedSearch
search_results = SearchResults
category = Category
list = List
relation = Relation
relationship = Relationship
json = JSONModel
ids = IDModel
place = Place
bounding_box = BoundingBox
|
mit
| -6,371,270,057,078,195,000 | 27.313953 | 84 | 0.55269 | false |
FEniCS/instant
|
test/test07.py
|
1
|
1463
|
from __future__ import print_function
import pytest
import numpy as N
import time
from instant import build_module
import sys
from functools import reduce
c_code = """
void func(int n1, double* array1, int n2, double* array2){
double a;
if ( n1 == n2 ) {
for (int i=0; i<n1; i++) {
a = array1[i];
array2[i] = sin(a) + cos(a) + tan(a);
}
} else {
printf("The arrays should have the same size.");
}
}
"""
def test_module():
# Guess arrayobject is either in sys.prefix or /usr/local
test7_ext = build_module(code=c_code, system_headers=["numpy/arrayobject.h"], cppargs='-g',
include_dirs=[N.get_include()],
init_code='import_array();', modulename='test7_ext',
arrays = [['n1', 'array1'], ['n2', 'array2']])
seed = 10000000.0
a = N.arange(seed)
t1 = time.time()
b = N.sin(a) + N.cos(a) + N.tan(a)
t2 = time.time()
print("With NumPy: ", t2-t1, "seconds")
from test7_ext import func
c = N.arange(seed)
t1 = time.time()
func(a, c)
t2 = time.time()
print("With instant: ", t2-t1, "seconds")
t1 = time.time()
d = N.sin(a)
d += N.cos(a)
d += N.tan(a)
t2 = time.time()
    print("With NumPy in-place arithmetic: ", t2-t1, "seconds")
difference = abs(b - c)
    total = reduce(lambda a, b: a + b, difference)
    assert abs(total) < 1.0e-12
|
gpl-3.0
| -8,838,894,799,438,679,000 | 24.666667 | 95 | 0.535885 | false |
nextmovesoftware/smilesreading
|
scripts/WeiningerCEX.py
|
1
|
1431
|
import subprocess
import common
cex = """$D<MOL>
/_P<molecule>
/_V<Molecule>
/_S<1M>
/_L<XSMILES>
/_X<Molecule represented in XSMILES (Exchange SMILES)>
|
$MOL<%s>
|
"""
def isAromaticSmiles(smi):
N = len(smi)
for i in range(N):
x = smi[i]
if x == ":": return True
if x>='a' and x<='z':
if i==0 or smi[i-1]=='[' or x in "bcnsopfi" or smi[i-1:i+1] in ["cl", "br"]:
return True
return False
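# Illustrative examples (not exhaustive):
#   isAromaticSmiles("c1ccccc1")  -> True   (aromatic carbons, e.g. benzene)
#   isAromaticSmiles("C1CCCCC1")  -> False  (aliphatic ring, e.g. cyclohexane)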
class MyHydrogenCounter(common.HydrogenCounter):
def getoutput(self, smi):
if isAromaticSmiles(smi):
return None, "Aromatic_smiles_not_supported"
# Neither are up/down bond symbols
command = ["/home/noel/Tools/tmp/cex132/src/applics/mol/printmol"]
proc = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate(cex % smi)
out = stdout.split("\n")
if stderr:
return None, stderr.replace(" ", "_")
for line in out:
if "Implicit hcount" in line:
idx = line.find('"')
hcounts = map(int, line[idx+1:line.find('"', idx+1)].split(";"))
return hcounts, None
return None, "No_hcounts"
if __name__ == "__main__":
myname = "WeiningerCEX_132"
MyHydrogenCounter(myname).main()
|
bsd-2-clause
| -1,685,238,090,709,962,200 | 27.8125 | 111 | 0.542278 | false |
shanot/imp
|
modules/rmf/test/test_geometry.py
|
2
|
3590
|
from __future__ import print_function
import unittest
import IMP.rmf
import IMP.test
import RMF
from IMP.algebra import *
class Tests(IMP.test.TestCase):
def _assert_same(self, h0, h1):
self.assertAlmostEqual(IMP.atom.get_mass(h0),
IMP.atom.get_mass(h1), delta=1)
self.assertEqual(len(IMP.atom.get_leaves(h0)),
len(IMP.atom.get_leaves(h1)))
def test_round_trip(self):
"""Test reading and writing geometry"""
for suffix in IMP.rmf.suffixes:
nm = self.get_tmp_file_name("geometry" + suffix)
print(nm)
f = RMF.create_rmf_file(nm)
bb = IMP.algebra.BoundingBox3D(IMP.algebra.Vector3D(0, 0, 0),
IMP.algebra.Vector3D(10, 10, 10))
g = IMP.display.BoundingBoxGeometry(bb)
IMP.rmf.add_geometry(f, g)
IMP.rmf.save_frame(f, "zero")
del f
f = RMF.open_rmf_file_read_only(nm)
gs = IMP.rmf.create_geometries(f)
self.assertEqual(len(gs), 1)
gs[0].set_was_used(True)
cg = gs[0].get_components()
self.assertEqual(len(cg), 12)
def _do_test_round_trip(self, g, suffix):
gg = IMP.display.create_geometry(g)
nm = self.get_tmp_file_name("round_trip_g" + suffix)
print(nm)
rmf = RMF.create_rmf_file(nm)
IMP.rmf.add_geometries(rmf.get_root_node(), [gg])
IMP.rmf.save_frame(rmf, "zero")
del rmf
rmf = RMF.open_rmf_file_read_only(nm)
ggb = IMP.rmf.create_geometries(rmf)
IMP.rmf.load_frame(rmf, RMF.FrameID(0))
ggbt = gg.get_from(gg)
self.assertEqual(type(ggbt.get_geometry()), type(g))
def test_all_geometry(self):
"""Test reading and writing each type of geometry"""
for suffix in IMP.rmf.suffixes:
for g in [IMP.algebra.Sphere3D(IMP.algebra.Vector3D(0, 1, 2), 3),
IMP.algebra.Segment3D(IMP.algebra.Vector3D(0, 1, 2),
IMP.algebra.Vector3D(3, 4, 5)),
IMP.algebra.Cylinder3D(
IMP.algebra.Segment3D(IMP.algebra.Vector3D(0, 1, 2),
IMP.algebra.Vector3D(3, 4, 5)), 6)]:
self._do_test_round_trip(g, suffix)
def test_3(self):
"""Testing surface geometry"""
for suffix in IMP.rmf.suffixes:
self.skipTest("surfaces not supported")
#self.skipTest("surface geometry is disabled")
g = IMP.algebra.DenseDoubleGrid3D(
1, IMP.algebra.BoundingBox3D((-10, -10, -10),
(10, 10, 10)))
for i in g.get_all_indexes():
c = g.get_center(i)
m = c.get_magnitude()
g[i] = 100 - m
# for i in g.get_all_indexes():
# print i, g.get_center(i), g[i]
gg = IMP.display.IsosurfaceGeometry(g, 95.0)
gg.set_name("isosurface")
rmf = RMF.create_rmf_file(self.get_tmp_file_name("iso" + suffix))
IMP.rmf.add_geometry(rmf, gg)
del rmf
rmf = RMF.open_rmf_file(self.get_tmp_file_name("iso" + suffix))
gs = IMP.rmf.create_geometries(rmf)
w = IMP.display.PymolWriter(self.get_tmp_file_name("iso.pym"))
w.add_geometry(gg)
gs[0].set_name("after")
w.add_geometry(gs[0])
if __name__ == '__main__':
IMP.test.main()
|
gpl-3.0
| 5,892,105,699,499,598,000 | 39.795455 | 84 | 0.523677 | false |
lowlevel86/wakefulness-forecaster
|
03_PlanRendezvous/genRendezvous.py
|
1
|
2962
|
import sys
import datetime
import time
true = 1
false = 0
if len(sys.argv) != 5:
print("This program creates a wake up schedule for catching an appointment.")
print("Usage: ")
print("python genRendezvous.py [date] [time] [minutes until end] [lengthen 'l' or shorten 's' sleep-wake cycle]")
print("date format: yyyy-mm-dd")
print("time format: hh:mm (add 12 to hh for PM)")
print("Example: ")
print("python genRendezvous.py 2017-05-05 13:30 60 l")
exit()
#convert the appointment date and time command line arguments into timestamps
appointmentMinTs = int(time.mktime(time.strptime(sys.argv[1]+" "+sys.argv[2], "%Y-%m-%d %H:%M"))/60)
appointmentEndMinTs = appointmentMinTs + int(sys.argv[3])
lengthenOrShortenDay = sys.argv[4]
with open("wakeUpForecast.txt") as f:
wakeUpForecastData = f.readlines()
forecastMinTsArray = []
for log in wakeUpForecastData:
forecastMinTsArray.append(int(log.split(" ")[2]))
#find when the appointment is and whether it falls while you are asleep
minsInHalfDay = 60*12
chgSchedule = false
appointmentDayNum = -1
for i in range(0, len(forecastMinTsArray)):
#if appointment is while you are asleep then change schedule
if appointmentMinTs < forecastMinTsArray[i]:
if appointmentEndMinTs > forecastMinTsArray[i] - minsInHalfDay:
chgSchedule = true
appointmentDayNum = i
#if the schedule is changing
lengthenDayAmount = 0
shortenDayAmount = 0
if chgSchedule == true:
#find the amount of minutes to lengthen each day
if lengthenOrShortenDay == 'l':
appointToWakeTime = appointmentEndMinTs - (forecastMinTsArray[appointmentDayNum] - minsInHalfDay)
lengthenDayAmount = float(appointToWakeTime) / float(appointmentDayNum)
print("The wake up schedule is lengthened by %.3f minutes per day until appointment." % (lengthenDayAmount))
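        # e.g. if appointToWakeTime works out to 120 minutes and the appointment
        # falls on forecast day 30, each of those days is lengthened by 120/30 = 4.000 minutes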
#find the amount of minutes to shorten each day
if lengthenOrShortenDay == 's':
appointToWakeTime = forecastMinTsArray[appointmentDayNum] - appointmentMinTs
shortenDayAmount = float(appointToWakeTime) / float(appointmentDayNum)
print("The wake up schedule is shortened by %.3f minutes per day until appointment." % (shortenDayAmount))
else:
print("No need to change wake up schedule.")
#change the wake up schedule
for i in range(0, appointmentDayNum):
forecastMinTsArray[i] += (lengthenDayAmount - shortenDayAmount) * i
for i in range(appointmentDayNum, len(forecastMinTsArray)):
forecastMinTsArray[i] += (lengthenDayAmount - shortenDayAmount) * appointmentDayNum
file = open("wakeUpSchedule.txt", "w")
scheduleMinTs = 0
scheduleHourTs = 0
scheduleDayTs = 0
scheduleDateTime = ""
for i in range(0, len(forecastMinTsArray)):
scheduleMinTs = forecastMinTsArray[i]
scheduleHourTs = int(scheduleMinTs / 60)
scheduleDayTs = int(scheduleHourTs / 24)
scheduleDateTime = datetime.datetime.fromtimestamp(scheduleMinTs*60).strftime('%Y-%m-%d %H:%M')
file.write("%i %i %i %s\n" % (scheduleDayTs, scheduleHourTs, scheduleMinTs, scheduleDateTime))
file.close()
|
mit
| 4,711,496,272,851,396,000 | 34.686747 | 114 | 0.755908 | false |
botstory/todo-bot
|
todo/tasks/task_story_helper.py
|
1
|
1160
|
from botstory.ast import story_context
from todo.tasks import tasks_document
async def current_task(ctx):
"""
get current task from ctx
:param ctx:
:return:
"""
task_id = story_context.get_message_data(ctx, 'option', 'matches')[0]
return await tasks_document.TaskDocument.objects.find_by_id(task_id)
async def current_tasks(ctx):
"""
get current tasks from ctx
:param ctx:
:return:
"""
task_ids = story_context.get_message_data(ctx, 'option', 'matches')[0].split(',')
tasks = []
for task_id in task_ids:
tasks.append(await tasks_document.TaskDocument.objects.find_by_id(task_id))
return tasks
async def last_task(ctx=None, user=None, count_of_tasks=1):
if ctx:
user = ctx['user']
cursor = tasks_document.TaskDocument.objects({
'user_id': user['_id'],
}).sort(
updated_at='desc',
)
if count_of_tasks == 1:
return await cursor.first()
return reversed(await cursor.limit(count_of_tasks))
async def all_my_tasks(ctx):
return await tasks_document.TaskDocument.objects.find({
'user_id': ctx['user']['_id'],
})
|
mit
| 5,986,695,735,030,772,000 | 23.166667 | 85 | 0.626724 | false |
melecptp/trivial_pursuit_quiz
|
app.py
|
1
|
2745
|
# -*- coding:utf8 -*-
# !/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST']) # https://trivial-pursuit-quiz.herokuapp.com/webhook
def webhook():
req = request.get_json(silent=True, force=True)
res = processRequest(req)
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action")=="askquestion":
question = {"question":"Qui a traversé la ville nue sur un cheval","answer":["Lady Godiva","Godiva"]}
res = makeWebhookResultForQuestion(question)
else:
return {}
return res
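# For reference, an abridged sketch of the request shape this handler expects
# (only the fields actually read above; the real webhook payload has more):
#     {"result": {"action": "askquestion", ...}}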
def makeWebhookResultForQuestion(question):
speechText = question['question']
displayText = question['question']
contextOut = [{"name":"question", "lifespan":20, "parameters":question},{"name":"wait_answer","lifespan":10}]
return {
"speech": speechText,
"displayText": displayText,
"contextOut": contextOut,
}
def makeWebhookResultForGoodAnswer(guess):
speechText = 'Bravo'
displayText = 'Bravo la réponse était bien {}'.format(guess)
contextOut = [{"name": "next_question", "lifespan": 1}]
return {
"speech": speechText,
"displayText": displayText,
"contextOut": contextOut,
}
def makeWebhookResultForBadAnswer(guess):
speechText = 'Non'
    displayText = 'Non, la réponse n\'est pas {}'.format(guess)
return {
"speech": speechText,
"displayText": displayText,
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
|
apache-2.0
| 8,824,544,032,651,616,000 | 27.852632 | 113 | 0.686246 | false |
keras-team/keras-io
|
examples/nlp/semantic_similarity_with_bert.py
|
1
|
11604
|
"""
Title: Semantic Similarity with BERT
Author: [Mohamad Merchant](https://twitter.com/mohmadmerchant1)
Date created: 2020/08/15
Last modified: 2020/08/29
Description: Natural Language Inference by fine-tuning a BERT model on the SNLI Corpus.
"""
"""
## Introduction
Semantic Similarity is the task of determining how similar
two sentences are, in terms of what they mean.
This example demonstrates the use of SNLI (Stanford Natural Language Inference) Corpus
to predict sentence semantic similarity with Transformers.
We will fine-tune a BERT model that takes two sentences as inputs
and that outputs a similarity score for these two sentences.
### References
* [BERT](https://arxiv.org/pdf/1810.04805.pdf)
* [SNLI](https://nlp.stanford.edu/projects/snli/)
"""
"""
## Setup
Note: install HuggingFace `transformers` via `pip install transformers` (version >= 2.11.0).
"""
import numpy as np
import pandas as pd
import tensorflow as tf
import transformers
"""
## Configuration
"""
max_length = 128 # Maximum length of input sentence to the model.
batch_size = 32
epochs = 2
# Labels in our dataset.
labels = ["contradiction", "entailment", "neutral"]
"""
## Load the Data
"""
"""shell
curl -LO https://raw.githubusercontent.com/MohamadMerchant/SNLI/master/data.tar.gz
tar -xvzf data.tar.gz
"""
# There are more than 550k samples in total; we will use 100k for this example.
train_df = pd.read_csv("SNLI_Corpus/snli_1.0_train.csv", nrows=100000)
valid_df = pd.read_csv("SNLI_Corpus/snli_1.0_dev.csv")
test_df = pd.read_csv("SNLI_Corpus/snli_1.0_test.csv")
# Shape of the data
print(f"Total train samples : {train_df.shape[0]}")
print(f"Total validation samples: {valid_df.shape[0]}")
print(f"Total test samples: {test_df.shape[0]}")
"""
Dataset Overview:
- sentence1: The premise caption that was supplied to the author of the pair.
- sentence2: The hypothesis caption that was written by the author of the pair.
- similarity: This is the label chosen by the majority of annotators.
Where no majority exists, the label "-" is used (we will skip such samples here).
Here are the "similarity" label values in our dataset:
- Contradiction: The sentences share no similarity.
- Entailment: The sentences have similar meaning.
- Neutral: The sentences are neutral.
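For reference, these labels are integer-encoded later in this example as
contradiction -> 0, entailment -> 1, neutral -> 2, and then one-hot encoded.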
"""
"""
Let's look at one sample from the dataset:
"""
print(f"Sentence1: {train_df.loc[1, 'sentence1']}")
print(f"Sentence2: {train_df.loc[1, 'sentence2']}")
print(f"Similarity: {train_df.loc[1, 'similarity']}")
"""
## Preprocessing
"""
# We have some NaN entries in our train data, we will simply drop them.
print("Number of missing values")
print(train_df.isnull().sum())
train_df.dropna(axis=0, inplace=True)
"""
Distribution of our training targets.
"""
print("Train Target Distribution")
print(train_df.similarity.value_counts())
"""
Distribution of our validation targets.
"""
print("Validation Target Distribution")
print(valid_df.similarity.value_counts())
"""
The value "-" appears as part of our training and validation targets.
We will skip these samples.
"""
train_df = (
train_df[train_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
valid_df = (
valid_df[valid_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
"""
One-hot encode training, validation, and test labels.
"""
train_df["label"] = train_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_train = tf.keras.utils.to_categorical(train_df.label, num_classes=3)
valid_df["label"] = valid_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_val = tf.keras.utils.to_categorical(valid_df.label, num_classes=3)
test_df["label"] = test_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_test = tf.keras.utils.to_categorical(test_df.label, num_classes=3)
"""
## Create a custom data generator
"""
class BertSemanticDataGenerator(tf.keras.utils.Sequence):
"""Generates batches of data.
Args:
sentence_pairs: Array of premise and hypothesis input sentences.
labels: Array of labels.
batch_size: Integer batch size.
shuffle: boolean, whether to shuffle the data.
        include_targets: boolean, whether to include the labels.
Returns:
        Tuples `([input_ids, attention_mask, token_type_ids], labels)`
        (or just `[input_ids, attention_mask, token_type_ids]`
         if `include_targets=False`)
"""
def __init__(
self,
sentence_pairs,
labels,
batch_size=batch_size,
shuffle=True,
include_targets=True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.shuffle = shuffle
self.batch_size = batch_size
self.include_targets = include_targets
# Load our BERT Tokenizer to encode the text.
        # We will use the bert-base-uncased pretrained model.
self.tokenizer = transformers.BertTokenizer.from_pretrained(
"bert-base-uncased", do_lower_case=True
)
self.indexes = np.arange(len(self.sentence_pairs))
self.on_epoch_end()
def __len__(self):
# Denotes the number of batches per epoch.
return len(self.sentence_pairs) // self.batch_size
def __getitem__(self, idx):
# Retrieves the batch of index.
indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
sentence_pairs = self.sentence_pairs[indexes]
        # With the BERT tokenizer's batch_encode_plus, both sentences of a pair are
        # encoded together, separated by the [SEP] token.
encoded = self.tokenizer.batch_encode_plus(
sentence_pairs.tolist(),
add_special_tokens=True,
max_length=max_length,
return_attention_mask=True,
return_token_type_ids=True,
pad_to_max_length=True,
return_tensors="tf",
)
# Convert batch of encoded features to numpy array.
input_ids = np.array(encoded["input_ids"], dtype="int32")
attention_masks = np.array(encoded["attention_mask"], dtype="int32")
token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
# Set to true if data generator is used for training/validation.
if self.include_targets:
labels = np.array(self.labels[indexes], dtype="int32")
return [input_ids, attention_masks, token_type_ids], labels
else:
return [input_ids, attention_masks, token_type_ids]
def on_epoch_end(self):
# Shuffle indexes after each epoch if shuffle is set to True.
if self.shuffle:
np.random.RandomState(42).shuffle(self.indexes)
"""
## Build the model
"""
# Create the model under a distribution strategy scope.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
# Encoded token ids from BERT tokenizer.
input_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="input_ids"
)
    # Attention masks indicate to the model which tokens should be attended to.
attention_masks = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="attention_masks"
)
# Token type ids are binary masks identifying different sequences in the model.
token_type_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="token_type_ids"
)
# Loading pretrained BERT model.
bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased")
# Freeze the BERT model to reuse the pretrained features without modifying them.
bert_model.trainable = False
sequence_output, pooled_output = bert_model(
input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids
)
    # Add trainable layers on top of the frozen layers to adapt the pretrained features to the new data.
bi_lstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(64, return_sequences=True)
)(sequence_output)
# Applying hybrid pooling approach to bi_lstm sequence output.
avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)
max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)
concat = tf.keras.layers.concatenate([avg_pool, max_pool])
dropout = tf.keras.layers.Dropout(0.3)(concat)
output = tf.keras.layers.Dense(3, activation="softmax")(dropout)
model = tf.keras.models.Model(
inputs=[input_ids, attention_masks, token_type_ids], outputs=output
)
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="categorical_crossentropy",
metrics=["acc"],
)
print(f"Strategy: {strategy}")
model.summary()
"""
Create train and validation data generators
"""
train_data = BertSemanticDataGenerator(
train_df[["sentence1", "sentence2"]].values.astype("str"),
y_train,
batch_size=batch_size,
shuffle=True,
)
valid_data = BertSemanticDataGenerator(
valid_df[["sentence1", "sentence2"]].values.astype("str"),
y_val,
batch_size=batch_size,
shuffle=False,
)
"""
## Train the Model
Training is done only for the top layers to perform "feature extraction",
which will allow the model to use the representations of the pretrained model.
"""
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
"""
## Fine-tuning
This step must only be performed after the feature extraction model has
been trained to convergence on the new data.
This is an optional last step where `bert_model` is unfrozen and retrained
with a very low learning rate. This can deliver meaningful improvement by
incrementally adapting the pretrained features to the new data.
"""
# Unfreeze the bert_model.
bert_model.trainable = True
# Recompile the model to make the change effective.
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-5),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.summary()
"""
## Train the entire model end-to-end
"""
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
"""
## Evaluate model on the test set
"""
test_data = BertSemanticDataGenerator(
test_df[["sentence1", "sentence2"]].values.astype("str"),
y_test,
batch_size=batch_size,
shuffle=False,
)
model.evaluate(test_data, verbose=1)
"""
## Inference on custom sentences
"""
def check_similarity(sentence1, sentence2):
sentence_pairs = np.array([[str(sentence1), str(sentence2)]])
test_data = BertSemanticDataGenerator(
sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,
)
proba = model.predict(test_data)[0]
idx = np.argmax(proba)
proba = f"{proba[idx]: .2f}%"
pred = labels[idx]
return pred, proba
"""
Check results on some example sentence pairs.
"""
sentence1 = "Two women are observing something together."
sentence2 = "Two women are standing with their eyes closed."
check_similarity(sentence1, sentence2)
"""
Check results on some example sentence pairs.
"""
sentence1 = "A smiling costumed woman is holding an umbrella"
sentence2 = "A happy woman in a fairy costume holds an umbrella"
check_similarity(sentence1, sentence2)
"""
Check results on some example sentence pairs
"""
sentence1 = "A soccer game with multiple males playing"
sentence2 = "Some men are playing a sport"
check_similarity(sentence1, sentence2)
|
apache-2.0
| 7,685,159,664,534,825,000 | 29.536842 | 100 | 0.687004 | false |
tdyas/pants
|
tests/python/pants_test/backend/jvm/tasks/test_jar_publish_integration.py
|
1
|
16213
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
from pants.base.build_environment import get_buildroot
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
from pants.util.contextutil import open_zip, temporary_dir
from pants.util.dirutil import safe_rmtree
def shared_artifacts(version, extra_jar=None):
published_file_list = [
f"ivy-{version}.xml",
f"hello-greet-{version}.jar",
f"hello-greet-{version}.pom",
f"hello-greet-{version}-sources.jar",
]
if extra_jar:
published_file_list.append(extra_jar)
return {f"org/pantsbuild/testproject/publish/hello-greet/{version}": published_file_list}
# TODO: Right now some options are set via config and some via cmd-line flags. Normalize this?
def publish_extra_config(unique_config):
return {
"GLOBAL": {
# Turn off --verify-config as some scopes in pants.toml will not be
# recognized due to the select few backend packages.
"verify_config": False,
"pythonpath": ["examples/src/python", "pants-plugins/src/python"],
"backend_packages": [
"example.pants_publish_plugin",
"internal_backend.repositories",
"pants.backend.codegen",
"pants.backend.jvm",
],
},
"publish.jar": {"publish_extras": {"extra_test_jar_example": unique_config}},
}
class JarPublishIntegrationTest(PantsRunIntegrationTest):
GOLDEN_DATA_DIR = "tests/python/pants_test/tasks/jar_publish_resources/"
# This is where all pushdb properties files will end up.
@property
def pushdb_root(self):
return os.path.join(get_buildroot(), "testprojects", "ivy", "pushdb")
def setUp(self):
# This attribute is required to see the full diff between ivy and pom files.
self.maxDiff = None
safe_rmtree(self.pushdb_root)
def tearDown(self):
safe_rmtree(self.pushdb_root)
def test_scala_publish(self):
unique_artifacts = {
"org/pantsbuild/testproject/publish/jvm-example-lib_2.12/0.0.1-SNAPSHOT": [
"ivy-0.0.1-SNAPSHOT.xml",
"jvm-example-lib_2.12-0.0.1-SNAPSHOT.jar",
"jvm-example-lib_2.12-0.0.1-SNAPSHOT.pom",
"jvm-example-lib_2.12-0.0.1-SNAPSHOT-sources.jar",
],
"org/pantsbuild/testproject/publish/hello/welcome_2.12/0.0.1-SNAPSHOT": [
"ivy-0.0.1-SNAPSHOT.xml",
"welcome_2.12-0.0.1-SNAPSHOT.jar",
"welcome_2.12-0.0.1-SNAPSHOT.pom",
"welcome_2.12-0.0.1-SNAPSHOT-sources.jar",
],
}
self.publish_test(
"testprojects/src/scala/org/pantsbuild/testproject/publish" ":jvm-run-example-lib",
dict(list(unique_artifacts.items()) + list(shared_artifacts("0.0.1-SNAPSHOT").items())),
[
"org.pantsbuild.testproject.publish/hello-greet/publish.properties",
"org.pantsbuild.testproject.publish/jvm-example-lib_2.12/publish.properties",
"org.pantsbuild.testproject.publish.hello/welcome_2.12/publish.properties",
],
extra_options=["--doc-scaladoc-skip"],
assert_publish_config_contents=True,
)
def test_java_publish(self):
self.publish_test(
"testprojects/src/java/org/pantsbuild/testproject/publish/hello/greet",
shared_artifacts("0.0.1-SNAPSHOT"),
["org.pantsbuild.testproject.publish/hello-greet/publish.properties"],
)
def test_protobuf_publish(self):
unique_artifacts = {
"org/pantsbuild/testproject/publish/protobuf/protobuf-java/0.0.1-SNAPSHOT": [
"ivy-0.0.1-SNAPSHOT.xml",
"protobuf-java-0.0.1-SNAPSHOT.jar",
"protobuf-java-0.0.1-SNAPSHOT.pom",
"protobuf-java-0.0.1-SNAPSHOT-sources.jar",
],
"org/pantsbuild/testproject/protobuf/distance/0.0.1-SNAPSHOT/": [
"ivy-0.0.1-SNAPSHOT.xml",
"distance-0.0.1-SNAPSHOT.jar",
"distance-0.0.1-SNAPSHOT.pom",
"distance-0.0.1-SNAPSHOT-sources.jar",
],
}
self.publish_test(
"testprojects/src/java/org/pantsbuild/testproject/publish/protobuf" ":protobuf-java",
unique_artifacts,
[
"org.pantsbuild.testproject.publish.protobuf/protobuf-java/" "publish.properties",
"org.pantsbuild.testproject.protobuf/distance/publish.properties",
],
extra_options=["--doc-javadoc-skip"],
)
def test_named_snapshot(self):
name = "abcdef0123456789"
self.publish_test(
"testprojects/src/java/org/pantsbuild/testproject/publish/hello/greet",
shared_artifacts(name),
["org.pantsbuild.testproject.publish/hello-greet/publish.properties"],
extra_options=[f"--named-snapshot={name}"],
)
def test_publish_override_flag_succeeds(self):
override = "com.twitter.foo#baz=0.1.0"
self.publish_test(
"testprojects/src/java/org/pantsbuild/testproject/publish/hello/greet",
shared_artifacts("0.0.1-SNAPSHOT"),
["org.pantsbuild.testproject.publish/hello-greet/publish.properties"],
extra_options=[f"--override={override}"],
)
# Collect all the common factors for running a publish_extras test, and execute the test.
def publish_extras_runner(self, extra_config=None, artifact_name=None, success_expected=True):
self.publish_test(
"testprojects/src/java/org/pantsbuild/testproject/publish/hello/greet",
shared_artifacts("0.0.1-SNAPSHOT", artifact_name),
["org.pantsbuild.testproject.publish/hello-greet/publish.properties"],
extra_options=["--doc-javadoc-skip"],
extra_config=extra_config,
success_expected=success_expected,
)
#
# Run through all the permutations of the config parameters for publish_extras.
#
def test_publish_extras_name_classifier(self):
self.publish_extras_runner(
extra_config=publish_extra_config(
{"override_name": "{target_provides_name}-extra_example", "classifier": "classy"}
),
artifact_name="hello-greet-extra_example-0.0.1-SNAPSHOT-classy.jar",
)
def test_publish_extras_name(self):
self.publish_extras_runner(
extra_config=publish_extra_config(
{"override_name": "{target_provides_name}-extra_example"}
),
artifact_name="hello-greet-extra_example-0.0.1-SNAPSHOT.jar",
)
def test_publish_extras_name_extension(self):
self.publish_extras_runner(
extra_config=publish_extra_config(
{"override_name": "{target_provides_name}-extra_example", "extension": "zip"}
),
artifact_name="hello-greet-extra_example-0.0.1-SNAPSHOT.zip",
)
def test_publish_extras_extension(self):
self.publish_extras_runner(
extra_config=publish_extra_config({"extension": "zip"}),
artifact_name="hello-greet-0.0.1-SNAPSHOT.zip",
)
def test_publish_extras_extension_classifier(self):
self.publish_extras_runner(
extra_config=publish_extra_config({"classifier": "classy", "extension": "zip"}),
artifact_name="hello-greet-0.0.1-SNAPSHOT-classy.zip",
)
def test_publish_extras_classifier(self):
self.publish_extras_runner(
extra_config=publish_extra_config({"classifier": "classy"}),
artifact_name="hello-greet-0.0.1-SNAPSHOT-classy.jar",
)
# This test doesn't specify a proper set of parameters that uniquely name the extra artifact, and
# should fail with an error from pants.
def test_publish_extras_invalid_args(self):
self.publish_extras_runner(
extra_config=publish_extra_config({"extension": "jar"}),
artifact_name="hello-greet-0.0.1-SNAPSHOT.jar",
success_expected=False,
)
def test_scala_publish_classifiers(self):
self.publish_test(
"testprojects/src/scala/org/pantsbuild/testproject/publish/classifiers",
dict(
{
"org/pantsbuild/testproject/publish/classifiers_2.12/0.0.1-SNAPSHOT": [
"classifiers_2.12-0.0.1-SNAPSHOT.pom",
"ivy-0.0.1-SNAPSHOT.xml",
]
}
),
[],
assert_publish_config_contents=True,
)
def test_override_via_coord(self):
self.publish_test(
target="testprojects/src/scala/org/pantsbuild/testproject/publish/classifiers",
artifacts=dict(
{
"org/pantsbuild/testproject/publish/classifiers_2.12/1.2.3-SNAPSHOT": [
"classifiers_2.12-1.2.3-SNAPSHOT.pom",
"ivy-1.2.3-SNAPSHOT.xml",
]
}
),
pushdb_files=[],
extra_options=["--override=org.pantsbuild.testproject.publish#classifiers_2.12=1.2.3"],
assert_publish_config_contents=True,
)
def test_override_via_address(self):
target = "testprojects/src/scala/org/pantsbuild/testproject/publish/classifiers"
self.publish_test(
target=target,
artifacts=dict(
{
"org/pantsbuild/testproject/publish/classifiers_2.12/1.2.3-SNAPSHOT": [
"classifiers_2.12-1.2.3-SNAPSHOT.pom",
"ivy-1.2.3-SNAPSHOT.xml",
]
}
),
pushdb_files=[],
extra_options=[f"--override={target}=1.2.3"],
assert_publish_config_contents=True,
)
def test_invalidate_resources(self):
"""Tests that resource changes invalidate publishes."""
source_root = "testprojects/src/java"
target_relative_to_sourceroot = "org/pantsbuild/testproject/publish/hello/greet"
target = os.path.join(source_root, target_relative_to_sourceroot)
resource_relative_to_sourceroot = os.path.join(target_relative_to_sourceroot, "TEMP.txt")
resource = os.path.join(source_root, resource_relative_to_sourceroot)
with self.temporary_workdir() as workdir:
def publish(resource_content):
with temporary_dir() as publish_dir:
with self.temporary_file_content(resource, resource_content):
# Validate that the target depends on the relevant resource.
self.assertIn(
resource,
self.run_pants(["filedeps", "--transitive", target]).stdout_data,
)
pants_run = self.run_pants_with_workdir(
[
"publish.jar",
f"--local={publish_dir}",
"--named-snapshot=X",
"--no-dryrun",
target,
],
workdir=workdir,
)
self.assert_success(pants_run)
# Validate that the content in the resulting jar matches.
jar = os.path.join(
publish_dir,
"org/pantsbuild/testproject/publish/hello-greet/X/hello-greet-X.jar",
)
with open_zip(jar, mode="r") as j:
with j.open(resource_relative_to_sourceroot) as jar_entry:
self.assertEqual(resource_content, jar_entry.read())
# Publish the same target twice with different resource content.
publish(b"one")
publish(b"two")
def publish_test(
self,
target,
artifacts,
pushdb_files,
extra_options=None,
extra_config=None,
extra_env=None,
success_expected=True,
assert_publish_config_contents=False,
):
"""Tests that publishing the given target results in the expected output.
:param target: Target to test.
:param artifacts: A map from directories to a list of expected filenames.
:param pushdb_files: list of pushdb files that would be created if this weren't a local publish
:param extra_options: Extra command-line options to the pants run.
:param extra_config: Extra pants.toml configuration for the pants run.
:param extra_env: Extra environment variables for the pants run.
:param assert_publish_config_contents: Test the contents of the generated ivy and pom file.
If set to True, compares the generated ivy.xml and pom files in
tests/python/pants_test/tasks/jar_publish_resources/<package_name>/<artifact_name>/
"""
with temporary_dir() as publish_dir:
options = [f"--local={publish_dir}", "--no-dryrun", "--force"]
if extra_options:
options.extend(extra_options)
pants_run = self.run_pants(
["publish.jar"] + options + [target], config=extra_config, extra_env=extra_env
)
if success_expected:
self.assert_success(
pants_run, "'pants goal publish' expected success, but failed instead."
)
else:
self.assert_failure(
pants_run, "'pants goal publish' expected failure, but succeeded instead."
)
return
# New pushdb directory should be created for all artifacts.
for pushdb_file in pushdb_files:
pushdb_dir = os.path.dirname(os.path.join(self.pushdb_root, pushdb_file))
self.assertTrue(os.path.exists(pushdb_dir))
# But because we are doing local publishes, no pushdb files are created
for pushdb_file in pushdb_files:
self.assertFalse(os.path.exists(os.path.join(self.pushdb_root, pushdb_file)))
for directory, artifact_list in artifacts.items():
for artifact in artifact_list:
artifact_path = os.path.join(publish_dir, directory, artifact)
self.assertTrue(os.path.exists(artifact_path))
if assert_publish_config_contents:
if artifact.endswith("xml") or artifact.endswith("pom"):
self.compare_file_contents(artifact_path, directory)
def compare_file_contents(self, artifact_path, directory):
"""Tests the ivy.xml and pom.
:param artifact_path: Path of the artifact
:param directory: Directory where the artifact resides.
:return:
"""
# Strip away the version number
[package_dir, artifact_name, version] = directory.rsplit(os.path.sep, 2)
file_name = os.path.basename(artifact_path)
golden_file_nm = os.path.join(
JarPublishIntegrationTest.GOLDEN_DATA_DIR,
package_dir.replace(os.path.sep, "."),
artifact_name,
file_name,
)
with open(artifact_path, "r") as test_file:
generated_file = test_file.read()
with open(golden_file_nm, "r") as golden_file:
golden_file_contents = golden_file.read()
# Remove the publication sha attribute from ivy.xml
if artifact_path.endswith(".xml"):
generated_file = re.sub(r"publication=.*", "/>", generated_file)
return self.assertMultiLineEqual(generated_file, golden_file_contents)
|
apache-2.0
| -3,961,776,824,487,096,300 | 42.119681 | 103 | 0.576266 | false |
yakky/django-localflavor
|
localflavor/us/forms.py
|
1
|
4819
|
"""
USA-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select, CharField
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
ssn_re = re.compile(r"^(?P<area>\d{3})[-\ ]?(?P<group>\d{2})[-\ ]?(?P<serial>\d{4})$")
class USZipCodeField(RegexField):
""""
A form field that validates input as a U.S. ZIP code. Valid formats are
XXXXX or XXXXX-XXXX.
"""
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX or XXXXX-XXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(USZipCodeField, self).__init__(r'^\d{5}(?:-\d{4})?$',
max_length, min_length, *args, **kwargs)
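# A quick illustration of the accepted formats (hypothetical values), assuming
# a configured Django settings module so the field can be instantiated:
#
#   >>> USZipCodeField().clean('94043')
#   '94043'
#   >>> USZipCodeField().clean('94043-1351')
#   '94043-1351'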
class USPhoneNumberField(CharField):
"""
A form field that validates input as a U.S. phone number.
"""
default_error_messages = {
'invalid': _('Phone numbers must be in XXX-XXX-XXXX format.'),
}
def clean(self, value):
super(USPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub('(\(|\)|\s+)', '', smart_text(value))
m = phone_digits_re.search(value)
if m:
return '%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
raise ValidationError(self.error_messages['invalid'])
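# Illustration of the normalisation performed by clean() (hypothetical number);
# parentheses and whitespace are stripped before the regex is applied:
#
#   >>> USPhoneNumberField().clean('(555) 123-4567')
#   '555-123-4567'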
class USSocialSecurityNumberField(Field):
"""
A United States Social Security number.
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XX-XXXX format.
* No group consists entirely of zeroes.
* The leading group is not "666" (block "666" will never be allocated).
* The number is not in the promotional block 987-65-4320 through
987-65-4329, which are permanently invalid.
* The number is not one known to be invalid due to otherwise widespread
promotional use or distribution (e.g., the Woolworth's number or the
1962 promotional number).
"""
default_error_messages = {
'invalid': _('Enter a valid U.S. Social Security number in XXX-XX-XXXX format.'),
}
def clean(self, value):
super(USSocialSecurityNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
match = re.match(ssn_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
area, group, serial = match.groupdict()['area'], match.groupdict()['group'], match.groupdict()['serial']
# First pass: no blocks of all zeroes.
if area == '000' or group == '00' or serial == '0000':
raise ValidationError(self.error_messages['invalid'])
# Second pass: promotional and otherwise permanently invalid numbers.
if (area == '666' or (area == '987' and
group == '65' and
4320 <= int(serial) <= 4329) or
value == '078-05-1120' or
value == '219-09-9999'):
raise ValidationError(self.error_messages['invalid'])
return '%s-%s-%s' % (area, group, serial)
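# Illustration of the rules above (made-up numbers): separators are optional on
# input and the value is normalised to XXX-XX-XXXX, while known-invalid numbers
# such as the Woolworth's number are rejected:
#
#   >>> USSocialSecurityNumberField().clean('123456789')
#   '123-45-6789'
#   >>> USSocialSecurityNumberField().clean('078-05-1120')   # raises ValidationError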
class USStateField(Field):
"""
A form field that validates its input is a U.S. state name or abbreviation.
It normalizes the input to the standard two-letter postal service
abbreviation for the given state.
"""
default_error_messages = {
'invalid': _('Enter a U.S. state or territory.'),
}
def clean(self, value):
from .us_states import STATES_NORMALIZED
super(USStateField, self).clean(value)
if value in EMPTY_VALUES:
return ''
try:
value = value.strip().lower()
except AttributeError:
pass
else:
try:
return STATES_NORMALIZED[value.strip().lower()]
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
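# Illustration of the normalisation described above (hypothetical input):
#
#   >>> USStateField().clean('Ohio')
#   'OH'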
class USStateSelect(Select):
"""
A Select widget that uses a list of U.S. states/territories as its choices.
"""
def __init__(self, attrs=None):
from .us_states import STATE_CHOICES
super(USStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class USPSSelect(Select):
"""
A Select widget that uses a list of US Postal Service codes as its
choices.
"""
def __init__(self, attrs=None):
from .us_states import USPS_CHOICES
super(USPSSelect, self).__init__(attrs, choices=USPS_CHOICES)
|
bsd-3-clause
| 2,101,800,050,063,301,000 | 33.92029 | 112 | 0.600332 | false |
rwl/PyCIM
|
CIM14/IEC61970/Core/ReportingGroup.py
|
1
|
5830
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.IdentifiedObject import IdentifiedObject
class ReportingGroup(IdentifiedObject):
"""A reporting group is used for various ad-hoc groupings used for reporting.
"""
def __init__(self, ReportingSuperGroup=None, TopologicalNode=None, BusNameMarker=None, PowerSystemResource=None, *args, **kw_args):
"""Initialises a new 'ReportingGroup' instance.
@param ReportingSuperGroup: Reporting super group to which this reporting group belongs.
@param TopologicalNode: The topological nodes that belong to the reporting group.
@param BusNameMarker: The BusNameMarkers that belong to this reporting group.
@param PowerSystemResource: PSRs that belong to this reporting group.
"""
self._ReportingSuperGroup = None
self.ReportingSuperGroup = ReportingSuperGroup
self._TopologicalNode = []
self.TopologicalNode = [] if TopologicalNode is None else TopologicalNode
self._BusNameMarker = []
self.BusNameMarker = [] if BusNameMarker is None else BusNameMarker
self._PowerSystemResource = []
self.PowerSystemResource = [] if PowerSystemResource is None else PowerSystemResource
super(ReportingGroup, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["ReportingSuperGroup", "TopologicalNode", "BusNameMarker", "PowerSystemResource"]
_many_refs = ["TopologicalNode", "BusNameMarker", "PowerSystemResource"]
def getReportingSuperGroup(self):
"""Reporting super group to which this reporting group belongs.
"""
return self._ReportingSuperGroup
def setReportingSuperGroup(self, value):
if self._ReportingSuperGroup is not None:
filtered = [x for x in self.ReportingSuperGroup.ReportingGroup if x != self]
self._ReportingSuperGroup._ReportingGroup = filtered
self._ReportingSuperGroup = value
if self._ReportingSuperGroup is not None:
if self not in self._ReportingSuperGroup._ReportingGroup:
self._ReportingSuperGroup._ReportingGroup.append(self)
ReportingSuperGroup = property(getReportingSuperGroup, setReportingSuperGroup)
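# The property above keeps both ends of the association in sync; a hypothetical
# session (ReportingSuperGroup comes from the same CIM14 package) might look like:
#
#   rg = ReportingGroup(name="Group A")
#   sg = ReportingSuperGroup(name="Super")
#   rg.ReportingSuperGroup = sg
#   assert rg in sg.ReportingGroup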
def getTopologicalNode(self):
"""The topological nodes that belong to the reporting group.
"""
return self._TopologicalNode
def setTopologicalNode(self, value):
for x in self._TopologicalNode:
x.ReportingGroup = None
for y in value:
y._ReportingGroup = self
self._TopologicalNode = value
TopologicalNode = property(getTopologicalNode, setTopologicalNode)
def addTopologicalNode(self, *TopologicalNode):
for obj in TopologicalNode:
obj.ReportingGroup = self
def removeTopologicalNode(self, *TopologicalNode):
for obj in TopologicalNode:
obj.ReportingGroup = None
def getBusNameMarker(self):
"""The BusNameMarkers that belong to this reporting group.
"""
return self._BusNameMarker
def setBusNameMarker(self, value):
for x in self._BusNameMarker:
x.ReportingGroup = None
for y in value:
y._ReportingGroup = self
self._BusNameMarker = value
BusNameMarker = property(getBusNameMarker, setBusNameMarker)
def addBusNameMarker(self, *BusNameMarker):
for obj in BusNameMarker:
obj.ReportingGroup = self
def removeBusNameMarker(self, *BusNameMarker):
for obj in BusNameMarker:
obj.ReportingGroup = None
def getPowerSystemResource(self):
"""PSR's which belong to this reporting group.
"""
return self._PowerSystemResource
def setPowerSystemResource(self, value):
for p in self._PowerSystemResource:
filtered = [q for q in p.ReportingGroup if q != self]
p._ReportingGroup = filtered
for r in value:
if self not in r._ReportingGroup:
r._ReportingGroup.append(self)
self._PowerSystemResource = value
PowerSystemResource = property(getPowerSystemResource, setPowerSystemResource)
def addPowerSystemResource(self, *PowerSystemResource):
for obj in PowerSystemResource:
if self not in obj._ReportingGroup:
obj._ReportingGroup.append(self)
self._PowerSystemResource.append(obj)
def removePowerSystemResource(self, *PowerSystemResource):
for obj in PowerSystemResource:
if self in obj._ReportingGroup:
obj._ReportingGroup.remove(self)
self._PowerSystemResource.remove(obj)
|
mit
| -2,979,352,487,377,241,600 | 39.486111 | 135 | 0.693139 | false |
f0rk/sigdebug
|
sigdebug/__init__.py
|
1
|
1776
|
# Copyright 2011 Ryan P. Kelly <rpkelly@cpan.org>
#
# This code was inspired by this thread on stackoverflow:
# http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application
#
# This code is BSD licensed, please see LICENSE for the full text.
import sys
import signal
import traceback
import threading
from pdb import Pdb
__version__ = "0.2"
def interactive(signum, frame):
"""Enter into pdb at the location the process was interrupted.
:param signum: The signal that was received.
:param frame: A python stack frame.
"""
banner = "Received signal %s, Entering console.\nTraceback:\n" % signum
banner += "".join(traceback.format_stack(frame))
print(banner)
Pdb().set_trace(frame)
def stack(signum, frame):
"""When invoked, print out the Traceback for the provided stack frame.
:param signum: The signal that was received.
:param frame: A python stack frame.
"""
print("Traceback:\n" + "".join(traceback.format_stack(frame)))
def threadstacks(signum, frame):
"""Print out the Traceback of every thread currently available.
:param signum: The signal that was received.
:param frame: A python stack frame.
"""
for thread, stack in sys._current_frames().items():
message = "Thread #%s Traceback:\n" % thread
message += "".join(traceback.format_stack(frame))
print(message)
def register(signum, handler):
"""Register the given handler function for the given signal.
:param signum: The signal to respond to.
:param handler: The handler function.
"""
signal.signal(signum, handler)
def debug():
"""This function installs the interactive signal handler for SIGUSR1."""
register(signal.SIGUSR1, interactive)
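# A minimal usage sketch (assumes a POSIX system; the PID below is hypothetical):
#
#   import signal
#   import sigdebug
#
#   sigdebug.debug()                                    # pdb console on SIGUSR1
#   sigdebug.register(signal.SIGUSR2, sigdebug.stack)   # stack dump on SIGUSR2
#
# then, from a shell:  kill -USR1 12345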
|
bsd-2-clause
| 5,138,224,729,121,091,000 | 26.75 | 97 | 0.693131 | false |
mswart/acme-mgmtserver
|
acmems/storages.py
|
1
|
2870
|
import os
import os.path
from hashlib import sha384
from datetime import datetime, timedelta
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from acmems.config import ConfigurationError
class StorageImplementor():
def __init__(self, type, name, options):
self.type = type
self.name = name
self.parse(options)
class NoneStorageImplementor(StorageImplementor):
def parse(self, options):
if len(options) > 0:
raise ConfigurationError('none storage does not support any options, but found "{}"'.format('", "'.join(o[0] for o in options)))
def from_cache(self, csr):
return None
def add_to_cache(self, csr, certs):
return None
class FileStorageImplementor(StorageImplementor):
def parse(self, options):
self.directory = None
self.renew_within = None
for option, value in options:
if option == 'directory':
self.directory = value
elif option == 'renew-within':
self.renew_within = timedelta(days=int(value))
else:
raise ConfigurationError('FileStorage: unknown option "{}"'.format(option))
if self.directory is None:
raise ConfigurationError('FileStorage: option directory is required')
if self.renew_within is None:
self.renew_within = timedelta(days=14)
def cache_dir(self, csr):
hash = sha384(csr).hexdigest()
return os.path.join(self.directory, hash[0:2], hash[2:])
def from_cache(self, csr):
dir = self.cache_dir(csr)
if not os.path.isfile(os.path.join(dir, 'csr.pem')):
return None
if not os.path.isfile(os.path.join(dir, 'cert.pem')):
return None
if csr != open(os.path.join(dir, 'csr.pem'), 'rb').read():
# should not happen!!
return None
certpem = open(os.path.join(dir, 'cert.pem'), 'rb').read()
cert = x509.load_pem_x509_certificate(certpem, default_backend())
current_validation_time = cert.not_valid_after - datetime.now()
if current_validation_time < self.renew_within:
return None
else:
return certpem.decode('utf-8')
def add_to_cache(self, csr, cert):
dir = self.cache_dir(csr)
os.makedirs(dir, exist_ok=True)
with open(os.path.join(dir, 'csr.pem'), 'bw') as f:
f.write(csr)
with open(os.path.join(dir, 'cert.pem'), 'w') as f:
f.write(cert)
return True
implementors = {
'none': NoneStorageImplementor,
'file': FileStorageImplementor,
}
def setup(type, name, options):
try:
return implementors[type](type, name, options)
except KeyError:
raise ConfigurationError('Unsupported storage type "{}"'.format(type))
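# Hypothetical wiring, mirroring the option names handled by parse() above:
#
#   storage = setup('file', 'certs', [('directory', '/var/lib/acmems/certs'),
#                                     ('renew-within', '21')])
#   pem = storage.from_cache(csr_bytes)   # returns None on a cache miss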
|
gpl-3.0
| -8,599,793,053,521,439,000 | 31.613636 | 140 | 0.613937 | false |
arthurtyukayev/python-safer
|
safer/api.py
|
1
|
1255
|
from requests import Session
SAFER_KEYWORD_URL = 'https://safer.fmcsa.dot.gov/keywordx.asp'
SAFER_QUERY_URL = 'https://safer.fmcsa.dot.gov/query.asp'
sess = Session()
sess.headers.update({
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Host': 'safer.fmcsa.dot.gov',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:68.0) Gecko/20100101 Firefox/68.0'
})
def api_call_search(query):
r = sess.get(url=SAFER_KEYWORD_URL, params={
'searchstring': '*{}*'.format(query.upper()),
'SEARCHTYPE': ''
})
return r
def api_call_get_usdot(usdot):
r = sess.post(url=SAFER_QUERY_URL, data={
'searchType': 'ANY',
'query_type': 'queryCarrierSnapshot',
'query_param': 'USDOT',
'query_string': usdot
})
return r
def api_call_get_mcmx(mcmx):
r = sess.post(url=SAFER_QUERY_URL, data={
'searchType': 'ANY',
'query_type': 'queryCarrierSnapshot',
'query_param': 'MC_MX',
'query_string': mcmx
})
return r
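# Illustrative calls (the search term and USDOT number are made up):
#
#   r = api_call_search('acme trucking')   # keyword search results page
#   r = api_call_get_usdot('1234567')      # carrier snapshot by USDOT number
#   print(r.status_code)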
|
mit
| 7,464,027,450,929,518,000 | 26.888889 | 102 | 0.603984 | false |
todatamining/textclassifiction
|
utility.py
|
1
|
3762
|
# -*- coding: utf-8 -*-
# Author: HW <todatamining@gmail.com>
# See LICENSE.txt for details.
#!/usr/bin/env python
import re
import os
import math
import sys
import pickle
import urllib2
from nltk.stem.wordnet import WordNetLemmatizer
import os.path,subprocess
lmtzr = WordNetLemmatizer()
this_dir,this_filename = os.path.split(os.path.abspath(__file__))
sys.path.append(this_dir)
import en
from globalvariable import *
CACHE_DIR = "./.cache"
def myprint(sstr):
if DEBUG_ON:
print sstr
def saveObj(obj,writeTo):
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
with open(CACHE_DIR+"/"+writeTo,"wb") as f:
pickle.dump(obj,f)
def loadObj(loadFrom):
try:
with open(CACHE_DIR+"/"+loadFrom,"rb") as f:
return pickle.load(f)
except:
return None
def genFullPath(category,filename):
return TRAININGSET_DIR+category+"/"+filename
def contents(filename):
with file(filename) as f: return f.read().decode('utf-8')
def convertNoun(srclst):
return [lmtzr.lemmatize(item) for item in srclst]
def convertVerb(srclst):
dstlst = []
itemnew=""
for item in srclst:
#print(item) ############################when nos lib give error
#if (item.endswith("ed") or item.endswith("ing")) \
if en.is_verb(item) \
and (not en.is_noun(item)) \
and (not en.is_adjective(item)) \
and (not en.is_adverb(item)) \
and (item not in WIERDWORDS):
try:
itemnew = en.verb.present(item)
except:
print "unrecognized word:",item
itemnew = item
else:
itemnew = item;
dstlst.append(itemnew)
return dstlst
def removeWithStoplist(srclst,stopfile):
contt = contents(stopfile)
stoplist = contt.split("\n")
return [item.lower() for item in srclst if item.lower() not in stoplist]
def getFileInsideDir(dirname):
return os.listdir(dirname)
def extractWordList(filename):
contt = contents(filename)
try:
contt=contt.replace("’", "'");
contt=contt.replace("“", "'");
contt=contt.replace("”", "'");
contt=contt.replace("—", "-");
contt=contt.replace("-", " ");
contt=contt.replace("_", " ");
contt=contt.replace("'s", "");
contt=contt.replace("/", " ");
contt=contt.replace("\\", " ");
contt=re.sub(r'\b[^a-zA-Z]{2,}\b',r' ',contt);
contt=re.sub(r'\b[^a-zA-Z]{2,}',r' ',contt);
contt=re.sub(r'[\n\r\t\(\)\.",\?:;!+]+',r' ',contt);
except UnicodeError:
pass
lst = contt.split(" ")
a1=len(lst)
lst=removeWithStoplist(lst,os.path.join(this_dir,"./stoplist1"))
lst=removeWithStoplist(lst,os.path.join(this_dir,"./stoplist2"))
lst=convertVerb(lst)
lst=convertNoun(lst)
#lst=list(set(lst))
a2=len(lst)
#print(a1,a2)
return lst
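# Sketch of the pipeline above on a hypothetical article file: punctuation and
# stop words are removed, then verbs and nouns are normalised.
#
#   words = extractWordList(genFullPath("sports", "20160210100000match.txt"))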
def getMainContent(url):
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
tmpfile = "./.cache/tmp.txt"
otestfile = open(tmpfile, 'w')
otestfile.write("")
rslt = "ERROR"
try:
p1 = subprocess.Popen(["/usr/bin/java","-cp","./gettestset/boilerpipe/create_jar/:./gettestset/boilerpipe/boilerpipe-1.2.0.jar:./gettestset/boilerpipe/lib/*", "Getwebcontent",url], stdout=subprocess.PIPE)
print "extract main content of ",url[0:50]+"..."
rslt = p1.stdout.read()
rsltList = rslt.split("\n")
rslt = "\n".join([i for i in rsltList if i!=""][1:])
rslt = "ERROR" if ""==rslt else rslt
except urllib2.HTTPError:
print "ERROR",url
except urllib2.URLError, e:
print "There was an error: %r" % e
otestfile.write(rslt)
return tmpfile
|
gpl-2.0
| 8,074,592,267,643,158,000 | 28.559055 | 212 | 0.59057 | false |
derdmitry/socraticqs2
|
mysite/mysite/urls.py
|
1
|
1282
|
from django.conf.urls import patterns, include, url
from django.apps import apps
from mysite.views import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
(r'^$', home_page),
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^mysite/', include('mysite.foo.urls')),
(r'^ct/', include('ct.urls', namespace='ct')),
(r'^fsm/', include('fsm.urls', namespace='fsm')),
# Login / logout.
(r'^login/$', 'psa.views.custom_login'),
(r'^logout/$', logout_page, {'next_page': '/login/'}),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^email-sent/$', 'psa.views.validation_sent'),
url('', include('social.apps.django_app.urls', namespace='social')),
url(r'^tmp-email-ask/$', 'psa.views.ask_stranger'),
url(r'^set-pass/$', 'psa.views.set_pass'),
url(r'^done/$', 'psa.views.done'),
)
if apps.is_installed('lti'):
urlpatterns += patterns(
'',
url(r'^lti/', include('lti.urls', namespace='lti')),
)
|
apache-2.0
| -236,776,820,969,100,900 | 28.136364 | 72 | 0.614665 | false |
Eficent/odoo-operating-unit
|
sale_stock_operating_unit/models/sale.py
|
1
|
2065
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Eficent (<http://www.eficent.com/>)
# <contact@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp.tools.translate import _
class SaleShop(orm.Model):
_inherit = 'sale.shop'
def _check_warehouse_operating_unit(self, cr, uid, ids, context=None):
for r in self.browse(cr, uid, ids, context=context):
if r.warehouse_id and r.operating_unit_id and \
r.warehouse_id.operating_unit_id != r.operating_unit_id:
return False
return True
_constraints = [
(_check_warehouse_operating_unit,
'The Operating Unit in the Warehouse must be the same as in the '
'Sale Shop.', ['operating_unit_id', 'warehouse_id'])]
class SaleOrder(orm.Model):
_inherit = 'sale.order'
def _prepare_order_picking(self, cr, uid, order, context=None):
res = super(SaleOrder, self)._prepare_order_picking(cr, uid, order,
context=context)
if order.operating_unit_id:
res.update({
'operating_unit_id': order.operating_unit_id.id
})
return res
|
agpl-3.0
| 5,402,796,366,250,727,000 | 38.711538 | 78 | 0.583051 | false |
Geoportail-Luxembourg/geoportailv3
|
geoportal/geoportailv3_geoportal/lib/sw_helper.py
|
1
|
1302
|
import os
import glob
import time
UNUSED = '/static-ngeo/UNUSED_CACHE_VERSION/build/'
BUILD_PATH = '/app/geoportailv3_geoportal/static-ngeo/build'
def get_built_filenames(pattern):
return [os.path.basename(name) for name in glob.glob(BUILD_PATH + '/' + pattern)]
def get_urls(request):
main_js_url = UNUSED + get_built_filenames('main.*.js')[0]
main_css_url = UNUSED + get_built_filenames('main.*.css')[0]
gov_light_url = UNUSED + get_built_filenames('gov-light.*.png')[0]
urls = [
'/',
'/dynamic.js?interface=main',
'/getuserinfo',
'/themes?version=2&background=background&interface=main&catalogue=true&min_levels=1',
request.static_path('geoportailv3_geoportal:static-ngeo/images/arrow.png'),
main_js_url,
main_css_url,
gov_light_url
]
if 'dev' in request.params:
urls.append('/dev/main.html')
urls.append('/dev/main.css')
urls.append('/dev/main.js')
woffs = glob.glob('/app/geoportailv3_geoportal/static-ngeo/build/*.woff')
for stuff in get_built_filenames('*.woff'):
urls.append(UNUSED + stuff)
for lang in ['fr', 'en', 'lb', 'de']:
urls.append(request.static_path('geoportailv3_geoportal:static-ngeo/build/' + lang + '.json'))
return urls
|
mit
| -468,138,615,993,281,700 | 30.756098 | 102 | 0.635177 | false |
arpho/mmasgis5
|
mmasgis/pvListQueryBuilderBase.py
|
1
|
2003
|
from constants import *
class PvListQueryBuilderBase:
"""
@param utbs:[Utb]
"""
def __init__(self,utbs):
self.utbs=utbs
self.cons=cons
def usesIstat(self):
b=False
for u in self.utbs:
b=b or u.usesIstat()
return b
def usesCap(self):
b=False
for u in self.utbs:
b=b or u.usesCap()
return b
"""
extracts the CAPs (postal codes) present in self.utbs
@return: [CAP]
"""
def getCAPusers(self):
l=[]
for i in self.utbs:
if i.usesCap():
l.append(i)
return l
def getIstatUser(self):
l=[]
for i in self.utbs:
if i.usesIstat():
l.append(i)
return l
def combineIstat(self):
c=""
for u in self.getIstatUser():
c+=u.getSelect()
c+=" union "
c=c[0:len(c)-6]
return c
def getCapQuery(self):
cap=self.getCAPusers()
query=""
queryCap=""
first=True
for c in cap:
if not first:
queryCap+=" or "
first=False
first=False
queryCap+=c.getSelect()
#print "capQuery:",queryCap
return queryCap
def getQuery(self):
query=""
if(self.usesCap()):
first=True
cap=self.getCAPusers()
query=""
queryCap=""
for c in cap:
if not first:
queryCap+=" or "
first=False
first=False
queryCap+=c.getSelect()
query=self.cons.queryListByIstat.format(queryCap)
if( self.usesIstat()):
query+=" or tc_istat_id in ("
query+=self.combineIstat()
query+=")"
return query
def getIstatQuery(self):
return "("+self.combineIstat()+")"
def getFullQuery(self):
query=""
if(self.usesCap() and self.usesIstat()):
query=self.cons.queryListByIstat.format(self.getCapQuery())
query+=" or tc_istat_id in("
query+=self.combineIstat()
query+=")"
return query
if(self.usesCap()):
query=self.cons.queryListByIstat.format(self.getCapQuery())
return query
if(self.usesIstat()):
q=" tc_istat_id in ("
q+=self.combineIstat()
q+=")"
query=self.cons.queryListByIstat.format(q)
return query
return query
|
mit
| -6,272,507,948,375,201,000 | 16.275862 | 62 | 0.612581 | false |
RayRuizhiLiao/ITK_4D
|
Wrapping/Generators/Python/Tests/GeodesicActiveContourImageFilter.py
|
1
|
6544
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# GeodesicActiveContourImageFilter.py
# Translated by Charl P. Botha <http://cpbotha.net/> from the cxx original.
# $Id: GeodesicActiveContourImageFilter.py,v 1.1 2006/09/06 20:58:42 glehmann
# Exp $
# example runs:
# ------------
# 1. Left ventricle:
# python GeodesicActiveContourImageFilter.py \
# ../Data/BrainProtonDensitySlice.png lventricle.png \
# 81 114 5 1 -0.5 3 2
#
# 2. White matter:
# python GeodesicActiveContourImageFilter.py \
# ../Data/BrainProtonDensitySlice.png wmatter.png \
# 56 92 5 1 -0.3 2 10
#
# See the ITK Software Guide, section 9.3.3 "Geodesic Active Contours
# Segmentation" as well as the CXX example for more comments.
from __future__ import print_function
import itk
from sys import argv, stderr
import os
itk.auto_progress(2)
def main():
if len(argv) < 10:
errMsg = "Missing parameters\n" \
"Usage: %s\n" % (argv[0],) + \
" inputImage outputImage\n" \
" seedX seedY InitialDistance\n" \
" Sigma SigmoidAlpha SigmoidBeta\n" \
" PropagationScaling\n"
print(errMsg, file=stderr)
return
# We're going to build the following pipelines:
# 1. reader -> smoothing -> gradientMagnitude -> sigmoid -> FI
# 2. fastMarching -> geodesicActiveContour(FI) -> thresholder -> writer
# The output of pipeline 1 is a feature image that is used by the
# geodesicActiveContour object. Also see figure 9.18 in the ITK
# Software Guide.
# we want to know what is happening
# itk.auto_progress(True)
InternalPixelType = itk.F
Dimension = 2
InternalImageType = itk.Image[InternalPixelType, Dimension]
OutputPixelType = itk.UC
OutputImageType = itk.Image[OutputPixelType, Dimension]
reader = itk.ImageFileReader[InternalImageType].New(FileName=argv[1])
# needed to give the size to the fastmarching filter
reader.Update()
outputDirectory = os.path.dirname(argv[2])
smoothing = itk.CurvatureAnisotropicDiffusionImageFilter[
InternalImageType,
InternalImageType].New(
reader,
TimeStep=0.125,
NumberOfIterations=5,
ConductanceParameter=9.0)
gradientMagnitude = itk.GradientMagnitudeRecursiveGaussianImageFilter[
InternalImageType,
InternalImageType].New(
smoothing,
Sigma=float(argv[6]))
sigmoid = itk.SigmoidImageFilter[InternalImageType, InternalImageType].New(
gradientMagnitude,
OutputMinimum=0.0,
OutputMaximum=1.1,
Alpha=float(argv[7]),
Beta=float(argv[8]))
seedPosition = itk.Index[2]()
seedPosition.SetElement(0, int(argv[3]))
seedPosition.SetElement(1, int(argv[4]))
node = itk.LevelSetNode[InternalPixelType, Dimension]()
node.SetValue(-float(argv[5]))
node.SetIndex(seedPosition)
seeds = itk.VectorContainer[
itk.UI, itk.LevelSetNode[InternalPixelType, Dimension]].New()
seeds.Initialize()
seeds.InsertElement(0, node)
fastMarching = itk.FastMarchingImageFilter[
InternalImageType,
InternalImageType].New(
sigmoid,
TrialPoints=seeds,
SpeedConstant=1.0,
OutputSize=reader.GetOutput().GetBufferedRegion().GetSize())
geodesicActiveContour = itk.GeodesicActiveContourLevelSetImageFilter[
InternalImageType,
InternalImageType,
InternalPixelType].New(
fastMarching,
# it is required to use the explicitly the FeatureImage
# - itk segfault without that :-(
FeatureImage=sigmoid.GetOutput(),
PropagationScaling=float(argv[9]),
CurvatureScaling=1.0,
AdvectionScaling=1.0,
MaximumRMSError=0.02,
NumberOfIterations=800)
thresholder = itk.BinaryThresholdImageFilter[
InternalImageType,
OutputImageType].New(
geodesicActiveContour,
LowerThreshold=-1000,
UpperThreshold=0,
OutsideValue=0,
InsideValue=255)
writer = itk.ImageFileWriter[OutputImageType].New(
thresholder,
FileName=argv[2])
def rescaleAndWrite(filter, fileName):
caster = itk.RescaleIntensityImageFilter[
InternalImageType,
OutputImageType].New(
filter,
OutputMinimum=0,
OutputMaximum=255)
itk.write(caster, os.path.join(outputDirectory, fileName))
rescaleAndWrite(smoothing, "GeodesicActiveContourImageFilterOutput1.png")
rescaleAndWrite(
gradientMagnitude,
"GeodesicActiveContourImageFilterOutput2.png")
rescaleAndWrite(sigmoid, "GeodesicActiveContourImageFilterOutput3.png")
rescaleAndWrite(
fastMarching,
"GeodesicActiveContourImageFilterOutput4.png")
writer.Update()
print("")
print(
"Max. no. iterations: %d" %
(geodesicActiveContour.GetNumberOfIterations()))
print(
"Max. RMS error: %.3f" %
(geodesicActiveContour.GetMaximumRMSError()))
print("")
print(
"No. elapsed iterations: %d"
% (geodesicActiveContour.GetElapsedIterations()))
print("RMS change: %.3f" % (geodesicActiveContour.GetRMSChange()))
itk.write(fastMarching, os.path.join(outputDirectory,
"GeodesicActiveContourImageFilterOutput4.mha"))
itk.write(sigmoid, os.path.join(outputDirectory,
"GeodesicActiveContourImageFilterOutput3.mha"))
itk.write(gradientMagnitude, os.path.join(outputDirectory,
"GeodesicActiveContourImageFilterOutput2.mha"))
if __name__ == "__main__":
main()
|
apache-2.0
| -4,370,467,138,431,609,300 | 31.906736 | 79 | 0.6386 | false |
amyxchen/openhtf
|
examples/example_plug.py
|
1
|
3172
|
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example plug for OpenHTF."""
import time
import openhtf.conf as conf
import openhtf.plugs as plugs
conf.Declare('example_plug_increment', default_value=1,
description='Increment constant for example plug.')
class ExamplePlug(plugs.BasePlug): # pylint: disable=no-init
"""Example of a simple plug.
This plug simply keeps a value and increments it each time Increment() is
called. You'll notice a few paradigms here:
- @conf.InjectPositionalArgs
This is generally a good way to pass in any configuration that your
plug needs, such as an IP address or serial port to connect to. If
you want to use your plug outside of the OpenHTF framework, you can
still manually instantiate it, but you must pass the arguments by
keyword (as a side effect of the way InjectPositionalArgs is
implemented).
For example, if you had no openhtf.conf loaded, you could do this:
my_plug = ExamplePlug(example_plug_increment=4)
- TearDown()
This method will be called automatically by the OpenHTF framework at
the end of test execution. Here is a good place to do any close()
calls or similar resource cleanup that you need to do. In this case,
we don't have anything to do, so we simply log a message so you can
see when it gets called.
- Persistent 'value'
You'll notice that value is an instance attribute, not a class
attribute. This is because plugs are instantiated once at the
beginning of the test, and then the same instance is passed into
all test phases that use that plug type. Because of this, you
don't have to do anything special to maintain state within a plug
across phases.
This does imply, however, that if you *want* per-phase TearDown()
semantics, you have to implement them manually. The recommended
way to do this is to make your plug support Python's context
manager interface (__enter__ and __exit__), and then access it via
a with: block at the beginning of every phase where it is used.
"""
@conf.InjectPositionalArgs
def __init__(self, example_plug_increment):
self.increment = example_plug_increment
self.value = 0
def __str__(self):
return '<%s: %s>' % (type(self).__name__, self.value)
__repr__ = __str__
def TearDown(self):
"""Tear down the plug instance."""
self.logger.info('Tearing down %s', self)
def Increment(self):
"""Increment our value, return the new value."""
self.value += self.increment
return self.value
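# Standalone use outside the OpenHTF framework, passing the increment by keyword
# as described in the class docstring (the value 2 is arbitrary):
#
#   plug = ExamplePlug(example_plug_increment=2)
#   plug.Increment()   # -> 2
#   plug.Increment()   # -> 4
#   plug.TearDown()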
|
apache-2.0
| 4,989,892,666,170,478,000 | 36.761905 | 75 | 0.703972 | false |
csarn/qthexedit
|
docks/struct_explorer.py
|
1
|
8347
|
from PySide.QtCore import *
from PySide.QtGui import *
from construct import *
from construct.adapters import *
from math import *
from binascii import *
#from ..mmapslice import *
def red(cons):
cons.color = Qt.red
return cons
class TreeNode(object):
def __init__(self, value, row, parent, cons, name="", root=None):
self.value = value
self.row = row
self.parent = parent
self.name = name
self.cons = cons
self.root = root or self
try:
self.offset = self.parent.offset + self.parent.size_so_far
except:
self.offset = 0
self.children = []
if isinstance(parent, TreeNode):
# try:
parent.size_so_far += len(self.cons.build(self.value))
# except:
# parent.size_so_far += self.cons.size()
self.size_so_far = 0
if isinstance(self.value, dict): # struct
for i, con in enumerate(self.cons.subcons):
if isinstance(con, ConstAdapter):
self.children.append(TreeNode(con.value, i, self, con, con.name or "Magic", self.root))
else:
self.children.append(TreeNode(self.value[con.name], i, self, con, con.name, self.root))
elif isinstance(self.value, list):
for i, v in enumerate(self.value):
self.children.append(TreeNode(v, i, self, self.cons.subcon,
"{}[{}]".format(self.name, i), self.root))
def read_value(self, val):
# if isinstance(self.value, Password):
# assert len(val) < 16
# return Password(val, self.value.length)
if isinstance(self.value, (int, float)):
return eval(val, globals(), {self.parent.name: self.parent.value})
elif isinstance(self.value, str):
return val
else:
raise Exception("don't know how to read a value of %s" % self.value)
def editable(self):
if isinstance(self.cons, ConstAdapter):
return False
if isinstance(self.value, (dict, list)):
return False
return True
def size(self):
return len(self.cons.build(self.value))
class ConstructModel(QAbstractItemModel):
def __init__(self, *children):
super(ConstructModel, self).__init__()
self.root = TreeNode(None,0,None, None)
for child in children:
self.root.children.append(child)
#self.setRootIndex
def columnCount(self, parent):
return 4
def rowCount(self, parent):
if parent.row() == -1 and parent.column() == -1:
return len(self.root.children)
if parent.isValid():
item = parent.internalPointer()
return len(item.children)
def index(self, row, column, item):
if item.isValid():
item = item.internalPointer()
elif len(self.root.children) == 0:
return QModelIndex()
else:
return self.createIndex(row,column,self.root.children[row])
return self.createIndex(row, column, item.children[row])
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
if index.column() == 1 and index.internalPointer().editable():
return super(ConstructModel,self).flags(index) | Qt.ItemIsEditable
return super(ConstructModel,self).flags(index)
def setData(self, index, value, role):
if index.isValid() and role == Qt.EditRole:
try:
item = index.internalPointer()
parent = item.parent
row = index.row()
if isinstance(parent.value, dict):
key = item.name
print key
elif isinstance(parent.value, list):
key = row
else:
return False
val = item.read_value(value)
try:
# build and reparse, else item.value might be float when the cons is int
parent.value[key] = item.value = item.cons.parse(item.cons.build(val))
except:
return False
data = item.root.cons.build(item.root.value)
self.item.buf[:len(data)] = data
print self.buf[:len(data)].__repr__()
self.dataChanged.emit(index, index)
except Exception as e:
raise
return False
def data(self, index, role):
if role not in (Qt.DisplayRole, Qt.EditRole):
return
if not index.isValid():
return
item = index.internalPointer()
if role in (Qt.DisplayRole, Qt.EditRole):
if index.column() == 0:
return str(item.name)
elif index.column() == 1:
if isinstance(item.value, dict):
return "<Struct>"
elif isinstance(item.value, list):
return "<Array>"
else:
return str(item.value)
elif index.column() == 2:
return hex(item.offset)
else:
return hex(item.size())
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return
if orientation == Qt.Horizontal:
return ['Name', 'Value','Offset','Size'][section]
def parent(self, index):
if not index.isValid():
return QModelIndex()
item = index.internalPointer()
if item == self.root:
return QModelIndex()
return self.createIndex(item.row,0,item.parent)
def add_tree(self, tree):
self.beginInsertRows(self.index(0,0, QModelIndex()),
len(self.root.children),
len(self.root.children))
self.root.children.append(tree)
tree.parent = self.root
self.endInsertRows()
def rm_tree(self, int_index):
self.beginRemoveRows(self.index(0,0, QModelIndex()),
int_index,
int_index)
del self.root.children[int_index]
self.endRemoveRows()
def clear(self):
self.root.children = []
self.reset()
class StructExplorer(QWidget):
def __init__(self, *roots):
super(StructExplorer, self).__init__(None)
self.tv = tv = QTreeView()
self.roots = roots
self.model = ConstructModel()
tv.setItemsExpandable(True)
tv.setModel(self.model)
tv.expandAll()
self.layout = l = QGridLayout()
self.setLayout(l)
self.setMinimumWidth(500)
l.addWidget(tv, 0, 0, 1, 4)
self.button = b = QPushButton("ok")
self.button.clicked.connect(self.klick)
l.addWidget(b, 1, 0)
self.label = QLabel("")
l.addWidget(self.label, 1, 1)
self.b2 = QPushButton("clear")
self.b2.clicked.connect(self.klock)
l.addWidget(self.b2, 1, 2)
self.sm = self.tv.selectionModel()
self.sm.currentRowChanged.connect(self.updatelabel)
self.model.dataChanged.connect(self.updatelabel)
self.i = 0
def updatelabel(self, current, previous):
item = current.internalPointer()
if isinstance(item, TreeNode):
self.label.setText(hexlify(item.cons.build(item.value)))
#self.data[item.offset,item.offset+item.size()])
def klick(self):
self.model.add_tree(self.roots[self.i % len(self.roots)])
self.i += 1
def klock(self):
self.model.clear()
if __name__ == '__main__':
app = QApplication([])
import mmap
content = "\x05hello\x08world!!!"
data = mmap.mmap(-1, len(content))
data[:] = content
cons = Struct("foo",
PascalString("first"),
PascalString("second"))
root1 = TreeNode(cons.parse(data), 0, None, cons, cons.name)
root2 = TreeNode(cons.parse(data), 0, None, cons, cons.name)
root3 = TreeNode(cons.parse(data), 0, None, cons, cons.name)
root4 = TreeNode(cons.parse(data), 0, None, cons, cons.name)
w = StructExplorer(root1, root2, root3, root4)
w.show()
app.exec_()
|
gpl-2.0
| 6,008,047,980,486,020,000 | 32.25498 | 107 | 0.5487 | false |
arnomoonens/DeepRL
|
yarll/agents/actorcritic/a3c.py
|
1
|
4785
|
# -*- coding: utf8 -*-
"""
Asynchronous Advantage Actor Critic (A3C)
Most of the work is done in `a3c_worker.py`.
Based on:
- Pseudo code from Asynchronous Methods for Deep Reinforcement Learning
- Tensorflow code from https://github.com/yao62995/A3C/blob/master/A3C_atari.py and
https://github.com/openai/universe-starter-agent/tree/f16f37d9d3bc8146cf68a75557e1ba89824b7e54
"""
import logging
import multiprocessing
import subprocess
import signal
import sys
import os
from typing import Optional
from six.moves import shlex_quote
from yarll.agents.agent import Agent
logging.getLogger().setLevel("INFO")
class A3C(Agent):
"""Asynchronous Advantage Actor Critic learner."""
def __init__(self, env, monitor_path: str, monitor: bool = False, video: bool = True, **usercfg) -> None:
super(A3C, self).__init__(**usercfg)
self.env = env
self.env_name = env.spec.id
self.monitor = monitor
self.monitor_path = monitor_path
self.video = video
self.task_type: Optional[str] = None # To be filled in by subclass
self.config.update(dict(
gamma=0.99, # Discount past rewards by a percentage
learning_rate=1e-4,
n_hidden_units=20,
n_hidden_layers=1,
gradient_clip_value=50,
n_tasks=multiprocessing.cpu_count(), # Use as many tasks as there are cores on the current system
T_max=8e5,
shared_optimizer=False,
episode_max_length=env.spec.tags.get("wrapper_config.TimeLimit.max_episode_steps"),
n_local_steps=20,
vf_coef=0.5,
entropy_coef=0.01,
loss_reducer="sum", # use tf.reduce_sum or tf.reduce_mean for the loss
save_model=False
))
self.config.update(usercfg)
self.current_folder = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
self.ps_process: Optional[subprocess.Popen] = None
def signal_handler(self, received_signal: int, frame):
logging.info("SIGINT signal received: Requesting a stop...")
sys.exit(128 + received_signal)
def start_parameter_server(self):
cmd = [
sys.executable,
os.path.join(self.current_folder, "parameter_server.py"),
self.config["n_tasks"]]
processed_cmd = " ".join(shlex_quote(str(x)) for x in cmd)
self.ps_process = subprocess.Popen(processed_cmd, shell=True)
def stop_parameter_server(self):
self.ps_process.terminate()
def start_signal_handler(self):
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
def learn(self):
self.start_signal_handler()
self.start_parameter_server()
worker_processes = []
for task_id in range(int(self.config["n_tasks"])):
cmd = [
sys.executable,
os.path.join(self.current_folder, "a3c_worker.py"),
self.env_name,
self.task_type,
task_id,
int(self.config["n_tasks"]),
self.config["config_path"],
"--monitor_path", self.monitor_path
]
processed_cmd = " ".join(shlex_quote(str(x)) for x in cmd)
p = subprocess.Popen(processed_cmd, shell=True)
worker_processes.append(p)
for p in worker_processes:
p.wait()
self.stop_parameter_server()
class A3CDiscrete(A3C):
"""A3C for a discrete action space"""
def __init__(self, env, monitor_path: str, monitor: bool = False, **usercfg) -> None:
super(A3CDiscrete, self).__init__(env, monitor_path, monitor=monitor, **usercfg)
self.task_type = "A3CTaskDiscrete"
class A3CDiscreteCNN(A3C):
"""A3C for a discrete action space"""
def __init__(self, env, monitor_path: str, monitor: bool = False, **usercfg) -> None:
super(A3CDiscreteCNN, self).__init__(env, monitor_path, monitor=monitor, **usercfg)
self.task_type = "A3CTaskDiscreteCNN"
class A3CDiscreteCNNRNN(A3C):
"""A3C for a discrete action space"""
def __init__(self, env, monitor_path: str, monitor: bool = False, **usercfg) -> None:
super(A3CDiscreteCNNRNN, self).__init__(env, monitor_path, monitor=monitor, **usercfg)
self.task_type = "A3CTaskDiscreteCNNRNN"
self.config["RNN"] = True
class A3CContinuous(A3C):
"""A3C for a continuous action space"""
def __init__(self, env, monitor_path: str, monitor: bool = False, **usercfg) -> None:
super(A3CContinuous, self).__init__(env, monitor_path, monitor=monitor, **usercfg)
self.task_type = "A3CTaskContinuous"
|
mit
| 1,326,070,416,083,443,700 | 37.58871 | 110 | 0.622989 | false |
synth3tk/the-blue-alliance
|
controllers/main_controller.py
|
1
|
14227
|
import os
import logging
import datetime
import webapp2
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext.webapp import template
import tba_config
from base_controller import CacheableHandler
from consts.event_type import EventType
from consts.notification_type import NotificationType
from helpers.event_helper import EventHelper
from models.event import Event
from models.insight import Insight
from models.team import Team
from models.sitevar import Sitevar
def render_static(page):
memcache_key = "main_%s" % page
html = memcache.get(memcache_key)
if html is None:
path = os.path.join(os.path.dirname(__file__), "../templates/%s.html" % page)
html = template.render(path, {})
if tba_config.CONFIG["memcache"]:
memcache.set(memcache_key, html, 86400)
return html
def handle_404(request, response, exception):
response.write(render_static("404"))
response.set_status(404)
def handle_500(request, response, exception):
logging.exception(exception)
response.write(render_static("500"))
response.set_status(500)
class MainKickoffHandler(CacheableHandler):
CACHE_VERSION = 3
CACHE_KEY_FORMAT = "main_kickoff"
def __init__(self, *args, **kw):
super(MainKickoffHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def _render(self, *args, **kw):
kickoff_datetime_est = datetime.datetime(2016, 1, 9, 10, 30)
kickoff_datetime_utc = kickoff_datetime_est + datetime.timedelta(hours=5)
is_kickoff = datetime.datetime.now() >= kickoff_datetime_est - datetime.timedelta(days=1) # turn on 1 day before
self.template_values.update({
'is_kickoff': is_kickoff,
'kickoff_datetime_est': kickoff_datetime_est,
'kickoff_datetime_utc': kickoff_datetime_utc,
})
path = os.path.join(os.path.dirname(__file__), "../templates/index_kickoff.html")
return template.render(path, self.template_values)
class MainBuildseasonHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_buildseason"
def __init__(self, *args, **kw):
super(MainBuildseasonHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
endbuild_datetime_est = datetime.datetime(2016, 2, 23, 23, 59)
endbuild_datetime_utc = endbuild_datetime_est + datetime.timedelta(hours=5)
week_events = EventHelper.getWeekEvents()
self.template_values.update({
'endbuild_datetime_est': endbuild_datetime_est,
'endbuild_datetime_utc': endbuild_datetime_utc,
'events': week_events,
})
path = os.path.join(os.path.dirname(__file__), "../templates/index_buildseason.html")
return template.render(path, self.template_values)
class MainChampsHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_champs"
def __init__(self, *args, **kw):
super(MainChampsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def _render(self, *args, **kw):
year = datetime.datetime.now().year
event_keys = Event.query(Event.year == year, Event.event_type_enum.IN(EventType.CMP_EVENT_TYPES)).fetch(100, keys_only=True)
events = [event_key.get() for event_key in event_keys]
self.template_values.update({
"events": events,
"year": year,
})
insights = ndb.get_multi([ndb.Key(Insight, Insight.renderKeyName(year, insight_name)) for insight_name in Insight.INSIGHT_NAMES.values()])
for insight in insights:
if insight:
self.template_values[insight.name] = insight
path = os.path.join(os.path.dirname(__file__), '../templates/index_champs.html')
return template.render(path, self.template_values)
class MainCompetitionseasonHandler(CacheableHandler):
CACHE_VERSION = 5
CACHE_KEY_FORMAT = "main_competitionseason"
def __init__(self, *args, **kw):
super(MainCompetitionseasonHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60
def _render(self, *args, **kw):
week_events = EventHelper.getWeekEvents()
self.template_values.update({
"events": week_events,
})
path = os.path.join(os.path.dirname(__file__), '../templates/index_competitionseason.html')
return template.render(path, self.template_values)
class MainInsightsHandler(CacheableHandler):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = "main_insights"
def __init__(self, *args, **kw):
super(MainInsightsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def _render(self, *args, **kw):
week_events = EventHelper.getWeekEvents()
year = datetime.datetime.now().year
self.template_values.update({
"events": week_events,
"year": year,
})
insights = ndb.get_multi([ndb.Key(Insight, Insight.renderKeyName(year, insight_name)) for insight_name in Insight.INSIGHT_NAMES.values()])
for insight in insights:
if insight:
self.template_values[insight.name] = insight
path = os.path.join(os.path.dirname(__file__), '../templates/index_insights.html')
return template.render(path, self.template_values)
class MainOffseasonHandler(CacheableHandler):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = "main_offseason"
def __init__(self, *args, **kw):
super(MainOffseasonHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def _render(self, *args, **kw):
week_events = EventHelper.getWeekEvents()
self.template_values.update({
"events": week_events,
})
path = os.path.join(os.path.dirname(__file__), '../templates/index_offseason.html')
return template.render(path, self.template_values)
class ContactHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_contact"
def __init__(self, *args, **kw):
super(ContactHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/contact.html")
return template.render(path, self.template_values)
class HashtagsHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_hashtags"
def __init__(self, *args, **kw):
super(HashtagsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/hashtags.html")
return template.render(path, self.template_values)
class AboutHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_about"
def __init__(self, *args, **kw):
super(AboutHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/about.html")
return template.render(path, self.template_values)
class ThanksHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_thanks"
def __init__(self, *args, **kw):
super(ThanksHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/thanks.html")
return template.render(path, self.template_values)
class OprHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_opr"
def __init__(self, *args, **kw):
super(OprHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/opr.html")
return template.render(path, self.template_values)
class SearchHandler(webapp2.RequestHandler):
def get(self):
try:
q = self.request.get("q")
logging.info("search query: %s" % q)
if q.isdigit():
team_id = "frc%s" % q
team = Team.get_by_id(team_id)
if team:
self.redirect(team.details_url)
return None
elif len(q) in {3, 4, 5}: # event shorts are between 3 and 5 characters long
year = datetime.datetime.now().year # default to current year
event_id = "%s%s" % (year, q)
event = Event.get_by_id(event_id)
if event:
self.redirect(event.details_url)
return None
except Exception, e:
logging.warning("warning: %s" % e)
finally:
self.response.out.write(render_static("search"))
class GamedayHandler(CacheableHandler):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = "main_gameday"
def __init__(self, *args, **kw):
super(GamedayHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60
def _render(self, *args, **kw):
special_webcasts_future = Sitevar.get_by_id_async('gameday.special_webcasts')
special_webcasts_temp = special_webcasts_future.get_result()
if special_webcasts_temp:
special_webcasts_temp = special_webcasts_temp.contents
else:
special_webcasts_temp = {}
special_webcasts = []
for webcast in special_webcasts_temp.values():
toAppend = {}
for key, value in webcast.items():
toAppend[str(key)] = str(value)
special_webcasts.append(toAppend)
ongoing_events = []
ongoing_events_w_webcasts = []
week_events = EventHelper.getWeekEvents()
for event in week_events:
if event.now:
ongoing_events.append(event)
if event.webcast:
valid = []
for webcast in event.webcast:
if 'type' in webcast and 'channel' in webcast:
event_webcast = {'event': event}
valid.append(event_webcast)
# Add webcast numbers if more than one for an event
if len(valid) > 1:
count = 1
for event in valid:
event['count'] = count
count += 1
ongoing_events_w_webcasts += valid
self.template_values.update({
'special_webcasts': special_webcasts,
'ongoing_events': ongoing_events,
'ongoing_events_w_webcasts': ongoing_events_w_webcasts
})
path = os.path.join(os.path.dirname(__file__), '../templates/gameday.html')
return template.render(path, self.template_values)
class WebcastsHandler(CacheableHandler):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = "main_webcasts"
def __init__(self, *args, **kw):
super(WebcastsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
year = datetime.datetime.now().year
event_keys = Event.query(Event.year == year).order(Event.start_date).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
self.template_values.update({
'events': events,
'year': year,
})
path = os.path.join(os.path.dirname(__file__), '../templates/webcasts.html')
return template.render(path, self.template_values)
class RecordHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_record"
def __init__(self, *args, **kw):
super(RecordHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/record.html")
return template.render(path, self.template_values)
class ApiDocumentationHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "api_docs"
def __init__(self, *args, **kw):
super(ApiDocumentationHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/apidocs.html")
return template.render(path, self.template_values)
class ApiWriteHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "api_write"
def __init__(self, *args, **kw):
super(ApiWriteHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/apiwrite.html")
return template.render(path, self.template_values)
class MatchInputHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "match_input"
def __init__(self, *args, **kw):
super(MatchInputHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60
def _render(self, *args, **kw):
path = os.path.join(os.path.dirname(__file__), "../templates/matchinput.html")
return template.render(path, self.template_values)
class WebhookDocumentationHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "webhook_docs"
def __init__(self, *args, **kw):
super(WebhookDocumentationHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
self.template_values['enabled'] = NotificationType.enabled_notifications
self.template_values['types'] = NotificationType.types
path = os.path.join(os.path.dirname(__file__), "../templates/webhookdocs.html")
return template.render(path, self.template_values)
|
mit
| 7,097,353,811,669,205,000 | 33.7 | 146 | 0.605258 | false |
brvnl/master
|
parser/ArticleOrganizer.py
|
1
|
6740
|
# encoding=utf8
from parser.Calendar import Calendar
from parser.Definitions import Definitions
import logging
import datetime
import csv
import sys
#======================================================================================
# This file contains classes that help organize the files. The basic concept of the
# organization is to group file names according to a criterion, such as the publish
# date. In this case, a hash may be created in which the key (timestamp) groups all
# articles published on that given date.
#======================================================================================
# --------------------------------------------------------------------
# This base class works as an interface for the groupers, which must
# implement the methods below. These methods are used by classes such
# as Representation, which can then process the data even not knowing
# in which level it is grouped.
# --------------------------------------------------------------------
class GroupBase(object):
def __init__(self, l_files):
self.l_files = l_files
def raw(self):
return self.l_files
def group(self):
pass
def getId(self):
pass
# --------------------------------------------------------------------
# Builds a hash indexed by [date] whose values are a [list of files]
# for that given date.
# --------------------------------------------------------------------
class GroupByDays(GroupBase):
def group(self):
logger = logging.getLogger('GroupByDays')
logging.basicConfig(format='%(asctime)s %(levelname)s* %(message)s', level=logging.INFO)
filesByDay = {}
# Building a hash of [timestamp] -> [list of news files]
for file in self.l_files:
filename = file.rstrip("\n\r")
filename = filename.rstrip("\n")
# Expecting file path as the example below:
# /Volumes/Repository/Mestrado/Data/uol.com.br/20160210000000BreakingNews.txt
paths = filename.split("/")
if (len(paths) < 2):
continue
filename = paths.pop()
try:
timestamp = datetime.datetime.strptime(filename[:14], '%Y%m%d%H%M%S').strftime('%Y-%m-%d')
except:
logger.warn("Cannot evaluate timestamp on file \"%s\". Discarded." %(filename))
if timestamp in filesByDay:
filesByDay[timestamp].append(file)
else:
filesByDay[timestamp] = [file]
logger.info("Files organized in \"%d\" buckets." %(len(filesByDay.keys())))
return filesByDay
def getId(self):
return "Days"
# --------------------------------------------------------------------
# This class has the same behavior as GroupByDays, but when the article
# has been published on a non-regular day, it is assigned to the
# immediately preceding regular day instead.
# --------------------------------------------------------------------
class GroupByBusinessDays(GroupBase):
def __init__(self, l_files, calendar=Definitions.CALENDARS_PATH + "/Regular.txt"):
self.l_files = l_files
self.cal = Calendar(filePath=calendar)
def group(self):
logger = logging.getLogger('GroupByBusinessDays')
logging.basicConfig(format='%(asctime)s %(levelname)s* %(message)s', level=logging.INFO)
filesByDay = {}
# Building a hash of [timestamp] -> [list of news files]
for file in self.l_files:
filename = file.rstrip("\n\r")
filename = filename.rstrip("\n")
# Expecting file path as the example below:
# /Volumes/Repository/Mestrado/Data/uol.com.br/20160210000000BreakingNews.txt
paths = filename.split("/")
if (len(paths) < 2):
continue
filename = paths.pop()
try:
timestamp = datetime.datetime.strptime(filename[:14], '%Y%m%d%H%M%S').strftime('%Y-%m-%d')
except:
logger.warn("Cannot evaluate timestamp on file \"%s\". Discarded.\n" %(filename))
continue
if (not self.cal.isBusinessDay(timestamp)):
timestamp = self.cal.previousBusinessDay(timestamp)
if timestamp in filesByDay:
filesByDay[timestamp].append(file)
else:
filesByDay[timestamp] = [file]
logger.info("Files organized in \"%d\" buckets.\n" %(len(filesByDay.keys())))
return filesByDay
def getId(self):
return "BDays"
# --------------------------------------------------------------------
# This grouper explores the assumption that an article may impact not
# only the current day but also a number of following days (parameter
# days). To address this, the same article is assigned to the n days
# following its publication day.
# For this grouper it is also possible to specify a calendar treatment
# by replacing the default Regular calendar with the desired one.
# --------------------------------------------------------------------
class GroupByDaysRange(GroupBase):
def __init__(self, l_files, days=1, calendar=Definitions.CALENDARS_PATH + "/Regular.txt"):
self.l_files = l_files
self.days = days
self.cal = Calendar(filePath=calendar)
def group(self):
logger = logging.getLogger('GroupByDaysRange')
logging.basicConfig(format='%(asctime)s %(levelname)s* %(message)s', level=logging.INFO)
filesByDay = {}
# Building a hash of [timestamp] -> [list of news files]
for file in self.l_files:
filename = file.rstrip("\n\r")
filename = filename.rstrip("\n")
# Expecting file path as the example below:
# /Volumes/Repository/Mestrado/Data/uol.com.br/20160210000000BreakingNews.txt
paths = filename.split("/")
if (len(paths) < 2):
continue
filename = paths.pop()
try:
timestamp = datetime.datetime.strptime(filename[:14], '%Y%m%d%H%M%S').strftime('%Y-%m-%d')
except:
logger.warn("Cannot evaluate timestamp on file \"%s\". Discarded." %(filename))
for t in range(self.days):
if timestamp in filesByDay:
filesByDay[timestamp].append(file)
else:
filesByDay[timestamp] = [file]
timestamp = self.cal.nextBusinessDay(timestamp)
logger.info("Files organized in \"%d\" buckets." %(len(filesByDay.keys())))
return filesByDay
def getId(self):
return "DRg-" + str(self.days)
|
gpl-3.0
| -3,853,865,321,979,589,000 | 36.45 | 106 | 0.544659 | false |
mdrasmus/spimap
|
test/scripts/old/test_hky.py
|
1
|
1196
|
import scipy
from scipy.linalg import expm, logm, eig
from scipy import dot
def makeQ(bgfreq, tsvratio):
a, c, g, t = bgfreq
y = c + t
r = a + g
p = r / y
b = 1. / (2*r*y * (1 + tsvratio))
ay = (r*y*tsvratio - a*g - c*t) / \
(2*(1.+tsvratio)*(y*a*g*p + r*c*t))
ar = p * ay
print "b =", b
print "ay =", ay
print "ar =", ar
Q = scipy.array([[0, b*c, ar*g/r+b*g, b*t],
[b*a, 0, b*g, ay*t/y+b*t],
[ar*a/r+b*a, b*c, 0, b*t],
[b*a, ay*c/y+b*c, b*g, 0]])
for i in xrange(4):
tot = 0
for j in xrange(4):
if i != j:
tot += Q[i][j]
Q[i][i] = - tot
return Q
# make substitution matrix
# P = e^(Q*t)
bgfreq = [.2, .25, .3, .25]
#bgfreq = [.25, .25, .25, .25]
tsvratio = 4
time = 2
Q = makeQ(bgfreq, tsvratio)
P = expm(Q * time)
#s = dot(P, [1, 0, 0, 0])
#print s, s[1]+s[2]+s[3]
print Q
print P
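# Sanity check (added illustration, not part of the original script):
# each row of the rate matrix Q should sum to 0, and each row of the
# substitution matrix P = e^(Q*t) should sum to 1.
print "Q row sums:", [sum(row) for row in Q]
print "P row sums:", [sum(row) for row in P]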
x="""
0.304214 0.110134 0.280301 0.110134
0.110134 0.304214 0.110134 0.280301
0.420451 0.165201 0.444364 0.165201
0.165201 0.420451 0.165201 0.444364
"""
# list2matrix() is not defined or imported in this script; use an
# equivalent scipy reshape instead
P2 = scipy.array(map(float, x.split())).reshape((4, 4))
|
gpl-2.0
| 8,262,586,175,940,815,000 | 19.271186 | 48 | 0.4699 | false |
tommo/gii
|
support/waf/waflib/Tools/python.py
|
1
|
17525
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2007-2010 (ita)
# Gustavo Carneiro (gjc), 2007
"""
Support for Python, detect the headers and libraries and provide
*use* variables to link C/C++ programs against them::
def options(opt):
opt.load('compiler_c python')
def configure(conf):
conf.load('compiler_c python')
conf.check_python_version((2,4,2))
conf.check_python_headers()
def build(bld):
bld.program(features='pyembed', source='a.c', target='myprog')
bld.shlib(features='pyext', source='b.c', target='mylib')
"""
import os, sys
from waflib import Utils, Options, Errors, Logs
from waflib.TaskGen import extension, before_method, after_method, feature
from waflib.Configure import conf
FRAG = '''
#include <Python.h>
#ifdef __cplusplus
extern "C" {
#endif
void Py_Initialize(void);
void Py_Finalize(void);
#ifdef __cplusplus
}
#endif
int main(int argc, char **argv)
{
(void)argc; (void)argv;
Py_Initialize();
Py_Finalize();
return 0;
}
'''
"""
Piece of C/C++ code used in :py:func:`waflib.Tools.python.check_python_headers`
"""
INST = '''
import sys, py_compile
py_compile.compile(sys.argv[1], sys.argv[2], sys.argv[3])
'''
"""
Piece of Python code used in :py:func:`waflib.Tools.python.install_pyfile` for installing python files
"""
DISTUTILS_IMP = ['from distutils.sysconfig import get_config_var, get_python_lib']
@extension('.py')
def process_py(self, node):
"""
Add a callback using :py:func:`waflib.Tools.python.install_pyfile` to install a python file
"""
try:
if not self.bld.is_install:
return
except AttributeError:
return
try:
if not self.install_path:
return
except AttributeError:
self.install_path = '${PYTHONDIR}'
# i wonder now why we wanted to do this after the build is over
# issue #901: people want to preserve the structure of installed files
def inst_py(ctx):
install_from = getattr(self, 'install_from', None)
if install_from:
install_from = self.path.find_dir(install_from)
install_pyfile(self, node, install_from)
self.bld.add_post_fun(inst_py)
def install_pyfile(self, node, install_from=None):
"""
Execute the installation of a python file
:param node: python file
:type node: :py:class:`waflib.Node.Node`
"""
from_node = install_from or node.parent
tsk = self.bld.install_as(self.install_path + '/' + node.path_from(from_node), node, postpone=False)
path = tsk.get_install_path()
if self.bld.is_install < 0:
Logs.info("+ removing byte compiled python files")
for x in 'co':
try:
os.remove(path + x)
except OSError:
pass
if self.bld.is_install > 0:
try:
st1 = os.stat(path)
except OSError:
Logs.error('The python file is missing, this should not happen')
for x in ['c', 'o']:
do_inst = self.env['PY' + x.upper()]
try:
st2 = os.stat(path + x)
except OSError:
pass
else:
if st1.st_mtime <= st2.st_mtime:
do_inst = False
if do_inst:
lst = (x == 'o') and [self.env['PYFLAGS_OPT']] or []
(a, b, c) = (path, path + x, tsk.get_install_path(destdir=False) + x)
argv = self.env['PYTHON'] + lst + ['-c', INST, a, b, c]
Logs.info('+ byte compiling %r' % (path + x))
env = self.env.env or None
ret = Utils.subprocess.Popen(argv, env=env).wait()
if ret:
raise Errors.WafError('py%s compilation failed %r' % (x, path))
@feature('py')
def feature_py(self):
"""
Dummy feature which does nothing
"""
pass
@feature('pyext')
@before_method('propagate_uselib_vars', 'apply_link')
@after_method('apply_bundle')
def init_pyext(self):
"""
Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the
*lib* prefix from library names.
"""
self.uselib = self.to_list(getattr(self, 'uselib', []))
if not 'PYEXT' in self.uselib:
self.uselib.append('PYEXT')
# override shlib_PATTERN set by the osx module
self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN = self.env.pyext_PATTERN
self.env.fcshlib_PATTERN = self.env.dshlib_PATTERN = self.env.pyext_PATTERN
try:
if not self.install_path:
return
except AttributeError:
self.install_path = '${PYTHONARCHDIR}'
@feature('pyext')
@before_method('apply_link', 'apply_bundle')
def set_bundle(self):
if Utils.unversioned_sys_platform() == 'darwin':
self.mac_bundle = True
@before_method('propagate_uselib_vars')
@feature('pyembed')
def init_pyembed(self):
"""
Add the PYEMBED variable.
"""
self.uselib = self.to_list(getattr(self, 'uselib', []))
if not 'PYEMBED' in self.uselib:
self.uselib.append('PYEMBED')
@conf
def get_python_variables(self, variables, imports=None):
"""
Spawn a new python process to dump configuration variables
:param variables: variables to print
:type variables: list of string
:param imports: one import by element
:type imports: list of string
:return: the variable values
:rtype: list of string
"""
if not imports:
try:
imports = self.python_imports
except AttributeError:
imports = DISTUTILS_IMP
program = list(imports) # copy
program.append('')
for v in variables:
program.append("print(repr(%s))" % v)
os_env = dict(os.environ)
try:
del os_env['MACOSX_DEPLOYMENT_TARGET'] # see comments in the OSX tool
except KeyError:
pass
try:
out = self.cmd_and_log(self.env.PYTHON + ['-c', '\n'.join(program)], env=os_env)
except Errors.WafError:
self.fatal('The distutils module is unusable: install "python-devel"?')
self.to_log(out)
return_values = []
for s in out.split('\n'):
s = s.strip()
if not s:
continue
if s == 'None':
return_values.append(None)
elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'):
return_values.append(eval(s))
elif s[0].isdigit():
return_values.append(int(s))
else: break
return return_values
@conf
def check_python_headers(conf):
"""
Check for headers and libraries necessary to extend or embed python by using the module *distutils*.
On success the environment variables xxx_PYEXT and xxx_PYEMBED are added:
* PYEXT: for compiling python extensions
* PYEMBED: for embedding a python interpreter
"""
# FIXME rewrite
env = conf.env
if not env['CC_NAME'] and not env['CXX_NAME']:
conf.fatal('load a compiler first (gcc, g++, ..)')
if not env['PYTHON_VERSION']:
conf.check_python_version()
pybin = conf.env.PYTHON
if not pybin:
conf.fatal('Could not find the python executable')
v = 'prefix SO LDFLAGS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDSHARED CFLAGS'.split()
try:
lst = conf.get_python_variables(["get_config_var('%s') or ''" % x for x in v])
except RuntimeError:
conf.fatal("Python development headers not found (-v for details).")
vals = ['%s = %r' % (x, y) for (x, y) in zip(v, lst)]
conf.to_log("Configuration returned from %r:\n%r\n" % (pybin, '\n'.join(vals)))
dct = dict(zip(v, lst))
x = 'MACOSX_DEPLOYMENT_TARGET'
if dct[x]:
conf.env[x] = conf.environ[x] = dct[x]
env['pyext_PATTERN'] = '%s' + dct['SO'] # not a mistake
# Check for python libraries for embedding
all_flags = dct['LDFLAGS'] + ' ' + dct['CFLAGS']
conf.parse_flags(all_flags, 'PYEMBED')
all_flags = dct['LDFLAGS'] + ' ' + dct['LDSHARED'] + ' ' + dct['CFLAGS']
conf.parse_flags(all_flags, 'PYEXT')
result = None
#name = 'python' + env['PYTHON_VERSION']
# TODO simplify this
for name in ('python' + env['PYTHON_VERSION'], 'python' + env['PYTHON_VERSION'] + 'm', 'python' + env['PYTHON_VERSION'].replace('.', '')):
# LIBPATH_PYEMBED is already set; see if it works.
if not result and env['LIBPATH_PYEMBED']:
path = env['LIBPATH_PYEMBED']
conf.to_log("\n\n# Trying default LIBPATH_PYEMBED: %r\n" % path)
result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBPATH_PYEMBED' % name)
if not result and dct['LIBDIR']:
path = [dct['LIBDIR']]
conf.to_log("\n\n# try again with -L$python_LIBDIR: %r\n" % path)
result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBDIR' % name)
if not result and dct['LIBPL']:
path = [dct['LIBPL']]
conf.to_log("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n")
result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in python_LIBPL' % name)
if not result:
path = [os.path.join(dct['prefix'], "libs")]
conf.to_log("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in $prefix/libs' % name)
if result:
break # do not forget to set LIBPATH_PYEMBED
if result:
env['LIBPATH_PYEMBED'] = path
env.append_value('LIB_PYEMBED', [name])
else:
conf.to_log("\n\n### LIB NOT FOUND\n")
# under certain conditions, python extensions must link to
# python libraries, not just python embedding programs.
if (Utils.is_win32 or sys.platform.startswith('os2')
or dct['Py_ENABLE_SHARED']):
env['LIBPATH_PYEXT'] = env['LIBPATH_PYEMBED']
env['LIB_PYEXT'] = env['LIB_PYEMBED']
# We check that pythonX.Y-config exists, and if it exists we
# use it to get only the includes, else fall back to distutils.
num = '.'.join(env['PYTHON_VERSION'].split('.')[:2])
conf.find_program([''.join(pybin) + '-config', 'python%s-config' % num, 'python-config-%s' % num, 'python%sm-config' % num], var='PYTHON_CONFIG', mandatory=False)
includes = []
if conf.env.PYTHON_CONFIG:
for incstr in conf.cmd_and_log([ conf.env.PYTHON_CONFIG, '--includes']).strip().split():
# strip the -I or /I
if (incstr.startswith('-I') or incstr.startswith('/I')):
incstr = incstr[2:]
# append include path, unless already given
if incstr not in includes:
includes.append(incstr)
conf.to_log("Include path for Python extensions (found via python-config --includes): %r\n" % (includes,))
env['INCLUDES_PYEXT'] = includes
env['INCLUDES_PYEMBED'] = includes
else:
conf.to_log("Include path for Python extensions "
"(found via distutils module): %r\n" % (dct['INCLUDEPY'],))
env['INCLUDES_PYEXT'] = [dct['INCLUDEPY']]
env['INCLUDES_PYEMBED'] = [dct['INCLUDEPY']]
# Code using the Python API needs to be compiled with -fno-strict-aliasing
if env['CC_NAME'] == 'gcc':
env.append_value('CFLAGS_PYEMBED', ['-fno-strict-aliasing'])
env.append_value('CFLAGS_PYEXT', ['-fno-strict-aliasing'])
if env['CXX_NAME'] == 'gcc':
env.append_value('CXXFLAGS_PYEMBED', ['-fno-strict-aliasing'])
env.append_value('CXXFLAGS_PYEXT', ['-fno-strict-aliasing'])
if env.CC_NAME == "msvc":
from distutils.msvccompiler import MSVCCompiler
dist_compiler = MSVCCompiler()
dist_compiler.initialize()
env.append_value('CFLAGS_PYEXT', dist_compiler.compile_options)
env.append_value('CXXFLAGS_PYEXT', dist_compiler.compile_options)
env.append_value('LINKFLAGS_PYEXT', dist_compiler.ldflags_shared)
# See if it compiles
try:
conf.check(header_name='Python.h', define_name='HAVE_PYTHON_H',
uselib='PYEMBED', fragment=FRAG,
errmsg=':-(')
except conf.errors.ConfigurationError:
# python3.2, oh yeah
xx = conf.env.CXX_NAME and 'cxx' or 'c'
flags = ['--cflags', '--libs', '--ldflags']
for f in flags:
conf.check_cfg(msg='Asking python-config for pyembed %s flags' % f,
path=conf.env.PYTHON_CONFIG, package='', uselib_store='PYEMBED', args=[f])
conf.check(header_name='Python.h', define_name='HAVE_PYTHON_H', msg='Getting pyembed flags from python-config',
fragment=FRAG, errmsg='Could not build a python embedded interpreter',
features='%s %sprogram pyembed' % (xx, xx))
for f in flags:
conf.check_cfg(msg='Asking python-config for pyext %s flags' % f,
path=conf.env.PYTHON_CONFIG, package='', uselib_store='PYEXT', args=[f])
conf.check(header_name='Python.h', define_name='HAVE_PYTHON_H', msg='Getting pyext flags from python-config',
features='%s %sshlib pyext' % (xx, xx), fragment=FRAG, errmsg='Could not build python extensions')
@conf
def check_python_version(conf, minver=None):
"""
Check if the python interpreter is found matching a given minimum version.
minver should be a tuple, e.g. to check for python >= 2.4.2 pass (2,4,2) as minver.
If successful, PYTHON_VERSION is defined as 'MAJOR.MINOR'
(e.g. '2.4') of the actual python version found, and PYTHONDIR is
defined, pointing to the site-packages directory appropriate for
this python version, where modules/packages/extensions should be
installed.
:param minver: minimum version
:type minver: tuple of int
"""
assert minver is None or isinstance(minver, tuple)
pybin = conf.env['PYTHON']
if not pybin:
conf.fatal('could not find the python executable')
# Get python version string
cmd = pybin + ['-c', 'import sys\nfor x in sys.version_info: print(str(x))']
Logs.debug('python: Running python command %r' % cmd)
lines = conf.cmd_and_log(cmd).split()
assert len(lines) == 5, "found %i lines, expected 5: %r" % (len(lines), lines)
pyver_tuple = (int(lines[0]), int(lines[1]), int(lines[2]), lines[3], int(lines[4]))
# compare python version with the minimum required
result = (minver is None) or (pyver_tuple >= minver)
if result:
# define useful environment variables
pyver = '.'.join([str(x) for x in pyver_tuple[:2]])
conf.env['PYTHON_VERSION'] = pyver
if 'PYTHONDIR' in conf.environ:
pydir = conf.environ['PYTHONDIR']
else:
if Utils.is_win32:
(python_LIBDEST, pydir) = conf.get_python_variables(
["get_config_var('LIBDEST') or ''",
"get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']])
else:
python_LIBDEST = None
(pydir,) = conf.get_python_variables( ["get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']])
if python_LIBDEST is None:
if conf.env['LIBDIR']:
python_LIBDEST = os.path.join(conf.env['LIBDIR'], "python" + pyver)
else:
python_LIBDEST = os.path.join(conf.env['PREFIX'], "lib", "python" + pyver)
if 'PYTHONARCHDIR' in conf.environ:
pyarchdir = conf.environ['PYTHONARCHDIR']
else:
(pyarchdir, ) = conf.get_python_variables( ["get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']])
if not pyarchdir:
pyarchdir = pydir
if hasattr(conf, 'define'): # conf.define is added by the C tool, so may not exist
conf.define('PYTHONDIR', pydir)
conf.define('PYTHONARCHDIR', pyarchdir)
conf.env['PYTHONDIR'] = pydir
conf.env['PYTHONARCHDIR'] = pyarchdir
# Feedback
pyver_full = '.'.join(map(str, pyver_tuple[:3]))
if minver is None:
conf.msg('Checking for python version', pyver_full)
else:
minver_str = '.'.join(map(str, minver))
conf.msg('Checking for python version', pyver_tuple, ">= %s" % (minver_str,) and 'GREEN' or 'YELLOW')
if not result:
conf.fatal('The python version is too old, expecting %r' % (minver,))
PYTHON_MODULE_TEMPLATE = '''
import %s as current_module
version = getattr(current_module, '__version__', None)
if version is not None:
print(str(version))
else:
print('unknown version')
'''
@conf
def check_python_module(conf, module_name, condition=''):
"""
Check if the selected python interpreter can import the given python module::
def configure(conf):
conf.check_python_module('pygccxml')
conf.check_python_module('re', condition="ver > num(2, 0, 4) and ver <= num(3, 0, 0)")
:param module_name: module
:type module_name: string
"""
msg = 'Python module %s' % module_name
if condition:
msg = '%s (%s)' % (msg, condition)
conf.start_msg(msg)
try:
ret = conf.cmd_and_log(conf.env['PYTHON'] + ['-c', PYTHON_MODULE_TEMPLATE % module_name])
except Exception:
conf.end_msg(False)
conf.fatal('Could not find the python module %r' % module_name)
ret = ret.strip()
if condition:
conf.end_msg(ret)
if ret == 'unknown version':
conf.fatal('Could not check the %s version' % module_name)
from distutils.version import LooseVersion
def num(*k):
if isinstance(k[0], int):
return LooseVersion('.'.join([str(x) for x in k]))
else:
return LooseVersion(k[0])
d = {'num': num, 'ver': LooseVersion(ret)}
ev = eval(condition, {}, d)
if not ev:
conf.fatal('The %s version does not satisfy the requirements' % module_name)
else:
if ret == 'unknown version':
conf.end_msg(True)
else:
conf.end_msg(ret)
def configure(conf):
"""
Detect the python interpreter
"""
try:
conf.find_program('python', var='PYTHON')
except conf.errors.ConfigurationError:
Logs.warn("could not find a python executable, setting to sys.executable '%s'" % sys.executable)
conf.env.PYTHON = sys.executable
if conf.env.PYTHON != sys.executable:
Logs.warn("python executable %r differs from system %r" % (conf.env.PYTHON, sys.executable))
conf.env.PYTHON = conf.cmd_to_list(conf.env.PYTHON)
v = conf.env
v['PYCMD'] = '"import sys, py_compile;py_compile.compile(sys.argv[1], sys.argv[2])"'
v['PYFLAGS'] = ''
v['PYFLAGS_OPT'] = '-O'
v['PYC'] = getattr(Options.options, 'pyc', 1)
v['PYO'] = getattr(Options.options, 'pyo', 1)
def options(opt):
"""
Add the options ``--nopyc`` and ``--nopyo``
"""
opt.add_option('--nopyc',
action='store_false',
default=1,
help = 'Do not install bytecode compiled .pyc files (configuration) [Default:install]',
dest = 'pyc')
opt.add_option('--nopyo',
action='store_false',
default=1,
help='Do not install optimised compiled .pyo files (configuration) [Default:install]',
dest='pyo')
|
mit
| -1,669,416,766,679,091,500 | 31.453704 | 163 | 0.674579 | false |
linsalrob/bioinformatics
|
bin/translate_longest_orf.py
|
1
|
1066
|
"""
Translate each sequence in a FASTA file (e.g. a metagenome) in all six frames
and print the longest ORF if it exceeds the given minimum size
"""
import sys
import os
from robseq import translate
from rob import read_fasta
from rob import rc
try:
faf = sys.argv[1]
size = int(sys.argv[2])
except:
sys.exit(sys.argv[0] + " <fasta file> <min orf size in amino acids>")
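# Example invocation (illustrative; the input file name is an assumption):
#   python translate_longest_orf.py contigs.fasta 100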
fa = read_fasta(faf)
for seqid in fa:
outputid = seqid.split(' ')[0]
c = 0
orfs = []
location = {}
for frame in range(3):
dna = fa[seqid][frame:]
prot = translate(dna)
pieces = prot.split('*')
orfs += pieces
for p in pieces:
location[p] = " frame: " + str(frame) + " strand: +"
original = rc(fa[seqid])
for frame in range(3):
dna = original[frame:]
prot = translate(dna)
pieces = prot.split('*')
orfs += pieces
for p in pieces:
location[p] = " frame: -" + str(frame) + " strand: -"
longest = max(orfs, key=len)
if len(longest) > size:
print(">" + outputid + " " + location[longest] + "\n" + longest)
|
mit
| 4,582,330,701,247,208,400 | 21.208333 | 73 | 0.54878 | false |
oneklc/dimod
|
dimod/reference/composites/higherordercomposites.py
|
1
|
14849
|
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
"""
Composites that convert binary quadratic model samplers into polynomial samplers
or that work with binary polynomials.
Higher-order composites implement three sampling methods (similar to
:class:`.Sampler`):
* :meth:`.PolySampler.sample_poly`
* :meth:`.PolySampler.sample_hising`
* :meth:`.PolySampler.sample_hubo`
"""
from __future__ import division
import numpy as np
from dimod.core.polysampler import ComposedPolySampler, PolySampler
from dimod.higherorder.polynomial import BinaryPolynomial
from dimod.higherorder.utils import make_quadratic, poly_energies
from dimod.response import SampleSet
__all__ = 'HigherOrderComposite', 'PolyScaleComposite', 'PolyTruncateComposite'
class HigherOrderComposite(ComposedPolySampler):
"""Convert a binary quadratic model sampler to a binary polynomial sampler.
Energies of the returned samples do not include the penalties.
Args:
sampler (:obj:`dimod.Sampler`):
A dimod sampler
Example:
This example uses :class:`.HigherOrderComposite` to instantiate a
composed sampler that submits a simple Ising problem to a sampler.
The composed sampler creates a bqm from a higher order problem.
>>> sampler = dimod.HigherOrderComposite(dimod.ExactSolver())
>>> h = {0: -0.5, 1: -0.3, 2: -0.8}
>>> J = {(0, 1, 2): -1.7}
>>> sampleset = sampler.sample_hising(h, J, discard_unsatisfied=True)
>>> sampleset.first # doctest: +SKIP
Sample(sample={0: 1, 1: 1, 2: 1},
energy=-3.3,
num_occurrences=1,
penalty_satisfaction=True)
"""
def __init__(self, child_sampler):
self._children = [child_sampler]
@property
def children(self):
"""A list containing the wrapped sampler."""
return self._children
@property
def parameters(self):
param = self.child.parameters.copy()
param['penalty_strength'] = []
param['discard_unsatisfied'] = []
param['keep_penalty_variables'] = []
return param
@property
def properties(self):
return {'child_properties': self.child.properties.copy()}
def sample_ising(self, h, J, offset=0, *args, **kwargs):
# need to handle offset input for backwards compatibility
if offset:
J[()] = offset
return ComposedPolySampler.sample_ising(self, h, J, *args, **kwargs)
def sample_poly(self, poly, penalty_strength=1.0,
keep_penalty_variables=False,
discard_unsatisfied=False, **parameters):
"""Sample from the given binary polynomial.
Takes the given binary polynomial, introduces penalties, reduces the
higher-order problem into a quadratic problem and sends it to its child
sampler.
Args:
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
penalty_strength (float, optional): Strength of the reduction constraint.
Insufficient strength can result in the binary quadratic model
not having the same minimization as the polynomial.
keep_penalty_variables (bool, optional): default is False. If False,
the variables used for penalty are removed from the samples.
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
**parameters: Parameters for the sampling method, specified by
the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
bqm = make_quadratic(poly, penalty_strength, vartype=poly.vartype)
response = self.child.sample(bqm, **parameters)
return polymorph_response(response, poly, bqm,
penalty_strength=penalty_strength,
keep_penalty_variables=keep_penalty_variables,
discard_unsatisfied=discard_unsatisfied)
def penalty_satisfaction(response, bqm):
""" Creates a penalty satisfaction list
Given a sample set and a bqm object, creates a binary list indicating
whether the penalties introduced during degree reduction are satisfied for
each sample in the sample set.
Args:
response (:obj:`.SampleSet`): Samples corresponding to provided bqm
bqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains
its reduction info.
Returns:
:obj:`numpy.ndarray`: a binary array of penalty satisfaction information
"""
record = response.record
label_dict = response.variables.index
if len(bqm.info['reduction']) == 0:
return np.array([1] * len(record.sample))
penalty_vector = np.prod([record.sample[:, label_dict[qi]] *
record.sample[:, label_dict[qj]]
== record.sample[:,
label_dict[valdict['product']]]
for (qi, qj), valdict in
bqm.info['reduction'].items()], axis=0)
return penalty_vector
def polymorph_response(response, poly, bqm,
penalty_strength=None,
keep_penalty_variables=True,
discard_unsatisfied=False):
""" Transforms the sampleset for the higher order problem.
Given a response for a penalized HUBO, this function creates a new sampleset
object, taking into account penalty information, and calculates the
energies of the samples for the higher-order problem.
Args:
response (:obj:`.SampleSet`): response for a penalized hubo.
poly (:obj:`.BinaryPolynomial`):
A binary polynomial.
bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model of the
reduced problem.
penalty_strength (float, optional): default is None, if provided,
will be added to the info field of the returned sampleSet object.
keep_penalty_variables (bool, optional): default is True. if False
will remove the variables used for penalty from the samples
discard_unsatisfied (bool, optional): default is False. If True
will discard samples that do not satisfy the penalty conditions.
Returns:
(:obj:`.SampleSet`): A sample set object that has additional penalty
information. The energies of samples are calculated for the HUBO
ignoring the penalty variables.
"""
record = response.record
penalty_vector = penalty_satisfaction(response, bqm)
original_variables = bqm.variables
if discard_unsatisfied:
samples_to_keep = list(map(bool, list(penalty_vector)))
penalty_vector = np.array([True] * np.sum(samples_to_keep))
else:
samples_to_keep = list(map(bool, [1] * len(record.sample)))
samples = record.sample[samples_to_keep]
energy_vector = poly.energies((samples, response.variables))
if not keep_penalty_variables:
original_variables = poly.variables
idxs = [response.variables.index[v] for v in original_variables]
samples = np.asarray(samples[:, idxs])
num_samples, num_variables = np.shape(samples)
datatypes = [('sample', np.dtype(np.int8), (num_variables,)),
('energy', energy_vector.dtype),
('penalty_satisfaction',
penalty_vector.dtype)]
datatypes.extend((name, record[name].dtype, record[name].shape[1:])
for name in record.dtype.names if
name not in {'sample',
'energy'})
data = np.rec.array(np.empty(num_samples, dtype=datatypes))
data.sample = samples
data.energy = energy_vector
for name in record.dtype.names:
if name not in {'sample', 'energy'}:
data[name] = record[name][samples_to_keep]
data['penalty_satisfaction'] = penalty_vector
response.info['reduction'] = bqm.info['reduction']
if penalty_strength is not None:
response.info['penalty_strength'] = penalty_strength
return SampleSet(data, original_variables, response.info,
response.vartype)
class PolyScaleComposite(ComposedPolySampler):
"""Composite to scale biases of a binary polynomial.
Args:
child (:obj:`.PolySampler`):
A binary polynomial sampler.
Examples:
>>> linear = {'a': -4.0, 'b': -4.0}
>>> quadratic = {('a', 'b'): 3.2, ('a', 'b', 'c'): 1}
>>> sampler = dimod.PolyScaleComposite(dimod.HigherOrderComposite(dimod.ExactSolver()))
>>> response = sampler.sample_hising(linear, quadratic, scalar=0.5,
... ignored_terms=[('a','b')])
"""
def __init__(self, child):
if not isinstance(child, PolySampler):
raise TypeError("Child sampler must be a PolySampler")
self._children = [child]
@property
def children(self):
"""The child sampler in a list"""
return self._children
@property
def parameters(self):
param = self.child.parameters.copy()
param.update({'scalar': [],
'bias_range': [],
'poly_range': [],
'ignored_terms': [],
})
return param
@property
def properties(self):
return {'child_properties': self.child.properties.copy()}
def sample_poly(self, poly, scalar=None, bias_range=1, poly_range=None,
ignored_terms=None, **parameters):
"""Scale and sample from the given binary polynomial.
If scalar is not given, problem is scaled based on bias and polynomial
ranges. See :meth:`.BinaryPolynomial.scale` and
:meth:`.BinaryPolynomial.normalize`
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
scalar (number, optional):
Value by which to scale the energy range of the binary polynomial.
bias_range (number/pair, optional, default=1):
Value/range by which to normalize the all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
**parameters:
Other parameters for the sampling method, specified by
the child sampler.
"""
if ignored_terms is None:
ignored_terms = set()
else:
ignored_terms = {frozenset(term) for term in ignored_terms}
# scale and normalize happen in-place so we need to make a copy
original, poly = poly, poly.copy()
if scalar is not None:
poly.scale(scalar, ignored_terms=ignored_terms)
else:
poly.normalize(bias_range=bias_range, poly_range=poly_range,
ignored_terms=ignored_terms)
# we need to know how much we scaled by, which we can do by looking
# at the biases
try:
v = next(v for v, bias in original.items()
if bias and v not in ignored_terms)
except StopIteration:
# nothing to scale
scalar = 1
else:
scalar = poly[v] / original[v]
sampleset = self.child.sample_poly(poly, **parameters)
if ignored_terms:
# we need to recalculate the energy
sampleset.record.energy = original.energies((sampleset.record.sample,
sampleset.variables))
else:
sampleset.record.energy /= scalar
return sampleset
class PolyTruncateComposite(ComposedPolySampler):
"""Composite to truncate the returned samples
Post-processing is expensive and sometimes one might want to treat only
the lowest-energy samples. This composite layer allows one to
pre-select the samples within a multi-composite pipeline.
Args:
child_sampler (:obj:`dimod.PolySampler`):
A dimod binary polynomial sampler.
n (int):
Maximum number of rows in the returned sample set.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples before
truncating. Note that sample order is maintained in the
underlying array.
aggregate (bool, optional, default=False):
If True, aggregate the samples before truncating.
Note:
If aggregate is True :attr:`.SampleSet.record.num_occurrences` are
accumulated but no other fields are.
"""
def __init__(self, child_sampler, n, sorted_by='energy', aggregate=False):
if n < 1:
raise ValueError('n should be a positive integer, received {}'.format(n))
self._children = [child_sampler]
self._truncate_kwargs = dict(n=n, sorted_by=sorted_by)
self._aggregate = aggregate
@property
def children(self):
return self._children
@property
def parameters(self):
return self.child.parameters.copy()
@property
def properties(self):
return {'child_properties': self.child.properties.copy()}
def sample_poly(self, poly, **kwargs):
"""Sample from the binary polynomial and truncate output.
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
**kwargs:
Parameters for the sampling method, specified by the child
sampler.
Returns:
:obj:`dimod.SampleSet`
"""
tkw = self._truncate_kwargs
if self._aggregate:
return self.child.sample_poly(poly, **kwargs).aggregate().truncate(**tkw)
else:
return self.child.sample_poly(poly, **kwargs).truncate(**tkw)
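# Usage sketch (illustrative, not part of the original module; the child
# sampler and problem below are assumptions):
#
#   sampler = PolyTruncateComposite(
#       HigherOrderComposite(dimod.ExactSolver()), n=5)
#   sampleset = sampler.sample_hising({0: -0.5}, {(0, 1, 2): -1.7})
#   # the returned sample set holds at most the 5 lowest-energy samples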
|
apache-2.0
| 7,922,676,447,443,684,000 | 34.694712 | 94 | 0.608728 | false |