Dataset columns:
repository_name: stringlengths 7 to 107
function_path: stringlengths 4 to 190
function_identifier: stringlengths 1 to 236
language: stringclasses, 1 value
function: stringlengths 9 to 647k
docstring: stringlengths 5 to 488k
function_url: stringlengths 71 to 285
context: stringlengths 0 to 2.51M
license: stringclasses, 5 values
fastavro/fastavro
fastavro/_write_py.py
xz_write_block
python
def xz_write_block(encoder, block_bytes, compression_level):
    data = lzma.compress(block_bytes)
    encoder.write_long(len(data))
    encoder._fo.write(data)
Write block in "xz" codec.
https://github.com/fastavro/fastavro/blob/b918a377403778b715456663ae526d26acb6588f/fastavro/_write_py.py#L328-L332
import json from io import BytesIO from os import urandom, SEEK_SET import bz2 import lzma import zlib from .const import NAMED_TYPES from .io.binary_encoder import BinaryEncoder from .io.json_encoder import AvroJSONEncoder from .validation import _validate from .read import HEADER_SCHEMA, SYNC_SIZE, MAGIC, reader from .logical_writers import LOGICAL_WRITERS from .schema import extract_record_type, extract_logical_type, parse_schema from ._write_common import _is_appendable def write_null(encoder, datum, schema, named_schemas, fname): encoder.write_null() def write_boolean(encoder, datum, schema, named_schemas, fname): encoder.write_boolean(datum) def write_int(encoder, datum, schema, named_schemas, fname): encoder.write_int(datum) def write_long(encoder, datum, schema, named_schemas, fname): encoder.write_long(datum) def write_float(encoder, datum, schema, named_schemas, fname): encoder.write_float(datum) def write_double(encoder, datum, schema, named_schemas, fname): encoder.write_double(datum) def write_bytes(encoder, datum, schema, named_schemas, fname): encoder.write_bytes(datum) def write_utf8(encoder, datum, schema, named_schemas, fname): encoder.write_utf8(datum) def write_crc32(encoder, datum): encoder.write_crc32(datum) def write_fixed(encoder, datum, schema, named_schemas, fname): if len(datum) != schema["size"]: raise ValueError( f"data of length {len(datum)} does not match schema size: {schema}" ) encoder.write_fixed(datum) def write_enum(encoder, datum, schema, named_schemas, fname): index = schema["symbols"].index(datum) encoder.write_enum(index) def write_array(encoder, datum, schema, named_schemas, fname): encoder.write_array_start() if len(datum) > 0: encoder.write_item_count(len(datum)) dtype = schema["items"] for item in datum: write_data(encoder, item, dtype, named_schemas, fname) encoder.end_item() encoder.write_array_end() def write_map(encoder, datum, schema, named_schemas, fname): encoder.write_map_start() if len(datum) > 0: encoder.write_item_count(len(datum)) vtype = schema["values"] for key, val in datum.items(): encoder.write_utf8(key) write_data(encoder, val, vtype, named_schemas, fname) encoder.write_map_end() def write_union(encoder, datum, schema, named_schemas, fname): best_match_index = -1 if isinstance(datum, tuple): (name, datum) = datum for index, candidate in enumerate(schema): extracted_type = extract_record_type(candidate) if extracted_type in NAMED_TYPES: schema_name = candidate["name"] else: schema_name = extracted_type if name == schema_name: best_match_index = index break if best_match_index == -1: field = f"on field {fname}" if fname else "" msg = ( f"provided union type name {name} not found in schema " + f"{schema} {field}" ) raise ValueError(msg) index = best_match_index else: pytype = type(datum) most_fields = -1 could_be_float = False for index, candidate in enumerate(schema): if could_be_float: if extract_record_type(candidate) == "double": best_match_index = index break else: continue if _validate(datum, candidate, named_schemas, raise_errors=False): record_type = extract_record_type(candidate) if record_type == "record": logical_type = extract_logical_type(candidate) if logical_type: prepare = LOGICAL_WRITERS.get(logical_type) if prepare: datum = prepare(datum, candidate) candidate_fields = set(f["name"] for f in candidate["fields"]) datum_fields = set(datum) fields = len(candidate_fields.intersection(datum_fields)) if fields > most_fields: best_match_index = index most_fields = fields elif record_type == "float": best_match_index = 
index could_be_float = True else: best_match_index = index break if best_match_index == -1: field = f"on field {fname}" if fname else "" raise ValueError( f"{repr(datum)} (type {pytype}) do not match {schema} {field}" ) index = best_match_index encoder.write_index(index, schema[index]) write_data(encoder, datum, schema[index], named_schemas, fname) def write_record(encoder, datum, schema, named_schemas, fname): for field in schema["fields"]: name = field["name"] if name not in datum and "default" not in field and "null" not in field["type"]: raise ValueError(f"no value and no default for {name}") write_data( encoder, datum.get(name, field.get("default")), field["type"], named_schemas, name, ) WRITERS = { "null": write_null, "boolean": write_boolean, "string": write_utf8, "int": write_int, "long": write_long, "float": write_float, "double": write_double, "bytes": write_bytes, "fixed": write_fixed, "enum": write_enum, "array": write_array, "map": write_map, "union": write_union, "error_union": write_union, "record": write_record, "error": write_record, } def write_data(encoder, datum, schema, named_schemas, fname): record_type = extract_record_type(schema) logical_type = extract_logical_type(schema) fn = WRITERS.get(record_type) if fn: if logical_type: prepare = LOGICAL_WRITERS.get(logical_type) if prepare: datum = prepare(datum, schema) try: return fn(encoder, datum, schema, named_schemas, fname) except TypeError as ex: if fname: raise TypeError(f"{ex} on field {fname}") raise else: return write_data(encoder, datum, named_schemas[record_type], named_schemas, "") def write_header(encoder, metadata, sync_marker): header = { "magic": MAGIC, "meta": {key: value.encode() for key, value in metadata.items()}, "sync": sync_marker, } write_data(encoder, header, HEADER_SCHEMA, {}, "") def null_write_block(encoder, block_bytes, compression_level): encoder.write_long(len(block_bytes)) encoder._fo.write(block_bytes) def deflate_write_block(encoder, block_bytes, compression_level): if compression_level is not None: data = zlib.compress(block_bytes, compression_level)[2:-1] else: data = zlib.compress(block_bytes)[2:-1] encoder.write_long(len(data)) encoder._fo.write(data) def bzip2_write_block(encoder, block_bytes, compression_level): data = bz2.compress(block_bytes) encoder.write_long(len(data)) encoder._fo.write(data)
MIT License
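The "xz" codec above is just lzma.compress followed by a length prefix written through fastavro's binary encoder. A minimal round-trip sketch of the compression step, independent of fastavro's internal encoder:

import lzma

block = b"avro block bytes" * 100
compressed = lzma.compress(block)            # what xz_write_block writes after the length prefix
assert lzma.decompress(compressed) == block  # readers reverse the step with lzma.decompress
print(len(block), "->", len(compressed))     # repetitive data compresses well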
zihenglin/lstm-mobility-model
lstm_mobility_model/components/historical_distributions.py
HistoricalSpatialDistribution.load_d_file
python
def load_d_file(d_path):
    D_total = {}
    os.chdir(d_path)
    for fi in glob.glob("*.npy"):
        typ = fi.split('.npy')[0]
        D_total[typ] = np.load(fi)
    return {'Other': D_total['Other_weekday']}
Joint dist of dh and dw
https://github.com/zihenglin/lstm-mobility-model/blob/36f21b46a5c9382f90ece561a3efb1885be3c74f/lstm_mobility_model/components/historical_distributions.py#L151-L161
import glob import os import tensorflow as tf import numpy as np class HistoricalTemporalDistribution(object): DEFAULT_DURATION_MATRIX_FILES = ['Home_weekday', 'Work', 'Other_weekday'] DEFAULT_TEMORAL_SCALE = 4 MAXIMUM_START_HOUR = 23.9 def __init__(self, duration_matrices): self.duration_matrices = duration_matrices @staticmethod def from_duration_matrix_files(duration_matrix_path, duration_matrix_files=None): duration_matrix_files = HistoricalTemporalDistribution.DEFAULT_DURATION_MATRIX_FILES if duration_matrix_files is None else duration_matrix_files duration_matrices = [] for file_name in duration_matrix_files: duration_matrices.append(np.load(os.path.join(duration_matrix_path, file_name + '.npy'))) return HistoricalTemporalDistribution( tf.Variable(np.array(duration_matrices), dtype='float', name='duration_matrix')) def _get_flat_transformed_start_hour(self, start_hour, max_start_hour=MAXIMUM_START_HOUR): start_hour = tf.reshape(tf.minimum(start_hour, max_start_hour), [-1]) start_hour *= HistoricalTemporalDistribution.DEFAULT_TEMORAL_SCALE return tf.cast(start_hour, 'int32') def _get_gathered_duration_matrices(self, flat_activity_type_indices): return tf.gather(self.duration_matrices, flat_activity_type_indices) def _get_duration_distributions(self, flat_transformed_start_hour, gathered_duration_matrices): total_data_length = flat_transformed_start_hour.get_shape()[0].value range_indices = np.array(range(total_data_length))[:, np.newaxis] flat_transformed_start_hour = tf.reshape( flat_transformed_start_hour, [total_data_length, 1]) start_hour_2d_indices = tf.concat([range_indices, flat_transformed_start_hour], axis=1) return tf.gather_nd(gathered_duration_matrices, start_hour_2d_indices) def sample_duration(self, activity_type_ind, start_hour, minimum_duration=0, **kwargs): flat_activity_type_ind = tf.cast(tf.reshape(activity_type_ind, [-1]), 'int32') gathered_duration_matrices = self._get_gathered_duration_matrices(flat_activity_type_ind) flat_start_hour = self._get_flat_transformed_start_hour(start_hour) duration_distributions = self._get_duration_distributions(flat_start_hour, gathered_duration_matrices) sampled_duration_transformed = tf.multinomial( tf.log(duration_distributions), 1, **kwargs) sampled_duration = tf.cast(sampled_duration_transformed, 'float32') / HistoricalTemporalDistribution.DEFAULT_TEMORAL_SCALE sampled_duration = tf.maximum(sampled_duration, minimum_duration) return tf.reshape(sampled_duration, start_hour.get_shape()) class HistoricalSpatialDistribution(object): def __init__(self): pass @staticmethod
MIT License
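A side note on the snippet above: it calls os.chdir, which changes the process working directory as a side effect. A hedged alternative sketch that loads every .npy file in a directory into a dict keyed by file stem without changing directories (load_matrices is an illustrative name, not part of the repository):

import glob
import os

import numpy as np

def load_matrices(d_path):
    # Load every .npy file under d_path, keyed by its file name without extension.
    matrices = {}
    for fi in glob.glob(os.path.join(d_path, "*.npy")):
        key = os.path.splitext(os.path.basename(fi))[0]
        matrices[key] = np.load(fi)
    return matrices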
yelp/mysql_streamer
replication_handler/helpers/lists.py
unlist
python
def unlist(a_list):
    if len(a_list) > 1:
        raise ValueError(len(a_list))
    if len(a_list) == 0:
        return None
    else:
        return a_list[0]
Convert the (possibly) single item list into a single item
https://github.com/yelp/mysql_streamer/blob/568b807458ef93f1adce56f89665bce5a6e3f8f5/replication_handler/helpers/lists.py#L23-L31
from __future__ import absolute_import
from __future__ import unicode_literals
Apache License 2.0
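Expected behavior of unlist, shown with a self-contained copy of the helper (the original lives in replication_handler/helpers/lists.py):

def unlist(a_list):
    # Single item: return it. Empty list: return None. Longer list: raise.
    if len(a_list) > 1:
        raise ValueError(len(a_list))
    if len(a_list) == 0:
        return None
    return a_list[0]

assert unlist([42]) == 42
assert unlist([]) is None
try:
    unlist([1, 2])
except ValueError as exc:
    print("too many items:", exc)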
raamana/pyradigm
pyradigm/multiple.py
BaseMultiDataset.get_subsets
python
def get_subsets(self, subset_list):
    for modality, data in self._modalities.items():
        yield modality, (
            (np.array(itemgetter(*subset)(data)),
             np.array(itemgetter(*subset)(self.targets)))
            for subset in subset_list
        )
Returns the requested subsets of data while iterating over modalities. If subset_list were to contain two sets of IDs, e.g. (train, test), the returned data would be this tuple: (modality, (train_data, train_targets), (test_data, test_targets))
https://github.com/raamana/pyradigm/blob/7c40b228fcd3b447c738852ef9c870a8d679da98/pyradigm/multiple.py#L250-L264
import random from warnings import warn from collections.abc import Iterable from collections import Counter from copy import copy from operator import itemgetter from sys import version_info from abc import abstractmethod import numpy as np if version_info.major > 2: from pyradigm.base import BaseDataset, CompatibilityException from pyradigm import MLDataset, ClassificationDataset as ClfDataset, RegressionDataset as RegrDataset else: raise NotImplementedError('pyradigm supports only python 3 or higher! ' 'Upgrade to Python 3+ is recommended.') class BaseMultiDataset(object): def __init__(self, dataset_class=BaseDataset, dataset_spec=None, name='MultiDataset'): if issubclass(dataset_class, BaseDataset): self._dataset_class = dataset_class else: raise TypeError('Input class type is not recognized!' ' Must be a child class of pyradigm.BaseDataset') self.name = name self._list = list() self._is_init = False self.modality_count = 0 self._ids = set() self.targets = dict() self._modalities = dict() self._labels = dict() self.feature_names = dict() self.num_features = list() if dataset_spec is not None: if not isinstance(dataset_spec, Iterable) or len(dataset_spec) < 1: raise ValueError('Input must be a list of atleast two datasets.') self._load(dataset_spec) def _load(self, dataset_spec): for idx, ds in enumerate(dataset_spec): self.append(ds, idx) def _get_id(self): self.modality_count += 1 return self.modality_count def append(self, dataset, identifier): if isinstance(dataset, str): dataset = self._dataset_class(dataset_path=dataset) if not isinstance(dataset, self._dataset_class): raise CompatibilityException('Incompatible dataset. ' 'You can only add instances of ' 'type {}'.format(self._dataset_class)) if len(dataset.description)>0: identifier = dataset.description if not self._is_init: self._ids = set(dataset.samplet_ids) self.targets = dataset.targets self._target_sizes = dataset.target_sizes self.num_samplets = len(self._ids) self._modalities[identifier] = dataset.data self.feature_names[identifier] = dataset.feature_names self.num_features.append(dataset.num_features) self._dataset = copy(dataset) self._dataset.data = {id_: np.zeros(1) for id_ in self._ids} if hasattr(dataset, 'attr'): self._common_attr = dataset.attr self._common_attr_dtype = dataset.attr_dtype else: self._common_attr = dict() self._common_attr_dtype = dict() self._attr = dict() self._is_init = True else: if set(dataset.samplet_ids) != self._ids: raise CompatibilityException( 'Differing set of IDs in two datasets.' ' Unable to add this dataset to the MultiDataset.') if dataset.targets != self.targets: raise CompatibilityException( 'Targets for some IDs differ in the two datasets.' ' Unable to add this dataset to the MultiDataset.') if identifier not in self._modalities: self._modalities[identifier] = dataset.data self.feature_names[identifier] = dataset.feature_names self.num_features.append(dataset.num_features) else: raise KeyError('{} already exists in MultiDataset' ''.format(identifier)) if hasattr(dataset, 'attr'): if len(self._common_attr) < 1: self._common_attr = dataset.attr.copy() self._common_attr_dtype = dataset.attr_dtype.copy() else: for a_name in dataset.attr: if a_name not in self._common_attr: self._common_attr[a_name] = dataset.attr[a_name] self._common_attr_dtype[a_name] = dataset.attr_dtype[a_name] elif self._common_attr[a_name] != dataset.attr[a_name]: raise ValueError( 'Values and/or IDs differ for attribute {}. 
' 'Ensure all datasets have common attributes ' 'with the same values'.format(a_name)) self.modality_count += 1 @property def samplet_ids(self): return list(self._ids) @property def modality_ids(self): return sorted(list(self._modalities.keys())) @abstractmethod def __str__(self): def _common_str(self): return "{}:\n\t{} samples, {} modalities, dims: {}" "\n\tIdentifiers: {}" "\n\tAttributes: {}" "".format(self.name, self.num_samplets, self.modality_count, self.num_features, ', '.join([str(k) for k in self.modality_ids]), ', '.join([str(k) for k in self._common_attr.keys()])) @abstractmethod def holdout(self, train_perc=0.7, num_rep=50, return_ids_only=False, format='MLDataset'): def _get_data(self, id_list, format='MLDataset'): format = format.lower() features = list() for modality, data in self._modalities.items(): if format in ('ndarray', 'data_matrix'): subset = np.array(itemgetter(*id_list)(data)) elif format in ('mldataset', 'pyradigm'): subset = self._dataset.get_subset(id_list) subset.data = { id_: data[id_] for id_ in id_list } else: raise ValueError('Invalid output format - choose only one of ' 'pyradigm or data_matrix') features.append(subset) return features def __iter__(self): for modality, data in self._modalities.items(): yield modality, np.array([np.array(item) for item in data.values()])
MIT License
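The core trick in get_subsets is operator.itemgetter with multiple keys, which pulls several samplet IDs out of a dict in one call. A small standalone sketch (the dict contents are illustrative):

from operator import itemgetter

import numpy as np

data = {'s1': [1.0, 2.0], 's2': [3.0, 4.0], 's3': [5.0, 6.0]}
subset_ids = ('s1', 's3')

# itemgetter(*ids)(mapping) returns the values for those ids, in order.
subset = np.array(itemgetter(*subset_ids)(data))
print(subset)   # rows for s1 and s3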
ansible-community/ansible-lint
src/ansiblelint/file_utils.py
Lintable.content
python
def content(self) -> str:
    if self._content is None:
        with open(self.path, mode='r', encoding='utf-8') as f:
            self._content = f.read()
    return self._content
Retrieve file content, from internal cache or disk.
https://github.com/ansible-community/ansible-lint/blob/306573167ad21c37a5aa72017bda57e1bad28c80/src/ansiblelint/file_utils.py#L185-L190
import copy import logging import os import pathlib import subprocess import sys from argparse import Namespace from collections import OrderedDict from contextlib import contextmanager from pathlib import Path from tempfile import NamedTemporaryFile from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Set, Union import wcmatch.pathlib from wcmatch.wcmatch import RECURSIVE, WcMatch from ansiblelint.config import BASE_KINDS, options from ansiblelint.constants import FileType if TYPE_CHECKING: BasePathLike = os.PathLike[Any] else: BasePathLike = os.PathLike _logger = logging.getLogger(__package__) def normpath(path: Union[str, BasePathLike]) -> str: relpath = os.path.relpath(str(path)) abspath = os.path.abspath(str(path)) if abspath in relpath: return abspath return relpath @contextmanager def cwd(path: Union[str, BasePathLike]) -> Iterator[None]: old_pwd = os.getcwd() os.chdir(path) try: yield finally: os.chdir(old_pwd) def expand_path_vars(path: str) -> str: path = str(path).strip() path = os.path.expanduser(path) path = os.path.expandvars(path) return path def expand_paths_vars(paths: List[str]) -> List[str]: paths = [expand_path_vars(p) for p in paths] return paths def kind_from_path(path: Path, base: bool = False) -> FileType: pathex = wcmatch.pathlib.PurePath(path.absolute().resolve()) kinds = options.kinds if not base else BASE_KINDS for entry in kinds: for k, v in entry.items(): if pathex.globmatch( v, flags=( wcmatch.pathlib.GLOBSTAR | wcmatch.pathlib.BRACE | wcmatch.pathlib.DOTGLOB ), ): return str(k) if base: return "" if path.is_dir(): return "role" if str(path) == '/dev/stdin': return "playbook" return "" class Lintable: def __init__( self, name: Union[str, Path], content: Optional[str] = None, kind: Optional[FileType] = None, ): self.filename: str = str(name) self.dir: str = "" self.kind: Optional[FileType] = None if isinstance(name, str): self.name = normpath(name) self.path = Path(self.name) else: self.name = str(name) self.path = name self._content = content self.role = "" parts = self.path.parent.parts if 'roles' in parts: role = self.path while role.parent.name != "roles" and role.name: role = role.parent if role.exists: self.role = role.name if str(self.path) in ['/dev/stdin', '-']: self.file = NamedTemporaryFile(mode="w+", suffix="playbook.yml") self.filename = self.file.name self._content = sys.stdin.read() self.file.write(self._content) self.file.flush() self.path = Path(self.file.name) self.name = 'stdin' self.kind = 'playbook' self.dir = '/' else: self.kind = kind or kind_from_path(self.path) if not self.dir: if self.kind == "role": self.dir = str(self.path.resolve()) else: self.dir = str(self.path.parent.resolve()) self.base_kind = kind_from_path(self.path, base=True) def __getitem__(self, key: Any) -> Any: if key == 'path': return str(self.path) if key == 'type': return str(self.kind) raise NotImplementedError() def get(self, key: Any, default: Any = None) -> Any: try: return self.__getitem__(key) except NotImplementedError: return default @property
MIT License
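The content property implements a read-once lazy cache: a pre-seeded _content (as for stdin in the context above) skips the disk read, otherwise the file is read on first access and memoized. A minimal standalone sketch of that pattern (CachedFile is an illustrative name, not the ansible-lint class):

from pathlib import Path
from typing import Optional

class CachedFile:
    def __init__(self, path: Path, content: Optional[str] = None) -> None:
        self.path = path
        self._content = content   # may be pre-seeded, e.g. from stdin or tests

    @property
    def content(self) -> str:
        # Read from disk only on first access, then serve the cached copy.
        if self._content is None:
            self._content = self.path.read_text(encoding='utf-8')
        return self._content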
databand-ai/dbnd
modules/dbnd/src/dbnd/_vendor_package/google/protobuf/text_format.py
MergeLines
python
def MergeLines(lines,
               message,
               allow_unknown_extension=False,
               allow_field_number=False,
               descriptor_pool=None,
               allow_unknown_field=False):
    parser = _Parser(allow_unknown_extension,
                     allow_field_number,
                     descriptor_pool=descriptor_pool,
                     allow_unknown_field=allow_unknown_field)
    return parser.MergeLines(lines, message)
Parses a text representation of a protocol message into a message. See Merge() for more details.

Args:
  lines: An iterable of lines of a message's text representation.
  message: A protocol buffer message to merge into.
  allow_unknown_extension: if True, skip over missing extensions and keep parsing
  allow_field_number: if True, both field number and field name are allowed.
  descriptor_pool: A DescriptorPool used to resolve Any types.
  allow_unknown_field: if True, skip over unknown field and keep parsing. Avoid to use this option if possible. It may hide some errors (e.g. spelling error on field name)

Returns:
  The same message passed as argument.

Raises:
  ParseError: On text parsing problems.
https://github.com/databand-ai/dbnd/blob/ec0076f9a142b20e2f7afd886ed1a18683c553ec/modules/dbnd/src/dbnd/_vendor_package/google/protobuf/text_format.py#L771-L802
__author__ = 'kenton@google.com (Kenton Varda)' import encodings.raw_unicode_escape import encodings.unicode_escape import io import re import six if six.PY3: long = int from google.protobuf.internal import decoder from google.protobuf.internal import type_checkers from google.protobuf import descriptor from google.protobuf import text_encoding __all__ = ['MessageToString', 'Parse', 'PrintMessage', 'PrintField', 'PrintFieldValue', 'Merge', 'MessageToBytes'] _INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(), type_checkers.Int32ValueChecker(), type_checkers.Uint64ValueChecker(), type_checkers.Int64ValueChecker()) _FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?$', re.IGNORECASE) _FLOAT_NAN = re.compile('nanf?$', re.IGNORECASE) _QUOTES = frozenset(("'", '"')) _ANY_FULL_TYPE_NAME = 'google.protobuf.Any' class Error(Exception): class ParseError(Error): def __init__(self, message=None, line=None, column=None): if message is not None and line is not None: loc = str(line) if column is not None: loc += ':{0}'.format(column) message = '{0} : {1}'.format(loc, message) if message is not None: super(ParseError, self).__init__(message) else: super(ParseError, self).__init__() self._line = line self._column = column def GetLine(self): return self._line def GetColumn(self): return self._column class TextWriter(object): def __init__(self, as_utf8): if six.PY2: self._writer = io.BytesIO() else: self._writer = io.StringIO() def write(self, val): if six.PY2: if isinstance(val, six.text_type): val = val.encode('utf-8') return self._writer.write(val) def close(self): return self._writer.close() def getvalue(self): return self._writer.getvalue() def MessageToString( message, as_utf8=False, as_one_line=False, use_short_repeated_primitives=False, pointy_brackets=False, use_index_order=False, float_format=None, double_format=None, use_field_number=False, descriptor_pool=None, indent=0, message_formatter=None, print_unknown_fields=False, force_colon=False): out = TextWriter(as_utf8) printer = _Printer( out, indent, as_utf8, as_one_line, use_short_repeated_primitives, pointy_brackets, use_index_order, float_format, double_format, use_field_number, descriptor_pool, message_formatter, print_unknown_fields=print_unknown_fields, force_colon=force_colon) printer.PrintMessage(message) result = out.getvalue() out.close() if as_one_line: return result.rstrip() return result def MessageToBytes(message, **kwargs): text = MessageToString(message, **kwargs) if isinstance(text, bytes): return text codec = 'utf-8' if kwargs.get('as_utf8') else 'ascii' return text.encode(codec) def _IsMapEntry(field): return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and field.message_type.has_options and field.message_type.GetOptions().map_entry) def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False, use_short_repeated_primitives=False, pointy_brackets=False, use_index_order=False, float_format=None, double_format=None, use_field_number=False, descriptor_pool=None, message_formatter=None, print_unknown_fields=False, force_colon=False): printer = _Printer( out=out, indent=indent, as_utf8=as_utf8, as_one_line=as_one_line, use_short_repeated_primitives=use_short_repeated_primitives, pointy_brackets=pointy_brackets, use_index_order=use_index_order, float_format=float_format, double_format=double_format, use_field_number=use_field_number, descriptor_pool=descriptor_pool, message_formatter=message_formatter, print_unknown_fields=print_unknown_fields, force_colon=force_colon) printer.PrintMessage(message) def 
PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False, use_short_repeated_primitives=False, pointy_brackets=False, use_index_order=False, float_format=None, double_format=None, message_formatter=None, print_unknown_fields=False, force_colon=False): printer = _Printer(out, indent, as_utf8, as_one_line, use_short_repeated_primitives, pointy_brackets, use_index_order, float_format, double_format, message_formatter=message_formatter, print_unknown_fields=print_unknown_fields, force_colon=force_colon) printer.PrintField(field, value) def PrintFieldValue(field, value, out, indent=0, as_utf8=False, as_one_line=False, use_short_repeated_primitives=False, pointy_brackets=False, use_index_order=False, float_format=None, double_format=None, message_formatter=None, print_unknown_fields=False, force_colon=False): printer = _Printer(out, indent, as_utf8, as_one_line, use_short_repeated_primitives, pointy_brackets, use_index_order, float_format, double_format, message_formatter=message_formatter, print_unknown_fields=print_unknown_fields, force_colon=force_colon) printer.PrintFieldValue(field, value) def _BuildMessageFromTypeName(type_name, descriptor_pool): if descriptor_pool is None: from google.protobuf import descriptor_pool as pool_mod descriptor_pool = pool_mod.Default() from google.protobuf import symbol_database database = symbol_database.Default() try: message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) except KeyError: return None message_type = database.GetPrototype(message_descriptor) return message_type() WIRETYPE_LENGTH_DELIMITED = 2 WIRETYPE_START_GROUP = 3 class _Printer(object): def __init__( self, out, indent=0, as_utf8=False, as_one_line=False, use_short_repeated_primitives=False, pointy_brackets=False, use_index_order=False, float_format=None, double_format=None, use_field_number=False, descriptor_pool=None, message_formatter=None, print_unknown_fields=False, force_colon=False): self.out = out self.indent = indent self.as_utf8 = as_utf8 self.as_one_line = as_one_line self.use_short_repeated_primitives = use_short_repeated_primitives self.pointy_brackets = pointy_brackets self.use_index_order = use_index_order self.float_format = float_format if double_format is not None: self.double_format = double_format else: self.double_format = float_format self.use_field_number = use_field_number self.descriptor_pool = descriptor_pool self.message_formatter = message_formatter self.print_unknown_fields = print_unknown_fields self.force_colon = force_colon def _TryPrintAsAnyMessage(self, message): if '/' not in message.type_url: return False packed_message = _BuildMessageFromTypeName(message.TypeName(), self.descriptor_pool) if packed_message: packed_message.MergeFromString(message.value) colon = ':' if self.force_colon else '' self.out.write('%s[%s]%s ' % (self.indent * ' ', message.type_url, colon)) self._PrintMessageFieldValue(packed_message) self.out.write(' ' if self.as_one_line else '\n') return True else: return False def _TryCustomFormatMessage(self, message): formatted = self.message_formatter(message, self.indent, self.as_one_line) if formatted is None: return False out = self.out out.write(' ' * self.indent) out.write(formatted) out.write(' ' if self.as_one_line else '\n') return True def PrintMessage(self, message): if self.message_formatter and self._TryCustomFormatMessage(message): return if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and self._TryPrintAsAnyMessage(message)): return fields = message.ListFields() if self.use_index_order: 
fields.sort( key=lambda x: x[0].number if x[0].is_extension else x[0].index) for field, value in fields: if _IsMapEntry(field): for key in sorted(value): entry_submsg = value.GetEntryClass()(key=key, value=value[key]) self.PrintField(field, entry_submsg) elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: if (self.use_short_repeated_primitives and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE and field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_STRING): self._PrintShortRepeatedPrimitivesValue(field, value) else: for element in value: self.PrintField(field, element) else: self.PrintField(field, value) if self.print_unknown_fields: self._PrintUnknownFields(message.UnknownFields()) def _PrintUnknownFields(self, unknown_fields): out = self.out for field in unknown_fields: out.write(' ' * self.indent) out.write(str(field.field_number)) if field.wire_type == WIRETYPE_START_GROUP: if self.as_one_line: out.write(' { ') else: out.write(' {\n') self.indent += 2 self._PrintUnknownFields(field.data) if self.as_one_line: out.write('} ') else: self.indent -= 2 out.write(' ' * self.indent + '}\n') elif field.wire_type == WIRETYPE_LENGTH_DELIMITED: try: (embedded_unknown_message, pos) = decoder._DecodeUnknownFieldSet( memoryview(field.data), 0, len(field.data)) except Exception: pos = 0 if pos == len(field.data): if self.as_one_line: out.write(' { ') else: out.write(' {\n') self.indent += 2 self._PrintUnknownFields(embedded_unknown_message) if self.as_one_line: out.write('} ') else: self.indent -= 2 out.write(' ' * self.indent + '}\n') else: out.write(': \"') out.write(text_encoding.CEscape(field.data, False)) out.write('\" ' if self.as_one_line else '\"\n') else: out.write(': ') out.write(str(field.data)) out.write(' ' if self.as_one_line else '\n') def _PrintFieldName(self, field): out = self.out out.write(' ' * self.indent) if self.use_field_number: out.write(str(field.number)) else: if field.is_extension: out.write('[') if (field.containing_type.GetOptions().message_set_wire_format and field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL): out.write(field.message_type.full_name) else: out.write(field.full_name) out.write(']') elif field.type == descriptor.FieldDescriptor.TYPE_GROUP: out.write(field.message_type.name) else: out.write(field.name) if (self.force_colon or field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE): out.write(':') def PrintField(self, field, value): self._PrintFieldName(field) self.out.write(' ') self.PrintFieldValue(field, value) self.out.write(' ' if self.as_one_line else '\n') def _PrintShortRepeatedPrimitivesValue(self, field, value): self._PrintFieldName(field) self.out.write(' [') for i in six.moves.range(len(value) - 1): self.PrintFieldValue(field, value[i]) self.out.write(', ') self.PrintFieldValue(field, value[-1]) self.out.write(']') if self.force_colon: self.out.write(':') self.out.write(' ' if self.as_one_line else '\n') def _PrintMessageFieldValue(self, value): if self.pointy_brackets: openb = '<' closeb = '>' else: openb = '{' closeb = '}' if self.as_one_line: self.out.write('%s ' % openb) self.PrintMessage(value) self.out.write(closeb) else: self.out.write('%s\n' % openb) self.indent += 2 self.PrintMessage(value) self.indent -= 2 self.out.write(' ' * self.indent + closeb) def PrintFieldValue(self, field, value): out = self.out if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: self._PrintMessageFieldValue(value) elif field.cpp_type == 
descriptor.FieldDescriptor.CPPTYPE_ENUM: enum_value = field.enum_type.values_by_number.get(value, None) if enum_value is not None: out.write(enum_value.name) else: out.write(str(value)) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: out.write('\"') if isinstance(value, six.text_type) and (six.PY2 or not self.as_utf8): out_value = value.encode('utf-8') else: out_value = value if field.type == descriptor.FieldDescriptor.TYPE_BYTES: out_as_utf8 = False else: out_as_utf8 = self.as_utf8 out.write(text_encoding.CEscape(out_value, out_as_utf8)) out.write('\"') elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: if value: out.write('true') else: out.write('false') elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT: if self.float_format is not None: out.write('{1:{0}}'.format(self.float_format, value)) else: out.write(str(type_checkers.ToShortestFloat(value))) elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_DOUBLE and self.double_format is not None): out.write('{1:{0}}'.format(self.double_format, value)) else: out.write(str(value)) def Parse(text, message, allow_unknown_extension=False, allow_field_number=False, descriptor_pool=None, allow_unknown_field=False): return ParseLines(text.split(b'\n' if isinstance(text, bytes) else u'\n'), message, allow_unknown_extension, allow_field_number, descriptor_pool=descriptor_pool, allow_unknown_field=allow_unknown_field) def Merge(text, message, allow_unknown_extension=False, allow_field_number=False, descriptor_pool=None, allow_unknown_field=False): return MergeLines( text.split(b'\n' if isinstance(text, bytes) else u'\n'), message, allow_unknown_extension, allow_field_number, descriptor_pool=descriptor_pool, allow_unknown_field=allow_unknown_field) def ParseLines(lines, message, allow_unknown_extension=False, allow_field_number=False, descriptor_pool=None, allow_unknown_field=False): parser = _Parser(allow_unknown_extension, allow_field_number, descriptor_pool=descriptor_pool, allow_unknown_field=allow_unknown_field) return parser.ParseLines(lines, message)
Apache License 2.0
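Usage sketch for MergeLines against the upstream google.protobuf API (the snippet above is dbnd's vendored copy with the same signature); Duration is used here only because it is a generated message type that ships with protobuf:

from google.protobuf import text_format
from google.protobuf.duration_pb2 import Duration

msg = Duration()
text_format.MergeLines(["seconds: 3", "nanos: 500"], msg)
print(msg.seconds, msg.nanos)   # 3 500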
repsac/io_three
addons/io_three/exporter/io.py
dump
python
def dump(filepath, data, options=None):
    options = options or {}
    logger.debug("io.dump(%s, data, options=%s)", filepath, options)
    compress = options.get(constants.COMPRESSION, constants.NONE)
    if compress == constants.MSGPACK:
        try:
            import msgpack
        except ImportError:
            logger.error("msgpack module not found")
            raise
        logger.info("Dumping to msgpack")
        func = lambda x, y: msgpack.dump(x, y)
        mode = 'wb'
    else:
        round_off = options.get(constants.ENABLE_PRECISION)
        if round_off:
            _json.ROUND = options[constants.PRECISION]
        else:
            _json.ROUND = None
        indent = options.get(constants.INDENT, True)
        indent = 4 if indent else None
        logger.info("Dumping to JSON")
        func = lambda x, y: _json.json.dump(x, y, indent=indent)
        mode = 'w'
    logger.info("Writing to %s", filepath)
    with open(filepath, mode=mode) as stream:
        func(data, stream)
Dump the output to disk (JSON, msgpack, etc)

:param filepath: output file path
:param data: serializable data to write to disk
:param options: (Default value = None)
:type options: dict
https://github.com/repsac/io_three/blob/56b2f35b7d56aab6df131c7a3aecb43cda915ca3/addons/io_three/exporter/io.py#L38-L76
import os
import shutil

from .. import constants, logger
from . import _json


def copy_registered_textures(dest, registration):
    logger.debug("io.copy_registered_textures(%s, %s)", dest, registration)
    os.makedirs(dest, exist_ok=True)
    for value in registration.values():
        copy(value['file_path'], dest)


def copy(src, dst):
    logger.debug("io.copy(%s, %s)" % (src, dst))
    if os.path.isdir(dst):
        file_name = os.path.basename(src)
        dst = os.path.join(dst, file_name)
    if src != dst:
        shutil.copy(src, dst)
MIT License
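The JSON branch of dump boils down to: resolve an indent setting from the options dict, open the file in text mode, and stream with json.dump. A hedged, dependency-free sketch of just that branch (plain json stands in for the exporter's _json wrapper; the "indent" key and the file name are illustrative):

import json

def dump_json(filepath, data, options=None):
    # Minimal stand-in for the JSON branch of io.dump.
    options = options or {}
    indent = 4 if options.get("indent", True) else None
    with open(filepath, "w") as stream:
        json.dump(data, stream, indent=indent)

dump_json("scene.json", {"metadata": {"version": 4.3}})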
fizzadar/pyinfra
pyinfra/api/operations.py
_run_single_op
python
def _run_single_op(state, op_hash):
    state.trigger_callbacks('operation_start', op_hash)
    op_meta = state.get_op_meta(op_hash)
    _log_operation_start(op_meta)
    failed_hosts = set()
    if op_meta['serial']:
        with progress_spinner(state.inventory.iter_active_hosts()) as progress:
            for host in state.inventory.iter_active_hosts():
                result = _run_server_op(state, host, op_hash)
                progress(host)
                if not result:
                    failed_hosts.add(host)
    else:
        batches = [list(state.inventory.iter_active_hosts())]
        if op_meta['parallel']:
            parallel = op_meta['parallel']
            hosts = list(state.inventory.iter_active_hosts())
            batches = [
                hosts[i:i + parallel]
                for i in range(0, len(hosts), parallel)
            ]
        for batch in batches:
            with progress_spinner(batch) as progress:
                greenlet_to_host = {
                    state.pool.spawn(_run_server_op, state, host, op_hash): host
                    for host in batch
                }
                for greenlet in gevent.iwait(greenlet_to_host.keys()):
                    host = greenlet_to_host[greenlet]
                    progress(host)
                for greenlet, host in six.iteritems(greenlet_to_host):
                    if not greenlet.get():
                        failed_hosts.add(host)
    state.fail_hosts(failed_hosts)
    if pyinfra.is_cli:
        click.echo(err=True)
    state.trigger_callbacks('operation_end', op_hash)
Run a single operation for all servers. Can be configured to run in serial.
https://github.com/fizzadar/pyinfra/blob/2cc084dd2ae3e2f2c62884afe9e96783deeb721b/pyinfra/api/operations.py#L288-L348
from __future__ import print_function, unicode_literals import traceback from itertools import product from socket import ( error as socket_error, timeout as timeout_error, ) import click import gevent import six from paramiko import SSHException import pyinfra from pyinfra import logger from pyinfra.progress import progress_spinner from .command import ( FunctionCommand, PyinfraCommand, StringCommand, ) from .exceptions import PyinfraError from .operation_kwargs import get_executor_kwarg_keys from .util import ( format_exception, log_error_or_warning, log_host_command_error, memoize, print_host_combined_output, ) @memoize def show_pre_or_post_condition_warning(condition_name): logger.warning('The `{0}` argument is in beta!'.format(condition_name)) def _run_shell_command(state, host, command, global_kwargs, executor_kwargs): status = False combined_output_lines = [] try: status, combined_output_lines = command.execute(state, host, executor_kwargs) except (timeout_error, socket_error, SSHException) as e: log_host_command_error( host, e, timeout=global_kwargs['timeout'], ) if status is False and not state.print_output: print_host_combined_output(host, combined_output_lines) return status def _run_server_op(state, host, op_hash): state.trigger_callbacks('operation_host_start', host, op_hash) if op_hash not in state.ops[host]: logger.info('{0}{1}'.format(host.print_prefix, click.style('Skipped', 'blue'))) return True op_data = state.get_op_data(host, op_hash) global_kwargs = op_data['global_kwargs'] op_meta = state.get_op_meta(op_hash) ignore_errors = global_kwargs['ignore_errors'] logger.debug('Starting operation {0} on {1}'.format( ', '.join(op_meta['names']), host, )) executor_kwarg_keys = get_executor_kwarg_keys() base_executor_kwargs = { key: global_kwargs[key] for key in executor_kwarg_keys if key in global_kwargs } precondition = global_kwargs['precondition'] if precondition: show_pre_or_post_condition_warning('precondition') if precondition and not _run_shell_command( state, host, StringCommand(precondition), global_kwargs, base_executor_kwargs, ): log_error_or_warning( host, ignore_errors, description='precondition failed: {0}'.format(precondition), ) if not ignore_errors: state.trigger_callbacks('operation_host_error', host, op_hash) return False state.ops_run.add(op_hash) for i, command in enumerate(op_data['commands']): status = False executor_kwargs = base_executor_kwargs.copy() executor_kwargs.update(command.executor_kwargs) if not isinstance(command, PyinfraCommand): raise TypeError('{0} is an invalid pyinfra command!'.format(command)) if isinstance(command, FunctionCommand): try: status = command.execute(state, host, executor_kwargs) except Exception as e: logger.warning(traceback.format_exc()) logger.error('{0}{1}'.format( host.print_prefix, click.style( 'Unexpected error in Python callback: {0}'.format( format_exception(e), ), 'red', ), )) elif isinstance(command, StringCommand): status = _run_shell_command(state, host, command, global_kwargs, executor_kwargs) else: try: status = command.execute(state, host, executor_kwargs) except (timeout_error, socket_error, SSHException, IOError) as e: log_host_command_error( host, e, timeout=global_kwargs['timeout'], ) if status is False: break state.results[host]['commands'] += 1 else: postcondition = global_kwargs['postcondition'] if postcondition: show_pre_or_post_condition_warning('postcondition') if postcondition and not _run_shell_command( state, host, StringCommand(postcondition), global_kwargs, base_executor_kwargs, ): 
log_error_or_warning( host, ignore_errors, description='postcondition failed: {0}'.format(postcondition), ) if not ignore_errors: state.trigger_callbacks('operation_host_error', host, op_hash) return False state.results[host]['ops'] += 1 state.results[host]['success_ops'] += 1 logger.info('{0}{1}'.format( host.print_prefix, click.style( 'Success' if len(op_data['commands']) > 0 else 'No changes', 'green', ), )) if global_kwargs['on_success']: global_kwargs['on_success'](state, host, op_hash) state.trigger_callbacks('operation_host_success', host, op_hash) return True state.results[host]['error_ops'] += 1 log_error_or_warning(host, ignore_errors) if global_kwargs['on_error']: global_kwargs['on_error'](state, host, op_hash) if ignore_errors: state.results[host]['ops'] += 1 state.trigger_callbacks('operation_host_error', host, op_hash) if ignore_errors: return True return False def _log_operation_start(op_meta): op_types = [] if op_meta['serial']: op_types.append('serial') if op_meta['run_once']: op_types.append('run once') args = '' if op_meta['args']: args = '({0})'.format(', '.join(str(arg) for arg in op_meta['args'])) logger.info('{0} {1} {2}'.format( click.style('--> Starting{0}operation:'.format( ' {0} '.format(', '.join(op_types)) if op_types else ' ', ), 'blue'), click.style(', '.join(op_meta['names']), bold=True), args, )) def _run_server_ops(state, host, progress=None): logger.debug('Running all ops on {0}'.format(host)) for op_hash in state.get_op_order(): op_meta = state.get_op_meta(op_hash) _log_operation_start(op_meta) result = _run_server_op(state, host, op_hash) if progress: progress((host, op_hash)) if result is False: raise PyinfraError('Error in operation {0} on {1}'.format( ', '.join(op_meta['names']), host, )) if pyinfra.is_cli: click.echo(err=True) def _run_serial_ops(state): for host in list(state.inventory.iter_active_hosts()): host_operations = product([host], state.get_op_order()) with progress_spinner(host_operations) as progress: try: _run_server_ops( state, host, progress=progress, ) except PyinfraError: state.fail_hosts({host}) def _run_no_wait_ops(state): hosts_operations = product(state.inventory.iter_active_hosts(), state.get_op_order()) with progress_spinner(hosts_operations) as progress: greenlets = [ state.pool.spawn( _run_server_ops, state, host, progress=progress, ) for host in state.inventory.iter_active_hosts() ] gevent.joinall(greenlets)
MIT License
kislyuk/aegea
aegea/packages/github3/api.py
iter_gists
python
def iter_gists(username=None, number=-1, etag=None):
    return gh.iter_gists(username, number, etag)
Iterate over public gists or gists for the provided username.

:param str username: (optional), if provided, get the gists for this user instead of the authenticated user.
:param int number: (optional), number of gists to return. Default: -1, return all of them
:param str etag: (optional), ETag from a previous request to the same endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`
https://github.com/kislyuk/aegea/blob/585d28475b129bae6a01f9a7a97be558a5f04dd8/aegea/packages/github3/api.py#L166-L178
from .github import GitHub, GitHubEnterprise gh = GitHub() def authorize(login, password, scopes, note='', note_url='', client_id='', client_secret='', two_factor_callback=None): gh = GitHub() gh.login(two_factor_callback=two_factor_callback) return gh.authorize(login, password, scopes, note, note_url, client_id, client_secret) def login(username=None, password=None, token=None, url=None, two_factor_callback=None): g = None if (username and password) or token: g = GitHubEnterprise(url) if url is not None else GitHub() g.login(username, password, token, two_factor_callback) return g def emojis(): return gh.emojis() emojis.__doc__ = gh.emojis.__doc__ def gist(id_num): return gh.gist(id_num) def gitignore_template(language): return gh.gitignore_template(language) def gitignore_templates(): return gh.gitignore_templates() def iter_all_repos(number=-1, etag=None): return gh.iter_all_repos(number, etag) def iter_all_users(number=-1, etag=None): return gh.iter_all_users(number, etag) def iter_events(number=-1, etag=None): return gh.iter_events(number, etag) def iter_followers(username, number=-1, etag=None): return gh.iter_followers(username, number, etag) if username else [] def iter_following(username, number=-1, etag=None): return gh.iter_following(username, number, etag) if username else []
Apache License 2.0
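Module-level usage sketch, assuming the vendored package imports under the path shown above; "octocat", the count of 5, and the html_url attribute access are illustrative, and unauthenticated calls are rate limited:

from aegea.packages.github3 import api

# Iterate the first five public gists of a user through the module-level helper.
for gist in api.iter_gists(username="octocat", number=5):
    print(gist.html_url)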
abarker/camel-snake-pep8
camel_snake_pep8.py
compare_changes_with_final_names
python
def compare_changes_with_final_names(module_realpath_list, changes_dict, accepted=True):
    if accepted:
        accept_or_reject_word = "Accepted"
        accept_reject_warning = (" The original name before the change still appears"
                                 " in these modules:")
    else:
        accept_or_reject_word = "Rejected"
        accept_reject_warning = (" The name suggested but rejected appears in these"
                                 " modules:")
    for path, change_set in changes_dict.items():
        printed_header = False
        for name, new_name in change_set:
            found_file_paths = set()
            for module_realpath in module_realpath_list:
                final_names_set = final_names_sets_dict[module_realpath]
                name_to_search_for = name if accepted else new_name
                if name_to_search_for in final_names_set:
                    found_file_paths.add(module_realpath)
            if found_file_paths:
                if not printed_header:
                    print_warning("Warnings for module {0}".format(
                        os.path.relpath(path, project_dir_realpath)), "\n")
                    printed_header = True
                print(" {0} change: {1} to {2}.".format(
                    accept_or_reject_word,
                    CURR_NAME_COLOR + name + RESET,
                    NEW_NAME_COLOR + new_name + RESET))
                print_warning(accept_reject_warning)
                for fpath in sorted(found_file_paths):
                    print(" ", os.path.relpath(fpath, project_dir_realpath))
                print()
This routine does the real work for `analyze_names_in_final_state`, looping over the designated changes and modules.
https://github.com/abarker/camel-snake-pep8/blob/c328293ea8cd5875eda506f35d45ef293c9ec15f/camel_snake_pep8.py#L135-L168
from __future__ import print_function, division import sys import os import re import itertools from collections import defaultdict import argparse import fnmatch import glob import platform import rope from rope.base.project import Project from rope.base.libutils import get_string_scope, get_string_module from rope.refactor.rename import Rename from rope.base import worder from colorama import Fore, Back, Style, init as colorama_init colorama_init() system_os = platform.system() change_function_and_method_names = True change_function_and_method_arguments = True change_function_and_method_keywords = True change_assigned_variables = True change_class_names = True BANNER_WIDTH = 78 if system_os == "Windows": BLUE_INFO_COLOR = Fore.BLUE + Back.WHITE + Style.BRIGHT YELLOW_WARNING_COLOR = Fore.YELLOW + Back.BLACK + Style.BRIGHT RED_ERROR_COLOR = Fore.RED NEW_NAME_COLOR = Fore.GREEN CURR_NAME_COLOR = Fore.CYAN else: BLUE_INFO_COLOR = Fore.BLUE + Style.BRIGHT YELLOW_WARNING_COLOR = Fore.YELLOW RED_ERROR_COLOR = Fore.RED NEW_NAME_COLOR = Fore.GREEN CURR_NAME_COLOR = Fore.CYAN RESET = Style.RESET_ALL RESET = Style.RESET_ALL REJECTED_CHANGE_MAGIC_COOKIE = "_XxX_CamelSnakePep8_PreserveName_XxX_" SOA_FOLLOWED_CALLS = 1 python_version = sys.version_info[0] filterfalse = itertools.ifilterfalse if python_version == 2 else itertools.filterfalse original_names_sets_dict = {} final_names_sets_dict = {} modified_modules_set = set() def user_input(*args, **kwargs): if python_version == 2: print(*args, end="") else: print(*args, end="", flush=True) if python_version == 2: input_fun = raw_input else: input_fun = input if cmdline_args.yes_to_all: print("y") return "y" if cmdline_args.yes_no_default: print("") return "" return input_fun(*args, **kwargs) def save_set_of_all_names_in_module(file_realpath, save_dict): names_in_module = rope_iterate_worder(file_realpath, unfiltered=True) name_set = set(c[0] for c in names_in_module) if file_realpath not in save_dict: save_dict[file_realpath] = name_set user_accepted_changes_sets_dict = defaultdict(set) user_rejected_changes_sets_dict = defaultdict(set) rope_rejected_changes_sets_dict = defaultdict(set) def save_changes(realpath_list, change, user=True, accepted=True): if len(change) > 2: change = (change[1], change[3]) for path in realpath_list: if user and accepted: user_accepted_changes_sets_dict[path].add(change) elif user and not accepted: user_rejected_changes_sets_dict[path].add(change) else: rope_rejected_changes_sets_dict[path].add(change)
MIT License
hass-emulated-hue/core
emulated_hue/config.py
Config.bridge_serial
python
def bridge_serial(self) -> str:
    return self._bridge_serial
Return the bridge serial of the emulated bridge.
https://github.com/hass-emulated-hue/core/blob/7dbc4dfe6a39f67695e888179c8e02e7bf005570/emulated_hue/config.py#L95-L97
import datetime import hashlib import logging import os from typing import TYPE_CHECKING, Any, Optional from getmac import get_mac_address from .utils import async_save_json, create_secure_string, get_local_ip, load_json if TYPE_CHECKING: from emulated_hue import HueEmulator else: HueEmulator = "HueEmulator" LOGGER = logging.getLogger(__name__) CONFIG_FILE = "emulated_hue.json" DEFINITIONS_FILE = os.path.join( os.path.dirname(os.path.abspath(__file__)), "definitions.json" ) DEFAULT_THROTTLE_MS = 0 class Config: def __init__( self, hue: HueEmulator, data_path: str, http_port: int, https_port: int, use_default_ports: bool, ): self.hue = hue self.data_path = data_path if not os.path.isdir(data_path): os.mkdir(data_path) self._config = load_json(self.get_path(CONFIG_FILE)) self._definitions = load_json(DEFINITIONS_FILE) self._link_mode_enabled = False self._link_mode_discovery_key = None self._ip_addr = get_local_ip() LOGGER.info("Auto detected listen IP address is %s", self.ip_addr) self.http_port = http_port self.https_port = https_port self.use_default_ports = use_default_ports if http_port != 80 or https_port != 443: LOGGER.warning( "Non default http/https ports detected. Hue apps require the bridge at the default ports 80/443, use at your own risk." ) if self.use_default_ports: LOGGER.warning( "Using default HTTP port for discovery with non default HTTP/S ports. Are you using a reverse proxy?" ) mac_addr = str(get_mac_address(ip=self.ip_addr)) if not mac_addr or len(mac_addr) < 16: mac_addr = str(get_mac_address()) if not mac_addr or len(mac_addr) < 16: mac_addr = "b6:82:d3:45:ac:29" self._mac_addr = mac_addr mac_str = mac_addr.replace(":", "") self._bridge_id = (mac_str[:6] + "FFFE" + mac_str[6:]).upper() self._bridge_serial = mac_str.lower() self._bridge_uid = f"2f402f80-da50-11e1-9b23-{mac_str}" @property def ip_addr(self) -> str: return self._ip_addr @property def mac_addr(self) -> str: return self._mac_addr @property def bridge_id(self) -> str: return self._bridge_id @property
Apache License 2.0
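How the serial relates to the MAC address, following the Config constructor in the context above (the MAC shown is the fallback value hard-coded there):

mac_addr = "b6:82:d3:45:ac:29"
mac_str = mac_addr.replace(":", "")

bridge_id = (mac_str[:6] + "FFFE" + mac_str[6:]).upper()
bridge_serial = mac_str.lower()

print(bridge_id)       # B682D3FFFE45AC29
print(bridge_serial)   # b682d345ac29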
google/objax
examples/fixmatch/libml/augment/randaugment/augment_ops.py
shear_y
python
def shear_y(image, level):
    image = tfa.image.transform_ops.transform(
        wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
    return unwrap(image)
Equivalent of PIL Shearing in Y dimension.
https://github.com/google/objax/blob/647d28bfd2fa47daa416f0f77f5f09c1bcda3c6f/examples/fixmatch/libml/augment/randaugment/augment_ops.py#L193-L201
import math import tensorflow as tf import tensorflow_addons as tfa REPLACE_VALUE = 128 def blend(image1, image2, factor): image1 = tf.cast(image1, tf.float32) image2 = tf.cast(image2, tf.float32) return tf.saturate_cast(image1 + factor * (image2 - image1), tf.uint8) def wrap(image): shape = tf.shape(image) extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype) extended = tf.concat([image, extended_channel], 2) return extended def unwrap(image): image_shape = tf.shape(image) flattened_image = tf.reshape(image, [-1, image_shape[2]]) alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1) replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1], image.dtype) flattened_image = tf.where( tf.equal(alpha_channel, 0), tf.ones_like(flattened_image, dtype=image.dtype) * replace, flattened_image) image = tf.reshape(flattened_image, image_shape) image = tf.slice( image, [0, 0, 0], [image_shape[0], image_shape[1], image_shape[2] - 1]) return image def solarize(image, threshold=128): threshold = tf.saturate_cast(threshold, image.dtype) return tf.where(image < threshold, image, 255 - image) def solarize_add(image, addition=0, threshold=128): threshold = tf.saturate_cast(threshold, image.dtype) added_im = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32) added_im = tf.saturate_cast(added_im, tf.uint8) return tf.where(image < threshold, added_im, image) def invert(image): return 255 - tf.convert_to_tensor(image) def invert_blend(image, factor): return blend(invert(image), image, factor) def color(image, factor): degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image)) return blend(degenerate, image, factor) def contrast(image, factor): grayscale_im = tf.image.rgb_to_grayscale(image) mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32)) mean = tf.saturate_cast(mean + 0.5, tf.uint8) degenerate = tf.ones_like(grayscale_im, dtype=tf.uint8) * mean degenerate = tf.image.grayscale_to_rgb(degenerate) return blend(degenerate, image, factor) def brightness(image, factor): degenerate = tf.zeros_like(image) return blend(degenerate, image, factor) def posterize(image, bits): shift = tf.cast(8 - bits, image.dtype) return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) def rotate(image, degrees): degrees_to_radians = math.pi / 180.0 radians = degrees * degrees_to_radians image = tfa.image.transform_ops.rotate(wrap(image), radians) return unwrap(image) def translate_x(image, pixels): image = tfa.image.translate_ops.translate(wrap(image), [-pixels, 0]) return unwrap(image) def translate_y(image, pixels): image = tfa.image.translate_ops.translate(wrap(image), [0, -pixels]) return unwrap(image) def shear_x(image, level): image = tfa.image.transform_ops.transform( wrap(image), [1., level, 0., 0., 1., 0., 0., 0.]) return unwrap(image)
Apache License 2.0
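The eight numbers passed to the transform are a flattened projective transform [a0, a1, a2, b0, b1, b2, c0, c1]; setting b0 = level shears along y. A sketch using the public tfa.image.transform entry point (the snippet above goes through the internal transform_ops module) on a dummy image:

import tensorflow as tf
import tensorflow_addons as tfa

image = tf.zeros([32, 32, 3], dtype=tf.float32)
level = 0.3

# Output pixel (x', y') samples input pixel (x', level * x' + y'), i.e. a shear in y.
sheared = tfa.image.transform(image, [1., 0., 0., level, 1., 0., 0., 0.])
print(sheared.shape)   # (32, 32, 3)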
arise-initiative/robomimic
robomimic/algo/bcq.py
BCQ._train_actor_on_batch
python
def _train_actor_on_batch(self, batch, action_sampler_outputs, critic_outputs, epoch, no_backprop=False):
    assert self.algo_config.actor.enabled
    info = OrderedDict()
    s_batch = batch["obs"]
    goal_s_batch = batch["goal_obs"]
    sampled_actions = self.nets["action_sampler"](s_batch, goal_s_batch).detach()
    perturbed_actions = self.nets["actor"](s_batch, sampled_actions, goal_s_batch)
    actor_loss = -(self.nets["critic"][0](s_batch, perturbed_actions, goal_s_batch)).mean()
    info["actor/loss"] = actor_loss
    if not no_backprop:
        actor_grad_norms = TorchUtils.backprop_for_loss(
            net=self.nets["actor"],
            optim=self.optimizers["actor"],
            loss=actor_loss,
        )
        info["actor/grad_norms"] = actor_grad_norms
    return info
A modular helper function that can be overridden in case subclasses would like to modify training behavior for the perturbation actor.

Args:
    batch (dict): dictionary with torch.Tensors sampled from a data loader and filtered by @process_batch_for_training
    action_sampler_outputs (dict): dictionary of outputs from the action sampler. Currently unused, although more sophisticated models may use it.
    critic_outputs (dict): dictionary of outputs from the critic. Currently unused, although more sophisticated models may use it.
    epoch (int): epoch number - required by some Algos that need to perform staged training and early stopping
    no_backprop (bool): if True, don't perform any learning updates.

Returns:
    info (dict): dictionary of relevant inputs, outputs, and losses that might be relevant for logging
https://github.com/arise-initiative/robomimic/blob/2804dd97dd1625ec861298a35cb677129d3bfacc/robomimic/algo/bcq.py#L338-L386
from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F import robomimic.models.obs_nets as ObsNets import robomimic.models.policy_nets as PolicyNets import robomimic.models.value_nets as ValueNets import robomimic.models.vae_nets as VAENets import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.torch_utils as TorchUtils import robomimic.utils.obs_utils as ObsUtils import robomimic.utils.loss_utils as LossUtils from robomimic.algo import register_algo_factory_func, PolicyAlgo, ValueAlgo @register_algo_factory_func("bcq") def algo_config_to_class(algo_config): if algo_config.critic.distributional.enabled: return BCQ_Distributional, {} if algo_config.action_sampler.gmm.enabled: return BCQ_GMM, {} assert algo_config.action_sampler.vae.enabled return BCQ, {} class BCQ(PolicyAlgo, ValueAlgo): def __init__(self, **kwargs): PolicyAlgo.__init__(self, **kwargs) self.set_discount(self.algo_config.discount) def _create_networks(self): self.nets = nn.ModuleDict() self._create_critics() self._create_action_sampler() if self.algo_config.actor.enabled: self._create_actor() with torch.no_grad(): for critic_ind in range(len(self.nets["critic"])): TorchUtils.hard_update( source=self.nets["critic"][critic_ind], target=self.nets["critic_target"][critic_ind], ) if self.algo_config.actor.enabled: TorchUtils.hard_update( source=self.nets["actor"], target=self.nets["actor_target"], ) self.nets = self.nets.float().to(self.device) def _create_critics(self): critic_class = ValueNets.ActionValueNetwork critic_args = dict( obs_shapes=self.obs_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.critic.layer_dims, value_bounds=self.algo_config.critic.value_bounds, goal_shapes=self.goal_shapes, **ObsNets.obs_encoder_args_from_config(self.obs_config.encoder), ) self.nets["critic"] = nn.ModuleList() self.nets["critic_target"] = nn.ModuleList() for _ in range(self.algo_config.critic.ensemble.n): critic = critic_class(**critic_args) self.nets["critic"].append(critic) critic_target = critic_class(**critic_args) self.nets["critic_target"].append(critic_target) def _create_action_sampler(self): assert self.algo_config.action_sampler.vae.enabled self.nets["action_sampler"] = PolicyNets.VAEActor( obs_shapes=self.obs_shapes, ac_dim=self.ac_dim, device=self.device, goal_shapes=self.goal_shapes, **VAENets.vae_args_from_config(self.algo_config.action_sampler.vae), **ObsNets.obs_encoder_args_from_config(self.obs_config.encoder), ) def _create_actor(self): assert self.algo_config.actor.enabled actor_class = PolicyNets.PerturbationActorNetwork actor_args = dict( obs_shapes=self.obs_shapes, goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor.layer_dims, perturbation_scale=self.algo_config.actor.perturbation_scale, **ObsNets.obs_encoder_args_from_config(self.obs_config.encoder), ) self.nets["actor"] = actor_class(**actor_args) self.nets["actor_target"] = actor_class(**actor_args) def _check_epoch(self, net_name, epoch): epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or (epoch >= self.optim_params[net_name]["start_epoch"]) epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or (epoch < self.optim_params[net_name]["end_epoch"]) return (epoch_start_check and epoch_end_check) def set_discount(self, discount): self.discount = discount def process_batch_for_training(self, batch): input_batch = dict() n_step = self.algo_config.n_step assert batch["actions"].shape[1] >= n_step input_batch["obs"] = 
{k: batch["obs"][k][:, 0, :] for k in batch["obs"]} input_batch["next_obs"] = {k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"]} input_batch["goal_obs"] = batch.get("goal_obs", None) input_batch["actions"] = batch["actions"][:, 0, :] reward_seq = batch["rewards"][:, :n_step] discounts = torch.pow(self.algo_config.discount, torch.arange(n_step).float()).unsqueeze(0) input_batch["rewards"] = (reward_seq * discounts).sum(dim=1).unsqueeze(1) new_discount = (self.algo_config.discount ** n_step) self.set_discount(new_discount) done_seq = batch["dones"][:, :n_step] input_batch["dones"] = (done_seq.sum(dim=1) > 0).float().unsqueeze(1) if self.algo_config.infinite_horizon: done_inds = input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0] if done_inds.shape[0] > 0: input_batch["rewards"][done_inds] = input_batch["rewards"][done_inds] * (1. / (1. - self.discount)) return TensorUtils.to_device(TensorUtils.to_float(input_batch), self.device) def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): info = OrderedDict() if self.algo_config.action_sampler.vae.prior.use_categorical: temperature = self.algo_config.action_sampler.vae.prior.categorical_init_temp - epoch * self.algo_config.action_sampler.vae.prior.categorical_temp_anneal_step temperature = max(temperature, self.algo_config.action_sampler.vae.prior.categorical_min_temp) self.nets["action_sampler"].set_gumbel_temperature(temperature) vae_inputs = dict( actions=batch["actions"], obs_dict=batch["obs"], goal_dict=batch["goal_obs"], ) if (self.algo_config.action_sampler.freeze_encoder_epoch != -1) and (epoch >= self.algo_config.action_sampler.freeze_encoder_epoch): vae_inputs["freeze_encoder"] = True vae_outputs = self.nets["action_sampler"].forward_train(**vae_inputs) recons_loss = vae_outputs["reconstruction_loss"] kl_loss = vae_outputs["kl_loss"] vae_loss = recons_loss + self.algo_config.action_sampler.vae.kl_weight * kl_loss info["action_sampler/loss"] = vae_loss info["action_sampler/recons_loss"] = recons_loss info["action_sampler/kl_loss"] = kl_loss if not self.algo_config.action_sampler.vae.prior.use_categorical: with torch.no_grad(): encoder_variance = torch.exp(vae_outputs["encoder_params"]["logvar"]).mean() info["action_sampler/encoder_variance"] = encoder_variance outputs = TensorUtils.detach(vae_outputs) if not no_backprop: vae_grad_norms = TorchUtils.backprop_for_loss( net=self.nets["action_sampler"], optim=self.optimizers["action_sampler"], loss=vae_loss, ) info["action_sampler/grad_norms"] = vae_grad_norms return info, outputs def _train_critic_on_batch(self, batch, action_sampler_outputs, epoch, no_backprop=False): info = OrderedDict() s_batch = batch["obs"] a_batch = batch["actions"] r_batch = batch["rewards"] ns_batch = batch["next_obs"] goal_s_batch = batch["goal_obs"] done_mask_batch = 1. 
- batch["dones"] info["done_masks"] = done_mask_batch q_targets = self._get_target_values( next_states=ns_batch, goal_states=goal_s_batch, rewards=r_batch, dones=done_mask_batch, action_sampler_outputs=action_sampler_outputs, ) info["critic/q_targets"] = q_targets critic_outputs = [] for critic_ind, critic in enumerate(self.nets["critic"]): critic_loss, critic_output = self._compute_critic_loss( critic=critic, states=s_batch, actions=a_batch, goal_states=goal_s_batch, q_targets=q_targets, ) info["critic/critic{}_loss".format(critic_ind + 1)] = critic_loss critic_outputs.append(critic_output) if not no_backprop: critic_grad_norms = TorchUtils.backprop_for_loss( net=self.nets["critic"][critic_ind], optim=self.optimizers["critic"][critic_ind], loss=critic_loss, max_grad_norm=self.algo_config.critic.max_gradient_norm, ) info["critic/critic{}_grad_norms".format(critic_ind + 1)] = critic_grad_norms return info, critic_outputs
MIT License
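A minimal sketch (PyTorch only, independent of robomimic) of the n-step discounted return computed in BCQ.process_batch_for_training in the record above; the reward values, n_step and discount below are made up for illustration:

import torch

# Hypothetical batch of reward sequences: 2 trajectories, horizon >= n_step.
rewards = torch.tensor([[1.0, 0.0, 2.0, 5.0],
                        [0.5, 0.5, 0.5, 5.0]])
n_step, discount = 3, 0.99

reward_seq = rewards[:, :n_step]                                  # (B, n_step)
discounts = torch.pow(discount, torch.arange(n_step).float())     # [1, 0.99, 0.9801]
n_step_return = (reward_seq * discounts.unsqueeze(0)).sum(dim=1)  # (B,) ~ [2.96, 1.49]

# The discount used to bootstrap the target Q-value is then discount ** n_step.
new_discount = discount ** n_step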
anandsaha/rl.capstone
archive/interface_robot.py
RobotArm.goto_position
python
def goto_position(self, pos):
Make the robot arm gripper go to the specified position (x, y, z coordinates)
https://github.com/anandsaha/rl.capstone/blob/7ee9eee225dc0348aabc950ce24b63a39c37fdc0/archive/interface_robot.py#L33-L34
import time import vrep import numpy as np class RobotArm(object): def __init__(self, ip, port): def __del__(self): def disconnect(self): def get_position(self, handle): @staticmethod def get_env_dimensions():
MIT License
hyperledger/aries-cloudagent-python
aries_cloudagent/protocols/connections/v1_0/messages/connection_response.py
ConnectionResponse.__init__
python
def __init__(self, *, connection: ConnectionDetail = None, **kwargs): super().__init__(**kwargs) self.connection = connection
Initialize connection response object. Args: connection: Connection details object
https://github.com/hyperledger/aries-cloudagent-python/blob/fec69f1a2301e4745fc9d40cea190050e3f595fa/aries_cloudagent/protocols/connections/v1_0/messages/connection_response.py#L26-L35
from marshmallow import EXCLUDE, fields from .....messaging.agent_message import AgentMessage, AgentMessageSchema from ..message_types import CONNECTION_RESPONSE, PROTOCOL_PACKAGE from ..models.connection_detail import ConnectionDetail, ConnectionDetailSchema HANDLER_CLASS = ( f"{PROTOCOL_PACKAGE}.handlers." "connection_response_handler.ConnectionResponseHandler" ) class ConnectionResponse(AgentMessage): class Meta: handler_class = HANDLER_CLASS schema_class = "ConnectionResponseSchema" message_type = CONNECTION_RESPONSE
Apache License 2.0
wangronin/bayesian-optimization
bayes_optim/search_space/node.py
Node.add_child
python
def add_child(self, node: Node, branch: str = None) -> Node: node.is_root = False self.children.append(node) self.branches.append(branch) return self
Add a child node to a node Parameters ---------- node : Node the child node to add branch : str, optional the branching condition to this child, which is a Python expression evaluated to `True` when the condition is satisfied, by default None Returns ------- Node the updated node
https://github.com/wangronin/bayesian-optimization/blob/ffbcf4c8813dfa603b9065355e20eda0ccb99e30/bayes_optim/search_space/node.py#L26-L45
from __future__ import annotations from copy import copy, deepcopy from typing import Any, Dict, List, Tuple class Node: def __init__(self, name: str, data: Any = None): self.name: str = name self.data: Any = data self.is_root: bool = True self.children: List[Node] = [] self.branches: List = []
BSD 3-Clause New or Revised License
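A short usage sketch of add_child, using only the Node class shown in this record's context; the node names and branch expressions are invented for illustration:

root = Node("root")
left = Node("left", data=1)
right = Node("right", data=2)

# add_child marks the child as non-root and returns the parent node,
# so further children can be chained onto the same node.
root.add_child(left, branch="x < 0").add_child(right, branch="x >= 0")

assert not left.is_root and not right.is_root
assert root.branches == ["x < 0", "x >= 0"]
assert [c.name for c in root.children] == ["left", "right"]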
optibus/playback
playback/recording.py
Recording.get_data
python
def get_data(self, key): pass
:param key: Data key :type key: basestring :return: Recorded data under given key :rtype: Any :raise: playback.exceptions.RecordingKeyError
https://github.com/optibus/playback/blob/7e8ea29f764a52753aef4e3334fd9bcf7826e082/playback/recording.py#L39-L47
import uuid from abc import ABCMeta, abstractmethod class Recording(object): __metaclass__ = ABCMeta def __init__(self, _id=None): self.id = _id or uuid.uuid1().hex self._closed = False @abstractmethod def _set_data(self, key, value): pass def set_data(self, key, value): assert not self._closed self._set_data(key, value) @abstractmethod
BSD 3-Clause New or Revised License
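Since Recording is abstract, a concrete backend must supply _set_data and get_data. A toy in-memory sketch (the InMemoryRecording name is an assumption for illustration, and any other abstract members the real interface may declare are ignored here):

class InMemoryRecording(Recording):
    """Toy Recording backend that keeps data in a plain dict."""

    def __init__(self, _id=None):
        super(InMemoryRecording, self).__init__(_id)
        self._data = {}

    def _set_data(self, key, value):
        self._data[key] = value

    def get_data(self, key):
        try:
            return self._data[key]
        except KeyError:
            # The real interface raises playback.exceptions.RecordingKeyError here.
            raise

recording = InMemoryRecording()
recording.set_data("operation_output", {"status": "ok"})
assert recording.get_data("operation_output") == {"status": "ok"}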
geoffxy/habitat
experiments/gnmt/seq2seq/models/decoder.py
ResidualRecurrentDecoder.forward
python
def forward(self, inputs, context, inference=False): self.inference = inference enc_context, enc_len, hidden = context hidden = self.init_hidden(hidden) x = self.embedder(inputs) x, h, attn, scores = self.att_rnn(x, hidden[0], enc_context, enc_len) self.append_hidden(h) x = torch.cat((x, attn), dim=2) x = self.dropout(x) x, h = self.rnn_layers[0](x, hidden[1]) self.append_hidden(h) for i in range(1, len(self.rnn_layers)): residual = x x = torch.cat((x, attn), dim=2) x = self.dropout(x) x, h = self.rnn_layers[i](x, hidden[i + 1]) self.append_hidden(h) x = x + residual x = self.classifier(x) hidden = self.package_hidden() return x, scores, [enc_context, enc_len, hidden]
Execute the decoder. :param inputs: tensor with inputs to the decoder :param context: state of encoder, encoder sequence lengths and hidden state of decoder's LSTM layers :param inference: if True stores and repackages hidden state
https://github.com/geoffxy/habitat/blob/decc70d18c4a1db7bb109fd59b2b60567bf74375/experiments/gnmt/seq2seq/models/decoder.py#L187-L222
import itertools import torch import torch.nn as nn import gnmt.seq2seq.data.config as config from gnmt.seq2seq.models.attention import BahdanauAttention from gnmt.seq2seq.utils import init_lstm_ class RecurrentAttention(nn.Module): def __init__(self, input_size=1024, context_size=1024, hidden_size=1024, num_layers=1, batch_first=False, dropout=0.2, init_weight=0.1): super(RecurrentAttention, self).__init__() self.rnn = nn.LSTM(input_size, hidden_size, num_layers, bias=True, batch_first=batch_first) init_lstm_(self.rnn, init_weight) self.attn = BahdanauAttention(hidden_size, context_size, context_size, normalize=True, batch_first=batch_first) self.dropout = nn.Dropout(dropout) def forward(self, inputs, hidden, context, context_len): self.attn.set_mask(context_len, context) inputs = self.dropout(inputs) rnn_outputs, hidden = self.rnn(inputs, hidden) attn_outputs, scores = self.attn(rnn_outputs, context) return rnn_outputs, hidden, attn_outputs, scores class Classifier(nn.Module): def __init__(self, in_features, out_features, init_weight=0.1): super(Classifier, self).__init__() self.classifier = nn.Linear(in_features, out_features) nn.init.uniform_(self.classifier.weight.data, -init_weight, init_weight) nn.init.uniform_(self.classifier.bias.data, -init_weight, init_weight) def forward(self, x): out = self.classifier(x) return out class ResidualRecurrentDecoder(nn.Module): def __init__(self, vocab_size, hidden_size=1024, num_layers=4, dropout=0.2, batch_first=False, embedder=None, init_weight=0.1): super(ResidualRecurrentDecoder, self).__init__() self.num_layers = num_layers self.att_rnn = RecurrentAttention(hidden_size, hidden_size, hidden_size, num_layers=1, batch_first=batch_first, dropout=dropout) self.rnn_layers = nn.ModuleList() for _ in range(num_layers - 1): self.rnn_layers.append( nn.LSTM(2 * hidden_size, hidden_size, num_layers=1, bias=True, batch_first=batch_first)) for lstm in self.rnn_layers: init_lstm_(lstm, init_weight) if embedder is not None: self.embedder = embedder else: self.embedder = nn.Embedding(vocab_size, hidden_size, padding_idx=config.PAD) nn.init.uniform_(self.embedder.weight.data, -init_weight, init_weight) self.classifier = Classifier(hidden_size, vocab_size) self.dropout = nn.Dropout(p=dropout) def init_hidden(self, hidden): if hidden is not None: hidden = hidden.chunk(self.num_layers) hidden = tuple(i.chunk(2) for i in hidden) else: hidden = [None] * self.num_layers self.next_hidden = [] return hidden def append_hidden(self, h): if self.inference: self.next_hidden.append(h) def package_hidden(self): if self.inference: hidden = torch.cat(tuple(itertools.chain(*self.next_hidden))) else: hidden = None return hidden
Apache License 2.0
databiosphere/toil
src/toil/__init__.py
lookupEnvVar
python
def lookupEnvVar(name, envName, defaultValue): try: value = os.environ[envName] except KeyError: log.info('Using default %s of %s as %s is not set.', name, defaultValue, envName) return defaultValue else: log.info('Overriding %s of %s with %s from %s.', name, defaultValue, value, envName) return value
Use this for looking up environment variables that control Toil and are important enough to log the result of that lookup. :param str name: the human readable name of the variable :param str envName: the name of the environment variable to lookup :param str defaultValue: the fall-back value :return: the value of the environment variable or the default value if the variable is not set :rtype: str
https://github.com/databiosphere/toil/blob/eb2ae8365ae2ebdd50132570b20f7d480eb40cac/src/toil/__init__.py#L241-L259
import errno import logging import os import re import socket import subprocess import sys import time from datetime import datetime import requests from pytz import timezone from docker.errors import ImageNotFound from toil.lib.memoize import memoize from toil.lib.retry import retry from toil.version import currentCommit log = logging.getLogger(__name__) def which(cmd, mode=os.F_OK | os.X_OK, path=None): def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": if not os.curdir in path: path.insert(0, os.curdir) pathext = os.environ.get("PATHEXT", "").split(os.pathsep) if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if not normdir in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None def toilPackageDirPath(): result = os.path.dirname(os.path.realpath(__file__)) assert result.endswith('/toil') return result def inVirtualEnv(): return ('VIRTUAL_ENV' in os.environ or 'CONDA_DEFAULT_ENV' in os.environ or hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)) def resolveEntryPoint(entryPoint): if os.environ.get("TOIL_CHECK_ENV", None) == 'True' and inVirtualEnv(): path = os.path.join(os.path.dirname(sys.executable), entryPoint) if os.path.isfile(path): assert os.access(path, os.X_OK) return path return entryPoint @memoize def physicalMemory() -> int: try: return os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') except ValueError: return int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']).decode('utf-8').strip()) def physicalDisk(directory: str) -> int: diskStats = os.statvfs(directory) return diskStats.f_frsize * diskStats.f_bavail def applianceSelf(forceDockerAppliance=False): import toil.version registry = lookupEnvVar(name='docker registry', envName='TOIL_DOCKER_REGISTRY', defaultValue=toil.version.dockerRegistry) name = lookupEnvVar(name='docker name', envName='TOIL_DOCKER_NAME', defaultValue=toil.version.dockerName) appliance = lookupEnvVar(name='docker appliance', envName='TOIL_APPLIANCE_SELF', defaultValue=registry + '/' + name + ':' + toil.version.dockerTag) checkDockerSchema(appliance) if forceDockerAppliance: return appliance else: return checkDockerImageExists(appliance=appliance) def customDockerInitCmd(): command = lookupEnvVar(name='user-defined custom docker init command', envName='TOIL_CUSTOM_DOCKER_INIT_COMMAND', defaultValue='') _check_custom_bash_cmd(command) return command.replace("'", "'\\''") def customInitCmd(): command = lookupEnvVar(name='user-defined custom init command', envName='TOIL_CUSTOM_INIT_COMMAND', defaultValue='') _check_custom_bash_cmd(command) return command.replace("'", "'\\''") def _check_custom_bash_cmd(cmd_str): assert not re.search(r'[\n\r\t]', cmd_str), f'"{cmd_str}" contains invalid characters (newline and/or tab).'
Apache License 2.0
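A usage sketch for lookupEnvVar; TOIL_EXAMPLE_SETTING is a made-up variable name used only for illustration:

import os
from toil import lookupEnvVar

os.environ.pop('TOIL_EXAMPLE_SETTING', None)
# Variable unset: the default is returned and an informational message is logged.
value = lookupEnvVar(name='example setting',
                     envName='TOIL_EXAMPLE_SETTING',
                     defaultValue='default-value')
assert value == 'default-value'

os.environ['TOIL_EXAMPLE_SETTING'] = 'override'
# Variable set: the environment value overrides the default.
assert lookupEnvVar('example setting', 'TOIL_EXAMPLE_SETTING', 'default-value') == 'override'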
dlinzhao/jsnet
utils/pointnet_util.py
pointnet_fp_module
python
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True, is_dist=False): with tf.variable_scope(scope) as sc: dist, idx = three_nn(xyz1, xyz2) dist = tf.maximum(dist, 1e-10) norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True) norm = tf.tile(norm, [1, 1, 3]) weight = (1.0 / dist) / norm interpolated_points = three_interpolate(points2, idx, weight) if points1 is not None: new_points1 = tf.concat(axis=2, values=[interpolated_points, points1]) else: new_points1 = interpolated_points new_points1 = tf.expand_dims(new_points1, 2) for i, num_out_channel in enumerate(mlp): new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='conv_%d' % (i), bn_decay=bn_decay, is_dist=is_dist) new_points1 = tf.squeeze(new_points1, [2]) return new_points1
PointNet Feature Propagation (FP) Module Input: xyz1: (batch_size, ndataset1, 3) TF tensor xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1 points1: (batch_size, ndataset1, nchannel1) TF tensor points2: (batch_size, ndataset2, nchannel2) TF tensor mlp: list of int32 -- output size for MLP on each point Return: new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
https://github.com/dlinzhao/jsnet/blob/8fec285e1ccc12a0c5806f4adc60846420ca46a1/utils/pointnet_util.py#L162-L193
import os import sys BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.append(os.path.join(ROOT_DIR, 'utils')) sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/sampling')) sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/grouping')) sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/3d_interpolation')) from tf_sampling import farthest_point_sample, gather_point from tf_grouping import query_ball_point, group_point, knn_point from tf_interpolate import three_nn, three_interpolate import tensorflow as tf import numpy as np import tf_util def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True): new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) if knn: _, idx = knn_point(nsample, xyz, new_xyz) else: idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz) grouped_xyz = group_point(xyz, idx) grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1]) if points is not None: grouped_points = group_point(points, idx) if use_xyz: new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) else: new_points = grouped_points else: new_points = grouped_xyz return new_xyz, new_points, idx, grouped_xyz def sample_and_group_all(xyz, points, use_xyz=True): batch_size = xyz.get_shape()[0].value nsample = xyz.get_shape()[1].value new_xyz = tf.constant(np.tile(np.array([0, 0, 0]).reshape((1, 1, 3)), (batch_size, 1, 1)), dtype=tf.float32) idx = tf.constant(np.tile(np.array(range(nsample)).reshape((1, 1, nsample)), (batch_size, 1, 1))) grouped_xyz = tf.reshape(xyz, (batch_size, 1, nsample, 3)) if points is not None: if use_xyz: new_points = tf.concat([xyz, points], axis=2) else: new_points = points new_points = tf.expand_dims(new_points, 1) else: new_points = grouped_xyz return new_xyz, new_points, idx, grouped_xyz def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False, is_dist=False): data_format = 'NCHW' if use_nchw else 'NHWC' with tf.variable_scope(scope) as sc: if group_all: nsample = xyz.get_shape()[1].value new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(xyz, points, use_xyz) else: new_xyz, new_points, idx, grouped_xyz = sample_and_group(npoint, radius, nsample, xyz, points, knn, use_xyz) if use_nchw: new_points = tf.transpose(new_points, [0, 3, 1, 2]) for i, num_out_channel in enumerate(mlp): new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='conv%d' % (i), bn_decay=bn_decay, data_format=data_format, is_dist=is_dist) if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1]) if pooling == 'max': new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool') elif pooling == 'avg': new_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool') elif pooling == 'weighted_avg': with tf.variable_scope('weighted_avg'): dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True) exp_dists = tf.exp(-dists * 5) weights = exp_dists / tf.reduce_sum(exp_dists, axis=2, keep_dims=True) new_points *= weights new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True) elif pooling == 'max_and_avg': max_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool') avg_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool') new_points = tf.concat([avg_points, max_points], axis=-1) if mlp2 is not None: if use_nchw: 
new_points = tf.transpose(new_points, [0, 3, 1, 2]) for i, num_out_channel in enumerate(mlp2): new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope='conv_post_%d' % (i), bn_decay=bn_decay, data_format=data_format, is_dist=is_dist) if use_nchw: new_points = tf.transpose(new_points, [0, 2, 3, 1]) new_points = tf.squeeze(new_points, [2]) return new_xyz, new_points, idx
MIT License
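The TF op above depends on custom CUDA kernels (three_nn / three_interpolate), but the interpolation rule itself is inverse-distance weighting over the three nearest neighbours. A NumPy-only sketch of that rule for a single query point, with made-up coordinates and features:

import numpy as np

# Features living on 3 sparse points (ndataset2 = 3, nchannel2 = 2).
sparse_xyz = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]])
sparse_feat = np.array([[1.0, 0.0],
                        [0.0, 1.0],
                        [1.0, 1.0]])

query = np.array([0.2, 0.2, 0.0])            # one point of the dense set

dist = np.linalg.norm(sparse_xyz - query, axis=1)
dist = np.maximum(dist, 1e-10)               # same clamp as in the TF code
weight = (1.0 / dist) / np.sum(1.0 / dist)   # normalised inverse-distance weights
interpolated = weight @ sparse_feat          # (nchannel2,) feature for the query point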
morgan-stanley/treadmill
lib/python/treadmill/tests/treadmill_ldap_patch.py
evaluate_filter_node
python
def evaluate_filter_node(self, node, candidates): node.matched = set() node.unmatched = set() if node.elements: for element in node.elements: self.evaluate_filter_node(element, candidates) if node.tag == ROOT: return node.elements[0].matched elif node.tag == AND: for element in node.elements: if not node.matched: node.matched.update(element.matched) else: node.matched.intersection_update(element.matched) if not node.unmatched: node.unmatched.update(element.unmatched) else: node.unmatched.intersection_update(element.unmatched) elif node.tag == OR: for element in node.elements: node.matched.update(element.matched) node.unmatched.update(element.unmatched) elif node.tag == NOT: node.matched = node.elements[0].unmatched node.unmatched = node.elements[0].matched elif node.tag == MATCH_GREATER_OR_EQUAL: attr_name = node.assertion['attr'] attr_value = node.assertion['value'] for candidate in candidates: if attr_name in self.connection.server.dit[candidate]: for value in self.connection.server.dit[candidate][attr_name]: if value.isdigit() and attr_value.isdigit(): if int(value) >= int(attr_value): node.matched.add(candidate) else: node.unmatched.add(candidate) else: if to_unicode( value, SERVER_ENCODING ).lower() >= to_unicode( attr_value, SERVER_ENCODING ).lower(): node.matched.add(candidate) else: node.unmatched.add(candidate) elif node.tag == MATCH_LESS_OR_EQUAL: attr_name = node.assertion['attr'] attr_value = node.assertion['value'] for candidate in candidates: if attr_name in self.connection.server.dit[candidate]: for value in self.connection.server.dit[candidate][attr_name]: if value.isdigit() and attr_value.isdigit(): if int(value) <= int(attr_value): node.matched.add(candidate) else: node.unmatched.add(candidate) else: if to_unicode( value, SERVER_ENCODING ).lower() <= to_unicode( attr_value, SERVER_ENCODING ).lower(): node.matched.add(candidate) else: node.unmatched.add(candidate) elif node.tag == MATCH_EXTENSIBLE: self.connection.last_error = 'Extensible match not allowed in Mock strategy' if log_enabled(ERROR): log(ERROR, '<%s> for <%s>', self.connection.last_error, self.connection) raise LDAPDefinitionError(self.connection.last_error) elif node.tag == MATCH_PRESENT: attr_name = node.assertion['attr'] for candidate in candidates: if attr_name in self.connection.server.dit[candidate]: node.matched.add(candidate) else: node.unmatched.add(candidate) elif node.tag == MATCH_SUBSTRING: attr_name = node.assertion['attr'] if 'initial' in node.assertion and node.assertion['initial'] is not None: substring_filter = re.escape( to_unicode(node.assertion['initial'], SERVER_ENCODING) ) else: substring_filter = '' if 'any' in node.assertion and node.assertion['any'] is not None: for middle in node.assertion['any']: substring_filter += '.*' + re.escape( to_unicode(middle, SERVER_ENCODING) ) if 'final' in node.assertion and node.assertion['final'] is not None: substring_filter += '.*' + re.escape( to_unicode(node.assertion['final'], SERVER_ENCODING) ) if substring_filter and not node.assertion.get('any', None) and not node.assertion.get('final', None): substring_filter += '.*' regex_filter = re.compile( substring_filter, flags=re.UNICODE | re.IGNORECASE ) for candidate in candidates: if attr_name in self.connection.server.dit[candidate]: for value in self.connection.server.dit[candidate][attr_name]: if regex_filter.match(to_unicode(value, SERVER_ENCODING)): node.matched.add(candidate) else: node.unmatched.add(candidate) else: node.unmatched.add(candidate) elif node.tag == MATCH_EQUAL or node.tag == 
MATCH_APPROX: attr_name = node.assertion['attr'] attr_value = node.assertion['value'] for candidate in candidates: if attr_name in self.connection.server.dit[candidate] and self.equal(candidate, attr_name, attr_value): node.matched.add(candidate) else: node.unmatched.add(candidate) return None
After evaluation, two sets are added to each MATCH node: one for the matched objects and one for the unmatched objects. The unmatched set is needed when a superior node is a NOT that inverts the evaluation. The BOOLEAN nodes combine the sets returned by the MATCH nodes.
https://github.com/morgan-stanley/treadmill/blob/f18267c665baf6def4374d21170198f63ff1cde4/lib/python/treadmill/tests/treadmill_ldap_patch.py#L35-L169
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import re from ldap3.strategy import mockBase from ldap3.strategy.mockBase import ( to_unicode, log, log_enabled, ERROR, LDAPDefinitionError, SERVER_ENCODING, ROOT, AND, OR, NOT, MATCH_APPROX, MATCH_GREATER_OR_EQUAL, MATCH_LESS_OR_EQUAL, MATCH_EXTENSIBLE, MATCH_PRESENT, MATCH_SUBSTRING, MATCH_EQUAL ) def monkey_patch(): mockBase.MockBaseStrategy.evaluate_filter_node = evaluate_filter_node
Apache License 2.0
chengyangfu/retinamask
maskrcnn_benchmark/modeling/poolers.py
Pooler.__init__
python
def __init__(self, output_size, scales, sampling_ratio, canonical_level=4): super(Pooler, self).__init__() poolers = [] for scale in scales: poolers.append( ROIAlign( output_size, spatial_scale=scale, sampling_ratio=sampling_ratio ) ) self.poolers = nn.ModuleList(poolers) self.output_size = output_size lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item() lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item() self.map_levels = LevelMapper( lvl_min, lvl_max, canonical_level=canonical_level )
Arguments: output_size (list[tuple[int]] or list[int]): output size for the pooled region scales (list[float]): scales for each Pooler sampling_ratio (int): sampling ratio for ROIAlign
https://github.com/chengyangfu/retinamask/blob/5bcf2e744c6d9b4574682975dde0d4c24ff4cc59/maskrcnn_benchmark/modeling/poolers.py#L55-L78
import torch import torch.nn.functional as F from torch import nn from maskrcnn_benchmark.layers import ROIAlign from .utils import cat class LevelMapper(object): def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6): self.k_min = k_min self.k_max = k_max self.s0 = canonical_scale self.lvl0 = canonical_level self.eps = eps def __call__(self, boxlists): s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists])) target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps)) target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max) return target_lvls.to(torch.int64) - self.k_min class Pooler(nn.Module):
MIT License
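A small sketch of the scale-to-level bookkeeping in Pooler.__init__ together with the LevelMapper formula from this record's context, assuming the usual FPN scales 1/4 ... 1/32 (plain math, no torch):

import math

scales = [1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32]
lvl_min = -math.log2(scales[0])    # 2.0 -> finest FPN level
lvl_max = -math.log2(scales[-1])   # 5.0 -> coarsest FPN level

def map_level(box_area, canonical_scale=224, canonical_level=4, eps=1e-6):
    """Same rule as LevelMapper.__call__, for a single box area."""
    s = math.sqrt(box_area)
    lvl = math.floor(canonical_level + math.log2(s / canonical_scale + eps))
    # Clamp to the available levels and shift so the result indexes self.poolers.
    return int(min(max(lvl, lvl_min), lvl_max) - lvl_min)

assert map_level(224 * 224) == 2   # canonical box -> level 4 -> pooler index 2
assert map_level(32 * 32) == 0     # small box -> finest level -> pooler index 0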
qiskit/qiskit-experiments
qiskit_experiments/calibration_management/calibrations.py
Calibrations.get_template
python
def get_template( self, schedule_name: str, qubits: Optional[Tuple[int, ...]] = None ) -> ScheduleBlock: key = ScheduleKey(schedule_name, self._to_tuple(qubits)) if key in self._schedules: return self._schedules[key] if ScheduleKey(schedule_name, ()) in self._schedules: return self._schedules[ScheduleKey(schedule_name, ())] if qubits: msg = f"Could not find schedule {schedule_name} on qubits {qubits}." else: msg = f"Could not find schedule {schedule_name}." raise CalibrationError(msg)
Get a template schedule. Allows the user to get a template schedule that was previously registered. A template schedule will typically be fully parametric, i.e. all pulse parameters and channel indices are represented by :class:`Parameter`. Args: schedule_name: The name of the template schedule. qubits: The qubits under which the template schedule was registered. Returns: The registered template schedule. Raises: CalibrationError: if no template schedule for the given schedule name and qubits was registered.
https://github.com/qiskit/qiskit-experiments/blob/39e2a1b6d65bd1df47a94a731b425f98d0f7e3e2/qiskit_experiments/calibration_management/calibrations.py#L227-L260
import os from collections import defaultdict from datetime import datetime, timezone from typing import Any, Dict, Set, Tuple, Union, List, Optional import csv import dataclasses import warnings import re from qiskit.pulse import ( ScheduleBlock, DriveChannel, ControlChannel, MeasureChannel, Call, Instruction, AcquireChannel, RegisterSlot, MemorySlot, Schedule, ) from qiskit.pulse.channels import PulseChannel from qiskit.circuit import Parameter, ParameterExpression from qiskit_experiments.exceptions import CalibrationError from qiskit_experiments.calibration_management.parameter_value import ParameterValue from qiskit_experiments.calibration_management.calibration_key_types import ( ParameterKey, ParameterValueType, ScheduleKey, ) class Calibrations: __channel_pattern__ = r"^ch\d[\.\d]*\${0,1}[\d]*$" def __init__(self, control_config: Dict[Tuple[int, ...], List[ControlChannel]] = None): self._controls_config = control_config if control_config else {} self._controls_config_r = {} for qubits, channels in self._controls_config.items(): for channel in channels: self._controls_config_r[channel] = qubits self._parameter_map = {} self._parameter_map_r = defaultdict(set) self._params = defaultdict(list) self._schedules = {} self._schedules_qubits = {} self._hash_to_counter_map = {} self._parameter_counter = 0 def add_schedule( self, schedule: ScheduleBlock, qubits: Union[int, Tuple[int, ...]] = None, num_qubits: Optional[int] = None, ): qubits = self._to_tuple(qubits) if len(qubits) == 0 and num_qubits is None: raise CalibrationError("Both qubits and num_qubits cannot simultaneously be None.") num_qubits = len(qubits) or num_qubits if not isinstance(schedule, ScheduleBlock): raise CalibrationError(f"{schedule.name} is not a ScheduleBlock.") sched_key = ScheduleKey(schedule.name, qubits) if sched_key in self._schedules_qubits and self._schedules_qubits[sched_key] != num_qubits: raise CalibrationError( f"Cannot add schedule {schedule.name} acting on {num_qubits} qubits." "self already contains a schedule with the same name acting on " f"{self._schedules_qubits[sched_key]} qubits. Remove old schedule first." ) if schedule.name.startswith(ScheduleBlock.prefix): raise CalibrationError( f"{self.__class__.__name__} uses the `name` property of the schedule as part of a " f"database key. Using the automatically generated name {schedule.name} may have " f"unintended consequences. Please define a meaningful and unique schedule name." ) param_indices = set() for ch in schedule.channels: if isinstance(ch.index, Parameter): if len(ch.index.parameters) != 1: raise CalibrationError(f"Channel {ch} can only have one parameter.") param_indices.add(ch.index) if re.compile(self.__channel_pattern__).match(ch.index.name) is None: raise CalibrationError( f"Parameterized channel must correspond to {self.__channel_pattern__}" ) for block in schedule.blocks: if isinstance(block, Call): if isinstance(block.subroutine, Schedule): raise CalibrationError( "Calling a Schedule is forbidden, call ScheduleBlock instead." ) if (block.subroutine.name, qubits) not in self._schedules: raise CalibrationError( f"Cannot register schedule block {schedule.name} with unregistered " f"subroutine {block.subroutine.name}." 
) self._clean_parameter_map(schedule.name, qubits) self._schedules[sched_key] = schedule self._schedules_qubits[sched_key] = num_qubits params_to_register = set() for inst in self._exclude_calls(schedule, []): for param in inst.parameters: if param not in param_indices: params_to_register.add(param) if len(params_to_register) != len(set(param.name for param in params_to_register)): raise CalibrationError(f"Parameter names in {schedule.name} must be unique.") for param in params_to_register: self._register_parameter(param, qubits, schedule) def _exclude_calls( self, schedule: ScheduleBlock, instructions: List[Instruction] ) -> List[Instruction]: for block in schedule.blocks: if isinstance(block, ScheduleBlock): instructions = self._exclude_calls(block, instructions) else: if not isinstance(block, Call): instructions.append(block) return instructions
Apache License 2.0
dials/dials
algorithms/refinement/parameterisation/autoreduce.py
AutoReduce.__call__
python
def __call__(self): if self._options.action == "fail": self.check_and_fail() elif self._options.action == "fix": self.check_and_fix() elif self._options.action == "remove": self.check_and_remove()
Perform checks and parameter reduction according to the selected option. Returns: None
https://github.com/dials/dials/blob/a2cb71bf410e179b92554bcce2e21388e1dc25d1/algorithms/refinement/parameterisation/autoreduce.py#L249-L263
import logging logger = logging.getLogger(__name__) from libtbx.phil import parse from scitbx.array_family import flex from dials.algorithms.refinement import DialsRefineConfigError phil_str = """ min_nref_per_parameter = 5 .help = "the smallest number of reflections per parameter for a" "model parameterisation below which the parameterisation will" "not be made in full, but the action described below will be" "triggered." .type = int(value_min=1) action = *fail fix remove .help = "action to take if there are too few reflections across the" "experiments related to a particular model parameterisation." "If fail, an exception will be raised and refinement will not" "proceed. If fix, refinement will continue but with the" "parameters relating to that model remaining fixed at their" "initial values. If remove, parameters relating to that model" "will be fixed, and in addition all reflections related to" "that parameterisation will be removed. This will therefore" "remove these reflections from other parameterisations of the" "global model too. For example, if a crystal model could not" "be parameterised it will be excised completely and not" "contribute to the joint refinement of the detector and beam." "In the fix mode, reflections emanating from that crystal will" "still form residuals and will contribute to detector and beam" "refinement." .type = choice """ phil_scope = parse(phil_str) def id_associated_refs(result): vals = list(result.values()) try: vals = [v.as_dense_vector() for v in vals] except AttributeError: pass vals = [flex.abs(v) > 1e-10 for v in vals] val = vals[0] for v in vals[1:]: val = val | v return val def count_associated_refs(result): refs = id_associated_refs(result) return refs.count(True) class AutoReduce: def __init__( self, options, pred_param, reflection_manager, constraints_manager=None, constraints_manager_factory=None, ): self._options = options self.pred_param = pred_param self.param_names = self.pred_param.get_param_names() self.reflection_manager = reflection_manager self.constraints_manager = constraints_manager self.constraints_manager_factory = constraints_manager_factory def _nref_per_param(self): obs = self.reflection_manager.get_obs() try: self.pred_param.compose(obs) except AttributeError: pass nref_per_param = flex.size_t( self.pred_param.get_gradients(obs, callback=count_associated_refs) ) if self.constraints_manager is not None: for link in self.constraints_manager.get_constrained_parameter_indices(): sel = flex.size_t(link) total = flex.sum(nref_per_param.select(sel)) nref_per_param.set_selected(sel, total) return nref_per_param def check_and_fail(self): sel = ( self._nref_per_param() < self._options.min_nref_per_parameter ).iselection() if len(sel) > 0: names = ", ".join([self.param_names[i] for i in sel]) msg = f"Too few reflections to parameterise {names}.\n" msg += ( "Try setting " "refinement.parameterisation.auto_reduction.action " "to fix these parameters (=fix) or additionally remove the " "associated reflections (=remove)." ) raise DialsRefineConfigError(msg) def check_and_fix(self): sel = self._nref_per_param() < self._options.min_nref_per_parameter isel = sel.iselection() if len(isel) > 0: names = ", ".join([self.param_names[i] for i in isel]) msg = f"Too few reflections to parameterise {names}.\n" msg += "These parameters will be fixed for refinement." 
logger.warning(msg) self.pred_param.fix_params(sel) if self.constraints_manager is not None: self.constraints_manager = self.constraints_manager_factory() def check_and_remove(self): det_params = self.pred_param.get_detector_parameterisations() if len(det_params) == 1: n_exp = len(det_params[0].get_experiment_ids()) if n_exp == 1 and not det_params[0].is_multi_state(): raise DialsRefineConfigError( "For single experiment, single panel refinement " "auto_reduction.action=remove cannot be used as it could only " "remove all reflections from refinement" ) while True: obs = self.reflection_manager.get_obs() try: self.pred_param.compose(obs) except AttributeError: pass refs_by_parameters = self.pred_param.get_gradients( obs, callback=id_associated_refs ) nref_per_param = flex.size_t( [refs.count(True) for refs in refs_by_parameters] ) if self.constraints_manager is not None: for ( link ) in self.constraints_manager.get_constrained_parameter_indices(): sel = flex.size_t(link) total = flex.sum(nref_per_param.select(sel)) nref_per_param.set_selected(sel, total) sel = nref_per_param < self._options.min_nref_per_parameter if sel.count(True) == 0: break names = ", ".join([self.param_names[i] for i in sel.iselection()]) msg = f"Too few reflections to parameterise {names}.\n" msg += ( "These parameters will be fixed for refinement and " "the associated reflections will be removed." ) logger.warning(msg) self.pred_param.fix_params(sel) if self.constraints_manager is not None: self.constraints_manager = self.constraints_manager_factory() refs_to_filter = flex.bool(len(obs), True) for remove, refs in zip(sel, refs_by_parameters): if remove: refs_to_filter = refs_to_filter & ~refs self.reflection_manager.filter_obs(refs_to_filter)
BSD 3-Clause New or Revised License
learningequality/ricecooker
examples/tutorial/sushichef.py
TutorialChef.construct_channel
python
def construct_channel(self, *args, **kwargs): channel = self.get_channel(*args, **kwargs) exampletopic = TopicNode(source_id="topic-1", title="Example Topic") channel.add_child(exampletopic) examplesubtopic = TopicNode(source_id="topic-1a", title="Example Subtopic") exampletopic.add_child(examplesubtopic) document_file = DocumentFile(path="http://www.pdf995.com/samples/pdf.pdf") examplepdf = DocumentNode(title="Example PDF", source_id="example-pdf", files=[document_file], license=get_license(licenses.PUBLIC_DOMAIN)) video_file = VideoFile(path="https://ia600209.us.archive.org/27/items/RiceChef/Rice Chef.mp4") fancy_license = get_license(licenses.SPECIAL_PERMISSIONS, description='Special license for ricecooker fans only.', copyright_holder='The chef video makers') examplevideo = VideoNode(title="Example Video", source_id="example-video", files=[video_file], license=fancy_license) audio_file = AudioFile(path="https://ia802508.us.archive.org/5/items/testmp3testfile/mpthreetest.mp3") exampleaudio = AudioNode(title="Example Audio", source_id="example-audio", files=[audio_file], license=get_license(licenses.PUBLIC_DOMAIN)) channel.add_child(examplepdf) exampletopic.add_child(examplevideo) examplesubtopic.add_child(exampleaudio) return channel
This method is responsible for creating a `ChannelNode` object and populating it with `TopicNode` and `ContentNode` children.
https://github.com/learningequality/ricecooker/blob/eb74437b489b79aa2151b64394cefba500a15fc7/examples/tutorial/sushichef.py#L27-L87
from ricecooker.chefs import SushiChef from ricecooker.classes.nodes import ChannelNode, HTML5AppNode, TopicNode, VideoNode, DocumentNode, AudioNode from ricecooker.classes.files import DocumentFile, VideoFile, AudioFile from le_utils.constants import licenses from ricecooker.classes.licenses import get_license class TutorialChef(SushiChef): channel_info = { 'CHANNEL_SOURCE_DOMAIN': '<yourdomain.org>', 'CHANNEL_SOURCE_ID': '<yourid>', 'CHANNEL_TITLE': 'The tutorial channel', 'CHANNEL_LANGUAGE': 'en', }
MIT License
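To actually run this chef, the tutorial script typically ends with a main guard that hands control to ricecooker's command-line runner (a sketch; the Studio token is supplied on the command line and the exact flags are handled by chef.main()):

if __name__ == '__main__':
    # Run as: python sushichef.py --token=<your Kolibri Studio token>
    chef = TutorialChef()
    chef.main()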
sumeshi/evtx2es
src/evtx2es/models/Evtx2es.py
Evtx2es.gen_records
python
def gen_records(self, shift: Union[str, datetime], multiprocess: bool, chunk_size: int) -> Generator: gen_path = iter(lambda: str(self.path), None) gen_shift = iter(lambda: shift, None) if multiprocess: with Pool(cpu_count()) as pool: results = pool.starmap_async( process_by_chunk, zip( generate_chunks(chunk_size, self.parser.records_json()), gen_path, gen_shift, ) ) yield list(chain.from_iterable(results.get(timeout=None))) else: buffer: List[List[dict]] = list() for records in generate_chunks(chunk_size, self.parser.records_json()): if chunk_size <= len(buffer): yield list(chain.from_iterable(buffer)) buffer.clear() else: buffer.append(process_by_chunk(records, gen_path, gen_shift)) else: yield list(chain.from_iterable(buffer))
Generates the formatted Eventlog records in chunks. Args: multiprocess (bool): Flag to run multiprocessing. chunk_size (int): Size of the chunk to be processed for each process. Yields: Generator: Yields List[dict].
https://github.com/sumeshi/evtx2es/blob/6e0a5508c4c0a6f72f4065c4293dc5e72bb68d57/src/evtx2es/models/Evtx2es.py#L180-L214
from datetime import datetime, timedelta from itertools import chain from pathlib import Path from typing import List, Generator, Iterable, Union from itertools import islice from multiprocessing import Pool, cpu_count import orjson from evtx import PyEvtxParser def generate_chunks(chunk_size: int, iterable: Iterable) -> Generator: i = iter(iterable) piece = list(islice(i, chunk_size)) while piece: yield piece piece = list(islice(i, chunk_size)) def format_record(record: dict, filepath: str, shift: Union[str, datetime]): record["data"] = orjson.loads(record.get("data")) eventid_field = record.get("data", {}).get("Event", {}).get("System", {}).get("EventID") if type(eventid_field) is dict: record["data"]["Event"]["System"]["EventID"] = eventid_field.get("#text") try: status = record.get("data").get("Event").get("EventData").get("Status") record["data"]["Event"]["EventData"]["Status"] = None except Exception: pass record["winlog"] = { "channel": record["data"]["Event"]["System"]["Channel"], "computer_name": record["data"]["Event"]["System"]["Computer"], "event_id": record["data"]["Event"]["System"]["EventID"], "opcode": record["data"]["Event"]["System"].get("Opcode"), "provider_guid": record["data"]["Event"]["System"]["Provider"][ "#attributes" ].get("Guid"), "provider_name": record["data"]["Event"]["System"]["Provider"][ "#attributes" ]["Name"], "record_id": record["data"]["Event"]["System"]["EventRecordID"], "task": record["data"]["Event"]["System"]["Task"], "version": record["data"]["Event"]["System"].get("Version"), } try: record["winlog"]["process"] = { "pid": record["data"]["Event"]["System"]["Execution"]["#attributes"][ "ProcessID" ], "thread_id": record["data"]["Event"]["System"]["Execution"][ "#attributes" ]["ThreadID"], } except KeyError: pass except TypeError: pass try: record["userdata"] = { "address": record["data"]["Event"]["UserData"]["EventXML"]["Address"], "sessionid": record["data"]["Event"]["UserData"]["EventXML"][ "SessionID" ], "user": record["data"]["Event"]["UserData"]["EventXML"]["User"], } except KeyError: pass except TypeError: pass record.update( { "log": {"file": {"name": filepath}}, "event": { "code": record["winlog"]["event_id"], "created": record["data"]["Event"]["System"]["TimeCreated"][ "#attributes" ]["SystemTime"], }, } ) if shift != '0': current_timestamp = datetime.strptime(record["event"]["created"], "%Y-%m-%d"'T'"%H:%M:%S.%fZ") final_timestamp = current_timestamp + timedelta(seconds=shift.seconds) + timedelta(days=shift.days) record["@timestamp"] = final_timestamp.strftime("%Y-%m-%d"'T'"%H:%M:%S.%fZ") else: record["@timestamp"] = record["event"]["created"] record["winlog"]["event_data"] = record["data"]["Event"].get( "EventData", dict() ) del record["data"] if ( record["winlog"]["event_data"] is None or len(record["winlog"]["event_data"]) == 0 ): del record["winlog"]["event_data"] else: if record["winlog"]["event_data"]: for k, v in record["winlog"]["event_data"].items(): if k in ("ProcessId") and type(v) == str: if v.startswith("0x"): record["winlog"]["event_data"][k] = int(v, 16) else: try: record["winlog"]["event_data"][k] = int(v) except ValueError: record["winlog"]["event_data"][k] = 0 if type(v) is int: if v < -(2 ** 63): record["winlog"]["event_data"][k] = -(2 ** 63) elif v > 2 ** 63 - 1: record["winlog"]["event_data"][k] = 2 ** 63 - 1 return record def process_by_chunk(records: List[str], filepath: Union[Generator, str], shift: Union[Generator, str, datetime]) -> List[dict]: filepath = filepath if type(filepath) is str else filepath.__next__() 
shift = shift if type(shift) is str else shift.__next__() concatenated_json: str = f"[{','.join([orjson.dumps(record).decode('utf-8') for record in records])}]" record_list: List[dict] = orjson.loads(concatenated_json) return [ format_record(record, filepath=filepath, shift=shift) for record in record_list ] class Evtx2es(object): def __init__(self, input_path: Path) -> None: self.path = input_path self.parser = PyEvtxParser(self.path.open(mode="rb"))
MIT License
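A usage sketch for gen_records, given the Evtx2es class defined above; "sample.evtx" is a placeholder path and the evtx and orjson packages are assumed to be installed:

from pathlib import Path

evtx2es = Evtx2es(Path("sample.evtx"))

# shift='0' disables timestamp shifting; each yielded item is a list of
# ECS-style dicts ready for bulk indexing.
for chunk in evtx2es.gen_records(shift='0', multiprocess=False, chunk_size=500):
    for record in chunk:
        print(record["@timestamp"], record["winlog"]["event_id"])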
airshipit/armada
armada/handlers/schema.py
_load_schemas
python
def _load_schemas(): schema_dir = _get_schema_dir() for schema_file in os.listdir(schema_dir): with open(os.path.join(schema_dir, schema_file)) as f: for schema in yaml.safe_load_all(f): name = schema['metadata']['name'] if name in _SCHEMAS: raise RuntimeError( 'Duplicate schema specified for: %s.' % name) _SCHEMAS[name] = _get_schema_info(name, schema['data'])
Populates ``_SCHEMAS`` with the schemas defined in package ``armada.schemas``.
https://github.com/airshipit/armada/blob/026a00a88ece2994083193256a7f7c02d4608f2a/armada/handlers/schema.py#L63-L76
import os import re import pkg_resources import yaml TYPE_CHART = 'Chart' TYPE_CHARTGROUP = 'ChartGroup' TYPE_MANIFEST = 'Manifest' VERSION_FORMAT = r'^v(\d+)$' VERSION_MIN = 1 VERSION_MAX = 2 _SCHEMAS = {} class SchemaInfo(object): def __init__(self, type, version, data): self.type = type self.version = version self.data = data def __eq__(self, other): return self.type == other.type and self.version == other.version def get_schema_info(name): return _SCHEMAS.get(name) def _get_schema_info(name, data): parts = name.split('/') prefix, type, version_string = parts version_match = re.search(VERSION_FORMAT, version_string) version = int(version_match.group(1)) return SchemaInfo(type, version, data) def _get_schema_dir(): return pkg_resources.resource_filename('armada', 'schemas')
Apache License 2.0
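The schema names handled by _get_schema_info follow a <prefix>/<Type>/v<N> pattern (e.g. the armada/Chart/v1 name used by Armada manifests). A quick sketch of the split and version extraction using the module's VERSION_FORMAT regex:

import re

VERSION_FORMAT = r'^v(\d+)$'

# Split the document name into its three parts and parse the version number.
prefix, type_, version_string = 'armada/Chart/v1'.split('/')
version = int(re.search(VERSION_FORMAT, version_string).group(1))

assert (type_, version) == ('Chart', 1)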
python-tap/tappy
tap/directive.py
Directive.reason
python
def reason(self): return self._reason
Get the reason for the directive.
https://github.com/python-tap/tappy/blob/0c38a487d6e0113412902ab7a521120cf9da332f/tap/directive.py#L63-L65
import re class Directive(object): skip_pattern = re.compile( r"""^SKIP\S* (?P<whitespace>\s*) # Optional whitespace. (?P<reason>.*) # Slurp up the rest.""", re.IGNORECASE | re.VERBOSE, ) todo_pattern = re.compile( r"""^TODO\b # The directive name (?P<whitespace>\s*) # Immediately following must be whitespace. (?P<reason>.*) # Slurp up the rest.""", re.IGNORECASE | re.VERBOSE, ) def __init__(self, text): self._text = text self._skip = False self._todo = False self._reason = None match = self.skip_pattern.match(text) if match: self._skip = True self._reason = match.group("reason") match = self.todo_pattern.match(text) if match: if match.group("whitespace"): self._todo = True else: if match.group("reason") == "": self._todo = True self._reason = match.group("reason") @property def text(self): return self._text @property def skip(self): return self._skip @property def todo(self): return self._todo @property
BSD 2-Clause Simplified License
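A usage sketch of the Directive class shown above; the directive strings are invented for illustration:

skip = Directive("SKIP not supported on this platform")
assert skip.skip is True
assert skip.reason == "not supported on this platform"

todo = Directive("TODO fix the flaky assertion")
assert todo.todo is True
assert todo.reason == "fix the flaky assertion"

# Text that is neither SKIP nor TODO leaves all flags unset.
plain = Directive("just a comment")
assert not plain.skip and not plain.todo and plain.reason is None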
python/the-knights-who-say-ni
ni/test/test_main.py
FakeCLAHost.problems
python
async def problems(self, client, usernames): self.usernames = usernames return self._problems
Check if all of the specified usernames have signed the CLA.
https://github.com/python/the-knights-who-say-ni/blob/1bc3b74231f39fc47dcb76a48fcfcb946ec8e83a/ni/test/test_main.py#L18-L21
import http import unittest.mock as mock from typing import AbstractSet, FrozenSet, Mapping from .. import __main__ from .. import abc as ni_abc from .. import github from . import util class FakeCLAHost(ni_abc.CLAHost): def __init__(self, problems=None): self._problems = problems
Apache License 2.0
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/encoding/filters/text/text_api.py
TextApi.delete
python
def delete(self, filter_id, **kwargs): return self.api_client.delete( '/encoding/filters/text/{filter_id}', path_params={'filter_id': filter_id}, type=BitmovinResponse, **kwargs )
Delete Text Filter :param filter_id: Id of the Text Filter :type filter_id: string_types, required :return: Id of the Text Filter. :rtype: BitmovinResponse
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/encoding/filters/text/text_api.py#L51-L66
from __future__ import absolute_import from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase from bitmovin_api_sdk.common.poscheck import poscheck_except from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope from bitmovin_api_sdk.models.response_error import ResponseError from bitmovin_api_sdk.models.text_filter import TextFilter from bitmovin_api_sdk.encoding.filters.text.customdata.customdata_api import CustomdataApi from bitmovin_api_sdk.encoding.filters.text.text_filter_list_query_params import TextFilterListQueryParams class TextApi(BaseApi): @poscheck_except(2) def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None): super(TextApi, self).__init__( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) self.customdata = CustomdataApi( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) def create(self, text_filter, **kwargs): return self.api_client.post( '/encoding/filters/text', text_filter, type=TextFilter, **kwargs )
MIT License
vincent-lg/tsunami
src/secondaires/jeux/plateaux/oie/case.py
CaseLabyrinthe.__init__
python
def __init__(self): Case.__init__(self, "le labyrinthe", "h", "|rg|", "sur", "arrivez", "arrive", " !")
Constructor of the board square.
https://github.com/vincent-lg/tsunami/blob/36b3b974f6eefbf15cd5d5f099fc14630e66570b/src/secondaires/jeux/plateaux/oie/case.py#L199-L202
from abstraits.obase import BaseObj class Case(BaseObj): numero = 1 def __init__(self, titre, symbole, couleur, article="sur", verbe="arrivez", a_verbe="arrive", ponctuation="."): BaseObj.__init__(self) self.titre = titre self.symbole = symbole if len(symbole) > 1: raise ValueError("le symbole de la case {} fait plus " "d'un caractère".format(titre)) self.couleur = couleur self.article = article self.verbe = verbe self.a_verbe = a_verbe self.ponctuation = ponctuation self.numero = Case.numero Case.numero += 1 def __getnewargs__(self): return ("", "", "") def __str__(self): return self.couleur + self.symbole + "|ff|" def arrive(self, jeu, plateau, partie, personnage, coup): pion = jeu.pions[personnage] personnage << "Vous {} {} {} ({}){}".format( self.verbe, self.article, self.titre, self.numero, self.ponctuation) partie.envoyer("Le pion {} {} {} {} ({}){}".format( pion.couleur, self.a_verbe, self.article, self.titre, self.numero, self.ponctuation), personnage) class CaseOie(Case): def __init__(self): Case.__init__(self, "l'oie", "o", "|rg|") def __getnewargs__(self): return () def arrive(self, jeu, plateau, partie, personnage, coup): Case.arrive(self, jeu, plateau, partie, personnage, coup) jeu.avancer(personnage, coup) class CasePont(Case): def __init__(self): Case.__init__(self, "le pont", "t", "|rg|") def __getnewargs__(self): return () def arrive(self, jeu, plateau, partie, personnage, coup): Case.arrive(self, jeu, plateau, partie, personnage, coup) jeu.avancer(personnage, 6) class CaseHotellerie(Case): def __init__(self): Case.__init__(self, "l'auberge", "x", "|rg|") def __getnewargs__(self): return () def arrive(self, jeu, plateau, partie, personnage, coup): Case.arrive(self, jeu, plateau, partie, personnage, coup) pion = jeu.pions[personnage] personnage << "Vous êtes condamné à occuper cette case trois " "tours durant." partie.envoyer("Le pion {} est condamné à occuper cette case trois " "tours durant.".format(pion.couleur), personnage) jeu.hotellerie[personnage] = 2 class CasePuits(Case): def __init__(self): Case.__init__(self, "le puits", "O", "|rg|", "dans", "tombez", "tombe", " !") def __getnewargs__(self): return () def arrive(self, jeu, plateau, partie, personnage, coup): Case.arrive(self, jeu, plateau, partie, personnage, coup) if jeu.puits: autre = jeu.puits autre << "Vous pouvez sortir du puits !" a_pion = jeu.pions[autre] partie.envoyer("Le pion {} sort du puits.".format(a_pion.couleur), autre) pion = jeu.pions[personnage] personnage << "Vous devez attendre que quelqu'un vous délivre." jeu.puits = personnage class CasePrison(Case): def __init__(self): Case.__init__(self, "la prison", "*", "|rg|", "dans", "entrez", "entre", " !") def __getnewargs__(self): return () def arrive(self, jeu, plateau, partie, personnage, coup): Case.arrive(self, jeu, plateau, partie, personnage, coup) if jeu.prison: autre = jeu.prison autre << "Vous êtes libre ! Vous pouvez quitter la prison." a_pion = jeu.pions[autre] partie.envoyer("Le pion {} quitte la prison.".format( a_pion.couleur), autre) pion = jeu.pions[personnage] personnage << "Vous devez attendre que quelqu'un vous délivre." jeu.prison = personnage class CaseLabyrinthe(Case):
BSD 3-Clause New or Revised License
prompt-toolkit/python-prompt-toolkit
prompt_toolkit/layout/menus.py
MultiColumnCompletionMenuControl.get_key_bindings
python
def get_key_bindings(self) -> "KeyBindings": from prompt_toolkit.key_binding.key_bindings import KeyBindings kb = KeyBindings() @Condition def filter() -> bool: app = get_app() complete_state = app.current_buffer.complete_state if complete_state is None or complete_state.complete_index is None: return False return any(window.content == self for window in app.layout.visible_windows) def move(right: bool = False) -> None: buff = get_app().current_buffer complete_state = buff.complete_state if complete_state is not None and complete_state.complete_index is not None: new_index = complete_state.complete_index if right: new_index += self._rendered_rows else: new_index -= self._rendered_rows if 0 <= new_index < len(complete_state.completions): buff.go_to_completion(new_index) @kb.add("left", is_global=True, filter=filter) def _left(event: E) -> None: move() @kb.add("right", is_global=True, filter=filter) def _right(event: E) -> None: move(True) return kb
Expose key bindings that handle the left/right arrow keys when the menu is displayed.
https://github.com/prompt-toolkit/python-prompt-toolkit/blob/ef64785fa6fcbd2be952c9781ff5f1dc9cb011d0/prompt_toolkit/layout/menus.py#L561-L609
import math from itertools import zip_longest from typing import ( TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar, Union, cast, ) from prompt_toolkit.application.current import get_app from prompt_toolkit.buffer import CompletionState from prompt_toolkit.completion import Completion from prompt_toolkit.data_structures import Point from prompt_toolkit.filters import ( Condition, FilterOrBool, has_completions, is_done, to_filter, ) from prompt_toolkit.formatted_text import ( StyleAndTextTuples, fragment_list_width, to_formatted_text, ) from prompt_toolkit.key_binding.key_processor import KeyPressEvent from prompt_toolkit.layout.utils import explode_text_fragments from prompt_toolkit.mouse_events import MouseEvent, MouseEventType from prompt_toolkit.utils import get_cwidth from .containers import ConditionalContainer, HSplit, ScrollOffsets, Window from .controls import GetLinePrefixCallable, UIContent, UIControl from .dimension import Dimension from .margins import ScrollbarMargin if TYPE_CHECKING: from prompt_toolkit.key_binding.key_bindings import ( KeyBindings, NotImplementedOrNone, ) __all__ = [ "CompletionsMenu", "MultiColumnCompletionsMenu", ] E = KeyPressEvent class CompletionsMenuControl(UIControl): MIN_WIDTH = 7 def has_focus(self) -> bool: return False def preferred_width(self, max_available_width: int) -> Optional[int]: complete_state = get_app().current_buffer.complete_state if complete_state: menu_width = self._get_menu_width(500, complete_state) menu_meta_width = self._get_menu_meta_width(500, complete_state) return menu_width + menu_meta_width else: return 0 def preferred_height( self, width: int, max_available_height: int, wrap_lines: bool, get_line_prefix: Optional[GetLinePrefixCallable], ) -> Optional[int]: complete_state = get_app().current_buffer.complete_state if complete_state: return len(complete_state.completions) else: return 0 def create_content(self, width: int, height: int) -> UIContent: complete_state = get_app().current_buffer.complete_state if complete_state: completions = complete_state.completions index = complete_state.complete_index menu_width = self._get_menu_width(width, complete_state) menu_meta_width = self._get_menu_meta_width( width - menu_width, complete_state ) show_meta = self._show_meta(complete_state) def get_line(i: int) -> StyleAndTextTuples: c = completions[i] is_current_completion = i == index result = _get_menu_item_fragments( c, is_current_completion, menu_width, space_after=True ) if show_meta: result += self._get_menu_item_meta_fragments( c, is_current_completion, menu_meta_width ) return result return UIContent( get_line=get_line, cursor_position=Point(x=0, y=index or 0), line_count=len(completions), ) return UIContent() def _show_meta(self, complete_state: CompletionState) -> bool: return any(c.display_meta_text for c in complete_state.completions) def _get_menu_width(self, max_width: int, complete_state: CompletionState) -> int: return min( max_width, max( self.MIN_WIDTH, max(get_cwidth(c.display_text) for c in complete_state.completions) + 2, ), ) def _get_menu_meta_width( self, max_width: int, complete_state: CompletionState ) -> int: def meta_width(completion: Completion) -> int: return get_cwidth(completion.display_meta_text) if self._show_meta(complete_state): return min( max_width, max(meta_width(c) for c in complete_state.completions) + 2 ) else: return 0 def _get_menu_item_meta_fragments( self, completion: Completion, is_current_completion: bool, width: int ) -> StyleAndTextTuples: if 
is_current_completion: style_str = "class:completion-menu.meta.completion.current" else: style_str = "class:completion-menu.meta.completion" text, tw = _trim_formatted_text(completion.display_meta, width - 2) padding = " " * (width - 1 - tw) return to_formatted_text( cast(StyleAndTextTuples, []) + [("", " ")] + text + [("", padding)], style=style_str, ) def mouse_handler(self, mouse_event: MouseEvent) -> "NotImplementedOrNone": b = get_app().current_buffer if mouse_event.event_type == MouseEventType.MOUSE_UP: b.go_to_completion(mouse_event.position.y) b.complete_state = None elif mouse_event.event_type == MouseEventType.SCROLL_DOWN: b.complete_next(count=3, disable_wrap_around=True) elif mouse_event.event_type == MouseEventType.SCROLL_UP: b.complete_previous(count=3, disable_wrap_around=True) return None def _get_menu_item_fragments( completion: Completion, is_current_completion: bool, width: int, space_after: bool = False, ) -> StyleAndTextTuples: if is_current_completion: style_str = "class:completion-menu.completion.current %s %s" % ( completion.style, completion.selected_style, ) else: style_str = "class:completion-menu.completion " + completion.style text, tw = _trim_formatted_text( completion.display, (width - 2 if space_after else width - 1) ) padding = " " * (width - 1 - tw) return to_formatted_text( cast(StyleAndTextTuples, []) + [("", " ")] + text + [("", padding)], style=style_str, ) def _trim_formatted_text( formatted_text: StyleAndTextTuples, max_width: int ) -> Tuple[StyleAndTextTuples, int]: width = fragment_list_width(formatted_text) if width > max_width: result = [] remaining_width = max_width - 3 for style_and_ch in explode_text_fragments(formatted_text): ch_width = get_cwidth(style_and_ch[1]) if ch_width <= remaining_width: result.append(style_and_ch) remaining_width -= ch_width else: break result.append(("", "...")) return result, max_width - remaining_width else: return formatted_text, width class CompletionsMenu(ConditionalContainer): def __init__( self, max_height: Optional[int] = None, scroll_offset: Union[int, Callable[[], int]] = 0, extra_filter: FilterOrBool = True, display_arrows: FilterOrBool = False, z_index: int = 10 ** 8, ) -> None: extra_filter = to_filter(extra_filter) display_arrows = to_filter(display_arrows) super().__init__( content=Window( content=CompletionsMenuControl(), width=Dimension(min=8), height=Dimension(min=1, max=max_height), scroll_offsets=ScrollOffsets(top=scroll_offset, bottom=scroll_offset), right_margins=[ScrollbarMargin(display_arrows=display_arrows)], dont_extend_width=True, style="class:completion-menu", z_index=z_index, ), filter=has_completions & ~is_done & extra_filter, ) class MultiColumnCompletionMenuControl(UIControl): _required_margin = 3 def __init__(self, min_rows: int = 3, suggested_max_column_width: int = 30) -> None: assert min_rows >= 1 self.min_rows = min_rows self.suggested_max_column_width = suggested_max_column_width self.scroll = 0 self._rendered_rows = 0 self._rendered_columns = 0 self._total_columns = 0 self._render_pos_to_completion: Dict[Tuple[int, int], Completion] = {} self._render_left_arrow = False self._render_right_arrow = False self._render_width = 0 def reset(self) -> None: self.scroll = 0 def has_focus(self) -> bool: return False def preferred_width(self, max_available_width: int) -> Optional[int]: complete_state = get_app().current_buffer.complete_state if complete_state is None: return 0 column_width = self._get_column_width(complete_state) result = int( column_width * 
math.ceil(len(complete_state.completions) / float(self.min_rows)) ) while ( result > column_width and result > max_available_width - self._required_margin ): result -= column_width return result + self._required_margin def preferred_height( self, width: int, max_available_height: int, wrap_lines: bool, get_line_prefix: Optional[GetLinePrefixCallable], ) -> Optional[int]: complete_state = get_app().current_buffer.complete_state if complete_state is None: return 0 column_width = self._get_column_width(complete_state) column_count = max(1, (width - self._required_margin) // column_width) return int(math.ceil(len(complete_state.completions) / float(column_count))) def create_content(self, width: int, height: int) -> UIContent: complete_state = get_app().current_buffer.complete_state if complete_state is None: return UIContent() column_width = self._get_column_width(complete_state) self._render_pos_to_completion = {} _T = TypeVar("_T") def grouper( n: int, iterable: Iterable[_T], fillvalue: Optional[_T] = None ) -> Iterable[List[_T]]: args = [iter(iterable)] * n return zip_longest(fillvalue=fillvalue, *args) def is_current_completion(completion: Completion) -> bool: return ( complete_state is not None and complete_state.complete_index is not None and c == complete_state.current_completion ) HORIZONTAL_MARGIN_REQUIRED = 3 column_width = min(width - HORIZONTAL_MARGIN_REQUIRED, column_width) if column_width > self.suggested_max_column_width: column_width //= column_width // self.suggested_max_column_width visible_columns = max(1, (width - self._required_margin) // column_width) columns_ = list(grouper(height, complete_state.completions)) rows_ = list(zip(*columns_)) selected_column = (complete_state.complete_index or 0) // height self.scroll = min( selected_column, max(self.scroll, selected_column - visible_columns + 1) ) render_left_arrow = self.scroll > 0 render_right_arrow = self.scroll < len(rows_[0]) - visible_columns fragments_for_line = [] for row_index, row in enumerate(rows_): fragments: StyleAndTextTuples = [] middle_row = row_index == len(rows_) // 2 if render_left_arrow: fragments.append(("class:scrollbar", "<" if middle_row else " ")) elif render_right_arrow: fragments.append(("", " ")) for column_index, c in enumerate(row[self.scroll :][:visible_columns]): if c is not None: fragments += _get_menu_item_fragments( c, is_current_completion(c), column_width, space_after=False ) for x in range(column_width): self._render_pos_to_completion[ (column_index * column_width + x, row_index) ] = c else: fragments.append(("class:completion", " " * column_width)) if render_left_arrow or render_right_arrow: fragments.append(("class:completion", " ")) if render_right_arrow: fragments.append(("class:scrollbar", ">" if middle_row else " ")) elif render_left_arrow: fragments.append(("class:completion", " ")) fragments_for_line.append( to_formatted_text(fragments, style="class:completion-menu") ) self._rendered_rows = height self._rendered_columns = visible_columns self._total_columns = len(columns_) self._render_left_arrow = render_left_arrow self._render_right_arrow = render_right_arrow self._render_width = ( column_width * visible_columns + render_left_arrow + render_right_arrow + 1 ) def get_line(i: int) -> StyleAndTextTuples: return fragments_for_line[i] return UIContent(get_line=get_line, line_count=len(rows_)) def _get_column_width(self, complete_state: CompletionState) -> int: return max(get_cwidth(c.display_text) for c in complete_state.completions) + 1 def mouse_handler(self, mouse_event: 
MouseEvent) -> "NotImplementedOrNone": b = get_app().current_buffer def scroll_left() -> None: b.complete_previous(count=self._rendered_rows, disable_wrap_around=True) self.scroll = max(0, self.scroll - 1) def scroll_right() -> None: b.complete_next(count=self._rendered_rows, disable_wrap_around=True) self.scroll = min( self._total_columns - self._rendered_columns, self.scroll + 1 ) if mouse_event.event_type == MouseEventType.SCROLL_DOWN: scroll_right() elif mouse_event.event_type == MouseEventType.SCROLL_UP: scroll_left() elif mouse_event.event_type == MouseEventType.MOUSE_UP: x = mouse_event.position.x y = mouse_event.position.y if x == 0: if self._render_left_arrow: scroll_left() elif x == self._render_width - 1: if self._render_right_arrow: scroll_right() else: completion = self._render_pos_to_completion.get((x, y)) if completion: b.apply_completion(completion) return None
BSD 3-Clause New or Revised License
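For context, a minimal usage sketch of the completion menus defined in the record above, assuming a standard prompt_toolkit application; the FloatContainer/Float wiring and the word list shown here are illustrative assumptions, not part of the record.

# Hypothetical sketch: attaching CompletionsMenu to a prompt_toolkit layout.
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.layout.containers import FloatContainer, Float, Window
from prompt_toolkit.layout.controls import BufferControl
from prompt_toolkit.layout.menus import CompletionsMenu

buffer = Buffer(completer=WordCompleter(['alpha', 'beta', 'gamma']),
                complete_while_typing=True)

# The completion menu floats at the cursor above the editing window; internally
# it is only shown while completions are available (has_completions & ~is_done).
body = FloatContainer(
    content=Window(BufferControl(buffer=buffer)),
    floats=[
        Float(
            xcursor=True,
            ycursor=True,
            content=CompletionsMenu(max_height=16, scroll_offset=1),
        )
    ],
)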
pokemongof/pokemongo-bot-desktop
build/pywin/Lib/smtplib.py
SMTP.ehlo
python
def ehlo(self, name=''):
    self.esmtp_features = {}
    self.putcmd(self.ehlo_msg, name or self.local_hostname)
    (code, msg) = self.getreply()
    if code == -1 and len(msg) == 0:
        self.close()
        raise SMTPServerDisconnected("Server not connected")
    self.ehlo_resp = msg
    if code != 250:
        return (code, msg)
    self.does_esmtp = 1
    resp = self.ehlo_resp.split('\n')
    del resp[0]
    for each in resp:
        auth_match = OLDSTYLE_AUTH.match(each)
        if auth_match:
            self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") + " " + auth_match.groups(0)[0]
            continue
        m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
        if m:
            feature = m.group("feature").lower()
            params = m.string[m.end("feature"):].strip()
            if feature == "auth":
                self.esmtp_features[feature] = self.esmtp_features.get(feature, "") + " " + params
            else:
                self.esmtp_features[feature] = params
    return (code, msg)
SMTP 'ehlo' command. Hostname to send for this command defaults to the FQDN of the local host.
https://github.com/pokemongof/pokemongo-bot-desktop/blob/4bfa94f0183406c6a86f93645eff7abd3ad4ced8/build/pywin/Lib/smtplib.py#L407-L455
import socket import re import email.utils import base64 import hmac from email.base64mime import encode as encode_base64 from sys import stderr __all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException", "SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError", "SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError", "quoteaddr", "quotedata", "SMTP"] SMTP_PORT = 25 SMTP_SSL_PORT = 465 CRLF = "\r\n" _MAXLINE = 8192 OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I) class SMTPException(Exception): class SMTPServerDisconnected(SMTPException): class SMTPResponseException(SMTPException): def __init__(self, code, msg): self.smtp_code = code self.smtp_error = msg self.args = (code, msg) class SMTPSenderRefused(SMTPResponseException): def __init__(self, code, msg, sender): self.smtp_code = code self.smtp_error = msg self.sender = sender self.args = (code, msg, sender) class SMTPRecipientsRefused(SMTPException): def __init__(self, recipients): self.recipients = recipients self.args = (recipients,) class SMTPDataError(SMTPResponseException): class SMTPConnectError(SMTPResponseException): class SMTPHeloError(SMTPResponseException): class SMTPAuthenticationError(SMTPResponseException): def quoteaddr(addr): m = (None, None) try: m = email.utils.parseaddr(addr)[1] except AttributeError: pass if m == (None, None): return "<%s>" % addr elif m is None: return "<>" else: return "<%s>" % m def _addr_only(addrstring): displayname, addr = email.utils.parseaddr(addrstring) if (displayname, addr) == ('', ''): return addrstring return addr def quotedata(data): return re.sub(r'(?m)^\.', '..', re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)) try: import ssl except ImportError: _have_ssl = False else: class SSLFakeFile: def __init__(self, sslobj): self.sslobj = sslobj def readline(self, size=-1): if size < 0: size = None str = "" chr = None while chr != "\n": if size is not None and len(str) >= size: break chr = self.sslobj.read(1) if not chr: break str += chr return str def close(self): pass _have_ssl = True class SMTP: debuglevel = 0 file = None helo_resp = None ehlo_msg = "ehlo" ehlo_resp = None does_esmtp = 0 default_port = SMTP_PORT def __init__(self, host='', port=0, local_hostname=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): self.timeout = timeout self.esmtp_features = {} if host: (code, msg) = self.connect(host, port) if code != 220: raise SMTPConnectError(code, msg) if local_hostname is not None: self.local_hostname = local_hostname else: fqdn = socket.getfqdn() if '.' 
in fqdn: self.local_hostname = fqdn else: addr = '127.0.0.1' try: addr = socket.gethostbyname(socket.gethostname()) except socket.gaierror: pass self.local_hostname = '[%s]' % addr def set_debuglevel(self, debuglevel): self.debuglevel = debuglevel def _get_socket(self, host, port, timeout): if self.debuglevel > 0: print>>stderr, 'connect:', (host, port) return socket.create_connection((host, port), timeout) def connect(self, host='localhost', port=0): if not port and (host.find(':') == host.rfind(':')): i = host.rfind(':') if i >= 0: host, port = host[:i], host[i + 1:] try: port = int(port) except ValueError: raise socket.error, "nonnumeric port" if not port: port = self.default_port if self.debuglevel > 0: print>>stderr, 'connect:', (host, port) self.sock = self._get_socket(host, port, self.timeout) (code, msg) = self.getreply() if self.debuglevel > 0: print>>stderr, "connect:", msg return (code, msg) def send(self, str): if self.debuglevel > 0: print>>stderr, 'send:', repr(str) if hasattr(self, 'sock') and self.sock: try: self.sock.sendall(str) except socket.error: self.close() raise SMTPServerDisconnected('Server not connected') else: raise SMTPServerDisconnected('please run connect() first') def putcmd(self, cmd, args=""): if args == "": str = '%s%s' % (cmd, CRLF) else: str = '%s %s%s' % (cmd, args, CRLF) self.send(str) def getreply(self): resp = [] if self.file is None: self.file = self.sock.makefile('rb') while 1: try: line = self.file.readline(_MAXLINE + 1) except socket.error as e: self.close() raise SMTPServerDisconnected("Connection unexpectedly closed: " + str(e)) if line == '': self.close() raise SMTPServerDisconnected("Connection unexpectedly closed") if self.debuglevel > 0: print>>stderr, 'reply:', repr(line) if len(line) > _MAXLINE: raise SMTPResponseException(500, "Line too long.") resp.append(line[4:].strip()) code = line[:3] try: errcode = int(code) except ValueError: errcode = -1 break if line[3:4] != "-": break errmsg = "\n".join(resp) if self.debuglevel > 0: print>>stderr, 'reply: retcode (%s); Msg: %s' % (errcode, errmsg) return errcode, errmsg def docmd(self, cmd, args=""): self.putcmd(cmd, args) return self.getreply() def helo(self, name=''): self.putcmd("helo", name or self.local_hostname) (code, msg) = self.getreply() self.helo_resp = msg return (code, msg)
MIT License
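A brief usage sketch for the ehlo method above, using only the standard smtplib API; the host name is a placeholder and a reachable SMTP server is assumed.

# Hypothetical usage sketch: issue EHLO and inspect the advertised ESMTP features.
import smtplib

smtp = smtplib.SMTP('mail.example.com', 25)   # example host, assumption
code, msg = smtp.ehlo()                       # hostname defaults to the local FQDN
if code == 250:
    # ehlo() populates esmtp_features; has_extn() consults that dictionary.
    print(smtp.esmtp_features)
    if smtp.has_extn('starttls'):
        smtp.starttls()
        smtp.ehlo()                           # re-issue EHLO after STARTTLS
smtp.quit()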
michaelkonobeev/adashift
author_code_base/nmt/nmt/model.py
BaseModel.train
python
def train(self, sess):
    assert self.mode == tf.contrib.learn.ModeKeys.TRAIN
    output_tuple = TrainOutputTuple(train_summary=self.train_summary,
                                    train_loss=self.train_loss,
                                    predict_count=self.predict_count,
                                    global_step=self.global_step,
                                    word_count=self.word_count,
                                    batch_size=self.batch_size,
                                    grad_norm=self.grad_norm,
                                    learning_rate=self.learning_rate)
    return sess.run([self.update, output_tuple])
Execute train graph.
https://github.com/michaelkonobeev/adashift/blob/bf86b021d42e922078a39246770f0f875300a6f3/author_code_base/nmt/nmt/model.py#L341-L352
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import numpy as np import tensorflow as tf from . import model_helper from .utils import iterator_utils from .utils import misc_utils as utils from .utils import vocab_utils from . import optimizer_all utils.check_tensorflow_version() __all__ = ["BaseModel", "Model"] class TrainOutputTuple(collections.namedtuple( "TrainOutputTuple", ("train_summary", "train_loss", "predict_count", "global_step", "word_count", "batch_size", "grad_norm", "learning_rate"))): pass class EvalOutputTuple(collections.namedtuple( "EvalOutputTuple", ("eval_loss", "predict_count", "batch_size"))): pass class InferOutputTuple(collections.namedtuple( "InferOutputTuple", ("infer_logits", "infer_summary", "sample_id", "sample_words"))): pass class BaseModel(object): def __init__(self, hparams, mode, iterator, source_vocab_table, target_vocab_table, reverse_target_vocab_table=None, scope=None, extra_args=None): self._set_params_initializer(hparams, mode, iterator, source_vocab_table, target_vocab_table, scope, extra_args) self.extract_encoder_layers = (hasattr(hparams, "extract_encoder_layers") and hparams.extract_encoder_layers) res = self.build_graph(hparams, scope=scope) if not self.extract_encoder_layers: self._set_train_or_infer(res, reverse_target_vocab_table, hparams) self.saver = tf.train.Saver( tf.global_variables(), max_to_keep=hparams.num_keep_ckpts) def _set_params_initializer(self, hparams, mode, iterator, source_vocab_table, target_vocab_table, scope, extra_args=None): assert isinstance(iterator, iterator_utils.BatchedInput) self.iterator = iterator self.mode = mode self.src_vocab_table = source_vocab_table self.tgt_vocab_table = target_vocab_table self.src_vocab_size = hparams.src_vocab_size self.tgt_vocab_size = hparams.tgt_vocab_size self.num_gpus = hparams.num_gpus self.time_major = hparams.time_major if hparams.use_char_encode: assert (not self.time_major), ("Can't use time major for" " char-level inputs.") self.dtype = tf.float32 self.num_sampled_softmax = hparams.num_sampled_softmax self.single_cell_fn = None if extra_args: self.single_cell_fn = extra_args.single_cell_fn self.num_units = hparams.num_units self.num_encoder_layers = hparams.num_encoder_layers self.num_decoder_layers = hparams.num_decoder_layers assert self.num_encoder_layers assert self.num_decoder_layers if hasattr(hparams, "num_residual_layers"): self.num_encoder_residual_layers = hparams.num_residual_layers self.num_decoder_residual_layers = hparams.num_residual_layers else: self.num_encoder_residual_layers = hparams.num_encoder_residual_layers self.num_decoder_residual_layers = hparams.num_decoder_residual_layers self.batch_size = tf.size(self.iterator.source_sequence_length) self.global_step = tf.Variable(0, trainable=False) self.random_seed = hparams.random_seed initializer = model_helper.get_initializer( hparams.init_op, self.random_seed, hparams.init_weight) tf.get_variable_scope().set_initializer(initializer) if extra_args and extra_args.encoder_emb_lookup_fn: self.encoder_emb_lookup_fn = extra_args.encoder_emb_lookup_fn else: self.encoder_emb_lookup_fn = tf.nn.embedding_lookup self.init_embeddings(hparams, scope) def _set_train_or_infer(self, res, reverse_target_vocab_table, hparams): if self.mode == tf.contrib.learn.ModeKeys.TRAIN: self.train_loss = res[1] self.word_count = tf.reduce_sum( self.iterator.source_sequence_length) + tf.reduce_sum( self.iterator.target_sequence_length) elif 
self.mode == tf.contrib.learn.ModeKeys.EVAL: self.eval_loss = res[1] elif self.mode == tf.contrib.learn.ModeKeys.INFER: self.infer_logits, _, self.final_context_state, self.sample_id = res self.sample_words = reverse_target_vocab_table.lookup( tf.to_int64(self.sample_id)) if self.mode != tf.contrib.learn.ModeKeys.INFER: self.predict_count = tf.reduce_sum( self.iterator.target_sequence_length) params = tf.trainable_variables() if self.mode == tf.contrib.learn.ModeKeys.TRAIN: self.learning_rate = tf.constant(hparams.learning_rate) self.learning_rate = self._get_learning_rate_warmup(hparams) self.learning_rate = self._get_learning_rate_decay(hparams) if hparams.optimizer == "sgd": opt = tf.train.GradientDescentOptimizer(self.learning_rate) elif hparams.optimizer == 'adam': opt = optimizer_all.Adam(learning_rate=self.learning_rate, beta1=hparams.beta1, beta2=hparams.beta2, epsilon=hparams.epsilon) elif hparams.optimizer == 'adaShift': opt = optimizer_all.AdaShift(learning_rate=self.learning_rate, keep_num=hparams.keep_num, beta1=hparams.beta1, beta2=hparams.beta2, epsilon=hparams.epsilon, pred_g_op=hparams.pred_g_op, use_mov=(hparams.use_mov==1), mov_num=hparams.mov_num) elif hparams.optimizer == "amsgrad": opt = optimizer_all.AMSGrad(learning_rate=self.learning_rate, beta1=hparams.beta1, beta2=hparams.beta2, epsilon=hparams.epsilon) else: assert 'No optimizer has been chosed, name may be wrong' gradients = tf.gradients( self.train_loss, params, colocate_gradients_with_ops=hparams.colocate_gradients_with_ops) clipped_grads, grad_norm_summary, grad_norm = model_helper.gradient_clip( gradients, max_gradient_norm=hparams.max_gradient_norm) self.grad_norm_summary = grad_norm_summary self.grad_norm = grad_norm self.update = opt.apply_gradients( zip(clipped_grads, params), global_step=self.global_step) self.train_summary = self._get_train_summary() elif self.mode == tf.contrib.learn.ModeKeys.INFER: self.infer_summary = self._get_infer_summary(hparams) utils.print_out("# Trainable variables") utils.print_out("Format: <name>, <shape>, <(soft) device placement>") for param in params: utils.print_out(" %s, %s, %s" % (param.name, str(param.get_shape()), param.op.device)) def _get_learning_rate_warmup(self, hparams): warmup_steps = hparams.warmup_steps warmup_scheme = hparams.warmup_scheme utils.print_out(" learning_rate=%g, warmup_steps=%d, warmup_scheme=%s" % (hparams.learning_rate, warmup_steps, warmup_scheme)) if warmup_scheme == "t2t": warmup_factor = tf.exp(tf.log(0.01) / warmup_steps) inv_decay = warmup_factor**( tf.to_float(warmup_steps - self.global_step)) else: raise ValueError("Unknown warmup scheme %s" % warmup_scheme) return tf.cond( self.global_step < hparams.warmup_steps, lambda: inv_decay * self.learning_rate, lambda: self.learning_rate, name="learning_rate_warump_cond") def _get_decay_info(self, hparams): if hparams.decay_scheme in ["luong5", "luong10", "luong234"]: decay_factor = 0.5 if hparams.decay_scheme == "luong5": start_decay_step = int(hparams.num_train_steps / 2) decay_times = 5 elif hparams.decay_scheme == "luong10": start_decay_step = int(hparams.num_train_steps / 2) decay_times = 10 elif hparams.decay_scheme == "luong234": start_decay_step = int(hparams.num_train_steps * 2 / 3) decay_times = 4 remain_steps = hparams.num_train_steps - start_decay_step decay_steps = int(remain_steps / decay_times) elif hparams.decay_scheme == "self": start_decay_step = 8000 decay_steps = 1000 decay_factor = 0.5 elif hparams.decay_scheme == "ming": start_decay_step = 8000 decay_steps = 3000 
decay_factor = 0.5 elif not hparams.decay_scheme: start_decay_step = hparams.num_train_steps decay_steps = 0 decay_factor = 1.0 elif hparams.decay_scheme: raise ValueError("Unknown decay scheme %s" % hparams.decay_scheme) return start_decay_step, decay_steps, decay_factor def _get_learning_rate_decay(self, hparams): start_decay_step, decay_steps, decay_factor = self._get_decay_info(hparams) utils.print_out(" decay_scheme=%s, start_decay_step=%d, decay_steps %d, " "decay_factor %g" % (hparams.decay_scheme, start_decay_step, decay_steps, decay_factor)) return tf.cond( self.global_step < start_decay_step, lambda: self.learning_rate, lambda: tf.train.exponential_decay( self.learning_rate, (self.global_step - start_decay_step), decay_steps, decay_factor, staircase=True), name="learning_rate_decay_cond") def init_embeddings(self, hparams, scope): self.embedding_encoder, self.embedding_decoder = ( model_helper.create_emb_for_encoder_and_decoder( share_vocab=hparams.share_vocab, src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, src_embed_size=self.num_units, tgt_embed_size=self.num_units, num_enc_partitions=hparams.num_enc_emb_partitions, num_dec_partitions=hparams.num_dec_emb_partitions, src_vocab_file=hparams.src_vocab_file, tgt_vocab_file=hparams.tgt_vocab_file, src_embed_file=hparams.src_embed_file, tgt_embed_file=hparams.tgt_embed_file, use_char_encode=hparams.use_char_encode, scope=scope,)) def _get_train_summary(self): train_summary = tf.summary.merge( [tf.summary.scalar("lr", self.learning_rate), tf.summary.scalar("train_loss", self.train_loss)] + self.grad_norm_summary) return train_summary
MIT License
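A sketch of how a training loop might drive the train method above; the already-built TRAIN-mode model and the step counts are assumptions passed in as parameters, and only the handling of the returned TrainOutputTuple follows the record.

# Hypothetical training-loop sketch around BaseModel.train (TF 1.x style).
import tensorflow as tf


def run_training(train_model, num_train_steps=12000, log_every=100):
    # `train_model` is assumed to be an already-constructed Model in TRAIN mode,
    # built elsewhere with the iterator/hparams machinery shown in the context.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.tables_initializer())
        for _ in range(num_train_steps):
            # train() runs the update op and returns the populated TrainOutputTuple.
            _, output = train_model.train(sess)
            if output.global_step % log_every == 0:
                print('step %d  loss %.4f  lr %g'
                      % (output.global_step, output.train_loss, output.learning_rate))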
seeways/djangoedusysdemo
extra_apps/xadmin/plugins/xversion.py
_autoregister
python
def _autoregister(admin, model, follow=None):
    if model._meta.proxy:
        raise RegistrationError("Proxy models cannot be used with django-reversion, register the parent class instead")
    if not is_registered(model):
        follow = follow or []
        for parent_cls, field in model._meta.parents.items():
            follow.append(field.name)
            _autoregister(admin, parent_cls)
        register(model, follow=follow, format=admin.reversion_format)
Registers a model with reversion, if required.
https://github.com/seeways/djangoedusysdemo/blob/e94e8504dd4fa82598b30caeb8eaa542080f6853/extra_apps/xadmin/plugins/xversion.py#L31-L40
from crispy_forms.utils import TEMPLATE_PACK
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.db import models
from django.db.models.query import QuerySet
from django.forms.models import model_to_dict
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.encoding import force_text, smart_text
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from xadmin.layout import Field, render_field
from xadmin.plugins.inline import Inline
from xadmin.plugins.actions import BaseActionView
from xadmin.plugins.inline import InlineModelAdmin
from xadmin.sites import site
from xadmin.util import unquote, quote, model_format_dict, is_related_field2
from xadmin.views import BaseAdminPlugin, ModelAdminView, CreateAdminView, UpdateAdminView, DetailAdminView, ModelFormAdminView, DeleteAdminView, ListAdminView
from xadmin.views.base import csrf_protect_m, filter_hook
from xadmin.views.detail import DetailAdminUtil
from reversion.models import Revision, Version
from reversion.revisions import is_active, register, is_registered, set_comment, create_revision, set_user
from contextlib import contextmanager
from functools import partial
Apache License 2.0
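A sketch of what _autoregister effectively does for an inherited model, using django-reversion's public registration API; the Place/Restaurant models are illustrative assumptions and the code is shown as it would appear inside an installed app's models.py.

# Hypothetical sketch: multi-table inheritance registration with django-reversion,
# mirroring _autoregister() - register the parent and follow the parent-link field.
from django.db import models
from reversion.revisions import register, is_registered


class Place(models.Model):
    name = models.CharField(max_length=100)


class Restaurant(Place):  # multi-table inheritance adds an implicit `place_ptr` parent link
    serves_pizza = models.BooleanField(default=False)


if not is_registered(Restaurant):
    register(Place)
    # Following the parent-link field versions the parent row together with the child.
    register(Restaurant, follow=['place_ptr'])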
autonomousvision/neat
leaderboard/team_code/base_agent.py
BaseAgent._get_distance
python
def _get_distance(self, target):
    sensor_transform = self._sensors['rgb_front'].get_transform()
    distance = np.sqrt(
        (sensor_transform.location.x - target.x) ** 2 +
        (sensor_transform.location.y - target.y) ** 2 +
        (sensor_transform.location.z - target.z) ** 2)
    return distance
Returns the distance from the (rgb_front) camera to the target.

Args:
    target (carla.Location): target location in world coordinates.

Returns:
    float: Euclidean distance from the camera to the target.
https://github.com/autonomousvision/neat/blob/686f4a0b5b5bf20c99f323e9542f5b68808df2de/leaderboard/team_code/base_agent.py#L485-L501
import time import os import datetime import pathlib import json import cv2 import carla from leaderboard.autoagents import autonomous_agent from team_code.planner import RoutePlanner import numpy as np from PIL import Image, ImageDraw SAVE_PATH = os.environ.get('SAVE_PATH', None) class BaseAgent(autonomous_agent.AutonomousAgent): def setup(self, path_to_conf_file): self.track = autonomous_agent.Track.SENSORS self.config_path = path_to_conf_file self.step = -1 self.wall_start = time.time() self.initialized = False self._sensor_data = { 'width': 400, 'height': 300, 'fov': 100 } self.weather_id = None self.save_path = None if SAVE_PATH is not None: now = datetime.datetime.now() string = pathlib.Path(os.environ['ROUTES']).stem + '_' string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second))) print (string) self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string self.save_path.mkdir(parents=True, exist_ok=False) for sensor in self.sensors(): if hasattr(sensor, 'save') and sensor['save']: (self.save_path / sensor['id']).mkdir() (self.save_path / 'measurements').mkdir(parents=True, exist_ok=True) (self.save_path / 'lidar').mkdir(parents=True, exist_ok=True) (self.save_path / 'lidar_360').mkdir(parents=True, exist_ok=True) (self.save_path / 'topdown').mkdir(parents=True, exist_ok=True) for pos in ['front', 'left', 'right', 'rear']: for sensor_type in ['rgb', 'seg', 'depth']: name = sensor_type + '_' + pos (self.save_path / name).mkdir() def _init(self): self._command_planner = RoutePlanner(7.5, 25.0, 257) self._command_planner.set_route(self._global_plan, True) self.initialized = True self._sensor_data['calibration'] = self._get_camera_to_car_calibration(self._sensor_data) self._sensors = self.sensor_interface._sensors_objects def _get_position(self, gps): gps = (gps - self._command_planner.mean) * self._command_planner.scale return gps def sensors(self): if SAVE_PATH is not None: return [ { 'type': 'sensor.camera.rgb', 'x': 1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'rgb_front' }, { 'type': 'sensor.camera.semantic_segmentation', 'x': 1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'seg_front' }, { 'type': 'sensor.camera.depth', 'x': 1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'depth_front' }, { 'type': 'sensor.camera.rgb', 'x': -1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'rgb_rear' }, { 'type': 'sensor.camera.semantic_segmentation', 'x': -1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'seg_rear' }, { 'type': 'sensor.camera.depth', 'x': -1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'depth_rear' }, { 'type': 'sensor.camera.rgb', 'x': 1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0, 'width': self._sensor_data['width'], 'height': 
self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'rgb_left' }, { 'type': 'sensor.camera.semantic_segmentation', 'x': 1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'seg_left' }, { 'type': 'sensor.camera.depth', 'x': 1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'depth_left' }, { 'type': 'sensor.camera.rgb', 'x': 1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'rgb_right' }, { 'type': 'sensor.camera.semantic_segmentation', 'x': 1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'seg_right' }, { 'type': 'sensor.camera.depth', 'x': 1.3, 'y': 0.0, 'z': 2.3, 'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0, 'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'], 'id': 'depth_right' }, { 'type': 'sensor.lidar.ray_cast', 'x': 1.3, 'y': 0.0, 'z': 2.5, 'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0, 'rotation_frequency':10, 'id': 'lidar' }, { 'type': 'sensor.lidar.ray_cast', 'x': 1.3, 'y': 0.0, 'z': 2.5, 'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0, 'rotation_frequency':20, 'id': 'lidar_360' }, { 'type': 'sensor.other.imu', 'x': 0.0, 'y': 0.0, 'z': 0.0, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 'sensor_tick': 0.05, 'id': 'imu' }, { 'type': 'sensor.other.gnss', 'x': 0.0, 'y': 0.0, 'z': 0.0, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 'sensor_tick': 0.01, 'id': 'gps' }, { 'type': 'sensor.speedometer', 'reading_frequency': 20, 'id': 'speed' } ] else: return [ { 'type': 'sensor.other.imu', 'x': 0.0, 'y': 0.0, 'z': 0.0, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 'sensor_tick': 0.05, 'id': 'imu' }, { 'type': 'sensor.other.gnss', 'x': 0.0, 'y': 0.0, 'z': 0.0, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 'sensor_tick': 0.01, 'id': 'gps' }, { 'type': 'sensor.speedometer', 'reading_frequency': 20, 'id': 'speed' } ] def tick(self, input_data): weather = self._weather_to_dict(self._world.get_weather()) gps = input_data['gps'][1][:2] speed = input_data['speed'][1]['speed'] compass = input_data['imu'][1][-1] traffic_lights = self._find_obstacle('*traffic_light*') stop_signs = self._find_obstacle('*stop*') depth = {} seg = {} for pos in ['front', 'left', 'right', 'rear']: seg_cam = 'seg_' + pos depth_cam = 'depth_' + pos _segmentation = np.copy(input_data[seg_cam][1][:, :, 2]) depth[pos] = self._get_depth(input_data[depth_cam][1][:, :, :3]) self._change_seg_tl(_segmentation, depth[pos], traffic_lights) self._change_seg_stop(_segmentation, depth[pos], stop_signs, seg_cam) seg[pos] = _segmentation rgb_front = cv2.cvtColor(input_data['rgb_front'][1][:, :, :3], cv2.COLOR_BGR2RGB) rgb_rear = cv2.cvtColor(input_data['rgb_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB) rgb_left = cv2.cvtColor(input_data['rgb_left'][1][:, :, :3], cv2.COLOR_BGR2RGB) rgb_right = cv2.cvtColor(input_data['rgb_right'][1][:, :, :3], cv2.COLOR_BGR2RGB) depth_front = cv2.cvtColor(input_data['depth_front'][1][:, :, :3], cv2.COLOR_BGR2RGB) depth_left = cv2.cvtColor(input_data['depth_left'][1][:, :, :3], cv2.COLOR_BGR2RGB) depth_right = cv2.cvtColor(input_data['depth_right'][1][:, :, :3], 
cv2.COLOR_BGR2RGB) depth_rear = cv2.cvtColor(input_data['depth_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB) return { 'rgb_front': rgb_front, 'seg_front': seg['front'], 'depth_front': depth_front, 'rgb_rear': rgb_rear, 'seg_rear': seg['rear'], 'depth_rear': depth_rear, 'rgb_left': rgb_left, 'seg_left': seg['left'], 'depth_left': depth_left, 'rgb_right': rgb_right, 'seg_right': seg['right'], 'depth_right': depth_right, 'lidar' : input_data['lidar'][1], 'lidar_360': input_data['lidar_360'][1], 'gps': gps, 'speed': speed, 'compass': compass, 'weather': weather, } def _weather_to_dict(self, carla_weather): weather = { 'cloudiness': carla_weather.cloudiness, 'precipitation': carla_weather.precipitation, 'precipitation_deposits': carla_weather.precipitation_deposits, 'wind_intensity': carla_weather.wind_intensity, 'sun_azimuth_angle': carla_weather.sun_azimuth_angle, 'sun_altitude_angle': carla_weather.sun_altitude_angle, 'fog_density': carla_weather.fog_density, 'fog_distance': carla_weather.fog_distance, 'wetness': carla_weather.wetness, 'fog_falloff': carla_weather.fog_falloff, } return weather def _change_seg_stop(self, seg_img, depth_img, stop_signs, cam, _region_size=6): for stop in stop_signs: _dist = self._get_distance(stop.get_transform().location) _region = np.abs(depth_img - _dist) seg_img[(_region < _region_size) & (seg_img == 12)] = 26 trigger = stop.trigger_volume _trig_loc_world = self._trig_to_world(np.array([[0], [0], [0], [1.0]]).T, stop, trigger) _x = self._world_to_sensor(_trig_loc_world, self._get_sensor_position(cam))[0,0] if _x > 0: bb = self._create_2d_bb_points(trigger, 4) trig_loc_world = self._trig_to_world(bb, stop, trigger) cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(cam), True) cords_x_y_z = cords_x_y_z[:3, :] cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]]) bbox = (self._sensor_data['calibration'] @ cords_y_minus_z_x).T camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1) if np.any(camera_bbox[:,2] > 0): camera_bbox = np.array(camera_bbox) polygon = [(camera_bbox[i, 0], camera_bbox[i, 1]) for i in range(len(camera_bbox))] img = Image.new('L', (self._sensor_data['width'], self._sensor_data['height']), 0) ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1) _region = np.array(img) seg_img[(_region == 1) & (seg_img == 6)] = 27 def _trig_to_world(self, bb, parent, trigger): bb_transform = carla.Transform(trigger.location) bb_vehicle_matrix = self.get_matrix(bb_transform) vehicle_world_matrix = self.get_matrix(parent.get_transform()) bb_world_matrix = vehicle_world_matrix @ bb_vehicle_matrix world_cords = bb_world_matrix @ bb.T return world_cords def _create_2d_bb_points(self, actor_bb, scale_factor=1): cords = np.zeros((4, 4)) extent = actor_bb.extent x = extent.x * scale_factor y = extent.y * scale_factor z = extent.z * scale_factor cords[0, :] = np.array([x, y, 0, 1]) cords[1, :] = np.array([-x, y, 0, 1]) cords[2, :] = np.array([-x, -y, 0, 1]) cords[3, :] = np.array([x, -y, 0, 1]) return cords def _get_sensor_position(self, cam): sensor_transform = self._sensors[cam].get_transform() return sensor_transform def _world_to_sensor(self, cords, sensor, move_cords=False): sensor_world_matrix = self.get_matrix(sensor) world_sensor_matrix = np.linalg.inv(sensor_world_matrix) sensor_cords = np.dot(world_sensor_matrix, cords) if move_cords: _num_cords = range(sensor_cords.shape[1]) modified_cords = np.array([]) for i in _num_cords: if sensor_cords[0,i] < 
0: for j in _num_cords: if sensor_cords[0,j] > 0: _direction = sensor_cords[:,i] - sensor_cords[:,j] _distance = -sensor_cords[0,j] / _direction[0] new_cord = sensor_cords[:,j] + _distance[0,0] * _direction * 0.9999 modified_cords = np.hstack([modified_cords, new_cord]) if modified_cords.size else new_cord else: modified_cords = np.hstack([modified_cords, sensor_cords[:,i]]) if modified_cords.size else sensor_cords[:,i] return modified_cords else: return sensor_cords def get_matrix(self, transform): rotation = transform.rotation location = transform.location c_y = np.cos(np.radians(rotation.yaw)) s_y = np.sin(np.radians(rotation.yaw)) c_r = np.cos(np.radians(rotation.roll)) s_r = np.sin(np.radians(rotation.roll)) c_p = np.cos(np.radians(rotation.pitch)) s_p = np.sin(np.radians(rotation.pitch)) matrix = np.matrix(np.identity(4)) matrix[0, 3] = location.x matrix[1, 3] = location.y matrix[2, 3] = location.z matrix[0, 0] = c_p * c_y matrix[0, 1] = c_y * s_p * s_r - s_y * c_r matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r matrix[1, 0] = s_y * c_p matrix[1, 1] = s_y * s_p * s_r + c_y * c_r matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r matrix[2, 0] = s_p matrix[2, 1] = -c_p * s_r matrix[2, 2] = c_p * c_r return matrix def _change_seg_tl(self, seg_img, depth_img, traffic_lights, _region_size=4): for tl in traffic_lights: _dist = self._get_distance(tl.get_transform().location) _region = np.abs(depth_img - _dist) if tl.get_state() == carla.TrafficLightState.Red: state = 23 elif tl.get_state() == carla.TrafficLightState.Yellow: state = 24 elif tl.get_state() == carla.TrafficLightState.Green: state = 25 else: state = 18 seg_img[(_region < _region_size) & (seg_img == 18)] = state def _get_dist(self, p1, p2): distance = np.sqrt( (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2 + (p1[2] - p2[2]) ** 2) return distance
MIT License
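A standalone sketch of the distance computation used by _get_distance above, with a small stand-in for carla.Location so it can run without CARLA; the stand-in and the sample coordinates are illustrative (the camera mount point matches the rgb_front sensor definition in the context).

# Hypothetical sketch: the Euclidean camera-to-target distance from the record,
# using a tiny Location stand-in instead of a live CARLA sensor transform.
from collections import namedtuple
import numpy as np

Location = namedtuple('Location', ['x', 'y', 'z'])   # stand-in for carla.Location


def camera_to_target_distance(camera_location, target):
    return np.sqrt(
        (camera_location.x - target.x) ** 2 +
        (camera_location.y - target.y) ** 2 +
        (camera_location.z - target.z) ** 2)


cam = Location(1.3, 0.0, 2.3)                 # rgb_front mounting point from sensors()
tgt = Location(11.3, 0.0, 2.3)
print(camera_to_target_distance(cam, tgt))    # -> 10.0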
rucio/rucio
lib/rucio/rse/protocols/cache.py
Default.get
python
def get(self, pfn, dest, transfer_timeout=None):
    raise NotImplementedError
Provides access to files stored inside the connected RSE.

:param pfn: Physical file name of the requested file
:param dest: Name and path of the file when stored at the client
:param transfer_timeout: Transfer timeout (in seconds)
:raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
https://github.com/rucio/rucio/blob/6a6092798bb8220dec07328d0e3f7f42d1b931cd/lib/rucio/rse/protocols/cache.py#L89-L98
try:
    from exceptions import NotImplementedError
except ImportError:
    pass

from rucio.rse.protocols import protocol


class Default(protocol.RSEProtocol):

    def __init__(self, protocol_attr, rse_settings, logger=None):
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)
        self.attributes.pop('determinism_type', None)
        self.files = []

    def _get_path(self, scope, name):
        return '%s/%s' % (scope, name)

    def path2pfn(self, path):
        return ''.join([self.rse['scheme'], '://%s' % self.rse['hostname'], path])

    def exists(self, pfn):
        raise NotImplementedError

    def connect(self):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
Apache License 2.0
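A sketch of how a concrete protocol might override the abstract get above; the shutil-based local copy is an illustrative assumption, not rucio's actual cache implementation.

# Hypothetical sketch: a minimal subclass overriding Default.get().
import shutil

from rucio.rse.protocols.cache import Default


class LocalCopy(Default):
    def get(self, pfn, dest, transfer_timeout=None):
        # pfn:  physical file name inside the connected RSE
        # dest: path where the client wants the file stored
        path = pfn.split('://', 1)[-1]        # strip the scheme, keep the path
        shutil.copy(path, dest)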
ngageoint/sarpy
sarpy/annotation/afrl.py
GroundTruthConstructor.localize_for_sicd
python
def localize_for_sicd(self, sicd, base_sicd_file, populate_in_periphery=False, include_out_of_range=False):
    out_research = self.get_final_structure()
    out_research.apply_sicd(
        sicd, base_sicd_file,
        populate_in_periphery=populate_in_periphery,
        include_out_of_range=include_out_of_range)
    return out_research
Localize the AFRL structure for the given sicd structure.

This returns **a static copy** of the AFRL structure, and this method can be
repeatedly applied for a sequence of different sicd files which all apply to
the same ground truth scenario.

Parameters
----------
sicd : SICDType
base_sicd_file : str
populate_in_periphery : bool
include_out_of_range : bool

Returns
-------
ResearchType
https://github.com/ngageoint/sarpy/blob/91405721a7e6ffe7c76dd7b143915fee4bee1e82/sarpy/annotation/afrl.py#L214-L241
__classification__ = 'UNCLASSIFIED' __author__ = "Thomas McCullough" from typing import Dict from sarpy.io.complex.sicd_elements.SICD import SICDType from sarpy.io.complex.sicd import SICDReader from sarpy.annotation.afrl_elements.Research import ResearchType from sarpy.annotation.afrl_elements.DetailCollectionInfo import DetailCollectionInfoType from sarpy.annotation.afrl_elements.DetailSubCollectionInfo import DetailSubCollectionInfoType from sarpy.annotation.afrl_elements.DetailObjectInfo import DetailObjectInfoType, PlaneNominalType, NominalType, TheObjectType, GeoLocationType as ObjectGeoLocation, ImageLocationType as ObjectImageLocation from sarpy.annotation.afrl_elements.DetailFiducialInfo import DetailFiducialInfoType, TheFiducialType, GeoLocationType as FiducialGeoLocation, ImageLocationType as FiducialImageLocation from sarpy.annotation.afrl_elements.DetailImageInfo import DetailImageInfoType from sarpy.annotation.afrl_elements.DetailSensorInfo import DetailSensorInfoType from sarpy.annotation.label import LabelSchema, FileLabelCollection, LabelCollection, LabelFeature, LabelMetadataList, LabelMetadata class GroundTruthConstructor(object): __slots__ = ( '_collection_info', '_subcollection_info', '_objects', '_fiducials') def __init__(self, collection_info, subcollection_info): self._collection_info = collection_info self._subcollection_info = subcollection_info self._objects = [] self._fiducials = [] def add_fiducial(self, the_fiducial): if not isinstance(the_fiducial, TheFiducialType): raise TypeError('Requires an object of type `TheFiducialType`, got `{}`'.format(type(the_fiducial))) if the_fiducial.ImageLocation is not None: raise ValueError('The fiducial has ImageLocation already set.') if the_fiducial.SlantPlane is not None or the_fiducial.GroundPlane is not None: raise ValueError('The fiducial already has the SlantPlane or GroundPlane set.') self._fiducials.append(the_fiducial) def add_fiducial_from_arguments(self, Name=None, SerialNumber=None, FiducialType=None, GeoLocation=None): self.add_fiducial( TheFiducialType( Name=Name, SerialNumber=SerialNumber, FiducialType=FiducialType, GeoLocation=GeoLocation)) def add_object(self, the_object): if not isinstance(the_object, TheObjectType): raise TypeError('Requires an object of type `TheObjectType`, got `{}`'.format(type(the_object))) if the_object.ImageLocation is not None: raise ValueError('The object has ImageLocation already set.') if the_object.SlantPlane is not None or the_object.GroundPlane is not None: raise ValueError('The object already has the SlantPlane or GroundPlane set.') self._objects.append(the_object) def add_object_from_arguments( self, SystemName=None, SystemComponent=None, NATOName=None, Function=None, Version=None, DecoyType=None, SerialNumber=None, ObjectClass='Unknown', ObjectSubClass='Unknown', ObjectTypeClass='Unknown', ObjectType='Unknown', ObjectLabel=None, Size=None, Orientation=None, Articulation=None, Configuration=None, Accessories=None, PaintScheme=None, Camouflage=None, Obscuration=None, ObscurationPercent=None, ImageLevelObscuration=None, GeoLocation=None, TargetToClutterRatio=None, VisualQualityMetric=None, UnderlyingTerrain=None, OverlyingTerrain=None, TerrainTexture=None, SeasonalCover=None): self.add_object( TheObjectType(SystemName=SystemName, SystemComponent=SystemComponent, NATOName=NATOName, Function=Function, Version=Version, DecoyType=DecoyType, SerialNumber=SerialNumber, ObjectClass=ObjectClass, ObjectSubClass=ObjectSubClass, ObjectTypeClass=ObjectTypeClass, ObjectType=ObjectType, 
ObjectLabel=ObjectLabel, Size=Size, Orientation=Orientation, Articulation=Articulation, Configuration=Configuration, Accessories=Accessories, PaintScheme=PaintScheme, Camouflage=Camouflage, Obscuration=Obscuration, ObscurationPercent=ObscurationPercent, ImageLevelObscuration=ImageLevelObscuration, GeoLocation=GeoLocation, TargetToClutterRatio=TargetToClutterRatio, VisualQualityMetric=VisualQualityMetric, UnderlyingTerrain=UnderlyingTerrain, OverlyingTerrain=OverlyingTerrain, TerrainTexture=TerrainTexture, SeasonalCover=SeasonalCover)) def get_final_structure(self): return ResearchType( DetailCollectionInfo=self._collection_info, DetailSubCollectionInfo=self._subcollection_info, DetailFiducialInfo=DetailFiducialInfoType( NumberOfFiducialsInScene=len(self._fiducials), Fiducials=self._fiducials), DetailObjectInfo=DetailObjectInfoType( NumberOfObjectsInScene=len(self._objects), Objects=self._objects)).copy()
MIT License
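A sketch of the intended workflow around localize_for_sicd: describe the ground truth once, then localize a static copy per SICD file. The pre-built collection-info objects and the object arguments below are illustrative assumptions.

# Hypothetical workflow sketch around GroundTruthConstructor.localize_for_sicd.
# `collection_info` / `subcollection_info` are assumed to be pre-built
# DetailCollectionInfoType / DetailSubCollectionInfoType instances.
from sarpy.io.complex.sicd import SICDReader
from sarpy.annotation.afrl import GroundTruthConstructor


def build_and_localize(collection_info, subcollection_info, sicd_file):
    constructor = GroundTruthConstructor(collection_info, subcollection_info)
    # Describe the scene once (argument values here are placeholders).
    constructor.add_object_from_arguments(ObjectLabel='object_01', SystemName='example_system')

    # Localize a static copy of the ground truth for one particular SICD file.
    reader = SICDReader(sicd_file)
    return constructor.localize_for_sicd(
        reader.sicd_meta, sicd_file, populate_in_periphery=False)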
netbox-community/netbox
netbox/netbox/context_processors.py
settings_and_registry
python
def settings_and_registry(request):
    return {
        'settings': django_settings,
        'registry': registry,
        'preferences': request.user.config if request.user.is_authenticated else {},
    }
Expose Django settings and NetBox registry stores in the template context. Example: {{ settings.DEBUG }}
https://github.com/netbox-community/netbox/blob/d66fc8f661c4bdcbf1ee61323e8393a995afcdc8/netbox/netbox/context_processors.py#L6-L14
from django.conf import settings as django_settings

from extras.registry import registry
Apache License 2.0
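A sketch of how a context processor like settings_and_registry is wired into Django's TEMPLATES setting and then consumed from a template; the surrounding settings values are the stock Django defaults and are only illustrative.

# Hypothetical sketch: enabling the context processor in a Django settings module.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                # exposes `settings`, `registry` and `preferences` to every template
                'netbox.context_processors.settings_and_registry',
            ],
        },
    },
]

# In a template:  {% if settings.DEBUG %} ... {% endif %}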
nastools/homeassistant
homeassistant/components/light/wemo.py
WemoLight.name
python
def name(self):
    return self.device.name
Return the name of the light.
https://github.com/nastools/homeassistant/blob/7ca1180bd42713f2d77bbc3f0b27b231ba8784aa/homeassistant/components/light/wemo.py#L79-L81
import logging from datetime import timedelta import homeassistant.util as util import homeassistant.util.color as color_util from homeassistant.components.light import ( Light, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_RGB_COLOR, ATTR_TRANSITION, ATTR_XY_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_RGB_COLOR, SUPPORT_TRANSITION, SUPPORT_XY_COLOR) DEPENDENCIES = ['wemo'] MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10) MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100) _LOGGER = logging.getLogger(__name__) SUPPORT_WEMO = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_RGB_COLOR | SUPPORT_TRANSITION | SUPPORT_XY_COLOR) def setup_platform(hass, config, add_devices, discovery_info=None): import pywemo.discovery as discovery if discovery_info is not None: location = discovery_info[2] mac = discovery_info[3] device = discovery.device_from_description(location, mac) if device: setup_bridge(device, add_devices) def setup_bridge(bridge, add_devices): lights = {} @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS) def update_lights(): bridge.bridge_update() new_lights = [] for light_id, device in bridge.Lights.items(): if light_id not in lights: lights[light_id] = WemoLight(device, update_lights) new_lights.append(lights[light_id]) if new_lights: add_devices(new_lights) update_lights() class WemoLight(Light): def __init__(self, device, update_lights): self.light_id = device.name self.device = device self.update_lights = update_lights @property def unique_id(self): deviceid = self.device.uniqueID return '{}.{}'.format(self.__class__, deviceid) @property
MIT License
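A tiny runnable sketch of the delegation pattern used by WemoLight.name above: the entity simply proxies an attribute of the wrapped pywemo device object. The FakeDevice stand-in is an assumption used only so the snippet runs without hardware.

# Hypothetical sketch of the name-property delegation pattern.
class FakeDevice:
    def __init__(self, name):
        self.name = name


class LightWrapper:
    def __init__(self, device):
        self.device = device

    @property
    def name(self):
        return self.device.name


print(LightWrapper(FakeDevice('Kitchen Bulb')).name)   # -> Kitchen Bulb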
alexa/alexa-apis-for-python
ask-smapi-model/ask_smapi_model/v1/skill/certification/certification_response.py
CertificationResponse.__init__
python
def __init__(self, id=None, status=None, skill_submission_timestamp=None, review_tracking_info=None, result=None):
    self.__discriminator_value = None

    self.id = id
    self.status = status
    self.skill_submission_timestamp = skill_submission_timestamp
    self.review_tracking_info = review_tracking_info
    self.result = result
:param id: Certification Id for the skill
:type id: (optional) str
:param status:
:type status: (optional) ask_smapi_model.v1.skill.certification.certification_status.CertificationStatus
:param skill_submission_timestamp: Timestamp for when the skill was submitted for certification.
:type skill_submission_timestamp: (optional) datetime
:param review_tracking_info:
:type review_tracking_info: (optional) ask_smapi_model.v1.skill.certification.review_tracking_info.ReviewTrackingInfo
:param result:
:type result: (optional) ask_smapi_model.v1.skill.certification.certification_result.CertificationResult
https://github.com/alexa/alexa-apis-for-python/blob/bfe5e694daaca71bfb1a4199ca8d2514f1cac6c9/ask-smapi-model/ask_smapi_model/v1/skill/certification/certification_response.py#L63-L84
import pprint
import re
import six
import typing
from enum import Enum


if typing.TYPE_CHECKING:
    from typing import Dict, List, Optional, Union, Any
    from datetime import datetime
    from ask_smapi_model.v1.skill.certification.certification_status import CertificationStatus as CertificationStatus_f3c4064f
    from ask_smapi_model.v1.skill.certification.certification_result import CertificationResult as CertificationResult_84c93325
    from ask_smapi_model.v1.skill.certification.review_tracking_info import ReviewTrackingInfo as ReviewTrackingInfo_efea7da2


class CertificationResponse(object):
    deserialized_types = {
        'id': 'str',
        'status': 'ask_smapi_model.v1.skill.certification.certification_status.CertificationStatus',
        'skill_submission_timestamp': 'datetime',
        'review_tracking_info': 'ask_smapi_model.v1.skill.certification.review_tracking_info.ReviewTrackingInfo',
        'result': 'ask_smapi_model.v1.skill.certification.certification_result.CertificationResult'
    }

    attribute_map = {
        'id': 'id',
        'status': 'status',
        'skill_submission_timestamp': 'skillSubmissionTimestamp',
        'review_tracking_info': 'reviewTrackingInfo',
        'result': 'result'
    }

    supports_multiple_types = False
Apache License 2.0
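A sketch constructing a CertificationResponse directly from keyword arguments; the values are placeholders, and in practice the object is normally produced by the SMAPI client's deserializer rather than built by hand.

# Hypothetical sketch: building a CertificationResponse with placeholder data.
from datetime import datetime

from ask_smapi_model.v1.skill.certification.certification_response import CertificationResponse

response = CertificationResponse(
    id='certification-id-placeholder',
    skill_submission_timestamp=datetime(2020, 1, 1, 12, 0, 0))

print(response.id, response.skill_submission_timestamp)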
o365/python-o365
O365/address_book.py
ContactFolder.create_child_folder
python
def create_child_folder(self, folder_name):
    if not folder_name:
        return None
    if self.root:
        url = self.build_url(self._endpoints.get('root_folders'))
    else:
        url = self.build_url(
            self._endpoints.get('child_folders').format(id=self.folder_id))
    response = self.con.post(url, data={self._cc('displayName'): folder_name})
    if not response:
        return None
    folder = response.json()
    return self.__class__(parent=self, **{self._cloud_data_key: folder})
Creates a new child folder

:param str folder_name: name of the new folder to create
:return: newly created folder
:rtype: ContactFolder or None
https://github.com/o365/python-o365/blob/696f338c5321aae6143386bd2d59f6f9f4493b60/O365/address_book.py#L843-L868
import datetime as dt import logging from dateutil.parser import parse from requests.exceptions import HTTPError from .utils import Recipients from .utils import AttachableMixin, TrackerSet from .utils import Pagination, NEXT_LINK_KEYWORD, ApiComponent from .message import Message, RecipientType from .category import Category log = logging.getLogger(__name__) class Contact(ApiComponent, AttachableMixin): _endpoints = { 'contact': '/contacts', 'root_contact': '/contacts/{id}', 'child_contact': '/contactFolders/{folder_id}/contacts', 'photo': '/contacts/{id}/photo/$value', 'photo_size': '/contacts/{id}/photos/{size}/$value', } message_constructor = Message def __init__(self, *, parent=None, con=None, **kwargs): if parent and con: raise ValueError('Need a parent or a connection but not both') self.con = parent.con if parent else con main_resource = kwargs.pop('main_resource', None) or ( getattr(parent, 'main_resource', None) if parent else None) super().__init__( protocol=parent.protocol if parent else kwargs.get('protocol'), main_resource=main_resource) cloud_data = kwargs.get(self._cloud_data_key, {}) cc = self._cc self._track_changes = TrackerSet(casing=cc) self.object_id = cloud_data.get(cc('id'), None) self.__created = cloud_data.get(cc('createdDateTime'), None) self.__modified = cloud_data.get(cc('lastModifiedDateTime'), None) local_tz = self.protocol.timezone self.__created = parse(self.__created).astimezone( local_tz) if self.__created else None self.__modified = parse(self.__modified).astimezone( local_tz) if self.__modified else None self.__display_name = cloud_data.get(cc('displayName'), '') self.__name = cloud_data.get(cc('givenName'), '') self.__surname = cloud_data.get(cc('surname'), '') self.__title = cloud_data.get(cc('title'), '') self.__job_title = cloud_data.get(cc('jobTitle'), '') self.__company_name = cloud_data.get(cc('companyName'), '') self.__department = cloud_data.get(cc('department'), '') self.__office_location = cloud_data.get(cc('officeLocation'), '') self.__business_phones = cloud_data.get(cc('businessPhones'), []) or [] self.__mobile_phone = cloud_data.get(cc('mobilePhone'), '') self.__home_phones = cloud_data.get(cc('homePhones'), []) or [] emails = cloud_data.get(cc('emailAddresses'), []) self.__emails = Recipients( recipients=[(rcp.get(cc('name'), ''), rcp.get(cc('address'), '')) for rcp in emails], parent=self, field=cc('emailAddresses')) email = cloud_data.get(cc('email')) self.__emails.untrack = True if email and email not in self.__emails: self.__emails.add(email) self.__business_address = cloud_data.get(cc('businessAddress'), {}) self.__home_address = cloud_data.get(cc('homesAddress'), {}) self.__other_address = cloud_data.get(cc('otherAddress'), {}) self.__preferred_language = cloud_data.get(cc('preferredLanguage'), None) self.__categories = cloud_data.get(cc('categories'), []) self.__folder_id = cloud_data.get(cc('parentFolderId'), None) self.__personal_notes = cloud_data.get(cc('personalNotes'), '') mail = cloud_data.get(cc('mail'), None) user_principal_name = cloud_data.get(cc('userPrincipalName'), None) if mail and mail not in self.emails: self.emails.add(mail) if user_principal_name and user_principal_name not in self.emails: self.emails.add(user_principal_name) self.__emails.untrack = False def __str__(self): return self.__repr__() def __repr__(self): return self.display_name or self.full_name or 'Unknown Name' def __eq__(self, other): return self.object_id == other.object_id @property def created(self): return self.__created @property def 
modified(self): return self.__modified @property def display_name(self): return self.__display_name @display_name.setter def display_name(self, value): self.__display_name = value self._track_changes.add(self._cc('displayName')) @property def name(self): return self.__name @name.setter def name(self, value): self.__name = value self._track_changes.add(self._cc('givenName')) @property def surname(self): return self.__surname @surname.setter def surname(self, value): self.__surname = value self._track_changes.add(self._cc('surname')) @property def full_name(self): return '{} {}'.format(self.name, self.surname).strip() @property def title(self): return self.__title @title.setter def title(self, value): self.__title = value self._track_changes.add(self._cc('title')) @property def job_title(self): return self.__job_title @job_title.setter def job_title(self, value): self.__job_title = value self._track_changes.add(self._cc('jobTitle')) @property def company_name(self): return self.__company_name @company_name.setter def company_name(self, value): self.__company_name = value self._track_changes.add(self._cc('companyName')) @property def department(self): return self.__department @department.setter def department(self, value): self.__department = value self._track_changes.add(self._cc('department')) @property def office_location(self): return self.__office_location @office_location.setter def office_location(self, value): self.__office_location = value self._track_changes.add(self._cc('officeLocation')) @property def business_phones(self): return self.__business_phones @business_phones.setter def business_phones(self, value): if isinstance(value, tuple): value = list(value) if not isinstance(value, list): value = [value] self.__business_phones = value self._track_changes.add(self._cc('businessPhones')) @property def mobile_phone(self): return self.__mobile_phone @mobile_phone.setter def mobile_phone(self, value): self.__mobile_phone = value self._track_changes.add(self._cc('mobilePhone')) @property def home_phones(self): return self.__home_phones @home_phones.setter def home_phones(self, value): if isinstance(value, list): self.__home_phones = value elif isinstance(value, str): self.__home_phones = [value] elif isinstance(value, tuple): self.__home_phones = list(value) else: raise ValueError('home_phones must be a list') self._track_changes.add(self._cc('homePhones')) @property def emails(self): return self.__emails @property def main_email(self): if not self.emails: return None return self.emails[0].address @property def business_address(self): return self.__business_address @business_address.setter def business_address(self, value): if not isinstance(value, dict): raise ValueError('"business_address" must be dict') self.__business_address = value self._track_changes.add(self._cc('businessAddress')) @property def home_address(self): return self.__home_address @home_address.setter def home_address(self, value): if not isinstance(value, dict): raise ValueError('"home_address" must be dict') self.__home_address = value self._track_changes.add(self._cc('homesAddress')) @property def other_address(self): return self.__other_address @other_address.setter def other_address(self, value): if not isinstance(value, dict): raise ValueError('"other_address" must be dict') self.__other_address = value self._track_changes.add(self._cc('otherAddress')) @property def preferred_language(self): return self.__preferred_language @preferred_language.setter def preferred_language(self, value): 
self.__preferred_language = value self._track_changes.add(self._cc('preferredLanguage')) @property def categories(self): return self.__categories @categories.setter def categories(self, value): if isinstance(value, list): self.__categories = [] for val in value: if isinstance(val, Category): self.__categories.append(val.name) else: self.__categories.append(val) elif isinstance(value, str): self.__categories = [value] elif isinstance(value, Category): self.__categories = [value.name] else: raise ValueError('categories must be a list') self._track_changes.add(self._cc('categories')) @property def personal_notes(self): return self.__personal_notes @personal_notes.setter def personal_notes(self, value): self.__personal_notes = value self._track_changes.add(self._cc('personalNotes')) @property def folder_id(self): return self.__folder_id def to_api_data(self, restrict_keys=None): cc = self._cc data = { cc('displayName'): self.__display_name, cc('givenName'): self.__name, cc('surname'): self.__surname, cc('title'): self.__title, cc('jobTitle'): self.__job_title, cc('companyName'): self.__company_name, cc('department'): self.__department, cc('officeLocation'): self.__office_location, cc('businessPhones'): self.__business_phones, cc('mobilePhone'): self.__mobile_phone, cc('homePhones'): self.__home_phones, cc('emailAddresses'): [{self._cc('name'): recipient.name or '', self._cc('address'): recipient.address} for recipient in self.emails], cc('businessAddress'): self.__business_address, cc('homesAddress'): self.__home_address, cc('otherAddress'): self.__other_address, cc('categories'): self.__categories, cc('personalNotes'): self.__personal_notes, } if restrict_keys: restrict_keys.add(cc( 'givenName')) for key in list(data.keys()): if key not in restrict_keys: del data[key] return data def delete(self): if not self.object_id: raise RuntimeError('Attempting to delete an unsaved Contact') url = self.build_url( self._endpoints.get('root_contact').format(id=self.object_id)) response = self.con.delete(url) return bool(response) def save(self): if self.object_id: if not self._track_changes: return True url = self.build_url( self._endpoints.get('root_contact').format(id=self.object_id)) method = self.con.patch data = self.to_api_data(restrict_keys=self._track_changes) else: if self.__folder_id: url = self.build_url( self._endpoints.get('child_contact').format( folder_id=self.__folder_id)) else: url = self.build_url(self._endpoints.get('contact')) method = self.con.post data = self.to_api_data(restrict_keys=self._track_changes) response = method(url, data=data) if not response: return False if not self.object_id: contact = response.json() self.object_id = contact.get(self._cc('id'), None) self.__created = contact.get(self._cc('createdDateTime'), None) self.__modified = contact.get(self._cc('lastModifiedDateTime'), None) local_tz = self.protocol.timezone self.__created = parse(self.created).astimezone( local_tz) if self.__created else None self.__modified = parse(self.modified).astimezone( local_tz) if self.__modified else None else: self.__modified = self.protocol.timezone.localize(dt.datetime.now()) return True def new_message(self, recipient=None, *, recipient_type=RecipientType.TO): if isinstance(recipient_type, str): recipient_type = RecipientType(recipient_type) recipient = recipient or self.emails.get_first_recipient_with_address() if not recipient: return None new_message = self.message_constructor(parent=self, is_draft=True) target_recipients = getattr(new_message, str(recipient_type.value)) 
target_recipients.add(recipient) return new_message def get_profile_photo(self, size=None): if size is None: url = self.build_url(self._endpoints.get('photo').format(id=self.object_id)) else: url = self.build_url(self._endpoints.get('photo_size').format(id=self.object_id, size=size)) try: response = self.con.get(url) except HTTPError as e: log.debug('Error while retrieving the contact profile photo. Error: {}'.format(e)) return None if not response: return None return response.content def update_profile_photo(self, photo): url = self.build_url(self._endpoints.get('photo').format(id=self.object_id)) response = self.con.patch(url, data=photo, headers={'Content-type': 'image/jpeg'}) return bool(response) class BaseContactFolder(ApiComponent): _endpoints = { 'root_contacts': '/contacts', 'folder_contacts': '/contactFolders/{id}/contacts', 'get_folder': '/contactFolders/{id}', 'root_folders': '/contactFolders', 'child_folders': '/contactFolders/{id}/childFolders' } contact_constructor = Contact message_constructor = Message def __init__(self, *, parent=None, con=None, **kwargs): if parent and con: raise ValueError('Need a parent or a connection but not both') self.con = parent.con if parent else con main_resource = kwargs.pop('main_resource', None) or ( getattr(parent, 'main_resource', None) if parent else None) super().__init__( protocol=parent.protocol if parent else kwargs.get('protocol'), main_resource=main_resource) self.root = kwargs.pop('root', False) cloud_data = kwargs.get(self._cloud_data_key, {}) self.name = cloud_data.get(self._cc('displayName'), kwargs.get('name', '')) self.folder_id = cloud_data.get(self._cc('id'), None) self.parent_id = cloud_data.get(self._cc('parentFolderId'), None) def __str__(self): return self.__repr__() def __repr__(self): return 'Contact Folder: {}'.format(self.name) def __eq__(self, other): return self.folder_id == other.folder_id def get_contacts(self, limit=100, *, query=None, order_by=None, batch=None): if self.root: url = self.build_url(self._endpoints.get('root_contacts')) else: url = self.build_url( self._endpoints.get('folder_contacts').format( id=self.folder_id)) if limit is None or limit > self.protocol.max_top_value: batch = self.protocol.max_top_value params = {'$top': batch if batch else limit} if order_by: params['$orderby'] = order_by if query: if isinstance(query, str): params['$filter'] = query else: params.update(query.as_params()) response = self.con.get(url, params=params) if not response: return iter(()) data = response.json() contacts = (self.contact_constructor(parent=self, **{self._cloud_data_key: contact}) for contact in data.get('value', [])) next_link = data.get(NEXT_LINK_KEYWORD, None) if batch and next_link: return Pagination(parent=self, data=contacts, constructor=self.contact_constructor, next_link=next_link, limit=limit) else: return contacts def get_contact_by_email(self, email): if not email: return None query = self.q().any(collection='email_addresses', attribute='address', word=email.strip(), operation='eq') contacts = list(self.get_contacts(limit=1, query=query)) return contacts[0] if contacts else None class ContactFolder(BaseContactFolder): def get_folder(self, folder_id=None, folder_name=None): if folder_id and folder_name: raise RuntimeError('Provide only one of the options') if not folder_id and not folder_name: raise RuntimeError('Provide one of the options') if folder_id: url = self.build_url( self._endpoints.get('get_folder').format(id=folder_id)) params = None else: if self.root: url = 
self.build_url(self._endpoints.get('root_folders')) else: url = self.build_url( self._endpoints.get('child_folders').format( id=self.folder_id)) params = {'$filter': "{} eq '{}'".format(self._cc('displayName'), folder_name), '$top': 1} response = self.con.get(url, params=params) if not response: return None if folder_id: folder = response.json() else: folder = response.json().get('value') folder = folder[0] if folder else None if folder is None: return None return self.__class__(con=self.con, protocol=self.protocol, main_resource=self.main_resource, **{self._cloud_data_key: folder}) def get_folders(self, limit=None, *, query=None, order_by=None): if self.root: url = self.build_url(self._endpoints.get('root_folders')) else: url = self.build_url( self._endpoints.get('child_folders').format(id=self.folder_id)) params = {} if limit: params['$top'] = limit if order_by: params['$orderby'] = order_by if query: if isinstance(query, str): params['$filter'] = query else: params.update(query.as_params()) response = self.con.get(url, params=params or None) if not response: return [] data = response.json() return [self.__class__(parent=self, **{self._cloud_data_key: folder}) for folder in data.get('value', [])]
Apache License 2.0
icb-dcm/pyabc
pyabc/acceptor/acceptor.py
Acceptor.is_adaptive
python
def is_adaptive(self) -> bool:
    return False
Whether the class is dynamically updated after each generation, based on the last generation's available data. Default: False.
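A minimal sketch of how this flag is meant to be used: an acceptor that re-tunes itself between generations overrides is_adaptive to return True so the ABC machinery knows to call its update() hook. The AdaptiveAcceptor name and its adaptation step are hypothetical; only the Acceptor base class and the method signatures come from this record.

class AdaptiveAcceptor(Acceptor):
    # Hypothetical acceptor that adapts after each generation.

    def is_adaptive(self) -> bool:
        # Tell the framework that update() does real work here.
        return True

    def update(self, t, get_weighted_distances, prev_temp, acceptance_rate):
        # Illustrative adaptation step: cache the last generation's weighted distances.
        self.last_distances = get_weighted_distances()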
https://github.com/icb-dcm/pyabc/blob/3cef3237a819caba40efe6eb4f775822b4d66955/pyabc/acceptor/acceptor.py#L177-L182
import numpy as np import pandas as pd from typing import Callable, Union import logging from ..distance import Distance, SCALE_LIN, StochasticKernel from ..epsilon import Epsilon from ..parameters import Parameter from .pdf_norm import pdf_norm_from_kernel, pdf_norm_max_found from ..storage import save_dict_to_json logger = logging.getLogger("ABC.Acceptor") class AcceptorResult(dict): def __init__(self, distance: float, accept: bool, weight: float = 1.0): super().__init__() self.distance = distance self.accept = accept self.weight = weight def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ class Acceptor: def __init__(self): pass def initialize( self, t: int, get_weighted_distances: Callable[[], pd.DataFrame], distance_function: Distance, x_0: dict): pass def update(self, t: int, get_weighted_distances: Callable[[], pd.DataFrame], prev_temp: float, acceptance_rate: float): pass def __call__(self, distance_function: Distance, eps: Epsilon, x: dict, x_0: dict, t: int, par: Parameter): raise NotImplementedError() def requires_calibration(self) -> bool: return False
BSD 3-Clause New or Revised License
drorlab/atom3d
examples/lba/enn/model.py
ENN_LBA.forward
python
def forward(self, data, covariance_test=False):
    atom_scalars, atom_mask, edge_scalars, edge_mask, atom_positions = self.prepare_input(data)
    spherical_harmonics, norms = self.sph_harms(atom_positions, atom_positions)
    rad_func_levels = self.rad_funcs(norms, edge_mask * (norms > 0))
    atom_reps_in = self.input_func_atom(atom_scalars, atom_mask, edge_scalars, edge_mask, norms)
    edge_net_in = self.input_func_edge(atom_scalars, atom_mask, edge_scalars, edge_mask, norms)
    atoms_all, edges_all = self.cormorant_cg(atom_reps_in, atom_mask, edge_net_in, edge_mask,
                                             rad_func_levels, norms, spherical_harmonics)
    atom_scalars = self.get_scalars_atom(atoms_all)
    edge_scalars = self.get_scalars_edge(edges_all)
    prediction = self.output_layer_atom(atom_scalars, atom_mask)
    if covariance_test:
        return prediction, atoms_all, atoms_all
    else:
        return prediction
Runs a forward pass of the network.

:param data: Dictionary of data to pass to the network.
:type data: :obj:`dict`
:param covariance_test: If true, returns all of the atom-level representations twice.
:type covariance_test: :obj:`bool`, optional
:return prediction: The output of the layer
:rtype prediction: :obj:`torch.Tensor`
https://github.com/drorlab/atom3d/blob/7eacb676f56b4130fd805f4b2901a600170b88f9/examples/lba/enn/model.py#L118-L158
import torch import torch.nn as nn import logging from cormorant.cg_lib import CGModule, SphericalHarmonicsRel from cormorant.nn import RadialFilters from cormorant.nn import InputMPNN, InputLinear from cormorant.nn import OutputPMLP, OutputLinear, GetScalarsAtom from cormorant.nn import NoLayer from cormorant.nn import BasicMLP from atom3d.models.enn import ENN class ENN_LBA(CGModule): def __init__(self, maxl, max_sh, num_cg_levels, num_channels, num_species, cutoff_type, hard_cut_rad, soft_cut_rad, soft_cut_width, weight_init, level_gain, charge_power, basis_set, charge_scale, gaussian_mask, cgprod_bounded=True, cg_pow_normalization='none', cg_agg_normalization='none', device=None, dtype=None, cg_dict=None): logging.info('Initializing network!') level_gain = expand_var_list(level_gain, num_cg_levels) hard_cut_rad = expand_var_list(hard_cut_rad, num_cg_levels) soft_cut_rad = expand_var_list(soft_cut_rad, num_cg_levels) soft_cut_width = expand_var_list(soft_cut_width, num_cg_levels) maxl = expand_var_list(maxl, num_cg_levels) max_sh = expand_var_list(max_sh, num_cg_levels) num_channels = expand_var_list(num_channels, num_cg_levels+1) logging.info('hard_cut_rad: {}'.format(hard_cut_rad)) logging.info('soft_cut_rad: {}'.format(soft_cut_rad)) logging.info('soft_cut_width: {}'.format(soft_cut_width)) logging.info('maxl: {}'.format(maxl)) logging.info('max_sh: {}'.format(max_sh)) logging.info('num_channels: {}'.format(num_channels)) super().__init__(maxl=max(maxl+max_sh), device=device, dtype=dtype, cg_dict=cg_dict) device, dtype, cg_dict = self.device, self.dtype, self.cg_dict self.num_cg_levels = num_cg_levels self.num_channels = num_channels self.charge_power = charge_power self.charge_scale = charge_scale self.num_species = num_species self.sph_harms = SphericalHarmonicsRel(max(max_sh), conj=True, device=device, dtype=dtype, cg_dict=cg_dict) self.rad_funcs = RadialFilters(max_sh, basis_set, num_channels, num_cg_levels, device=self.device, dtype=self.dtype) tau_pos = self.rad_funcs.tau num_scalars_in = self.num_species * (self.charge_power + 1) num_scalars_out = num_channels[0] self.input_func_atom = InputLinear(num_scalars_in, num_scalars_out, device=self.device, dtype=self.dtype) self.input_func_edge = NoLayer() tau_in_atom = self.input_func_atom.tau tau_in_edge = self.input_func_edge.tau self.cormorant_cg = ENN(maxl, max_sh, tau_in_atom, tau_in_edge, tau_pos, num_cg_levels, num_channels, level_gain, weight_init, cutoff_type, hard_cut_rad, soft_cut_rad, soft_cut_width, cgprod_bounded=cgprod_bounded, cg_pow_normalization=cg_pow_normalization, cg_agg_normalization=cg_agg_normalization, device=self.device, dtype=self.dtype, cg_dict=self.cg_dict) tau_cg_levels_atom = self.cormorant_cg.tau_levels_atom tau_cg_levels_edge = self.cormorant_cg.tau_levels_edge self.get_scalars_atom = GetScalarsAtom(tau_cg_levels_atom, device=self.device, dtype=self.dtype) self.get_scalars_edge = NoLayer() num_scalars_atom = self.get_scalars_atom.num_scalars num_scalars_edge = self.get_scalars_edge.num_scalars self.output_layer_atom = OutputLinear(num_scalars_atom, bias=True, device=self.device, dtype=self.dtype) self.output_layer_edge = NoLayer() logging.info('Model initialized. Number of parameters: {}'.format( sum([p.nelement() for p in self.parameters()])))
MIT License
parquery/swagger-to
swagger_to/__init__.py
snake_case
python
def snake_case(identifier: str) -> str:
    prefix = ''
    prefix_mtch = _NONLETTER_PREFIX_RE.search(identifier)
    if prefix_mtch:
        prefix = prefix_mtch.group(0)

    suffix = ''
    suffix_mtch = _NONLETTER_SUFFIX_RE.search(identifier)
    if suffix_mtch:
        suffix = suffix_mtch.group(0)

    trimmed = identifier[len(prefix):len(identifier) - len(suffix)]
    if not trimmed:
        return identifier

    parts = [
        part for underscore_part in trimmed.split("_")
        for dash_part in underscore_part.split("-")
        for part in camel_case_split(identifier=dash_part)
    ]

    result = '_'.join(parts)
    return ''.join([prefix, result.lower(), suffix])
Convert an identifier to a lowercase snake case.

>>> snake_case(identifier='CamelCase')
'camel_case'
>>> snake_case(identifier='camelCase')
'camel_case'
>>> snake_case(identifier='snake_case')
'snake_case'
>>> snake_case(identifier='Snake_case')
'snake_case'
>>> snake_case(identifier='Dash-Case')
'dash_case'
>>> snake_case(identifier='dash-case')
'dash_case'
>>> snake_case(identifier='_CamelCase')
'_camel_case'
>>> snake_case(identifier='CamelCase_')
'camel_case_'
>>> snake_case(identifier='__')
'__'

:param identifier: to be converted
:return: lowercase snake_case identifier
https://github.com/parquery/swagger-to/blob/e6809ce8c3712121666d686c53fe12ca754877bd/swagger_to/__init__.py#L279-L338
import re import string from typing import List, MutableMapping, Tuple import collections import icontract LOCAL_DEFINITION_REF_RE = re.compile(r'^#/definitions/(?P<name>[a-zA-Z0-9_.\- ]+)$') NAME_RE = re.compile(r'^[a-zA-Z0-9_.\- ]+$') @icontract.require( lambda ref: LOCAL_DEFINITION_REF_RE.match(ref), 'swagger-to can handle only definitions local to the spec file. ' 'Please create an issue if you need to handle a different kind of ' 'definition reference: https://github.com/Parquery/swagger-to/issues/new', error=ValueError, enabled=True) @icontract.ensure(lambda result: NAME_RE.match(result)) def parse_definition_ref(ref: str) -> str: mtch = LOCAL_DEFINITION_REF_RE.match(ref) assert mtch is not None return mtch.group('name') LOCAL_PARAMETER_REF_RE = re.compile(r'^#/parameters/(?P<name>[a-zA-Z0-9_.\- ]+)$') @icontract.require( lambda ref: LOCAL_PARAMETER_REF_RE.match(ref), 'swagger-to can handle only parameters local to the spec file. ' 'Please create an issue if you need to handle a different kind of ' 'parameter reference: https://github.com/Parquery/swagger-to/issues/new', error=ValueError, enabled=True) @icontract.ensure(lambda result: NAME_RE.match(result)) def parse_parameter_ref(ref: str) -> str: mtch = LOCAL_PARAMETER_REF_RE.match(ref) assert mtch is not None return mtch.group('name') SPECIALS = ['URLs', 'IDs', 'URL', 'ID', 'HTTP', 'HTTPS', 'JSONLD', 'JSON'] def camel_case_split(identifier: str) -> List[str]: if identifier == '': raise ValueError("Unexpected empty identifier") cur = 0 parts = [] while cur < len(identifier): found_special = False for special in SPECIALS: if identifier[cur:cur + len(special)] == special: parts.append(special) cur += len(special) found_special = True if not found_special: if identifier[cur] in string.ascii_uppercase: parts.append('') parts[-1] += identifier[cur] cur += 1 else: if len(parts) == 0: parts.append('') parts[-1] += identifier[cur] cur += 1 return parts _NONLETTER_PREFIX_RE = re.compile(r'^[^a-zA-Z]+') _NONLETTER_SUFFIX_RE = re.compile(r'[^a-zA-Z]+$') def _trim_nonletters(identifier: str) -> Tuple[str, str, str]: if not identifier: return '', identifier, '' prefix = '' prefix_mtch = _NONLETTER_PREFIX_RE.search(identifier) if prefix_mtch: prefix = prefix_mtch.group(0) suffix = '' suffix_mtch = _NONLETTER_SUFFIX_RE.search(identifier) if suffix_mtch: suffix = suffix_mtch.group(0) trimmed = identifier[len(prefix):len(identifier) - len(suffix)] return prefix, trimmed, suffix @icontract.require( lambda identifier: identifier != '', error=lambda: ValueError("Unexpected empty identifier"), enabled=True) @icontract.ensure(lambda result: '-' not in result) @icontract.ensure(lambda result: result[0] == result[0].upper()) @icontract.ensure(lambda result: result != '') def capital_camel_case(identifier: str) -> str: prefix, trimmed, suffix = _trim_nonletters(identifier=identifier) if not trimmed: return identifier parts = [ part for underscore_part in trimmed.split("_") for dash_part in underscore_part.split("-") for part in camel_case_split(identifier=dash_part) ] new_parts = [] for part in parts: part = part.lower() if part in ['url', 'id']: new_parts.append(part.upper()) elif part == 'urls': new_parts.append('URLs') elif part == 'ids': new_parts.append('IDs') else: new_parts.append(part[0].upper() + part[1:].lower()) return "".join([prefix] + new_parts + [suffix]) @icontract.require( lambda identifier: identifier != '', error=lambda: ValueError("Unexpected empty identifier"), enabled=True) @icontract.ensure(lambda result: '-' not in result) 
@icontract.ensure(lambda result: result[0] == result[0].lower()) @icontract.ensure(lambda result: result != '') def camel_case(identifier: str) -> str: prefix, trimmed, suffix = _trim_nonletters(identifier=identifier) if not trimmed: return identifier parts = [ part for underscore_part in trimmed.split("_") for dash_part in underscore_part.split("-") for part in camel_case_split(identifier=dash_part) ] new_parts = [parts[0].lower()] for part in parts[1:]: part = part.lower() if part in ['url', 'id']: new_parts.append(part.upper()) elif part == 'urls': new_parts.append('URLs') elif part == 'ids': new_parts.append('IDs') else: new_parts.append(part[0].upper() + part[1:].lower()) return "".join([prefix] + new_parts + [suffix]) @icontract.require( lambda identifier: identifier != '', error=lambda: ValueError("Unexpected empty identifier"), enabled=True) @icontract.ensure(lambda result: '-' not in result) @icontract.ensure(lambda result: result == result.lower())
MIT License
silhm/motu-mcu-control
modules/motu.py
Motu._query
python
def _query(self, address, value):
    value = json.dumps(value)
    url = "{}/datastore{}".format(self.url, address)
    r = requests.post(url, {"json": value})
    print("{} : {}".format(url, value))
    return True if r.status_code is 200 else False
Send the corresponding message
- address is a string
- value is an array

Return True if well received by the motu soundcard
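A rough usage sketch, assuming a reachable device; the datastore path and value below are illustrative, not taken from this record:

motu = Motu(ipAddr='192.168.1.10', port=80)
# _query JSON-encodes the value and POSTs it to
# http://192.168.1.10:80/datastore/mix/chan/0/matrix/fader
ok = motu._query('/mix/chan/0/matrix/fader', 0.5)
print('accepted' if ok else 'rejected')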
https://github.com/silhm/motu-mcu-control/blob/c0426f72fb778ea985c3e59bd831873d2d8623d0/modules/motu.py#L36-L48
import json import requests import time import sys from modules.midiHelper import * from modules.settings import Settings fader_api_range = [0, 4] fader_midi_range = [-8192, 8192] class Motu: def __init__(self, ipAddr=None, port=80): self.ipAddr = ipAddr self.port = port self.url = "http://{}:{}".format(ipAddr, port) self.uid = self._getUid() self.waitOnline() self.motuSettings = self._getSettings() self.settings = Settings() if self.motuSettings: print("========== Motu AVB: ===========") print("* Name : {}".format(self.motuSettings["hostname"])) print("* uid : {}".format(self.uid)) print("* Sample rate : {}".format(self.motuSettings["cfg/0/current_sampling_rate"])) print("================================")
MIT License
duetosymmetry/qnm
qnm/angular.py
M_matrix_elem
python
def M_matrix_elem(s, c, m, l, lprime):
    if (lprime == l-2):
        return -c*c*_calA(s,lprime,m)
    if (lprime == l-1):
        return (-c*c*_calD(s,lprime,m) + 2*c*s*_calF(s,lprime,m))
    if (lprime == l):
        return (swsphericalh_A(s,lprime,m) - c*c*_calB(s,lprime,m) + 2*c*s*_calH(s,lprime,m))
    if (lprime == l+1):
        return (-c*c*_calE(s,lprime,m) + 2*c*s*_calG(s,lprime,m))
    if (lprime == l+2):
        return -c*c*_calC(s,lprime,m)
    return 0.
The (l, lprime) matrix element from the spherical-spheroidal decomposition matrix from Eq. (55).

Parameters
----------
s: int
    Spin-weight of interest
c: complex
    Oblateness of the spheroidal harmonic
m: int
    Magnetic quantum number
l: int
    Angular quantum number of interest
lprime: int
    Primed quantum number of interest

Returns
-------
complex
    Matrix element M_{l, lprime}
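Since the element vanishes whenever |l - lprime| > 2, the decomposition matrix is pentadiagonal. A small sketch of assembling it for a truncated basis; build_M is a hypothetical helper and the l_min = max(|s|, |m|) convention is assumed, not taken from this record:

import numpy as np

def build_M(s, c, m, n_max):
    # Assemble an n_max x n_max block of the spherical-spheroidal mixing matrix.
    l_min = max(abs(s), abs(m))
    ells = [l_min + k for k in range(n_max)]
    M = np.zeros((n_max, n_max), dtype=complex)
    for i, l in enumerate(ells):
        for j, lprime in enumerate(ells):
            M[i, j] = M_matrix_elem(s, c, m, l, lprime)
    return M

# Example: spin-weight -2, small complex oblateness, m = 2, 5x5 truncation
M = build_M(-2, 0.1 + 0.05j, 2, 5)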
https://github.com/duetosymmetry/qnm/blob/25ceed85b0674707d6e4831dc2cb99c7ac568b05/qnm/angular.py#L107-L149
from __future__ import division, print_function, absolute_import from numba import njit import numpy as np @njit(cache=True) def _calF(s, l, m): if ((0==s) and (0 == l+1)): return 0. return (np.sqrt( ((l+1)**2 - m*m) / (2*l+3) / (2*l+1) ) * np.sqrt( ( (l+1)**2 - s*s) / (l+1)**2 )) @njit(cache=True) def _calG(s, l, m): if (0 == l): return 0. return np.sqrt( ( l*l - m*m ) / (4*l*l - 1)) * np.sqrt(1 - s*s/l/l) @njit(cache=True) def _calH(s, l, m): if (0 == l) or (0 == s): return 0. return - m*s/l/(l+1) @njit(cache=True) def _calA(s, l, m): return _calF(s,l,m) * _calF(s,l+1,m) @njit(cache=True) def _calD(s, l, m): return _calF(s,l,m) * (_calH(s,l+1,m) + _calH(s,l,m)) @njit(cache=True) def _calB(s, l, m): return (_calF(s,l,m) * _calG(s,l+1,m) + _calG(s,l,m) * _calF(s,l-1,m) + _calH(s,l,m)**2) @njit(cache=True) def _calE(s, l, m): return _calG(s,l,m) * (_calH(s,l-1,m) + _calH(s,l,m)) @njit(cache=True) def _calC(s, l, m): return _calG(s,l,m) * _calG(s,l-1,m) @njit(cache=True) def swsphericalh_A(s, l, m): return l*(l+1) - s*(s+1) @njit(cache=True)
MIT License
nikhilkumarsingh/pyinrail
pyinrail/pyinrail.py
RailwayEnquiry.get_config
python
def get_config(self):
    return {"src": self.src, "dest": self.dest, "date": self.date}
get current configuration of source station, destination station and date of journey
https://github.com/nikhilkumarsingh/pyinrail/blob/b01323e6785c8a19dcd4bbab90a681f6d039aa1e/pyinrail/pyinrail.py#L41-L45
import os import json import time from io import BytesIO import requests import demjson import pytesseract import pandas as pd from PIL import Image from fuzzywuzzy import process from .utils import * class RailwayEnquiry: def __init__(self, src=None, dest=None, date=None): self.session = {} if not os.path.exists(os.path.join(os.path.dirname(__file__), 'stations.json')): self.load_stations() self.stations = json.load(open(os.path.join(os.path.dirname(__file__), 'stations.json'))) if not os.path.exists(os.path.join(os.path.dirname(__file__), 'trains.json')): self.load_trains() self.trains = json.load(open(os.path.join(os.path.dirname(__file__), 'trains.json'))) self.src = self.get_stn_code(src) if src else None self.dest = self.get_stn_code(dest) if dest else None self.date = date self.create_session()
MIT License
iofu728/spider
buildmd/buildmd.py
Buildmd.load_collect_once
python
def load_collect_once(self, index):
    baseurl = 'https://shoucang.taobao.com/item_collect_n.htm?t='
    url = baseurl + str(int(round(time.time() * 1000)))
    if index:
        url += 'ifAllTag=0&tab=0&tagId=&categoryCount=0&type=0&tagName=&categoryName=&needNav=false&startRow=' + str(30 * index)
    collect_html = basic_req(url, 0)
    if collect_html != True and collect_html != False:
        collect_list = collect_html.find_all('li', class_=["J_FavListItem g-i-item fav-item ",
                                                           "J_FavListItem g-i-item fav-item isinvalid",
                                                           "J_FavListItem g-i-item fav-item istmall ",
                                                           "J_FavListItem g-i-item fav-item istmall isinvalid"])
        print(len(collect_list))
    if collect_html == True or collect_html == False or not len(collect_list):
        if can_retry(baseurl + str(index), index):
            self.load_collect_once(index)
        return
    text = []
    for collect in collect_list:
        data_id = collect['data-id']
        title = collect.find_all('a', class_='img-item-title-link')[0].text
        price = collect.find_all('div', class_='g_price')[0].strong.text if len(
            collect.find_all('div', class_='g_price')) else '0'
        text.append("||".join([data_id, title, price]))
    self.collect[index] = text
load taobao collect
https://github.com/iofu728/spider/blob/3b4565a40411888bb06c23461800089269d56b2c/buildmd/buildmd.py#L379-L408
import codecs import threading import time import os import re import random from bs4 import BeautifulSoup from proxy.getproxy import GetFreeProxy from util.util import begin_time, end_time, changeCookie, changeHtmlTimeout, basic_req, can_retry, get_accept, get_content_type from urllib.request import urlopen proxy_req = GetFreeProxy().proxy_req data_dir = 'buildmd/data/' class Buildmd(object): def __init__(self, ): self.request_list = [] self.failured_map = {} self.img_map = {} self.goods = {} self.collect = {} self.special_list = ['620买长款双面羊绒大衣已经很划算了。', '\xa0\xa0\xa0\xa0版型好看又百搭', '\xa0质量很好', '\xa0\xa0fromlala瘦竹竿', '\xa0mylittlebanana'] self.title_list = ['衣服', '下装', '包包', '配饰', '鞋子', '鞋子:', '饰品:', '鞋', '包', '鞋包', '鞋包:', '配饰:', '衣服:', '下装:', '包包:', '袜子:'] self.goods_candidate = [] self.headers = {} self.goods_map = {} self.title2map = {} self.url2goods = {} self.goods_name = {} def joint_url(self, tid): return 'http://note.youdao.com/yws/public/note/' + str(tid) + '?editorType=0&cstk=S0RcfVHi' def find_title(self, index: int): if int(index) < 5: return 'winter18/' + str(index + 1) + '.md' if int(index) < 9: return 'autumn18/' + str(index - 4) + '.md' if int(index) < 19: return 'summer18/' + str(index - 8) + '.md' if int(index) < 23: return 'spring18/' + str(index - 18) + '.md' if int(index) < 25: return 'winter17/' + str(index - 22) + '.md' def get_lists(self): url = self.joint_url('3bb0c25eca85e764b6d55a281faf7195') title_json = proxy_req(url, 1) if not title_json: if can_retry(url): self.get_lists() return content = BeautifulSoup( title_json['content'], 'html.parser').find_all('a') self.request_list = [ re.split(r'/|=', index.text)[-1] for index in content] def build_md(self, load_img=False): version = begin_time() threadings = [] for index, tid in enumerate(self.request_list): work = threading.Thread( target=self.build_md_once, args=(index, tid,)) threadings.append(work) for work in threadings: work.start() for work in threadings: work.join() if not load_img: return img_map = {k: self.img_map[k] for k in sorted(self.img_map.keys())} img_threadings = [] for index in img_map.keys(): for img_id, img_url in enumerate(img_map[index]): work = threading.Thread( target=self.load_img, args=(index, img_id, img_url,)) img_threadings.append(work) for work in img_threadings: work.start() for work in img_threadings: work.join() end_time(version) def build_md_once(self, index, tid): url = self.joint_url(tid) title_json = proxy_req(url, 1) if not title_json: if can_retry(url, index): self.build_md_once(index, tid) return content = BeautifulSoup( title_json['content'], 'html.parser').find_all('div') text = [] img_href = [] img_id = 1 ttid = 1 img_title = self.find_title(index).split('/')[1][:-3] for word in content: temp_text = '' if word.span and len(word.span.text) and not word.span.text[0].isdigit: temp_text = '## ' + word.span.text ttid = 1 if word.img: temp_text = '![image](img/' + img_title + str(img_id) + '.jpg)' img_href.append(word.img['src'].replace('https', 'http')) img_id += 1 if not len(temp_text): temp_text = word.text if len(temp_text) and temp_text[0].isdigit(): temp_text = str(ttid) + '. **' + ' '.join(temp_text.split('\xa0')[1:]).strip() + '**' ttid += 1 if len(temp_text) and temp_text[0:2] == '//': temp_text = str(ttid) + '. 
**' + ' '.join(temp_text.split('\xa0')[2:]).strip() + '**' ttid += 1 if len(temp_text) and (temp_text[0] == '¥' or temp_text[0] == '€'): temp_text = '<a>' + temp_text + '</a>' text.append(temp_text) with codecs.open(data_dir + self.find_title(index), 'w', encoding='utf-8') as f: f.write('\n'.join(text)) self.img_map[index] = img_href print(index, len(img_href)) def load_img(self, index, img_id, img_url): img = proxy_req(img_url, 2) if img == True or img == False: if can_retry(img_url): self.load_img(index, img_id, img_url) return with codecs.open('buildmd/' + self.find_title(index).split('/')[0] + '/img/' + self.find_title(index).split('/')[1][:-3] + str(img_id + 1) + '.jpg', 'wb') as f: f.write(img.content) def load_goods(self): version = begin_time() if not os.path.exists('%scookie' % data_dir): print('Youdao Note cookie not exist!!!') return with codecs.open('%scookie' % data_dir, 'r', encoding='utf-8') as f: cookie = f.readline() changeCookie(cookie[:-1]) threadings = [] for index, tid in enumerate(self.request_list): work = threading.Thread( target=self.load_goods_once, args=(index, tid,)) threadings.append(work) for work in threadings: work.start() for work in threadings: work.join() goods = [self.goods[k] for k in sorted(self.goods.keys())] goods = sum(goods, []) with codecs.open('%sgoods' % data_dir, 'w', encoding='utf-8') as f: f.write("\n".join(goods)) end_time(version) def load_goods_once(self, index, tid): url = self.joint_url(tid) title_json = proxy_req(url, 1) if not title_json: if can_retry(url, index): self.load_goods_once(index, tid) return content = BeautifulSoup( title_json['content'], 'html.parser') content = content.find_all('div') if not len(content): if can_retry(url, index): self.load_goods_once(index, tid) return text = [] ttid = 0 text.append(self.find_title(index)) good_text = [] describe = [] title = '' url = '' tpud = '' for word in content: temp_text = '' temp_text = word.text if not len(temp_text): continue if len(temp_text) and temp_text not in self.special_list and not '€' in temp_text and ((temp_text[0].isdigit() and (not '【' in temp_text or '【已下架】'in temp_text)) or (temp_text[0] == '\xa0' and not 'http' in temp_text and not '¥' in temp_text and not '微信' in temp_text and not '(' in temp_text) or (word.span and len(word.span.text.replace('\xa0', '')) and (word.span['style'] == 'font-size:16px;color:#fc9db1;font-weight:bold;' or word.span['style'] == 'font-size:16px;color:#1e6792;background-color:#ffffff;font-weight:bold;'))): temp_text = temp_text.replace('\xa0', ' ').replace('|', '') temp_text = temp_text.replace( '//', '').replace('¥', '').strip() if not re.search(r'\d\.\d', temp_text): temp_text = temp_text.replace('.', ' ') elif temp_text.count('.') > 1: temp_text = temp_text.replace('.', ' ', 1) temp_list = temp_text.split() print(temp_list) if not len(temp_list): continue if ttid: text.append(' '.join([*good_text, *[url, tpud]])) url = '' tpud = '' ttid += 1 describe = [] good_text = [] if len(title): text.append(title) title = '' if temp_list[0].isdigit(): good_text.append(str(int(temp_list[0]))) else: good_text.append(str(ttid)) good_text.append(temp_list[0]) if len(temp_list) == 1: continue if len(good_text) == 1: good_text.append(temp_list[1]) elif temp_list[1].isdigit(): good_text.append(str(int(temp_list[1]))) if len(temp_list) > 2: describe = temp_list[2:] if len(temp_list) > 2 and temp_list[2].isdigit(): good_text.append(str(int(temp_list[2]))) elif len(temp_list) > 3 and temp_list[3].isdigit(): good_text.append(str(int(temp_list[3]))) 
describe = temp_list[2] if len(temp_list) > 4: describe = [*describe, *temp_list[4:]] elif len(temp_list) > 3 and len(temp_list[2]) > 3 and temp_list[2][2:].isdigit(): if len(temp_list[3]) > 3 and temp_list[3][2:].isdigit(): good_text.append(temp_list[2] + '/' + temp_list[3]) else: good_text.append(str(int(temp_list[2][2:]))) continue elif len(temp_list) > 2 and re.search(r'\d', temp_list[2]): digit_list = re.findall(r"\d+\.?\d*", temp_list[2]) good_text.append(digit_list[0]) if len(temp_list) > 3: describe = [*describe, *temp_list[3:]] elif len(temp_list) > 2: describe.append(temp_list[2]) if len(temp_list) > 3: describe = temp_list[3:] elif 'http' in temp_text: temp_text = temp_text.replace('\xa0', '').strip() print('http', temp_text) url = temp_text elif temp_text.count('€') == 2 or temp_text.count('¥') == 2: temp_text = temp_text.replace('\xa0', '').strip() print('¥', temp_text) tpud = temp_text elif '【店铺链接】' in temp_text: temp_text = temp_text.replace('\xa0', '').strip() print('【店铺链接】', temp_text) url += temp_text elif temp_text in self.title_list: print(2, temp_text) temp_text = temp_text.replace('\xa0', '') title = temp_text elif len(good_text) == 1: temp_text = temp_text.replace('\xa0', ' ').replace( '.', ' ').replace('¥', '').replace('|', '') temp_list = temp_text.split() print(3, temp_list) if not len(temp_list): continue elif len(temp_list) > 1 and temp_list[1].isdigit(): good_text.append(temp_list[0]) good_text.append(str(int(temp_list[1]))) describe = temp_list[2:] else: describe.append(temp_text) elif temp_text.count('¥') == 1: temp_text = temp_text.replace('¥', '').replace( '\xa0', '').replace('|', '').strip() digit_list = re.findall(r"\d+\.?\d*", temp_text) print('$', digit_list) if len(digit_list): good_text.append(digit_list[0]) else: temp_text = temp_text.replace('\xa0', '') print(4, temp_text) describe.append(temp_text) if len(good_text): text.append(' '.join([*good_text, *[url, tpud]])) text.append(' ') self.goods[index] = text print(len(text)) def load_collect(self, page): version = begin_time() if not os.path.exists('%scookie_collect' % data_dir): print('TB cookie not exist!!!') return with codecs.open('%scookie_collect' % data_dir, 'r', encoding='utf-8') as f: cookie = f.readline() changeCookie(cookie[:-1]) changeHtmlTimeout(30) for block in range(page // 10 + 1): begin = block * 10 end = min(page, (block + 1) * 10) threadings = [] for index in range(begin, end): work = threading.Thread( target=self.load_collect_once, args=(index,)) threadings.append(work) for work in threadings: work.start() for work in threadings: work.join() collect = [self.collect[k] for k in sorted(self.collect.keys())] collect = sum(collect, []) with codecs.open('%scollect_wyy' % data_dir, 'w', encoding='utf-8') as f: f.write("\n".join(collect)) end_time(version)
MIT License
radakb/pynamd
pynamd/cphlog.py
TitratableSystemSet.micro_occupancies_equiv
python
def micro_occupancies_equiv(self, segresids=[], notsegresids=[],
                            resnames=[], notresnames=[]):
    args = (segresids, notsegresids, resnames, notresnames)
    nstates = self.nstates_micro_equiv
    otype = 'micro_occupancies_equiv'
    return self._combine_occupancies(nstates, otype, *args)
Return the microstate occupancies (combining equivalent states) from each pH value stacked as a ndarray.

(Optional) selection keywords
-----------------------------
segresids : list
    explicit residue selections of the form <segid:resid>
notsegresids : list
    explicit residue exclusions of the form <segid:resid>
resnames : list
    explicit selection by residue name
notresnames : list
    explicit exclusion by residue name

Returns
-------
occupancy : ndarray
    the occupancies of the selected residues
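A brief usage sketch of the selection keywords, assuming a populated TitratableSystemSet named tss; the residue names and segresid are illustrative:

# All aspartates and glutamates, combining equivalent microstates
occ_acidic = tss.micro_occupancies_equiv(resnames=['ASP', 'GLU'])

# A single explicit residue of the form <segid:resid>
occ_one = tss.micro_occupancies_equiv(segresids=['PROA:35'])

# Everything except histidines
occ_rest = tss.micro_occupancies_equiv(notresnames=['HIS'])

# Each call returns an ndarray with one row per sample (stacked over all pH
# values) and one column per selected microstate.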
https://github.com/radakb/pynamd/blob/e31b8529e7052725916ebfa1ce7dd089d72117ec/pynamd/cphlog.py#L272-L296
import collections import json import warnings import numpy as np from scipy.optimize import root, leastsq from pynamd.msmle import MSMLE def _validate_typed_list(list_, type_): if not hasattr(list_, '__iter__'): raise ValueError('lists must be iterable') for i, v in enumerate(list_): try: type_(v) except ValueError: raise ValueError( 'Cannot cast list element %s as %s'%(str(v), str(type_)) ) list_[i] = type_(v) return list_ def _validate_state_dict(dict_): if not isinstance(dict_, dict): raise ValueError('states must be a dict object') for k, v in dict_.items(): dict_.pop(k, None) try: dict_[str(k)] = _validate_typed_list(v, int) except ValueError: raise ValueError('Values in state dicts must be integer lists!') vlen = len(list(dict_.values())[0]) for k, v in dict_.items(): if len(v) == vlen: continue raise ValueError( 'Bad list length for state %s (%d != %d)'%(k, len(v), vlen) ) return dict_ def _validate_float(value): try: float(value) except ValueError: raise ValueError('Value must be a float quantity') return float(value) class TitratableSystemSet(collections.Mapping): _MULTISTATE_METHODS = ('uwham') _log10 = np.log(10) def __init__(self, *args, **kwargs): self._od = collections.OrderedDict(*args, **kwargs) self._msmle = None def __delitem__(self, pH): del self._od[_validate_float(pH)] def __getitem__(self, pH): return self._od[_validate_float(pH)] def __iter__(self): return iter(self._od) def __len__(self): return len(self._od) def __setitem__(self, pH, system): if not isinstance(system, TitratableSystem): raise ValueError( 'TitratableSystemSets can only contain TitratableSystem objects' ) self._od[_validate_float(pH)] = system @property def _residues(self): return list(self.values())[0] @property def pHs(self): return np.asarray(list(self.keys())) @property def numpHs(self): return self.pHs.size @property def nsamples(self): return np.asarray([s.nsamples for s in self.values()], np.int32) @property def nsites(self): return self._residues.nsites @property def nprotons(self): nprotons = np.zeros(self.nsamples.sum(), np.int32) indices = np.hstack((np.zeros(1, np.int32), self.nsamples.cumsum())) for i, j, tsys in zip(indices[:-1], indices[1:], self.values()): nprotons[i:j] += tsys.nprotons return nprotons @property def nstates_micro_noequiv(self): return self._residues.nstates_micro_noequiv @property def nstates_micro_equiv(self): return self._residues.nstates_micro_equiv @property def nstates_macro(self): return self._residues.nstates_macro def nresidues(self, segresids=[], notsegresids=[], resnames=[], notresnames=[]): args = (segresids, notsegresids, resnames, notresnames) return self._residues.nresidues(*args) def segresids(self, segresids=[], notsegresids=[], resnames=[], notresnames=[]): args = (segresids, notsegresids, resnames, notresnames) return self._residues.segresids(*args) def resnames(self, segresids=[], notsegresids=[], resnames=[], notresnames=[]): args = (segresids, notsegresids, resnames, notresnames) return self._residues.resnames(*args) def _combine_occupancies(self, nstates, occupancy_type, segresids, notsegresids, resnames, notresnames): args = (segresids, notsegresids, resnames, notresnames) mask = self._residues._selection_mask(*args) _nstates = nstates*mask occ = np.zeros((self.nsamples.sum(), _nstates.sum()), np.int32) indices = np.hstack((np.zeros(1, np.int32), self.nsamples.cumsum())) for i, j, tsys in zip(indices[:-1], indices[1:], self.values()): occ[i:j] += tsys.__getattribute__(occupancy_type)(*args) return occ def _select_occupancies(self, segresidname, 
micro, noequiv): segid, resid, resname = segresidname.split(':') segresid = '%s:%s'%(segid, resid) kwargs = {'segresids': [segresid], 'resnames': [resname]} micro = (True if noequiv else micro) if micro: if noequiv: return self.micro_occupancies_noequiv(**kwargs).T else: return self.micro_occupancies_equiv(**kwargs).T else: return self.macro_occupancies(**kwargs).T def micro_occupancies_noequiv(self, segresids=[], notsegresids=[], resnames=[], notresnames=[]): args = (segresids, notsegresids, resnames, notresnames) nstates = self.nstates_micro_noequiv otype = 'micro_occupancies_noequiv' return self._combine_occupancies(nstates, otype, *args)
MIT License
bitromortac/lndmanage
lndmanage/lib/ln_utilities.py
channel_unbalancedness_and_commit_fee
python
def channel_unbalancedness_and_commit_fee(local_balance, capacity, commit_fee, initiator):
    commit_fee = 0 if not initiator else commit_fee
    return -(2 * float(local_balance + commit_fee) / capacity - 1), commit_fee
Calculates the unbalancedness.

:param local_balance: int
:param capacity: int
:param commit_fee: int
:param initiator: bool
:return: float: in [-1.0, 1.0]
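A quick worked example of the formula (the numbers are made up): with local_balance = 600000 sat, capacity = 1000000 sat, commit_fee = 2000 sat and the local node as channel initiator, the commit fee stays included and the unbalancedness is -(2 * 602000 / 1000000 - 1) = -0.204, i.e. somewhat more than half the capacity sits on the local side.

unb, fee = channel_unbalancedness_and_commit_fee(
    local_balance=600000, capacity=1000000, commit_fee=2000, initiator=True)
assert fee == 2000
print(round(unb, 3))  # -0.204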
https://github.com/bitromortac/lndmanage/blob/9a43a414fcbb7fd7efc0b7156a8bd0996ad8b24d/lndmanage/lib/ln_utilities.py#L41-L56
import re
import time


def convert_short_channel_id_to_channel_id(blockheight, transaction, output):
    return blockheight << 40 | transaction << 16 | output


def convert_channel_id_to_short_channel_id(channel_id):
    return channel_id >> 40, channel_id >> 16 & 0xFFFFFF, channel_id & 0xFFFF


def extract_short_channel_id_from_string(string):
    match = re.search(r'[0-9]+:[0-9]+:[0-9]+', string)
    group = match.group()
    groups = list(map(int, group.split(':')))
    assert len(groups) == 3
    return groups
MIT License
square/connect-python-sdk
squareconnect/models/v1_variation.py
V1Variation.price_money
python
def price_money(self):
    return self._price_money
Gets the price_money of this V1Variation.
The item variation's price, if any.

:return: The price_money of this V1Variation.
:rtype: V1Money
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/v1_variation.py#L198-L206
from pprint import pformat from six import iteritems import re class V1Variation(object): def __init__(self, id=None, name=None, item_id=None, ordinal=None, pricing_type=None, price_money=None, sku=None, track_inventory=None, inventory_alert_type=None, inventory_alert_threshold=None, user_data=None, v2_id=None): self.swagger_types = { 'id': 'str', 'name': 'str', 'item_id': 'str', 'ordinal': 'int', 'pricing_type': 'str', 'price_money': 'V1Money', 'sku': 'str', 'track_inventory': 'bool', 'inventory_alert_type': 'str', 'inventory_alert_threshold': 'int', 'user_data': 'str', 'v2_id': 'str' } self.attribute_map = { 'id': 'id', 'name': 'name', 'item_id': 'item_id', 'ordinal': 'ordinal', 'pricing_type': 'pricing_type', 'price_money': 'price_money', 'sku': 'sku', 'track_inventory': 'track_inventory', 'inventory_alert_type': 'inventory_alert_type', 'inventory_alert_threshold': 'inventory_alert_threshold', 'user_data': 'user_data', 'v2_id': 'v2_id' } self._id = id self._name = name self._item_id = item_id self._ordinal = ordinal self._pricing_type = pricing_type self._price_money = price_money self._sku = sku self._track_inventory = track_inventory self._inventory_alert_type = inventory_alert_type self._inventory_alert_threshold = inventory_alert_threshold self._user_data = user_data self._v2_id = v2_id @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def item_id(self): return self._item_id @item_id.setter def item_id(self, item_id): self._item_id = item_id @property def ordinal(self): return self._ordinal @ordinal.setter def ordinal(self, ordinal): self._ordinal = ordinal @property def pricing_type(self): return self._pricing_type @pricing_type.setter def pricing_type(self, pricing_type): self._pricing_type = pricing_type @property
Apache License 2.0
fuyukai/owapi
owapi/blizz_interface.py
get_user_page
python
async def get_user_page(
    ctx: HTTPRequestContext,
    battletag: str,
    platform: str = "pc",
    region: str = "us",
    cache_time=300,
    cache_404=False,
) -> etree._Element:
    if platform != "pc":
        region = ""
    built_url = B_PAGE_URL.format(
        region=region, btag=battletag.replace("#", "-"), platform=platform
    )
    page_body = await get_page_body(ctx, built_url, cache_time=cache_time, cache_404=cache_404)
    if not page_body:
        return None
    parse_partial = functools.partial(_parse_page, page_body)
    loop = asyncio.get_event_loop()
    parsed = await loop.run_in_executor(None, parse_partial)
    node = parsed.findall(".//section[@class='u-nav-offset']//h1[@class='u-align-center']")
    for nodes in node:
        if nodes.text.strip() == "Profile Not Found":
            return None
    return parsed
Downloads the BZ page for a user, and parses it.
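For orientation, a sketch of calling the coroutine for a PC player; the battletag is made up and ctx is assumed to be the HTTPRequestContext supplied by the web framework. For platform 'pc' the region segment is blanked before B_PAGE_URL is formatted, so the request goes to .../career/pc/SomePlayer-1234:

async def demo(ctx):
    parsed = await get_user_page(ctx, 'SomePlayer#1234', platform='pc', region='us')
    if parsed is None:
        print('profile not found or page unavailable')
    return parsed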
https://github.com/fuyukai/owapi/blob/c5d7a34e6ac9ab98856ee518404af26a1781811e/owapi/blizz_interface.py#L77-L109
import asyncio import functools import logging import traceback import aiohttp from kyoukai.asphalt import HTTPRequestContext from lxml import etree try: from html5_parser import parse _has_html5_parser = True except ImportError: _has_html5_parser = False from werkzeug.exceptions import HTTPException, NotFound, InternalServerError from owapi import util B_BASE_URL = "https://playoverwatch.com/en-us/" B_PAGE_URL = B_BASE_URL + "career/{platform}{region}/{btag}" B_HEROES_URL = B_BASE_URL + "heroes" B_HERO_URL = B_HEROES_URL + "/{hero}" AVAILABLE_REGIONS = ["/eu", "/us", "/kr"] logger = logging.getLogger("OWAPI") async def get_page_body(ctx: HTTPRequestContext, url: str, cache_time=300, cache_404=False) -> str: async def _real_get_body(_, url: str): logger.info("GET => {}".format(url)) async with ctx.session.get(url) as req: assert isinstance(req, aiohttp.ClientResponse) logger.info("GET => {} => {}".format(url, req.status)) if req.status != 200: return None return (await req.read()).decode() result = await util.with_cache( ctx, _real_get_body, url, expires=cache_time, cache_404=cache_404 ) return result def _parse_page_html5(content: str) -> etree._Element: if content and content.lower() != "none": data = parse(content) return data def _parse_page_lxml(content: str) -> etree._Element: if content and content.lower() != "none": data = etree.HTML(content) return data
MIT License
nickrusso42518/nots
plugins/filter/filter.py
FilterModule.ios_ospf_dbsum
python
def ios_ospf_dbsum(text):
    return_dict = {}
    process_pattern = r"""
        Process\s+(?P<process_id>\d+)\s+database\s+summary\s+
        (?:LSA\s+Type\s+Count\s+Delete\s+Maxage\s+)?
        Router\s+(?P<total_lsa1>\d+).*\n\s+
        Network\s+(?P<total_lsa2>\d+).*\n\s+
        Summary\s+Net\s+(?P<total_lsa3>\d+).*\n\s+
        Summary\s+ASBR\s+(?P<total_lsa4>\d+).*\n\s+
        Type-7\s+Ext\s+(?P<total_lsa7>\d+).*
        \s+Type-5\s+Ext\s+(?P<total_lsa5>\d+)
    """
    regex = re.compile(process_pattern, re.VERBOSE + re.DOTALL)
    match = regex.search(text)
    key_filler_list = [
        "process_id",
        "total_lsa1",
        "total_lsa2",
        "total_lsa3",
        "total_lsa4",
        "total_lsa5",
        "total_lsa7",
    ]
    process = FilterModule._read_match(match, key_filler_list)
    return_dict.update({"process": process})

    area_pattern = r"""
        Area\s+(?P<id>\d+)\s+database\s+summary\s+
        (?:LSA\s+Type\s+Count\s+Delete\s+Maxage\s+)?
        Router\s+(?P<num_lsa1>\d+).*\n\s+
        Network\s+(?P<num_lsa2>\d+).*\n\s+
        Summary\s+Net\s+(?P<num_lsa3>\d+).*\n\s+
        Summary\s+ASBR\s+(?P<num_lsa4>\d+).*\n\s+
        Type-7\s+Ext\s+(?P<num_lsa7>\d+)
    """
    areas = FilterModule._get_match_items(area_pattern, text)
    return_dict.update({"areas": areas})
    return return_dict
Parses information from the Cisco IOS "show ip ospf database database-summary" command family. This is useful for verifying various characteristics of an OSPF database to count LSAs for simple verification. Note that this parser is generic enough to cover Cisco IOS-XR also.
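The returned structure, given the patterns above, looks roughly like this (the counts are invented for illustration):

expected = {
    'process': {
        'process_id': 1,
        'total_lsa1': 8, 'total_lsa2': 5, 'total_lsa3': 12,
        'total_lsa4': 2, 'total_lsa5': 4, 'total_lsa7': 0,
    },
    'areas': [
        {'id': 0, 'num_lsa1': 8, 'num_lsa2': 5, 'num_lsa3': 12,
         'num_lsa4': 2, 'num_lsa7': 0},
    ],
}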
https://github.com/nickrusso42518/nots/blob/7bdd37f88edfd004c5a5bab0e9340da29cd29709/plugins/filter/filter.py#L331-L377
import re import ipaddress class FilterModule(object): @staticmethod def filters(): return { "ios_ospf_neighbor": FilterModule.ios_ospf_neighbor, "ios_ospf_basic": FilterModule.ios_ospf_basic, "ios_ospf_dbsum": FilterModule.ios_ospf_dbsum, "ios_ospf_traffic": FilterModule.ios_ospf_traffic, "ios_ospf_frr": FilterModule.ios_ospf_frr, "ios_bfd_neighbor": FilterModule.ios_bfd_neighbor, "check_bfd_up": FilterModule.check_bfd_up, "iosxr_ospf_traffic": FilterModule.iosxr_ospf_traffic, "iosxr_ospf_basic": FilterModule.iosxr_ospf_basic, "iosxr_ospf_neighbor": FilterModule.iosxr_ospf_neighbor, "nxos_ospf_basic": FilterModule.nxos_ospf_basic, "nxos_ospf_neighbor": FilterModule.nxos_ospf_neighbor, "nxos_ospf_dbsum": FilterModule.nxos_ospf_dbsum, "nxos_ospf_traffic": FilterModule.nxos_ospf_traffic, } @staticmethod def _read_match(match, key_filler_list=None): return_dict = None if match: return_dict = match.groupdict() for key in return_dict.keys(): return_dict[key] = FilterModule._try_int(return_dict[key]) elif key_filler_list: return_dict = {} for key in key_filler_list: return_dict.update({key: None}) return return_dict @staticmethod def _get_match_items(pattern, text, extra_flags=0): regex = re.compile(pattern, re.VERBOSE + extra_flags) items = [match.groupdict() for match in regex.finditer(text)] for item in items: for key in item.keys(): item[key] = FilterModule._try_int(item[key]) return items @staticmethod def nxos_ospf_traffic(text): process_pattern = r""" OSPF\s+Process\s+ID\s+(?P<pid>\d+)\s+ .*? Ignored\s+LSAs:\s+(?P<ignore_lsa>\d+),\s+ LSAs\s+dropped\s+during\s+SPF:\s+(?P<lsa_drop_spf>\d+)\s+ LSAs\s+dropped\s+during\s+graceful\s+restart:\s+(?P<lsa_drop_gr>\d+) \s+Errors:\s+ drops\s+in\s+(?P<drops_in>\d+),\s+ drops\s+out\s+(?P<drops_out>\d+),\s+ errors\s+in\s+(?P<errors_in>\d+),\s+ errors\s+out\s+(?P<errors_out>\d+),\s+ hellos\s+in\s+(?P<hellos_in>\d+),\s+ dbds\s+in\s+(?P<dbds_in>\d+),\s+ lsreq\s+in\s+(?P<lsreq_in>\d+),\s+ lsu\s+in\s+(?P<lsu_in>\d+),\s+ lsacks\s+in\s+(?P<lsacks_in>\d+),\s+ unknown\s+in\s+(?P<unk_in>\d+),\s+ unknown\s+out\s+(?P<unk_out>\d+),\s+ no\s+ospf\s+(?P<no_ospf>\d+),\s+ bad\s+version\s+(?P<bad_ver>\d+),\s+ bad\s+crc\s+(?P<bad_crc>\d+),\s+ dup\s+rid\s+(?P<dup_rid>\d+),\s+ dup\s+src\s+(?P<dup_src>\d+),\s+ invalid\s+src\s+(?P<inv_src>\d+),\s+ invalid\s+dst\s+(?P<inv_dst>\d+),\s+ no\s+nbr\s+(?P<no_nbr>\d+),\s+ passive\s+(?P<passive>\d+),\s+ wrong\s+area\s+(?P<wrong_area>\d+),\s+ pkt\s+length\s+(?P<pkt_len>\d+),\s+ nbr\s+changed\s+rid/ip\s+addr\s+(?P<nbr_change>\d+)\s+ bad\s+auth\s+(?P<bad_auth>\d+),\s+ no\s+vrf\s+(?P<no_vrf>\d+) """ return FilterModule._get_match_items(process_pattern, text, re.DOTALL) @staticmethod def nxos_ospf_dbsum(text): return_dict = {} process_pattern = r""" Process\s+(?P<process_id>\d+)\s+database\s+summary\s+ LSA\s+Type\s+Count\s+ Opaque\s+Link\s+\d+\s+ Router\s+(?P<total_lsa1>\d+)\s+ Network\s+(?P<total_lsa2>\d+)\s+ Summary\s+Network\s+(?P<total_lsa3>\d+)\s+ Summary\s+ASBR\s+(?P<total_lsa4>\d+)\s+ Type-7\s+AS\s+External\s+(?P<total_lsa7>\d+)\s+ Opaque\s+Area\s+\d+\s+ Type-5\s+AS\s+External\s+(?P<total_lsa5>\d+) """ regex = re.compile(process_pattern, re.VERBOSE) match = regex.search(text) key_filler_list = [ "process_id", "total_lsa1", "total_lsa2", "total_lsa3", "total_lsa4", "total_lsa5", "total_lsa7", ] process = FilterModule._read_match(match, key_filler_list) return_dict.update({"process": process}) area_pattern = r""" Area\s+(?P<id>\d+\.\d+\.\d+\.\d+)\s+database\s+summary\s+ LSA\s+Type\s+Count\s+ Opaque\s+Link\s+\d+\s+ 
Router\s+(?P<num_lsa1>\d+)\s+ Network\s+(?P<num_lsa2>\d+)\s+ Summary\s+Network\s+(?P<num_lsa3>\d+)\s+ Summary\s+ASBR\s+(?P<num_lsa4>\d+)\s+ Type-7\s+AS\s+External\s+(?P<num_lsa7>\d+)\s+ """ areas = FilterModule._get_match_items(area_pattern, text) return_dict.update({"areas": areas}) return return_dict @staticmethod def nxos_ospf_neighbor(text): pattern = r""" (?P<rid>\d+\.\d+\.\d+\.\d+)\s+ (?P<priority>\d+)\s+ (?P<state>\w+)/\s* (?P<role>[A-Z-]+)\s+ (?P<uptime>[0-9:hdwy]+|-)\s+ (?P<peer>\d+\.\d+\.\d+\.\d+)\s+ (?P<intf>[0-9A-Za-z./_-]+) """ return FilterModule._ospf_neighbor(pattern, text, ["uptime"]) @staticmethod def nxos_ospf_basic(text): return_dict = {} process_pattern = r""" Routing\s+Process\s+(?P<id>\d+)\s+with\s+ID\s+(?P<rid>\d+\.\d+\.\d+\.\d+) .* \s*Reference\s+Bandwidth\s+is\s+(?P<ref_bw>\d+)\s+Mbps .* \s*SPF\s+throttling\s+delay\s+time\s+of\s+(?P<init_spf>\d+)(?:\.\d+)\s+msecs, \s*SPF\s+throttling\s+hold\s+time\s+of\s+(?P<min_spf>\d+)(?:\.\d+)\s+msecs, \s*SPF\s+throttling\s+maximum\s+wait\s+time\s+of\s+(?P<max_spf>\d+)(?:\.\d+)\s+msecs """ regex = re.compile(process_pattern, re.VERBOSE + re.DOTALL) match = regex.search(text) process = FilterModule._read_match(match, ["process"]) if process: is_abr = text.find("area border") != -1 is_asbr = text.find("autonomous system boundary") != -1 is_stub_rtr = text.find("Originating router LSA with max") != -1 process.update( {"is_abr": is_abr, "is_asbr": is_asbr, "is_stub_rtr": is_stub_rtr,} ) return_dict.update({"process": process}) area_pattern = r""" Area\s+(?:BACKBONE)?\((?P<id_dd>\d+\.\d+\.\d+\.\d+)\)\s+ \s+(?:Area\s+has\s+existed.*)\n \s+Interfaces\s+in\s+this\s+area:\s+(?P<num_intfs>\d+).*\n \s+(?:Passive.*)\n \s+(?:This\s+area\s+is\s+a\s+(?P<type>\w+)\s+area)? """ regex = re.compile(area_pattern, re.VERBOSE) areas = [match.groupdict() for match in regex.finditer(text)] for area in areas: area["num_intfs"] = FilterModule._try_int(area["num_intfs"]) converted_dd = ipaddress.IPv4Address(area["id_dd"]) area["id"] = FilterModule._try_int(converted_dd) if not area["type"]: area["type"] = "standard" else: area["type"] = area["type"].lower() return_dict.update({"areas": areas}) return return_dict @staticmethod def _try_int(text): try: return int(text) except ValueError: return text @staticmethod def ios_ospf_neighbor(text): pattern = r""" (?P<rid>\d+\.\d+\.\d+\.\d+)\s+ (?P<priority>\d+)\s+ (?P<state>\w+)/\s* (?P<role>[A-Z-]+)\s+ (?P<deadtime>[0-9:]+|-)\s+ (?P<peer>\d+\.\d+\.\d+\.\d+)\s+ (?P<intf>[0-9A-Za-z./_-]+) """ return FilterModule._ospf_neighbor(pattern, text, ["deadtime"]) @staticmethod def ios_ospf_basic(text): return_dict = {} process_pattern = r""" Routing\s+Process\s+"ospf\s+(?P<id>\d+)"\s+with\s+ID\s+(?P<rid>\d+\.\d+\.\d+\.\d+) .* \s*Initial\s+SPF\s+schedule\s+delay\s+(?P<init_spf>\d+)\s+msecs \s*Minimum\s+hold\s+time\s+between\s+two\s+consecutive\s+SPFs\s+(?P<min_spf>\d+)\s+msecs \s*Maximum\s+wait\s+time\s+between\s+two\s+consecutive\s+SPFs\s+(?P<max_spf>\d+)\s+msecs .* \s*Reference\s+bandwidth\s+unit\s+is\s+(?P<ref_bw>\d+)\s+mbps """ regex = re.compile(process_pattern, re.VERBOSE + re.DOTALL) match = regex.search(text) process = FilterModule._read_match(match, ["process"]) if process: process.update( { "is_abr": text.find("area border") != -1, "is_asbr": text.find("autonomous system boundary") != -1, "is_stub_rtr": text.find("Originating router-LSAs with") != -1, "has_ispf": text.find("Incremental-SPF enabled") != -1, "has_bfd": text.find("BFD is enabled") != -1, "has_ttlsec": text.find("Strict TTL checking enabled") != -1, 
} ) return_dict.update({"process": process}) area_pattern = r""" Area\s+(?:BACKBONE\()?(?P<id>\d+)(?:\))?\s+ Number\s+of\s+interfaces\s+in\s+this\s+area\s+is\s+(?P<num_intfs>\d+).*\n \s+(?:It\s+is\s+a\s+(?P<type>\w+)\s+area)? """ regex = re.compile(area_pattern, re.VERBOSE) areas = [match.groupdict() for match in regex.finditer(text)] for area in areas: area["num_intfs"] = FilterModule._try_int(area["num_intfs"]) area["id"] = FilterModule._try_int(area["id"]) if not area["type"]: area["type"] = "standard" else: area["type"] = area["type"].lower() return_dict.update({"areas": areas}) return return_dict @staticmethod
BSD 3-Clause New or Revised License
fizzadar/pyinfra
pyinfra/operations/postgresql.py
dump
python
def dump(
    dest, database=None,
    postgresql_user=None, postgresql_password=None,
    postgresql_host=None, postgresql_port=None,
    state=None, host=None,
):
    yield StringCommand(make_psql_command(
        executable='pg_dump',
        database=database,
        user=postgresql_user,
        password=postgresql_password,
        host=postgresql_host,
        port=postgresql_port,
    ), '>', dest)
Dump a PostgreSQL database into a ``.sql`` file. Requires ``pg_dump``.

+ dest: name of the file to dump the SQL to
+ database: name of the database to dump
+ postgresql_*: global module arguments, see above

Example:

.. code:: python

    postgresql.dump(
        name='Dump the pyinfra_stuff database',
        dest='/tmp/pyinfra_stuff.dump',
        database='pyinfra_stuff',
        sudo_user='postgres',
    )
https://github.com/fizzadar/pyinfra/blob/2cc084dd2ae3e2f2c62884afe9e96783deeb721b/pyinfra/operations/postgresql.py#L239-L273
from pyinfra.api import MaskString, operation, StringCommand from pyinfra.facts.postgresql import make_execute_psql_command, make_psql_command @operation def sql( sql, database=None, postgresql_user=None, postgresql_password=None, postgresql_host=None, postgresql_port=None, state=None, host=None, ): yield make_execute_psql_command( sql, database=database, user=postgresql_user, password=postgresql_password, host=postgresql_host, port=postgresql_port, ) @operation def role( role, present=True, password=None, login=True, superuser=False, inherit=False, createdb=False, createrole=False, replication=False, connection_limit=None, postgresql_user=None, postgresql_password=None, postgresql_host=None, postgresql_port=None, state=None, host=None, ): roles = host.fact.postgresql_roles( postgresql_user, postgresql_password, postgresql_host, postgresql_port, ) is_present = role in roles if not present: if is_present: yield make_execute_psql_command( 'DROP ROLE "{0}"'.format(role), user=postgresql_user, password=postgresql_password, host=postgresql_host, port=postgresql_port, ) else: host.noop('postgresql role {0} does not exist'.format(role)) return if not is_present: sql_bits = ['CREATE ROLE "{0}"'.format(role)] for key, value in ( ('LOGIN', login), ('SUPERUSER', superuser), ('INHERIT', inherit), ('CREATEDB', createdb), ('CREATEROLE', createrole), ('REPLICATION', replication), ): if value: sql_bits.append(key) if connection_limit: sql_bits.append('CONNECTION LIMIT {0}'.format(connection_limit)) if password: sql_bits.append(MaskString("PASSWORD '{0}'".format(password))) yield make_execute_psql_command( StringCommand(*sql_bits), user=postgresql_user, password=postgresql_password, host=postgresql_host, port=postgresql_port, ) else: host.noop('postgresql role {0} exists'.format(role)) @operation def database( database, present=True, owner=None, template=None, encoding=None, lc_collate=None, lc_ctype=None, tablespace=None, connection_limit=None, postgresql_user=None, postgresql_password=None, postgresql_host=None, postgresql_port=None, state=None, host=None, ): current_databases = host.fact.postgresql_databases( postgresql_user, postgresql_password, postgresql_host, postgresql_port, ) is_present = database in current_databases if not present: if is_present: yield make_execute_psql_command( 'DROP DATABASE "{0}"'.format(database), user=postgresql_user, password=postgresql_password, host=postgresql_host, port=postgresql_port, ) else: host.noop('postgresql database {0} does not exist'.format(database)) return if present and not is_present: sql_bits = ['CREATE DATABASE "{0}"'.format(database)] for key, value in ( ('OWNER', '"{0}"'.format(owner) if owner else owner), ('TEMPLATE', template), ('ENCODING', encoding), ('LC_COLLATE', lc_collate), ('LC_CTYPE', lc_ctype), ('TABLESPACE', tablespace), ('CONNECTION LIMIT', connection_limit), ): if value: sql_bits.append('{0} {1}'.format(key, value)) yield make_execute_psql_command( StringCommand(*sql_bits), user=postgresql_user, password=postgresql_password, host=postgresql_host, port=postgresql_port, ) else: host.noop('postgresql database {0} exists'.format(database)) @operation
MIT License
netzkolchose/django-computedfields
computedfields/admin.py
ComputedModelsAdmin.modelgraph
python
def modelgraph(self, inst):
    model = apps.get_model(inst.app_label, inst.model)
    if not active_resolver._local_mro.get(model, None):
        return 'None'
    _url = reverse(
        'admin:%s_%s_computedfields_modelgraph' % (
            self.model._meta.app_label, self.model._meta.model_name),
        args=[inst.id])
    return mark_safe('''<a href="%s" target="popup"
        onclick="javascript:open('', 'popup', 'height=400,width=600,resizable=yes')">
        ModelGraph</a>''' % _url)
Link to show modelgraph.
https://github.com/netzkolchose/django-computedfields/blob/ffa7c963cb0e70d2afe5954f2fdca241c0407b3f/computedfields/admin.py#L115-L126
from json import dumps from django.contrib import admin from django.apps import apps from django.conf import settings from django.utils.html import escape, mark_safe, format_html from django.urls import reverse, NoReverseMatch, path from django.shortcuts import render from django.core.exceptions import ObjectDoesNotExist from .models import ComputedFieldsAdminModel, ContributingModelsModel from .resolver import active_resolver from .graph import ComputedModelsGraph try: import pygments from pygments.lexers import JsonLexer from pygments.formatters import HtmlFormatter except ImportError: pygments = False try: from graphviz import Digraph except ImportError: Digraph = False class ComputedModelsAdmin(admin.ModelAdmin): actions = None change_list_template = 'computedfields/change_list.html' list_display = ('name', 'computed_fields', 'dependencies', 'local_computed_fields_mro', 'modelgraph') list_display_links = None def has_add_permission(self, request): return False def has_delete_permission(self, request, obj=None): return False def dependencies(self, inst): model = apps.get_model(inst.app_label, inst.model) cf_models = active_resolver.computed_models deps = {} for fieldname, field in cf_models.get(model).items(): deps[fieldname] = field._computed['depends'] data = dumps(deps, indent=4, sort_keys=True) if pygments: data = mark_safe( pygments.highlight(data, JsonLexer(stripnl=False), HtmlFormatter(noclasses=True, nowrap=True))) return format_html(u'<pre>{}</pre>', data) def computed_fields(self, inst): model = apps.get_model(inst.app_label, inst.model) cfs = list(active_resolver.computed_models[model].keys()) data = dumps(cfs, indent=4, sort_keys=True) if pygments: data = mark_safe( pygments.highlight(data, JsonLexer(stripnl=False), HtmlFormatter(noclasses=True, nowrap=True))) return format_html(u'<pre>{}</pre>', data) def local_computed_fields_mro(self, inst): model = apps.get_model(inst.app_label, inst.model) entry = active_resolver._local_mro[model] base = entry['base'] deps = {'mro': base, 'fields': {}} for field, value in entry['fields'].items(): deps['fields'][field] = [name for pos, name in enumerate(base) if value & (1 << pos)] data = dumps(deps, indent=4, sort_keys=False) if pygments: data = mark_safe(pygments.highlight(data, JsonLexer(stripnl=False), HtmlFormatter(noclasses=True, nowrap=True))) return format_html(u'<pre>{}</pre>', data) def name(self, obj): name = escape(u'%s.%s' % (obj.app_label, obj.model)) try: _url = escape(reverse('admin:%s_%s_changelist' % (obj.app_label, obj.model))) except NoReverseMatch: return name return format_html(u'<a href="{}">{}</a>', _url, name) def get_urls(self): urls = super(ComputedModelsAdmin, self).get_urls() info = self.model._meta.app_label, self.model._meta.model_name databaseview_urls = [ path(r'computedfields/rendergraph/', self.admin_site.admin_view(self.render_graph), name='%s_%s_computedfields_rendergraph' % info), path(r'computedfields/renderuniongraph/', self.admin_site.admin_view(self.render_uniongraph), name='%s_%s_computedfields_renderuniongraph' % info), path(r'computedfields/modelgraph/<int:modelid>/', self.admin_site.admin_view(self.render_modelgraph), name='%s_%s_computedfields_modelgraph' % info), ] return databaseview_urls + urls
MIT License
pycontribs/pyrax
pyrax/image.py
ImageClient.update
python
def update(self, img, value_dict):
    return self._manager.update(img, value_dict)
Accepts an image reference (object or ID) and a dictionary of key/value pairs, where the key is an attribute of the image, and the value is the desired new value for that image.
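A minimal usage sketch for the call above; `img_client` and the image ID are placeholders for an already-authenticated ImageClient and a real image, and the attribute being changed is just an example.

# Hypothetical usage; assumes an authenticated ImageClient `img_client`
# and a real image ID -- both placeholders.
img_client.update("0b5c2c7a-0000-0000-0000-000000000000",
                  {"name": "ubuntu-base-renamed"})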
https://github.com/pycontribs/pyrax/blob/a0c022981f76a4cba96a22ecc19bb52843ac4fbe/pyrax/image.py#L556-L562
from __future__ import absolute_import, unicode_literals from functools import wraps import pyrax from pyrax.object_storage import StorageObject from pyrax.client import BaseClient import pyrax.exceptions as exc from pyrax.manager import BaseManager from pyrax.resource import BaseResource import pyrax.utils as utils DEFAULT_FORMAT = "vhd" def assure_image(fnc): @wraps(fnc) def _wrapped(self, img, *args, **kwargs): if not isinstance(img, Image): img = self._manager.get(img) return fnc(self, img, *args, **kwargs) return _wrapped class Image(BaseResource): def __init__(self, manager, info, key=None, loaded=False, member_manager_class=None, tag_manager_class=None): super(Image, self).__init__(manager, info, key=key, loaded=loaded) member_manager_class = member_manager_class or ImageMemberManager tag_manager_class = tag_manager_class or ImageTagManager self._member_manager = member_manager_class(self.manager.api, resource_class=ImageMember, response_key="", plural_response_key="members", uri_base="images/%s/members" % self.id) self._tag_manager = tag_manager_class(self.manager.api, resource_class=ImageTag, response_key="", plural_response_key="tags", uri_base="images/%s/tags" % self.id) self._non_display = [ "com.rackspace__1__build_core", "com.rackspace__1__build_managed", "com.rackspace__1__build_rackconnect", "com.rackspace__1__options", "com.rackspace__1__platform_target", "com.rackspace__1__release_build_date", "com.rackspace__1__release_id", "com.rackspace__1__release_version", "com.rackspace__1__source", "com.rackspace__1__visible_core", "com.rackspace__1__visible_managed", "com.rackspace__1__visible_rackconnect", "file", "instance_type_ephemeral_gb", "instance_type_flavorid", "instance_type_id", "instance_type_memory_mb", "instance_type_name", "instance_type_root_gb", "instance_type_rxtx_factor", "instance_type_swap", "instance_type_vcpu_weight", "instance_type_vcpus", "instance_uuid", "org.openstack__1__architecture", "org.openstack__1__os_distro", "org.openstack__1__os_version", "rax_activation_profile", "rax_managed", "rax_options", "schema", "self", ] def update(self, value_dict): return self.manager.update(self, value_dict) def change_name(self, newname): return self.update({"name": newname}) def list_members(self): return self._member_manager.list() def get_member(self, member): return self._member_manager.get(member) def add_member(self, project_id): return self._member_manager.create(name=None, project_id=project_id) def delete_member(self, project_id): return self._member_manager.delete(project_id) def add_tag(self, tag): return self._tag_manager.add(tag) def delete_tag(self, tag): return self._tag_manager.delete(tag) class ImageMember(BaseResource): @property def id(self): return self.member_id class ImageTag(BaseResource): pass class ImageTask(BaseResource): pass class ImageManager(BaseManager): def _create_body(self, name, metadata=None): if metadata is None: body = {} else: body = {"metadata": metadata} return body def list(self, limit=None, marker=None, name=None, visibility=None, member_status=None, owner=None, tag=None, status=None, size_min=None, size_max=None, sort_key=None, sort_dir=None, return_raw=False): uri = "/%s" % self.uri_base qs = utils.dict_to_qs(dict(limit=limit, marker=marker, name=name, visibility=visibility, member_status=member_status, owner=owner, tag=tag, status=status, size_min=size_min, size_max=size_max, sort_key=sort_key, sort_dir=sort_dir)) if qs: uri = "%s?%s" % (uri, qs) return self._list(uri, return_raw=return_raw) def list_all(self, 
name=None, visibility=None, member_status=None, owner=None, tag=None, status=None, size_min=None, size_max=None, sort_key=None, sort_dir=None): def strip_version(uri): pos = uri.find("/images") return uri[pos:] obj_class = self.resource_class resp, resp_body = self.list(name=name, visibility=visibility, member_status=member_status, owner=owner, tag=tag, status=status, size_min=size_min, size_max=size_max, sort_key=sort_key, sort_dir=sort_dir, return_raw=True) data = resp_body.get(self.plural_response_key, resp_body) next_uri = strip_version(resp_body.get("next", "")) ret = [obj_class(manager=self, info=res) for res in data if res] while next_uri: resp, resp_body = self.api.method_get(next_uri) data = resp_body.get(self.plural_response_key, resp_body) next_uri = strip_version(resp_body.get("next", "")) ret.extend([obj_class(manager=self, info=res) for res in data if res]) return ret def create(self, name, img_format=None, img_container_format=None, data=None, container=None, obj=None, metadata=None): if img_format is None: img_format = "vhd" if img_container_format is None: img_container_format = "bare" headers = { "X-Image-Meta-name": name, "X-Image-Meta-disk_format": img_format, "X-Image-Meta-container_format": img_container_format, } if data: img_data = data else: ident = self.api.identity region = self.api.region_name clt = ident.get_client("object_store", region) if not isinstance(obj, StorageObject): obj = clt.get_object(container, obj) img_data = obj.fetch() uri = "%s/images" % self.uri_base resp, resp_body = self.api.method_post(uri, headers=headers, data=img_data) def update(self, img, value_dict): img = self.get(img) uri = "/%s/%s" % (self.uri_base, utils.get_id(img)) body = [] for key, val in value_dict.items(): op = "replace" if key in img.__dict__ else "add" body.append({"op": op, "path": "/%s" % key, "value": val}) headers = {"Content-Type": "application/openstack-images-v2.1-json-patch"} resp, resp_body = self.api.method_patch(uri, body=body, headers=headers) def update_image_member(self, img_id, status): if status not in ("pending", "accepted", "rejected"): raise exc.InvalidImageMemberStatus("The status value must be one " "of 'accepted', 'rejected', or 'pending'. Received: '%s'" % status) api = self.api project_id = api.identity.tenant_id uri = "/%s/%s/members/%s" % (self.uri_base, img_id, project_id) body = {"status": status} try: resp, resp_body = self.api.method_put(uri, body=body) except exc.NotFound as e: raise exc.InvalidImageMember("The update member request could not " "be completed. 
No member request for that image was found.") class ImageMemberManager(BaseManager): def _create_body(self, name, project_id): body = {"member": project_id} return body def create(self, name, *args, **kwargs): try: return super(ImageMemberManager, self).create(name, *args, **kwargs) except Exception as e: if e.http_status == 403: raise exc.UnsharableImage("You cannot share a public image.") else: raise class ImageTagManager(BaseManager): def _create_body(self, name): return {} def add(self, tag): uri = "/%s/%s" % (self.uri_base, tag) resp, resp_body = self.api.method_put(uri) class ImageTasksManager(BaseManager): def _create_body(self, name, img=None, cont=None, img_format=None, img_name=None): img = utils.get_id(img) cont = utils.get_name(cont) body = {"type": name} if name == "export": body["input"] = { "image_uuid": img, "receiving_swift_container": cont} else: nm = "%s/%s" % (cont, utils.get_name(img)) body["input"] = { "image_properties": {"name": img_name or img}, "import_from": nm, "import_from_format": img_format or DEFAULT_FORMAT} return body def create(self, name, *args, **kwargs): cont = kwargs.get("cont") if cont: api = self.api rgn = api.region_name cf = api.identity.object_store[rgn].client cf.get_container(cont) return super(ImageTasksManager, self).create(name, *args, **kwargs) class JSONSchemaManager(BaseManager): def _create_body(self, name): pass def images(self): uri = "/%s/images" % self.uri_base resp, resp_body = self.api.method_get(uri) return resp_body def image(self): uri = "/%s/image" % self.uri_base resp, resp_body = self.api.method_get(uri) return resp_body def image_members(self): uri = "/%s/members" % self.uri_base resp, resp_body = self.api.method_get(uri) return resp_body def image_member(self): uri = "/%s/member" % self.uri_base resp, resp_body = self.api.method_get(uri) return resp_body def image_tasks(self): uri = "/%s/tasks" % self.uri_base resp, resp_body = self.api.method_get(uri) return resp_body def image_task(self): uri = "/%s/task" % self.uri_base resp, resp_body = self.api.method_get(uri) return resp_body class ImageClient(BaseClient): name = "Images" def _configure_manager(self): self._manager = ImageManager(self, resource_class=Image, response_key="", plural_response_key="images", uri_base="images") self._tasks_manager = ImageTasksManager(self, resource_class=ImageTask, response_key="", plural_response_key="tasks", uri_base="tasks") self._schema_manager = JSONSchemaManager(self, resource_class=None, response_key="", plural_response_key="", uri_base="schemas") def list(self, limit=None, marker=None, name=None, visibility=None, member_status=None, owner=None, tag=None, status=None, size_min=None, size_max=None, sort_key=None, sort_dir=None): return self._manager.list(limit=limit, marker=marker, name=name, visibility=visibility, member_status=member_status, owner=owner, tag=tag, status=status, size_min=size_min, size_max=size_max, sort_key=sort_key, sort_dir=sort_dir) def list_all(self, name=None, visibility=None, member_status=None, owner=None, tag=None, status=None, size_min=None, size_max=None, sort_key=None, sort_dir=None): return self._manager.list_all(name=name, visibility=visibility, member_status=member_status, owner=owner, tag=tag, status=status, size_min=size_min, size_max=size_max, sort_key=sort_key, sort_dir=sort_dir)
Apache License 2.0
coderskychen/action_recognition_zoo
model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/utils.py
GetTensorOpName
python
def GetTensorOpName(x):
    t = x.name.rsplit(":", 1)
    if len(t) == 1:
        return x.name
    else:
        return t[0]
Get the name of the op that created a tensor.

Useful for naming related tensors, as ':' is not permitted in the name
field of an op.

Args:
  x: the input tensor.
Returns:
  the name of the op.
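A hedged usage sketch: in TF1-style graph mode a tensor's name carries an output-index suffix such as ":0", which GetTensorOpName strips back to the op name. It assumes GetTensorOpName is importable from the utils module shown here.

import tensorflow as tf

tf.compat.v1.disable_eager_execution()      # TF1-style graph tensors carry names
x = tf.constant(3.0, name="my_const")
print(x.name)                               # e.g. "my_const:0"
print(GetTensorOpName(x))                   # "my_const"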
https://github.com/coderskychen/action_recognition_zoo/blob/92ec5ec3efeee852aec5c057798298cd3a8e58ae/model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/utils.py#L70-L85
from __future__ import division import math import numpy import tensorflow as tf class LayerParameters(object): def __init__(self): self.name = "" self.num_units = 0 self._with_bias = False self.relu = False self.gradient_l2norm_bound = 0.0 self.bias_gradient_l2norm_bound = 0.0 self.trainable = True self.weight_decay = 0.0 class ConvParameters(object): def __init__(self): self.patch_size = 5 self.stride = 1 self.in_channels = 1 self.out_channels = 0 self.with_bias = True self.relu = True self.max_pool = True self.max_pool_size = 2 self.max_pool_stride = 2 self.trainable = False self.in_size = 28 self.name = "" self.num_outputs = 0 self.bias_stddev = 0.1 class NetworkParameters(object): def __init__(self): self.input_size = 0 self.projection_type = 'NONE' self.projection_dimensions = 0 self.default_gradient_l2norm_bound = 0.0 self.layer_parameters = [] self.conv_parameters = []
MIT License
sofia-netsurv/python-netsurv
env/lib/python3.5/site-packages/isort/finders.py
ReqsBaseFinder._normalize_name
python
def _normalize_name(self, name):
    if self.mapping:
        name = self.mapping.get(name, name)
    return name.lower().replace('-', '_')
Convert package name to module name.

Examples:
    Django -> django
    django-haystack -> haystack
    Flask-RESTFul -> flask_restful
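A standalone sketch of the same normalization rule (not the bound method itself); the mapping entries here are illustrative stand-ins for the mapping file that pipreqs ships.

mapping = {"django-haystack": "haystack"}   # illustrative entries only

def normalize_name(name, mapping=None):
    # optional PyPI-name -> import-name mapping, then lower-case and '-' -> '_'
    if mapping:
        name = mapping.get(name, name)
    return name.lower().replace('-', '_')

print(normalize_name("Django"))                     # django
print(normalize_name("django-haystack", mapping))   # haystack
print(normalize_name("Flask-RESTFul"))              # flask_restful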
https://github.com/sofia-netsurv/python-netsurv/blob/429fb07a2b06cc505fdd9350148266a6b4e23e64/env/lib/python3.5/site-packages/isort/finders.py#L244-L254
from __future__ import absolute_import, division, print_function, unicode_literals import inspect import os import os.path import re import sys import sysconfig from fnmatch import fnmatch from glob import glob from .pie_slice import PY2 from .utils import chdir, exists_case_sensitive try: from pipreqs import pipreqs except ImportError: pipreqs = None try: from pip_api import parse_requirements except ImportError: parse_requirements = None try: from requirementslib import Pipfile except ImportError: Pipfile = None try: from functools import lru_cache except ImportError: from backports.functools_lru_cache import lru_cache KNOWN_SECTION_MAPPING = { 'STDLIB': 'STANDARD_LIBRARY', 'FUTURE': 'FUTURE_LIBRARY', 'FIRSTPARTY': 'FIRST_PARTY', 'THIRDPARTY': 'THIRD_PARTY', } class BaseFinder(object): def __init__(self, config, sections): self.config = config self.sections = sections class ForcedSeparateFinder(BaseFinder): def find(self, module_name): for forced_separate in self.config['forced_separate']: path_glob = forced_separate if not forced_separate.endswith('*'): path_glob = '%s*' % forced_separate if fnmatch(module_name, path_glob) or fnmatch(module_name, '.' + path_glob): return forced_separate class LocalFinder(BaseFinder): def find(self, module_name): if module_name.startswith("."): return self.sections.LOCALFOLDER class KnownPatternFinder(BaseFinder): def __init__(self, config, sections): super(KnownPatternFinder, self).__init__(config, sections) self.known_patterns = [] for placement in reversed(self.sections): known_placement = KNOWN_SECTION_MAPPING.get(placement, placement) config_key = 'known_{0}'.format(known_placement.lower()) known_patterns = self.config.get(config_key, []) known_patterns = [ pattern for known_pattern in known_patterns for pattern in self._parse_known_pattern(known_pattern) ] for known_pattern in known_patterns: regexp = '^' + known_pattern.replace('*', '.*').replace('?', '.?') + '$' self.known_patterns.append((re.compile(regexp), placement)) @staticmethod def _is_package(path): if PY2: return os.path.exists(os.path.join(path, '__init__.py')) else: return os.path.isdir(path) def _parse_known_pattern(self, pattern): if pattern.endswith(os.path.sep): patterns = [ filename for filename in os.listdir(pattern) if self._is_package(os.path.join(pattern, filename)) ] else: patterns = [pattern] return patterns def find(self, module_name): parts = module_name.split('.') module_names_to_check = ('.'.join(parts[:first_k]) for first_k in range(len(parts), 0, -1)) for module_name_to_check in module_names_to_check: for pattern, placement in self.known_patterns: if pattern.match(module_name_to_check): return placement class PathFinder(BaseFinder): def __init__(self, config, sections): super(PathFinder, self).__init__(config, sections) self.paths = [os.getcwd()] self.virtual_env = self.config.get('virtual_env') or os.environ.get('VIRTUAL_ENV') if self.virtual_env: self.virtual_env = os.path.realpath(self.virtual_env) self.virtual_env_src = False if self.virtual_env: self.virtual_env_src = '{0}/src/'.format(self.virtual_env) for path in glob('{0}/lib/python*/site-packages'.format(self.virtual_env)): if path not in self.paths: self.paths.append(path) for path in glob('{0}/lib/python*/*/site-packages'.format(self.virtual_env)): if path not in self.paths: self.paths.append(path) for path in glob('{0}/src/*'.format(self.virtual_env)): if os.path.isdir(path): self.paths.append(path) self.conda_env = self.config.get('conda_env') or os.environ.get('CONDA_PREFIX') if self.conda_env: 
self.conda_env = os.path.realpath(self.conda_env) for path in glob('{0}/lib/python*/site-packages'.format(self.conda_env)): if path not in self.paths: self.paths.append(path) for path in glob('{0}/lib/python*/*/site-packages'.format(self.conda_env)): if path not in self.paths: self.paths.append(path) self.stdlib_lib_prefix = os.path.normcase(sysconfig.get_paths()['stdlib']) if self.stdlib_lib_prefix not in self.paths: self.paths.append(self.stdlib_lib_prefix) self.ext_suffix = sysconfig.get_config_var("EXT_SUFFIX") or ".so" for path in sys.path[1:]: if path not in self.paths: self.paths.append(path) def find(self, module_name): for prefix in self.paths: package_path = "/".join((prefix, module_name.split(".")[0])) is_module = (exists_case_sensitive(package_path + ".py") or exists_case_sensitive(package_path + ".so") or exists_case_sensitive(package_path + self.ext_suffix) or exists_case_sensitive(package_path + "/__init__.py")) is_package = exists_case_sensitive(package_path) and os.path.isdir(package_path) if is_module or is_package: if 'site-packages' in prefix: return self.sections.THIRDPARTY if 'dist-packages' in prefix: return self.sections.THIRDPARTY if self.virtual_env and self.virtual_env_src in prefix: return self.sections.THIRDPARTY if self.conda_env and self.conda_env in prefix: return self.sections.THIRDPARTY if os.path.normcase(prefix).startswith(self.stdlib_lib_prefix): return self.sections.STDLIB return self.config['default_section'] class ReqsBaseFinder(BaseFinder): def __init__(self, config, sections, path='.'): super(ReqsBaseFinder, self).__init__(config, sections) self.path = path if self.enabled: self.mapping = self._load_mapping() self.names = self._load_names() @staticmethod def _load_mapping(): if not pipreqs: return path = os.path.dirname(inspect.getfile(pipreqs)) path = os.path.join(path, 'mapping') with open(path) as f: return dict(line.strip().split(":")[::-1] for line in f) def _load_names(self): names = [] for path in self._get_files(): for name in self._get_names(path): names.append(self._normalize_name(name)) return names @staticmethod def _get_parents(path): prev = '' while path != prev: prev = path yield path path = os.path.dirname(path) def _get_files(self): path = os.path.abspath(self.path) if os.path.isfile(path): path = os.path.dirname(path) for path in self._get_parents(path): for file_path in self._get_files_from_dir(path): yield file_path
MIT License
tensorflow/model-analysis
tensorflow_model_analysis/utils/util.py
to_ragged_tensor_value
python
def to_ragged_tensor_value(
    ragged_tensor: tf.RaggedTensor) -> types.RaggedTensorValue:
  nested_row_splits = []
  for splits in ragged_tensor.nested_row_splits:
    nested_row_splits.append(to_numpy(splits))
  return types.RaggedTensorValue(
      to_numpy(ragged_tensor.flat_values), nested_row_splits)
Converts ragged tensor to RaggedTensorValue.
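A hedged usage sketch: converting a small tf.RaggedTensor yields a RaggedTensorValue wrapping plain numpy arrays for the flat values and the row splits; it assumes the function above is in scope.

import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [], [3]])
value = to_ragged_tensor_value(rt)
print(value)  # wraps flat values [1 2 3] and row splits [0 2 2 3] as numpy arrays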
https://github.com/tensorflow/model-analysis/blob/6814617c50e073f8d039b96b03b19fef39fa0008/tensorflow_model_analysis/utils/util.py#L67-L74
import collections import inspect import sys import traceback from typing import Any, List, Mapping, MutableMapping, Optional, Text, Union import numpy as np import six import tensorflow as tf from tensorflow_model_analysis import constants from tensorflow_model_analysis import types KEY_SEPARATOR = '__' KEYS_SUFFIX = 'keys' VALUES_SUFFIX = 'values' def is_sparse_or_ragged_tensor_value(tensor: Any) -> bool: return (isinstance(tensor, types.SparseTensorValue) or isinstance(tensor, types.RaggedTensorValue) or isinstance(tensor, tf.compat.v1.SparseTensorValue)) def to_numpy(tensor: Any) -> np.ndarray: if isinstance(tensor, np.ndarray): return tensor elif hasattr(tensor, 'numpy') and callable(tensor.numpy): return tensor.numpy() else: return np.array(tensor) def to_sparse_tensor_value( sparse_tensor: Union[tf.SparseTensor, tf.compat.v1.SparseTensorValue] ) -> types.SparseTensorValue: return types.SparseTensorValue( to_numpy(sparse_tensor.values), to_numpy(sparse_tensor.indices), to_numpy(sparse_tensor.dense_shape))
Apache License 2.0
openstack/tempest-lib
tempest_lib/common/rest_client.py
RestClient.get_headers
python
def get_headers(self, accept_type=None, send_type=None):
    if accept_type is None:
        accept_type = self._get_type()
    if send_type is None:
        send_type = self._get_type()
    return {'Content-Type': 'application/%s' % send_type,
            'Accept': 'application/%s' % accept_type}
Return the default headers which will be used with outgoing requests.

:param str accept_type: The media type to use for the Accept header; if
    one isn't provided, the object var TYPE will be used
:param str send_type: The media type to use for the Content-Type header;
    if one isn't provided, the object var TYPE will be used
:rtype: dict
:return: The dictionary of headers which can be used in the headers dict
    for outgoing requests
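A hedged sketch of the resulting headers, assuming `client` is an already-constructed RestClient whose TYPE is the default "json".

client.get_headers()
# {'Content-Type': 'application/json', 'Accept': 'application/json'}
client.get_headers(accept_type='xml')
# {'Content-Type': 'application/json', 'Accept': 'application/xml'}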
https://github.com/openstack/tempest-lib/blob/023426894a4f72d906ed6f79c55ed7152a732b44/tempest_lib/common/rest_client.py#L103-L121
import collections import logging as real_logging import re import time import jsonschema from oslo_log import log as logging from oslo_serialization import jsonutils as json import six from tempest_lib.common import http from tempest_lib.common.utils import misc as misc_utils from tempest_lib import exceptions MAX_RECURSION_DEPTH = 2 HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206, 207) HTTP_REDIRECTION = (300, 301, 302, 303, 304, 305, 306, 307) JSONSCHEMA_VALIDATOR = jsonschema.Draft4Validator FORMAT_CHECKER = jsonschema.draft4_format_checker class RestClient(object): TYPE = "json" api_version = None LOG = logging.getLogger(__name__) def __init__(self, auth_provider, service, region, endpoint_type='publicURL', build_interval=1, build_timeout=60, disable_ssl_certificate_validation=False, ca_certs=None, trace_requests=''): self.auth_provider = auth_provider self.service = service self.region = region self.endpoint_type = endpoint_type self.build_interval = build_interval self.build_timeout = build_timeout self.trace_requests = trace_requests self._skip_path = False self.general_header_lc = set(('cache-control', 'connection', 'date', 'pragma', 'trailer', 'transfer-encoding', 'via', 'warning')) self.response_header_lc = set(('accept-ranges', 'age', 'etag', 'location', 'proxy-authenticate', 'retry-after', 'server', 'vary', 'www-authenticate')) dscv = disable_ssl_certificate_validation self.http_obj = http.ClosingHttp( disable_ssl_certificate_validation=dscv, ca_certs=ca_certs) def _get_type(self): return self.TYPE
Apache License 2.0
rberenguel/motllo
motllo/markdown.py
text_tree
python
def text_tree(
    path: Path,
    ignore_globs: Optional[List[str]] = None,
    include_globs: Optional[List[str]] = None,
):
    structure = build_tree(path, ignore_globs, include_globs)
    return "\n".join(list(tree(structure)))
Generate the textual tree representation only
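A small usage sketch, assuming the current directory is a project root; full_gitignore comes from the surrounding module shown below.

from pathlib import Path

ignore = full_gitignore(Path("."))               # respect the project's .gitignore
print(text_tree(Path("."), ignore_globs=ignore))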
https://github.com/rberenguel/motllo/blob/5ef3c378799f8825a2121e862ee852e01723354e/motllo/markdown.py#L115-L122
from pathlib import Path from typing import List, Optional from motllo.ops import Folder, tree, tree_links from motllo.traverser import Traverser def gitignore(path: Path, for_path=True): if not path.exists(): return [] globs = [] with path.open() as gitignore_file: lines = gitignore_file.read().split("\n") clean_globs = [l for l in lines if not l.startswith("#") and l != ""] globs = [f"{g}/*" if g.endswith("**") else g for g in clean_globs] if for_path: globs = [f"{path.parent}/{g}" for g in globs] return globs def global_gitignore(): return gitignore(Path.home() / ".gitignore_global", for_path=False) def project_gitignore(path: Path): ignores = [] for item in path.iterdir(): if item.name == ".gitignore": ignores += gitignore(item, for_path=True) if item.is_dir(): ignores += project_gitignore(item) return ignores def full_gitignore(path: Path): return project_gitignore(path) + global_gitignore() SUFFIX_TO_LANG = { "py": "python", "sbt": "scala", "scala": "scala", "toml": "toml", "json": "json", "lock": "toml", "yaml": "yaml", } LANG_TO_ELLIPSIS = { "python": "# ...", "scala": "// ...", "toml": "[...]", "json": "...", } def language(suffix): return SUFFIX_TO_LANG.get(suffix, "") def ellipsis(suffix): return LANG_TO_ELLIPSIS.get(language(suffix), "...") def build_file_markdown(current: Folder, base="", max_length=15): markdown: List[str] = [] for item in current.iterdir(): if item.is_dir(): markdown += build_file_markdown( item, base=base + "/" + item.basename, max_length=max_length ) else: if base.startswith("/"): base = base[1:] if base == "": basename = f"{item.name}" else: basename = f"{base}/{item.name}" markdown += [""] + [f"# `{basename}`"] if item.contents is not None: markdown += [""] + [f"```{language(item.suffix)}"] if item.suffix != "md": if len(item.contents) > max_length >= 0: markdown += ( item.contents[0 : max_length - 1] + [""] + [ellipsis(item.suffix)] ) else: markdown += item.contents else: markdown += [ "Content from Markdown files is ignored, since the output would break parsing" ] markdown += ["```"] return markdown def build_tree( path: Path, ignore_globs: Optional[List[str]], include_globs: Optional[List[str]] ): structure = Traverser(ignore_globs=ignore_globs, include_globs=include_globs)(path) structure = structure.prune() return structure
MIT License
google/flax
examples/mnist/train.py
get_datasets
python
def get_datasets():
  ds_builder = tfds.builder('mnist')
  ds_builder.download_and_prepare()
  train_ds = tfds.as_numpy(ds_builder.as_dataset(split='train', batch_size=-1))
  test_ds = tfds.as_numpy(ds_builder.as_dataset(split='test', batch_size=-1))
  train_ds['image'] = jnp.float32(train_ds['image']) / 255.
  test_ds['image'] = jnp.float32(test_ds['image']) / 255.
  return train_ds, test_ds
Load MNIST train and test datasets into memory.
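A hedged usage sketch: the first call downloads MNIST through TFDS; images come back as float32 arrays scaled to [0, 1] and labels as integer class ids.

train_ds, test_ds = get_datasets()
print(train_ds['image'].shape)   # (60000, 28, 28, 1)
print(test_ds['label'].shape)    # (10000,)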
https://github.com/google/flax/blob/48b34ab87c7d20afc567f6e0fe5d67e423cf08bc/examples/mnist/train.py#L97-L105
from flax import linen as nn from flax.metrics import tensorboard from flax.training import train_state import jax import jax.numpy as jnp import ml_collections import numpy as np import optax import tensorflow_datasets as tfds class CNN(nn.Module): @nn.compact def __call__(self, x): x = nn.Conv(features=32, kernel_size=(3, 3))(x) x = nn.relu(x) x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2)) x = nn.Conv(features=64, kernel_size=(3, 3))(x) x = nn.relu(x) x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2)) x = x.reshape((x.shape[0], -1)) x = nn.Dense(features=256)(x) x = nn.relu(x) x = nn.Dense(features=10)(x) return x @jax.jit def apply_model(state, images, labels): def loss_fn(params): logits = CNN().apply({'params': params}, images) one_hot = jax.nn.one_hot(labels, 10) loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot)) return loss, logits grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (loss, logits), grads = grad_fn(state.params) accuracy = jnp.mean(jnp.argmax(logits, -1) == labels) return grads, loss, accuracy @jax.jit def update_model(state, grads): return state.apply_gradients(grads=grads) def train_epoch(state, train_ds, batch_size, rng): train_ds_size = len(train_ds['image']) steps_per_epoch = train_ds_size // batch_size perms = jax.random.permutation(rng, len(train_ds['image'])) perms = perms[:steps_per_epoch * batch_size] perms = perms.reshape((steps_per_epoch, batch_size)) epoch_loss = [] epoch_accuracy = [] for perm in perms: batch_images = train_ds['image'][perm, ...] batch_labels = train_ds['label'][perm, ...] grads, loss, accuracy = apply_model(state, batch_images, batch_labels) state = update_model(state, grads) epoch_loss.append(loss) epoch_accuracy.append(accuracy) train_loss = np.mean(epoch_loss) train_accuracy = np.mean(epoch_accuracy) return state, train_loss, train_accuracy
Apache License 2.0
yuzhimanhua/multi-bioner
model/utils.py
read_corpus
python
def read_corpus(lines):
    features = list()
    labels = list()
    tmp_fl = list()
    tmp_ll = list()
    for line in lines:
        if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
            line = line.rstrip('\n').split()
            tmp_fl.append(line[0])
            tmp_ll.append(line[-1])
        elif len(tmp_fl) > 0:
            features.append(tmp_fl)
            labels.append(tmp_ll)
            tmp_fl = list()
            tmp_ll = list()
    if len(tmp_fl) > 0:
        features.append(tmp_fl)
        labels.append(tmp_ll)
    return features, labels
convert corpus into features and labels
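A usage sketch with a few CoNLL-style lines: blank lines split sentences and -DOCSTART- headers are skipped, so the first column becomes the features and the last column becomes the labels.

lines = [
    "-DOCSTART- -X- O O\n",
    "\n",
    "EU NNP B-ORG\n",
    "rejects VBZ O\n",
    "\n",
    "Peter NNP B-PER\n",
    "\n",
]
features, labels = read_corpus(lines)
print(features)   # [['EU', 'rejects'], ['Peter']]
print(labels)     # [['B-ORG', 'O'], ['B-PER']]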
https://github.com/yuzhimanhua/multi-bioner/blob/c2a1f057642bb188561721be50f89fd4a2deeabc/model/utils.py#L212-L234
import codecs import csv import itertools from functools import reduce import numpy as np import shutil import torch import json import torch.nn as nn import torch.nn.init from model.ner_dataset import * zip = getattr(itertools, 'izip', zip) def to_scalar(var): return var.view(-1).data.tolist()[0] def argmax(vec): _, idx = torch.max(vec, 1) return to_scalar(idx) def log_sum_exp(vec, m_size): _, idx = torch.max(vec, 1) max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) return max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1, m_size) def switch(vec1, vec2, mask): catvec = torch.cat([vec1.view(-1, 1), vec2.view(-1, 1)], dim=1) switched_vec = torch.gather(catvec, 1, mask.long().view(-1, 1)) return switched_vec.view(-1) def encode2char_safe(input_lines, char_dict): unk = char_dict['<u>'] forw_lines = [list(map(lambda m: list(map(lambda t: char_dict.get(t, unk), m)), line)) for line in input_lines] return forw_lines def concatChar(input_lines, char_dict): features = [[char_dict[' ']] + list(reduce(lambda x, y: x + [char_dict[' ']] + y, sentence)) + [char_dict['\n']] for sentence in input_lines] return features def encode_safe(input_lines, word_dict, unk): lines = list(map(lambda t: list(map(lambda m: word_dict.get(m, unk), t)), input_lines)) return lines def encode(input_lines, word_dict): lines = list(map(lambda t: list(map(lambda m: word_dict[m], t)), input_lines)) return lines def encode2Tensor(input_lines, word_dict, unk): lines = list(map(lambda t: torch.LongTensor(list(map(lambda m: word_dict.get(m, unk), t))), input_lines)) return lines def generate_corpus_char(lines, feature_map, label_map, char_count, c_thresholds=1, if_shrink_w_feature=False, w_thresholds=1): features, labels, feature_map, label_map = generate_corpus(lines, feature_map, label_map, if_shrink_feature=if_shrink_w_feature, thresholds=w_thresholds) for feature in features: for word in feature: for tup in word: if tup not in char_count: char_count[tup] = len(char_count) else: char_count[tup] += 1 return features, labels, feature_map, label_map, char_count def shrink_features(feature_map, features, thresholds): feature_count = {k: 0 for (k, v) in iter(feature_map.items())} for feature_list in features: for feature in feature_list: feature_count[feature] += 1 shrinked_feature_count = [k for (k, v) in iter(feature_count.items()) if v >= thresholds] feature_map = {shrinked_feature_count[ind]: (ind + 1) for ind in range(0, len(shrinked_feature_count))} feature_map['<unk>'] = 0 if '<eof>' not in feature_map: feature_map['<eof>'] = len(feature_map) return feature_map def generate_corpus(lines, feature_map, label_map, if_shrink_feature=False, thresholds=1): features = list() labels = list() tmp_fl = list() tmp_ll = list() for line in lines: if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')): line = line.rstrip('\n').split() tmp_fl.append(line[0]) if line[0] not in feature_map: feature_map[line[0]] = len(feature_map) + 1 tmp_ll.append(line[-1]) if line[-1] not in label_map: label_map[line[-1]] = len(label_map) elif len(tmp_fl) > 0: features.append(tmp_fl) labels.append(tmp_ll) tmp_fl = list() tmp_ll = list() if len(tmp_fl) > 0: features.append(tmp_fl) labels.append(tmp_ll) if '<start>' not in label_map: label_map['<start>'] = len(label_map) if '<pad>' not in label_map: label_map['<pad>'] = len(label_map) if if_shrink_feature: feature_map = shrink_features(feature_map, features, thresholds) else: feature_map['<unk>'] = 0 if 
'<eof>' not in feature_map: feature_map['<eof>'] = len(feature_map) return features, labels, feature_map, label_map
Apache License 2.0
react-native-skia/react-native-skia
build/android/gyp/compile_resources.py
_ParseArgs
python
def _ParseArgs(args): parser, input_opts, output_opts = resource_utils.ResourceArgsParser() input_opts.add_argument( '--aapt2-path', required=True, help='Path to the Android aapt2 tool.') input_opts.add_argument( '--android-manifest', required=True, help='AndroidManifest.xml path.') input_opts.add_argument( '--expected-file', help='Expected contents for the check. If' '--android-manifest-verify-diff-base is set, this is a diff file. If' 'not, this is a AndroidManifest file.') input_opts.add_argument( '--android-manifest-normalized', help='Normalized manifest.') input_opts.add_argument( '--android-manifest-expectations-failure-file', help='Write to this file if expected manifest contents do not match ' 'final manifest contents.') input_opts.add_argument( '--fail-on-expectations', action="store_true", help='When passed, fails the build on AndroidManifest expectation ' 'mismatches.') input_opts.add_argument( '--expected-manifest-base-expectation', help='When we expect the actual normalized manifest is different from' 'the file from --android-manifest-expected, this file specifies the' 'difference.') input_opts.add_argument( '--only-verify-expectations', action='store_true', help='If passed, only verify the android manifest expectation and exit.') input_opts.add_argument( '--r-java-root-package-name', default='base', help='Short package name for this target\'s root R java file (ex. ' 'input of "base" would become gen.base_module). Defaults to "base".') group = input_opts.add_mutually_exclusive_group() group.add_argument( '--shared-resources', action='store_true', help='Make all resources in R.java non-final and allow the resource IDs ' 'to be reset to a different package index when the apk is loaded by ' 'another application at runtime.') group.add_argument( '--app-as-shared-lib', action='store_true', help='Same as --shared-resources, but also ensures all resource IDs are ' 'directly usable from the APK loaded as an application.') input_opts.add_argument( '--package-id', type=int, help='Decimal integer representing custom package ID for resources ' '(instead of 127==0x7f). Cannot be used with --shared-resources.') input_opts.add_argument( '--package-name', help='Package name that will be used to create R class.') input_opts.add_argument( '--rename-manifest-package', help='Package name to force AAPT to use.') input_opts.add_argument( '--arsc-package-name', help='Package name to set in manifest of resources.arsc file. This is ' 'only used for apks under test.') input_opts.add_argument( '--shared-resources-allowlist', help='An R.txt file acting as a allowlist for resources that should be ' 'non-final and have their package ID changed at runtime in R.java. ' 'Implies and overrides --shared-resources.') input_opts.add_argument( '--shared-resources-allowlist-locales', default='[]', help='Optional GN-list of locales. If provided, all strings corresponding' ' to this locale list will be kept in the final output for the ' 'resources identified through --shared-resources-allowlist, even ' 'if --locale-allowlist is being used.') input_opts.add_argument( '--use-resource-ids-path', help='Use resource IDs generated by aapt --emit-ids.') input_opts.add_argument( '--extra-main-r-text-files', help='Additional R.txt files that will be added to the root R.java file, ' 'but not packaged in the generated resources.arsc. 
If these resources ' 'entries contain duplicate resources with the generated R.txt file, they ' 'must be identical.') input_opts.add_argument( '--support-zh-hk', action='store_true', help='Use zh-rTW resources for zh-rHK.') input_opts.add_argument( '--debuggable', action='store_true', help='Whether to add android:debuggable="true".') input_opts.add_argument('--version-code', help='Version code for apk.') input_opts.add_argument('--version-name', help='Version name for apk.') input_opts.add_argument( '--min-sdk-version', required=True, help='android:minSdkVersion for APK.') input_opts.add_argument( '--target-sdk-version', required=True, help="android:targetSdkVersion for APK.") input_opts.add_argument( '--max-sdk-version', help="android:maxSdkVersion expected in AndroidManifest.xml.") input_opts.add_argument( '--manifest-package', help='Package name of the AndroidManifest.xml.') input_opts.add_argument( '--locale-allowlist', default='[]', help='GN list of languages to include. All other language configs will ' 'be stripped out. List may include a combination of Android locales ' 'or Chrome locales.') input_opts.add_argument( '--resource-exclusion-regex', default='', help='File-based filter for resources (applied before compiling)') input_opts.add_argument( '--resource-exclusion-exceptions', default='[]', help='GN list of globs that say which files to include even ' 'when --resource-exclusion-regex is set.') input_opts.add_argument( '--values-filter-rules', help='GN list of source_glob:regex for filtering resources after they ' 'are compiled. Use this to filter out entries within values/ files.') input_opts.add_argument('--png-to-webp', action='store_true', help='Convert png files to webp format.') input_opts.add_argument('--webp-binary', default='', help='Path to the cwebp binary.') input_opts.add_argument( '--webp-cache-dir', help='The directory to store webp image cache.') input_opts.add_argument( '--no-xml-namespaces', action='store_true', help='Whether to strip xml namespaces from processed xml resources.') input_opts.add_argument( '--short-resource-paths', action='store_true', help='Whether to shorten resource paths inside the apk or module.') input_opts.add_argument( '--strip-resource-names', action='store_true', help='Whether to strip resource names from the resource table of the apk ' 'or module.') output_opts.add_argument('--arsc-path', help='Apk output for arsc format.') output_opts.add_argument('--proto-path', help='Apk output for proto format.') group = input_opts.add_mutually_exclusive_group() group.add_argument( '--optimized-arsc-path', help='Output for `aapt2 optimize` for arsc format (enables the step).') group.add_argument( '--optimized-proto-path', help='Output for `aapt2 optimize` for proto format (enables the step).') input_opts.add_argument( '--resources-config-path', help='Path to aapt2 resources config file.') output_opts.add_argument( '--info-path', help='Path to output info file for the partial apk.') output_opts.add_argument( '--srcjar-out', required=True, help='Path to srcjar to contain generated R.java.') output_opts.add_argument('--r-text-out', help='Path to store the generated R.txt file.') output_opts.add_argument( '--proguard-file', help='Path to proguard.txt generated file.') output_opts.add_argument( '--proguard-file-main-dex', help='Path to proguard.txt generated file for main dex.') output_opts.add_argument( '--emit-ids-out', help='Path to file produced by aapt2 --emit-ids.') output_opts.add_argument( '--resources-path-map-out-path', help='Path to file 
produced by aapt2 that maps original resource paths ' 'to shortened resource paths inside the apk or module.') input_opts.add_argument( '--is-bundle-module', action='store_true', help='Whether resources are being generated for a bundle module.') options = parser.parse_args(args) resource_utils.HandleCommonOptions(options) options.locale_allowlist = build_utils.ParseGnList(options.locale_allowlist) options.shared_resources_allowlist_locales = build_utils.ParseGnList( options.shared_resources_allowlist_locales) options.resource_exclusion_exceptions = build_utils.ParseGnList( options.resource_exclusion_exceptions) options.values_filter_rules = build_utils.ParseGnList( options.values_filter_rules) options.extra_main_r_text_files = build_utils.ParseGnList( options.extra_main_r_text_files) if options.optimized_proto_path and not options.proto_path: parser.error('--optimized-proto-path requires --proto-path') if not options.arsc_path and not options.proto_path: parser.error('One of --arsc-path or --proto-path is required.') if options.resources_path_map_out_path and not options.short_resource_paths: parser.error( '--resources-path-map-out-path requires --short-resource-paths') if options.package_id and options.shared_resources: parser.error('--package-id and --shared-resources are mutually exclusive') return options
Parses command line options.

Returns:
  An options object as from argparse.ArgumentParser.parse_args()
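An illustrative entry-point sketch only: a real invocation needs the required flags listed above (and whatever the shared resource_utils parser adds), so the argument list is assumed, not exhaustive.

import sys

def main(args):
    # --aapt2-path, --android-manifest, --min-sdk-version, --target-sdk-version,
    # --srcjar-out and one of --arsc-path/--proto-path are required (see above).
    options = _ParseArgs(args)
    # ... compile resources using `options` ...

if __name__ == '__main__':
    main(sys.argv[1:])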
https://github.com/react-native-skia/react-native-skia/blob/91ecc74444b163f128541dbc1a42e27a9c0fb40b/build/android/gyp/compile_resources.py#L51-L286
import argparse import collections import contextlib import filecmp import hashlib import logging import os import re import shutil import subprocess import sys import tempfile import textwrap import zipfile from xml.etree import ElementTree from util import build_utils from util import diff_utils from util import manifest_utils from util import md5_check from util import parallel from util import protoresources from util import resource_utils _PNG_WEBP_EXCLUSION_PATTERN = re.compile('|'.join([ r'.*star_gray\.png', r'.*\.9\.png', r'.*daydream_icon_.*\.png' ]))
MIT License
google/personfinder
tools/pfif-tools/app/controller.py
PfifController.get_file
python
def get_file(self, file_number=1, return_filename=False):
    paste_name = 'pfif_xml_' + str(file_number)
    upload_name = 'pfif_xml_file_' + str(file_number)
    url_name = 'pfif_xml_url_' + str(file_number)
    desired_file = None
    filename = None
    if self.request.POST.get(paste_name):
        desired_file = StringIO(self.request.POST[paste_name])
    elif upload_name in self.request.FILES:
        desired_file = StringIO(self.request.FILES[upload_name].read())
        filename = self.request.FILES[upload_name].name
    elif self.request.POST.get(url_name):
        url = self.request.POST[url_name]
        desired_file = StringIO(utils.open_url(url).read())
        filename = url
    if desired_file is not None:
        if return_filename and filename is not None:
            return (desired_file, filename)
        elif return_filename:
            return (desired_file, None)
        else:
            return desired_file
    else:
        if return_filename:
            return (None, None)
        else:
            return None
Gets a file that was pasted in, uploaded, or given by a URL. If multiple files are provided, specify the number of the desired file as file_number. Returns None if there is no file. If return_filename is True, returns a tuple: (desired_file, filename).
https://github.com/google/personfinder/blob/475f4c0ce916036d39bae2d480cde07126550875/tools/pfif-tools/app/controller.py#L30-L63
from StringIO import StringIO from django.shortcuts import render from django.views import View import pfif_validator import pfif_diff import utils class PfifController(View):
Apache License 2.0
danielfrg/jupyterhub-kubernetes_spawner
kubernetes_spawner/swagger_client/models/v1_resource_quota_status.py
V1ResourceQuotaStatus.hard
python
def hard(self, hard):
    self._hard = hard
Sets the hard of this V1ResourceQuotaStatus.

Hard is the set of enforced hard limits for each named resource. More info:
http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota

:param hard: The hard of this V1ResourceQuotaStatus.
:type: str
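A minimal sketch: the generated swagger model declares `hard` as a plain string, so the setter simply stores whatever serialized quota representation is assigned; the value shown is only an example.

status = V1ResourceQuotaStatus()
status.hard = '{"cpu": "4", "memory": "8Gi"}'   # example value, not validated here
print(status.hard)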
https://github.com/danielfrg/jupyterhub-kubernetes_spawner/blob/15a2b63ef719f8c3ff83221333f7de69c1495512/kubernetes_spawner/swagger_client/models/v1_resource_quota_status.py#L64-L72
from pprint import pformat from six import iteritems class V1ResourceQuotaStatus(object): def __init__(self): self.swagger_types = { 'hard': 'str', 'used': 'str' } self.attribute_map = { 'hard': 'hard', 'used': 'used' } self._hard = None self._used = None @property def hard(self): return self._hard @hard.setter
Apache License 2.0
luna-klatzer/openhiven.py
openhivenpy/types/message.py
Message.mentions
python
def mentions(self) -> Optional[List[Mention]]:
    from .mention import Mention
    if type(self._mentions) is list:
        if len(self._mentions) > 0:
            if type(self._mentions[0]) is dict:
                mentions = []
                for d in self._mentions:
                    dict_ = {
                        'timestamp': self.timestamp,
                        'author': self.author,
                        'user': d
                    }
                    mention_data = Mention.format_obj_data(dict_)
                    mentions.append(
                        Mention(mention_data, client=self._client)
                    )
                self._mentions = mentions
        else:
            return []
        return self._mentions
    else:
        return None
Returns the mentions of the message
https://github.com/luna-klatzer/openhiven.py/blob/9184d6a77bde0ee3847dcb9ea7d399217a36c95d/openhivenpy/types/message.py#L376-L399
from __future__ import annotations import asyncio import datetime import logging import sys from typing import Optional, List, Union from typing import TYPE_CHECKING from .hiven_type_schemas import MessageSchema, get_compiled_validator, DeletedMessageSchema from .. import utils from ..base_types import DataClassObject from ..exceptions import InvalidPassedDataError from ..utils import log_type_exception if TYPE_CHECKING: from .embed import Embed from .textroom import TextRoom from .user import User from .house import House from .mention import Mention from .attachment import Attachment from .. import HivenClient logger = logging.getLogger(__name__) __all__ = ['DeletedMessage', 'Message'] class DeletedMessage(DataClassObject): _json_schema: dict = DeletedMessageSchema json_validator = get_compiled_validator(_json_schema) @log_type_exception('DeletedMessage') def __init__(self, data: dict, client: HivenClient): super().__init__() self._message_id = data.get('message_id') self._house_id = data.get('house_id') self._room_id = data.get('room_id') self._client = client def __str__(self): return f"Deleted message in room {self.room_id}" @classmethod def format_obj_data(cls, data: dict) -> dict: data = cls.validate(data) data['message_id'] = data['id'] return data @property def message_id(self) -> Optional[str]: return getattr(self, '_message_id') @property def house_id(self) -> Optional[str]: return getattr(self, '_house_id') @property def room_id(self) -> Optional[str]: return getattr(self, '_room_id') class Message(DataClassObject): _json_schema: dict = MessageSchema json_validator = get_compiled_validator(_json_schema) @log_type_exception('Message') def __init__(self, data: dict, client: HivenClient): super().__init__() self._id = data.get('id') self._author = data.get('author') self._author_id = data.get('author_id') self._attachment: Union[dict, Attachment] = data.get('attachment') self._content = data.get('content') self._timestamp = data.get('timestamp') self._edited_at = data.get('edited_at') self._mentions = data.get('mentions') self._type = data.get('type') self._exploding = data.get('exploding') self._house_id = data.get('house_id') self._house = data.get('house') self._room_id = data.get('room_id') self._room = data.get('room') self._embed = data.get('embed') self._bucket = data.get('bucket') self._device_id = data.get('device_id') self._exploding_age = data.get('exploding_age') self._recipient_ids = data.get('recipient_ids') self._client = client def __str__(self) -> str: return f"<Message id='{self.id}' from '{self.author.name}'>" def __repr__(self) -> str: info = [ ('id', self.id), ('content', self.content), ('author', repr(self.author)), ('room', repr(self.room)), ('type', self.type), ('exploding', self.exploding), ('edited_at', self.edited_at) ] return '<Message {}>'.format(' '.join('%s=%s' % t for t in info)) @classmethod def format_obj_data(cls, data: dict) -> dict: data['type'] = utils.safe_convert(int, data.get('type'), None) data['bucket'] = utils.safe_convert(int, data.get('bucket'), None) data['exploding_age'] = utils.safe_convert(int, data.get('exploding_age'), None) data['timestamp'] = utils.safe_convert(int, data.get('timestamp')) data = cls.validate(data) if not data.get('room_id') and data.get('room'): room_ = data.pop('room') if type(room_) is dict: room_ = room_.get('id', None) elif isinstance(room_, DataClassObject): room_ = getattr(room_, 'id', None) elif type(data.get('room_id')) is str: room_ = data['room_id'] else: room_ = None if room_ is None: raise 
InvalidPassedDataError( "The passed room is not in the correct format!", data=data ) else: data['room_id'] = room_ if not data.get('house_id') and data.get('house'): house_ = data.pop('house') if type(house_) is dict: house_ = house_.get('id', None) elif isinstance(house_, DataClassObject): house_ = getattr(house_, 'id', None) elif type(data.get('house_id')) is str: house_ = data['house_id'] else: house_ = None data['house_id'] = house_ if not data.get('author_id') and data.get('author'): author = data.pop('author') if type(author) is dict: author = author.get('id', None) elif isinstance(author, DataClassObject): author = getattr(author, 'id', None) elif type(data.get('author_id')) is str: author = data['author_id'] else: author = None if author is None: raise InvalidPassedDataError( "The passed author is not in the correct format!", data=data ) else: data['author'] = author data['author'] = data['author_id'] data['house'] = data['house_id'] data['room'] = data['room_id'] data['device_id'] = utils.safe_convert( str, data.get('device_id'), None ) return data @property def id(self) -> Optional[str]: return getattr(self, '_id', None) @property def author_id(self) -> Optional[str]: return getattr(self, '_author_id', None) @property def author(self) -> Optional[User]: from .user import User if type(self._author) is str and self._author: author_id = self._author elif type(self.author_id) is str and self.author_id: author_id = self.author_id else: author_id = None if author_id: data = self._client.find_house(author_id) if data: self._author = User(data=data, client=self._client) return self._author else: return None elif type(self._author) is User: return self._author else: return None @property def timestamp(self) -> Optional[datetime.datetime]: if utils.convertible(int, self._timestamp): self._timestamp = datetime.datetime.fromtimestamp( utils.safe_convert(int, self._timestamp) / 1000 ) return self._timestamp elif type(self._timestamp) is datetime.datetime: return self._timestamp else: return None @property def type(self) -> Optional[int]: return getattr(self, '_type', None) @property def exploding(self) -> Optional[bool]: return getattr(self, '_exploding', None) @property def recipient_ids(self) -> Optional[List[str]]: return getattr(self, '_exploding', None) @property def edited_at(self) -> Optional[str]: return getattr(self, '_edited_at', None) @property def room(self) -> Optional[TextRoom]: from . 
import TextRoom if type(self._room) is str and self._room: room_id = self._room elif type(self.room_id) is str and self.room_id: room_id = self.room_id else: room_id = None if room_id: if self.recipient_ids is not None: data = self._client.find_private_room(room_id) if not data: data = self._client.find_private_group_room(room_id) else: data = self._client.find_room(room_id) if data: self._room = TextRoom(data=data, client=self._client) return self._room else: return None elif type(self._room) is TextRoom: return self._room else: return None @property def house(self) -> Optional[House]: from .house import House if type(self._house) is str and self._house: data = self._client.find_house(self.house_id) if data: self._house = House(data=data, client=self._client) return self._house else: return None elif type(self._house) is House: return self._house else: return None @property def attachment(self) -> Optional[Attachment]: from .attachment import Attachment if type(self._attachment) is dict and self._attachment: self._attachment: Union[Attachment, dict] = Attachment( data=self._attachment, client=self._client ) return self._attachment elif type(self._attachment) is Attachment: return self._attachment else: return None @property def content(self) -> Optional[str]: return getattr(self, '_content', None) @property
MIT License
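The Message properties in the context above follow a lazy-resolution pattern: a field first holds a plain id string and is swapped for a full object built from the client-side cache on first access. A generic, purely illustrative sketch of that pattern (Cache, User, and find_user here are hypothetical stand-ins, not names from the library):

class Cache:
    def __init__(self):
        self.users = {}  # id -> raw dict previously fetched from the API

    def find_user(self, user_id):
        return self.users.get(user_id)

class User:
    def __init__(self, data):
        self.id = data["id"]
        self.name = data["name"]

class Message:
    def __init__(self, data, cache):
        self._author = data.get("author_id")  # may start out as an id string
        self._cache = cache

    @property
    def author(self):
        # Resolve the id once and memoize the resulting object.
        if isinstance(self._author, str):
            raw = self._cache.find_user(self._author)
            if raw is None:
                return None
            self._author = User(raw)
        return self._author

cache = Cache()
cache.users["u1"] = {"id": "u1", "name": "alice"}
msg = Message({"author_id": "u1"}, cache)
print(msg.author.name)  # "alice"; later accesses reuse the cached User object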
trevor/calendarserver
calendarserver/tools/principals.py
getProxies
python
def getProxies(record):
    allProxies = {
        DelegateRecordType.readDelegateGroup: [],
        DelegateRecordType.writeDelegateGroup: [],
    }
    for recordType in allProxies.iterkeys():
        proxyGroup = yield record.service.recordWithShortName(
            recordType, record.uid
        )
        allProxies[recordType] = yield proxyGroup.members()
    returnValue(
        (
            allProxies[DelegateRecordType.readDelegateGroup],
            allProxies[DelegateRecordType.writeDelegateGroup]
        )
    )
Returns a tuple containing the records for read proxies and write proxies of the given record
https://github.com/trevor/calendarserver/blob/c9970b06a70445ca75b62e3d170c26bc897a035e/calendarserver/tools/principals.py#L601-L622
from __future__ import print_function from getopt import getopt, GetoptError import operator import os import sys import uuid from calendarserver.tools.cmdline import utilityMain, WorkerService from calendarserver.tools.util import ( recordForPrincipalID, prettyRecord ) from twext.who.directory import DirectoryRecord from twext.who.idirectory import RecordType, InvalidDirectoryRecordError from twisted.internet import reactor from twisted.internet.defer import inlineCallbacks, returnValue, succeed from twistedcaldav.config import config from txdav.who.delegates import ( addDelegate, removeDelegate, RecordType as DelegateRecordType ) from txdav.who.idirectory import AutoScheduleMode allowedAutoScheduleModes = { "default": None, "none": AutoScheduleMode.none, "accept-always": AutoScheduleMode.accept, "decline-always": AutoScheduleMode.decline, "accept-if-free": AutoScheduleMode.acceptIfFree, "decline-if-busy": AutoScheduleMode.declineIfBusy, "automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy, } def usage(e=None): if e: print(e) print("") name = os.path.basename(sys.argv[0]) print("usage: %s [options] action_flags principal [principal ...]" % (name,)) print(" %s [options] --list-principal-types" % (name,)) print(" %s [options] --list-principals type" % (name,)) print("") print(" Performs the given actions against the giving principals.") print("") print(" Principals are identified by one of the following:") print(" Type and shortname (eg.: users:wsanchez)") print(" A GUID (eg.: E415DBA7-40B5-49F5-A7CC-ACC81E4DEC79)") print("") print("options:") print(" -h --help: print this help and exit") print(" -f --config <path>: Specify caldavd.plist configuration path") print(" -v --verbose: print debugging information") print("") print("actions:") print(" --search <search-string>: search for matching principals") print(" --list-principal-types: list all of the known principal types") print(" --list-principals type: list all principals of the given type") print(" --list-read-proxies: list proxies with read-only access") print(" --list-write-proxies: list proxies with read-write access") print(" --list-proxies: list all proxies") print(" --list-proxy-for: principals this principal is a proxy for") print(" --add-read-proxy=principal: add a read-only proxy") print(" --add-write-proxy=principal: add a read-write proxy") print(" --remove-proxy=principal: remove a proxy") print(" --set-auto-schedule-mode={default|none|accept-always|decline-always|accept-if-free|decline-if-busy|automatic}: set auto-schedule mode") print(" --get-auto-schedule-mode: read auto-schedule mode") print(" --set-auto-accept-group=principal: set auto-accept-group") print(" --get-auto-accept-group: read auto-accept-group") print(" --add {locations|resources|addresses} full-name record-name UID: add a principal") print(" --remove: remove a principal") print(" --set-geo=url: set the geo: url for an address (e.g. 
geo:37.331741,-122.030333)") print(" --get-geo: get the geo: url for an address") print(" --set-street-address=streetaddress: set the street address string for an address") print(" --get-street-address: get the street address string for an address") print(" --set-address=guid: associate principal with an address (by guid)") print(" --get-address: get the associated address's guid") if e: sys.exit(64) else: sys.exit(0) class PrincipalService(WorkerService): function = None params = [] @inlineCallbacks def doWork(self): if self.function is not None: yield self.function(self.store, *self.params) def main(): try: (optargs, args) = getopt( sys.argv[1:], "a:hf:P:v", [ "help", "config=", "add=", "remove", "search=", "list-principal-types", "print-group-info", "list-principals=", "list-read-proxies", "list-write-proxies", "list-proxies", "list-proxy-for", "add-read-proxy=", "add-write-proxy=", "remove-proxy=", "set-auto-schedule-mode=", "get-auto-schedule-mode", "set-auto-accept-group=", "get-auto-accept-group", "set-geo=", "get-geo", "set-address=", "get-address", "set-street-address=", "get-street-address", "verbose", ], ) except GetoptError, e: usage(e) configFileName = None addType = None listPrincipalTypes = False listPrincipals = None searchPrincipals = None printGroupInfo = False principalActions = [] verbose = False for opt, arg in optargs: arg = arg.decode("utf-8") if opt in ("-h", "--help"): usage() elif opt in ("-v", "--verbose"): verbose = True elif opt in ("-f", "--config"): configFileName = arg elif opt in ("-a", "--add"): addType = arg elif opt in ("-r", "--remove"): principalActions.append((action_removePrincipal,)) elif opt in ("", "--list-principal-types"): listPrincipalTypes = True elif opt in ("", "--print-group-info"): printGroupInfo = True elif opt in ("", "--list-principals"): listPrincipals = arg elif opt in ("", "--search"): searchPrincipals = arg elif opt in ("", "--list-read-proxies"): principalActions.append((action_listProxies, "read")) elif opt in ("", "--list-write-proxies"): principalActions.append((action_listProxies, "write")) elif opt in ("-L", "--list-proxies"): principalActions.append((action_listProxies, "read", "write")) elif opt in ("--list-proxy-for"): principalActions.append((action_listProxyFor, "read", "write")) elif opt in ("--add-read-proxy", "--add-write-proxy"): if "read" in opt: proxyType = "read" elif "write" in opt: proxyType = "write" else: raise AssertionError("Unknown proxy type") principalActions.append((action_addProxy, proxyType, arg)) elif opt in ("", "--remove-proxy"): principalActions.append((action_removeProxy, arg)) elif opt in ("", "--set-auto-schedule-mode"): try: if arg not in allowedAutoScheduleModes: raise ValueError("Unknown auto-schedule mode: {mode}".format( mode=arg)) autoScheduleMode = allowedAutoScheduleModes[arg] except ValueError, e: abort(e) principalActions.append((action_setAutoScheduleMode, autoScheduleMode)) elif opt in ("", "--get-auto-schedule-mode"): principalActions.append((action_getAutoScheduleMode,)) elif opt in ("", "--set-auto-accept-group"): principalActions.append((action_setAutoAcceptGroup, arg)) elif opt in ("", "--get-auto-accept-group"): principalActions.append((action_getAutoAcceptGroup,)) elif opt in ("", "--set-geo"): principalActions.append((action_setValue, u"geographicLocation", arg)) elif opt in ("", "--get-geo"): principalActions.append((action_getValue, u"geographicLocation")) elif opt in ("", "--set-street-address"): principalActions.append((action_setValue, u"streetAddress", arg)) elif opt in 
("", "--get-street-address"): principalActions.append((action_getValue, u"streetAddress")) elif opt in ("", "--set-address"): principalActions.append((action_setValue, u"associatedAddress", arg)) elif opt in ("", "--get-address"): principalActions.append((action_getValue, u"associatedAddress")) else: raise NotImplementedError(opt) if listPrincipalTypes: if args: usage("Too many arguments") function = runListPrincipalTypes params = () elif printGroupInfo: function = printGroupCacherInfo params = () elif addType: try: addType = matchStrings( addType, [ "locations", "resources", "addresses", "users", "groups" ] ) except ValueError, e: print(e) return try: fullName, shortName, uid = parseCreationArgs(args) except ValueError, e: print(e) return if fullName is not None: fullNames = [fullName] else: fullNames = () if shortName is not None: shortNames = [shortName] else: shortNames = () function = runAddPrincipal params = (addType, uid, shortNames, fullNames) elif listPrincipals: try: listPrincipals = matchStrings( listPrincipals, ["users", "groups", "locations", "resources", "addresses"] ) except ValueError, e: print(e) return if args: usage("Too many arguments") function = runListPrincipals params = (listPrincipals,) elif searchPrincipals: function = runSearch params = (searchPrincipals,) else: if not args: usage("No principals specified.") unicodeArgs = [a.decode("utf-8") for a in args] function = runPrincipalActions params = (unicodeArgs, principalActions) PrincipalService.function = function PrincipalService.params = params utilityMain(configFileName, PrincipalService, verbose=verbose) def runListPrincipalTypes(service, store): directory = store.directoryService() for recordType in directory.recordTypes(): print(directory.recordTypeToOldName(recordType)) return succeed(None) @inlineCallbacks def runListPrincipals(service, store, listPrincipals): directory = store.directoryService() recordType = directory.oldNameToRecordType(listPrincipals) try: records = list((yield directory.recordsWithRecordType(recordType))) if records: printRecordList(records) else: print("No records of type %s" % (listPrincipals,)) except InvalidDirectoryRecordError, e: usage(e) returnValue(None) @inlineCallbacks def runPrincipalActions(service, store, principalIDs, actions): directory = store.directoryService() for principalID in principalIDs: try: record = yield recordForPrincipalID(directory, principalID) except ValueError: record = None if record is None: sys.stderr.write("Invalid principal ID: %s\n" % (principalID,)) continue for action in actions: (yield action[0](store, record, *action[1:])) print("") @inlineCallbacks def runSearch(service, store, searchTerm): directory = store.directoryService() fields = [] for fieldName in ("fullNames", "emailAddresses"): fields.append((fieldName, searchTerm, True, "contains")) records = list((yield directory.recordsMatchingTokens(searchTerm.strip().split()))) if records: records.sort(key=operator.attrgetter('fullNames')) print("{n} matches found:".format(n=len(records))) for record in records: print( "\n{d} ({rt})".format( d=record.displayName, rt=record.recordType.name ) ) print(" UID: {u}".format(u=record.uid,)) print( " Record name{plural}: {names}".format( plural=("s" if len(record.shortNames) > 1 else ""), names=(", ".join(record.shortNames)) ) ) try: if record.emailAddresses: print( " Email{plural}: {emails}".format( plural=("s" if len(record.emailAddresses) > 1 else ""), emails=(", ".join(record.emailAddresses)) ) ) except AttributeError: pass else: print("No matches 
found") print("") @inlineCallbacks def runAddPrincipal(service, store, addType, uid, shortNames, fullNames): directory = store.directoryService() recordType = directory.oldNameToRecordType(addType) record = yield directory.recordWithUID(uid) if record is not None: print("UID already in use: {uid}".format(uid=uid)) returnValue(None) for shortName in shortNames: record = yield directory.recordWithShortName(recordType, shortName) if record is not None: print("Record name already in use: {name}".format(name=shortName)) returnValue(None) fields = { directory.fieldName.recordType: recordType, directory.fieldName.uid: uid, directory.fieldName.shortNames: shortNames, directory.fieldName.fullNames: fullNames, directory.fieldName.hasCalendars: True, directory.fieldName.hasContacts: True, } record = DirectoryRecord(directory, fields) yield record.service.updateRecords([record], create=True) print("Added '{name}'".format(name=fullNames[0])) @inlineCallbacks def action_removePrincipal(store, record): directory = store.directoryService() fullName = record.displayName shortNames = ",".join(record.shortNames) yield directory.removeRecords([record.uid]) print( "Removed '{full}' {shorts} {uid}".format( full=fullName, shorts=shortNames, uid=record.uid ) ) @inlineCallbacks def action_listProxies(store, record, *proxyTypes): directory = store.directoryService() for proxyType in proxyTypes: groupRecordType = { "read": directory.recordType.readDelegateGroup, "write": directory.recordType.writeDelegateGroup, }.get(proxyType) pseudoGroup = yield directory.recordWithShortName( groupRecordType, record.uid ) proxies = yield pseudoGroup.members() if proxies: print("%s proxies for %s:" % ( {"read": "Read-only", "write": "Read/write"}[proxyType], prettyRecord(record) )) printRecordList(proxies) print("") else: print("No %s proxies for %s" % (proxyType, prettyRecord(record))) @inlineCallbacks def action_listProxyFor(store, record, *proxyTypes): directory = store.directoryService() for proxyType in proxyTypes: groupRecordType = { "read": directory.recordType.readDelegatorGroup, "write": directory.recordType.writeDelegatorGroup, }.get(proxyType) pseudoGroup = yield directory.recordWithShortName( groupRecordType, record.uid ) proxies = yield pseudoGroup.members() if proxies: print("%s is a %s proxy for:" % ( prettyRecord(record), {"read": "Read-only", "write": "Read/write"}[proxyType] )) printRecordList(proxies) print("") else: print( "{r} is not a {t} proxy for anyone".format( r=prettyRecord(record), t={"read": "Read-only", "write": "Read/write"}[proxyType] ) ) @inlineCallbacks def _addRemoveProxy(msg, fn, store, record, proxyType, *proxyIDs): directory = store.directoryService() readWrite = (proxyType == "write") for proxyID in proxyIDs: proxyRecord = yield recordForPrincipalID(directory, proxyID) if proxyRecord is None: print("Invalid principal ID: %s" % (proxyID,)) else: txn = store.newTransaction() yield fn(txn, record, proxyRecord, readWrite) yield txn.commit() print( "{msg} {proxy} as a {proxyType} proxy for {record}".format( msg=msg, proxy=prettyRecord(proxyRecord), proxyType=proxyType, record=prettyRecord(record) ) ) @inlineCallbacks def action_addProxy(store, record, proxyType, *proxyIDs): yield _addRemoveProxy("Added", addDelegate, store, record, proxyType, *proxyIDs) @inlineCallbacks def action_removeProxy(store, record, *proxyIDs): yield _addRemoveProxy("Removed", removeDelegate, store, record, "write", *proxyIDs) yield _addRemoveProxy("Removed", removeDelegate, store, record, "read", *proxyIDs) 
@inlineCallbacks def setProxies(record, readProxyRecords, writeProxyRecords): proxyTypes = [ (DelegateRecordType.readDelegateGroup, readProxyRecords), (DelegateRecordType.writeDelegateGroup, writeProxyRecords), ] for recordType, proxyRecords in proxyTypes: if proxyRecords is None: continue proxyGroup = yield record.service.recordWithShortName( recordType, record.uid ) yield proxyGroup.setMembers(proxyRecords) @inlineCallbacks
Apache License 2.0
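getProxies in the entry above is written in Twisted's generator style: it uses yield and returnValue, which only work under the inlineCallbacks decorator, so a caller treats it as returning a Deferred that fires with the (readProxies, writeProxies) tuple. A minimal calling sketch, assuming record is a directory record obtained elsewhere and getProxies is imported from calendarserver.tools.principals:

from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def showProxies(record):
    # getProxies fires with a (readProxies, writeProxies) tuple of directory records.
    readProxies, writeProxies = yield getProxies(record)
    print("read proxies:", [p.uid for p in readProxies])
    print("write proxies:", [p.uid for p in writeProxies])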
dpressel/mead-baseline
layers/eight_mile/pytorch/layers.py
get_activation
python
def get_activation(name: str = "relu") -> nn.Module:
    if name is None or name == "ident":
        return nn.Identity()
    if name == "tanh":
        return nn.Tanh()
    if name == "gelu":
        return GeLU()
    if name == "hardtanh":
        return nn.Hardtanh()
    if name == "leaky_relu":
        return nn.LeakyReLU()
    if name == "prelu":
        return nn.PReLU()
    if name == "sigmoid":
        return nn.Sigmoid()
    if name == "log_sigmoid":
        return nn.LogSigmoid()
    if name == "log_softmax":
        return nn.LogSoftmax(dim=-1)
    if name == "softmax":
        return nn.Softmax(dim=-1)
    return nn.ReLU()
Get back an `nn.Module` by string name of the activation operator

:param name: A string name of the operation
:return: A module associated with that string
https://github.com/dpressel/mead-baseline/blob/ee6a7c154293be0f0d7d637e41efe9aabd3bbf80/layers/eight_mile/pytorch/layers.py#L314-L340
import copy import math import logging from typing import Dict, List, Optional, Tuple, Union import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.jit as jit import torch.autograd import contextlib import glob from eight_mile.utils import listify, Offsets, is_sequence, str2bool from eight_mile.utils import transition_mask as transition_mask_np MASK_FALSE = False logger = logging.getLogger("mead.layers") def sequence_mask(lengths: torch.Tensor, max_len: int = -1) -> torch.Tensor: lens = lengths.cpu() if max_len < 0: max_len_v = torch.max(lens) else: max_len_v = max_len row = torch.arange(0, max_len_v).type_as(lens).view(1, -1) col = lens.view(-1, 1) mask = row < col return mask def sequence_mask_mxlen(lengths: torch.Tensor, max_len: int) -> torch.Tensor: lens = lengths.cpu() max_len_v = max_len row = torch.arange(0, max_len_v).type_as(lens).view(1, -1) col = lens.view(-1, 1) mask = row < col return mask @torch.jit.script def truncate_mask_over_time(mask: torch.Tensor, x: torch.Tensor) -> torch.Tensor: Tout = x.shape[1] mask = mask[:, :Tout] return mask def vec_log_sum_exp(vec: torch.Tensor, dim: int) -> torch.Tensor: max_scores, idx = torch.max(vec, dim, keepdim=True) max_scores_broadcast = max_scores.expand_as(vec) return max_scores + torch.log(torch.sum(torch.exp(vec - max_scores_broadcast), dim, keepdim=True)) def unsort_batch(batch: torch.Tensor, perm_idx: torch.Tensor) -> torch.Tensor: perm_idx = perm_idx.to(batch.device) diff = len(batch.shape) - len(perm_idx.shape) extra_dims = [1] * diff perm_idx = perm_idx.view([-1] + extra_dims) return torch.scatter(torch.zeros_like(batch), 0, perm_idx.expand_as(batch), batch) def infer_lengths(tensor, dim=1): if len(tensor.shape) != 2: raise ValueError(f"infer_lengths only works with tensors wit two dims right now, got {len(tensor.shape)}") offsets = torch.arange(1, tensor.shape[dim] + 1, device=tensor.device, dtype=tensor.dtype).unsqueeze(1 - dim) non_pad_loc = (tensor != Offsets.PAD).to(tensor.dtype) return torch.argmax(non_pad_loc * offsets, dim=dim) + 1 def tensor_and_lengths(inputs) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if isinstance(inputs, (list, tuple)): in_tensor, lengths = inputs else: in_tensor = inputs lengths = None return in_tensor, lengths class VariationalDropout(nn.Module): def __init__(self, pdrop: float = 0.5, batch_first: bool = False): super().__init__() self.pdrop = pdrop self.batch_first = batch_first def extra_repr(self): return "p=%.1f" % self.pdrop def forward(self, input: torch.Tensor) -> torch.Tensor: if not self.training: return input if self.batch_first: dim0 = input.size(0) dim1 = 1 else: dim0 = 1 dim1 = input.size(1) mask = torch.zeros(dim0, dim1, input.size(2)).bernoulli_(1 - self.pdrop).to(input.device) mask = mask / self.pdrop return mask * input class SequenceLoss(nn.Module): def __init__(self, LossFn: nn.Module = nn.NLLLoss, avg: str = "token"): super().__init__() self.avg = avg if avg == "token": self.crit = LossFn(ignore_index=Offsets.PAD, reduction="mean") self._norm = self._no_norm else: self.crit = LossFn(ignore_index=Offsets.PAD, reduction="sum") self._norm = self._batch_norm def _batch_norm(self, loss, inputs): return loss / inputs.size()[0] def _no_norm(self, loss, inputs): return loss def forward(self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor: total_sz = targets.nelement() loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz)) return self._norm(loss, inputs) def extra_repr(self): return 
f"reduction={self.avg}" class LabelSmoothingLoss(nn.Module): def __init__(self, label_smoothing, ignore_index=0, reduction="none"): if not (0.0 < label_smoothing <= 1.0): raise ValueError(f"`label_smoothing` must be between 0.0 and 1.0, got {label_smoothing}") super().__init__() self.ignore_index = ignore_index self.label_smoothing = label_smoothing self.confidence = 1.0 - label_smoothing self.reduction = reduction if reduction != "mean" else "batchmean" def forward(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor: B, V = output.size() smoothed = torch.full((B, V), self.label_smoothing / (V - 2)) smoothed[:, self.ignore_index] = 0 smoothed = torch.scatter(smoothed, 1, target.unsqueeze(1), self.confidence) smoothed = smoothed.masked_fill_((target == self.ignore_index).unsqueeze(1), 0) return F.kl_div(output, smoothed, reduction=self.reduction) def extra_repr(self): return f"label_smoothing={self.label_smoothing}" class MeanPool1D(nn.Module): def __init__(self, outsz, batch_first=True): super().__init__() self.batch_first = batch_first self.reduction_dim = 1 if self.batch_first else 0 self.output_dim = outsz self.requires_length = True def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor: tensor, lengths = tensor_and_lengths(inputs) return torch.sum(tensor, self.reduction_dim, keepdim=False) / torch.unsqueeze(lengths, -1).to(tensor.dtype).to( tensor.device ) def extra_repr(self): return f"batch_first={self.batch_first}" class MaxPool1D(nn.Module): def __init__(self, outsz, batch_first=True): super().__init__() self.batch_first = batch_first self.reduction_dim = 1 if self.batch_first else 0 self.output_dim = outsz def forward(self, inputs: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]) -> torch.Tensor: tensor, lengths = tensor_and_lengths(inputs) if lengths is not None: mask = sequence_mask(lengths).to(tensor.device) mask = mask if self.batch_first else bth2tbh(mask) tensor = tensor.masked_fill(mask.unsqueeze(-1) == MASK_FALSE, -1e4) dmax, _ = torch.max(tensor, self.reduction_dim, keepdim=False) return dmax def extra_repr(self) -> str: return f"batch_first={self.batch_first}" class GeLU(nn.Module): def __init__(self): super().__init__() def forward(self, x): return torch.nn.functional.gelu(x)
Apache License 2.0
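A short usage sketch of get_activation from the entry above: the returned object is a standard nn.Module, so it can be called directly or dropped into nn.Sequential. The import path is taken from the file path in the row; tensor values are illustrative:

import torch
import torch.nn as nn
from eight_mile.pytorch.layers import get_activation

act = get_activation("tanh")  # returns nn.Tanh()
x = torch.randn(2, 4)
y = act(x)                    # same shape as x, values in (-1, 1)

# Unknown or default names fall through to ReLU, per the final return.
mlp = nn.Sequential(nn.Linear(4, 8), get_activation("leaky_relu"), nn.Linear(8, 2))
out = mlp(x)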
facelessuser/colorhelper
lib/coloraide/color/gamut/__init__.py
Fit.distance
python
def distance(color):
Get coordinates of the new gamut mapped color.
https://github.com/facelessuser/colorhelper/blob/dd4a67b9c90ade4c2cb29f49d1ce02978fa4feb0/lib/coloraide/color/gamut/__init__.py#L45-L46
from ... import util from ... spaces import Angle, Cylindrical, GamutBound from abc import ABCMeta, abstractmethod def clip_channels(color): channels = util.no_nan(color.coords()) gamut = color._space.RANGE fit = [] for i, value in enumerate(channels): a, b = gamut[i] is_bound = isinstance(gamut[i], GamutBound) if isinstance(a, Angle) and isinstance(b, Angle): fit.append(value % 360.0) continue if not is_bound: a = None b = None fit.append(util.clamp(value, a, b)) return fit class Fit(ABCMeta): @staticmethod @abstractmethod def name(): @staticmethod @abstractmethod
MIT License
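The Fit base in the entry above only declares the plugin interface: name and distance are abstract static methods. A purely hypothetical plugin that gamut-maps by clipping, reusing the module's clip_channels helper, might look like the sketch below; this shows the expected shape of an implementation and is not code from the library:

class ClipFit(Fit):
    """Illustrative gamut-mapping plugin: clip every bound channel into range."""

    @staticmethod
    def name():
        # Identifier the plugin would be looked up under (hypothetical).
        return "clip"

    @staticmethod
    def distance(color):
        # Return the clipped coordinates as the gamut-mapped result.
        return clip_channels(color)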
neurodiffgym/neurodiffeq
neurodiffeq/operators.py
cartesian_to_cylindrical
python
def cartesian_to_cylindrical(x, y, z):
    return torch.sqrt(x ** 2 + y ** 2), torch.atan2(y, x), z
Convert cartesian coordinates :math:`(x, y, z)` to cylindrical coordinates :math:`(\rho, \phi, z)`.
The input shapes of x, y, and z must be the same.
If the azimuthal angle :math:`\phi` is undefined, the default value will be 0.

:param x: The :math:`x`-component of cartesian coordinates.
:type x: `torch.Tensor`
:param y: The :math:`y`-component of cartesian coordinates.
:type y: `torch.Tensor`
:param z: The :math:`z`-component of cartesian coordinates.
:type z: `torch.Tensor`
:return: The :math:`\rho`-, :math:`\phi`-, and :math:`z`-component in cylindrical coordinates.
:rtype: tuple[`torch.Tensor`]
https://github.com/neurodiffgym/neurodiffeq/blob/ab670a1af2e58766849f3bc683f7e6b0a6444124/neurodiffeq/operators.py#L418-L432
import torch from torch import sin, cos from torch import autograd from .neurodiffeq import safe_diff as diff def _split_u_x(*us_xs): if len(us_xs) == 0 or len(us_xs) % 2 != 0: raise RuntimeError("Number of us and xs must be equal and positive") us = us_xs[:len(us_xs) // 2] xs = us_xs[len(us_xs) // 2:] return us, xs def grad(u, *xs): grads = [] for x, g in zip(xs, autograd.grad(u, xs, grad_outputs=torch.ones_like(u), create_graph=True, allow_unused=True)): if g is None: grads.append(torch.zeros_like(x, requires_grad=True)) else: grads.append(g.requires_grad_(True)) return grads def div(*us_xs): us, xs = _split_u_x(*us_xs) return sum(diff(u, x) for u, x in zip(us, xs)) def curl(u_x, u_y, u_z, x, y, z): dxy, dxz = grad(u_x, y, z) dyx, dyz = grad(u_y, x, z) dzx, dzy = grad(u_z, x, y) return dzy - dyz, dxz - dzx, dyx - dxy def laplacian(u, *xs): gs = grad(u, *xs) return sum(diff(g, x) for g, x in zip(gs, xs)) def vector_laplacian(u_x, u_y, u_z, x, y, z): return laplacian(u_x, x, y, z), laplacian(u_y, x, y, z), laplacian(u_z, x, y, z) def spherical_curl(u_r, u_theta, u_phi, r, theta, phi): ur_dth, ur_dph = grad(u_r, theta, phi) uth_dr, uth_dph = grad(u_theta, r, phi) uph_dr, uph_dth = grad(u_phi, r, theta) csc_th = 1 / sin(theta) r_inv = 1 / r curl_r = r_inv * (uph_dth + (u_phi * cos(theta) - uth_dph) * csc_th) curl_th = r_inv * (csc_th * ur_dph - u_phi) - uph_dr curl_ph = uth_dr + r_inv * (u_theta - ur_dth) return curl_r, curl_th, curl_ph def spherical_grad(u, r, theta, phi): u_dr, u_dth, u_dph = grad(u, r, theta, phi) r_inv = 1 / r return u_dr, u_dth * r_inv, u_dph * r_inv / sin(theta) def spherical_div(u_r, u_theta, u_phi, r, theta, phi): sin_th = sin(theta) return (diff(u_r * r ** 2, r) / r + (diff(u_theta * sin_th, theta) + diff(u_phi, phi)) / sin_th) / r def spherical_laplacian(u, r, theta, phi): u_dr, u_dth, u_dph = grad(u, r, theta, phi) sin_th = sin(theta) r2 = r ** 2 return (diff(r2 * u_dr, r) + diff(sin_th * u_dth, theta) / sin_th + diff(u_dph, phi) / sin_th ** 2) / r2 def spherical_vector_laplacian(u_r, u_theta, u_phi, r, theta, phi): ur_dr, ur_dth, ur_dph = grad(u_r, r, theta, phi) uth_dr, uth_dth, uth_dph = grad(u_theta, r, theta, phi) uph_dr, uph_dth, uph_dph = grad(u_phi, r, theta, phi) sin_th, cos_th = sin(theta), cos(theta) sin2_th = sin_th ** 2 r2 = r ** 2 scalar_lap_r = (diff(r2 * ur_dr, r) + diff(sin_th * ur_dth, theta) / sin_th + diff(ur_dph, phi) / sin2_th) / r2 scalar_lap_th = (diff(r2 * uth_dr, r) + diff(sin_th * uth_dth, theta) / sin_th + diff(uth_dph, phi) / sin2_th) / r2 scalar_lap_ph = (diff(r2 * uph_dr, r) + diff(sin_th * uph_dth, theta) / sin_th + diff(uph_dph, phi) / sin2_th) / r2 vec_lap_r = scalar_lap_r - 2 * (u_r + uth_dth + (cos_th * u_theta + uph_dph) / sin_th) / r2 vec_lap_th = scalar_lap_th + (2 * ur_dth - (u_theta + 2 * cos_th * uph_dph) / sin2_th) / r2 vec_lap_ph = scalar_lap_ph + ((2 * cos_th * uth_dph - u_phi) / sin_th + 2 * ur_dph) / (r2 * sin_th) return vec_lap_r, vec_lap_th, vec_lap_ph def spherical_to_cartesian(r, theta, phi): rho = r * sin(theta) return rho * cos(phi), rho * sin(phi), r * cos(theta) def cartesian_to_spherical(x, y, z): rho2 = x ** 2 + y ** 2 return torch.sqrt(rho2 + z ** 2), torch.atan2(torch.sqrt(rho2), z), torch.atan2(y, x) def cylindrical_grad(u, rho, phi, z): u_drho, u_dphi, u_dz = grad(u, rho, phi, z) return u_drho, u_dphi / rho, u_dz def cylindrical_div(u_rho, u_phi, u_z, rho, phi, z): return diff(u_rho, rho) + (u_rho + diff(u_phi, phi)) / rho + diff(u_z, z) def cylindrical_curl(u_rho, u_phi, u_z, rho, phi, z): urho_dphi, 
urho_dz = grad(u_rho, phi, z) uphi_drho, uphi_dz = grad(u_phi, rho, z) uz_drho, uz_dphi = grad(u_z, rho, phi) return ( uz_dphi / rho - uphi_dz, urho_dz - uz_drho, uphi_drho + (u_phi - urho_dphi) / rho ) def cylindrical_laplacian(u, rho, phi, z): u_drho, u_dphi, u_dz = grad(u, rho, phi, z) return diff(u_drho, rho) + u_drho / rho + diff(u_dphi, phi) / rho ** 2 + diff(u_dz, z) def cylindrical_vector_laplacian(u_rho, u_phi, u_z, rho, phi, z): rho2 = rho ** 2 urho_drho, urho_dphi, urho_dz = grad(u_rho, rho, phi, z) uphi_drho, uphi_dphi, uphi_dz = grad(u_phi, rho, phi, z) uz_drho, uz_dphi, uz_dz = grad(u_z, rho, phi, z) scalar_lap_rho = diff(urho_drho, rho) + urho_drho / rho + diff(urho_dphi, phi) / rho ** 2 + diff(urho_dz, z) scalar_lap_phi = diff(uphi_drho, rho) + uphi_drho / rho + diff(uphi_dphi, phi) / rho ** 2 + diff(uphi_dz, z) scalar_lap_z = diff(uz_drho, rho) + uz_drho / rho + diff(uz_dphi, phi) / rho ** 2 + diff(uz_dz, z) return ( scalar_lap_rho - (u_rho + 2 * uphi_dphi) / rho2, scalar_lap_phi + (2 * urho_dphi - u_phi) / rho2, scalar_lap_z, ) def cylindrical_to_cartesian(rho, phi, z): return rho * cos(phi), rho * sin(phi), z
MIT License
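A quick numeric check of cartesian_to_cylindrical from the entry above, on one hand-picked point: for (x, y, z) = (1, 1, 2) the cylindrical coordinates should be rho = sqrt(2), phi = pi/4, z = 2. The import path follows the file path in the row:

import math
import torch
from neurodiffeq.operators import cartesian_to_cylindrical

x = torch.tensor([1.0])
y = torch.tensor([1.0])
z = torch.tensor([2.0])

rho, phi, z_out = cartesian_to_cylindrical(x, y, z)
assert torch.allclose(rho, torch.tensor([math.sqrt(2.0)]))
assert torch.allclose(phi, torch.tensor([math.pi / 4]))
assert torch.allclose(z_out, z)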
voxel-fox-ltd/novus
discord/shard.py
ShardInfo.connect
python
async def connect(self) -> None:
    if not self.is_closed():
        return
    await self._parent.reconnect()
|coro|

Connects a shard.

If the shard is already connected, this does nothing.
https://github.com/voxel-fox-ltd/novus/blob/4b3a3f918b6212ef2167002c4dbfe910727c04b0/discord/shard.py#L265-L273
from __future__ import annotations import asyncio import logging import aiohttp from .state import AutoShardedConnectionState from .client import Client from .backoff import ExponentialBackoff from .gateway import * from .errors import ( ClientException, HTTPException, GatewayNotFound, ConnectionClosed, PrivilegedIntentsRequired, ) from .enums import Status from typing import TYPE_CHECKING, Any, Callable, Tuple, Type, Optional, List, Dict, TypeVar if TYPE_CHECKING: from .gateway import DiscordWebSocket from .activity import BaseActivity from .enums import Status EI = TypeVar('EI', bound='EventItem') __all__ = ( 'AutoShardedClient', 'ShardInfo', ) _log = logging.getLogger(__name__) class EventType: close = 0 reconnect = 1 resume = 2 identify = 3 terminate = 4 clean_close = 5 class EventItem: __slots__ = ('type', 'shard', 'error') def __init__(self, etype: int, shard: Optional['Shard'], error: Optional[Exception]) -> None: self.type: int = etype self.shard: Optional['Shard'] = shard self.error: Optional[Exception] = error def __lt__(self: EI, other: EI) -> bool: if not isinstance(other, EventItem): return NotImplemented return self.type < other.type def __eq__(self: EI, other: EI) -> bool: if not isinstance(other, EventItem): return NotImplemented return self.type == other.type def __hash__(self) -> int: return hash(self.type) class Shard: def __init__(self, ws: DiscordWebSocket, client: AutoShardedClient, queue_put: Callable[[EventItem], None]) -> None: self.ws: DiscordWebSocket = ws self._client: Client = client self._dispatch: Callable[..., None] = client.dispatch self._queue_put: Callable[[EventItem], None] = queue_put self.loop: asyncio.AbstractEventLoop = self._client.loop self._disconnect: bool = False self._reconnect = client._reconnect self._backoff: ExponentialBackoff = ExponentialBackoff() self._task: Optional[asyncio.Task] = None self._handled_exceptions: Tuple[Type[Exception], ...] 
= ( OSError, HTTPException, GatewayNotFound, ConnectionClosed, aiohttp.ClientError, asyncio.TimeoutError, ) @property def id(self) -> int: return self.ws.shard_id def launch(self) -> None: self._task = self.loop.create_task(self.worker()) def _cancel_task(self) -> None: if self._task is not None and not self._task.done(): self._task.cancel() async def close(self) -> None: self._cancel_task() await self.ws.close(code=1000) async def disconnect(self) -> None: await self.close() self._dispatch('shard_disconnect', self.id) async def _handle_disconnect(self, e: Exception) -> None: self._dispatch('disconnect') self._dispatch('shard_disconnect', self.id) if not self._reconnect: self._queue_put(EventItem(EventType.close, self, e)) return if self._client.is_closed(): return if isinstance(e, OSError) and e.errno in (54, 10054): exc = ReconnectWebSocket(self.id, resume=True) self._queue_put(EventItem(EventType.resume, self, exc)) return if isinstance(e, ConnectionClosed): if e.code == 4014: self._queue_put(EventItem(EventType.terminate, self, PrivilegedIntentsRequired(self.id))) return if e.code != 1000: self._queue_put(EventItem(EventType.close, self, e)) return retry = self._backoff.delay() _log.error('Attempting a reconnect for shard ID %s in %.2fs', self.id, retry, exc_info=e) await asyncio.sleep(retry) self._queue_put(EventItem(EventType.reconnect, self, e)) async def worker(self) -> None: while not self._client.is_closed(): try: await self.ws.poll_event() except ReconnectWebSocket as e: etype = EventType.resume if e.resume else EventType.identify self._queue_put(EventItem(etype, self, e)) break except self._handled_exceptions as e: await self._handle_disconnect(e) break except asyncio.CancelledError: break except Exception as e: self._queue_put(EventItem(EventType.terminate, self, e)) break async def reidentify(self, exc: ReconnectWebSocket) -> None: self._cancel_task() self._dispatch('disconnect') self._dispatch('shard_disconnect', self.id) _log.info('Got a request to %s the websocket at Shard ID %s.', exc.op, self.id) try: coro = DiscordWebSocket.from_client( self._client, resume=exc.resume, shard_id=self.id, session=self.ws.session_id, sequence=self.ws.sequence, ) self.ws = await asyncio.wait_for(coro, timeout=60.0) except self._handled_exceptions as e: await self._handle_disconnect(e) except asyncio.CancelledError: return except Exception as e: self._queue_put(EventItem(EventType.terminate, self, e)) else: self.launch() async def reconnect(self) -> None: self._cancel_task() try: coro = DiscordWebSocket.from_client(self._client, shard_id=self.id) self.ws = await asyncio.wait_for(coro, timeout=60.0) except self._handled_exceptions as e: await self._handle_disconnect(e) except asyncio.CancelledError: return except Exception as e: self._queue_put(EventItem(EventType.terminate, self, e)) else: self.launch() class ShardInfo: __slots__ = ('_parent', 'id', 'shard_count') def __init__(self, parent: Shard, shard_count: Optional[int]) -> None: self._parent: Shard = parent self.id: int = parent.id self.shard_count: Optional[int] = shard_count def is_closed(self) -> bool: return not self._parent.ws.open async def disconnect(self) -> None: if self.is_closed(): return await self._parent.disconnect() async def reconnect(self) -> None: if not self.is_closed(): await self._parent.disconnect() await self._parent.reconnect()
MIT License
pajbot/pajbot
pajbot/apiwrappers/twitch/helix.py
TwitchHelixAPI._fetch_subscribers_page
python
def _fetch_subscribers_page(self, broadcaster_id, authorization, after_pagination_cursor=None):
    response = self.get(
        "/subscriptions",
        {"broadcaster_id": broadcaster_id, **self._with_pagination(after_pagination_cursor)},
        authorization=authorization,
    )
    subscribers = [entry["user_id"] for entry in response["data"]]
    pagination_cursor = response["pagination"].get("cursor", None)
    return subscribers, pagination_cursor
Fetch a list of subscribers (user IDs) of a broadcaster + a pagination cursor as a tuple.
https://github.com/pajbot/pajbot/blob/42e19a692eb663556bc78d0d86eef1a667728f46/pajbot/apiwrappers/twitch/helix.py#L291-L329
from typing import Dict, Optional, List, Tuple import logging import time from datetime import datetime, timezone import math from requests import HTTPError from pajbot import utils from pajbot.apiwrappers.response_cache import ( DateTimeSerializer, ClassInstanceSerializer, ListSerializer, TwitchChannelEmotesSerializer, ) from pajbot.apiwrappers.twitch.base import BaseTwitchAPI from pajbot.models.emote import Emote from pajbot.models.user import UserBasics, UserChannelInformation, UserStream from pajbot.utils import iterate_in_chunks log = logging.getLogger(__name__) class TwitchGame: def __init__( self, id: str, name: str, box_art_url: str, ): self.id: str = id self.name: str = name self.box_art_url: str = box_art_url def jsonify(self): return { "id": self.id, "name": self.name, "box_art_url": self.box_art_url, } @staticmethod def from_json(json_data): return TwitchGame( json_data["id"], json_data["name"], json_data["box_art_url"], ) class TwitchVideo: def __init__( self, id: str, user_id: str, user_name: str, title: str, description: str, created_at: str, published_at: str, url: str, thumbnail_url: str, viewable: str, view_count: int, language: str, video_type: str, duration: str, ): self.id: str = id self.user_id: str = user_id self.user_name: str = user_name self.title: str = title self.description: str = description self.created_at: str = created_at self.published_at: str = published_at self.url: str = url self.thumbnail_url: str = thumbnail_url self.viewable: str = viewable self.view_count: int = view_count self.language: str = language self.video_type: str = video_type self.duration: str = duration def jsonify(self): return { "id": self.id, "user_id": self.user_id, "user_name": self.user_name, "title": self.title, "description": self.description, "created_at": self.created_at, "published_at": self.published_at, "url": self.url, "thumbnail_url": self.thumbnail_url, "viewable": self.viewable, "view_count": self.view_count, "language": self.language, "video_type": self.video_type, "duration": self.duration, } @staticmethod def from_json(json_data): return TwitchVideo( json_data["id"], json_data["user_id"], json_data["user_name"], json_data["title"], json_data["description"], json_data["created_at"], json_data["published_at"], json_data["url"], json_data["thumbnail_url"], json_data["viewable"], json_data["view_count"], json_data["language"], json_data["video_type"], json_data["duration"], ) class TwitchHelixAPI(BaseTwitchAPI): authorization_header_prefix = "Bearer" def __init__(self, redis, app_token_manager): super().__init__(base_url="https://api.twitch.tv/helix", redis=redis) self.app_token_manager = app_token_manager @property def default_authorization(self): return self.app_token_manager def request(self, method, endpoint, params, headers, authorization=None, json=None): try: return super().request(method, endpoint, params, headers, authorization, json) except HTTPError as e: if e.response.status_code == 429: rate_limit_reset = datetime.fromtimestamp(int(e.response.headers["Ratelimit-Reset"]), tz=timezone.utc) time_to_wait = rate_limit_reset - utils.now() time.sleep(math.ceil(time_to_wait.total_seconds())) return super().request(method, endpoint, params, headers, authorization, json) raise e @staticmethod def _with_pagination(after_pagination_cursor=None): if after_pagination_cursor is None: return {} return {"after": after_pagination_cursor} @staticmethod def _fetch_all_pages(page_fetch_fn, *args, **kwargs): pagination_cursor = None responses = [] while True: response, 
pagination_cursor = page_fetch_fn(after_pagination_cursor=pagination_cursor, *args, **kwargs) responses.extend(response) if len(response) <= 0 or pagination_cursor is None: break return responses def _fetch_user_data_by_login(self, login: str): response = self.get("/users", {"login": login}) if len(response["data"]) <= 0: return None return response["data"][0] def _fetch_user_data_by_id(self, user_id): response = self.get("/users", {"id": user_id}) if len(response["data"]) <= 0: return None return response["data"][0] def _fetch_user_data_from_authorization(self, authorization): response = self.get("/users", authorization=authorization) if len(response["data"]) <= 0: raise ValueError("No user returned for given authorization") return response["data"][0] def _get_user_data_by_login(self, login): return self.cache.cache_fetch_fn( redis_key=f"api:twitch:helix:user:by-login:{login}", fetch_fn=lambda: self._fetch_user_data_by_login(login), expiry=lambda response: 30 if response is None else 300, ) def _get_user_data_by_id(self, user_id): return self.cache.cache_fetch_fn( redis_key=f"api:twitch:helix:user:by-id:{user_id}", fetch_fn=lambda: self._fetch_user_data_by_id(user_id), expiry=lambda response: 30 if response is None else 300, ) def get_user_id(self, login: str) -> Optional[str]: user_data = self._get_user_data_by_login(login) return user_data["id"] if user_data is not None else None def require_user_id(self, login: str) -> str: user_id = self.get_user_id(login) if user_id is None: raise ValueError(f'No user found under login name "{login}" on Twitch') return user_id def get_login(self, user_id: str) -> Optional[str]: user_data = self._get_user_data_by_id(user_id) return user_data["login"] if user_data is not None else None def fetch_channel_information(self, user_id: str) -> Optional[UserChannelInformation]: response = self.get("/channels", {"broadcaster_id": user_id}) if len(response["data"]) <= 0: return None info = response["data"][0] return UserChannelInformation(info["broadcaster_language"], info["game_id"], info["game_name"], info["title"]) def get_channel_information(self, user_id: str) -> Optional[UserChannelInformation]: return self.cache.cache_fetch_fn( redis_key=f"api:twitch:helix:channel-information:{user_id}", serializer=ClassInstanceSerializer(UserChannelInformation), fetch_fn=lambda: self.fetch_channel_information(user_id), expiry=lambda response: 30 if response else 300, ) def fetch_follow_since(self, from_id, to_id): response = self.get("/users/follows", {"from_id": from_id, "to_id": to_id}) if len(response["data"]) <= 0: return None return self.parse_datetime(response["data"][0]["followed_at"]) def get_follow_since(self, from_id: str, to_id: str): return self.cache.cache_fetch_fn( redis_key=f"api:twitch:helix:follow-since:{from_id}:{to_id}", serializer=DateTimeSerializer(), fetch_fn=lambda: self.fetch_follow_since(from_id, to_id), expiry=lambda response: 30 if response is None else 300, ) def get_profile_image_url(self, user_id: str) -> Optional[str]: user_data = self._get_user_data_by_id(user_id) return user_data["profile_image_url"] if user_data is not None else None def get_user_basics_by_login(self, login: str) -> Optional[UserBasics]: user_data = self._get_user_data_by_login(login) if user_data is None: return None return UserBasics(user_data["id"], user_data["login"], user_data["display_name"]) def fetch_user_basics_from_authorization(self, authorization) -> UserBasics: user_data = self._fetch_user_data_from_authorization(authorization) return 
UserBasics(user_data["id"], user_data["login"], user_data["display_name"])
MIT License
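The subscriber fetch in the entry above follows the cursor-pagination pattern driven by _fetch_all_pages in the same module: call the page function with no cursor, collect the results, then keep feeding the returned cursor back in until an empty page or a missing cursor signals the end. A standalone sketch of that loop, with fetch_page standing in for any page function of the same shape:

def fetch_all(fetch_page):
    """Collect every item from a cursor-paginated page function.

    fetch_page(after_pagination_cursor=...) must return (items, next_cursor),
    where next_cursor is None on the last page.
    """
    items = []
    cursor = None
    while True:
        page, cursor = fetch_page(after_pagination_cursor=cursor)
        items.extend(page)
        if not page or cursor is None:
            break
    return items

# Example with an in-memory fake of the Helix response shape:
pages = {None: (["100", "200"], "cursor-1"), "cursor-1": (["300"], None)}
all_ids = fetch_all(lambda after_pagination_cursor=None: pages[after_pagination_cursor])
assert all_ids == ["100", "200", "300"]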
featurelabs/featuretools-tsfresh-primitives
featuretools_tsfresh_primitives/utils.py
comprehensive_fc_parameters
python
def comprehensive_fc_parameters():
    parameters = ComprehensiveFCParameters()
    partial_autocorrelation = parameters['partial_autocorrelation']
    for index, values in enumerate(partial_autocorrelation):
        if values['lag'] == 0:
            del partial_autocorrelation[index]
    return parameters
A wrapper around the tsfresh function :class:`ComprehensiveFCParameters` to filter out unsupported parameter settings.

Returns:
    parameters (dict) : a dictionary of parameter settings

Docstring source: https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.settings.ComprehensiveFCParameters
https://github.com/featurelabs/featuretools-tsfresh-primitives/blob/e9482cdb032d8ef49ee6a159c78fd5ee76ccc475/featuretools_tsfresh_primitives/utils.py#L9-L26
from featuretools.primitives import AggregationPrimitive from tsfresh.feature_extraction.settings import ComprehensiveFCParameters from featuretools_tsfresh_primitives.primitives import SUPPORTED_PRIMITIVES PRIMITIVES = {primitive.name: primitive for primitive in SUPPORTED_PRIMITIVES}
MIT License
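A brief usage sketch for the entry above, assuming tsfresh is installed; the import path follows the file path in the row. The function returns the usual ComprehensiveFCParameters dictionary with the lag=0 setting removed from 'partial_autocorrelation':

from featuretools_tsfresh_primitives.utils import comprehensive_fc_parameters

params = comprehensive_fc_parameters()
# The unsupported lag=0 setting has been stripped; remaining lags are positive.
print(all(setting['lag'] > 0 for setting in params['partial_autocorrelation']))  # True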
cloudsmith-io/cloudsmith-cli
cloudsmith_cli/cli/commands/repos.py
delete
python
def delete(ctx, opts, owner_repo, yes):
    owner, repo = owner_repo
    delete_args = {
        "namespace": click.style(owner, bold=True),
        "repository": click.style(repo, bold=True),
    }
    prompt = "delete the %(repository)s from the %(namespace)s namespace" % delete_args
    if not utils.confirm_operation(prompt, assume_yes=yes):
        return
    click.secho(
        "Deleting %(repository)s from the %(namespace)s namespace ... " % delete_args,
        nl=False,
    )
    context_msg = "Failed to delete the repository!"
    with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg):
        with maybe_spinner(opts):
            api.delete_repo(owner=owner, repo=repo)
    click.secho("OK", fg="green")
Delete a repository from a namespace.

- OWNER/REPO: Specify the OWNER namespace (i.e. user or org), and the name of the REPO to be deleted, separated by a slash.

  Example: 'your-org/your-repo'

Full CLI example:

  $ cloudsmith repos delete your-org/your-repo
https://github.com/cloudsmith-io/cloudsmith-cli/blob/0e6800e8f544b24f5cadca2e70063a5dbf62c462/cloudsmith_cli/cli/commands/repos.py#L275-L308
from __future__ import absolute_import, print_function, unicode_literals import json from operator import itemgetter import click import six from ...core.api import repos as api from .. import command, decorators, utils, validators from ..exceptions import handle_api_exceptions from ..utils import maybe_spinner from .main import main def print_repositories(opts, data, page_info=None, show_list_info=True): headers = [ "Name", "Type", "Packages", "Groups", "Downloads", "Size", "Owner / Repository (Identifier)", ] rows = [] for repo in sorted(data, key=itemgetter("namespace", "slug")): rows.append( [ click.style(repo["name"], fg="cyan"), click.style(repo["repository_type_str"], fg="yellow"), click.style(six.text_type(repo["package_count"]), fg="blue"), click.style(six.text_type(repo["package_group_count"]), fg="blue"), click.style(six.text_type(repo["num_downloads"]), fg="blue"), click.style(six.text_type(repo["size_str"]), fg="blue"), "%(owner_slug)s/%(slug)s" % { "owner_slug": click.style(repo["namespace"], fg="magenta"), "slug": click.style(repo["slug"], fg="green"), }, ] ) if data: click.echo() utils.pretty_print_table(headers, rows) click.echo() num_results = len(data) list_suffix = "repositor%s visible" % ("ies" if num_results != 1 else "y") utils.pretty_print_list_info( num_results=num_results, page_info=page_info, suffix=list_suffix ) @main.group(cls=command.AliasGroup, name="repositories", aliases=["repos"]) @decorators.common_cli_config_options @decorators.common_cli_output_options @decorators.common_api_auth_options @decorators.initialise_api @click.pass_context def repositories(ctx, opts): @repositories.command(name="get", aliases=["list", "ls"]) @decorators.common_cli_config_options @decorators.common_cli_list_options @decorators.common_cli_output_options @decorators.common_api_auth_options @decorators.initialise_api @click.argument( "owner_repo", metavar="OWNER/REPO", callback=validators.validate_optional_owner_repo, default="", required=False, ) @click.pass_context def get(ctx, opts, owner_repo, page, page_size): use_stderr = opts.output != "pretty" click.echo("Getting list of repositories ... ", nl=False, err=use_stderr) if isinstance(owner_repo, list): if len(owner_repo) == 1: owner = owner_repo[0] repo = None else: owner, repo = owner_repo if isinstance(owner_repo, str): repo = None if owner_repo: owner = owner_repo else: owner = None context_msg = "Failed to get list of repositories!" with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg): with maybe_spinner(opts): repos_, page_info = api.list_repos( owner=owner, repo=repo, page=page, page_size=page_size ) click.secho("OK", fg="green", err=use_stderr) if utils.maybe_print_as_json(opts, repos_, page_info): return print_repositories(opts=opts, data=repos_, show_list_info=False) @repositories.command(aliases=["new"]) @decorators.common_cli_config_options @decorators.common_cli_output_options @decorators.common_api_auth_options @decorators.initialise_api @click.argument("owner", default=None, required=True) @click.argument("repo_config_file", type=click.File("rb"), required=True) @click.pass_context def create(ctx, opts, owner, repo_config_file): use_stderr = opts.output != "pretty" repo_config = json.load(repo_config_file) repo_name = repo_config.get("name", None) if repo_name is None: raise click.BadParameter( "Name is a required field for creating a repository.", param="name" ) click.secho( "Creating %(name)s repository for the %(owner)s namespace ..." 
% { "name": click.style(repo_name, bold=True), "owner": click.style(owner, bold=True), }, nl=False, err=use_stderr, ) context_msg = "Failed to create the repository!" with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg): with maybe_spinner(opts): repository = api.create_repo(owner, repo_config) click.secho("OK", fg="green", err=use_stderr) print_repositories(opts=opts, data=[repository], show_list_info=False) @repositories.command() @decorators.common_cli_config_options @decorators.common_cli_output_options @decorators.common_api_auth_options @decorators.initialise_api @click.argument( "owner_repo", metavar="OWNER/REPO", callback=validators.validate_owner_repo ) @click.argument("repo_config_file", type=click.File("rb"), required=True) @click.pass_context def update(ctx, opts, owner_repo, repo_config_file): use_stderr = opts.output != "pretty" owner, repo = owner_repo repo_config = json.load(repo_config_file) click.secho( "Updating %(name)s repository in the %(owner)s namespace ..." % { "name": click.style(repo, bold=True), "owner": click.style(owner, bold=True), }, nl=False, err=use_stderr, ) context_msg = "Failed to update the repository!" with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg): with maybe_spinner(opts): repository = api.update_repo(owner, repo, repo_config) click.secho("OK", fg="green", err=use_stderr) print_repositories(opts=opts, data=[repository], show_list_info=False) @repositories.command(aliases=["rm"]) @decorators.common_cli_config_options @decorators.common_cli_output_options @decorators.common_api_auth_options @decorators.initialise_api @click.argument( "owner_repo", metavar="OWNER/REPO", callback=validators.validate_owner_repo ) @click.option( "-y", "--yes", default=False, is_flag=True, help="Assume yes as default answer to questions (this is dangerous!)", ) @click.pass_context
Apache License 2.0
wearpants/twiggy
twiggy/formats.py
LineFormat.format_text
python
def format_text(self, msg):
    if msg.suppress_newlines:
        return msg.text.replace('\n', '\\n')
    else:
        return msg.text
format the text part of a message
https://github.com/wearpants/twiggy/blob/8bd6ce544f6dac568fcaff9525fc6cce870bfa90/twiggy/formats.py#L40-L45
import copy from .lib.converter import ConversionTable, Converter from .lib import iso8601time line_conversion = ConversionTable([ Converter(key='time', convert_value=iso8601time, convert_item='{1}'.format, required=True), ('level', str, '{1}'.format, True), ('name', str, '{1}'.format), ]) line_conversion.generic_value = str line_conversion.generic_item = "{0}={1}".format line_conversion.aggregate = ':'.join class LineFormat(object): def __init__(self, separator='|', traceback_prefix='\nTRACE ', conversion=line_conversion): self.separator = separator self.traceback_prefix = traceback_prefix self.conversion = conversion def __copy__(self): return self.__class__(self.separator, self.traceback_prefix, self.conversion.copy()) def __call__(self, msg): fields = self.format_fields(msg) text = self.format_text(msg) trace = self.format_traceback(msg) return "{fields}{self.separator}{text}{trace}\n".format(**locals())
BSD 3-Clause New or Revised License
azure/autorest.python
test/vanilla/legacy/Expected/AcceptanceTests/Xml/xmlservice/models/_models_py3.py
Blob.__init__
python
def __init__(
    self,
    *,
    name: str,
    deleted: bool,
    snapshot: str,
    properties: "BlobProperties",
    metadata: Optional[Dict[str, str]] = None,
    **kwargs
):
    super(Blob, self).__init__(**kwargs)
    self.name = name
    self.deleted = deleted
    self.snapshot = snapshot
    self.properties = properties
    self.metadata = metadata
:keyword name: Required.
:paramtype name: str
:keyword deleted: Required.
:paramtype deleted: bool
:keyword snapshot: Required.
:paramtype snapshot: str
:keyword properties: Required. Properties of a blob.
:paramtype properties: ~xmlservice.models.BlobProperties
:keyword metadata: Dictionary of :code:`<string>`.
:paramtype metadata: dict[str, str]
https://github.com/azure/autorest.python/blob/90d60a965788e3b4c0809e6686bdc3525acac89c/test/vanilla/legacy/Expected/AcceptanceTests/Xml/xmlservice/models/_models_py3.py#L157-L184
import datetime from typing import Dict, List, Optional, Union from azure.core.exceptions import HttpResponseError import msrest.serialization from ._auto_rest_swagger_batxml_service_enums import * class AccessPolicy(msrest.serialization.Model): _validation = { "start": {"required": True}, "expiry": {"required": True}, "permission": {"required": True}, } _attribute_map = { "start": {"key": "Start", "type": "iso-8601"}, "expiry": {"key": "Expiry", "type": "iso-8601"}, "permission": {"key": "Permission", "type": "str"}, } def __init__(self, *, start: datetime.datetime, expiry: datetime.datetime, permission: str, **kwargs): super(AccessPolicy, self).__init__(**kwargs) self.start = start self.expiry = expiry self.permission = permission class AppleBarrel(msrest.serialization.Model): _attribute_map = { "good_apples": {"key": "GoodApples", "type": "[str]", "xml": {"wrapped": True, "itemsName": "Apple"}}, "bad_apples": {"key": "BadApples", "type": "[str]", "xml": {"wrapped": True, "itemsName": "Apple"}}, } def __init__(self, *, good_apples: Optional[List[str]] = None, bad_apples: Optional[List[str]] = None, **kwargs): super(AppleBarrel, self).__init__(**kwargs) self.good_apples = good_apples self.bad_apples = bad_apples class Banana(msrest.serialization.Model): _attribute_map = { "name": {"key": "name", "type": "str", "xml": {"name": "name"}}, "flavor": {"key": "flavor", "type": "str", "xml": {"name": "flavor"}}, "expiration": {"key": "expiration", "type": "iso-8601", "xml": {"name": "expiration"}}, } _xml_map = {"name": "banana"} def __init__( self, *, name: Optional[str] = None, flavor: Optional[str] = None, expiration: Optional[datetime.datetime] = None, **kwargs ): super(Banana, self).__init__(**kwargs) self.name = name self.flavor = flavor self.expiration = expiration class Blob(msrest.serialization.Model): _validation = { "name": {"required": True}, "deleted": {"required": True}, "snapshot": {"required": True}, "properties": {"required": True}, } _attribute_map = { "name": {"key": "Name", "type": "str"}, "deleted": {"key": "Deleted", "type": "bool"}, "snapshot": {"key": "Snapshot", "type": "str"}, "properties": {"key": "Properties", "type": "BlobProperties"}, "metadata": {"key": "Metadata", "type": "{str}"}, } _xml_map = {"name": "Blob"}
MIT License
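A construction sketch for the Blob model in the entry above. msrest models take keyword-only arguments and validate required fields; serialize() comes from msrest.serialization.Model. The name, snapshot, and metadata values are invented for illustration, and blob_properties is an assumed, already-built BlobProperties instance because that model's constructor is not shown in this snippet:

blob = Blob(
    name="report.csv",                  # illustrative value
    deleted=False,
    snapshot="2021-01-01T00:00:00Z",    # illustrative value
    properties=blob_properties,         # assumed BlobProperties instance
    metadata={"owner": "data-team"},    # illustrative value
)
serialized = blob.serialize()           # dict keyed by the names in _attribute_map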
bugra/l1
l1/algos.py
_mad
python
def _mad(array):
    c = .6745
    center = np.apply_over_axes(np.median, array, 0)
    absolute_difference = np.fabs(array - center) / c
    return np.median(absolute_difference, axis=0)
The Median Absolute Deviation along the given axis of an array

Parameters
----------
array (np.array) : array

Returns
-------
mad : float
    `mad` = median(abs(`a` - center)) / `c`
https://github.com/bugra/l1/blob/82bb988767c48eefd1e26a6ffb1789c7bebf31a2/l1/algos.py#L58-L73
from itertools import chain from cvxopt import matrix, solvers, spmatrix import numpy as np solvers.options['show_progress'] = False def _second_order_derivative_matrix(size_of_matrix): temp = size_of_matrix - 2 first = [1, -2, 1] * temp second = list(chain.from_iterable([[ii] * 3 for ii in range(temp)])) third = list(chain.from_iterable([[ii, ii + 1, ii + 2] for ii in range(temp)])) second_order = spmatrix(first, second, third) return second_order def _l1(signal, regularizer): signal_size = signal.size[0] temp = signal_size - 2 temp_ls = range(temp) D = _second_order_derivative_matrix(signal_size) P = D * D.T q = -D * signal G = spmatrix([], [], [], (2 * temp, temp)) G[:temp, :temp] = spmatrix(1.0, temp_ls, temp_ls) G[temp:, :temp] = -spmatrix(1.0, temp_ls, temp_ls) h = matrix(regularizer, (2 * temp, 1), tc='d') residual = solvers.qp(P, q, G, h) trend = signal - D.T * residual['x'] return trend
Apache License 2.0
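A small numeric check of _mad from the entry above, with hand-picked values: for the column [1, 2, 3, 4, 100] the median is 3, the absolute deviations are [2, 1, 0, 1, 97], their median is 1, and scaling by c gives 1 / 0.6745, roughly 1.4826. The import path follows the file path in the row:

import numpy as np
from l1.algos import _mad

arr = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])  # one column with an outlier
print(_mad(arr))  # ~[1.4826]; robust to the outlier, unlike the standard deviation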
shortbloke/home_assistant_config
custom_components/owlintuition/sensor.py
OwlData.__init__
python
def __init__(self, localaddr):
    self.data = {}
    self._localaddr = localaddr
Prepare an empty dictionary
https://github.com/shortbloke/home_assistant_config/blob/5587d501f115ca49e77c935387fe05feec2eb632/custom_components/owlintuition/sensor.py#L204-L207
import asyncio import socket from xml.etree import ElementTree as ET from datetime import datetime, timedelta from select import select from functools import reduce import logging import voluptuous as vol from homeassistant.components.sensor import ( PLATFORM_SCHEMA, STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING ) import homeassistant.const as c from homeassistant.exceptions import TemplateError from homeassistant.helpers.entity import Entity import homeassistant.helpers.config_validation as cv CONF_COST_UNIT_OF_MEASUREMENT = 'cost_unit_of_measurement' CONF_COST_ICON = 'cost_icon' ATTR_LAST_UPDATE = 'last_update' ATTR_LAST_RESET = 'last_reset' ATTR_DEVICE_CLASS = 'device_class' ATTR_STATE_CLASS = 'state_class' VERSION = '1.5.0' DEFAULT_NAME = 'OWL Intuition' MODE_MONO = 'monophase' MODE_TRI = 'triphase' SENSOR_ELECTRICITY_BATTERY = 'electricity_battery' SENSOR_ELECTRICITY_BATTERY_LVL = 'electricity_battery_lvl' SENSOR_ELECTRICITY_RADIO = 'electricity_radio' SENSOR_ELECTRICITY_POWER = 'electricity_power' SENSOR_ELECTRICITY_ENERGY_TODAY = 'electricity_energy_today' SENSOR_ELECTRICITY_COST_TODAY = 'electricity_cost_today' SENSOR_ELECTRICITY_LAST_UPDATE = 'electricity_last_update' SENSOR_SOLAR_GPOWER = 'solargen' SENSOR_SOLAR_GENERGY_TODAY = 'solargen_today' SENSOR_SOLAR_EPOWER = 'solarexp' SENSOR_SOLAR_EENERGY_TODAY = 'solarexp_today' SENSOR_HOTWATER_BATTERY = 'hotwater_battery' SENSOR_HOTWATER_BATTERY_LVL = 'hotwater_battery_lvl' SENSOR_HOTWATER_RADIO = 'hotwater_radio' SENSOR_HOTWATER_CURRENT = 'hotwater_current' SENSOR_HOTWATER_REQUIRED = 'hotwater_required' SENSOR_HOTWATER_AMBIENT = 'hotwater_ambient' SENSOR_HOTWATER_STATE = 'hotwater_state' SENSOR_HEATING_BATTERY = 'heating_battery' SENSOR_HEATING_BATTERY_LVL = 'heating_battery_lvl' SENSOR_HEATING_RADIO = 'heating_radio' SENSOR_HEATING_CURRENT = 'heating_current' SENSOR_HEATING_REQUIRED = 'heating_required' SENSOR_HEATING_STATE = 'heating_state' SENSOR_RELAYS_RADIO = 'relays_radio' OWLCLASS_WEATHER = 'weather' OWLCLASS_ELECTRICITY = 'electricity' OWLCLASS_SOLAR = 'solar' OWLCLASS_HOTWATER = 'hot_water' OWLCLASS_HEATING = 'heating' OWLCLASS_RELAYS = 'relays' OWL_CLASSES = [ OWLCLASS_WEATHER, OWLCLASS_ELECTRICITY, OWLCLASS_SOLAR, OWLCLASS_HOTWATER, OWLCLASS_HEATING, OWLCLASS_RELAYS ] BATTERY_SENSORS = [ SENSOR_ELECTRICITY_BATTERY, SENSOR_HOTWATER_BATTERY, SENSOR_HEATING_BATTERY ] RADIO_SENSORS = [ SENSOR_ELECTRICITY_RADIO, SENSOR_HOTWATER_RADIO, SENSOR_HEATING_RADIO, SENSOR_RELAYS_RADIO ] SENSOR_TYPES = { SENSOR_ELECTRICITY_BATTERY: ['Electricity Battery', None, 'mdi:battery', OWLCLASS_ELECTRICITY, None], SENSOR_ELECTRICITY_BATTERY_LVL: ['Electricity Battery Level', '%', 'mdi:battery', OWLCLASS_ELECTRICITY, c.DEVICE_CLASS_BATTERY], SENSOR_ELECTRICITY_RADIO: ['Electricity Radio', 'dBm', 'mdi:signal', OWLCLASS_ELECTRICITY, c.DEVICE_CLASS_SIGNAL_STRENGTH], SENSOR_ELECTRICITY_POWER: ['Electricity Power', 'W', 'mdi:flash', OWLCLASS_ELECTRICITY, c.DEVICE_CLASS_POWER], SENSOR_ELECTRICITY_ENERGY_TODAY: ['Electricity Today', 'kWh', 'mdi:flash', OWLCLASS_ELECTRICITY, c.DEVICE_CLASS_ENERGY], SENSOR_ELECTRICITY_COST_TODAY: ['Cost Today', None, 'mdi:coin', OWLCLASS_ELECTRICITY, None], SENSOR_SOLAR_GPOWER: ['Solar Generating', 'W', 'mdi:flash', OWLCLASS_SOLAR, c.DEVICE_CLASS_POWER], SENSOR_SOLAR_GENERGY_TODAY: ['Solar Generated Today', 'kWh', 'mdi:flash', OWLCLASS_SOLAR, c.DEVICE_CLASS_ENERGY], SENSOR_SOLAR_EPOWER: ['Solar Exporting', 'W', 'mdi:flash', OWLCLASS_SOLAR, c.DEVICE_CLASS_POWER], SENSOR_SOLAR_EENERGY_TODAY: ['Solar Exported Today', 
'kWh', 'mdi:flash', OWLCLASS_SOLAR, c.DEVICE_CLASS_ENERGY], SENSOR_HOTWATER_BATTERY: ['Hotwater Battery', None, 'mdi:battery', OWLCLASS_HOTWATER, None], SENSOR_HOTWATER_BATTERY_LVL: ['Hotwater Battery Level', 'V', 'mdi:battery', OWLCLASS_HOTWATER, c.DEVICE_CLASS_VOLTAGE], SENSOR_HOTWATER_RADIO: ['Hotwater Radio', 'dBm', 'mdi:signal', OWLCLASS_HOTWATER, c.DEVICE_CLASS_SIGNAL_STRENGTH], SENSOR_HOTWATER_CURRENT: ['Hotwater Temperature', '°C', 'mdi:thermometer', OWLCLASS_HOTWATER, c.DEVICE_CLASS_TEMPERATURE], SENSOR_HOTWATER_REQUIRED: ['Hotwater Required', '°C', 'mdi:thermostat', OWLCLASS_HOTWATER, c.DEVICE_CLASS_TEMPERATURE], SENSOR_HOTWATER_AMBIENT: ['Hotwater Ambient', '°C', 'mdi:thermometer', OWLCLASS_HOTWATER, c.DEVICE_CLASS_TEMPERATURE], SENSOR_HOTWATER_STATE: ['Hotwater State', '', 'mdi:information-outline', OWLCLASS_HOTWATER, None], SENSOR_HEATING_BATTERY: ['Heating Battery', None, 'mdi:battery', OWLCLASS_HEATING, None], SENSOR_HEATING_BATTERY_LVL: ['Heating Battery Level', 'V', 'mdi:battery', OWLCLASS_HEATING, c.DEVICE_CLASS_VOLTAGE], SENSOR_HEATING_RADIO: ['Heating Radio', 'dBm', 'mdi:signal', OWLCLASS_HEATING, c.DEVICE_CLASS_SIGNAL_STRENGTH], SENSOR_HEATING_CURRENT: ['Heating Temperature', '°C', 'mdi:thermometer', OWLCLASS_HEATING, c.DEVICE_CLASS_TEMPERATURE], SENSOR_HEATING_REQUIRED: ['Heating Required', '°C', 'mdi:thermostat', OWLCLASS_HEATING, None], SENSOR_HEATING_STATE: ['Heating State', None, 'mdi:information-outline', OWLCLASS_HEATING, None], SENSOR_RELAYS_RADIO: ['Relays Radio', 'dBm', 'mdi:signal', OWLCLASS_RELAYS, c.DEVICE_CLASS_SIGNAL_STRENGTH], } HEATING_STATE = [ 'Standby', 'Comfort (Running)', '', '', 'Comfort (Up To Temperature)', 'Comfort (Warm Up)', 'Comfort (Cool Down)', 'Standby (Running)'] HOTWATER_STATE = [ 'Standby', 'Running', '', '', 'Up To Temperature', 'Warm Up', 'Cool Down', 'Standby (Running)' ] DEFAULT_MONITORED = [ OWLCLASS_ELECTRICITY ] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(c.CONF_PORT): cv.port, vol.Optional(c.CONF_HOST, default='localhost'): cv.string, vol.Optional(c.CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(c.CONF_MODE, default=MODE_MONO): vol.In([MODE_MONO, MODE_TRI]), vol.Optional(c.CONF_MONITORED_CONDITIONS, default=DEFAULT_MONITORED): vol.All(cv.ensure_list, [vol.In(OWL_CLASSES)]), vol.Optional(CONF_COST_ICON, default='mdi:coin'): cv.string, vol.Optional(CONF_COST_UNIT_OF_MEASUREMENT): cv.string, }) SOCK_TIMEOUT = 60 _LOGGER = logging.getLogger(__name__) @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): hostname = config.get(c.CONF_HOST) if hostname == 'localhost': hostname = socket.gethostbyname(socket.getfqdn()) owldata = OwlData((hostname, config.get(c.CONF_PORT))) SENSOR_TYPES[SENSOR_ELECTRICITY_COST_TODAY][1] = config.get(CONF_COST_UNIT_OF_MEASUREMENT) SENSOR_TYPES[SENSOR_ELECTRICITY_COST_TODAY][2] = config.get(CONF_COST_ICON) dev = [] for sensor in SENSOR_TYPES: if SENSOR_TYPES[sensor][3] in config.get(c.CONF_MONITORED_CONDITIONS): dev.append(OwlIntuitionSensor(owldata, config.get(c.CONF_NAME), sensor)) _LOGGER.debug("Adding sensor %s", sensor) if config.get(c.CONF_MODE) == MODE_TRI and OWLCLASS_ELECTRICITY in config.get(c.CONF_MONITORED_CONDITIONS): for phase in range(1, 4): dev.append(OwlIntuitionSensor(owldata, config.get(c.CONF_NAME), SENSOR_ELECTRICITY_POWER, phase)) dev.append(OwlIntuitionSensor(owldata, config.get(c.CONF_NAME), SENSOR_ELECTRICITY_ENERGY_TODAY, phase)) async_add_devices(dev, True) class OwlData:
MIT License
plaid/plaid-python
plaid/model/payment_initiation_metadata.py
PaymentInitiationMetadata.openapi_types
python
def openapi_types():
    lazy_import()
    return {
        'supports_international_payments': (bool,),
        'maximum_payment_amount': ({str: (str,)},),
        'supports_refund_details': (bool,),
        'standing_order_metadata': (PaymentInitiationStandingOrderMetadata,),
    }
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded

Returns
    openapi_types (dict): The key is attribute name
        and the value is attribute type.
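A brief, hypothetical way to inspect this mapping; how the custom cached_property descriptor resolves on class access is an assumption here, not something the record confirms:

from plaid.model.payment_initiation_metadata import PaymentInitiationMetadata

# Assumed access pattern: treat openapi_types as a dict of attribute -> accepted type tuple.
for attr, accepted_types in PaymentInitiationMetadata.openapi_types.items():
    print(attr, accepted_types)  # e.g. supports_refund_details (bool,)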
https://github.com/plaid/plaid-python/blob/950d04d621a5f5b92a7705cc30d14d4004db8543/plaid/model/payment_initiation_metadata.py#L74-L89
import re
import sys

from plaid.model_utils import (
    ApiTypeError,
    ModelComposed,
    ModelNormal,
    ModelSimple,
    cached_property,
    change_keys_js_to_python,
    convert_js_args_to_python_args,
    date,
    datetime,
    file_type,
    none_type,
    validate_get_composed_info,
)


def lazy_import():
    from plaid.model.payment_initiation_standing_order_metadata import PaymentInitiationStandingOrderMetadata
    globals()['PaymentInitiationStandingOrderMetadata'] = PaymentInitiationStandingOrderMetadata


class PaymentInitiationMetadata(ModelNormal):

    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)

    _nullable = True

    @cached_property
MIT License
napari/napari
napari/layers/shapes/_shapes_models/shape.py
Shape.scale
python
def scale(self, scale, center=None):
    if isinstance(scale, (list, np.ndarray)):
        transform = np.array([[scale[0], 0], [0, scale[1]]])
    else:
        transform = np.array([[scale, 0], [0, scale]])
    if center is None:
        self.transform(transform)
    else:
        self.shift(-center)
        self.transform(transform)
        self.shift(center)
Performs a scaling on the shape

Parameters
----------
scale : float, list
    scalar or list specifying rescaling of shape.
center : list
    length 2 list specifying coordinate of center of scaling.
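The effect of this method can be reproduced with plain NumPy; here is a small self-contained sketch of the shift/transform/shift sequence it applies (the sample points are made up):

import numpy as np

points = np.array([[1.0, 1.0], [2.0, 3.0]])   # hypothetical shape vertices
scale = [2.0, 0.5]
center = np.array([1.0, 1.0])

# shift(-center), transform by a diagonal matrix, shift(center)
transform = np.array([[scale[0], 0], [0, scale[1]]])
scaled = (points - center) @ transform.T + center
print(scaled)  # [[1. 1.] [3. 2.]]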
https://github.com/napari/napari/blob/c4c987c880fe125da608edf427767eafe7f2b3f4/napari/layers/shapes/_shapes_models/shape.py#L287-L306
from abc import ABC, abstractmethod from copy import copy import numpy as np from ....utils.translations import trans from .._shapes_utils import ( is_collinear, path_to_mask, poly_to_mask, triangulate_edge, triangulate_face, ) class Shape(ABC): def __init__( self, *, shape_type='rectangle', edge_width=1, z_index=0, dims_order=None, ndisplay=2, ): self._dims_order = dims_order or list(range(2)) self._ndisplay = ndisplay self.slice_key = None self._face_vertices = np.empty((0, self.ndisplay)) self._face_triangles = np.empty((0, 3), dtype=np.uint32) self._edge_vertices = np.empty((0, self.ndisplay)) self._edge_offsets = np.empty((0, self.ndisplay)) self._edge_triangles = np.empty((0, 3), dtype=np.uint32) self._box = np.empty((9, 2)) self._closed = False self._filled = True self._use_face_vertices = False self.edge_width = edge_width self.z_index = z_index self.name = '' @property @abstractmethod def data(self): raise NotImplementedError() @data.setter @abstractmethod def data(self, data): raise NotImplementedError() @abstractmethod def _update_displayed_data(self): raise NotImplementedError() @property def ndisplay(self): return self._ndisplay @ndisplay.setter def ndisplay(self, ndisplay): if self.ndisplay == ndisplay: return self._ndisplay = ndisplay self._update_displayed_data() @property def dims_order(self): return self._dims_order @dims_order.setter def dims_order(self, dims_order): if self.dims_order == dims_order: return self._dims_order = dims_order self._update_displayed_data() @property def dims_displayed(self): return self.dims_order[-self.ndisplay :] @property def dims_not_displayed(self): return self.dims_order[: -self.ndisplay] @property def data_displayed(self): return self.data[:, self.dims_displayed] @property def edge_width(self): return self._edge_width @edge_width.setter def edge_width(self, edge_width): self._edge_width = edge_width @property def z_index(self): return self._z_index @z_index.setter def z_index(self, z_index): self._z_index = z_index def _set_meshes(self, data, closed=True, face=True, edge=True): if edge: centers, offsets, triangles = triangulate_edge(data, closed=closed) self._edge_vertices = centers self._edge_offsets = offsets self._edge_triangles = triangles else: self._edge_vertices = np.empty((0, self.ndisplay)) self._edge_offsets = np.empty((0, self.ndisplay)) self._edge_triangles = np.empty((0, 3), dtype=np.uint32) if face: clean_data = np.array( [ p for i, p in enumerate(data) if i == 0 or not np.all(p == data[i - 1]) ] ) if not is_collinear(clean_data[:, -2:]): if clean_data.shape[1] == 2: vertices, triangles = triangulate_face(clean_data) elif len(np.unique(clean_data[:, 0])) == 1: val = np.unique(clean_data[:, 0]) vertices, triangles = triangulate_face(clean_data[:, -2:]) exp = np.expand_dims(np.repeat(val, len(vertices)), axis=1) vertices = np.concatenate([exp, vertices], axis=1) else: triangles = [] vertices = [] if len(triangles) > 0: self._face_vertices = vertices self._face_triangles = triangles else: self._face_vertices = np.empty((0, self.ndisplay)) self._face_triangles = np.empty((0, 3), dtype=np.uint32) else: self._face_vertices = np.empty((0, self.ndisplay)) self._face_triangles = np.empty((0, 3), dtype=np.uint32) else: self._face_vertices = np.empty((0, self.ndisplay)) self._face_triangles = np.empty((0, 3), dtype=np.uint32) def transform(self, transform): self._box = self._box @ transform.T self._data[:, self.dims_displayed] = ( self._data[:, self.dims_displayed] @ transform.T ) self._face_vertices = self._face_vertices @ 
transform.T points = self.data_displayed centers, offsets, triangles = triangulate_edge( points, closed=self._closed ) self._edge_vertices = centers self._edge_offsets = offsets self._edge_triangles = triangles def shift(self, shift): shift = np.array(shift) self._face_vertices = self._face_vertices + shift self._edge_vertices = self._edge_vertices + shift self._box = self._box + shift self._data[:, self.dims_displayed] = self.data_displayed + shift
BSD 3-Clause New or Revised License
lord63/v2ex_daily_mission
v2ex_daily_mission/v2ex.py
V2ex.get_last
python
def get_last(self):
    response = self.session.get(self.mission_url, verify=False,
                                cookies=self.cookie)
    soup = BeautifulSoup(response.text, 'html.parser')
    last = soup.select('#Main div')[-1].text
    return last
Get to know how long you have kept signing in.
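A hedged usage sketch; the config keys mirror V2ex.__init__ in the context below, and the cookie string is a placeholder you would copy from a logged-in browser session:

from v2ex_daily_mission.v2ex import V2ex

config = {'cookie': 'A2=...; V2EX_LANG=zhcn', 'log_directory': '.'}
client = V2ex(config)
print(client.get_last())  # e.g. how many days in a row you have signed in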
https://github.com/lord63/v2ex_daily_mission/blob/5fb62dc3efbf7c57aa6bad37f3d5ce2fc479dabd/v2ex_daily_mission/v2ex.py#L68-L73
from __future__ import absolute_import import logging import os import requests from requests.packages import urllib3 from bs4 import BeautifulSoup urllib3.disable_warnings() class V2ex(object): def __init__(self, config): self.signin_url = 'https://www.v2ex.com/signin' self.balance_url = 'https://www.v2ex.com/balance' self.mission_url = 'https://www.v2ex.com/mission/daily' self.config = config self.session = requests.Session() self.session.headers.update( {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux \ x86_64; rv:28.0) Gecko/20100101 Firefox/28.0'}) self.cookie = self._make_cookie(config) logging.basicConfig( filename=os.path.join(config['log_directory'], 'v2ex.log'), level='INFO', format='%(asctime)s [%(levelname)s] %(message)s') requests_log = logging.getLogger("requests") requests_log.setLevel(logging.WARNING) def _make_cookie(self, config): return dict([i.split('=', 1) for i in config["cookie"].split('; ')]) def get_money(self): response = self.session.get(self.mission_url, verify=False, cookies=self.cookie) soup = BeautifulSoup(response.text, 'html.parser') onclick = soup.find('input', class_='super normal button')['onclick'] url = onclick.split('=', 1)[1][2:-2] if url == '/balance': return "You have completed the mission today." else: headers = {'Referer': 'https://www.v2ex.com/mission/daily'} data = {'once': url.split('=')[-1]} self.session.get('https://www.v2ex.com'+url, verify=False, headers=headers, data=data, cookies=self.cookie,) balance = self._get_balance() return balance def _get_balance(self): response = self.session.get(self.balance_url, verify=False, cookies=self.cookie) soup = BeautifulSoup(response.text, 'html.parser') first_line = soup.select( "table.data tr:nth-of-type(2)")[0].text.strip().split('\n') total, today = first_line[-2:] logging.info('%-26sTotal:%-8s', today, total) return '\n'.join([u"Today: {0}".format(today), "Total: {0}".format(total)])
MIT License
domwoe/21datamarket
datamarket.py
query_registry
python
def query_registry():
    query = json.loads(request.args.get('query'))
    results = sensors.find(query)
    json_docs = []
    for doc in results:
        json_docs.append(doc)
    return json.dumps(json_docs, default=json_util.default)
Query sensor registry
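Hypothetical client-side call to the /query endpoint this view backs; the host, port, and filter document are placeholders, and the query is passed as a JSON-encoded MongoDB filter in the `query` parameter:

import json
import requests

query = {'type': 'temperature'}
resp = requests.get('http://localhost:5000/query',
                    params={'query': json.dumps(query)})
print(resp.json())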
https://github.com/domwoe/21datamarket/blob/caf65020e2fc614b5860517044774aac20eeff53/datamarket.py#L84-L97
from flask import Flask from flask import request from flask.json import jsonify from two1.wallet import Wallet from two1.bitserv.flask import Payment from pymongo import MongoClient from datetime import datetime, timedelta import json from bson import json_util from bson.objectid import ObjectId app = Flask(__name__) wallet = Wallet() payment = Payment(app, wallet) db_client = MongoClient('localhost', 27017) registry = db_client.registry sensors = registry.sensors def valid_publish_request(request): return True def get_publish_price(request): if not valid_publish_request(request): return "invalid publish request" hours = int(request.args.get('hours')) price = hours * 2 if price < 2: price = 2 return price @app.route('/publish', methods=['POST']) @payment.required(get_publish_price) def add_sensor(): sensor = request.data.decode('utf-8') hours = int(request.args.get('hours')) expire_date = datetime.now() + timedelta(hours=hours) sensor = json.loads(sensor) sensor['expireAt'] = expire_date sensor_id = sensors.insert_one(sensor).inserted_id return json.dumps({'sensor_id' : str(sensor_id), 'expireAt': expire_date.strftime("%Y-%m-%d %H:%M:%S")}) @app.route('/renew') @payment.required(get_publish_price) def renew_sensor(): sensor_id = request.args.get('sensor') hours = int(request.args.get('hours')) entry = sensors.find_one(ObjectId(sensor)) expire_date = entry['expireAt'] expire_date = expire_date + timedelta(hours=hours) result = sensors.update_one(ObjectId(sensor),{"$set": {"expireAt": expire_date}}) return jsonify(result) @app.route('/query')
MIT License
googleapis/python-aiplatform
google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py
FeaturestoreOnlineServingServiceClient.read_feature_values
python
def read_feature_values(
    self,
    request: Union[
        featurestore_online_service.ReadFeatureValuesRequest, dict
    ] = None,
    *,
    entity_type: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> featurestore_online_service.ReadFeatureValuesResponse:
    has_flattened_params = any([entity_type])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    if not isinstance(
        request, featurestore_online_service.ReadFeatureValuesRequest
    ):
        request = featurestore_online_service.ReadFeatureValuesRequest(request)
        if entity_type is not None:
            request.entity_type = entity_type
    rpc = self._transport._wrapped_methods[self._transport.read_feature_values]
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("entity_type", request.entity_type),)
        ),
    )
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
    return response
r"""Reads Feature values of a specific entity of an EntityType. For reading feature values of multiple entities of an EntityType, please use StreamingReadFeatureValues. Args: request (Union[google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest, dict]): The request object. Request message for [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. entity_type (str): Required. The resource name of the EntityType for the entity being read. Value format: ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be ``user``. This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse: Response message for [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues].
https://github.com/googleapis/python-aiplatform/blob/c1c2326b2342ab1b6f4c4ce3852e63376eae740d/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py#L365-L447
from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.transport import mtls from google.auth.transport.grpc import SslCredentials from google.auth.exceptions import MutualTLSChannelError from google.oauth2 import service_account from google.cloud.aiplatform_v1.types import featurestore_online_service from .transports.base import ( FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO, ) from .transports.grpc import FeaturestoreOnlineServingServiceGrpcTransport from .transports.grpc_asyncio import ( FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, ) class FeaturestoreOnlineServingServiceClientMeta(type): _transport_registry = ( OrderedDict() ) _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport _transport_registry[ "grpc_asyncio" ] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport def get_transport_class( cls, label: str = None, ) -> Type[FeaturestoreOnlineServingServiceTransport]: if label: return cls._transport_registry[label] return next(iter(cls._transport_registry.values())) class FeaturestoreOnlineServingServiceClient( metaclass=FeaturestoreOnlineServingServiceClientMeta ): @staticmethod def _get_default_mtls_endpoint(api_endpoint): if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "aiplatform.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> FeaturestoreOnlineServingServiceTransport: return self._transport @staticmethod def entity_type_path( project: str, location: str, featurestore: str, entity_type: str, ) -> str: return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format( project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) @staticmethod def parse_entity_type_path(path: str) -> Dict[str, str]: m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/featurestores/(?P<featurestore>.+?)/entityTypes/(?P<entity_type>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def 
parse_common_billing_account_path(path: str) -> Dict[str, str]: m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, FeaturestoreOnlineServingServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = bool( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " "values: never, auto, always" ) if isinstance(transport, FeaturestoreOnlineServingServiceTransport): if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." 
) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, )
Apache License 2.0
hypothesis/h
h/views/api/groups.py
create
python
def create(request):
    appstruct = CreateGroupAPISchema(
        default_authority=request.default_authority,
        group_authority=request.effective_authority,
    ).validate(_json_payload(request))
    group_service = request.find_service(name="group")
    group_create_service = request.find_service(name="group_create")

    groupid = appstruct.get("groupid", None)
    if groupid is not None:
        duplicate_group = group_service.fetch(pubid_or_groupid=groupid)
        if duplicate_group:
            raise HTTPConflict(
                _("group with groupid '{}' already exists").format(groupid)
            )

    group = group_create_service.create_private_group(
        name=appstruct["name"],
        userid=request.user.userid,
        description=appstruct.get("description", None),
        groupid=groupid,
    )
    return GroupJSONPresenter(group, request).asdict(expand=["organization", "scopes"])
Create a group from the POST payload.
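A hypothetical client request against the route this view serves; the token, group name, and host are placeholders:

import requests

resp = requests.post(
    "https://hypothes.is/api/groups",
    headers={"Authorization": "Bearer <developer-api-token>"},
    json={"name": "Reading group", "description": "Private annotations"},
)
print(resp.status_code, resp.json())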
https://github.com/hypothesis/h/blob/1bf1fe34fd471f26a216e682d15ce986dd400fdb/h/views/api/groups.py#L47-L72
from pyramid.httpexceptions import ( HTTPBadRequest, HTTPConflict, HTTPNoContent, HTTPNotFound, ) from h.i18n import TranslationString as _ from h.presenters import GroupJSONPresenter, GroupsJSONPresenter, UserJSONPresenter from h.schemas.api.group import CreateGroupAPISchema, UpdateGroupAPISchema from h.security import Permission from h.views.api.config import api_config from h.views.api.exceptions import PayloadError @api_config( versions=["v1", "v2"], route_name="api.groups", request_method="GET", link_name="groups.read", description="Fetch the user's groups", ) def groups(request): expand = request.GET.getall("expand") or [] list_svc = request.find_service(name="group_list") all_groups = list_svc.request_groups( user=request.user, authority=request.params.get("authority"), document_uri=request.params.get("document_uri"), ) all_groups = GroupsJSONPresenter(all_groups, request).asdicts(expand=expand) return all_groups @api_config( versions=["v1", "v2"], route_name="api.groups", request_method="POST", permission=Permission.Group.CREATE, link_name="group.create", description="Create a new group", )
BSD 2-Clause Simplified License
hexrd/hexrd
hexrd/imageseries/stats.py
max
python
def max(ims, nframes=0):
    nf = _nframes(ims, nframes)
    img = ims[0]
    for i in range(1, nf):
        img = np.maximum(img, ims[i])
    return img
maximum over frames
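A minimal sketch of calling the helper; a plain list of arrays stands in for an imageseries object here, which is an assumption about what _nframes accepts:

import numpy as np
from hexrd.imageseries import stats

frames = [np.random.rand(4, 4) for _ in range(10)]
maxframe = stats.max(frames, nframes=10)  # element-wise maximum over the 10 frames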
https://github.com/hexrd/hexrd/blob/90e9b26e5e5091dd5ecf460b3227072e6d90bcd5/hexrd/imageseries/stats.py#L33-L39
import numpy as np
from psutil import virtual_memory

vmem = virtual_memory()
STATS_BUFFER = int(0.5*vmem.available)
del vmem
BSD 3-Clause New or Revised License
yoseflab/scvi-tools
scvi/_settings.py
ScviConfig.batch_size
python
def batch_size(self) -> int:
    return self._batch_size
Minibatch size for loading data into the model. This is only used after a model is trained. Trainers have specific `batch_size` parameters.
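Usage sketch, assuming (consistently with the ScviConfig constructor in the context below) that an instance of this class is exposed as scvi.settings:

import scvi

print(scvi.settings.batch_size)  # 128 by default
scvi.settings.batch_size = 256   # picked up by post-training data loaders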
https://github.com/yoseflab/scvi-tools/blob/60957c6dcc7b7d6ee9321ad640d2b1492c97662d/scvi/_settings.py#L66-L73
import logging from pathlib import Path from typing import Union import pytorch_lightning as pl import torch from rich.console import Console from rich.logging import RichHandler from ._compat import Literal scvi_logger = logging.getLogger("scvi") class ScviConfig: def __init__( self, verbosity: int = logging.INFO, progress_bar_style: Literal["rich", "tqdm"] = "tqdm", batch_size: int = 128, seed: int = 0, logging_dir: str = "./scvi_log/", dl_num_workers: int = 0, dl_pin_memory_gpu_training: bool = True, ): self.verbosity = verbosity self.seed = seed self.batch_size = batch_size if progress_bar_style not in ["rich", "tqdm"]: raise ValueError("Progress bar style must be in ['rich', 'tqdm']") self.progress_bar_style = progress_bar_style self.logging_dir = logging_dir self.dl_num_workers = dl_num_workers self.dl_pin_memory_gpu_training = dl_pin_memory_gpu_training self._num_threads = None @property
BSD 3-Clause New or Revised License
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/media_player/bluesound.py
BluesoundPlayer.source
python
def source(self):
    from urllib import parse

    if (self._status is None or
            (self.is_grouped and not self.is_master)):
        return None

    current_service = self._status.get('service', '')
    if current_service == '':
        return ''
    stream_url = self._status.get('streamUrl', '')

    if self._status.get('is_preset', '') == '1' and stream_url != '':
        items = [x for x in self._preset_items
                 if 'url2' in x and parse.unquote(x['url2']) == stream_url]
        if items:
            return items[0]['title']

    title = self._status.get('title1', '').lower()
    if title == 'bluetooth' or stream_url == 'Capture:hw:2,0/44100/16/2':
        items = [x for x in self._capture_items
                 if x['url'] == "Capture%3Abluez%3Abluetooth"]
        if items:
            return items[0]['title']

    items = [x for x in self._capture_items if x['url'] == stream_url]
    if items:
        return items[0]['title']

    if stream_url[:8] == 'Capture:':
        stream_url = stream_url[8:]

    idx = BluesoundPlayer._try_get_index(stream_url, ':')
    if idx > 0:
        stream_url = stream_url[:idx]

    for item in self._capture_items:
        url = parse.unquote(item['url'])
        if url[:8] == 'Capture:':
            url = url[8:]
        idx = BluesoundPlayer._try_get_index(url, ':')
        if idx > 0:
            url = url[:idx]
        if url.lower() == stream_url.lower():
            return item['title']

    items = [x for x in self._capture_items
             if x['name'] == current_service]
    if items:
        return items[0]['title']

    items = [x for x in self._services_items
             if x['name'] == current_service]
    if items:
        return items[0]['title']

    if self._status.get('streamUrl', '') != '':
        _LOGGER.debug("Couldn't find source of stream URL: %s",
                      self._status.get('streamUrl', ''))
    return None
Name of the current input source.
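A toy, self-contained illustration of the preset-matching branch of this property (the preset list and stream URL are made up):

from urllib import parse

preset_items = [{'title': 'Radio Paradise',
                 'url2': 'http%3A%2F%2Fstream.example%2Frp'}]
stream_url = 'http://stream.example/rp'

items = [x for x in preset_items
         if 'url2' in x and parse.unquote(x['url2']) == stream_url]
print(items[0]['title'] if items else None)  # Radio Paradise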
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/media_player/bluesound.py#L678-L743
import asyncio from asyncio.futures import CancelledError from datetime import timedelta import logging import aiohttp from aiohttp.client_exceptions import ClientError from aiohttp.hdrs import CONNECTION, KEEP_ALIVE import async_timeout import voluptuous as vol from homeassistant.components.media_player import ( ATTR_MEDIA_ENQUEUE, DOMAIN, MEDIA_TYPE_MUSIC, PLATFORM_SCHEMA, SUPPORT_CLEAR_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK, SUPPORT_SELECT_SOURCE, SUPPORT_SHUFFLE_SET, SUPPORT_STOP, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP, MediaPlayerDevice) from homeassistant.const import ( ATTR_ENTITY_ID, CONF_HOST, CONF_HOSTS, CONF_NAME, CONF_PORT, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING) from homeassistant.core import callback from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.event import async_track_time_interval from homeassistant.util import Throttle import homeassistant.util.dt as dt_util REQUIREMENTS = ['xmltodict==0.11.0'] _LOGGER = logging.getLogger(__name__) ATTR_MASTER = 'master' DATA_BLUESOUND = 'bluesound' DEFAULT_PORT = 11000 NODE_OFFLINE_CHECK_TIMEOUT = 180 NODE_RETRY_INITIATION = timedelta(minutes=3) SERVICE_CLEAR_TIMER = 'bluesound_clear_sleep_timer' SERVICE_JOIN = 'bluesound_join' SERVICE_SET_TIMER = 'bluesound_set_sleep_timer' SERVICE_UNJOIN = 'bluesound_unjoin' STATE_GROUPED = 'grouped' SYNC_STATUS_INTERVAL = timedelta(minutes=5) UPDATE_CAPTURE_INTERVAL = timedelta(minutes=30) UPDATE_PRESETS_INTERVAL = timedelta(minutes=30) UPDATE_SERVICES_INTERVAL = timedelta(minutes=30) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_HOSTS): vol.All(cv.ensure_list, [{ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, }]) }) BS_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, }) BS_JOIN_SCHEMA = BS_SCHEMA.extend({ vol.Required(ATTR_MASTER): cv.entity_id, }) SERVICE_TO_METHOD = { SERVICE_JOIN: { 'method': 'async_join', 'schema': BS_JOIN_SCHEMA}, SERVICE_UNJOIN: { 'method': 'async_unjoin', 'schema': BS_SCHEMA}, SERVICE_SET_TIMER: { 'method': 'async_increase_timer', 'schema': BS_SCHEMA}, SERVICE_CLEAR_TIMER: { 'method': 'async_clear_timer', 'schema': BS_SCHEMA} } def _add_player(hass, async_add_devices, host, port=None, name=None): if host in [x.host for x in hass.data[DATA_BLUESOUND]]: return @callback def _init_player(event=None): hass.async_add_job(player.async_init()) @callback def _start_polling(event=None): player.start_polling() @callback def _stop_polling(): player.stop_polling() @callback def _add_player_cb(): async_add_devices([player]) _LOGGER.info("Added device with name: %s", player.name) if hass.is_running: _start_polling() else: hass.bus.async_listen_once( EVENT_HOMEASSISTANT_START, _start_polling) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_polling) player = BluesoundPlayer(hass, host, port, name, _add_player_cb) hass.data[DATA_BLUESOUND].append(player) if hass.is_running: _init_player() else: hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _init_player) async def async_setup_platform( hass, config, async_add_devices, discovery_info=None): if DATA_BLUESOUND not in hass.data: hass.data[DATA_BLUESOUND] = [] if discovery_info: _add_player(hass, async_add_devices, discovery_info.get(CONF_HOST), 
discovery_info.get(CONF_PORT, None)) return hosts = config.get(CONF_HOSTS, None) if hosts: for host in hosts: _add_player( hass, async_add_devices, host.get(CONF_HOST), host.get(CONF_PORT), host.get(CONF_NAME)) async def async_service_handler(service): method = SERVICE_TO_METHOD.get(service.service) if not method: return params = {key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID} entity_ids = service.data.get(ATTR_ENTITY_ID) if entity_ids: target_players = [player for player in hass.data[DATA_BLUESOUND] if player.entity_id in entity_ids] else: target_players = hass.data[DATA_BLUESOUND] for player in target_players: await getattr(player, method['method'])(**params) for service in SERVICE_TO_METHOD: schema = SERVICE_TO_METHOD[service]['schema'] hass.services.async_register( DOMAIN, service, async_service_handler, schema=schema) class BluesoundPlayer(MediaPlayerDevice): def __init__(self, hass, host, port=None, name=None, init_callback=None): self.host = host self._hass = hass self.port = port self._polling_session = async_get_clientsession(hass) self._polling_task = None self._name = name self._icon = None self._capture_items = [] self._services_items = [] self._preset_items = [] self._sync_status = {} self._status = None self._last_status_update = None self._is_online = False self._retry_remove = None self._lastvol = None self._master = None self._is_master = False self._group_name = None self._init_callback = init_callback if self.port is None: self.port = DEFAULT_PORT class _TimeoutException(Exception): pass @staticmethod def _try_get_index(string, search_string): try: return string.index(search_string) except ValueError: return -1 async def force_update_sync_status( self, on_updated_cb=None, raise_timeout=False): resp = None try: resp = await self.send_bluesound_command( 'SyncStatus', raise_timeout, raise_timeout) except Exception: raise if not resp: return None self._sync_status = resp['SyncStatus'].copy() if not self._name: self._name = self._sync_status.get('@name', self.host) if not self._icon: self._icon = self._sync_status.get('@icon', self.host) master = self._sync_status.get('master', None) if master is not None: self._is_master = False master_host = master.get('#text') master_device = [device for device in self._hass.data[DATA_BLUESOUND] if device.host == master_host] if master_device and master_host != self.host: self._master = master_device[0] else: self._master = None _LOGGER.error("Master not found %s", master_host) else: if self._master is not None: self._master = None slaves = self._sync_status.get('slave', None) self._is_master = slaves is not None if on_updated_cb: on_updated_cb() return True async def _start_poll_command(self): try: while True: await self.async_update_status() except (asyncio.TimeoutError, ClientError, BluesoundPlayer._TimeoutException): _LOGGER.info("Node %s is offline, retrying later", self._name) await asyncio.sleep( NODE_OFFLINE_CHECK_TIMEOUT, loop=self._hass.loop) self.start_polling() except CancelledError: _LOGGER.debug("Stopping the polling of node %s", self._name) except Exception: _LOGGER.exception("Unexpected error in %s", self._name) raise def start_polling(self): self._polling_task = self._hass.async_add_job( self._start_poll_command()) def stop_polling(self): self._polling_task.cancel() async def async_init(self, triggered=None): try: if self._retry_remove is not None: self._retry_remove() self._retry_remove = None await self.force_update_sync_status( self._init_callback, True) except (asyncio.TimeoutError, 
ClientError): _LOGGER.info("Node %s is offline, retrying later", self.host) self._retry_remove = async_track_time_interval( self._hass, self.async_init, NODE_RETRY_INITIATION) except Exception: _LOGGER.exception( "Unexpected when initiating error in %s", self.host) raise async def async_update(self): if not self._is_online: return await self.async_update_sync_status() await self.async_update_presets() await self.async_update_captures() await self.async_update_services() async def send_bluesound_command( self, method, raise_timeout=False, allow_offline=False): import xmltodict if not self._is_online and not allow_offline: return if method[0] == '/': method = method[1:] url = "http://{}:{}/{}".format(self.host, self.port, method) _LOGGER.debug("Calling URL: %s", url) response = None try: websession = async_get_clientsession(self._hass) with async_timeout.timeout(10, loop=self._hass.loop): response = await websession.get(url) if response.status == 200: result = await response.text() if len(result) < 1: data = None else: data = xmltodict.parse(result) elif response.status == 595: _LOGGER.info("Status 595 returned, treating as timeout") raise BluesoundPlayer._TimeoutException() else: _LOGGER.error("Error %s on %s", response.status, url) return None except (asyncio.TimeoutError, aiohttp.ClientError): if raise_timeout: _LOGGER.info("Timeout: %s", self.host) raise else: _LOGGER.debug("Failed communicating: %s", self.host) return None return data async def async_update_status(self): import xmltodict response = None url = 'Status' etag = '' if self._status is not None: etag = self._status.get('@etag', '') if etag != '': url = 'Status?etag={}&timeout=120.0'.format(etag) url = "http://{}:{}/{}".format(self.host, self.port, url) _LOGGER.debug("Calling URL: %s", url) try: with async_timeout.timeout(125, loop=self._hass.loop): response = await self._polling_session.get( url, headers={CONNECTION: KEEP_ALIVE}) if response.status == 200: result = await response.text() self._is_online = True self._last_status_update = dt_util.utcnow() self._status = xmltodict.parse(result)['status'].copy() group_name = self._status.get('groupName', None) if group_name != self._group_name: _LOGGER.debug( "Group name change detected on device: %s", self.host) self._group_name = group_name await asyncio.sleep(1, loop=self._hass.loop) await self.async_trigger_sync_on_all() elif self.is_grouped: await self.force_update_sync_status() self.async_schedule_update_ha_state() elif response.status == 595: _LOGGER.info("Status 595 returned, treating as timeout") raise BluesoundPlayer._TimeoutException() else: _LOGGER.error("Error %s on %s. 
Trying one more time", response.status, url) except (asyncio.TimeoutError, ClientError): self._is_online = False self._last_status_update = None self._status = None self.async_schedule_update_ha_state() _LOGGER.info( "Client connection error, marking %s as offline", self._name) raise async def async_trigger_sync_on_all(self): _LOGGER.debug("Trigger sync status on all devices") for player in self._hass.data[DATA_BLUESOUND]: await player.force_update_sync_status() @Throttle(SYNC_STATUS_INTERVAL) async def async_update_sync_status( self, on_updated_cb=None, raise_timeout=False): await self.force_update_sync_status( on_updated_cb, raise_timeout=False) @Throttle(UPDATE_CAPTURE_INTERVAL) async def async_update_captures(self): resp = await self.send_bluesound_command( 'RadioBrowse?service=Capture') if not resp: return self._capture_items = [] def _create_capture_item(item): self._capture_items.append({ 'title': item.get('@text', ''), 'name': item.get('@text', ''), 'type': item.get('@serviceType', 'Capture'), 'image': item.get('@image', ''), 'url': item.get('@URL', '') }) if 'radiotime' in resp and 'item' in resp['radiotime']: if isinstance(resp['radiotime']['item'], list): for item in resp['radiotime']['item']: _create_capture_item(item) else: _create_capture_item(resp['radiotime']['item']) return self._capture_items @Throttle(UPDATE_PRESETS_INTERVAL) async def async_update_presets(self): resp = await self.send_bluesound_command('Presets') if not resp: return self._preset_items = [] def _create_preset_item(item): self._preset_items.append({ 'title': item.get('@name', ''), 'name': item.get('@name', ''), 'type': 'preset', 'image': item.get('@image', ''), 'is_raw_url': True, 'url2': item.get('@url', ''), 'url': 'Preset?id={}'.format(item.get('@id', '')) }) if 'presets' in resp and 'preset' in resp['presets']: if isinstance(resp['presets']['preset'], list): for item in resp['presets']['preset']: _create_preset_item(item) else: _create_preset_item(resp['presets']['preset']) return self._preset_items @Throttle(UPDATE_SERVICES_INTERVAL) async def async_update_services(self): resp = await self.send_bluesound_command('Services') if not resp: return self._services_items = [] def _create_service_item(item): self._services_items.append({ 'title': item.get('@displayname', ''), 'name': item.get('@name', ''), 'type': item.get('@type', ''), 'image': item.get('@icon', ''), 'url': item.get('@name', '') }) if 'services' in resp and 'service' in resp['services']: if isinstance(resp['services']['service'], list): for item in resp['services']['service']: _create_service_item(item) else: _create_service_item(resp['services']['service']) return self._services_items @property def media_content_type(self): return MEDIA_TYPE_MUSIC @property def state(self): if self._status is None: return STATE_OFF if self.is_grouped and not self.is_master: return STATE_GROUPED status = self._status.get('state', None) if status == 'pause' or status == 'stop': return STATE_PAUSED elif status == 'stream' or status == 'play': return STATE_PLAYING return STATE_IDLE @property def media_title(self): if (self._status is None or (self.is_grouped and not self.is_master)): return None return self._status.get('title1', None) @property def media_artist(self): if self._status is None: return None if self.is_grouped and not self.is_master: return self._group_name artist = self._status.get('artist', None) if not artist: artist = self._status.get('title2', None) return artist @property def media_album_name(self): if (self._status is None or 
(self.is_grouped and not self.is_master)): return None album = self._status.get('album', None) if not album: album = self._status.get('title3', None) return album @property def media_image_url(self): if (self._status is None or (self.is_grouped and not self.is_master)): return None url = self._status.get('image', None) if not url: return if url[0] == '/': url = "http://{}:{}{}".format(self.host, self.port, url) return url @property def media_position(self): if (self._status is None or (self.is_grouped and not self.is_master)): return None mediastate = self.state if self._last_status_update is None or mediastate == STATE_IDLE: return None position = self._status.get('secs', None) if position is None: return None position = float(position) if mediastate == STATE_PLAYING: position += (dt_util.utcnow() - self._last_status_update).total_seconds() return position @property def media_duration(self): if (self._status is None or (self.is_grouped and not self.is_master)): return None duration = self._status.get('totlen', None) if duration is None: return None return float(duration) @property def media_position_updated_at(self): return self._last_status_update @property def volume_level(self): volume = self._status.get('volume', None) if self.is_grouped: volume = self._sync_status.get('@volume', None) if volume is not None: return int(volume) / 100 return None @property def is_volume_muted(self): volume = self.volume_level if not volume: return None return volume < 0.001 and volume >= 0 @property def name(self): return self._name @property def icon(self): return self._icon @property def source_list(self): if (self._status is None or (self.is_grouped and not self.is_master)): return None sources = [] for source in self._preset_items: sources.append(source['title']) for source in [x for x in self._services_items if x['type'] == 'LocalMusic' or x['type'] == 'RadioService']: sources.append(source['title']) for source in self._capture_items: sources.append(source['title']) return sources @property
MIT License
llsourcell/chatbot_tutorial
chatbot/chatbot.py
Chatbot.main
python
def main(self, args=None):
    print('Welcome to DeepQA v0.1 !')
    print()
    print('TensorFlow detected: v{}'.format(tf.__version__))

    self.args = self.parseArgs(args)
    if not self.args.rootDir:
        self.args.rootDir = os.getcwd()

    self.loadModelParams()

    self.textData = TextData(self.args)
    if self.args.createDataset:
        print('Dataset created! Thanks for using this program')
        return

    with tf.device(self.getDevice()):
        self.model = Model(self.args, self.textData)

    self.writer = tf.summary.FileWriter(self._getSummaryName())
    self.saver = tf.train.Saver(max_to_keep=200)

    self.sess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False)
    )
    if self.args.debug:
        self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)
        self.sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)

    print('Initialize variables...')
    self.sess.run(tf.global_variables_initializer())

    if self.args.test != Chatbot.TestMode.ALL:
        self.managePreviousModel(self.sess)

    if self.args.initEmbeddings:
        self.loadEmbedding(self.sess)

    if self.args.test:
        if self.args.test == Chatbot.TestMode.INTERACTIVE:
            self.mainTestInteractive(self.sess)
        elif self.args.test == Chatbot.TestMode.ALL:
            print('Start predicting...')
            self.predictTestset(self.sess)
            print('All predictions done')
        elif self.args.test == Chatbot.TestMode.DAEMON:
            print('Daemon mode, running in background...')
        else:
            raise RuntimeError('Unknown test mode: {}'.format(self.args.test))
    else:
        self.mainTrain(self.sess)

    if self.args.test != Chatbot.TestMode.DAEMON:
        self.sess.close()
        print("The End! Thanks for using this program")
Launch the training and/or the interactive mode
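A hedged launch sketch mirroring the CLI flags defined by parseArgs in the context below; the tag and epoch count are placeholders:

from chatbot.chatbot import Chatbot

bot = Chatbot()
bot.main(['--modelTag', 'demo', '--numEpochs', '1'])          # short training run
# bot.main(['--modelTag', 'demo', '--test', 'interactive'])   # then chat with the saved model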
https://github.com/llsourcell/chatbot_tutorial/blob/cdf824ad0c5233aa66c50214ea074b92c2563715/chatbot/chatbot.py#L139-L218
import argparse import configparser import datetime import os import tensorflow as tf import numpy as np import math from tqdm import tqdm from tensorflow.python import debug as tf_debug from chatbot.textdata import TextData from chatbot.model import Model class Chatbot: class TestMode: ALL = 'all' INTERACTIVE = 'interactive' DAEMON = 'daemon' def __init__(self): self.args = None self.textData = None self.model = None self.writer = None self.saver = None self.modelDir = '' self.globStep = 0 self.sess = None self.MODEL_DIR_BASE = 'save' + os.sep + 'model' self.MODEL_NAME_BASE = 'model' self.MODEL_EXT = '.ckpt' self.CONFIG_FILENAME = 'params.ini' self.CONFIG_VERSION = '0.5' self.TEST_IN_NAME = 'data' + os.sep + 'test' + os.sep + 'samples.txt' self.TEST_OUT_SUFFIX = '_predictions.txt' self.SENTENCES_PREFIX = ['Q: ', 'A: '] @staticmethod def parseArgs(args): parser = argparse.ArgumentParser() globalArgs = parser.add_argument_group('Global options') globalArgs.add_argument('--test', nargs='?', choices=[Chatbot.TestMode.ALL, Chatbot.TestMode.INTERACTIVE, Chatbot.TestMode.DAEMON], const=Chatbot.TestMode.ALL, default=None, help='if present, launch the program try to answer all sentences from data/test/ with' ' the defined model(s), in interactive mode, the user can wrote his own sentences,' ' use daemon mode to integrate the chatbot in another program') globalArgs.add_argument('--createDataset', action='store_true', help='if present, the program will only generate the dataset from the corpus (no training/testing)') globalArgs.add_argument('--playDataset', type=int, nargs='?', const=10, default=None, help='if set, the program will randomly play some samples(can be use conjointly with createDataset if this is the only action you want to perform)') globalArgs.add_argument('--reset', action='store_true', help='use this if you want to ignore the previous model present on the model directory (Warning: the model will be destroyed with all the folder content)') globalArgs.add_argument('--verbose', action='store_true', help='When testing, will plot the outputs at the same time they are computed') globalArgs.add_argument('--debug', action='store_true', help='run DeepQA with Tensorflow debug mode. 
Read TF documentation for more details on this.') globalArgs.add_argument('--keepAll', action='store_true', help='If this option is set, all saved model will be kept (Warning: make sure you have enough free disk space or increase saveEvery)') globalArgs.add_argument('--modelTag', type=str, default=None, help='tag to differentiate which model to store/load') globalArgs.add_argument('--rootDir', type=str, default=None, help='folder where to look for the models and data') globalArgs.add_argument('--watsonMode', action='store_true', help='Inverse the questions and answer when training (the network try to guess the question)') globalArgs.add_argument('--autoEncode', action='store_true', help='Randomly pick the question or the answer and use it both as input and output') globalArgs.add_argument('--device', type=str, default=None, help='\'gpu\' or \'cpu\' (Warning: make sure you have enough free RAM), allow to choose on which hardware run the model') globalArgs.add_argument('--seed', type=int, default=None, help='random seed for replication') datasetArgs = parser.add_argument_group('Dataset options') datasetArgs.add_argument('--corpus', choices=TextData.corpusChoices(), default=TextData.corpusChoices()[0], help='corpus on which extract the dataset.') datasetArgs.add_argument('--datasetTag', type=str, default='', help='add a tag to the dataset (file where to load the vocabulary and the precomputed samples, not the original corpus). Useful to manage multiple versions. Also used to define the file used for the lightweight format.') datasetArgs.add_argument('--ratioDataset', type=float, default=1.0, help='ratio of dataset used to avoid using the whole dataset') datasetArgs.add_argument('--maxLength', type=int, default=10, help='maximum length of the sentence (for input and output), define number of maximum step of the RNN') datasetArgs.add_argument('--filterVocab', type=int, default=1, help='remove rarelly used words (by default words used only once). 0 to keep all words.') datasetArgs.add_argument('--skipLines', action='store_true', help='Generate training samples by only using even conversation lines as questions (and odd lines as answer). Useful to train the network on a particular person.') datasetArgs.add_argument('--vocabularySize', type=int, default=40000, help='Limit the number of words in the vocabulary (0 for unlimited)') nnArgs = parser.add_argument_group('Network options', 'architecture related option') nnArgs.add_argument('--hiddenSize', type=int, default=512, help='number of hidden units in each RNN cell') nnArgs.add_argument('--numLayers', type=int, default=2, help='number of rnn layers') nnArgs.add_argument('--softmaxSamples', type=int, default=0, help='Number of samples in the sampled softmax loss function. 
A value of 0 deactivates sampled softmax') nnArgs.add_argument('--initEmbeddings', action='store_true', help='if present, the program will initialize the embeddings with pre-trained word2vec vectors') nnArgs.add_argument('--embeddingSize', type=int, default=64, help='embedding size of the word representation') nnArgs.add_argument('--embeddingSource', type=str, default="GoogleNews-vectors-negative300.bin", help='embedding file to use for the word representation') trainingArgs = parser.add_argument_group('Training options') trainingArgs.add_argument('--numEpochs', type=int, default=30, help='maximum number of epochs to run') trainingArgs.add_argument('--saveEvery', type=int, default=2000, help='nb of mini-batch step before creating a model checkpoint') trainingArgs.add_argument('--batchSize', type=int, default=256, help='mini-batch size') trainingArgs.add_argument('--learningRate', type=float, default=0.002, help='Learning rate') trainingArgs.add_argument('--dropout', type=float, default=0.9, help='Dropout rate (keep probabilities)') return parser.parse_args(args)
Apache License 2.0
thinkboxsoftware/deadline
Custom/events/Zabbix/API/zabbix/sender.py
ZabbixSender.__get_response
python
def __get_response(self, connection):
    result = None
    response_header = self.__receive(connection, 13)
    logger.debug('{0}.__get_response.response_header: {1}'.format(self.cn, response_header))

    if not response_header.startswith('ZBXD\x01') or len(response_header) != 13:
        logger.debug('{0}.__get_response: Wrong zabbix response'.format(self.cn))
        result = False
    else:
        response_len = struct.unpack('<Q', response_header[5:])[0]
        try:
            response_body = connection.recv(response_len)
        finally:
            connection.close()
        result = json.loads(response_body)
        logger.debug('{0}.__get_response: {1}'.format(self.cn, result))

    return result
Get response from zabbix server, reads from self.socket.

Returns:
    str: JSON response from zabbix server
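A toy illustration of the wire format this method parses: a 'ZBXD\x01' signature, an 8-byte little-endian length, then a JSON body (bytes literals are used here for clarity, while the module itself targets Python 2 strings):

import json
import struct

body = json.dumps({'response': 'success', 'info': 'processed: 1'}).encode()
packet = b'ZBXD\x01' + struct.pack('<Q', len(body)) + body

header = packet[:13]
assert header.startswith(b'ZBXD\x01') and len(header) == 13
length = struct.unpack('<Q', header[5:])[0]
print(json.loads(packet[13:13 + length]))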
https://github.com/thinkboxsoftware/deadline/blob/15c583b215cc1658458e261d267c5800402321b1/Custom/events/Zabbix/API/zabbix/sender.py#L180-L206
import base64 import ConfigParser import json import logging import socket import StringIO import struct import sys import time import urllib2 logger = logging.getLogger(__name__) class ZabbixMetric(object): def __init__(self, host, key, value, clock = None): self.host = str(host) self.key = str(key) self.value = str(value) self.clock = clock if clock else str(int(time.time())) def __repr__(self): result = json.dumps(self.__dict__) logger.debug('{0}: {1}'.format(self.__class__.__name__, result)) return result class ZabbixSender(object): def __init__(self, zabbix_server = '127.0.0.1', zabbix_port = 10051, use_config = None): self.cn = self.__class__.__name__ if use_config: self.zabbix_uri = self.__load_from_config(use_config) else: self.zabbix_uri = [ (zabbix_server, zabbix_port) ] logger.debug('{0}({1})'.format(self.cn, self.zabbix_uri)) def __load_from_config(self, config_file): if config_file and isinstance(config_file, bool): config_file = '/etc/zabbix/zabbix_agentd.conf' result = None try: with open(config_file, 'r') as f: config_file_data = "[root]\n" + f.read() except: result = False exit() config_file_fp = StringIO.StringIO(config_file_data) config = ConfigParser.RawConfigParser({'Server':'127.0.0.1', 'Port':10051}) config.readfp(config_file_fp) zabbix_server = config.get('root','Server') zabbix_port = config.get('root','Port') zabbix_server_list = [ server.strip() for server in zabbix_server.split(',')] result = [ (server, zabbix_port) for server in zabbix_server_list ] return result def __receive(self, socket, count): buf = '' while len(buf) < count: chunk = socket.recv(count - len(buf)) if not chunk: break buf += chunk return buf def __create_messages(self, metrics_array): metrics = [] for m in metrics_array: metrics.append(str(m)) logger.debug('{0}.__create_messages: {1}'.format(self.cn, metrics)) return metrics def __create_request(self, messages): request = '{{"request":"sender data","data":[{0}]}}'.format(','.join(messages)) logger.debug('{0}.__create_request: {1}'.format(self.cn, request)) return request def __create_packet(self, request): data_len = struct.pack('<Q', len(request)) packet = 'ZBXD\x01'+ data_len + request logger.debug('{0}.__create_packet (str): {1}'.format(self.cn, packet)) logger.debug('{0}.__create_packet (hex): {1}'.format(self.cn, ':'.join(x.encode('hex') for x in packet))) return packet
Apache License 2.0
alejandrofrias/case-conversion
case_conversion/utils.py
segment_string
python
def segment_string(string: str) -> Tuple[List[Optional[str]], str, bool]:
    words: List[Optional[str]] = []
    separator = ""

    curr_i = 1
    seq_i = 0
    prev_i = string[0:1]

    was_upper = False
    if string.isupper():
        string = string.lower()
        was_upper = True

    while curr_i <= len(string):
        char = string[curr_i : curr_i + 1]
        split = False
        if curr_i < len(string):
            if char_is_upper(char):
                split = True
            elif not char_is_sep(char) and char_is_sep(prev_i):
                split = True
            elif char_is_sep(char) and not char_is_sep(prev_i):
                split = True
        else:
            split = True

        if split:
            if not char_is_sep(prev_i):
                words.append(string[seq_i:curr_i])
            else:
                if not separator:
                    separator = string[seq_i : seq_i + 1]
                words.append(None)
            seq_i = curr_i

        curr_i += 1
        prev_i = char

    return words, separator, was_upper
Segment a string on separators into a list of words. Arguments: string (str): The string to process Returns: list of Optional[str]: The words the string was split into, with None marking separator positions str: The separator character found between words bool: Whether the whole string was upper-case
https://github.com/alejandrofrias/case-conversion/blob/a62128c14691e87865435ae56c15119f38de65c5/case_conversion/utils.py#L213-L282
import unicodedata from typing import Iterator, List, Optional, Tuple from .types import Case, InvalidAcronymError def get_rubstring_ranges(a_str: str, sub: str) -> Iterator[Tuple[int, int]]: start = 0 sub_len = len(sub) while True: start = a_str.find(sub, start) if start == -1: return yield (start, start + sub_len) start += 1 def char_is_sep(a_char: str) -> bool: return not ( char_is_upper(a_char) or char_is_lower(a_char) or char_is_decimal(a_char) ) def char_is_decimal(a_char: str) -> bool: return unicodedata.category(a_char) == "Nd" def char_is_lower(a_char: str) -> bool: return unicodedata.category(a_char) == "Ll" def char_is_upper(a_char: str) -> bool: return unicodedata.category(a_char) == "Lu" def is_upper(a_string: str) -> bool: return len(a_string) == 1 and char_is_upper(a_string) def is_valid_acronym(a_string: str) -> bool: if not a_string: return False for a_char in a_string: if char_is_sep(a_char): return False return True def determine_case(was_all_upper: bool, words: List[str], string: str) -> Case: case_type = Case.UNKOWN if was_all_upper: case_type = Case.UPPER elif string.islower(): case_type = Case.LOWER elif words: camel_case = words[0].islower() pascal_case = words[0].istitle() or words[0].isupper() if camel_case or pascal_case: for word in words[1:]: c = word.istitle() or word.isupper() camel_case &= c pascal_case &= c if not c: break if camel_case: case_type = Case.CAMEL elif pascal_case: case_type = Case.PASCAL else: case_type = Case.MIXED return case_type def advanced_acronym_detection( s: int, i: int, words: List[str], acronyms: List[str] ) -> int: acr_str = "".join(words[s:i]) range_list: List[Tuple[int, int]] = [] not_range = set(range(len(acr_str))) for acr in acronyms: for (start, end) in get_rubstring_ranges(acr_str, acr): for r in range_list: if start < r[1] and end > r[0]: break else: range_list.append((start, end)) for j in range(start, end): not_range.remove(j) for nr in not_range: range_list.append((nr, nr + 1)) range_list.sort() for _ in range(s, i): del words[s] for j in range(len(range_list)): r = range_list[j] words.insert(s + j, acr_str[r[0] : r[1]]) return s + len(range_list) - 1 def simple_acronym_detection(s: int, i: int, words: List[str], *args) -> int: acr_str = "".join(words[s:i]) for _ in range(s, i): del words[s] words.insert(s, "".join(acr_str)) return s def sanitize_acronyms(unsafe_acronyms: List[str]) -> List[str]: acronyms = [] for acr in unsafe_acronyms: if is_valid_acronym(acr): acronyms.append(acr.upper()) else: raise InvalidAcronymError(acr) return acronyms def normalize_words(words: List[str], acronyms: List[str]) -> List[str]: normalized = [] for word in words: if word.upper() in acronyms: normalized.append(word.upper()) else: if not word.isupper(): normalized.append(word.capitalize()) return normalized
MIT License
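As an aside, the split decisions in segment_string reduce to Unicode general-category checks (char_is_upper, char_is_lower, char_is_decimal, char_is_sep). A small stand-alone illustration of that classification, assuming nothing beyond the standard library; the helper name and sample string are invented for this sketch.

```python
# Illustrative character classification mirroring char_is_upper / char_is_lower /
# char_is_decimal; anything else acts as a word separator.
import unicodedata

def classify(ch: str) -> str:
    cat = unicodedata.category(ch)
    if cat == "Lu":
        return "upper"
    if cat == "Ll":
        return "lower"
    if cat == "Nd":
        return "digit"
    return "separator"  # everything else is treated as a word boundary

print([classify(c) for c in "fooBar_9"])
# ['lower', 'lower', 'lower', 'upper', 'lower', 'lower', 'separator', 'digit']
```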
stanfordnlp/python-stanford-corenlp
corenlp/annotator.py
Annotator.__init__
python
def __init__(self, host="", port=8432): Process.__init__(self) self.host, self.port = host, port self._Handler.annotator = self
Launches a server endpoint to communicate with CoreNLP
https://github.com/stanfordnlp/python-stanford-corenlp/blob/57a739e3aa97942e443bea63bd413afcbe5e7f3f/corenlp/annotator.py#L117-L123
import io from multiprocessing import Process from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer from six.moves import http_client as HTTPStatus from corenlp_protobuf import Document, parseFromDelimitedString, writeToDelimitedString class Annotator(Process): @property def name(self): raise NotImplementedError() @property def requires(self): raise NotImplementedError() @property def provides(self): raise NotImplementedError() def annotate(self, ann): raise NotImplementedError() @property def properties(self): return { "customAnnotatorClass.{}".format(self.name): "edu.stanford.nlp.pipeline.GenericWebServiceAnnotator", "generic.endpoint": "http://{}:{}".format(self.host, self.port), "generic.requires": ",".join(self.requires), "generic.provides": ",".join(self.provides), } class _Handler(BaseHTTPRequestHandler): annotator = None def __init__(self, request, client_address, server): BaseHTTPRequestHandler.__init__(self, request, client_address, server) def do_GET(self): if not self.path.endswith("/"): self.path += "/" if self.path == "/ping/": msg = "pong".encode("UTF-8") self.send_response(HTTPStatus.OK) self.send_header("Content-Type", "text/application") self.send_header("Content-Length", len(msg)) self.end_headers() self.wfile.write(msg) else: self.send_response(HTTPStatus.BAD_REQUEST) self.end_headers() def do_POST(self): if not self.path.endswith("/"): self.path += "/" if self.path == "/annotate/": length = int(self.headers.get('content-length')) msg = self.rfile.read(length) doc = Document() parseFromDelimitedString(doc, msg) self.annotator.annotate(doc) with io.BytesIO() as stream: writeToDelimitedString(doc, stream) msg = stream.getvalue() self.send_response(HTTPStatus.OK) self.send_header("Content-Type", "application/x-protobuf") self.send_header("Content-Length", len(msg)) self.end_headers() self.wfile.write(msg) else: self.send_response(HTTPStatus.BAD_REQUEST) self.end_headers()
MIT License
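A hypothetical concrete annotator, sketched against the abstract interface shown above. The annotator name, the required/provided annotation keys, the import path, and the Document field access are assumptions made for illustration, not examples taken from the package's documentation.

```python
# Hypothetical subclass of Annotator; names and proto field access are assumed.
from corenlp.annotator import Annotator  # import path inferred from the file location

class LowercaseLemmaAnnotator(Annotator):
    @property
    def name(self):
        return "lowercaselemma"

    @property
    def requires(self):
        return ["tokenize"]

    @property
    def provides(self):
        return ["lemma"]

    def annotate(self, ann):
        # `ann` is a corenlp_protobuf Document; mutate it in place.
        for sentence in ann.sentence:
            for token in sentence.token:
                token.lemma = token.word.lower()

# annotator = LowercaseLemmaAnnotator(port=8432)
# annotator.start()              # runs the endpoint in a separate process
# props = annotator.properties   # merged into the CoreNLP server's properties
```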
phillipmartin/virustotal2
virustotal2.py
VirusTotal2Report.rescan
python
def rescan(self): if self.type in ("file_name", "hash"): data = self.scan.retrieve(self.scan_id, thing_type="hash", raw=True, rescan=True) else: raise TypeError("cannot rescan type "+self.type) try: self._json = json.loads(data) except: raise TypeError
Requests a rescan of the current file. This API only works for reports that have been generated from files or hashes. Keyword arguments: none Raises: TypeError if we don't get JSON back from VT
https://github.com/phillipmartin/virustotal2/blob/5c2136da98bedacc5f1b4ea1a1c82cd7907f7542/virustotal2.py#L446-L466
import base64 import threading from itertools import izip_longest import os import urlparse import re import json import time import hashlib import requests class VirusTotal2(object): _SCAN_ID_RE = re.compile(r"^[a-fA-F0-9]{64}-[0-9]{10}$") def __init__(self, api_key, limit_per_min=None): self.api_key = api_key self._urls_per_retrieve = 4 self._hashes_per_retrieve = 4 self._ips_per_retrieve = 1 self._domains_per_retrieve = 1 self._urls_per_scan = 4 self._hashes_per_scan = 25 self._files_per_scan = 1 self.limits = [] self.limit_lock = threading.Lock() if limit_per_min: self.limit_per_min = limit_per_min else: self.limit_per_min = 4 def scan(self, thing, thing_type=None, raw=False, rescan=False): thing_id = self._whatisthing(thing) if thing_type is None: thing_type = thing_id data = {"apikey": self.api_key} if thing_type == "url": endpoint = "https://www.virustotal.com/vtapi/v2/url/scan" if isinstance(thing, list): data["url"] = "\n".join(thing) else: data["url"] = thing self._limit_call_handler() result = requests.post(endpoint, data=data).text elif thing_type == "file_name" or thing_type == "base64": with open(thing, 'rb') as f: if thing_type == "base64": content = base64.b64decode(f.read()) else: content = f.read() if rescan: endpoint = "https://www.virustotal.com/vtapi/v2/file/rescan" data["resource"] = hashlib.sha256(content).hexdigest() self._limit_call_handler() result = requests.post(endpoint, data=data).text else: endpoint = "https://www.virustotal.com/vtapi/v2/file/scan" self._limit_call_handler() result = requests.post(endpoint, data=data, files={"file": (os.path.basename(thing), content)}).text elif thing_type == "hash": if rescan: endpoint = "https://www.virustotal.com/vtapi/v2/file/rescan" if isinstance(thing, list): data["resource"] = ", ".join(thing) else: data["resource"] = thing self._limit_call_handler() result = requests.post(endpoint, data=data).text() else: raise TypeError("Hahses can only be re-scanned, please set rescan=True") else: raise TypeError("Unable to scan type '"+thing_type+".") if raw: return result return self._generate_report(result, thing_id, thing) def retrieve(self, thing, thing_type=None, raw=False): thing_id = self._whatisthing(thing) if thing_type is None: thing_type = thing_id data = {"apikey": self.api_key} if thing_type == "url": endpoint = "http://www.virustotal.com/vtapi/v2/url/report" if isinstance(thing, list): list_of_lists = self._grouped(thing, self._urls_per_retrieve) list_of_results = [] for group in list_of_lists: data["resource"] = "\n".join([url for url in group if url is not None]) self._limit_call_handler() try: ret = json.loads(requests.post(endpoint, data=data).text) except: raise TypeError if not isinstance(ret, list): ret = [ret] for item in ret: list_of_results.append(item) result = json.dumps(list_of_results) else: data["resource"] = thing self._limit_call_handler() result = requests.post(endpoint, data=data).text elif thing_type == "ip": endpoint = "http://www.virustotal.com/vtapi/v2/ip-address/report" if not isinstance(thing, list): thing = [thing] list_of_results = [] for ip in thing: data["ip"] = ip self._limit_call_handler() try: ret = json.loads(requests.get(endpoint, params=data).text) except: raise TypeError list_of_results.append(ret) if len(list_of_results) == 1: list_of_results = list_of_results[0] result = json.dumps(list_of_results) elif thing_type == "file_name" or thing_type == "base64": endpoint = "http://www.virustotal.com/vtapi/v2/file/report" hashes = [] if not isinstance(thing, list): thing = [thing] for f 
in thing: fh = open(f, 'rb') if thing_type == "base64": content = base64.b64decode(fh.read()) else: content = fh.read() hashval = hashlib.sha256(content).hexdigest() hashes.append(hashval) data["resource"] = ", ".join(hashes) self._limit_call_handler() result = requests.post(endpoint, data=data).text elif thing_type == 'domain': endpoint = "http://www.virustotal.com/vtapi/v2/domain/report" if isinstance(thing, list): raise TypeError data["domain"] = thing self._limit_call_handler() result = requests.get(endpoint, params=data).text elif thing_type == 'hash': endpoint = "http://www.virustotal.com/vtapi/v2/file/report" if isinstance(thing, list): data["resource"] = ", ".join(thing) else: data["resource"] = thing self._limit_call_handler() result = requests.post(endpoint, data=data).text elif thing_type == "scanid": raise TypeError("Can't infer the proper endpoint when given scanIDs without a thing_type that is not scanID") else: raise TypeError("Unable to scan type '"+thing_type+".") if raw: return result return self._generate_report(result, thing_id, thing) def _generate_report(self, result, thing_id, thing): report = [] if isinstance(result, basestring): try: obj = json.loads(result) if isinstance(obj, dict): report.append(VirusTotal2Report(obj, self, thing_id, thing)) else: for (i, rep) in enumerate(obj): report.append(VirusTotal2Report(rep, self, thing_id, thing[i])) except: raise TypeError("VT String is unparsable: "+str(result)) else: raise TypeError("VT String (which is not a string?) is unparsable: "+str(result)) return report if len(report) > 1 else report[0] def _limit_call_handler(self): with self.limit_lock: if self.limit_per_min <= 0: return now = time.time() self.limits = [l for l in self.limits if l > now] self.limits.append(now + 60) if len(self.limits) >= self.limit_per_min: time.sleep(self.limits[0] - now) def _grouped(self, iterable, n): return izip_longest(*[iter(iterable)] * n, fillvalue=None) def _whatisthing(self, thing): if isinstance(thing, list): thing = thing[0] if isinstance(thing,basestring) and os.path.isfile(thing): if thing.endswith(".base64"): return "base64" else: return "file_name" if not isinstance(thing, basestring): return "unknown" if all(i in "1234567890abcdef" for i in str(thing).lower()) and len(thing) in [32, 40, 64]: return "hash" elif all(i in "1234567890." for i in thing) and len(thing) <= 15: return "ip" elif "." 
in thing and "/" not in thing: return "domain" elif self._SCAN_ID_RE.match(thing): return "scanid" elif urlparse.urlparse(thing).scheme: return "url" else: return "unknown" class VirusTotal2Report(object): def __init__(self, obj, parent, thing_id, query): super(VirusTotal2Report, self).__init__() self.scan = parent self._json = obj self.type = thing_id self.query = query self.update() def __repr__(self): return "<VirusTotal2 report %s (%s)>" % ( self.id, self.status, ) def __iter__(self): if self.type == "ip": for resolution in self.resolutions.iteritems(): yield resolution elif self.type == "domain": for resolution in self.resolutions.iteritems(): yield resolution elif self.type == "url": for scanner, report in self.scans.iteritems(): yield (scanner, report["result"]) else: for antivirus, report in self.scans.iteritems(): yield ( (antivirus, report["version"], report["update"]), report["result"], ) def __getattr__(self, attr): item = { "id": "resource", "status": "verbose_msg", }.get(attr, attr) try: return self._json[item] except KeyError: raise AttributeError(attr) def update(self): if self.response_code == 0: return if self.type in ("ip", "domain"): data = self.scan.retrieve(self.query, raw=True) elif self.type == "file_name" or self.type == "base64": data = self.scan.retrieve(self.scan_id, thing_type="hash", raw=True) else: data = self.scan.retrieve(self.scan_id, thing_type=self.type, raw=True) try: self._json = json.loads(data) except: raise TypeError
MIT License
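The reason rescan is limited to file and hash reports is visible in _whatisthing, which classifies raw input before any endpoint is chosen. A rough stand-alone illustration of its hash check follows; the helper name and sample values are invented for this sketch.

```python
# Rough illustration of the hash check in _whatisthing: 32/40/64-character hex
# strings are treated as md5/sha1/sha256 hashes. The helper name is invented.
def looks_like_hash(thing: str) -> bool:
    return (
        all(ch in "1234567890abcdef" for ch in thing.lower())
        and len(thing) in (32, 40, 64)
    )

print(looks_like_hash("d41d8cd98f00b204e9800998ecf8427e"))  # True  (md5 length)
print(looks_like_hash("example.com"))                        # False (a domain)
```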
rensaproject/rensapy
rensapy/src/en/parser/nltk_lite/contrib/paradigmquery.py
ParadigmQuery.getTree
python
def getTree(self): if self.string == None: print "No string has been parsed. Please use parse(string)." return None return self.nltktree
Returns the results from the CFG parsing
https://github.com/rensaproject/rensapy/blob/e7bb0bd248c23353226d4582eb3cc15e0de168ca/rensapy/src/en/parser/nltk_lite/contrib/paradigmquery.py#L160-L167
from en.parser.nltk_lite import tokenize from en.parser.nltk_lite import parse from en.parser.nltk_lite.parse import cfg from re import * class ParadigmQuery(object): def __init__(self, p_string=None): self.nltktree = None self.string = p_string self.parseList = None self.nltkTree = None self.parseTree = None self.xml = None if p_string <> None: self.parse(p_string) def parse(self, p_string): self.nltktree = None self.string = p_string """ 1. Tokenize ------------------------------------------------------------------------ """ re_all = r'table[(]|\,|[)]|[/]|\w+' data_tokens = tokenize.regexp(self.string, re_all) """ 2. Develop a context free grammar ------------------------------------------------------------------------ """ O, T, H, D = cfg.nonterminals('O, T, H, D') productions = ( cfg.Production(O, [D]), cfg.Production(O, [H]), cfg.Production(O, [T]), cfg.Production(T, ['table(', O, ',', O, ',', O, ')']), cfg.Production(H, [D, '/', D]), cfg.Production(H, [D, '/', O]) ) re_domain = compile(r'^\w+$') for tok in data_tokens: if re_domain.match(tok): prod = cfg.Production(D,[tok]), productions = productions + prod grammar = cfg.Grammar(O, productions) rd_parser = parse.RecursiveDescent(grammar) tokens = tokenize.regexp(self.string, re_all) toklist = list(tokens) """ 3. Parse using the context free grammar ------------------------------------------------------------------------ """ try: self.parseList = rd_parser.get_parse_list(toklist)[0] except IndexError: print "Could not parse query." return """ 4. Refine and convert to a Tree representation ------------------------------------------------------------------------ """ string = str(self.parseList) string2 = string.replace(":","").replace("')'","").replace("table(","").replace("','","").replace("'","").replace("/","") self.nltktree = parse.tree.bracket_parse(string2) self.parseTree = QuerySentence(self.nltktree) self.xml = self.parseTree.toXML()
MIT License
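A hypothetical call against the class shown above (Python 2-era code; en.parser.nltk_lite must be importable). The query string is one example of the table(row, column, cell) shape that the embedded grammar accepts, and the import path simply mirrors the module location shown in the record.

```python
# Hypothetical usage; the import path is inferred from the file location above.
from en.parser.nltk_lite.contrib.paradigmquery import ParadigmQuery

q = ParadigmQuery("table(case,number,gender)")  # parsed in __init__
tree = q.getTree()       # None if nothing has been parsed successfully
if tree is not None:
    print(tree)
```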
line/line-bot-sdk-python
linebot/async_api.py
AsyncLineBotApi.narrowcast
python
async def narrowcast( self, messages, retry_key=None, recipient=None, filter=None, limit=None, notification_disabled=False, timeout=None, ): if not isinstance(messages, (list, tuple)): messages = [messages] if retry_key: self.headers["X-Line-Retry-Key"] = retry_key data = { "messages": [message.as_json_dict() for message in messages], "recipient": recipient.as_json_dict(), "filter": filter.as_json_dict(), "limit": limit.as_json_dict(), "notificationDisabled": notification_disabled, } response = await self._post( "/v2/bot/message/narrowcast", data=json.dumps(data), timeout=timeout ) return NarrowcastResponse(request_id=response.headers.get("X-Line-Request-Id"))
Call narrowcast API. https://developers.line.biz/en/reference/messaging-api/#send-narrowcast-message Sends push messages to multiple users at any time. Messages cannot be sent to groups or rooms. :param messages: Messages. Max: 5 :type messages: T <= :py:class:`linebot.models.send_messages.SendMessage` | list[T <= :py:class:`linebot.models.send_messages.SendMessage`] :param retry_key: (optional) Arbitrarily generated UUID in hexadecimal notation. :param recipient: audience object of recipient :type recipient: T <= :py:class:`linebot.models.recipient.AudienceRecipient` :param filter: demographic filter of recipient :type filter: T <= :py:class:`linebot.models.filter.DemographicFilter` :param limit: limit on this narrowcast :type limit: T <= :py:class:`linebot.models.limit.Limit` :param bool notification_disabled: (optional) True to disable push notification when the message is sent. The default value is False. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.async_http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.NarrowcastResponse`
https://github.com/line/line-bot-sdk-python/blob/914a2d5520ffb68a2f0cc6894006902a42f66c71/linebot/async_api.py#L261-L316
import json from .__about__ import __version__ from .exceptions import LineBotApiError from .models import ( Error, Profile, MemberIds, Content, RichMenuResponse, MessageQuotaResponse, MessageQuotaConsumptionResponse, IssueLinkTokenResponse, IssueChannelTokenResponse, MessageDeliveryBroadcastResponse, MessageDeliveryMulticastResponse, MessageDeliveryPushResponse, MessageDeliveryReplyResponse, InsightMessageDeliveryResponse, InsightFollowersResponse, InsightDemographicResponse, InsightMessageEventResponse, BroadcastResponse, NarrowcastResponse, MessageProgressNarrowcastResponse, BotInfo, GetWebhookResponse, TestWebhookResponse, AudienceGroup, ClickAudienceGroup, ImpAudienceGroup, GetAuthorityLevel, Audience, CreateAudienceGroup, ) from .models.responses import ( Group, UserIds, RichMenuAliasResponse, RichMenuAliasListResponse, ) class AsyncLineBotApi(object): DEFAULT_API_ENDPOINT = "https://api.line.me" DEFAULT_API_DATA_ENDPOINT = "https://api-data.line.me" def __init__( self, channel_access_token, async_http_client, endpoint=DEFAULT_API_ENDPOINT, data_endpoint=DEFAULT_API_DATA_ENDPOINT, ): self.data_endpoint = data_endpoint self.endpoint = endpoint self.headers = { "Authorization": "Bearer " + channel_access_token, "User-Agent": "line-bot-sdk-python-async/" + __version__, } self.async_http_client = async_http_client async def reply_message( self, reply_token, messages, notification_disabled=False, timeout=None ): if not isinstance(messages, (list, tuple)): messages = [messages] data = { "replyToken": reply_token, "messages": [message.as_json_dict() for message in messages], "notificationDisabled": notification_disabled, } await self._post( "/v2/bot/message/reply", data=json.dumps(data), timeout=timeout ) async def push_message( self, to, messages, retry_key=None, notification_disabled=False, timeout=None ): if not isinstance(messages, (list, tuple)): messages = [messages] if retry_key: self.headers["X-Line-Retry-Key"] = retry_key data = { "to": to, "messages": [message.as_json_dict() for message in messages], "notificationDisabled": notification_disabled, } await self._post("/v2/bot/message/push", data=json.dumps(data), timeout=timeout) async def multicast( self, to, messages, retry_key=None, notification_disabled=False, timeout=None ): if not isinstance(messages, (list, tuple)): messages = [messages] if retry_key: self.headers["X-Line-Retry-Key"] = retry_key data = { "to": to, "messages": [message.as_json_dict() for message in messages], "notificationDisabled": notification_disabled, } await self._post( "/v2/bot/message/multicast", data=json.dumps(data), timeout=timeout ) async def broadcast( self, messages, retry_key=None, notification_disabled=False, timeout=None ): if not isinstance(messages, (list, tuple)): messages = [messages] if retry_key: self.headers["X-Line-Retry-Key"] = retry_key data = { "messages": [message.as_json_dict() for message in messages], "notificationDisabled": notification_disabled, } response = await self._post( "/v2/bot/message/broadcast", data=json.dumps(data), timeout=timeout ) return BroadcastResponse(request_id=response.headers.get("X-Line-Request-Id"))
Apache License 2.0
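For orientation, this is roughly the request body narrowcast assembles before POSTing to /v2/bot/message/narrowcast. The recipient, filter, and limit dicts below are placeholders standing in for what the corresponding model objects' as_json_dict() calls would return; they are not values taken from the SDK itself.

```python
# Placeholder payload; the nested dicts stand in for as_json_dict() output.
import json

data = {
    "messages": [{"type": "text", "text": "hello"}],
    "recipient": {"type": "audience", "audienceGroupId": 1234567890},
    "filter": {"demographic": {"type": "age", "gte": "age_20", "lt": "age_30"}},
    "limit": {"max": 100},
    "notificationDisabled": False,
}
body = json.dumps(data)  # sent with an X-Line-Retry-Key header when retry_key is given
```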
vemel/mypy_boto3_builder
mypy_boto3_builder/structures/client.py
Client.boto3_doc_link
python
def boto3_doc_link(self) -> str: return self.service_name.get_boto3_doc_link("Client")
Link to the boto3 docs page.
https://github.com/vemel/mypy_boto3_builder/blob/07c9a4273404ea0bb8aa6c14b9ed8b3af1f1a3dd/mypy_boto3_builder/structures/client.py#L71-L75
from collections.abc import Iterator from botocore.client import BaseClient from mypy_boto3_builder.enums.service_module_name import ServiceModuleName from mypy_boto3_builder.import_helpers.import_record import ImportRecord from mypy_boto3_builder.import_helpers.import_string import ImportString from mypy_boto3_builder.service_name import ServiceName from mypy_boto3_builder.structures.argument import Argument from mypy_boto3_builder.structures.attribute import Attribute from mypy_boto3_builder.structures.class_record import ClassRecord from mypy_boto3_builder.structures.method import Method from mypy_boto3_builder.type_annotations.internal_import import InternalImport from mypy_boto3_builder.type_annotations.type import Type from mypy_boto3_builder.type_annotations.type_class import TypeClass class Client(ClassRecord): _alias_name: str = "Client" def __init__(self, name: str, service_name: ServiceName, boto3_client: BaseClient) -> None: super().__init__(name=name) self.service_name = service_name self.boto3_client = boto3_client self.exceptions_class = ClassRecord(name="Exceptions") self.bases = [TypeClass(BaseClient)] self.client_error_class = ClassRecord( name="BotocoreClientError", attributes=[ Attribute("MSG_TEMPLATE", Type.str), ], bases=[TypeClass(BaseException)], methods=[ Method( name="__init__", arguments=[ Argument("self", None), Argument("error_response", Type.MappingStrAny), Argument("operation_name", Type.str), ], return_type=Type.none, body_lines=[ "self.response: Dict[str, Any]", "self.operation_name: str", ], ), ], ) def __hash__(self) -> int: return hash(self.service_name) @staticmethod def get_class_name(service_name: ServiceName) -> str: return f"{service_name.class_name}Client" @property
MIT License
pdm-project/pdm
pdm/cli/commands/plugin.py
RemoveCommand._resolve_dependencies_to_remove
python
def _resolve_dependencies_to_remove(self, packages: list[str]) -> list[str]: result: set[str] = set() to_resolve = list(packages) ws = WorkingSet() graph = build_dependency_graph(ws) while to_resolve: temp: list[Package] = [] for name in to_resolve: key = normalize_name(name) if key in ws: result.add(key) package = Package(key, "0.0.0", {}) if package not in graph: continue for dep in graph.iter_children(package): temp.append(dep) graph.remove(package) to_resolve.clear() for dep in temp: if not any(graph.iter_parents(dep)) and dep.name != "pdm": to_resolve.append(dep.name) return sorted(result)
Perform a BFS to find all unneeded dependencies
https://github.com/pdm-project/pdm/blob/a5ef1fa3b09b53d9d0575657f4d23c9cc2f2cc11/pdm/cli/commands/plugin.py#L137-L162
from __future__ import annotations import argparse import os import shlex import subprocess import sys import click from pdm import termui from pdm.cli.commands.base import BaseCommand from pdm.cli.options import verbose_option from pdm.cli.utils import Package, build_dependency_graph from pdm.models.environment import WorkingSet from pdm.project import Project from pdm.utils import normalize_name if sys.version_info >= (3, 8): import importlib.metadata as importlib_metadata else: import importlib_metadata from pip import __file__ as pip_location def _all_plugins() -> list[str]: result: set[str] = set() for dist in importlib_metadata.distributions(): if any(ep.group in ("pdm", "pdm.plugin") for ep in dist.entry_points): result.add(normalize_name(dist.metadata["Name"])) return sorted(result) def run_pip(args: list[str]) -> bytes: return subprocess.check_output( [sys.executable, "-I", os.path.dirname(pip_location)] + args, stderr=subprocess.STDOUT, ) class Command(BaseCommand): arguments = [verbose_option] def add_arguments(self, parser: argparse.ArgumentParser) -> None: subparsers = parser.add_subparsers(title="Sub commands") ListCommand.register_to(subparsers) AddCommand.register_to(subparsers) RemoveCommand.register_to(subparsers) parser.set_defaults(search_parent=False) self.parser = parser def handle(self, project: Project, options: argparse.Namespace) -> None: self.parser.print_help() class ListCommand(BaseCommand): arguments = [verbose_option] name = "list" def handle(self, project: Project, options: argparse.Namespace) -> None: plugins = _all_plugins() echo = project.core.ui.echo if not plugins: echo("No plugin is installed with PDM", err=True) sys.exit(1) echo("Installed plugins:", err=True) for plugin in plugins: metadata = importlib_metadata.metadata(plugin) echo( f"{termui.green(metadata['Name'])} {termui.yellow(metadata['Version'])}" ) if metadata["Summary"]: echo(f" {metadata['Summary']}") class AddCommand(BaseCommand): arguments = [verbose_option] name = "add" def add_arguments(self, parser: argparse.ArgumentParser) -> None: parser.add_argument( "--pip-args", help="Arguments that will be passed to pip install", default="", ) parser.add_argument( "packages", nargs="+", help="Specify one or many plugin names, " "each package can have a version specifier", ) def handle(self, project: Project, options: argparse.Namespace) -> None: pip_args = ["install"] + shlex.split(options.pip_args) + options.packages project.core.ui.echo( f"Running pip command: {pip_args}", verbosity=termui.DETAIL ) with project.core.ui.open_spinner( f"Installing plugins: {options.packages}" ) as spinner: try: run_pip(pip_args) except subprocess.CalledProcessError as e: spinner.fail("Installation failed: \n" + e.output.decode("utf8")) sys.exit(1) else: spinner.succeed("Installation succeeds.") class RemoveCommand(BaseCommand): arguments = [verbose_option] name = "remove" def add_arguments(self, parser: argparse.ArgumentParser) -> None: parser.add_argument( "--pip-args", help="Arguments that will be passed to pip uninstall", default="", ) parser.add_argument( "-y", "--yes", action="store_true", help="Answer yes on the question" ) parser.add_argument( "packages", nargs="+", help="Specify one or many plugin names" )
MIT License
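A generic sketch of the same idea with plain dicts instead of pdm's WorkingSet and dependency graph: remove the requested packages, then keep removing any dependency whose remaining parents have all been removed, never touching pdm itself. The package names and dependency table are made up for this illustration.

```python
# Stand-alone BFS over a toy dependency table (not pdm's actual data structures).
from collections import deque

dependencies = {            # package -> set of direct dependencies
    "plugin-a": {"requests", "click"},
    "requests": {"urllib3"},
    "click": set(),
    "urllib3": set(),
    "pdm": {"click"},       # pdm itself still needs click
}

def unneeded_after_removal(packages):
    removed = set()
    queue = deque(packages)
    while queue:
        pkg = queue.popleft()
        removed.add(pkg)
        for dep in dependencies.get(pkg, ()):
            # A dependency becomes removable once no surviving package needs it.
            parents = {p for p, deps in dependencies.items()
                       if dep in deps and p not in removed}
            if not parents and dep != "pdm":
                queue.append(dep)
    return sorted(removed)

print(unneeded_after_removal(["plugin-a"]))
# ['plugin-a', 'requests', 'urllib3']  -- click survives because pdm needs it
```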
juftin/camply
camply/search/base_search.py
BaseCampingSearch._log_availabilities
python
def _log_availabilities(cls, availability_df: DataFrame, verbose: bool) -> DataFrame: booking_date: datetime for booking_date, available_sites in availability_df.groupby("booking_date"): logger.info(f"📅 {booking_date.strftime('%a, %B %d')} " f"🏕 {len(available_sites)} sites") location_tuple: tuple for location_tuple, campground_availability in available_sites.groupby([DataColumns.RECREATION_AREA_COLUMN, DataColumns.FACILITY_NAME_COLUMN]): logger.info(f"\t⛰️ {' 🏕 '.join(location_tuple)}: ⛺ " f"{len(campground_availability)} sites") if verbose is True: for booking_nights, nightly_availability in campground_availability.groupby( [DataColumns.BOOKING_NIGHTS_COLUMN]): unique_urls = nightly_availability[DataColumns.BOOKING_URL_COLUMN].unique() for booking_url in sorted(unique_urls): logger.info(f"\t\t🔗 {booking_url} " f"({booking_nights} night" f"{'s' if booking_nights > 1 else ''})") return availability_df
Log the Availabilities Parameters ---------- availability_df: DataFrame verbose: bool Returns ------- DataFrame
https://github.com/juftin/camply/blob/7abd14393ab63029f964c12268ac96d5fc9baaa7/camply/search/base_search.py#L597-L628
from abc import ABC, abstractmethod from datetime import datetime, timedelta from itertools import groupby, islice, tee import logging from operator import itemgetter from os import getenv from time import sleep from typing import Generator, Iterable, List, Optional, Set, Union from pandas import concat, DataFrame, date_range, Series, Timedelta import tenacity from camply.config import CampsiteContainerFields, DataColumns, SearchConfig from camply.containers import AvailableCampsite, CampgroundFacility, RecreationArea, SearchWindow from camply.notifications import CAMPSITE_NOTIFICATIONS, SilentNotifications from camply.providers import RecreationDotGov, YellowstoneLodging from camply.utils.logging_utils import get_emoji logger = logging.getLogger(__name__) class SearchError(Exception): class CampsiteNotFound(SearchError): class BaseCampingSearch(ABC): def __init__(self, provider: Union[RecreationDotGov, YellowstoneLodging], search_window: Union[SearchWindow, List[SearchWindow]], weekends_only: bool = False, nights: int = 1) -> None: self.campsite_finder: Union[RecreationDotGov, YellowstoneLodging] = provider self.search_window: List[SearchWindow] = self._make_list(search_window) self.weekends_only: bool = weekends_only self.search_days: List[datetime] = self._get_search_days() self.search_months: List[datetime] = self._get_search_months() self.nights = self._validate_consecutive_nights(nights=nights) self.campsites_found: Set[AvailableCampsite] = set() @abstractmethod def get_all_campsites(self) -> List[AvailableCampsite]: def _get_intersection_date_overlap(self, date: datetime, periods: int) -> bool: campsite_date_range = set(date_range(start=date, periods=periods)) intersection = campsite_date_range.intersection(self.search_days) if intersection: return True else: return False def _compare_date_overlap(self, campsite: AvailableCampsite) -> bool: intersection = self._get_intersection_date_overlap(date=campsite.booking_date, periods=campsite.booking_nights) return intersection def _filter_date_overlap(self, campsites: DataFrame) -> bool: filtered_campsites = campsites[campsites.apply( lambda x: self._get_intersection_date_overlap(date=x.booking_date, periods=x.booking_nights), axis=1)].copy().reset_index(drop=True) return filtered_campsites def _search_matching_campsites_available(self, log: bool = False, verbose: bool = False, raise_error: bool = False) -> List[AvailableCampsite]: matching_campgrounds = list() for camp in self.get_all_campsites(): if all([self._compare_date_overlap(campsite=camp) is True, camp.booking_nights >= self.nights]): matching_campgrounds.append(camp) logger.info(f"{(get_emoji(matching_campgrounds) + ' ') * 4}{len(matching_campgrounds)} " "Reservable Campsites Matching Search Preferences") self.assemble_availabilities(matching_data=matching_campgrounds, log=log, verbose=verbose) if len(matching_campgrounds) == 0 and raise_error is True: campsite_availability_message = "No Campsites were found, we'll continue checking" logger.info(campsite_availability_message) raise CampsiteNotFound(campsite_availability_message) return matching_campgrounds @classmethod def _get_polling_minutes(cls, polling_interval: Optional[int]) -> int: if polling_interval is None: polling_interval = getenv("POLLING_INTERVAL", SearchConfig.RECOMMENDED_POLLING_INTERVAL) if int(polling_interval) < SearchConfig.POLLING_INTERVAL_MINIMUM: polling_interval = SearchConfig.POLLING_INTERVAL_MINIMUM polling_interval_minutes = int(round(float(polling_interval), 2)) return polling_interval_minutes def 
_continuous_search_retry(self, log: bool, verbose: bool, polling_interval: int, continuous_search_attempts: int, notification_provider: str, notify_first_try: bool) -> List[AvailableCampsite]: polling_interval_minutes = self._get_polling_minutes(polling_interval=polling_interval) notifier = CAMPSITE_NOTIFICATIONS.get(notification_provider.lower(), SilentNotifications)() logger.info(f"Searching for campsites every {polling_interval_minutes} minutes. " f"Notifications active via {notifier}") retryer = tenacity.Retrying( retry=tenacity.retry_if_exception_type(CampsiteNotFound), wait=tenacity.wait.wait_fixed(int(polling_interval_minutes) * 60)) matching_campsites = retryer.__call__(self._search_matching_campsites_available, False, False, True) found_campsites = set(matching_campsites) new_campsites = found_campsites.difference(self.campsites_found) self.assemble_availabilities(matching_data=list(new_campsites), log=log, verbose=verbose) logger.info(f"{len(new_campsites)} New Campsites Found.") self.campsites_found.update(new_campsites) logged_campsites = list(new_campsites) if max([retryer.statistics.get("attempt_number", 1), continuous_search_attempts]) > 1: notifier.send_campsites(campsites=logged_campsites) elif retryer.statistics.get("attempt_number", 1) == 1 and notify_first_try is True: notifier.send_campsites(campsites=logged_campsites) else: if not isinstance(notifier, SilentNotifications) and len(logged_campsites) > SearchConfig.MINIMUM_CAMPSITES_FIRST_NOTIFY: error_message = (f"Found more than {SearchConfig.MINIMUM_CAMPSITES_FIRST_NOTIFY} " f"matching campsites ({len(logged_campsites)}) on the " "first try. Try searching online instead. " f"camply is only sending the first " f"{SearchConfig.MINIMUM_CAMPSITES_FIRST_NOTIFY} notifications. " "Go Get your campsite! 
🏕") logger.warning(error_message) notifier.send_message(message=error_message) logged_campsites = logged_campsites[:SearchConfig.MINIMUM_CAMPSITES_FIRST_NOTIFY] notifier.send_campsites(campsites=logged_campsites) return list(self.campsites_found) def _search_campsites_continuous(self, log: bool = True, verbose: bool = False, polling_interval: Optional[int] = None, notification_provider: str = "silent", notify_first_try: bool = False, search_forever: bool = False): polling_interval_minutes = self._get_polling_minutes(polling_interval=polling_interval) continuous_search = True continuous_search_attempts = 1 while continuous_search is True: self._continuous_search_retry(log=log, verbose=verbose, polling_interval=polling_interval, notification_provider=notification_provider, notify_first_try=notify_first_try, continuous_search_attempts=continuous_search_attempts) continuous_search_attempts += 1 if search_forever is True: sleep(int(polling_interval_minutes) * 60) else: continuous_search = False return list(self.campsites_found) def get_matching_campsites(self, log: bool = True, verbose: bool = False, continuous: bool = False, polling_interval: Optional[int] = None, notification_provider: str = "silent", notify_first_try: bool = False, search_forever: bool = False) -> List[AvailableCampsite]: if continuous is True: self._search_campsites_continuous(log=log, verbose=verbose, polling_interval=polling_interval, notification_provider=notification_provider, notify_first_try=notify_first_try, search_forever=search_forever) else: matching_campsites = self._search_matching_campsites_available(log=log, verbose=True) self.campsites_found.update(set(matching_campsites)) return list(self.campsites_found) def _get_search_days(self) -> List[datetime]: now = datetime.now() current_date = datetime(year=now.year, month=now.month, day=now.day) search_days = set() for window in self.search_window: generated_dates = set() for index in range(0, (window.end_date - window.start_date).days): search_day = window.start_date search_day = search_day.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=index) if search_day >= current_date: generated_dates.add(search_day) search_days.update(generated_dates) if self.weekends_only is True: logger.info("Limiting Search of Campgrounds to Weekend Availabilities") for search_date in list(search_days): if search_date.weekday() not in [4, 5]: search_days.remove(search_date) number_searches = len(search_days) if number_searches > 0: logger.info(f"{len(search_days)} booking nights selected for search, " f"ranging from {min(search_days).strftime('%Y-%m-%d')} to " f"{max(search_days).strftime('%Y-%m-%d')}") else: logger.info(SearchConfig.ERROR_MESSAGE) raise RuntimeError(SearchConfig.ERROR_MESSAGE) return list(sorted(search_days)) def _get_search_months(self) -> List[datetime]: search_days = self.search_days.copy() truncated_months = set([day.replace(day=1) for day in search_days]) if len(truncated_months) > 1: logger.info(f"{len(truncated_months)} different months selected for search, " f"ranging from {min(search_days).strftime('%Y-%m-%d')} to " f"{max(search_days).strftime('%Y-%m-%d')}") return sorted(list(truncated_months)) elif len(truncated_months) == 0: logger.info(SearchConfig.ERROR_MESSAGE) raise RuntimeError(SearchConfig.ERROR_MESSAGE) else: return sorted(list(truncated_months)) @classmethod def _consolidate_campsites(cls, campsite_df: DataFrame, nights: int) -> List[AvailableCampsite]: composed_groupings = list() for _, campsite_slice in campsite_df.groupby( 
[CampsiteContainerFields.CAMPSITE_ID, CampsiteContainerFields.CAMPGROUND_ID]): campsite_grouping = campsite_slice.sort_values(by=CampsiteContainerFields.BOOKING_DATE, ascending=True).copy() booking_date = campsite_grouping[CampsiteContainerFields.BOOKING_DATE] date = Timedelta('1d') consecutive_nights = booking_date.diff() != date group_identifier = consecutive_nights.cumsum() campsite_grouping[CampsiteContainerFields.CAMPSITE_GROUP] = group_identifier for campsite_group, campsite_group_slice in campsite_grouping.groupby( [CampsiteContainerFields.CAMPSITE_GROUP]): composed_grouping = campsite_group_slice.sort_values( by=CampsiteContainerFields.BOOKING_DATE, ascending=True).copy() composed_grouping.drop(columns=[CampsiteContainerFields.CAMPSITE_GROUP], inplace=True) nightly_breakouts = cls._find_consecutive_nights(dataframe=composed_grouping, nights=nights) composed_groupings.append(nightly_breakouts) if len(composed_groupings) == 0: composed_groupings = [DataFrame()] return concat(composed_groupings, ignore_index=True) @classmethod def _consecutive_subseq(cls, iterable: Iterable, length: int) -> Generator: for _, consec_run in groupby(enumerate(iterable), lambda x: x[0] - x[1]): k_wise = tee(map(itemgetter(1), consec_run), length) for n, it in enumerate(k_wise): next(islice(it, n, n), None) yield from zip(*k_wise) @classmethod def _find_consecutive_nights(cls, dataframe: DataFrame, nights: int) -> DataFrame: dataframe_slice = dataframe.copy().reset_index(drop=True) nights_indexes = dataframe_slice.booking_date.index consecutive_generator = cls._consecutive_subseq(iterable=nights_indexes, length=nights) sequences = list(consecutive_generator) concatted_data = list() for sequence in sequences: index_list = list(sequence) data_copy = dataframe_slice.iloc[index_list].copy() data_copy.booking_date = data_copy.booking_date.min() data_copy.booking_end_date = data_copy.booking_end_date.max() data_copy.booking_url = data_copy.booking_url.loc[index_list[0]] data_copy.booking_nights = (data_copy.booking_end_date - data_copy.booking_date).dt.days data_copy.drop_duplicates(inplace=True) concatted_data.append(data_copy) if len(concatted_data) == 0: concatted_data = [DataFrame()] return concat(concatted_data, ignore_index=True) def _validate_consecutive_nights(self, nights: int) -> int: search_days = Series(self.search_days) consecutive_nights = search_days.diff() != Timedelta('1d') largest_grouping = consecutive_nights.cumsum().value_counts().max() if nights > 1: logger.info(f"Searching for availabilities with {nights} consecutive night stays.") if nights > largest_grouping: logger.warning("Too many consecutive nights selected. 
" "The consecutive night parameter will be set to " f"the max possible, {largest_grouping}.") return largest_grouping else: return nights @staticmethod def campsites_to_df(campsites: List[AvailableCampsite]) -> DataFrame: return DataFrame(data=campsites, columns=AvailableCampsite._fields) @staticmethod def df_to_campsites(campsite_df: DataFrame) -> List[AvailableCampsite]: composed_campsite_array = list() composed_campsite_data_array = campsite_df.to_dict(orient="records") for campsite_record in composed_campsite_data_array: composed_campsite_array.append(AvailableCampsite(**campsite_record)) return composed_campsite_array @classmethod def assemble_availabilities(cls, matching_data: List[AvailableCampsite], log: bool = True, verbose: bool = False) -> DataFrame: availability_df = cls.campsites_to_df(campsites=matching_data) if log is True: cls._log_availabilities(availability_df=availability_df, verbose=verbose) return availability_df @classmethod
MIT License
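A small stand-alone illustration of the nested groupby pattern _log_availabilities uses: one line per booking date, then one per (recreation area, campground) pair. The column names and sample rows are simplified stand-ins for camply's DataColumns constants, not the real schema.

```python
# Toy data demonstrating the groupby-then-groupby logging pattern.
import pandas as pd

df = pd.DataFrame({
    "booking_date": pd.to_datetime(["2021-07-09", "2021-07-09", "2021-07-10"]),
    "recreation_area": ["Yosemite", "Yosemite", "Yosemite"],
    "facility_name": ["Upper Pines", "Lower Pines", "Upper Pines"],
})

for booking_date, available_sites in df.groupby("booking_date"):
    print(f"{booking_date.strftime('%a, %B %d')}: {len(available_sites)} sites")
    for (area, facility), sites in available_sites.groupby(
        ["recreation_area", "facility_name"]
    ):
        print(f"  {area} / {facility}: {len(sites)} sites")
```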
thealgorithms/algorithms-keeper
tests/data/return_annotation.py
no_annotation
python
def no_annotation(num: int): return num + 5
This function contains no return annotation >>> no_annotation(5) 10
https://github.com/thealgorithms/algorithms-keeper/blob/26c7ddd2a1b7fb3b99faa9a426a9542b67717212/tests/data/return_annotation.py#L1-L7
MIT License