Dataset columns (type and string-length range / number of classes):

repository_name: string, lengths 7-107
function_path: string, lengths 4-190
function_identifier: string, lengths 1-236
language: string, 1 class
function: string, lengths 9-647k
docstring: string, lengths 5-488k
function_url: string, lengths 71-285
context: string, lengths 0-2.51M
license: string, 5 classes
awemulya/kobo-predict
onadata/libs/tests/serializers/test_attachment_serializer.py
TestAttachmentSerializer.setUp
python
def setUp(self):
    self.data = {
        "name": "photo_in_group",
        "title": "photo_in_group",
        "type": "survey",
        "default_language": "default",
        "id_string": "photo_in_group",
        "sms_keyword": "photo_in_group",
        "children": [
            {
                "label": "Group #1",
                "type": "group",
                "children": [
                    {
                        "label": "Group #2",
                        "type": "group",
                        "children": [
                            {
                                "type": "photo",
                                "name": "photograph",
                                "label": "Smile :)"
                            }
                        ],
                        "name": "group2"
                    }
                ],
                "name": "group1"
            },
            {
                "control": {"bodyless": True},
                "type": "group",
                "children": [
                    {
                        "bind": {
                            "readonly": "true()",
                            "calculate": "concat('uuid:', uuid())"
                        },
                        "type": "calculate",
                        "name": "instanceID"
                    }
                ],
                "name": "meta"
            }
        ]
    }
    self.question = "photograph"
self.data is a JSON representation of an XForm.
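For orientation, a hedged sketch of how a path-resolution helper could walk this nested children structure to turn self.question into its group-qualified path; find_question_path below is an illustrative stand-in, not onadata's actual get_path implementation.

def find_question_path(node, question, prefix=""):
    # Illustrative only: depth-first walk of an XForm dict like self.data,
    # accumulating group names until the question name is found.
    for child in node.get("children", []):
        name = child.get("name", "")
        here = "%s/%s" % (prefix, name) if prefix else name
        if child.get("type") == "group":
            found = find_question_path(child, question, here)
            if found:
                return found
        elif name == question:
            return here
    return None

# With the data built in setUp(), this returns "group1/group2/photograph".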
https://github.com/awemulya/kobo-predict/blob/f302d084e30fb637d43ec638c701e01a3dddc721/onadata/libs/tests/serializers/test_attachment_serializer.py#L8-L58
from django.test import TransactionTestCase from onadata.libs.serializers.attachment_serializer import get_path class TestAttachmentSerializer(TransactionTestCase):
BSD 2-Clause Simplified License
yangsenius/posenfs
src/task_dataset/preprocess.py
symmetric_exchange_after_flip
python
def symmetric_exchange_after_flip(keypoints_flip, name):
    if name == 'mpii':
        parts = [[3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
    elif name == 'coco':
        parts = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
    else:
        raise ValueError
    keypoints = keypoints_flip.copy()
    for part in parts:
        tmp = keypoints[part[1], :].copy()
        keypoints[part[1], :] = keypoints[part[0], :].copy()
        keypoints[part[0], :] = tmp
    return keypoints
A horizontal flip exchanges the left and right body parts, so the corresponding keypoint rows must be swapped after flipping.
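A minimal usage sketch, assuming the keypoints are a (16, 2) NumPy array of (x, y) coordinates in the MPII convention and that the image (and hence the x coordinates) has already been flipped; symmetric_exchange_after_flip from above is assumed to be in scope.

import numpy as np

# Hypothetical MPII-style keypoints (16 joints) after a horizontal flip:
# x was mirrored to (width - 1 - x), but left/right joints still carry
# their pre-flip row indices until the symmetric exchange below.
width = 256
keypoints = np.random.rand(16, 2) * width
keypoints_flip = keypoints.copy()
keypoints_flip[:, 0] = width - 1 - keypoints_flip[:, 0]

corrected = symmetric_exchange_after_flip(keypoints_flip, name='mpii')
# Rows 3 and 4 (one of the mirrored joint pairs) have traded places.
assert (corrected[3] == keypoints_flip[4]).all()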
https://github.com/yangsenius/posenfs/blob/dd8403633b64936e73a3d8d44d4b34f422d6a6a0/src/task_dataset/preprocess.py#L97-L118
import numpy as np def make_affine_matrix(bbox, target_size, margin=1.2, aug_rotation= 0, aug_scale=1): (w,h)=target_size scale = min((w/margin) /bbox[2], (h/margin) /bbox[3]) t = np.zeros((3, 3)) offset_X= w/2 - scale*(bbox[0]+bbox[2]/2) offset_Y= h/2 - scale*(bbox[1]+bbox[3]/2) t[0, 0] = scale t[1, 1] = scale t[0, 2] = offset_X t[1, 2] = offset_Y t[2, 2] = 1 theta = aug_rotation*np.pi/180 alpha = np.cos(theta)*aug_scale beta = np.sin(theta)*aug_scale rs = np.zeros((3,3)) rs[0, 0] = alpha rs[0, 1] = beta rs[0, 2] = (1-alpha)*(w/2)-beta*(h/2) rs[1, 0] = -beta rs[1, 1] = alpha rs[1, 2] = beta *(w/2) + (1-alpha)*(h/2) rs[2, 2] = 1 final_matrix = np.dot(rs,t) return final_matrix def mpii_to_coco_format(keypoints_original): parts = [[0,9],[1,8],[2,7], [3,13],[4,12],[5,14],[6,11],[7,15],[8,10], [9,3],[10,2],[11,4],[12,1],[13,5],[14,0],[15,6]] keypoints = keypoints_original.copy() for part in parts: keypoints[part[0],:] = keypoints_original[part[1],:].copy() return keypoints
MIT License
ravendb/ravendb-python-client
pyravendb/tools/utils.py
Utils.contains_any
python
def contains_any(text, any_of):
    if not text or not any_of:
        return False
    any_of = list(set(any_of))
    for c in any_of:
        if c in text:
            return True
    return False
@param text: The text we want to check :type str @param any_of: list of characters to look for :type list :returns: True if text contains any character from any_of, False otherwise
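A quick usage sketch; contains_any is defined as a staticmethod on Utils (like the surrounding helpers in this module), so it can be called straight off the class.

from pyravendb.tools.utils import Utils

# True: '@' occurs in the text.
print(Utils.contains_any("user@example.com", ["@", "#"]))
# False: empty text or an empty character list short-circuits to False.
print(Utils.contains_any("", ["@"]))
print(Utils.contains_any("plain text", []))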
https://github.com/ravendb/ravendb-python-client/blob/dbe51ee8eea166e0d9e60897ab480dd9a693366b/pyravendb/tools/utils.py#L374-L389
from pyravendb.custom_exceptions import exceptions import OpenSSL.crypto from collections import Iterable from pyravendb.tools.projection import create_entity_with_mapper from datetime import datetime, timedelta from enum import Enum from threading import Timer from copy import deepcopy import urllib import inspect import json import sys import re class _DynamicStructure(object): def __init__(self, **entries): self.__dict__.update(entries) def __str__(self): return str(self.__dict__) class Utils(object): @staticmethod def quote_key(key, reserved_slash=False): reserved = '%:=&?~#+!$,;\'*[]' if reserved_slash: reserved += '/' if key: return urllib.parse.quote(key, safe=reserved) else: return '' @staticmethod def unpack_iterable(iterable): for item in iterable: if isinstance(item, Iterable) and not isinstance(item, str): for nested in Utils.unpack_iterable(item): yield nested else: yield item @staticmethod def convert_to_snake_case(name): s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() @staticmethod def database_name_validation(name): if name is None: raise ValueError("None name is not valid") result = re.match(r'([A-Za-z0-9_\-\.]+)', name, re.IGNORECASE) if not result: raise exceptions.InvalidOperationException( "Database name can only contain only A-Z, a-z, \"_\", \".\" or \"-\" but was: " + name) @staticmethod def first_or_default(iterator, func, default): for item in iterator: if func(item): return item return default @staticmethod def get_change_vector_from_header(response): header = response.headers.get("ETag", None) if header is not None and header[0] == "\"": return header[1: len(header) - 2] @staticmethod def import_class(name): components = name.split('.') module_name = '.'.join(name.split('.')[:-1]) mod = None try: mod = getattr(__import__(module_name, fromlist=[components[-1]]), components[-1]) except (ImportError, ValueError, AttributeError): pass return mod @staticmethod def is_inherit(parent, child): if child is None or parent is None: return False if parent == child: return True if parent != child: return Utils.is_inherit(parent, child.__base__) @staticmethod def initialize_object(obj, object_type, convert_to_snake_case=None): initialize_dict, set_needed = Utils.make_initialize_dict(obj, object_type.__init__, convert_to_snake_case) o = object_type(**initialize_dict) if set_needed: for key, value in obj.items(): setattr(o, key, value) return o @staticmethod def convert_to_entity(document, object_type, conventions, events, nested_object_types=None): metadata = document.pop("@metadata") original_document = deepcopy(document) original_metadata = deepcopy(metadata) type_from_metadata = conventions.try_get_type_from_metadata(metadata) mapper = conventions.mappers.get(object_type, None) events.before_conversion_to_entity(document, metadata, type_from_metadata) if object_type == dict: events.after_conversion_to_entity(document, document, metadata) return document, metadata, original_metadata, original_document if type_from_metadata is None: if object_type is not None: metadata["Raven-Python-Type"] = "{0}.{1}".format(object_type.__module__, object_type.__name__) else: dyn = _DynamicStructure(**document) events.after_conversion_to_entity(dyn, document, metadata) return dyn, metadata, original_metadata, original_document else: object_from_metadata = Utils.import_class(type_from_metadata) if object_from_metadata is not None: if object_type is None: object_type = object_from_metadata elif Utils.is_inherit(object_type, 
object_from_metadata): mapper = conventions.mappers.get(object_from_metadata, None) or mapper object_type = object_from_metadata elif object_type is not object_from_metadata: raise exceptions.InvalidOperationException( f"Cannot covert document from type {object_from_metadata} to {object_type}") if nested_object_types is None and mapper: entity = create_entity_with_mapper(document, mapper, object_type) else: entity = _DynamicStructure(**document) entity.__class__ = object_type entity = Utils.initialize_object(document, object_type) if nested_object_types: for key in nested_object_types: attr = getattr(entity, key) if attr: try: if isinstance(attr, list): nested_list = [] for attribute in attr: nested_list.append(Utils.initialize_object(attribute, nested_object_types[key])) setattr(entity, key, nested_list) elif nested_object_types[key] is datetime: setattr(entity, key, Utils.string_to_datetime(attr)) elif nested_object_types[key] is timedelta: setattr(entity, key, Utils.string_to_timedelta(attr)) else: setattr(entity, key, Utils.initialize_object(attr, nested_object_types[key])) except TypeError as e: print(e) pass if 'Id' in entity.__dict__: entity.Id = metadata.get('@id', None) events.after_conversion_to_entity(entity, document, metadata) return entity, metadata, original_metadata, original_document @staticmethod def make_initialize_dict(document, entity_init, convert_to_snake_case=None): if convert_to_snake_case: convert_to_snake_case = {} if convert_to_snake_case is True else convert_to_snake_case try: converted_document = {} for key in document: converted_key = convert_to_snake_case.get(key, key) converted_document[Utils.convert_to_snake_case(converted_key)] = document[key] document = converted_document except: pass if entity_init is None: return document set_needed = False entity_initialize_dict = {} args, __, keywords, defaults, _, _, _ = inspect.getfullargspec(entity_init) if (len(args) - 1) > len(document): remainder = len(args) if defaults: remainder -= len(defaults) for i in range(1, remainder): entity_initialize_dict[args[i]] = document.get(args[i], None) for i in range(remainder, len(args)): entity_initialize_dict[args[i]] = document.get(args[i], defaults[i - remainder]) else: if keywords: entity_initialize_dict = document else: for key in document: if key in args: entity_initialize_dict[key] = document[key] if not entity_initialize_dict and len(args) - 1 > 0: set_needed = True for key in args[1:]: entity_initialize_dict[key] = None return entity_initialize_dict, set_needed @staticmethod def dict_to_bytes(the_dict): json_dict = json.dumps(the_dict) return bytes(json_dict, encoding='utf-8') @staticmethod def dict_to_string(dictionary): builder = [] for item in dictionary: if sys.version_info.major > 2 and isinstance(dictionary[item], bytes): dictionary[item] = dictionary[item].decode('utf-8') builder.append('{0}={1}'.format(item, dictionary[item])) return ','.join(item for item in builder) @staticmethod def datetime_to_string(datetime_obj): add_suffix = '0' if datetime_obj != datetime.max else '9' return datetime_obj.strftime(f"%Y-%m-%dT%H:%M:%S.%f{add_suffix}") if datetime_obj else '' @staticmethod def start_a_timer(interval, function, args=None, name=None, daemon=False): timer = Timer(interval, function, args) timer.daemon = daemon if name is not None: timer.name = name timer.start() return timer @staticmethod def string_to_datetime(datetime_str): try: if datetime_str.endswith('Z'): datetime_str = datetime_str[:-1] datetime_s = datetime.strptime(datetime_str, 
"%Y-%m-%dT%H:%M:%S.%f") except ValueError: datetime_s = datetime.strptime(datetime_str[:-1], "%Y-%m-%dT%H:%M:%S.%f") return datetime_s @staticmethod def timedelta_tick(td): return int(td.total_seconds() * 10000000) @staticmethod def string_to_timedelta(timedelta_str): pattern = r'(?:(-?\d+)[.])?(\d{2}):(\d{2}):(\d{2})(?:.(\d+))?' timedelta_initialize = None m = re.match(pattern, timedelta_str, re.IGNORECASE) if m: timedelta_initialize = {"days": 0 if m.group(1) is None else int(m.group(1)), "hours": 0 if m.group(2) is None else int(m.group(2)), "minutes": 0 if m.group(3) is None else int(m.group(3)), "seconds": 0 if m.group(4) is None else int(m.group(4)), "microseconds": 0 if m.group(5) is None else int(m.group(5)) } if timedelta_initialize: return timedelta(**timedelta_initialize) return None @staticmethod def timedelta_to_str(timedelta_obj): timedelta_str = "" if isinstance(timedelta_obj, timedelta): total_seconds = timedelta_obj.seconds days = timedelta_obj.days hours = total_seconds // 3600 minutes = (total_seconds // 60) % 60 seconds = (total_seconds % 3600) % 60 microseconds = timedelta_obj.microseconds if days > 0: timedelta_str += "{0}.".format(days) timedelta_str += "{:02}:{:02}:{:02}".format(hours, minutes, seconds) if microseconds > 0: timedelta_str += ".{0}".format(microseconds) return timedelta_str @staticmethod def escape(term, allow_wild_cards, make_phrase): wild_cards = ['-', '&', '|', '!', '(', ')', '{', '}', '[', ']', '^', '"', '~', ':', '\\'] if not term: return "\"\"" start = 0 length = len(term) buffer = "" if length >= 2 and term[0] == '/' and term[1] == '/': buffer += "//" start = 2 i = start while i < length: ch = term[i] if ch == '*' or ch == '?': if allow_wild_cards: i += 1 continue if ch in wild_cards: if i > start: buffer += term[start:i - start] buffer += '\\{0}'.format(ch) start = i + 1 elif ch == ' ' or ch == '\t': if make_phrase: return "\"{0}\"".format(Utils.escape(term, allow_wild_cards, False)) i += 1 if length > start: buffer += term[start: length - start] return buffer @staticmethod def pfx_to_pem(pem_path, pfx_path, pfx_password): with open(pem_path, 'wb') as pem_file: with open(pfx_path, 'rb') as pfx_file: pfx = pfx_file.read() p12 = OpenSSL.crypto.load_pkcs12(pfx, pfx_password) pem_file.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, p12.get_privatekey())) pem_file.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, p12.get_certificate())) ca = p12.get_ca_certificates() if ca is not None: for cert in ca: pem_file.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)) return pem_path @staticmethod def get_cert_file_fingerprint(pem_path): with open(pem_path, 'rb') as pem_file: cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem_file.read()) return str(cert.digest("sha1")) @staticmethod def index_of_any(text, any_of): result = -1 if not text or not any_of: return result any_of = list(set(any_of)) i = 0 while i < len(text) and result == -1: for c in any_of: if c == text[i]: result = i break i += 1 return result @staticmethod
MIT License
torchsynth/torchsynth
torchsynth/synth.py
AbstractSynth.on_post_move_to_device
python
def on_post_move_to_device(self) -> None:
    self.synthconfig.to(self.device)
    for module in self.modules():
        if isinstance(module, SynthModule):
            module._update_device(self.device)
LightningModule hook triggered after this Synth has been moved to a different device. Used to update the device settings of the child SynthModules.
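A hedged sketch of when this hook fires: moving a concrete synth (torchsynth's Voice is assumed here, along with a SynthConfig small enough for a laptop) to another device is what ends up invoking on_post_move_to_device so the child SynthModules follow.

import torch
from torchsynth.config import SynthConfig
from torchsynth.synth import Voice  # Voice subclasses AbstractSynth

# Assumption: reproducible=False permits a small, non-standard batch size.
config = SynthConfig(batch_size=4, reproducible=False)
voice = Voice(synthconfig=config)

if torch.cuda.is_available():
    # After the move, the hook above pushes synthconfig and every child
    # SynthModule onto the same device as the parent synth.
    voice = voice.to("cuda")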
https://github.com/torchsynth/torchsynth/blob/a2fdb489ca2a548b4a6b2e532fb64d2f814ada6c/torchsynth/synth.py#L367-L376
import json import os import sys from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union import pkg_resources if sys.version_info.major == 3 and sys.version_info.minor >= 8: from typing import OrderedDict as OrderedDictType else: from typing import Dict as OrderedDictType import torch from pytorch_lightning.core.lightning import LightningModule from torch import Tensor as T from torchsynth.config import N_BATCHSIZE_FOR_TRAIN_TEST_REPRODUCIBILITY, SynthConfig from torchsynth.module import ( ADSR, LFO, VCA, AudioMixer, ControlRateUpsample, ControlRateVCA, ModulationMixer, MonophonicKeyboard, Noise, SineVCO, SquareSawVCO, SynthModule, ) from torchsynth.parameter import ModuleParameter from torchsynth.signal import Signal class AbstractSynth(LightningModule): def __init__(self, synthconfig: Optional[SynthConfig] = None, *args, **kwargs): super().__init__(*args, **kwargs) if synthconfig is not None: self.synthconfig = synthconfig else: self.synthconfig = SynthConfig() @property def batch_size(self) -> T: assert self.synthconfig.batch_size.ndim == 0 return self.synthconfig.batch_size @property def sample_rate(self) -> T: assert self.synthconfig.sample_rate.ndim == 0 return self.synthconfig.sample_rate @property def buffer_size(self) -> T: assert self.synthconfig.buffer_size.ndim == 0 return self.synthconfig.buffer_size @property def buffer_size_seconds(self) -> T: assert self.synthconfig.buffer_size_seconds.ndim == 0 return self.synthconfig.buffer_size_seconds def add_synth_modules( self, modules: List[Tuple[str, SynthModule, Optional[Dict[str, Any]]]] ): for module_tuple in modules: if len(module_tuple) == 3: name, module, params = module_tuple else: name, module = module_tuple params = {} if not issubclass(module, SynthModule): raise TypeError(f"{module} is not a SynthModule") self.add_module( name, module(self.synthconfig, device=self.device, **params) ) def get_parameters( self, include_frozen: bool = False ) -> OrderedDictType[Tuple[str, str], ModuleParameter]: parameters = [] for module_name, module in sorted(self.named_modules()): if isinstance(module, SynthModule): for parameter in module.parameters(): if include_frozen or not ModuleParameter.is_parameter_frozen( parameter ): parameters.append( ((module_name, parameter.parameter_name), parameter) ) return OrderedDict(parameters) def set_parameters( self, params: Dict[Tuple[str, str], T], freeze: Optional[bool] = False ): for (module_name, param_name), value in params.items(): module = getattr(self, module_name) module.set_parameter(param_name, value.to(self.device)) if freeze: module.get_parameter(param_name).frozen = True def freeze_parameters(self, params: List[Tuple[str, str]]): for module_name, param_name in params: module = getattr(self, module_name) module.get_parameter(param_name).frozen = True def unfreeze_all_parameters(self): for param in self.parameters(): if isinstance(param, ModuleParameter): param.frozen = False def output(self, *args: Any, **kwargs: Any) -> Signal: raise NotImplementedError("Derived classes must override this method") def forward( self, batch_idx: Optional[int] = None, *args: Any, **kwargs: Any ) -> Tuple[Signal, torch.Tensor, Union[torch.Tensor, None]]: if self.synthconfig.reproducible and batch_idx is None: raise ValueError( "Reproducible mode is on, you must " "pass a batch index when calling this synth" ) is_train = self._batch_idx_to_is_train(batch_idx) if self.synthconfig.no_grad: with torch.no_grad(): if batch_idx is not None: self.randomize(seed=batch_idx) 
params = torch.stack([p.data for p in self.parameters()], dim=1) return self.output(*args, **kwargs), params, is_train else: if batch_idx is not None: self.randomize(seed=batch_idx) params = torch.stack([p.data for p in self.parameters()], dim=1) return self.output(*args, **kwargs), params, is_train def _batch_idx_to_is_train( self, batch_idx: Union[None, int] ) -> Union[None, torch.tensor]: if batch_idx is not None: idxs = torch.range( self.batch_size * batch_idx, self.batch_size * (batch_idx + 1) - 1, device=self.device, ) assert len(idxs) == self.batch_size is_train = (idxs // N_BATCHSIZE_FOR_TRAIN_TEST_REPRODUCIBILITY) % 10 != 9 else: is_train = None return is_train def test_step(self, batch, batch_idx): return 0.0 @property def hyperparameters(self) -> OrderedDictType[Tuple[str, str, str], Any]: hparams = [] for (module_name, parameter_name), parameter in self.get_parameters().items(): hparams.append( ( (module_name, parameter_name, "curve"), parameter.parameter_range.curve, ) ) hparams.append( ( (module_name, parameter_name, "symmetric"), parameter.parameter_range.symmetric, ) ) return OrderedDict(hparams) def set_hyperparameter(self, hyperparameter: Tuple[str, str, str], value: Any): module = getattr(self, hyperparameter[0]) parameter = module.get_parameter(hyperparameter[1]) assert not ModuleParameter.is_parameter_frozen(parameter) setattr(parameter.parameter_range, hyperparameter[2], value) def save_hyperparameters(self, filename: str, indent=True) -> None: hp = [{"name": key, "value": val} for key, val in self.hyperparameters.items()] with open(os.path.abspath(filename), "w") as fp: json.dump(hp, fp, indent=indent) def load_hyperparameters(self, nebula: str) -> None: try: synth = type(self).__name__.lower() nebulae_str = f"nebulae/{synth}/{nebula}.json" data = pkg_resources.resource_string(__name__, nebulae_str) hyperparameters = json.loads(data) except FileNotFoundError: with open(os.path.abspath(nebula), "r") as fp: hyperparameters = json.load(fp) for hp in hyperparameters: self.set_hyperparameter(hp["name"], hp["value"]) def randomize(self, seed: Optional[int] = None): parameters = [param for _, param in sorted(self.named_parameters())] if seed is not None: cpu_rng = torch.Generator(device="cpu") new_values = [] for i in range(self.batch_size): cpu_rng.manual_seed(seed * self.batch_size.numpy().item() + i) new_values.append( torch.rand((len(parameters),), device="cpu", generator=cpu_rng) ) new_values = torch.stack(new_values, dim=1) if self.device.type != "cpu": new_values = new_values.pin_memory().to(self.device, non_blocking=True) for i, parameter in enumerate(parameters): if not ModuleParameter.is_parameter_frozen(parameter): parameter.data = new_values[i] else: assert not self.synthconfig.reproducible for parameter in parameters: if not ModuleParameter.is_parameter_frozen(parameter): parameter.data.uniform_(0, 1) for module in self._modules: self._modules[module].seed = seed
Apache License 2.0
code42/code42cli
src/code42cli/cmds/securitydata.py
clear_checkpoint
python
def clear_checkpoint(state, checkpoint_name):
    _get_file_event_cursor_store(state.profile.name).delete(checkpoint_name)
Remove the saved file event checkpoint from `--use-checkpoint/-c` mode.
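Under the hood this just asks the profile's cursor store to drop the named checkpoint; a hedged sketch of the equivalent direct call, assuming FileEventCursorStore (imported in this module) is constructed from a profile name. The profile and checkpoint names below are placeholders.

from code42cli.cmds.search.cursor_store import FileEventCursorStore

# Assumption: the store is keyed by profile name and exposes delete(),
# which is what clear_checkpoint() reaches through the CLI state object.
store = FileEventCursorStore("my-profile")
store.delete("my-checkpoint")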
https://github.com/code42/code42cli/blob/7258218f6c1680dd8354e1df662da503d62dfe73/src/code42cli/cmds/securitydata.py#L337-L339
from pprint import pformat import click import py42.sdk.queries.fileevents.filters as f from click import echo from py42.exceptions import Py42InvalidPageTokenError from py42.sdk.queries.fileevents.file_event_query import FileEventQuery from py42.sdk.queries.fileevents.filters import InsertionTimestamp from py42.sdk.queries.fileevents.filters.exposure_filter import ExposureType from py42.sdk.queries.fileevents.filters.file_filter import FileCategory from py42.sdk.queries.fileevents.filters.risk_filter import RiskIndicator from py42.sdk.queries.fileevents.filters.risk_filter import RiskSeverity import code42cli.cmds.search.options as searchopt import code42cli.options as opt from code42cli.click_ext.groups import OrderedGroup from code42cli.click_ext.options import incompatible_with from code42cli.click_ext.types import MapChoice from code42cli.cmds.search import SendToCommand from code42cli.cmds.search.cursor_store import FileEventCursorStore from code42cli.cmds.search.options import send_to_format_options from code42cli.cmds.search.options import server_options from code42cli.cmds.util import convert_to_or_query from code42cli.cmds.util import create_time_range_filter from code42cli.cmds.util import try_get_default_header from code42cli.date_helper import convert_datetime_to_timestamp from code42cli.date_helper import limit_date_range from code42cli.logger import get_main_cli_logger from code42cli.options import format_option from code42cli.options import sdk_options from code42cli.output_formats import FileEventsOutputFormat from code42cli.output_formats import FileEventsOutputFormatter from code42cli.output_formats import OutputFormatter from code42cli.util import warn_interrupt logger = get_main_cli_logger() MAX_EVENT_PAGE_SIZE = 10000 SECURITY_DATA_KEYWORD = "file events" file_events_format_option = click.option( "-f", "--format", type=click.Choice(FileEventsOutputFormat(), case_sensitive=False), help="The output format of the result. Defaults to table format.", default=FileEventsOutputFormat.TABLE, ) exposure_type_option = click.option( "-t", "--type", multiple=True, type=click.Choice(list(ExposureType.choices())), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, callback=searchopt.is_in_filter(f.ExposureType), help="Limits events to those with given exposure types.", ) username_option = click.option( "--c42-username", multiple=True, callback=searchopt.is_in_filter(f.DeviceUsername), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to endpoint events for these Code42 users.", ) actor_option = click.option( "--actor", multiple=True, callback=searchopt.is_in_filter(f.Actor), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to only those enacted by the cloud service user " "of the person who caused the event.", ) md5_option = click.option( "--md5", multiple=True, callback=searchopt.is_in_filter(f.MD5), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to file events where the file has one of these MD5 hashes.", ) sha256_option = click.option( "--sha256", multiple=True, callback=searchopt.is_in_filter(f.SHA256), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to file events where the file has one of these SHA256 hashes.", ) source_option = click.option( "--source", multiple=True, callback=searchopt.is_in_filter(f.Source), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to only those from one of these sources. 
For example, Gmail, Box, or Endpoint.", ) file_name_option = click.option( "--file-name", multiple=True, callback=searchopt.is_in_filter(f.FileName), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to file events where the file has one of these names.", ) file_path_option = click.option( "--file-path", multiple=True, callback=searchopt.is_in_filter(f.FilePath), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to file events where the file is located at one of these paths. Applies to endpoint file events only.", ) file_category_option = click.option( "--file-category", multiple=True, type=MapChoice( choices=list(FileCategory.choices()), extras_map={ "AUDIO": FileCategory.AUDIO, "DOCUMENT": FileCategory.DOCUMENT, "EXECUTABLE": FileCategory.EXECUTABLE, "IMAGE": FileCategory.IMAGE, "PDF": FileCategory.PDF, "PRESENTATION": FileCategory.PRESENTATION, "SCRIPT": FileCategory.SCRIPT, "SOURCE_CODE": FileCategory.SOURCE_CODE, "SPREADSHEET": FileCategory.SPREADSHEET, "VIDEO": FileCategory.VIDEO, "VIRTUAL_DISK_IMAGE": FileCategory.VIRTUAL_DISK_IMAGE, "ARCHIVE": FileCategory.ZIP, "ZIP": FileCategory.ZIP, "Zip": FileCategory.ZIP, }, ), callback=searchopt.is_in_filter(f.FileCategory), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to file events where the file can be classified by one of these categories.", ) process_owner_option = click.option( "--process-owner", multiple=True, callback=searchopt.is_in_filter(f.ProcessOwner), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits exposure events by process owner, as reported by the device’s operating system. " "Applies only to `Printed` and `Browser or app read` events.", ) tab_url_option = click.option( "--tab-url", multiple=True, callback=searchopt.is_in_filter(f.TabURL), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to be exposure events with one of the specified destination tab URLs.", ) include_non_exposure_option = click.option( "--include-non-exposure", is_flag=True, callback=searchopt.exists_filter(f.ExposureType), cls=incompatible_with(["advanced_query", "type", "saved_search"]), help="Get all events including non-exposure events.", ) risk_indicator_map = { "PUBLIC_CORPORATE_BOX": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_BOX, "PUBLIC_CORPORATE_GOOGLE": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_GOOGLE_DRIVE, "PUBLIC_CORPORATE_ONEDRIVE": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_ONEDRIVE, "SENT_CORPORATE_GMAIL": RiskIndicator.CloudDataExposures.SENT_CORPORATE_GMAIL, "SHARED_CORPORATE_BOX": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_BOX, "SHARED_CORPORATE_GOOGLE_DRIVE": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_GOOGLE_DRIVE, "SHARED_CORPORATE_ONEDRIVE": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_ONEDRIVE, "AMAZON_DRIVE": RiskIndicator.CloudStorageUploads.AMAZON_DRIVE, "BOX": RiskIndicator.CloudStorageUploads.BOX, "DROPBOX": RiskIndicator.CloudStorageUploads.DROPBOX, "GOOGLE_DRIVE": RiskIndicator.CloudStorageUploads.GOOGLE_DRIVE, "ICLOUD": RiskIndicator.CloudStorageUploads.ICLOUD, "MEGA": RiskIndicator.CloudStorageUploads.MEGA, "ONEDRIVE": RiskIndicator.CloudStorageUploads.ONEDRIVE, "ZOHO": RiskIndicator.CloudStorageUploads.ZOHO, "BITBUCKET": RiskIndicator.CodeRepositoryUploads.BITBUCKET, "GITHUB": RiskIndicator.CodeRepositoryUploads.GITHUB, "GITLAB": RiskIndicator.CodeRepositoryUploads.GITLAB, "SOURCEFORGE": RiskIndicator.CodeRepositoryUploads.SOURCEFORGE, "STASH": 
RiskIndicator.CodeRepositoryUploads.STASH, "163.COM": RiskIndicator.EmailServiceUploads.ONESIXTHREE_DOT_COM, "126.COM": RiskIndicator.EmailServiceUploads.ONETWOSIX_DOT_COM, "AOL": RiskIndicator.EmailServiceUploads.AOL, "COMCAST": RiskIndicator.EmailServiceUploads.COMCAST, "GMAIL": RiskIndicator.EmailServiceUploads.GMAIL, "ICLOUD_MAIL": RiskIndicator.EmailServiceUploads.ICLOUD, "MAIL.COM": RiskIndicator.EmailServiceUploads.MAIL_DOT_COM, "OUTLOOK": RiskIndicator.EmailServiceUploads.OUTLOOK, "PROTONMAIL": RiskIndicator.EmailServiceUploads.PROTONMAIL, "QQMAIL": RiskIndicator.EmailServiceUploads.QQMAIL, "SINA_MAIL": RiskIndicator.EmailServiceUploads.SINA_MAIL, "SOHU_MAIL": RiskIndicator.EmailServiceUploads.SOHU_MAIL, "YAHOO": RiskIndicator.EmailServiceUploads.YAHOO, "ZOHO_MAIL": RiskIndicator.EmailServiceUploads.ZOHO_MAIL, "AIRDROP": RiskIndicator.ExternalDevices.AIRDROP, "REMOVABLE_MEDIA": RiskIndicator.ExternalDevices.REMOVABLE_MEDIA, "AUDIO": RiskIndicator.FileCategories.AUDIO, "DOCUMENT": RiskIndicator.FileCategories.DOCUMENT, "EXECUTABLE": RiskIndicator.FileCategories.EXECUTABLE, "IMAGE": RiskIndicator.FileCategories.IMAGE, "PDF": RiskIndicator.FileCategories.PDF, "PRESENTATION": RiskIndicator.FileCategories.PRESENTATION, "SCRIPT": RiskIndicator.FileCategories.SCRIPT, "SOURCE_CODE": RiskIndicator.FileCategories.SOURCE_CODE, "SPREADSHEET": RiskIndicator.FileCategories.SPREADSHEET, "VIDEO": RiskIndicator.FileCategories.VIDEO, "VIRTUAL_DISK_IMAGE": RiskIndicator.FileCategories.VIRTUAL_DISK_IMAGE, "ZIP": RiskIndicator.FileCategories.ZIP, "FACEBOOK_MESSENGER": RiskIndicator.MessagingServiceUploads.FACEBOOK_MESSENGER, "MICROSOFT_TEAMS": RiskIndicator.MessagingServiceUploads.MICROSOFT_TEAMS, "SLACK": RiskIndicator.MessagingServiceUploads.SLACK, "WHATSAPP": RiskIndicator.MessagingServiceUploads.WHATSAPP, "OTHER": RiskIndicator.Other.OTHER, "UNKNOWN": RiskIndicator.Other.UNKNOWN, "FACEBOOK": RiskIndicator.SocialMediaUploads.FACEBOOK, "LINKEDIN": RiskIndicator.SocialMediaUploads.LINKEDIN, "REDDIT": RiskIndicator.SocialMediaUploads.REDDIT, "TWITTER": RiskIndicator.SocialMediaUploads.TWITTER, "FILE_MISMATCH": RiskIndicator.UserBehavior.FILE_MISMATCH, "OFF_HOURS": RiskIndicator.UserBehavior.OFF_HOURS, "REMOTE": RiskIndicator.UserBehavior.REMOTE, "FIRST_DESTINATION_USE": RiskIndicator.UserBehavior.FIRST_DESTINATION_USE, "RARE_DESTINATION_USE": RiskIndicator.UserBehavior.RARE_DESTINATION_USE, } risk_indicator_map_reversed = {v: k for k, v in risk_indicator_map.items()} def risk_indicator_callback(filter_cls): def callback(ctx, param, arg): if arg: mapped_args = tuple(risk_indicator_map[i] for i in arg) filter_func = searchopt.is_in_filter(filter_cls) return filter_func(ctx, param, mapped_args) return callback risk_indicator_option = click.option( "--risk-indicator", multiple=True, type=MapChoice( choices=list(risk_indicator_map.keys()), extras_map=risk_indicator_map_reversed, ), callback=risk_indicator_callback(f.RiskIndicator), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to those classified by the given risk indicator categories.", ) risk_severity_option = click.option( "--risk-severity", multiple=True, type=click.Choice(list(RiskSeverity.choices())), callback=searchopt.is_in_filter(f.RiskSeverity), cls=searchopt.AdvancedQueryAndSavedSearchIncompatible, help="Limits events to those classified by the given risk severity.", ) begin_option = opt.begin_option( SECURITY_DATA_KEYWORD, callback=lambda ctx, param, arg: convert_datetime_to_timestamp( limit_date_range(arg, 
max_days_back=90) ), ) end_option = opt.end_option(SECURITY_DATA_KEYWORD) checkpoint_option = opt.checkpoint_option( SECURITY_DATA_KEYWORD, cls=searchopt.AdvancedQueryAndSavedSearchIncompatible ) advanced_query_option = searchopt.advanced_query_option(SECURITY_DATA_KEYWORD) def _get_saved_search_option(): def _get_saved_search_query(ctx, param, arg): if arg is None: return query = ctx.obj.sdk.securitydata.savedsearches.get_query(arg) return query return click.option( "--saved-search", help="Get events from a saved search filter with the given ID." "WARNING: Using a saved search is incompatible with other query-building arguments.", callback=_get_saved_search_query, cls=incompatible_with("advanced_query"), ) def _create_header_keys_map(): return {"name": "Name", "id": "Id"} def _create_search_header_map(): return { "fileName": "FileName", "filePath": "FilePath", "eventType": "Type", "eventTimestamp": "EventTimestamp", "fileCategory": "FileCategory", "fileSize": "FileSize", "fileOwner": "FileOwner", "md5Checksum": "MD5Checksum", "sha256Checksum": "SHA256Checksum", "riskIndicators": "RiskIndicator", "riskSeverity": "RiskSeverity", } def search_options(f): f = checkpoint_option(f) f = advanced_query_option(f) f = end_option(f) f = begin_option(f) return f def file_event_options(f): f = exposure_type_option(f) f = username_option(f) f = actor_option(f) f = md5_option(f) f = sha256_option(f) f = source_option(f) f = file_name_option(f) f = file_path_option(f) f = file_category_option(f) f = process_owner_option(f) f = tab_url_option(f) f = include_non_exposure_option(f) f = risk_indicator_option(f) f = risk_severity_option(f) f = _get_saved_search_option()(f) return f @click.group(cls=OrderedGroup) @sdk_options(hidden=True) def security_data(state): state.cursor_getter = _get_file_event_cursor_store @security_data.command() @click.argument("checkpoint-name") @sdk_options()
MIT License
open-reaction-database/ord-schema
ord_schema/message_helpers.py
set_dative_bonds
python
def set_dative_bonds(
        mol: Chem.Mol, from_atoms: Tuple[str, ...] = ('N', 'P')) -> Chem.Mol:
    p_table = Chem.GetPeriodicTable()
    edit_mol = Chem.RWMol(mol)
    edit_mol.UpdatePropertyCache(strict=False)
    metals = [atom for atom in edit_mol.GetAtoms() if is_transition_metal(atom)]
    for metal in metals:
        for nbr in metal.GetNeighbors():
            nbr_atom = nbr.GetSymbol()
            if nbr_atom in from_atoms and nbr_atom == 'C':
                if nbr.GetFormalCharge() > 0:
                    warnings.warn(
                        f'A positively charged C atom bound to '
                        f'{metal.GetSymbol()} was found in the compound '
                        f'with SMILES {Chem.MolToSmiles(mol)}. If this is '
                        f'a datively bound metal-carbene complex, '
                        f'the positive charge should be removed from '
                        f'the SMILES string before setting dative bonds')
                if (nbr.GetTotalValence() + nbr.GetFormalCharge() !=
                        p_table.GetDefaultValence(nbr_atom) and
                        edit_mol.GetBondBetweenAtoms(
                            nbr.GetIdx(), metal.GetIdx()).GetBondType() ==
                        Chem.BondType.SINGLE):
                    edit_mol.RemoveBond(nbr.GetIdx(), metal.GetIdx())
                    edit_mol.AddBond(nbr.GetIdx(), metal.GetIdx(),
                                     Chem.BondType.DATIVE)
            elif nbr_atom in from_atoms and nbr_atom != 'C':
                if (nbr.GetExplicitValence() >
                        p_table.GetDefaultValence(nbr_atom) and
                        edit_mol.GetBondBetweenAtoms(
                            nbr.GetIdx(), metal.GetIdx()).GetBondType() ==
                        Chem.BondType.SINGLE):
                    edit_mol.RemoveBond(nbr.GetIdx(), metal.GetIdx())
                    edit_mol.AddBond(nbr.GetIdx(), metal.GetIdx(),
                                     Chem.BondType.DATIVE)
    return edit_mol.GetMol()
Converts metal-ligand bonds to dative. Replaces some single bonds between metals and atoms with atomic symbols in from_atoms with dative bonds. For all atoms except carbon, the replacement is only done if the atom has "too many" bonds. To handle metal-carbene complexes, metal-carbon bonds are converted to dative if the sum of the explicit and implicit valence of the carbon atom does not equal its default valence, 4. Args: mol: The molecule to be converted. from_atoms: Tuple of atomic symbols corresponding to atom types that should have atom-metal bonds converted to dative. Default is N and P. Returns: The modified molecule.
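A short usage sketch with RDKit. The SMILES is an illustrative platinum-amine fragment; sanitization is skipped because the extra N-Pt single bond would otherwise fail valence checks, which is exactly the situation the function repairs.

from rdkit import Chem

# Hypothetical input: nitrogen carries four single bonds, one of them to Pt.
mol = Chem.MolFromSmiles('CN(C)(C)[Pt]', sanitize=False)
fixed = set_dative_bonds(mol)    # defaults: from_atoms=('N', 'P')
print(Chem.MolToSmiles(fixed))   # the N-Pt bond is now written as dative (->)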
https://github.com/open-reaction-database/ord-schema/blob/3f87ab37bc404d25bce360435059fb13392db949/ord_schema/message_helpers.py#L627-L687
import enum import functools import gzip import os import re from typing import Dict, Iterable, List, Optional, Tuple, Type, TypeVar, Union import warnings import flask from google import protobuf from google.protobuf import json_format from google.protobuf import text_format import pandas as pd from rdkit import Chem from rdkit.Chem import rdChemReactions import ord_schema from ord_schema import units from ord_schema.proto import reaction_pb2 _COMPOUND_IDENTIFIER_LOADERS = { reaction_pb2.CompoundIdentifier.SMILES: Chem.MolFromSmiles, reaction_pb2.CompoundIdentifier.INCHI: Chem.MolFromInchi, reaction_pb2.CompoundIdentifier.MOLBLOCK: Chem.MolFromMolBlock, } MessageType = TypeVar('MessageType') def build_compound(smiles: Optional[str] = None, name: Optional[str] = None, amount: Optional[str] = None, role: Optional[str] = None, is_limiting: Optional[bool] = None, prep: Optional[str] = None, prep_details: Optional[str] = None, vendor: Optional[str] = None) -> reaction_pb2.Compound: compound = reaction_pb2.Compound() if smiles: compound.identifiers.add(value=smiles, type='SMILES') if name: compound.identifiers.add(value=name, type='NAME') if amount: if amount.lower() in ('saturated', 'catalytic', 'titrated'): compound.amount.unmeasured.CopyFrom( reaction_pb2.UnmeasuredAmount(type=amount.upper())) else: resolver = units.UnitResolver() amount_pb = resolver.resolve(amount) if isinstance(amount_pb, reaction_pb2.Mass): compound.amount.mass.CopyFrom(amount_pb) elif isinstance(amount_pb, reaction_pb2.Moles): compound.amount.moles.CopyFrom(amount_pb) elif isinstance(amount_pb, reaction_pb2.Volume): compound.amount.volume.CopyFrom(amount_pb) else: raise TypeError(f'unsupported units for amount: {amount_pb}') if role: field = reaction_pb2.Compound.DESCRIPTOR.fields_by_name['reaction_role'] values_dict = field.enum_type.values_by_name try: compound.reaction_role = values_dict[role.upper()].number except KeyError as error: raise KeyError( f'{role} is not a supported type: {values_dict.keys()}' ) from error if is_limiting is not None: if not (is_limiting is True or is_limiting is False): raise TypeError( f'is_limiting must be a boolean value: {is_limiting}') compound.is_limiting = is_limiting if prep: field = reaction_pb2.CompoundPreparation.DESCRIPTOR.fields_by_name[ 'type'] values_dict = field.enum_type.values_by_name try: compound.preparations.add().type = values_dict[prep.upper()].number except KeyError as error: raise KeyError( f'{prep} is not a supported type: {values_dict.keys()}' ) from error if (compound.preparations[0].type == reaction_pb2.CompoundPreparation.CUSTOM and not prep_details): raise ValueError( 'prep_details must be provided when CUSTOM prep is used') if prep_details: if not prep: raise ValueError('prep must be provided when prep_details is used') compound.preparations[0].details = prep_details if vendor: compound.source.vendor = vendor return compound def set_solute_moles(solute: reaction_pb2.Compound, solvents: List[reaction_pb2.Compound], concentration: str, overwrite: bool = False) -> List[reaction_pb2.Compound]: if solute.amount.WhichOneof('kind') and not overwrite: raise ValueError('solute has defined amount and overwrite is False') volume_liter = 0 for solvent in solvents: amount = solvent.amount if not amount.HasField('volume') or not amount.volume.value: raise ValueError('solvent must have defined volume') if amount.volume.units == amount.volume.LITER: volume_liter += amount.volume.value elif amount.volume.units == amount.volume.MILLILITER: volume_liter += amount.volume.value 
* 1e-3 elif amount.volume.units == amount.volume.MICROLITER: volume_liter += amount.volume.value * 1e-6 elif amount.volume.units == amount.volume.NANOLITER: volume_liter += amount.volume.value * 1e-9 else: raise ValueError('solvent units not recognized by set_solute_moles') resolver = units.UnitResolver( unit_synonyms=units.CONCENTRATION_UNIT_SYNONYMS, forbidden_units={}) concentration_pb = resolver.resolve(concentration) if concentration_pb.units == concentration_pb.MOLAR: concentration_molar = concentration_pb.value elif concentration_pb.units == concentration_pb.MILLIMOLAR: concentration_molar = concentration_pb.value * 1e-3 elif concentration_pb.units == concentration_pb.MICROMOLAR: concentration_molar = concentration_pb.value * 1e-6 else: raise ValueError(f'unsupported units: {concentration_pb.units}') moles = volume_liter * concentration_molar if moles < 1e-6: value = moles * 1e9 unit = reaction_pb2.Moles.NANOMOLE elif moles < 1e-3: value = moles * 1e6 unit = reaction_pb2.Moles.MICROMOLE elif moles < 1: value = moles * 1e3 unit = reaction_pb2.Moles.MILLIMOLE else: value = moles unit = reaction_pb2.Moles.MOLE solute.amount.moles.value = value solute.amount.moles.units = unit return [solute] + solvents def build_data(filename: str, description: str) -> reaction_pb2.Data: _, extension = os.path.splitext(filename) if not extension.startswith('.'): raise ValueError(f'cannot deduce the file format for {filename}') data = reaction_pb2.Data() data.format = extension[1:] with open(filename, 'rb') as f: data.bytes_value = f.read() data.description = description return data def find_submessages(message: ord_schema.Message, submessage_type: Type[MessageType]) -> List[MessageType]: if not issubclass(submessage_type, ord_schema.Message): raise TypeError('submessage_type must be a Protocol Buffer type') submessage_name = submessage_type.DESCRIPTOR.full_name submessages = [] for field, value in message.ListFields(): if field.type != field.TYPE_MESSAGE: continue if field.message_type.full_name == submessage_name: if field.label == field.LABEL_REPEATED: submessages.extend(value) else: submessages.append(value) elif field.message_type.GetOptions().map_entry: field_value = field.message_type.fields_by_name['value'] if field_value.type != field_value.TYPE_MESSAGE: continue if field_value.message_type.full_name == submessage_name: submessages.extend(value.values()) else: for submessage in value.values(): submessages.extend( find_submessages(submessage, submessage_type)) elif field.label == field.LABEL_REPEATED: for submessage in value: submessages.extend(find_submessages(submessage, submessage_type)) else: submessages.extend(find_submessages(value, submessage_type)) return submessages def smiles_from_compound(compound: reaction_pb2.Compound) -> str: return (get_compound_smiles(compound) or Chem.MolToSmiles(mol_from_compound(compound))) def molblock_from_compound(compound: reaction_pb2.Compound) -> str: return (get_compound_molblock(compound) or Chem.MolToMolBlock(mol_from_compound(compound))) def mol_from_compound( compound: reaction_pb2.Compound, return_identifier: bool = False ) -> Union[Chem.Mol, Tuple[Chem.Mol, str]]: for identifier in compound.identifiers: if identifier.type in _COMPOUND_IDENTIFIER_LOADERS: mol = _COMPOUND_IDENTIFIER_LOADERS[identifier.type]( identifier.value) if not mol: raise ValueError( f'invalid structural identifier for Compound: {identifier}') if return_identifier: return mol, identifier return mol raise ValueError(f'no valid structural identifier for Compound: {compound}') def 
check_compound_identifiers(compound: reaction_pb2.Compound): smiles = set() for identifier in compound.identifiers: if identifier.type in _COMPOUND_IDENTIFIER_LOADERS: mol = _COMPOUND_IDENTIFIER_LOADERS[identifier.type]( identifier.value) else: continue if not mol: raise ValueError( f'invalid structural identifier for Compound: {identifier}') smiles.add(Chem.MolToSmiles(mol)) if len(smiles) > 1: raise ValueError(f'structural identifiers are inconsistent: {smiles}') def get_reaction_smiles(message: reaction_pb2.Reaction, generate_if_missing: bool = False, allow_incomplete: bool = True, validate: bool = True) -> Optional[str]: types = [ reaction_pb2.ReactionIdentifier.REACTION_SMILES, reaction_pb2.ReactionIdentifier.REACTION_CXSMILES ] for identifier in message.identifiers: if identifier.type in types: return identifier.value if not generate_if_missing: return None reactants, agents, products = set(), set(), set() roles = reaction_pb2.ReactionRole for key in sorted(message.inputs): for compound in message.inputs[key].components: try: smiles = smiles_from_compound(compound) except ValueError as error: if allow_incomplete: continue raise error if compound.reaction_role in [ roles.REAGENT, roles.SOLVENT, roles.CATALYST ]: agents.add(smiles) elif compound.reaction_role == roles.INTERNAL_STANDARD: continue else: reactants.add(smiles) for outcome in message.outcomes: for product in outcome.products: try: smiles = smiles_from_compound(product) except ValueError as error: if allow_incomplete: continue raise error if product.reaction_role == roles.PRODUCT: products.add(smiles) elif product.reaction_role in [ roles.REAGENT, roles.SOLVENT, roles.CATALYST, roles.INTERNAL_STANDARD ]: continue if not allow_incomplete and (not reactants or not products): raise ValueError( 'reaction must contain at least one reactant and one product') if not reactants and not products: raise ValueError('reaction contains no valid reactants or products') components = [ '.'.join(sorted(reactants)), '.'.join(sorted(agents)), '.'.join(sorted(products)) ] reaction_smiles = '>'.join(components) if validate and not allow_incomplete: reaction_smiles = validate_reaction_smiles(reaction_smiles) return reaction_smiles def validate_reaction_smiles(reaction_smiles: str) -> str: try: reaction = rdChemReactions.ReactionFromSmarts(reaction_smiles, useSmiles=True) if not reaction: raise ValueError('reaction SMILES could not be parsed') rdChemReactions.SanitizeRxn(reaction) _, num_errors = reaction.Validate() if num_errors: raise ValueError('reaction SMILES contains errors') except (RuntimeError, ValueError) as error: raise ValueError( f'bad reaction SMILES ({str(error)}): {reaction_smiles}') from error return rdChemReactions.ReactionToSmiles(reaction) def reaction_from_smiles(reaction_smiles): reaction = rdChemReactions.ReactionFromSmarts(reaction_smiles, useSmiles=True) rdChemReactions.RemoveMappingNumbersFromReactions(reaction) message = reaction_pb2.Reaction() message.identifiers.add(value=reaction_smiles, type='REACTION_SMILES') reaction_input = message.inputs['from_reaction_smiles'] for mol in reaction.GetReactants(): component = reaction_input.components.add() component.identifiers.add(value=Chem.MolToSmiles(mol), type='SMILES') component.reaction_role = reaction_pb2.ReactionRole.REACTANT for smiles in reaction_smiles.split('>')[1].split('.'): if not smiles: continue component = reaction_input.components.add() component.identifiers.add(value=smiles, type='SMILES') component.reaction_role = reaction_pb2.ReactionRole.REAGENT outcome = 
message.outcomes.add() for mol in reaction.GetProducts(): component = outcome.products.add() component.identifiers.add(value=Chem.MolToSmiles(mol), type='SMILES') component.reaction_role = reaction_pb2.ReactionRole.PRODUCT return message def get_product_yield(product: reaction_pb2.ProductCompound, as_measurement: bool = False): for measurement in product.measurements: if measurement.type == measurement.YIELD: if as_measurement: return measurement return measurement.percentage.value return None def get_compound_identifier( compound: reaction_pb2.Compound, identifier_type: reaction_pb2.CompoundIdentifier.IdentifierType ) -> Optional[str]: for identifier in compound.identifiers: if identifier.type == identifier_type: return identifier.value return None def set_compound_identifier( compound: reaction_pb2.Compound, identifier_type: reaction_pb2.CompoundIdentifier.IdentifierType, value: str) -> reaction_pb2.CompoundIdentifier: for identifier in compound.identifiers: if identifier.type == identifier_type: identifier.value = value return identifier identifier = compound.identifiers.add(type=identifier_type, value=value) return identifier def get_compound_smiles(compound: reaction_pb2.Compound) -> Optional[str]: return get_compound_identifier(compound, reaction_pb2.CompoundIdentifier.SMILES) def set_compound_smiles(compound: reaction_pb2.Compound, value: str) -> reaction_pb2.CompoundIdentifier: return set_compound_identifier(compound, reaction_pb2.CompoundIdentifier.SMILES, value) def is_transition_metal(atom: Chem.Atom) -> bool: atom_n = atom.GetAtomicNum() return 22 <= atom_n <= 29 or 40 <= atom_n <= 47 or 72 <= atom_n <= 79 def has_transition_metal(mol: Chem.Mol) -> bool: for atom in mol.GetAtoms(): if is_transition_metal(atom): return True return False
Apache License 2.0
backtrace-labs/umash
t/exact_test.py
lte_prob
python
def lte_prob(name, p_a_lower=0.5, a_offset=0, b_offset=0):
    return DEFAULT_STATISTIC._replace(
        name=name,
        probability_a_lower=p_a_lower,
        a_offset=a_offset,
        b_offset=b_offset,
        fn_name="exact_test_lte_prob",
        fn_args=(),
    )
Returns a statistic that computes the probability that a value chosen uniformly at random from A is <= a value uniformly chosen from B.
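Usage is just building the Statistic record that exact_test later consumes; a sketch assuming the module is importable as exact_test (it lives at t/exact_test.py) and that the statistic name is a placeholder.

from exact_test import lte_prob

# Statistic describing P(a <= b) for a drawn from A and b from B;
# probability_a_lower=0.5 is the null-hypothesis value.
stat = lte_prob("latency_lte")
print(stat.fn_name)               # "exact_test_lte_prob"
print(stat.probability_a_lower)   # 0.5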
https://github.com/backtrace-labs/umash/blob/97466abbb12922839c6c101b73da2d61653b0f28/t/exact_test.py#L39-L50
from collections import defaultdict, namedtuple import math import queue import sys import time from csm import csm from exact_test_sampler import ( Sample, Statistic, actual_data_results, resampled_data_results, ) __all__ = [ "exact_test", "lte_prob", "gt_prob", "mean", "quantile", "median", "q99", ] Result = namedtuple("Result", ["actual_value", "judgement", "m", "n", "num_trials"]) DEFAULT_STATISTIC = Statistic(None, 0.5, 0, 0, None, ())
MIT License
cornell-zhang/quickest
hls/test.py
load_data
python
def load_data(FLAGS, silence=False):
    if not silence:
        print ''
    if not silence:
        print 'Load data from: ', FLAGS.data_dir
    if not os.path.exists(FLAGS.data_dir):
        sys.exit("Data file " + FLAGS.data_dir + " does not exist!")
    with open(FLAGS.data_dir, "rb") as f:
        data = pickle.load(f)
    X = data['x']
    Y = data['y']
    feature_name = data['fname']
    target_name = data['tname']
    mean_features = data['fmean']
    mean_targets = data['tmean']
    std_features = data['fstd']
    std_targets = data['tstd']
    return X, Y, mean_features, mean_targets, std_features, std_targets, feature_name, target_name
Load testing dataset.
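A sketch of the pickle layout load_data expects, plus a minimal call. This assumes it runs in the same module context as hls/test.py above (so parser and load_data are in scope); the array shapes and names are made up for illustration.

import pickle
import numpy as np

# Toy dataset with every key load_data reads.
toy = {
    'x': np.zeros((10, 5)), 'y': np.zeros((10, 2)),
    'fname': ['feat%d' % i for i in range(5)], 'tname': ['t0', 't1'],
    'fmean': np.zeros(5), 'tmean': np.zeros(2),
    'fstd': np.ones(5), 'tstd': np.ones(2),
}
with open('toy_test.pkl', 'wb') as f:
    pickle.dump(toy, f)

FLAGS = parser.parse_args(['--data_dir', 'toy_test.pkl'])
X, Y, fmean, tmean, fstd, tstd, fnames, tnames = load_data(FLAGS, silence=True)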
https://github.com/cornell-zhang/quickest/blob/3d4826519baf83f60a372725c663a1beb33d3da2/hls/test.py#L27-L53
import os import pickle import sys import argparse import numpy as np parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default = './data/data_test.pkl', help = 'Directory or file of the testing dataset. \ String. Default: ./data/data_test.pkl') parser.add_argument('--models_save_dir', type=str, default='./saves/train/models_save.pkl', help='Directory or file of the pre-trained models. \ String. Default: ./train/models_save.pkl') parser.add_argument('--save_result_dir', type=str, default='./saves/test/results.pkl', help='Directory or file to save the result. \ String. Default: ./saves/test/results.pkl')
BSD 3-Clause New or Revised License
googlecloudplatform/perfkitbenchmarker
perfkitbenchmarker/providers/aws/aws_elasticached_memcached.py
ElastiCacheMemcached._IsDeleting
python
def _IsDeleting(self):
    cluster_info = self._DescribeInstance()
    return cluster_info.get('CacheClusterStatus', '') == 'deleting'
Returns True if the cluster is being deleted and False otherwise.
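A hedged sketch of how such a probe is typically consumed: poll until the cluster stops reporting 'deleting'. The loop below is illustrative; PerfKitBenchmarker's resource base class owns the real wait/retry logic.

import time

def wait_until_deleted(memcached, timeout_s=600, poll_s=15):
    # Illustrative polling loop around _IsDeleting().
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if not memcached._IsDeleting():
            return
        time.sleep(poll_s)
    raise TimeoutError('ElastiCache cluster still deleting after %ds' % timeout_s)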
https://github.com/googlecloudplatform/perfkitbenchmarker/blob/c14a122016d414351d41167029c79c9a19709384/perfkitbenchmarker/providers/aws/aws_elasticached_memcached.py#L107-L110
import json import logging from absl import flags from perfkitbenchmarker import errors from perfkitbenchmarker import managed_memory_store from perfkitbenchmarker import vm_util from perfkitbenchmarker.providers import aws from perfkitbenchmarker.providers.aws import util MEMCACHED_VERSIONS = ['1.5.10', '1.5.16', '1.6.6'] FLAGS = flags.FLAGS class ElastiCacheMemcached(managed_memory_store.BaseManagedMemoryStore): CLOUD = aws.CLOUD MEMORY_STORE = managed_memory_store.MEMCACHED def __init__(self, spec): super(ElastiCacheMemcached, self).__init__(spec) self.subnet_group_name = 'subnet-%s' % self.name self.zone = self.spec.vms[0].zone self.region = util.GetRegionFromZone(self.zone) self.node_type = FLAGS.cache_node_type self.version = FLAGS.managed_memory_store_version @staticmethod def CheckPrerequisites(benchmark_config): if (FLAGS.managed_memory_store_version and FLAGS.managed_memory_store_version not in MEMCACHED_VERSIONS): raise errors.Config.InvalidValue('Invalid Memcached version.') def GetResourceMetadata(self): result = { 'cloud_memcached_version': self.version, 'cloud_memcached_node_type': self.node_type, } return result def _CreateDependencies(self): subnet_id = self.spec.vms[0].network.subnet.id cmd = ['aws', 'elasticache', 'create-cache-subnet-group', '--region', self.region, '--cache-subnet-group-name', self.subnet_group_name, '--cache-subnet-group-description', '"memcached benchmark subnet"', '--subnet-ids', subnet_id] vm_util.IssueCommand(cmd) def _DeleteDependencies(self): cmd = ['aws', 'elasticache', 'delete-cache-subnet-group', '--region', self.region, '--cache-subnet-group-name', self.subnet_group_name] vm_util.IssueCommand(cmd, raise_on_failure=False) def _Create(self): cmd = ['aws', 'elasticache', 'create-cache-cluster', '--engine', 'memcached', '--region', self.region, '--cache-cluster-id', self.name, '--preferred-availability-zone', self.zone, '--num-cache-nodes', str(managed_memory_store.MEMCACHED_NODE_COUNT), '--cache-node-type', self.node_type, '--cache-subnet-group-name', self.subnet_group_name] if self.version: cmd += ['--engine-version', self.version] cmd += ['--tags'] cmd += util.MakeFormattedDefaultTags() vm_util.IssueCommand(cmd) def _Delete(self): cmd = ['aws', 'elasticache', 'delete-cache-cluster', '--region', self.region, '--cache-cluster-id', self.name] vm_util.IssueCommand(cmd, raise_on_failure=False)
Apache License 2.0
dsp-jetpack/jetpack
src/pilot/discover_nodes/discover_nodes/dracclient/client.py
DRACClient.__init__
python
def __init__(self, host, username, password, port=443, path='/wsman',
             protocol='https'):
    super(DRACClient, self).__init__(host, username, password, port, path,
                                     protocol)
Construct a DRACClient object. :param host: hostname or IP of the iDRAC interface :param username: username for accessing the iDRAC interface :param password: password for accessing the iDRAC interface :param port: port for accessing the iDRAC interface :param path: path for accessing the iDRAC interface :param protocol: protocol for accessing the iDRAC interface
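Construction mirrors python-dracclient's own client; a sketch with placeholder address and credentials, assuming the inner discover_nodes package is importable.

from discover_nodes.dracclient.client import DRACClient

# Placeholders only -- point these at a real iDRAC interface to use it.
client = DRACClient(host='192.0.2.10', username='root', password='calvin')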
https://github.com/dsp-jetpack/jetpack/blob/7388aa37b152c53372e74ae5f202e258a3225f93/src/pilot/discover_nodes/discover_nodes/dracclient/client.py#L35-L56
from __future__ import absolute_import from __future__ import print_function import dracclient.client as ironic_client import dracclient.resources.uris as ironic_uris import logging from .resources import uris LOG = logging.getLogger(__name__) class DRACClient(ironic_client.DRACClient):
Apache License 2.0
mozilla/make.mozilla.org
vendor-local/lib/python/south/db/generic.py
DatabaseOperations._constraints_affecting_columns
python
def _constraints_affecting_columns(self, table_name, columns, type="UNIQUE"):
    if self.dry_run:
        raise DryRunError("Cannot get constraints for columns.")
    if columns is not None:
        columns = set(map(lambda s: s.lower(), columns))
    db_name = self._get_setting('NAME')
    cnames = {}
    for col, constraints in self.lookup_constraint(db_name, table_name):
        for kind, cname in constraints:
            if kind == type:
                cnames.setdefault(cname, set())
                cnames[cname].add(col.lower())
    for cname, cols in cnames.items():
        if cols == columns or columns is None:
            yield cname
Gets the names of the constraints affecting the given columns. If columns is None, returns all constraints of the type on the table.
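A hedged usage sketch with South's conventional entry point (from south.db import db); the table and column names are illustrative.

from south.db import db

# Illustrative: yield the names of UNIQUE constraints that cover exactly
# the "slug" column of a hypothetical "books_book" table.
for cname in db._constraints_affecting_columns("books_book", ["slug"]):
    print(cname)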
https://github.com/mozilla/make.mozilla.org/blob/98b87c517b463a5bae09f29284b1dabca97bb376/vendor-local/lib/python/south/db/generic.py#L573-L595
import datetime import string import random import re import sys from django.core.management.color import no_style from django.db import transaction, models from django.db.utils import DatabaseError from django.db.backends.util import truncate_name from django.db.backends.creation import BaseDatabaseCreation from django.db.models.fields import NOT_PROVIDED from django.dispatch import dispatcher from django.conf import settings from django.utils.datastructures import SortedDict try: from django.utils.functional import cached_property except ImportError: class cached_property(object): def __init__(self, func): self.func = func def __get__(self, instance, type): res = instance.__dict__[self.func.__name__] = self.func(instance) return res from south.logger import get_logger def alias(attrname): def func(self, *args, **kwds): return getattr(self, attrname)(*args, **kwds) return func def invalidate_table_constraints(func): def _cache_clear(self, table, *args, **opts): self._set_cache(table, value=INVALID) return func(self, table, *args, **opts) return _cache_clear def delete_column_constraints(func): def _column_rm(self, table, column, *args, **opts): self._set_cache(table, column, value=[]) return func(self, table, column, *args, **opts) return _column_rm def copy_column_constraints(func): def _column_cp(self, table, column_old, column_new, *args, **opts): db_name = self._get_setting('NAME') self._set_cache(table, column_new, value=self.lookup_constraint(db_name, table, column_old)) return func(self, table, column_old, column_new, *args, **opts) return _column_cp class INVALID(Exception): def __repr__(self): return 'INVALID' class DryRunError(ValueError): pass class DatabaseOperations(object): alter_string_set_type = 'ALTER COLUMN %(column)s TYPE %(type)s' alter_string_set_null = 'ALTER COLUMN %(column)s DROP NOT NULL' alter_string_drop_null = 'ALTER COLUMN %(column)s SET NOT NULL' delete_check_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s' add_column_string = 'ALTER TABLE %s ADD COLUMN %s;' delete_unique_sql = "ALTER TABLE %s DROP CONSTRAINT %s" delete_foreign_key_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s' max_index_name_length = 63 drop_index_string = 'DROP INDEX %(index_name)s' delete_column_string = 'ALTER TABLE %s DROP COLUMN %s CASCADE;' create_primary_key_string = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s PRIMARY KEY (%(columns)s)" delete_primary_key_sql = "ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s" add_check_constraint_fragment = "ADD CONSTRAINT %(constraint)s CHECK (%(check)s)" rename_table_sql = "ALTER TABLE %s RENAME TO %s;" backend_name = None default_schema_name = "public" allows_combined_alters = True supports_foreign_keys = True has_check_constraints = True has_booleans = True @cached_property def has_ddl_transactions(self): self._possibly_initialise() connection = self._get_connection() exceptions = (DatabaseError, ) try: from MySQLdb import OperationalError exceptions += (OperationalError, ) except ImportError: pass if getattr(connection.features, 'supports_transactions', True): cursor = connection.cursor() self.start_transaction() cursor.execute('CREATE TABLE DDL_TRANSACTION_TEST (X INT)') self.rollback_transaction() try: cursor.execute('CREATE TABLE DDL_TRANSACTION_TEST (X INT)') except exceptions: return False else: return True finally: cursor.execute('DROP TABLE DDL_TRANSACTION_TEST') else: return False def __init__(self, db_alias): self.debug = False self.deferred_sql = [] self.dry_run = False self.pending_transactions 
= 0 self.pending_create_signals = [] self.db_alias = db_alias self._constraint_cache = {} self._initialised = False def lookup_constraint(self, db_name, table_name, column_name=None): def _lookup(): table = self._constraint_cache[db_name][table_name] if table is INVALID: raise INVALID elif column_name is None: return table.items() else: return table[column_name] try: ret = _lookup() return ret except INVALID, e: del self._constraint_cache[db_name][table_name] self._fill_constraint_cache(db_name, table_name) except KeyError, e: if self._is_valid_cache(db_name, table_name): return [] self._fill_constraint_cache(db_name, table_name) return self.lookup_constraint(db_name, table_name, column_name) def _set_cache(self, table_name, column_name=None, value=INVALID): db_name = self._get_setting('NAME') try: if column_name is not None: self._constraint_cache[db_name][table_name][column_name] = value else: self._constraint_cache[db_name][table_name] = value except (LookupError, TypeError): pass def _is_valid_cache(self, db_name, table_name): try: return self._constraint_cache[db_name][table_name] is not INVALID except KeyError: return False def _is_multidb(self): try: from django.db import connections except ImportError: return False else: return True def _get_connection(self): if self._is_multidb(): from django.db import connections return connections[self.db_alias] else: from django.db import connection return connection def _get_setting(self, setting_name): setting_name = setting_name.upper() connection = self._get_connection() if self._is_multidb(): return connection.settings_dict[setting_name] else: return getattr(settings, "DATABASE_%s" % setting_name) def _has_setting(self, setting_name): try: self._get_setting(setting_name) except (KeyError, AttributeError): return False else: return True def _get_schema_name(self): try: return self._get_setting('schema') except (KeyError, AttributeError): return self.default_schema_name def _possibly_initialise(self): if not self._initialised: self.connection_init() self._initialised = True def connection_init(self): pass def quote_name(self, name): return self._get_connection().ops.quote_name(name) def execute(self, sql, params=[]): self._possibly_initialise() cursor = self._get_connection().cursor() if self.debug: print " = %s" % sql, params if self.dry_run: return [] get_logger().debug('execute "%s" with params "%s"' % (sql, params)) try: cursor.execute(sql, params) except DatabaseError, e: print >> sys.stderr, 'FATAL ERROR - The following SQL query failed: %s' % sql print >> sys.stderr, 'The error was: %s' % e raise try: return cursor.fetchall() except: return [] def execute_many(self, sql, regex=r"(?mx) ([^';]* (?:'[^']*'[^';]*)*)", comment_regex=r"(?mx) (?:^\s*$)|(?:--.*$)"): sql = "\n".join([x.strip().replace("%", "%%") for x in re.split(comment_regex, sql) if x.strip()]) for st in re.split(regex, sql)[1:][::2]: self.execute(st) def add_deferred_sql(self, sql): self.deferred_sql.append(sql) def execute_deferred_sql(self): for sql in self.deferred_sql: self.execute(sql) self.deferred_sql = [] def clear_deferred_sql(self): self.deferred_sql = [] def clear_run_data(self, pending_creates = None): self.clear_deferred_sql() self.pending_create_signals = pending_creates or [] def get_pending_creates(self): return self.pending_create_signals @invalidate_table_constraints def create_table(self, table_name, fields): if len(table_name) > 63: print " ! WARNING: You have a table name longer than 63 characters; this will not fully work on PostgreSQL or MySQL." 
columns = [ self.column_sql(table_name, field_name, field) for field_name, field in fields ] self.execute('CREATE TABLE %s (%s);' % ( self.quote_name(table_name), ', '.join([col for col in columns if col]), )) add_table = alias('create_table') @invalidate_table_constraints def rename_table(self, old_table_name, table_name): if old_table_name == table_name: return params = (self.quote_name(old_table_name), self.quote_name(table_name)) self.execute(self.rename_table_sql % params) self._set_cache(table_name, value=INVALID) @invalidate_table_constraints def delete_table(self, table_name, cascade=True): params = (self.quote_name(table_name), ) if cascade: self.execute('DROP TABLE %s CASCADE;' % params) else: self.execute('DROP TABLE %s;' % params) drop_table = alias('delete_table') @invalidate_table_constraints def clear_table(self, table_name): params = (self.quote_name(table_name), ) self.execute('DELETE FROM %s;' % params) @invalidate_table_constraints def add_column(self, table_name, name, field, keep_default=True): sql = self.column_sql(table_name, name, field) if sql: params = ( self.quote_name(table_name), sql, ) sql = self.add_column_string % params self.execute(sql) if not keep_default and field.default is not None: field.default = NOT_PROVIDED self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True) def _db_type_for_alter_column(self, field): try: return field.db_type(connection=self._get_connection()) except TypeError: return field.db_type() def _alter_add_column_mods(self, field, name, params, sqls): pass def _alter_set_defaults(self, field, name, params, sqls): if not field.null and field.has_default(): default = field.get_default() sqls.append(('ALTER COLUMN %s SET DEFAULT %%s ' % (self.quote_name(name),), [default])) else: sqls.append(('ALTER COLUMN %s DROP DEFAULT' % (self.quote_name(name),), [])) @invalidate_table_constraints def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False): if self.dry_run: if self.debug: print ' - no dry run output for alter_column() due to dynamic DDL, sorry' return if hasattr(field, 'south_init'): field.south_init() field.set_attributes_from_name(name) if not explicit_name: name = field.column else: field.column = name if not ignore_constraints: if self.has_check_constraints: check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK") for constraint in check_constraints: self.execute(self.delete_check_sql % { 'table': self.quote_name(table_name), 'constraint': self.quote_name(constraint), }) unique_constraint = list(self._constraints_affecting_columns(table_name, [name], "UNIQUE")) if field.unique and not unique_constraint: self.create_unique(table_name, [name]) elif not field.unique and unique_constraint: self.delete_unique(table_name, [name]) try: self.delete_foreign_key(table_name, name) except ValueError: pass params = { "column": self.quote_name(name), "type": self._db_type_for_alter_column(field), "table_name": table_name } sqls = [] if params["type"] is not None: sqls.append((self.alter_string_set_type % params, [])) self._alter_add_column_mods(field, name, params, sqls) if field.null: sqls.append((self.alter_string_set_null % params, [])) else: sqls.append((self.alter_string_drop_null % params, [])) self._alter_set_defaults(field, name, params, sqls) if self.allows_combined_alters: sqls, values = zip(*sqls) self.execute( "ALTER TABLE %s %s;" % (self.quote_name(table_name), ", ".join(sqls)), flatten(values), ) else: for sql, values in sqls: 
self.execute("ALTER TABLE %s %s;" % (self.quote_name(table_name), sql), values) if not ignore_constraints: if field.rel and self.supports_foreign_keys: self.execute( self.foreign_key_sql( table_name, field.column, field.rel.to._meta.db_table, field.rel.to._meta.get_field(field.rel.field_name).column ) ) def _fill_constraint_cache(self, db_name, table_name): schema = self._get_schema_name() ifsc_tables = ["constraint_column_usage", "key_column_usage"] self._constraint_cache.setdefault(db_name, {}) self._constraint_cache[db_name][table_name] = {} for ifsc_table in ifsc_tables: rows = self.execute(""" SELECT kc.constraint_name, kc.column_name, c.constraint_type FROM information_schema.%s AS kc JOIN information_schema.table_constraints AS c ON kc.table_schema = c.table_schema AND kc.table_name = c.table_name AND kc.constraint_name = c.constraint_name WHERE kc.table_schema = %%s AND kc.table_name = %%s """ % ifsc_table, [schema, table_name]) for constraint, column, kind in rows: self._constraint_cache[db_name][table_name].setdefault(column, set()) self._constraint_cache[db_name][table_name][column].add((kind, constraint)) return
BSD 3-Clause New or Revised License
googleads/google-ads-python
examples/remarketing/add_customer_match_user_list.py
_build_offline_user_data_job_operations
python
def _build_offline_user_data_job_operations(client): user_data_with_email_address_operation = client.get_type( "OfflineUserDataJobOperation" ) user_data_with_email_address = user_data_with_email_address_operation.create user_identifier_with_hashed_email = client.get_type("UserIdentifier") user_identifier_with_hashed_email.hashed_email = _normalize_and_hash( "customer@example.com" ) user_data_with_email_address.user_identifiers.append( user_identifier_with_hashed_email ) user_data_with_physical_address_operation = client.get_type( "OfflineUserDataJobOperation" ) user_data_with_physical_address = ( user_data_with_physical_address_operation.create ) user_identifier_with_address = client.get_type("UserIdentifier") user_identifier_with_address.address_info.hashed_first_name = _normalize_and_hash( "John" ) user_identifier_with_address.address_info.hashed_last_name = _normalize_and_hash( "Doe" ) user_identifier_with_address.address_info.country_code = "US" user_identifier_with_address.address_info.postal_code = "10011" user_data_with_physical_address.user_identifiers.append( user_identifier_with_address ) return [ user_data_with_email_address_operation, user_data_with_physical_address_operation, ]
Builds and returns two sample offline user data job operations. Args: client: The Google Ads client. Returns: A list containing the operations.
https://github.com/googleads/google-ads-python/blob/6794993e146abcfe21292677144c66cb546446bc/examples/remarketing/add_customer_match_user_list.py#L191-L239
import argparse import hashlib import sys import uuid from google.ads.googleads.client import GoogleAdsClient from google.ads.googleads.errors import GoogleAdsException def main(client, customer_id, skip_polling): user_list_resource_name = _create_customer_match_user_list( client, customer_id ) _add_users_to_customer_match_user_list( client, customer_id, user_list_resource_name, skip_polling ) def _create_customer_match_user_list(client, customer_id): user_list_service_client = client.get_service("UserListService") user_list_operation = client.get_type("UserListOperation") user_list = user_list_operation.create user_list.name = f"Customer Match list #{uuid.uuid4()}" user_list.description = ( "A list of customers that originated from email and physical addresses" ) user_list.crm_based_user_list.upload_key_type = ( client.enums.CustomerMatchUploadKeyTypeEnum.CONTACT_INFO ) user_list.membership_life_span = 30 response = user_list_service_client.mutate_user_lists( customer_id=customer_id, operations=[user_list_operation] ) user_list_resource_name = response.results[0].resource_name print( f"User list with resource name '{user_list_resource_name}' was created." ) return user_list_resource_name def _add_users_to_customer_match_user_list( client, customer_id, user_list_resource_name, skip_polling ): offline_user_data_job_service_client = client.get_service( "OfflineUserDataJobService" ) offline_user_data_job = client.get_type("OfflineUserDataJob") offline_user_data_job.type_ = ( client.enums.OfflineUserDataJobTypeEnum.CUSTOMER_MATCH_USER_LIST ) offline_user_data_job.customer_match_user_list_metadata.user_list = ( user_list_resource_name ) create_offline_user_data_job_response = offline_user_data_job_service_client.create_offline_user_data_job( customer_id=customer_id, job=offline_user_data_job ) offline_user_data_job_resource_name = ( create_offline_user_data_job_response.resource_name ) print( "Created an offline user data job with resource name: " f"'{offline_user_data_job_resource_name}'." ) request = client.get_type("AddOfflineUserDataJobOperationsRequest") request.resource_name = offline_user_data_job_resource_name request.operations = _build_offline_user_data_job_operations(client) request.enable_partial_failure = True response = offline_user_data_job_service_client.add_offline_user_data_job_operations( request=request ) partial_failure = getattr(response, "partial_failure_error", None) if getattr(partial_failure, "code", None) != 0: error_details = getattr(partial_failure, "details", []) for error_detail in error_details: failure_message = client.get_type("GoogleAdsFailure") failure_object = type(failure_message).deserialize( error_detail.value ) for error in failure_object.errors: print( "A partial failure at index " f"{error.location.field_path_elements[0].index} occurred.\n" f"Error message: {error.message}\n" f"Error code: {error.error_code}" ) print("The operations are added to the offline user data job.") operation_response = offline_user_data_job_service_client.run_offline_user_data_job( resource_name=offline_user_data_job_resource_name ) if skip_polling: _check_job_status( client, customer_id, offline_user_data_job_resource_name, user_list_resource_name, ) else: print("Request to execute the added operations started.") print("Waiting until operation completes...") operation_response.result() _print_customer_match_user_list_info( client, customer_id, user_list_resource_name )
Apache License 2.0
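The _build_offline_user_data_job_operations example above relies on a private _normalize_and_hash helper whose body is not shown in the record. The sketch below, written independently of the Google Ads client library, illustrates the normalization-and-hashing convention Customer Match identifiers follow (strip whitespace, lowercase, SHA-256 hex digest); the helper name and the sample address are illustrative only.

import hashlib

def normalize_and_hash(value: str) -> str:
    # Customer Match identifiers are stripped of surrounding whitespace,
    # lowercased, and hashed with SHA-256 before upload.
    return hashlib.sha256(value.strip().lower().encode("utf-8")).hexdigest()

print(normalize_and_hash("  Customer@Example.com "))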
jshaffstall/pyphysicssandbox
pyphysicssandbox/__init__.py
static_ball
python
def static_ball(p, radius): return _ball(p, radius, 0, True)
Creates a ball that remains fixed in place. :param p: The center point of the ball :type p: (int, int) :param radius: The radius of the ball :type radius: int :rtype: shape
https://github.com/jshaffstall/pyphysicssandbox/blob/ad9300ccc1e50f95372c6e0ca7bb0a9cafc839b7/pyphysicssandbox/__init__.py#L197-L207
import pygame import pymunk import math from pygame import Color from pygame import constants __docformat__ = "reStructuredText" __all__ = ['window', 'add_observer', 'gravity', 'resistance', 'mouse_clicked', 'static_ball', 'ball', 'static_box', 'box', 'static_rounded_box', 'rounded_box', 'static_polygon', 'polygon', 'static_triangle', 'triangle', 'static_text', 'text', 'static_text_with_font', 'text_with_font', 'static_line', 'line', 'pivot', 'gear', 'motor', 'pin', 'rotary_spring', 'run', 'draw', 'Color', 'cosmetic_text', 'cosmetic_text_with_font', 'num_shapes', 'constants', 'deactivate', 'reactivate', 'mouse_point', 'add_collision', 'slip_motor', 'set_margins', 'cosmetic_box', 'cosmetic_rounded_box', 'cosmetic_ball', 'cosmetic_line', 'cosmetic_polygon', 'cosmetic_triangle', 'spring', 'color' ] pygame.init() space = pymunk.Space() space.gravity = (0.0, 500.0) space.damping = 0.95 win_title = "Untitled" win_width = 500 win_height = 500 x_margin = win_width y_margin = win_height observers = [] clicked = False default_color = Color('black') shapes = {} def window(title, width, height): global win_title global win_width global win_height global x_margin global y_margin win_title = title win_width = width win_height = height x_margin = win_width y_margin = win_height def add_observer(hook): global observers observers.append(hook) def set_margins(x, y): global x_margin global y_margin x_margin = x y_margin = y def gravity(x, y): space.gravity = (x, y) def color(c): global default_color default_color = Color(c) def resistance(v): space.damping = v def mouse_clicked(): return clicked def mouse_point(): return pygame.mouse.get_pos()
MIT License
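A brief usage sketch for static_ball above. It assumes the module is importable as pyphysicssandbox, that ball shares static_ball's (point, radius) calling convention, and that run() needs no required arguments; the window title, sizes, and coordinates are made-up values.

import pyphysicssandbox as sandbox

sandbox.window("static_ball demo", 400, 400)   # set up the drawing window
anchor = sandbox.static_ball((200, 300), 30)   # fixed in place, ignores gravity
falling = sandbox.ball((200, 50), 15)          # assumed dynamic counterpart
sandbox.run()                                  # start the simulation loop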
rosewang2008/gym-cooking
gym_cooking/navigation_planner/planners/e2e_brtdp.py
E2E_BRTDP.__init__
python
def __init__(self, alpha, tau, cap, main_cap): self.alpha = alpha self.tau = tau self.cap = cap self.main_cap = main_cap self.v_l = {} self.v_u = {} self.repr_to_env_dict = dict() self.start = None self.pq = mpq() self.actions = World.NAV_ACTIONS self.is_joint = False self.planner_level = PlannerLevel.LEVEL0 self.cur_object_count = 0 self.is_subtask_complete = lambda h: False self.removed_object = None self.goal_obj = None self.time_cost = 1.0 self.action_cost = 0.1
Initializes the BRTDP algorithm with its hyper-parameters. Refer to the BRTDP paper for how these hyper-parameters are used in the algorithm: http://www.cs.cmu.edu/~ggordon/mcmahan-likhachev-gordon.brtdp.pdf Args: alpha: BRTDP convergence criterion. tau: BRTDP normalization constant. cap: BRTDP cap on sample trial rollouts. main_cap: BRTDP cap on its main loop.
https://github.com/rosewang2008/gym-cooking/blob/74570c1f1a88fabf8fb7d3ddec10aaf2274a2403/gym_cooking/navigation_planner/planners/e2e_brtdp.py#L43-L77
from recipe_planner.utils import * import navigation_planner.utils as nav_utils from navigation_planner.utils import MinPriorityQueue as mpq from utils.world import World from utils.interact import interact from utils.core import * from collections import defaultdict import numpy as np import scipy as sp import random from itertools import product import copy import time from functools import lru_cache from enum import Enum class PlannerLevel(Enum): LEVEL1 = 1 LEVEL0 = 0 def argmin(vector): e_x = np.array(vector) == min(vector) return np.where(np.random.multinomial(1, e_x / e_x.sum()))[0][0] def argmax(vector): e_x = np.array(vector) == max(vector) return np.where(np.random.multinomial(1, e_x / e_x.sum()))[0][0] class E2E_BRTDP:
MIT License
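The __init__ docstring above lists four BRTDP hyper-parameters. A construction sketch follows, with illustrative values rather than the ones used in the gym-cooking experiments, and an import path assumed from the file location shown in the record.

from navigation_planner.planners.e2e_brtdp import E2E_BRTDP  # import path assumed

planner = E2E_BRTDP(
    alpha=0.01,    # convergence criterion: stop when the bound gap is small enough
    tau=2.0,       # normalization constant used when sampling the next state
    cap=75,        # cap on a single sample trial rollout
    main_cap=300,  # cap on the main planning loop
)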
molssi/qcfractal
qcfractal/web_handlers.py
MoleculeHandler.post
python
def post(self): self.authenticate("write") body_model, response_model = rest_model("molecule", "post") body = self.parse_bodymodel(body_model) ret = self.storage.add_molecules(body.data) response = response_model(**ret) self.logger.info("POST: Molecule - {} inserted.".format(response.meta.n_inserted)) self.write(response)
Experimental documentation; a decent format still needs to be found. Request: "meta" - Overall options to the Molecule POST request - No current options "data" - A dictionary of {key : molecule JSON} requests Returns: "meta" - Metadata associated with the query - "errors" - A list of errors in (index, error_id) format. - "n_inserted" - The number of molecules inserted. - "success" - Whether the query was successful or not. - "error_description" - A string-based description of the error, or False - "duplicates" - A list of keys that were already inserted. "data" - A dictionary of {key : id} results
https://github.com/molssi/qcfractal/blob/de022c93f2931721fffa509bb61fb176ed27f993/qcfractal/web_handlers.py#L248-L276
import json import tornado.web from pydantic import ValidationError from qcelemental.util import deserialize, serialize from .interface.models.rest_models import rest_model from .storage_sockets.storage_utils import add_metadata_template _valid_encodings = { "application/json": "json", "application/json-ext": "json-ext", "application/msgpack-ext": "msgpack-ext", } class APIHandler(tornado.web.RequestHandler): _required_auth = "admin" _logging_param_counts = {} def initialize(self, **objects): self.content_type = "Not Provided" try: self.content_type = self.request.headers.get("Content-Type", "application/json") self.encoding = _valid_encodings[self.content_type] except KeyError: raise tornado.web.HTTPError( status_code=401, reason=f"Did not understand 'Content-Type': {self.content_type}" ) self.set_header("Content-Type", self.content_type) self.objects = objects self.storage = self.objects["storage_socket"] self.logger = objects["logger"] self.api_logger = objects["api_logger"] self.view_handler = objects["view_handler"] self.username = None def prepare(self): if self._required_auth: self.authenticate(self._required_auth) try: if (self.encoding == "json") and isinstance(self.request.body, bytes): blob = self.request.body.decode() else: blob = self.request.body if blob: self.data = deserialize(blob, self.encoding) else: self.data = None except: raise tornado.web.HTTPError(status_code=401, reason="Could not deserialize body.") def on_finish(self): exclude_uris = ["/task_queue", "/service_queue", "/queue_manager"] if self.data is None: return if self.api_logger and self.request.method == "GET" and self.request.uri not in exclude_uris: extra_params = self.data.copy() if self._logging_param_counts: for key in self._logging_param_counts: if extra_params["data"].get(key, None): extra_params["data"][key] = len(extra_params["data"][key]) if "data" in extra_params: extra_params["data"] = {k: v for k, v in extra_params["data"].items() if v is not None} extra_params = json.dumps(extra_params) log = self.api_logger.get_api_access_log(request=self.request, extra_params=extra_params) self.storage.save_access(log) def authenticate(self, permission): if "Authorization" in self.request.headers: data = json.loads(self.request.headers["Authorization"]) username = data["username"] password = data["password"] else: username = None password = None self.username = username verified, msg = self.objects["storage_socket"].verify_user(username, password, permission) if verified is False: raise tornado.web.HTTPError(status_code=401, reason=msg) def parse_bodymodel(self, model): try: return model(**self.data) except ValidationError: raise tornado.web.HTTPError(status_code=401, reason="Invalid REST") def write(self, data): if not isinstance(data, (str, bytes)): data = serialize(data, self.encoding) return super().write(data) class InformationHandler(APIHandler): _required_auth = "read" def get(self): self.logger.info("GET: Information") self.write(self.objects["public_information"]) class KVStoreHandler(APIHandler): _required_auth = "read" _logging_param_counts = {"id"} def get(self): body_model, response_model = rest_model("kvstore", "get") body = self.parse_bodymodel(body_model) ret = self.storage.get_kvstore(body.data.id) ret = response_model(**ret) self.logger.info("GET: KVStore - {} pulls.".format(len(ret.data))) self.write(ret) class WavefunctionStoreHandler(APIHandler): _required_auth = "read" _logging_param_counts = {"id"} def get(self): body_model, response_model = rest_model("wavefunctionstore", "get") body = 
self.parse_bodymodel(body_model) ret = self.storage.get_wavefunction_store(body.data.id, include=body.meta.include) if len(ret["data"]): ret["data"] = ret["data"][0] ret = response_model(**ret) self.logger.info("GET: WavefunctionStore - 1 pull.") self.write(ret) class MoleculeHandler(APIHandler): _required_auth = "read" _logging_param_counts = {"id"} def get(self): body_model, response_model = rest_model("molecule", "get") body = self.parse_bodymodel(body_model) molecules = self.storage.get_molecules(**{**body.data.dict(), **body.meta.dict()}) ret = response_model(**molecules) self.logger.info("GET: Molecule - {} pulls.".format(len(ret.data))) self.write(ret)
BSD 3-Clause New or Revised License
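The post docstring above documents a {key: molecule JSON} request and a {key: id} response. Below is a hypothetical payload in the shape that docstring describes; the "water" key, the geometry numbers, and the response values are placeholders, not real QCFractal data.

request_body = {
    "meta": {},  # the docstring notes no options are currently honored
    "data": {
        "water": {
            "symbols": ["O", "H", "H"],
            "geometry": [0.0, 0.0, 0.0, 0.0, 0.757, 0.587, 0.0, -0.757, 0.587],
        }
    },
}

expected_response_shape = {
    "meta": {"n_inserted": 1, "success": True, "errors": [], "duplicates": []},
    "data": {"water": "<molecule id>"},
}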
rosenbrockc/aflow
aflow/entries.py
Entry.ael_poisson_ratio
python
def ael_poisson_ratio(self): return self._lazy_load("ael_poisson_ratio")
AEL Poisson ratio (`optional`). Units: ``. .. note:: The following verifications are available for this keyword. They are exposed as additional methods on this object. - :meth:`energy_cutoff` - :meth:`kpoints` Returns: float: Returns the isotropic Poisson ratio as calculated with AEL. Examples: You can expect the *content* of the result to be something like: `ael_poisson_ratio=0.216`
https://github.com/rosenbrockc/aflow/blob/785284fe144b0bc63a52470ad8fcb3e27b0ca683/aflow/entries.py#L523-L541
from aflow.caster import cast import aflow.keywords as kw def _val_from_str(attr, value): clsname = "_{}".format(attr) if hasattr(kw, clsname): cls = getattr(kw, clsname) atype = getattr(cls, "atype") return cast(atype, attr, value) else: return value class AflowFile(object): def __init__(self, aurl, filename): self.aurl = aurl self.filename = filename def __repr__(self): return "AflowFile({0}/{1})".format(self.aurl, self.filename) def __call__(self, target=None): from six.moves import urllib url = "http://{}/{}".format(self.aurl.replace(':', '/'), self.filename) try: urlopen = urllib.request.urlopen rawresp = urlopen(url).read().decode("utf-8") if target is not None: from os import path tpath = path.abspath(path.expanduser(target)) with open(tpath, 'w') as f: f.write(rawresp) return tpath else: return rawresp except: urlopen = urllib.request from os import path from aflow.msg import info if target is not None: tpath = path.abspath(path.expanduser(target)) else: tpath = path.abspath(path.expanduser(self.filename)) urlopen.urlretrieve(url,tpath) infomsg = "The file {0} has been saved to {1}".format(self.filename,tpath) info(infomsg) class AflowFiles(list): def __init__(self, entry): files = entry._lazy_load("files") if files is not None: super(AflowFiles, self).extend(files) self.aurl = entry._lazy_load("aurl") def __getitem__(self, key): from six import string_types from fnmatch import fnmatch if isinstance(key, string_types): matches = [f for f in self if fnmatch(f, key)] if len(matches) == 1: return AflowFile(self.aurl, matches[0]) else: raise KeyError("Pattern matches more than one file.") else: match = super(AflowFiles, self).__getitem__(key).strip() return AflowFile(self.aurl, match) class Entry(object): def __init__(self, **kwargs): self.attributes = {a: _val_from_str(a, v) for a, v in kwargs.items()} self.raw = kwargs self._atoms = None self._files = None def __str__(self): aurl = self.attributes["aurl"].replace(".edu:", ".edu/") return "http://" + aurl def __eq__(self, other): return self.auid == other.auid def __hash__(self): return hash(self.auid) def _lazy_load(self, keyword): if keyword in self.attributes: return self.attributes[keyword] else: import requests import json aurl = self.attributes["aurl"].replace(".edu:", ".edu/") url = "http://{0}?{1}".format(aurl, keyword) r = requests.get(url) if len(r.text) == 0: return result = _val_from_str(keyword, r.text) self.attributes[keyword] = result return result def atoms(self, pattern="CONTCAR.relax*", quippy=False, keywords=None, calculator=None): if self._atoms is not None: return self._atoms from fnmatch import fnmatch target = [f for f in self.files if fnmatch(f, pattern)][-1] aurl = self.attributes["aurl"].replace(".edu:", ".edu/") url = "http://{0}/{1}".format(aurl, target) import requests lines = requests.get(url).text.split('\n') preline = ' '.join(self.species).strip() + ' !' 
lines[0] = preline + lines[0] contcar = '\n'.join(lines) if quippy: import quippy reader = quippy.io.read else: from ase.io import read reader = read from six import StringIO cfile = StringIO(contcar) try: self._atoms = reader(cfile, format="vasp") finally: cfile.close() if calculator is not None: self._atoms.set_calculator(calculator) if keywords is None: return self._atoms self._atoms.results = {} for kw, pname in keywords.items(): value = getattr(self, kw.name) if quippy: self._atoms.params.set_value(pname, value) else: self._atoms.results[pname] = value return self._atoms @property def files(self): if self._files is None: self._files = AflowFiles(self) return self._files @property def Bravais_lattice_orig(self): return self._lazy_load("Bravais_lattice_orig") @property def Bravais_lattice_relax(self): return self._lazy_load("Bravais_lattice_relax") @property def Egap(self): return self._lazy_load("Egap") @property def Egap_fit(self): return self._lazy_load("Egap_fit") @property def Egap_type(self): return self._lazy_load("Egap_type") @property def PV_atom(self): return self._lazy_load("PV_atom") @property def PV_cell(self): return self._lazy_load("PV_cell") @property def Pearson_symbol_orig(self): return self._lazy_load("Pearson_symbol_orig") @property def Pearson_symbol_relax(self): return self._lazy_load("Pearson_symbol_relax") @property def Pulay_stress(self): return self._lazy_load("Pulay_stress") @property def ael_bulk_modulus_reuss(self): return self._lazy_load("ael_bulk_modulus_reuss") @property def ael_bulk_modulus_voigt(self): return self._lazy_load("ael_bulk_modulus_voigt") @property def ael_bulk_modulus_vrh(self): return self._lazy_load("ael_bulk_modulus_vrh") @property def ael_elastic_anisotropy(self): return self._lazy_load("ael_elastic_anisotropy") @property
MIT License
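The property above lazily fetches the keyword from the AFLOW REST API on first access. A small sketch of reading it, wrapped in a function so the Entry instance stays an explicit assumption rather than a constructed object:

def report_poisson_ratio(entry):
    """Print the AEL isotropic Poisson ratio of an aflow.entries.Entry, if available."""
    ratio = entry.ael_poisson_ratio   # triggers _lazy_load("ael_poisson_ratio") on first use
    if ratio is None:
        print("ael_poisson_ratio not available for", entry.auid)
    else:
        print("isotropic Poisson ratio: {:.3f}".format(ratio))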
nozaq/amazon-linux-cis
__main__.py
enable_aide
python
def enable_aide(): cron_job = '0 5 * * * /usr/sbin/aide --check' Package('aide').install() return exec_shell([ 'aide --init', 'mv /var/lib/aide/aide.db.new.gz /var/lib/aide/aide.db.gz', '(crontab -u root -l 2>/dev/null | grep -v /usr/sbin/aide; echo "{}") | crontab -'.format(cron_job) ])
1.3 Filesystem Integrity Checking
https://github.com/nozaq/amazon-linux-cis/blob/e521b63758bec5b7e341b037ff23d38fc4ba7970/__main__.py#L65-L76
import argparse import logging import os import re from subprocess import CalledProcessError import pkg_resources from util import exec_shell, set_backup_enabled, File, Package, Service, PropertyFile def get_string_asset(path): return pkg_resources.resource_string(__name__, 'assets/{}'.format(path)) def disable_unused_filesystems(): filesystems = [ 'cramfs', 'freevxfs', 'jffs2', 'hfs', 'hfsplus', 'squashfs', 'udf', 'vfat' ] prop = PropertyFile('/etc/modprobe.d/CIS.conf', ' ') for filesystem in filesystems: prop.override({'install {}'.format(filesystem): '/bin/true'}) prop.write() def set_mount_options(): options = { '/tmp': 'tmpfs /tmp tmpfs rw,nosuid,nodev,noexec,relatime 0 0', '/var/tmp': 'tmpfs /var/tmp tmpfs rw,nosuid,nodev,noexec,relatime 0 0', '/home': '/dev/xvdf1 /home ext4 rw,nodev,relatime,data=ordered 0 0', '/dev/shm': 'tmpfs /dev/shm tmpfs rw,nosuid,nodev,noexec,relatime 0 0' } with open('/etc/fstab', 'r') as f: for line in f: if line.startswith('#'): continue partition = line.split()[1] if partition not in options: options[partition] = line.strip() with open('/etc/fstab', 'w') as f: for record in options.values(): f.write('{}\n'.format(record)) def ensure_sticky_bit(): try: return exec_shell(['df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -type d -perm -0002 2>/dev/null | xargs chmod a+t']) except CalledProcessError: return 1 def disable_automounting(): Service('autofs').disable()
MIT License
unofficial-memsource/memsource-cli-client
memsource_cli/models/term_create_dto.py
TermCreateDto.lang
python
def lang(self, lang): self._lang = lang
Sets the lang of this TermCreateDto. :param lang: The lang of this TermCreateDto. # noqa: E501 :type: str
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/term_create_dto.py#L152-L160
import pprint import re import six class TermCreateDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'text': 'str', 'lang': 'str', 'case_sensitive': 'bool', 'exact_match': 'bool', 'forbidden': 'bool', 'preferred': 'bool', 'status': 'str', 'concept_id': 'str', 'usage': 'str', 'note': 'str', 'short_translation': 'str', 'term_type': 'str', 'part_of_speech': 'str', 'gender': 'str', 'number': 'str' } attribute_map = { 'text': 'text', 'lang': 'lang', 'case_sensitive': 'caseSensitive', 'exact_match': 'exactMatch', 'forbidden': 'forbidden', 'preferred': 'preferred', 'status': 'status', 'concept_id': 'conceptId', 'usage': 'usage', 'note': 'note', 'short_translation': 'shortTranslation', 'term_type': 'termType', 'part_of_speech': 'partOfSpeech', 'gender': 'gender', 'number': 'number' } def __init__(self, text=None, lang=None, case_sensitive=None, exact_match=None, forbidden=None, preferred=None, status=None, concept_id=None, usage=None, note=None, short_translation=None, term_type=None, part_of_speech=None, gender=None, number=None): self._text = None self._lang = None self._case_sensitive = None self._exact_match = None self._forbidden = None self._preferred = None self._status = None self._concept_id = None self._usage = None self._note = None self._short_translation = None self._term_type = None self._part_of_speech = None self._gender = None self._number = None self.discriminator = None if text is not None: self.text = text if lang is not None: self.lang = lang if case_sensitive is not None: self.case_sensitive = case_sensitive if exact_match is not None: self.exact_match = exact_match if forbidden is not None: self.forbidden = forbidden if preferred is not None: self.preferred = preferred if status is not None: self.status = status if concept_id is not None: self.concept_id = concept_id if usage is not None: self.usage = usage if note is not None: self.note = note if short_translation is not None: self.short_translation = short_translation if term_type is not None: self.term_type = term_type if part_of_speech is not None: self.part_of_speech = part_of_speech if gender is not None: self.gender = gender if number is not None: self.number = number @property def text(self): return self._text @text.setter def text(self, text): self._text = text @property def lang(self): return self._lang @lang.setter
Apache License 2.0
gmr/rabbitpy
rabbitpy/channel.py
Channel.open
python
def open(self): self._set_state(self.OPENING) self.write_frame(self._build_open_frame()) self._wait_on_frame(spec.Channel.OpenOk) self._set_state(self.OPEN) LOGGER.debug('Channel #%i open', self._channel_id)
Open the channel, invoked directly upon creation by the Connection
https://github.com/gmr/rabbitpy/blob/d97fd2b1e983df0d95c2b0e2c6b29f61c79d82b9/rabbitpy/channel.py#L175-L183
import logging from pamqp import specification as spec from pamqp import PYTHON3 from rabbitpy import base from rabbitpy import exceptions from rabbitpy import message from rabbitpy.utils import queue LOGGER = logging.getLogger(__name__) BASIC_DELIVER = 'Basic.Deliver' CONTENT_BODY = 'ContentBody' CONTENT_HEADER = 'ContentHeader' class Channel(base.AMQPChannel): STATES = base.AMQPChannel.STATES STATES[0x04] = 'Remotely Closed' def __init__(self, channel_id, server_capabilities, events, exception_queue, read_queue, write_queue, maximum_frame_size, write_trigger, connection, blocking_read=False): super(Channel, self).__init__(exception_queue, write_trigger, connection, blocking_read) self._channel_id = channel_id self._consumers = {} self._consuming = False self._events = events self._maximum_frame_size = maximum_frame_size self._publisher_confirms = False self._read_queue = read_queue self._write_queue = write_queue self._server_capabilities = server_capabilities def __enter__(self): return self def __exit__(self, exc_type, exc_val, unused_exc_tb): if exc_type and exc_val: LOGGER.debug('Exiting due to exception: %r', exc_val) self._set_state(self.CLOSED) raise exc_val if self.open: self.close() def close(self): if self._connection.closed: LOGGER.debug('Channel %i close invoked when connection closed', self._channel_id) elif self.closed: LOGGER.debug('Channel %i close invoked when already closed', self._channel_id) else: self._set_state(self.CLOSING) if self._consumers: delivery_tag = 0 discard_counter = 0 ack_tags = [] for queue_obj, no_ack in self._consumers.values(): self._cancel_consumer(queue_obj) if not no_ack: LOGGER.debug('Channel %i will nack messages for %s', self._channel_id, queue_obj.consumer_tag) ack_tags.append(queue_obj.consumer_tag) if ack_tags: while not self._read_queue.empty(): frame_value = self._get_from_read_queue() if not frame_value: break if (frame_value.name == BASIC_DELIVER and frame_value.consumer_tag in ack_tags): if delivery_tag < frame_value.delivery_tag: delivery_tag = frame_value.delivery_tag discard_counter += 1 if delivery_tag: self._multi_nack(delivery_tag) super(Channel, self).close() def enable_publisher_confirms(self): if not self._supports_publisher_confirms: raise exceptions.NotSupportedError('Confirm.Select') self.rpc(spec.Confirm.Select()) self._publisher_confirms = True @property def id(self): return self._channel_id @property def maximum_frame_size(self): return self._maximum_frame_size
BSD 3-Clause New or Revised License
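Channel.open above is normally driven for you: as the docstring notes, the Connection invokes it when the channel is created, and the context-manager protocol shown in the record closes it again. A usage sketch with a placeholder broker URL:

import rabbitpy

# The AMQP URL is a placeholder; point it at a reachable broker.
with rabbitpy.Connection("amqp://guest:guest@localhost:5672/%2F") as connection:
    with connection.channel() as channel:   # Channel.open() has already run here
        print("channel id:", channel.id)    # the `id` property shown in the record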
logicalclocks/maggy
maggy/trial.py
Trial.get_early_stop
python
def get_early_stop(self): with self.lock: return self.early_stop
Return the early stopping flag of the trial.
https://github.com/logicalclocks/maggy/blob/f71510d5c7e54503456041392748240d95ad9424/maggy/trial.py#L83-L86
import json import threading import hashlib from maggy import util class Trial(object): PENDING = "PENDING" SCHEDULED = "SCHEDULED" RUNNING = "RUNNING" ERROR = "ERROR" FINALIZED = "FINALIZED" def __init__(self, params, trial_type="optimization", info_dict=None): self.trial_type = trial_type if self.trial_type == "optimization": self.trial_id = Trial._generate_id(params) elif self.trial_type == "ablation": serializable_params = { "ablated_feature": params.get("ablated_feature", None), "ablated_layer": params.get("ablated_layer", None), } self.trial_id = Trial._generate_id(serializable_params) self.params = params self.status = Trial.PENDING self.early_stop = False self.final_metric = None self.metric_history = [] self.step_history = [] self.metric_dict = {} self.start = None self.duration = None self.lock = threading.RLock() if info_dict is None: self.info_dict = {} else: self.info_dict = info_dict
Apache License 2.0
geoffxy/habitat
analyzer/habitat/analysis/trace.py
Trace.to_device
python
def to_device(self, dest_device, predictor=None): if dest_device.name == self.device.name: return self actual_predictor = ( Trace.DefaultPredictor if predictor is None else predictor ) operations = [ operation.to_device(dest_device, actual_predictor) for operation in self._operations ] return Trace(dest_device, operations)
Get a predicted trace for the specified device.
https://github.com/geoffxy/habitat/blob/decc70d18c4a1db7bb109fd59b2b60567bf74375/analyzer/habitat/analysis/trace.py#L38-L51
from itertools import chain from habitat.analysis.predictor import Predictor class Trace: DefaultPredictor = Predictor() def __init__(self, device, operations): self._device = device self._operations = operations self._run_time_ms = None @property def operations(self): return self._operations @property def device(self): return self._device @property def run_time_ms(self): if self._run_time_ms is not None: return self._run_time_ms self._run_time_ms = sum(map( lambda op: op.run_time_ms, self._operations, )) return self._run_time_ms
Apache License 2.0
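For Trace.to_device above, a short sketch of the cross-device prediction flow; the trace and device objects are assumed to come from the habitat analysis pipeline, so the function only wires the call together.

def predict_on(trace, dest_device):
    """Return the predicted run time (ms) of an existing Trace on another device."""
    predicted = trace.to_device(dest_device)   # falls back to Trace.DefaultPredictor
    return predicted.run_time_ms               # summed lazily over per-operation run times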
tektoncd/experimental
sdk/python/tekton_pipeline/models/v1beta1_pipeline_result.py
V1beta1PipelineResult.description
python
def description(self, description): self._description = description
Sets the description of this V1beta1PipelineResult. Description is a human-readable description of the result # noqa: E501 :param description: The description of this V1beta1PipelineResult. # noqa: E501 :type: str
https://github.com/tektoncd/experimental/blob/0ba4e7a2b9d45ed4accaecbb34dac006d665796a/sdk/python/tekton_pipeline/models/v1beta1_pipeline_result.py#L89-L98
import pprint import re import six from tekton_pipeline.configuration import Configuration class V1beta1PipelineResult(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'description': 'str', 'name': 'str', 'value': 'str' } attribute_map = { 'description': 'description', 'name': 'name', 'value': 'value' } def __init__(self, description='', name='', value='', local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._description = None self._name = None self._value = None self.discriminator = None if description is not None: self.description = description self.name = name self.value = value @property def description(self): return self._description @description.setter
Apache License 2.0
talaia-labs/python-teos
teos/chain_monitor.py
ChainMonitor.terminate
python
def terminate(self): self.status = ChainMonitorStatus.TERMINATED self.queue.put(ChainMonitor.END_MESSAGE)
Changes the ``status`` of the :obj:`ChainMonitor` to terminated and sends the ``ChainMonitor.END_MESSAGE`` message to the internal queue. All the threads will stop as soon as possible.
https://github.com/talaia-labs/python-teos/blob/66bc075d2432c45691af77cde20cbecd46341107/teos/chain_monitor.py#L224-L231
from enum import Enum from queue import Queue import zmq from threading import Thread, Event, Condition from teos.logger import get_logger class ChainMonitorStatus(Enum): IDLE = 0 LISTENING = 1 ACTIVE = 2 TERMINATED = 3 class ChainMonitor: END_MESSAGE = "END" def __init__(self, receiving_queues, block_processor, bitcoind_feed_params): self.logger = get_logger(component=ChainMonitor.__name__) self.last_tips = [] self.check_tip = Event() self.lock = Condition() self.bitcoind_reachable = block_processor.bitcoind_reachable self.polling_retries = 0 self.zmqContext = zmq.Context() self.zmqSubSocket = self.zmqContext.socket(zmq.SUB) self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0) self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock") self.zmqSubSocket.connect( "%s://%s:%s" % ( bitcoind_feed_params.get("BTC_FEED_PROTOCOL"), bitcoind_feed_params.get("BTC_FEED_CONNECT"), bitcoind_feed_params.get("BTC_FEED_PORT"), ) ) self.receiving_queues = receiving_queues self.polling_delta = 60 self.max_block_window_size = 10 self.block_processor = block_processor self.queue = Queue() self.status = ChainMonitorStatus.IDLE def enqueue(self, block_hash): if block_hash not in self.last_tips: with self.lock: self.queue.put(block_hash) self.last_tips.append(block_hash) if len(self.last_tips) > self.max_block_window_size: self.last_tips.pop(0) return True else: return False def monitor_chain_polling(self): while self.status != ChainMonitorStatus.TERMINATED: self.check_tip.wait(timeout=self.polling_delta) try: self.bitcoind_reachable.clear() current_tip = self.block_processor.get_best_block_hash(blocking=False) if self.polling_retries > 0: self.logger.info(f"Connection with bitcoind restored") self.polling_retries = 0 self.bitcoind_reachable.set() if current_tip and current_tip not in self.last_tips: self.logger.info("New block received via polling", block_hash=current_tip) self.enqueue(current_tip) except ConnectionRefusedError: if self.polling_retries == 0: self.logger.error("Lost connection with bitcoind") else: self.logger.error(f"Cannot connect to bitcoind (reties={self.polling_retries})") self.polling_retries += 1 def monitor_chain_zmq(self): while self.status != ChainMonitorStatus.TERMINATED: msg = self.zmqSubSocket.recv_multipart() topic = msg[0] body = msg[1] if topic == b"hashblock": block_hash = body.hex() if block_hash not in self.last_tips: self.logger.info("New block received via zmq", block_hash=block_hash) self.enqueue(block_hash) def notify_subscribers(self): while self.status != ChainMonitorStatus.TERMINATED: message = self.queue.get() with self.lock: for rec_queue in self.receiving_queues: rec_queue.put(message) def monitor_chain(self): if self.status != ChainMonitorStatus.IDLE: raise RuntimeError(f"This method can only be called in IDLE status. Current status is {self.status.name}.") self.status = ChainMonitorStatus.LISTENING try: self.last_tips.append(self.block_processor.get_best_block_hash(blocking=False)) Thread(target=self.monitor_chain_polling, daemon=True).start() Thread(target=self.monitor_chain_zmq, daemon=True).start() except ConnectionRefusedError: raise ConnectionRefusedError("Lost connection with bitcoind during bootstrap") def activate(self): if self.status != ChainMonitorStatus.LISTENING: raise RuntimeError( f"This method can only be called in LISTENING status. Current status is {self.status.name}." ) self.status = ChainMonitorStatus.ACTIVE Thread(target=self.notify_subscribers, daemon=True).start()
MIT License
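terminate above is the last step of a three-stage lifecycle visible in the record's context (monitor_chain, activate, terminate). A sketch that drives it, taking an already-constructed ChainMonitor so the constructor arguments stay out of scope:

def drive_chain_monitor(chain_monitor):
    """Walk a ChainMonitor through IDLE -> LISTENING -> ACTIVE -> TERMINATED."""
    chain_monitor.monitor_chain()   # starts the polling and zmq listener threads
    chain_monitor.activate()        # starts notify_subscribers; queues begin receiving block hashes
    # ... application work happens while blocks are forwarded ...
    chain_monitor.terminate()       # END_MESSAGE is queued and all threads wind down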
kotori-y/scopy
scopy/ScoDruglikeness/molproperty_Lib.py
PC_properties.CalculateNumTriBond
python
def CalculateNumTriBond(self): pool = Pool(self.n_jobs) nTriple = pool.map_async(molproperty.CalculateNumTriBond, self.mols).get() pool.close() pool.join() return nTriple
Calculation of the number of triple bonds in each molecule ---> nTriple :param mols: molecules :type mols: Iterable :return: the number of triple bonds :rtype: list
https://github.com/kotori-y/scopy/blob/b15e51a13507b5283888da90548bd50f7df5c50c/scopy/ScoDruglikeness/molproperty_Lib.py#L903-L918
import os import csv from multiprocessing import Pool from rdkit import Chem from . import molproperty from .. import ScoConfig from ..ScoRepresent.fingerprints import CalculateGhoseCrippen def _GetSmi(mol): return Chem.MolToSmiles(mol) class PC_properties(object): def __init__(self, mols, n_jobs=1): self.mols = mols if type(mols) is not Chem.rdchem.Mol else [mols] self.n_jobs = n_jobs if n_jobs>=1 else None def CalculateMolWeight(self): pool = Pool(self.n_jobs) MW = pool.map_async(molproperty.CalculateMolWeight, self.mols).get() pool.close() pool.join() return MW def CalculateNumBonds(self): pool = Pool(self.n_jobs) nBond = pool.map_async(molproperty.CalculateNumBonds, self.mols).get() pool.close() pool.join() return nBond def CalculateNumAtoms(self): pool = Pool(self.n_jobs) nAtom = pool.map_async(molproperty.CalculateNumAtoms, self.mols).get() pool.close() pool.join() return nAtom def CalculateNumHetero(self): pool = Pool(self.n_jobs) nHet = pool.map_async(molproperty.CalculateNumHetero, self.mols).get() pool.close() pool.join() return nHet def CalculateNumRotatableBonds(self): pool = Pool(self.n_jobs) nRot = pool.map_async(molproperty.CalculateNumRotatableBonds, self.mols).get() pool.close() pool.join() return nRot def CalculateNumRigidBonds(self): pool = Pool(self.n_jobs) nRig = pool.map_async(molproperty.CalculateNumRigidBonds, self.mols).get() pool.close() pool.join() return nRig def CalculateFlexibility(self): pool = Pool(self.n_jobs) Flex = pool.map_async(molproperty.CalculateFlexibility, self.mols).get() pool.close() pool.join() return Flex def CalculateNumRing(self): pool = Pool(self.n_jobs) nRing = pool.map_async(molproperty.CalculateNumRing, self.mols).get() pool.close() pool.join() return nRing def CalculateNumHeavyAtom(self): pool = Pool(self.n_jobs) nHev = pool.map_async(molproperty.CalculateNumHeavyAtom, self.mols).get() pool.close() pool.join() return nHev def CalculateLogD(self): intercept = 0.5748907159915493 fps = CalculateGhoseCrippen(self.mols,self.n_jobs) with open(os.path.join(ScoConfig.CrippenDir, 'Crippen.txt')) as f_obj: lines = csv.reader(f_obj,delimiter='\t') next(lines) contri = [x[-1] for x in lines] contri = [float(x) for x in contri] f_obj.close() logD = (fps*contri).sum(axis=1) + intercept return list(logD) def CalculateLogP(self): pool = Pool(self.n_jobs) logp = pool.map_async(molproperty.CalculateLogP, self.mols).get() pool.close() pool.join() return logp def CheckAcid(self): pool = Pool(self.n_jobs) ab = pool.map_async(molproperty.CheckAcid, self.mols).get() pool.close() pool.join() return ab def CalculatepKa(self): import warnings warnings.filterwarnings('ignore') from math import log10 logDl = self.CalculateLogD() logPl = self.CalculateLogP() statusl = self.CheckAcid() res = [] for status,logP, logD in zip(statusl,logPl,logDl): try: if status == 'acid': pKa = 7.4 - log10(10**(logP-logD)-1) else: pKa = log10(10**(logP-logD)-1) - 7.4 res.append(pKa) except: res.append('N/A') return res def CalculateMolMR(self): pool = Pool(self.n_jobs) mr = pool.map_async(molproperty.CalculateMolMR, self.mols).get() pool.close() pool.join() return mr def CalculateNumHDonors(self): pool = Pool(self.n_jobs) nHD = pool.map_async(molproperty.CalculateNumHDonors, self.mols).get() pool.close() pool.join() return nHD def CalculateNumHAcceptors(self): pool = Pool(self.n_jobs) nHA = pool.map_async(molproperty.CalculateNumHAcceptors, self.mols).get() pool.close() pool.join() return nHA def CalculateNumHyBond(self): pool = Pool(self.n_jobs) nHB = 
pool.map_async(molproperty.CalculateNumHyBond, self.mols).get() pool.close() pool.join() return nHB def CalculateAromaticProportion(self): pool = Pool(self.n_jobs) aroma = pool.map_async(molproperty.CalculateAromaticProportion, self.mols).get() pool.close() pool.join() return aroma def CalculateLogSw(self): pool = Pool(self.n_jobs) logSw = pool.map_async(molproperty.CalculateLogSw, self.mols).get() pool.close() pool.join() return logSw def CalculateFsp3(self): pool = Pool(self.n_jobs) fsp3 = pool.map_async(molproperty.CalculateFsp3, self.mols).get() pool.close() pool.join() return fsp3 def CalculateTPSA(self): pool = Pool(self.n_jobs) tpsa = pool.map_async(molproperty.CalculateTPSA, self.mols).get() pool.close() pool.join() return tpsa def CalculateQEDmean(self): pool = Pool(self.n_jobs) qed_mean = pool.map_async(molproperty.CalculateQEDmean, self.mols).get() pool.close() pool.join() return qed_mean def CalculateQEDmax(self): pool = Pool(self.n_jobs) qed_max = pool.map_async(molproperty.CalculateQEDmax, self.mols).get() pool.close() pool.join() return qed_max def CalculateQEDnone(self): pool = Pool(self.n_jobs) qed_none = pool.map_async(molproperty.CalculateQEDnone, self.mols).get() pool.close() pool.join() return qed_none def CalculateMaxSizeSystemRing(self): pool = Pool(self.n_jobs) maxring = pool.map_async(molproperty.CalculateMaxSizeSystemRing, self.mols).get() pool.close() pool.join() return maxring def CalculateNumStereocenters(self): nStereo = map(molproperty.CalculateNumStereocenters, self.mols) return list(nStereo) def CalculateNumCarbon(self): pool = Pool(self.n_jobs) nC = pool.map_async(molproperty.CalculateNumCarbon, self.mols).get() pool.close() pool.join() return nC def CalculateNumBoron(self): pool = Pool(self.n_jobs) nB = pool.map_async(molproperty.CalculateNumBoron, self.mols).get() pool.close() pool.join() return nB def CalculateNumFluorin(self): pool = Pool(self.n_jobs) nF = pool.map_async(molproperty.CalculateNumFluorin, self.mols).get() pool.close() pool.join() return nF def CalculateNumChlorin(self): pool = Pool(self.n_jobs) nCl = pool.map_async(molproperty.CalculateNumChlorin, self.mols).get() pool.close() pool.join() return nCl def CalculateNumBromine(self): pool = Pool(self.n_jobs) nBr = pool.map_async(molproperty.CalculateNumBromine, self.mols).get() pool.close() pool.join() return nBr def CalculateNumIodine(self): pool = Pool(self.n_jobs) nI = pool.map_async(molproperty.CalculateNumIodine, self.mols).get() pool.close() pool.join() return nI def CalculateNumPhosphor(self): pool = Pool(self.n_jobs) nP = pool.map_async(molproperty.CalculateNumPhosphor, self.mols).get() pool.close() pool.join() return nP def CalculateNumSulfur(self): pool = Pool(self.n_jobs) nS = pool.map_async(molproperty.CalculateNumSulfur, self.mols).get() pool.close() pool.join() return nS def CalculateNumOxygen(self): pool = Pool(self.n_jobs) nO = pool.map_async(molproperty.CalculateNumOxygen, self.mols).get() pool.close() pool.join() return nO def CalculateNumNitrogen(self): pool = Pool(self.n_jobs) nN = pool.map_async(molproperty.CalculateNumNitrogen, self.mols).get() pool.close() pool.join() return nN def CalculateNumChargedGroups(self): pass def CalculateHetCarbonRatio(self): pool = Pool(self.n_jobs) HetRatio = pool.map_async(molproperty.CalculateHetCarbonRatio, self.mols).get() pool.close() pool.join() return HetRatio def CalculateSAscore(self): pool = Pool(self.n_jobs) SA = pool.map_async(molproperty.CalculateSAscore, self.mols).get() pool.close() pool.join() return SA def 
CalculateNPscore(self): pool = Pool(self.n_jobs) NP = pool.map_async(molproperty.CalculateNPscore, self.mols).get() pool.close() pool.join() return NP def CalculateMolVolume(self): pool = Pool(self.n_jobs) mv = pool.map_async(molproperty.CalculateMolVolume, self.mols).get() pool.close() pool.join() return mv def CalculateMolDensity(self): pool = Pool(self.n_jobs) md = pool.map_async(molproperty.CalculateMolDensity, self.mols).get() pool.close() pool.join() return md def CalculateMolFCharge(self): pool = Pool(self.n_jobs) fChar = pool.map_async(molproperty.CalculateMolFCharge, self.mols).get() pool.close() pool.join() return fChar def CalculateNumSinBond(self): pool = Pool(self.n_jobs) nSingle = pool.map_async(molproperty.CalculateNumSinBond, self.mols).get() pool.close() pool.join() return nSingle def CalculateNumDouBond(self): pool = Pool(self.n_jobs) nDouble = pool.map_async(molproperty.CalculateNumDouBond, self.mols).get() pool.close() pool.join() return nDouble
MIT License
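A usage sketch for CalculateNumTriBond above. The import path is assumed from the file location in the record, the SMILES strings are illustrative, and the per-molecule counts in the comments follow directly from those structures.

from rdkit import Chem
from scopy.ScoDruglikeness.molproperty_Lib import PC_properties  # import path assumed

smiles = ["C#N", "CC#CC", "CCO"]   # 1, 1 and 0 triple bonds respectively
mols = [Chem.MolFromSmiles(smi) for smi in smiles]

if __name__ == "__main__":   # guard needed because PC_properties uses multiprocessing
    props = PC_properties(mols, n_jobs=1)
    print(props.CalculateNumTriBond())   # expected: [1, 1, 0]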
squareslab/bugzoo
bugzoo/mgr/coverage/extractor.py
CoverageExtractor.run
python
def run(self, tests: Iterable[TestCase], *, instrument: bool = True ) -> TestSuiteCoverage: container = self.container logger.debug("computing coverage for container: %s", container.uid) try: if instrument: logger.debug("instrumenting container") self.prepare() else: logger.debug("not instrumenting container") except Exception: msg = "failed to instrument container." raise exceptions.FailedToComputeCoverage(msg) cov = {} for test in tests: logger.debug("Generating coverage for test %s in container %s", test.name, container.uid) outcome = self.__installation.containers.execute(container, test) filelines = self.extract() test_coverage = TestCoverage(test.name, outcome, filelines) logger.debug("Generated coverage for test %s in container %s", test.name, container.uid) cov[test.name] = test_coverage self.cleanup() coverage = TestSuiteCoverage(cov) logger.debug("Computed coverage for container: %s", container.uid) return coverage
Computes line coverage information for a given set of tests. Parameters: tests: the tests for which coverage should be computed. instrument: if set to True, calls prepare and cleanup before and after running the tests. If set to False, prepare and cleanup are not called, and the responsibility of calling those methods is left to the user.
https://github.com/squareslab/bugzoo/blob/a87f03b2e33c2097c21c0175e613f4e95d9825eb/bugzoo/mgr/coverage/extractor.py#L209-L252
__all__ = ['CoverageExtractor'] from typing import FrozenSet, Optional, Iterable, Dict, Type from timeit import default_timer as timer import xml.etree.ElementTree as ET import abc import os import logging import tempfile import logging from ...core import FileLineSet, Container, TestSuiteCoverage, TestCoverage, CoverageInstructions, TestCase, Language, Bug, FileLineSet from ... import exceptions logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) _NAME_TO_EXTRACTOR = {} _EXTRACTOR_TO_NAME = {} def register(name: str): def decorator(cls: Type['CoverageExtractor']): cls.register(name) return cls return decorator def register_as_default(language: Language): def decorator(cls: Type['CoverageExtractor']): cls.register_as_default(language) return cls return decorator class CoverageExtractor(abc.ABC): @classmethod def register(cls, name: str) -> None: logger.info("registering coverage extractor [%s] under name [%s]", cls, name) if name in _NAME_TO_EXTRACTOR: raise exceptions.NameInUseError(name) if cls in _EXTRACTOR_TO_NAME: m = "coverage extractor [{}] is already registered under name: {}" m = m.format(cls, _EXTRACTOR_TO_NAME[cls]) raise Exception(m) try: instructions = cls.Instructions except UnboundLocalError: m = "coverage extractor doesn't provide an 'Instructions' class" raise Exception(m) if not issubclass(instructions, CoverageInstructions): m = "'Instructions' class should subclass 'CoverageInstructions'" raise Exception(m) instructions.register(name) _NAME_TO_EXTRACTOR[name] = cls _EXTRACTOR_TO_NAME[cls] = name logger.info("registered coverage extractor [%s] under name [%s]", cls, name) @classmethod def register_as_default(cls, language: Language) -> None: logger.info("registering coverage extractor [{}] as default for {}", cls, language) if cls not in _EXTRACTOR_TO_NAME: m = "coverage extractor [{}] has not been registered under a name" m = m.format(cls) raise Exception(m) name = _EXTRACTOR_TO_NAME[cls] cls_instructions = CoverageInstructions.find(name) cls_instructions.register_as_default(language) logger.info("registered coverage extractor [{}] as default for {}", cls, language) @staticmethod def build(installation: 'BugZoo', container: Container ) -> 'CoverageExtractor': bug = installation.bugs[container.bug] instructions = bug.instructions_coverage if instructions is None: raise exceptions.NoCoverageInstructions name = instructions.__class__.registered_under_name() extractor_cls = _NAME_TO_EXTRACTOR[name] builder = extractor_cls.from_instructions extractor = builder(installation, container, instructions) return extractor @staticmethod @abc.abstractmethod def from_instructions(installation: 'BugZoo', container: Container, instructions: CoverageInstructions ) -> 'CoverageExtractor': raise NotImplementedError def __init__(self, installation: 'BugZoo', container: Container ) -> None: self.__installation = installation self.__container = container @property def container(self) -> Container: return self.__container @abc.abstractmethod def extract(self) -> FileLineSet: raise NotImplementedError @abc.abstractmethod def cleanup(self) -> None: raise NotImplementedError
MIT License
pyviz-dev/nbsite
examples/sites/holoviews/holoviews/core/spaces.py
HoloMap.grid
python
def grid(self, dimensions=None, **kwargs): dimensions = self._valid_dimensions(dimensions) if len(dimensions) == self.ndims: with item_check(False): return GridSpace(self, **kwargs).reindex(dimensions) return self.groupby(dimensions, container_type=GridSpace, **kwargs)
GridSpace takes a list of one or two dimensions, and lays out the containing Views along these axes in a GridSpace. Shows all HoloMap data when no dimensions are specified.
https://github.com/pyviz-dev/nbsite/blob/7a4752e6ed6a3b0c3698473a6dd3a71ff9ba2acb/examples/sites/holoviews/holoviews/core/spaces.py#L47-L58
import itertools import types from numbers import Number from itertools import groupby from functools import partial from contextlib import contextmanager from inspect import ArgSpec import numpy as np import param from . import traversal, util from .dimension import OrderedDict, Dimension, ViewableElement, redim from .layout import Layout, AdjointLayout, NdLayout from .ndmapping import UniformNdMapping, NdMapping, item_check from .overlay import Overlay, CompositeOverlay, NdOverlay, Overlayable from .options import Store, StoreOptions from ..streams import Stream class HoloMap(UniformNdMapping, Overlayable): data_type = (ViewableElement, NdMapping, Layout) def overlay(self, dimensions=None, **kwargs): dimensions = self._valid_dimensions(dimensions) if len(dimensions) == self.ndims: with item_check(False): return NdOverlay(self, **kwargs).reindex(dimensions) else: dims = [d for d in self.kdims if d not in dimensions] return self.groupby(dims, group_type=NdOverlay, **kwargs)
BSD 3-Clause New or Revised License
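A minimal usage sketch for the HoloMap.grid method in the record above, assuming HoloViews is installed; the key dimensions and curve data are made up for illustration and are not part of the dataset record:

import numpy as np
import holoviews as hv

# Build a HoloMap keyed on two illustrative dimensions, then lay it out as a GridSpace.
curves = {(phase, freq): hv.Curve(np.sin(freq * np.linspace(0, 2 * np.pi, 100) + phase))
          for phase in (0.0, 1.0) for freq in (1, 2)}
hmap = hv.HoloMap(curves, kdims=['phase', 'frequency'])
grid = hmap.grid(['phase', 'frequency'])   # one Curve per (phase, frequency) cell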
cgatoxford/cgatpipelines
obsolete/pipeline_rrbs.py
extractDMRCpGs
python
def extractDMRCpGs(outfile): RRBS.findCpGsFromBed( outfile, PARAMS["methylation_summary_genome_fasta"], PARAMS["annotation_dmr"], "DMR", both_strands=True, submit=True, job_memory="4G")
extract sequences for differentially methylated regions (DMRs) and identify CpG locations
https://github.com/cgatoxford/cgatpipelines/blob/a34d460b5fc64984f6da0acb18aee43c5e02d5fc/obsolete/pipeline_rrbs.py#L805-L812
from ruffus import * import sys import os import re import itertools import glob import sqlite3 import CGAT.Experiment as E import CGAT.IOTools as IOTools import CGATPipelines.PipelineMapping as PipelineMapping import CGATPipelines.Pipeline as P import CGATPipelines.PipelineRrbs as RRBS import pandas as pd import CGATPipelines.PipelineTracks as PipelineTracks import numpy as np P.getParameters( ["%s/pipeline.ini" % os.path.splitext(__file__)[0], "../pipeline.ini", "pipeline.ini"]) PARAMS = P.PARAMS INPUT_FORMATS = ("*.fastq.1.gz", "*.fastq.gz", "*.sra", "*.csfasta.gz") REGEX_FORMATS = regex(r"(\S+).(fastq.1.gz|fastq.gz|sra|csfasta.gz)") try: PARAMS["input"] except NameError: DATADIR = "." else: if PARAMS["input"] == 0: DATADIR = "." elif PARAMS["input"] == 1: DATADIR = "data.dir" else: DATADIR = PARAMS["input"] def connect(): dbh = sqlite3.connect(PARAMS["database_name"]) return dbh SEQUENCESUFFIXES = ("*.fastq.1.gz", "*.fastq.gz", "*.fa.gz", "*.sra", "*.export.txt.gz", "*.csfasta.gz", "*.csfasta.F3.gz", ) SEQUENCEFILES = tuple([os.path.join(DATADIR, suffix_name) for suffix_name in SEQUENCESUFFIXES]) SEQUENCEFILES_REGEX = regex( r"(\S+)-(\S+)-(\S+).(?P<suffix>fastq.1.gz|fastq.gz|sra)") Sample = PipelineTracks.AutoSample Sample.attributes = ('tissue', 'condition', 'replicate') TRACKS = PipelineTracks.Tracks(Sample).loadFromDirectory( [y for x in SEQUENCESUFFIXES for y in glob.glob(x)], "(\S+).(fastq.1.gz|fastq.gz|sra)") EXPERIMENTS = PipelineTracks.Aggregate(TRACKS, labels=("tissue", "condition")) CONDITIONS = PipelineTracks.Aggregate(TRACKS, labels=("condition", )) REPLICATES = PipelineTracks.Aggregate(TRACKS, labels=("replicate", )) @follows(mkdir("sequence_characteristics.dir")) @transform(SEQUENCEFILES, SEQUENCEFILES_REGEX, r"sequence_characteristics.dir/\1-\2-\3.\g<suffix>_start.tsv") def summariseReadStart(infile, outfile): if infile.endswith(".sra"): P.touch(outfile) else: statement = '''zcat %(infile)s | paste - - - - | cut -f2 | cut -c1-3 | sort | uniq -c | sort -nk1 | awk -F' ' 'BEGIN{total=0; sum=0} {total+=$1; OFS"\\t"; if($2=="CGG"||$2=="TGG"||$2=="CGA"||$2=="TGA") {sum+=$1; print $1, $2}} END {print total-sum,"others"}' > %(outfile)s ''' % locals() P.run() @merge(summariseReadStart, "sequence_characteristics.dir/read_start_summary.tsv") def combineReadStartSummaries(infiles, outfile): infile_list = " ".join(infiles) statement = '''echo -e "file\\treads\\tsequence\\tsample\\tcondition\\trep" > %(outfile)s; cgat combine_tables -v0 -a CAT -t %(infile_list)s| sed -e 's/sequence_characteristics.dir\///g' -e 's/.fastq.*start.tsv//g'| awk '{OFS="\\t"; split ($1, a, "-"); print $1,$2,$3,a[1],a[2],a[3]}' >> %(outfile)s;''' % locals() P.run() @transform(combineReadStartSummaries, suffix(".tsv"), ".load") def loadStartSummary(infile, outfile): dbh = connect() tablename = P.toTable(outfile) statement = '''cat %(infile)s | cgat csv2db --table %(tablename)s --retry --ignore-empty > %(outfile)s''' % locals() P.run() @follows(mkdir("bismark.dir")) @transform(SEQUENCEFILES, SEQUENCEFILES_REGEX, r"bismark.dir/\1-\2-\3_bismark_bt2.bam") def mapReadsWithBismark(infile, outfile): job_options = "-l mem_free=%s " % PARAMS["bismark_memory"] job_threads = (PARAMS["bismark_threads"] * 2) + 1 outdir = "bismark.dir" bismark_options = PARAMS["bismark_options"] m = PipelineMapping.Bismark() statement = m.build((infile,), outfile) P.run() @follows(mkdir("methylation.dir")) @transform(mapReadsWithBismark, regex("bismark.dir/(\S+).bam"), r"methylation.dir/\1.bismark.cov") def callMethylationStatus(infile, 
outfile): if infile.endswith(("bismark_bt2.bam", "bismark_bt.bam")): options = " --single-end " else: options = " --paired-end " if PARAMS["bismark_extraction_options"]: options += PARAMS["bismark_extraction_options"] CG = ("methylation.dir/CpG_context_" + P.snip(os.path.basename(outfile), ".bismark.cov") + ".txt") CHG = re.sub("CpG", "CHG", CG) CHH = re.sub("CpG", "CHH", CG) outdir = "methylation.dir" index_dir = PARAMS["bismark_index_dir"] genome = PARAMS["bismark_genome"] statement = '''bismark_methylation_extractor %(options)s --comprehensive --output %(outdir)s --counts --cytosine_report --bedGraph --genome_folder %(index_dir)s/%(genome)s/ %(infile)s; gzip -f %(CG)s; gzip -f %(CHG)s; gzip -f %(CHH)s ''' % locals() P.run() @follows(mkdir("plots.dir")) @transform(callMethylationStatus, regex("methylation.dir/(\S+).bismark.cov"), r"plots.dir/\1.read_position.tsv") def plotReadBias(infile, outfile): job_options = "-l mem_free=1G" m_bias_infile = P.snip(infile, ".bismark.cov") + ".M-bias.txt" print(m_bias_infile) RRBS.plotReadBias(m_bias_infile, outfile, submit=True, job_options=job_options) @follows(callMethylationStatus) @transform(mapReadsWithBismark, suffix(".bam"), ".sorted.bam") def sortAndIndexBams(infile, outfile): sort_out = P.snip(outfile, ".bam") statement = '''samtools sort %(infile)s %(sort_out)s; samtools index %(outfile)s;''' % locals() print(statement) P.run() @transform(sortAndIndexBams, regex(".bam"), ".bw") def buildBigWig(infile, outfile): job_memory = "16G" statement = '''cgat bam2wiggle --output-format=bigwig %(bigwig_options)s %(infile)s %(outfile)s > %(outfile)s.log''' P.run() @merge(buildBigWig, "bigwig_stats.load") def loadBigWigStats(infiles, outfile): data = " ".join( ['<( bigWigInfo %s | perl -p -e "s/:/\\t/; s/ //g; s/,//g")' % x for x in infiles]) headers = ",".join([P.snip(os.path.basename(x), ".bw") for x in infiles]) load_statement = P.build_load_statement( P.toTable(outfile), options="--add-index=track") statement = '''cgat combine_tables --header-names=%(headers)s --skip-titles --missing-value=0 --ignore-empty %(data)s | perl -p -e "s/bin/track/" | cgat table2table --transpose | %(load_statement)s > %(outfile)s ''' P.run() @follows(mkdir("coverage.dir")) @originate("coverage.dir/cpgIslands.bed") def makeCpgIslandsBed(outfile): infile = PARAMS["methylation_summary_cpgislands"] out = IOTools.openFile(outfile, "w") with IOTools.openFile(infile, "r") as f: for line in f.readlines(): contig, start, end = line.split()[1:4] if not contig == "chrom": out.write("%s\t%s\t%s\n" % (contig, start, end)) out.close() @subdivide(makeCpgIslandsBed, regex("coverage.dir/(\S+).bed"), r"coverage.dir/\1_1based.tsv") def make1basedCpgIslands(infile, outfile): out = IOTools.openFile(outfile, "w") out.write("%s\t%s\t%s\n" % ("contig", "position", "cpgi")) with IOTools.openFile(infile, "r") as f: lines = f.readlines() for line in lines: contig, start, stop = line.split() for position in [x for x in range(int(start), int(stop) + 2)]: out.write("%s\t%s\t%s\n" % (contig, position, "CpGIsland")) out.close() @transform(callMethylationStatus, suffix(".bismark.cov"), r".bismark.subset10.cov") def subsetCoverage(infile, outfile): statement = '''awk '($5+$6)>=10' %(infile)s > %(outfile)s''' % locals() P.run() @originate("fa_sizes.tsv") def getChromSizes(outfile): statement = "/ifs/apps/bio/ucsc/fetchChromSizes %(genome)s > %(outfile)s" P.run() @transform(subsetCoverage, regex("methylation.dir/(\S+).bismark.subset10.cov"), add_inputs(getChromSizes), r"methylation.dir/\1.bismark.bigwig") 
def bed2BigWig(infiles, outfile): infile, sizes = infiles infile = infile.replace(".bismark.cov", ".bedGraph") tmp_infile = P.getTempFilename() statement = ''' sort -k1,1 -k2,2n %(infile)s | awk '{OFS="\t"; $3 = $3 + 1; print $1,$2,$3,$4}' > %(tmp_infile)s; checkpoint; bedGraphToBigWig %(tmp_infile)s %(sizes)s %(outfile)s; checkpoint; rm -rf %(tmp_infile)s''' P.run() @originate("methylation.dir/cpg-locations-1.cov") def findCpGs(outfile): genome_infile = PARAMS["methylation_summary_genome_fasta"] job_options = "-l mem_free=2G" RRBS.fasta2CpG(genome_infile, outfile, submit=True, job_options=job_options) @follows(findCpGs) @merge([callMethylationStatus, findCpGs], "methylation.dir/cpgs_meth.tsv") def mergeCoverage(infiles, outfile): cpgs_infile = infiles[-1] coverage_infiles = infiles[:-1] job_options = "-l mem_free=48G" job_threads = 2 RRBS.mergeAndDrop(cpgs_infile, coverage_infiles, outfile, submit=True, job_options=job_options) @transform(mergeCoverage, suffix("_meth.tsv"), add_inputs(make1basedCpgIslands), "_meth_cpgi.tsv") def addCpGIs(infiles, outfile): infile, CpGI = infiles job_memory = "40G" job_threads = 1 RRBS.pandasMerge(infile, CpGI, outfile, merge_type="left", left=['contig', 'position'], right=['contig', 'position'], submit=True, job_memory=job_memory) @P.add_doc(RRBS.calculateCoverage) @transform(addCpGIs, suffix("_meth_cpgi.tsv"), "_coverage.tsv") def calculateCoverage(infile, outfile): RRBS.calculateCoverage(infile, outfile, submit=True, job_memory="2G") @P.add_doc(RRBS.plotCoverage) @transform(calculateCoverage, suffix("_coverage.tsv"), ["_coverage_bar.png", "_coverage_pie.png"]) def plotCoverage(infile, outfiles): RRBS.plotCoverage(infile, outfiles, submit=True, job_memory="6G") @transform(addCpGIs, suffix(".tsv"), ".load") def loadMergeCoverage(infile, outfile): dbh = connect() tablename = P.toTable(outfile) job_options = "-l mem_free=23G" job_threads = 2 statement = '''cat %(infile)s | cgat csv2db --table %(tablename)s --retry --ignore-empty > %(outfile)s''' % locals() P.run() @transform(callMethylationStatus, regex("methylation.dir/(\S+).bismark.cov"), r"methylation.dir/\1.coverage.tsv") def summariseCoverage(infile, outfile): CG = ("methylation.dir/CpG_context_" + P.snip(os.path.basename(infile), ".bismark.cov") + ".txt.gz") statement = '''zcat %(CG)s | awk -F'\t' '{print $3+$4;}'| sort -n | uniq -c | cut -f1 |awk '{print $1}' | sort -n | uniq -c| awk -F' ' '{OFS="\\t";} {print $1,$2;}' > %(outfile)s''' % locals() print(statement) P.run() @follows(summariseCoverage) @merge(summariseCoverage, "methylation.dir/coverage.tsv") def concatenateCoverage(infiles, outfile): statement = '''echo -e "file\\tfreq\\tcov\\tsample\\tcondition\\trep" > %(outfile)s; cgat combine_tables -v0 --glob="methylation.dir/*.coverage.tsv" -a CAT -t| sed -e 's/methylation.dir\///g' -e 's/_bismark.*.coverage.tsv//g'| awk '{OFS="\\t"; split ($1, a, "-"); print $1,$2,$3,a[1],a[2],a[3]}' >> %(outfile)s;''' % locals() P.run() @transform(concatenateCoverage, suffix(".tsv"), ".load") def loadCoverage(infile, outfile): dbh = connect() tablename = P.toTable(outfile) statement = '''cat %(infile)s | cgat csv2db --table %(tablename)s --retry --ignore-empty > %(outfile)s''' % locals() P.run() @follows(summariseCoverage) @merge(callMethylationStatus, "methylation.dir/coverage_overlap.tsv") def summariseCpGOverlap(infiles, outfile): infile_list = [] for x in infiles: if x.endswith(".cov"): infile_list.append(x) print((len(infile_list))) infile_list = " ".join(infile_list) coverage_range = [1, 2, 5, 10, 20, 30] 
coverage_range = " ".join(map(str, coverage_range)) statement = '''echo -e "CpGs\\toverlaps\\tthreshold" > %(outfile)s; for x in %(coverage_range)s; do cat %(infile_list)s | awk -v threshold=$x -F'\\t' '{OFS="\\t"; if ($5+$6>threshold) print $1,$2}'| sort| uniq -c| awk -F' ' '{OFS="\\t"; print $1}'| sort| uniq -c| awk -v threshold=$x -F' ' '{OFS="\\t";print $1,$2,threshold}' >> %(outfile)s; done''' % locals() print(infile_list) P.run() @transform(summariseCpGOverlap, suffix(".tsv"), ".load") def loadCpGOverlap(infile, outfile): dbh = connect() tablename = P.toTable(outfile) statement = '''cat %(infile)s | cgat csv2db --table %(tablename)s --retry --ignore-empty > %(outfile)s''' % locals() P.run() @transform(summariseCoverage, regex("methylation.dir/(\S+).coverage.tsv"), r"methylation.dir/\1.reads_by_threshold.tsv") def summariseRemainingReadsbyThreshold(infile, outfile): statement = '''sum_reads=$(awk -F'\\t' 'BEGIN {total=0} {total+=($1*$2)} END {print total}' %(infile)s); echo -e "1\\t$sum_reads\\t1" >> %(outfile)s; awk -v old_total=$sum_reads 'BEGIN {total=old_total} {total -= ($1*$2)} {OFS="\\t"; print $2+1,total,total/old_total}' %(infile)s >> %(outfile)s''' % locals() P.run() @merge(summariseRemainingReadsbyThreshold, "methylation.dir/reads_remaining_by_threshold.tsv") def concatenateRemainingReads(infiles, outfile): statement = '''echo -e "file\\tthreshold\\treads\\tpercentage\\tsample\\tcondition\\trep" > %(outfile)s; cgat combine_tables -v0 --glob="methylation.dir/*.reads_by_threshold.tsv" -a CAT -t| sed -e 's/methylation.dir\///g' -e 's/_bismark.*.reads_by_threshold.tsv//g'| awk '{OFS="\\t"; split ($1, a, "-"); print $1,$2,$3,$4,a[1],a[2],a[3]}' >> %(outfile)s;''' % locals() P.run() @transform(concatenateRemainingReads, suffix(".tsv"), ".load") def loadRemainingReads(infile, outfile): dbh = connect() tablename = P.toTable(outfile) statement = '''cat %(infile)s | cgat csv2db --table %(tablename)s --retry --ignore-empty > %(outfile)s''' % locals() P.run() @follows(makeCpgIslandsBed) @transform(sortAndIndexBams, regex("bismark.dir/(\S+).sorted.bam"), add_inputs(PARAMS["annotation_genes"]), r"coverage.dir/\1.profile.png") def makeGeneProfiles(infiles, outfile): outname = P.snip(os.path.basename(outfile), ".png") infile, genes_gtf = infiles job_options = "-l mem_free=24G" statement = '''cgat bam2geneprofile --bamfile=%(infile)s --gtffile=%(genes_gtf)s --method=tssprofile --reporter=gene -P %(outname)s --normalization=total-sum --normalize-profile=area; checkpoint; for file in %(outname)s*; do mv $file coverage.dir/.; done''' print(statement) P.run() @transform(addCpGIs, regex("methylation.dir/(\S+)_meth_cpgi.tsv"), r"methylation.dir/\1_covered_meth_cpgi.tsv") def subsetCpGsToCovered(infile, outfile): job_options = "-l mem_free=48G" RRBS.subsetToCovered(infile, outfile, cov_threshold=10, submit=True, job_options=job_options) @originate("methylation.dir/promoter_cpgs.tsv") def categorisePromoterCpGs(outfile): RRBS.categorisePromoterCpGs( outfile, PARAMS["methylation_summary_genome_fasta"], PARAMS['annotation_database'], submit=True, job_memory="4G") @originate("methylation.dir/repeat_cpgs.tsv") def extractRepeatCpGs(outfile): RRBS.findRepeatCpGs( outfile, PARAMS["methylation_summary_genome_fasta"], PARAMS["annotation_repeats_gff"], submit=True, job_memory="4G") @originate("methylation.dir/hnce_cpgs.tsv") def extractHCNECpGs(outfile): RRBS.findCpGsFromBed( outfile, PARAMS["methylation_summary_genome_fasta"], PARAMS["annotation_hcne"], "HCNE", both_strands=True, submit=True, 
job_memory="4G") @originate("methylation.dir/dmr_cpgs.tsv")
MIT License
mklan/nx-rom-market
lib/python3.5/ntpath.py
expanduser
python
def expanduser(path): if isinstance(path, bytes): tilde = b'~' else: tilde = '~' if not path.startswith(tilde): return path i, n = 1, len(path) while i < n and path[i] not in _get_bothseps(path): i += 1 if 'HOME' in os.environ: userhome = os.environ['HOME'] elif 'USERPROFILE' in os.environ: userhome = os.environ['USERPROFILE'] elif not 'HOMEPATH' in os.environ: return path else: try: drive = os.environ['HOMEDRIVE'] except KeyError: drive = '' userhome = join(drive, os.environ['HOMEPATH']) if isinstance(path, bytes): userhome = os.fsencode(userhome) if i != 1: userhome = join(dirname(userhome), path[1:i]) return userhome + path[i:]
Expand ~ and ~user constructs. If user or $HOME is unknown, do nothing.
https://github.com/mklan/nx-rom-market/blob/33613d2177b63df9e0568038ffdf1dd91ad334d8/lib/python3.5/ntpath.py#L304-L337
import os import sys import stat import genericpath from genericpath import * __all__ = ["normcase","isabs","join","splitdrive","split","splitext", "basename","dirname","commonprefix","getsize","getmtime", "getatime","getctime", "islink","exists","lexists","isdir","isfile", "ismount", "expanduser","expandvars","normpath","abspath", "splitunc","curdir","pardir","sep","pathsep","defpath","altsep", "extsep","devnull","realpath","supports_unicode_filenames","relpath", "samefile", "sameopenfile", "samestat", "commonpath"] curdir = '.' pardir = '..' extsep = '.' sep = '\\' pathsep = ';' altsep = '/' defpath = '.;C:\\bin' if 'ce' in sys.builtin_module_names: defpath = '\\Windows' devnull = 'nul' def _get_bothseps(path): if isinstance(path, bytes): return b'\\/' else: return '\\/' def normcase(s): try: if isinstance(s, bytes): return s.replace(b'/', b'\\').lower() else: return s.replace('/', '\\').lower() except (TypeError, AttributeError): if not isinstance(s, (bytes, str)): raise TypeError("normcase() argument must be str or bytes, " "not %r" % s.__class__.__name__) from None raise def isabs(s): s = splitdrive(s)[1] return len(s) > 0 and s[0] in _get_bothseps(s) def join(path, *paths): if isinstance(path, bytes): sep = b'\\' seps = b'\\/' colon = b':' else: sep = '\\' seps = '\\/' colon = ':' try: if not paths: path[:0] + sep result_drive, result_path = splitdrive(path) for p in paths: p_drive, p_path = splitdrive(p) if p_path and p_path[0] in seps: if p_drive or not result_drive: result_drive = p_drive result_path = p_path continue elif p_drive and p_drive != result_drive: if p_drive.lower() != result_drive.lower(): result_drive = p_drive result_path = p_path continue result_drive = p_drive if result_path and result_path[-1] not in seps: result_path = result_path + sep result_path = result_path + p_path if (result_path and result_path[0] not in seps and result_drive and result_drive[-1:] != colon): return result_drive + sep + result_path return result_drive + result_path except (TypeError, AttributeError, BytesWarning): genericpath._check_arg_types('join', path, *paths) raise def splitdrive(p): if len(p) >= 2: if isinstance(p, bytes): sep = b'\\' altsep = b'/' colon = b':' else: sep = '\\' altsep = '/' colon = ':' normp = p.replace(altsep, sep) if (normp[0:2] == sep*2) and (normp[2:3] != sep): index = normp.find(sep, 2) if index == -1: return p[:0], p index2 = normp.find(sep, index + 1) if index2 == index + 1: return p[:0], p if index2 == -1: index2 = len(p) return p[:index2], p[index2:] if normp[1:2] == colon: return p[:2], p[2:] return p[:0], p def splitunc(p): import warnings warnings.warn("ntpath.splitunc is deprecated, use ntpath.splitdrive instead", DeprecationWarning, 2) drive, path = splitdrive(p) if len(drive) == 2: return p[:0], p return drive, path def split(p): seps = _get_bothseps(p) d, p = splitdrive(p) i = len(p) while i and p[i-1] not in seps: i -= 1 head, tail = p[:i], p[i:] head = head.rstrip(seps) or head return d + head, tail def splitext(p): if isinstance(p, bytes): return genericpath._splitext(p, b'\\', b'/', b'.') else: return genericpath._splitext(p, '\\', '/', '.') splitext.__doc__ = genericpath._splitext.__doc__ def basename(p): return split(p)[1] def dirname(p): return split(p)[0] def islink(path): try: st = os.lstat(path) except (OSError, AttributeError): return False return stat.S_ISLNK(st.st_mode) def lexists(path): try: st = os.lstat(path) except OSError: return False return True try: from nt import _getvolumepathname except ImportError: _getvolumepathname = None 
def ismount(path): seps = _get_bothseps(path) path = abspath(path) root, rest = splitdrive(path) if root and root[0] in seps: return (not rest) or (rest in seps) if rest in seps: return True if _getvolumepathname: return path.rstrip(seps) == _getvolumepathname(path).rstrip(seps) else: return False
MIT License
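A small illustrative call of the expanduser function above; which environment variable wins (HOME, USERPROFILE, or HOMEDRIVE+HOMEPATH) depends on the caller's environment, and the values below are assumptions set only for the example:

import ntpath
import os

os.environ['USERPROFILE'] = r'C:\Users\alice'   # assumed value, for illustration only
os.environ.pop('HOME', None)                    # force the USERPROFILE branch
print(ntpath.expanduser('~/Documents'))         # -> C:\Users\alice/Documents
print(ntpath.expanduser(r'~bob\Documents'))     # -> C:\Users\bob\Documents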
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_container.py
V1Container.command
python
def command(self): return self._command
Gets the command of this V1Container.  # noqa: E501

Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell  # noqa: E501

:return: The command of this V1Container.  # noqa: E501
:rtype: list[str]
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_container.py#L183-L191
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1Container(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'args': 'list[str]', 'command': 'list[str]', 'env': 'list[V1EnvVar]', 'env_from': 'list[V1EnvFromSource]', 'image': 'str', 'image_pull_policy': 'str', 'lifecycle': 'V1Lifecycle', 'liveness_probe': 'V1Probe', 'name': 'str', 'ports': 'list[V1ContainerPort]', 'readiness_probe': 'V1Probe', 'resources': 'V1ResourceRequirements', 'security_context': 'V1SecurityContext', 'startup_probe': 'V1Probe', 'stdin': 'bool', 'stdin_once': 'bool', 'termination_message_path': 'str', 'termination_message_policy': 'str', 'tty': 'bool', 'volume_devices': 'list[V1VolumeDevice]', 'volume_mounts': 'list[V1VolumeMount]', 'working_dir': 'str' } attribute_map = { 'args': 'args', 'command': 'command', 'env': 'env', 'env_from': 'envFrom', 'image': 'image', 'image_pull_policy': 'imagePullPolicy', 'lifecycle': 'lifecycle', 'liveness_probe': 'livenessProbe', 'name': 'name', 'ports': 'ports', 'readiness_probe': 'readinessProbe', 'resources': 'resources', 'security_context': 'securityContext', 'startup_probe': 'startupProbe', 'stdin': 'stdin', 'stdin_once': 'stdinOnce', 'termination_message_path': 'terminationMessagePath', 'termination_message_policy': 'terminationMessagePolicy', 'tty': 'tty', 'volume_devices': 'volumeDevices', 'volume_mounts': 'volumeMounts', 'working_dir': 'workingDir' } def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, readiness_probe=None, resources=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._args = None self._command = None self._env = None self._env_from = None self._image = None self._image_pull_policy = None self._lifecycle = None self._liveness_probe = None self._name = None self._ports = None self._readiness_probe = None self._resources = None self._security_context = None self._startup_probe = None self._stdin = None self._stdin_once = None self._termination_message_path = None self._termination_message_policy = None self._tty = None self._volume_devices = None self._volume_mounts = None self._working_dir = None self.discriminator = None if args is not None: self.args = args if command is not None: self.command = command if env is not None: self.env = env if env_from is not None: self.env_from = env_from if image is not None: self.image = image if image_pull_policy is not None: self.image_pull_policy = image_pull_policy if lifecycle is not None: self.lifecycle = lifecycle if liveness_probe is not None: self.liveness_probe = liveness_probe self.name = name if ports is not None: self.ports = ports if readiness_probe is not None: self.readiness_probe = readiness_probe if resources is not None: self.resources = resources if security_context is not None: self.security_context = security_context if startup_probe is not None: self.startup_probe = startup_probe if stdin is not None: self.stdin = stdin if stdin_once is not 
None: self.stdin_once = stdin_once if termination_message_path is not None: self.termination_message_path = termination_message_path if termination_message_policy is not None: self.termination_message_policy = termination_message_policy if tty is not None: self.tty = tty if volume_devices is not None: self.volume_devices = volume_devices if volume_mounts is not None: self.volume_mounts = volume_mounts if working_dir is not None: self.working_dir = working_dir @property def args(self): return self._args @args.setter def args(self, args): self._args = args @property
Apache License 2.0
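A brief sketch of constructing a V1Container and reading the command property shown above; the container name, image, and command values are illustrative placeholders:

from kubernetes_asyncio.client.models.v1_container import V1Container

container = V1Container(
    name='app',                          # required by the model
    image='busybox:1.36',                # assumed image tag
    command=['sh', '-c', 'echo hello'],  # overrides the image ENTRYPOINT
)
print(container.command)                 # ['sh', '-c', 'echo hello']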
andrek10/bearing-vibration-diagnostics-toolbox
pyvib/diagnose.py
diagnosefft
python
def diagnosefft(Y, df, charf, X, subband, debug=False, version=2, harmthreshold=3.0, subthreshold=3.0): df /= X nHarm = 1 score = 0.0 if debug is True: harmonics = [] subbandsNeg = [] subbandsPos = [] noises = [] scores = [] while True: if nHarm == 1: per = 0.02 else: per = 0.01 j1 = int((nHarm*charf-per*charf)/df) j2 = int((nHarm*charf+per*charf)/df) jh = np.argmax(Y[j1:j2]) + j1 harm = Y[jh] charf = df*jh/nHarm if version == 1: j1n = int((nHarm*charf-0.02*charf)/df) j2n = int((nHarm*charf+0.02*charf)/df) if jh - j1n == 0: noise = np.mean(Y[jh+1:j2n]) elif j2n - (jh + 1) == 0: noise = np.mean(Y[j1n:jh]) else: noise = (np.mean(Y[j1n:jh]) + np.mean(Y[jh+1:j2n]))/2.0 elif version == 2: for i in range(jh, 0, -1): if Y[i-1] > Y[i]: jhl = i break for i in range(jh, Y.size, 1): if Y[i+1] > Y[i]: jhr = i break noise = (np.mean(Y[jhl-2:jhl+1]) + np.mean(Y[jhr:jhr+3]))/2.0 if subband > 0.01: j1 = int((nHarm*charf - subband - 0.05)/df) j2 = int((nHarm*charf - subband + 0.05)/df) jsn = np.argmax(Y[j1:j2]) + j1 negSubBand = Y[jsn] j1 = int((nHarm*charf + subband - 0.05)/df) j2 = int((nHarm*charf + subband + 0.05)/df) jsp = np.argmax(Y[j1:j2]) + j1 posSubBand = Y[jsp] if subband > 0.01: if harm >= noise*harmthreshold and (negSubBand > noise*subthreshold or posSubBand > noise*subthreshold): score += harm/(noise*3.0)*nHarm**2.0 nHarm += 1 if debug is True: subbandsNeg.append(jsn) subbandsPos.append(jsp) harmonics.append(jh) noises.append(noise) scores.append(score) else: if debug is True: return score, subbandsNeg, subbandsPos, harmonics, noises, scores else: return score else: if harm >= noise*harmthreshold: score += harm/(noise*harmthreshold)*(nHarm+1.0)**2.0 nHarm += 1 if debug is True: harmonics.append(jh) noises.append(noise) scores.append(score) else: if debug is True: return score, subbandsNeg, subbandsPos, negSubBand, posSubBand, harmonics, noises, scores else: return score test1 = int((nHarm*charf+0.02*charf)/df) test2 = int((nHarm*charf + subband + 0.05)/df) if test1 > Y.size or test2 > Y.size: if debug is True: return score, subbandsNeg, subbandsPos, negSubBand, posSubBand, harmonics, noises, scores else: return score
Diagnose a spectrum for bearing faults. Returns a score.

Parameters
----------
Y : float 1D array
    Spectrum values
df : float
    Delta frequency in Hz
charf : float
    Harmonic frequency
X : float
    Shaft speed in Hz
subband : float
    Sideband frequency
debug : boolean, optional
    Whether debug information is returned
version : int, optional
    Which version of this script to run. Default 2 with new noise estimator

Returns
-------
score : float
    Score for fault being present
https://github.com/andrek10/bearing-vibration-diagnostics-toolbox/blob/7af38e26b1c4fc340edf4f9a26519693816223cb/pyvib/diagnose.py#L170-L296
import numpy as np from .fft import fft from .linalg import get_SVDxi from .signal import envelope, fftwconvolve def R_SVD(U, sigma, V, time, f_fault, tolerance = 0.02, PMItreshold = 1.0, estimate_xi_func=get_SVDxi, estimate_xi_func_params=None): m = sigma.size f_fault = np.asanyarray(f_fault) dt = time[1] - time[0] T0 = np.zeros(f_fault.size, dtype=int) T1 = np.zeros(f_fault.size, dtype=int) PMI = [] W = [] for i in range(0, f_fault.size): T0[i] = int( np.floor( (1.0/(f_fault[i]*(1.0 + tolerance)))/dt ) ) T1[i] = int( np.ceil( (1.0/(f_fault[i]*(1.0 - tolerance)))/dt ) ) if T1[i] == T0[i]: T1[i] += 1 PMI.append(np.zeros(m)) W.append(np.zeros(m)) for i in range(0, m): if estimate_xi_func_params is None: a_i = estimate_xi_func(U, sigma, V, i) else: a_i = estimate_xi_func(U, sigma, V, i, estimate_xi_func_params) a_i = envelope(a_i) a_i -= a_i.mean() R_a = fftwconvolve(np.flipud(a_i), a_i) R_a = R_a[a_i.size-1:] R_a = R_a / np.arange(R_a.size, 0, -1) R_0 = R_a[0] for k in range(0, f_fault.size): R_T = np.max(R_a[T0[k]:T1[k]]) PMI[k][i] = R_T/(R_0 - R_T) for k in range(0, f_fault.size): temp = np.sum(PMI[k]) for i in range(0, m): if PMI[k][i] > PMItreshold: W[k][i] = PMI[k][i]/temp return PMI, W def ES_SVD(U, sigma, V, time, f_fault, f_side, PMItreshold, estimate_xi_func=get_SVDxi, estimate_xi_func_params=None): m = sigma.size f_fault = np.asanyarray(f_fault) f_side = np.asanyarray(f_side) dt = time[1] - time[0] Fs = 1.0/dt PMI = [] W = [] for i in range(0, f_fault.size): PMI.append(np.zeros(m)) W.append(np.zeros(m)) for i in range(0, m): if estimate_xi_func_params is None: a_i = estimate_xi_func(U, sigma, V, i) else: a_i = estimate_xi_func(U, sigma, V, i, estimate_xi_func_params) a_i = envelope(a_i) Y, df = fft(a_i, Fs) for k in range(0, f_fault.size): PMI[k][i] = diagnosefft(Y, df, f_fault[k], 1.0, f_side[k]) for k in range(0, f_fault.size): temp = 0.0 for i in range(0, m): if PMI[k][i] > PMItreshold: temp += PMI[k][i] for i in range(0, m): if PMI[k][i] > PMItreshold: W[k][i] = PMI[k][i]/temp return PMI, W
MIT License
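An illustrative call of diagnosefft from the record above on a synthetic envelope spectrum; the sampling rate, fault frequency, and noise floor are all made up, and the exact score depends on the random noise realization:

import numpy as np
from pyvib.diagnose import diagnosefft

Fs, N = 12000.0, 2 ** 16
df = Fs / N
rng = np.random.default_rng(0)
Y = 0.01 * rng.random(N // 2) + 0.001         # flat-ish noise floor
for k in range(1, 6):                          # plant five harmonics of an assumed 87 Hz fault
    Y[int(round(k * 87.0 / df))] = 1.0
score = diagnosefft(Y, df, charf=87.0, X=1.0, subband=0.0)
print(score)                                   # a large score suggests the fault is present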
googleapis/python-bigtable
google/cloud/bigtable/table.py
Table.exists
python
def exists(self): table_client = self._instance._client.table_admin_client try: table_client.get_table(request={"name": self.name, "view": VIEW_NAME_ONLY}) return True except NotFound: return False
Check whether the table exists.

For example:

.. literalinclude:: snippets_table.py
    :start-after: [START bigtable_api_check_table_exists]
    :end-before: [END bigtable_api_check_table_exists]
    :dedent: 4

:rtype: bool
:returns: True if the table exists, else False.
https://github.com/googleapis/python-bigtable/blob/a99bf88417d6aec03923447c70c2752f6bb5c459/google/cloud/bigtable/table.py#L411-L429
from typing import Set import warnings from google.api_core import timeout from google.api_core.exceptions import Aborted from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import NotFound from google.api_core.exceptions import RetryError from google.api_core.exceptions import ServiceUnavailable from google.api_core.gapic_v1.method import DEFAULT from google.api_core.retry import if_exception_type from google.api_core.retry import Retry from google.cloud._helpers import _to_bytes from google.cloud.bigtable.backup import Backup from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.row import AppendRow from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable import enums from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table as admin_messages_v2_pb2 from google.cloud.bigtable_admin_v2.types import ( bigtable_table_admin as table_admin_messages_v2_pb2, ) _MAX_BULK_MUTATIONS = 100000 VIEW_NAME_ONLY = enums.Table.View.NAME_ONLY RETRYABLE_MUTATION_ERRORS = (Aborted, DeadlineExceeded, ServiceUnavailable) RETRYABLE_CODES: Set[int] = set() for retryable in RETRYABLE_MUTATION_ERRORS: if retryable.grpc_status_code is not None: RETRYABLE_CODES.add(retryable.grpc_status_code.value[0]) class _BigtableRetryableError(Exception): DEFAULT_RETRY = Retry( predicate=if_exception_type(_BigtableRetryableError), initial=1.0, maximum=15.0, multiplier=2.0, deadline=120.0, ) class TableMismatchError(ValueError): class TooManyMutationsError(ValueError): class Table(object): def __init__(self, table_id, instance, mutation_timeout=None, app_profile_id=None): self.table_id = table_id self._instance = instance self._app_profile_id = app_profile_id self.mutation_timeout = mutation_timeout @property def name(self): project = self._instance._client.project instance_id = self._instance.instance_id table_client = self._instance._client.table_data_client return table_client.table_path( project=project, instance=instance_id, table=self.table_id ) def get_iam_policy(self): table_client = self._instance._client.table_admin_client resp = table_client.get_iam_policy(request={"resource": self.name}) return Policy.from_pb(resp) def set_iam_policy(self, policy): table_client = self._instance._client.table_admin_client resp = table_client.set_iam_policy( request={"resource": self.name, "policy": policy.to_pb()} ) return Policy.from_pb(resp) def test_iam_permissions(self, permissions): table_client = self._instance._client.table_admin_client resp = table_client.test_iam_permissions( request={"resource": self.name, "permissions": permissions} ) return list(resp.permissions) def column_family(self, column_family_id, gc_rule=None): return ColumnFamily(column_family_id, self, gc_rule=gc_rule) def row(self, row_key, filter_=None, 
append=False): warnings.warn( "This method will be deprecated in future versions. Please " "use Table.append_row(), Table.conditional_row() " "and Table.direct_row() methods instead.", PendingDeprecationWarning, stacklevel=2, ) if append and filter_ is not None: raise ValueError("At most one of filter_ and append can be set") if append: return AppendRow(row_key, self) elif filter_ is not None: return ConditionalRow(row_key, self, filter_=filter_) else: return DirectRow(row_key, self) def append_row(self, row_key): return AppendRow(row_key, self) def direct_row(self, row_key): return DirectRow(row_key, self) def conditional_row(self, row_key, filter_): return ConditionalRow(row_key, self, filter_=filter_) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return other.table_id == self.table_id and other._instance == self._instance def __ne__(self, other): return not self == other def create(self, initial_split_keys=[], column_families={}): table_client = self._instance._client.table_admin_client instance_name = self._instance.name families = { id: ColumnFamily(id, self, rule).to_pb() for (id, rule) in column_families.items() } table = admin_messages_v2_pb2.Table(column_families=families) split = table_admin_messages_v2_pb2.CreateTableRequest.Split splits = [split(key=_to_bytes(key)) for key in initial_split_keys] table_client.create_table( request={ "parent": instance_name, "table_id": self.table_id, "table": table, "initial_splits": splits, } )
Apache License 2.0
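A short sketch of the exists() check from the record above, assuming Application Default Credentials and an existing Bigtable instance; the project, instance, and table IDs are placeholders:

from google.cloud import bigtable

client = bigtable.Client(project='my-project', admin=True)   # admin=True is needed for table admin calls
instance = client.instance('my-instance')
table = instance.table('my-table')
print(table.exists())                                        # True if the table exists, else False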
iml130/nncg
nncg/nodes/cnn.py
MeanNode.__init__
python
def __init__(self, mean, prev_node): super().__init__(prev_node) self.in_dim = prev_node.out_dim self.out_dim = self.in_dim self.in_var = prev_node.out_var self.out_var = Allocation.allocate_var('float', 'x', self.out_dim) self.mean = mean
Initialize the node.

:param mean: The mean to be subtracted as scalar.
:param prev_node: The previous node.
https://github.com/iml130/nncg/blob/cb8a7145d785b394696540ab2572f5a017b2bbf2/nncg/nodes/cnn.py#L428-L439
from nncg.nodes.arithmetic import * from nncg.nodes.misc import * from nncg.nodes.controlflow import LoopNode from nncg.allocation import Allocation from nncg.tools import _len from nncg.quantization import QuantizedNode class Conv2DNode(Node): quantized = False in_var: Variable out_var: Variable access_pattern: List[int] def __init__(self, w: np.ndarray, b: np.ndarray, stride: tuple, padding: str, prev_node): self.in_var = prev_node.out_var x = self.in_var assert self.in_var.dim[2] == w.shape[2] assert w.shape[3] == b.shape[0] super().__init__(prev_node) self.in_dim = prev_node.out_dim self.w = w self.b = b self.stride = stride self.padding = padding self.H, self.W, self.C_IN = x.dim self.KH, self.KW, _, self.C_OUT = w.shape self.SH, self.SW = stride if padding == 'valid': H_OUT = int(np.ceil((self.H - self.KH + 1) / self.SH)) W_OUT = int(np.ceil((self.W - self.KW + 1) / self.SW)) self.pad_top = self.pad_bottom = self.pad_left = self.pad_right = 0 elif padding == 'same': H_OUT = int(np.ceil(float(self.H) / float(self.SH))) W_OUT = int(np.ceil(float(self.W) / float(self.SW))) self.pad_along_height = max((H_OUT - 1) * self.SH + self.KH - self.H, 0) self.pad_along_width = max((W_OUT - 1) * self.SW + self.KW - self.W, 0) self.pad_top = int(self.pad_along_height // 2) self.pad_bottom = int(self.pad_along_height - self.pad_top) self.pad_left = int(self.pad_along_width // 2) self.pad_right = int(self.pad_along_width - self.pad_left) else: raise Exception("Unknown padding.") self.in_var.change_padding([[self.pad_top, self.pad_bottom], [self.pad_left, self.pad_right], [0, 0]]) self.out_dim = (H_OUT, W_OUT, self.C_OUT) self.out_var = Allocation.allocate_var('float', 'x', self.out_dim) def lowering(self): b_var = Allocation.allocate_var(self.b.dtype, 'b', self.b.shape, init_data=self.b) out_var_idx = IndexedVariable(self.out_var) b_var_idx = IndexedVariable(b_var) bias_loop_descr = [ [0, self.out_dim[0], 1], [0, self.out_dim[1], 1], [0, self.out_dim[2], 1] ] bias_loops = LoopNode.create_loops_by_description(bias_loop_descr) b_h_loop = bias_loops[0] b_w_loop = bias_loops[1] b_c_loop = bias_loops[2] set_bias = AssignmentNode(out_var_idx, b_var_idx) b_c_loop.add_edge('content', set_bias) out_var_idx.set_indices([b_h_loop.get_node('var'), b_w_loop.get_node('var'), b_c_loop.get_node('var')]) b_var_idx.set_indices([b_c_loop.get_node('var')]) conv_loop_descr = [ [0, self.out_dim[0] * self.SH, self.stride[0]], [0, self.out_dim[1] * self.SW, self.stride[1]], [0, self.KH, 1], [0, self.KW, 1], [0, self.C_IN, 1], [0, self.C_OUT, 1] ] conv_loops = LoopNode.create_loops_by_description(conv_loop_descr) h_loop = conv_loops[0] w_loop = conv_loops[1] kh_loop = conv_loops[2] kw_loop = conv_loops[3] c_in_loop = conv_loops[4] c_out_loop = conv_loops[5] b_h_loop.add_edge('next', h_loop) w_var = Allocation.allocate_var(self.w.dtype, 'w', self.w.shape, init_data=self.w) out_var_idx = IndexedVariable(self.out_var) in_var_idx = IndexedVariable(self.in_var, False) w_var_idx = IndexedVariable(w_var, False) exp1 = Expression('{var} / {stride0}', var=h_loop.get_node('var'), stride0=Constant(self.stride[0])) exp2 = Expression('{var} / {stride1}', var=w_loop.get_node('var'), stride1=Constant(self.stride[1])) exp3 = Expression('{var1} + {var2}', var1=h_loop.get_node('var'), var2=kh_loop.get_node('var')) exp4 = Expression('{var1} + {var2}', var1=w_loop.get_node('var'), var2=kw_loop.get_node('var')) out_var_idx.set_indices([exp1, exp2, c_out_loop.get_node('var')]) in_var_idx.set_indices([exp3, exp4, c_in_loop.get_node('var')]) 
w_var_idx.set_indices( [kh_loop.get_node('var'), kw_loop.get_node('var'), c_in_loop.get_node('var'), c_out_loop.get_node('var')]) mac_node = MACNode(out_var_idx, w_var_idx, in_var_idx) c_out_loop.add_edge('content', mac_node) self.var_decls.append(self.out_var) self.const_decls.append(w_var) self.const_decls.append(b_var) self.add_edge('content', b_h_loop) def quantize(self, x_scale): min = np.min([np.min(self.w), np.min(self.b)]) max = np.max([np.max(self.w), np.max(self.b)]) self.scale = QuantizedNode.quantize_scale(min, max, 'int8') self.w = (self.w / self.scale).astype('int8') self.b = (self.b / self.scale / x_scale).astype('int16') class LeakyReLUNode(Node): def __init__(self, alpha, prev_node): super().__init__(prev_node) self.alpha = alpha self.in_var = prev_node.out_var self.in_dim = prev_node.out_dim self.out_dim = self.in_dim self.out_var = Allocation.allocate_var('float', 'x', self.out_dim) def lowering(self): loops, idxs = LoopNode.create_loops(self.in_var.dim) in_var_idx = IndexedVariable(self.in_var) out_var_idx = IndexedVariable(self.out_var) in_var_idx.set_indices(idxs) out_var_idx.set_indices(idxs) condition = Expression('{t_var_idx} < 0', t_var_idx=in_var_idx) if self.alpha == 0: false_exp = Constant(0) else: false_exp = Expression('{alpha} * {t_var_idx}', t_var_idx=in_var_idx) cond_node = ConditionalNode(out_var_idx, condition, false_exp, in_var_idx) loops[-1].add_edge('content', cond_node) self.var_decls.append(self.out_var) self.add_edge('content', loops[0]) class DenseNode(Node): def __init__(self, w, b, prev_node): super().__init__(prev_node) self.w = w self.b = b self.out_dim = w.shape[1] self.in_dim = prev_node.out_dim self.in_var = prev_node.out_var self.out_var = Allocation.allocate_var('float', 'x', self.out_dim) def lowering(self): b_var = Allocation.allocate_var('float', 'b', self.b.shape, init_data=self.b) b_var_idx = IndexedVariable(b_var) assert _len(self.in_dim) == 1 out_var_idx = IndexedVariable(self.out_var) b_loop = LoopNode(self.out_dim) out_var_idx.set_indices([b_loop.get_node('var')]) b_var_idx.set_indices([b_loop.get_node('var')]) set_bias = AssignmentNode(out_var_idx, b_var_idx) b_loop.add_edge('content', set_bias) out_var_idx = IndexedVariable(self.out_var) in_loop = LoopNode(self.in_dim) out_loop = LoopNode(self.out_dim) out_var_idx.set_indices([out_loop.get_node('var')]) w_var = Allocation.allocate_var('float', 'w', self.w.shape, init_data=self.w) in_var_idx = IndexedVariable(self.in_var, False) w_var_idx = IndexedVariable(w_var, False) in_var_idx.set_indices([in_loop.get_node('var')]) w_var_idx.set_indices([in_loop.get_node('var'), out_loop.get_node('var')]) mac_node = MACNode(out_var_idx, in_var_idx, w_var_idx) b_loop.add_edge('next', in_loop) in_loop.add_edge('content', out_loop) out_loop.add_edge('content', mac_node) self.var_decls.append(self.out_var) self.const_decls.append(w_var) self.const_decls.append(b_var) self.add_edge('content', b_loop) class FlattenNode(Node): def __init__(self, prev_node): super().__init__(prev_node) self.in_dim = prev_node.out_dim self.out_dim = np.prod(self.in_dim) self.in_var = prev_node.out_var self.out_var = Allocation.allocate_var('float', 'x', self.out_dim) def lowering(self): n = AssignmentNode(self.out_var, self.in_var) self.pointer_decls.append(self.out_var) self.add_edge('content', n) class MaxPoolingNode(Node): def __init__(self, size, stride, prev_node): super().__init__(prev_node) self.size = size self.stride = stride self.in_dim = prev_node.out_dim self.in_var = prev_node.out_var self.h_loop_end = 
self.in_dim[0] - size[0] + 1 self.w_loop_end = self.in_dim[1] - size[1] + 1 x_res = int(np.ceil(self.h_loop_end / stride[0])) y_res = int(np.ceil(self.w_loop_end / stride[1])) self.out_dim = (x_res, y_res, self.in_dim[2]) self.out_var = Allocation.allocate_var('float', 'x', self.out_dim) def lowering(self): h_loop = LoopNode(stop=self.h_loop_end, step=self.stride[0]) w_loop = LoopNode(stop=self.w_loop_end, step=self.stride[1]) h_loop.add_edge('content', w_loop) c_loop = LoopNode(self.in_dim[2]) w_loop.add_edge('content', c_loop) exp1 = Expression('{var} / {stride0}', var=h_loop.get_node('var'), stride0=Constant(self.stride[0])) exp2 = Expression('{var} / {stride1}', var=w_loop.get_node('var'), stride1=Constant(self.stride[1])) out_var_idx = IndexedVariable(self.out_var) in_var_idx = IndexedVariable(self.in_var, False) out_var_idx.set_indices([exp1, exp2, c_loop.get_node('var')]) in_var_idx.set_indices([h_loop.get_node('var'), w_loop.get_node('var'), c_loop.get_node('var')]) init = AssignmentNode(out_var_idx, in_var_idx) c_loop.add_edge('content', init) kh_loop = LoopNode(self.size[0]) init.add_edge('next', kh_loop) kw_loop = LoopNode(self.size[1]) kh_loop.add_edge('content', kw_loop) exp3 = Expression('{var1} + {var2}', var1=h_loop.get_node('var'), var2=kh_loop.get_node('var')) exp4 = Expression('{var1} + {var2}', var1=w_loop.get_node('var'), var2=kw_loop.get_node('var')) out_var_idx = IndexedVariable(self.out_var) in_var_idx = IndexedVariable(self.in_var, False) out_var_idx.set_indices([exp1, exp2, c_loop.get_node('var')]) in_var_idx.set_indices([exp3, exp4, c_loop.get_node('var')]) condition = Expression('{var_in} > {var_out}', var_in=in_var_idx, var_out=out_var_idx) n = ConditionalNode(out_var_idx, condition, in_var_idx, out_var_idx) kw_loop.add_edge('content', n) self.add_edge('content', h_loop) self.var_decls.append(self.out_var) class SoftmaxNode(Node): def __init__(self, prev_node): super().__init__(prev_node) self.in_dim = prev_node.out_dim if type(self.in_dim) is list: c = 0 for d in self.in_dim: if d > 1: c += 1 assert c == 1 self.out_dim = self.in_dim self.in_var = prev_node.out_var self.out_var = Allocation.allocate_var('float', 'x', self.out_dim) def lowering(self): t_var = Allocation.allocate_var('float', 'flat_x', np.prod(self.out_dim)) t_var_idx = IndexedVariable(t_var) n = AssignmentNode(t_var, self.in_var) sum_var = Allocation.allocate_var('float', 'sum', []) sum_loop = LoopNode(t_var.dim) sum_exp = Expression('{sum_var} += expf({t_var_idx});', sum_var=sum_var, t_var_idx=t_var_idx) sum_node = ExpressionNode(sum_exp) sum_loop.add_edge('content', sum_node) t_var_idx.set_indices([sum_loop.get_node('var')]) out_var_idx = IndexedVariable(self.out_var) loops, idxs = LoopNode.create_loops(self.in_var.dim) out_var_idx.set_indices(idxs) in_var_idx = IndexedVariable(self.in_var) in_var_idx.set_indices(idxs) exp = Expression('{out_var_idx} = expf({in_var_idx}) / {sum_var};', out_var_idx=out_var_idx, in_var_idx=in_var_idx, sum_var=sum_var) node = ExpressionNode(exp) loops[-1].add_edge('content', node) sum_loop.add_edge('next', loops[0]) n.add_edge('next', sum_loop) self.pointer_decls.append(t_var) self.var_decls.append(self.out_var) self.var_decls.append(sum_var) self.math_required = True self.add_edge('content', n) class MeanNode(Node): in_var: Variable out_var: Variable
Apache License 2.0
hyperiongray/trio-chrome-devtools-protocol
trio_cdp/__init__.py
CdpConnectionClosed.__init__
python
def __init__(self, reason): self.reason = reason
Constructor.

:param reason:
:type reason: wsproto.frame_protocol.CloseReason
https://github.com/hyperiongray/trio-chrome-devtools-protocol/blob/f41ca685b5c390f288592d135660c177fba5a945/trio_cdp/__init__.py#L43-L49
from __future__ import annotations from collections import defaultdict from contextlib import asynccontextmanager from dataclasses import dataclass import functools import itertools import json import logging import typing import cdp import trio from trio_websocket import ( ConnectionClosed as WsConnectionClosed, connect_websocket_url, open_websocket_url ) from .context import connection_context, session_context from .generated import * logger = logging.getLogger('trio_cdp') T = typing.TypeVar('T') MAX_WS_MESSAGE_SIZE = 2**24 class BrowserError(Exception): def __init__(self, obj): self.code = obj['code'] self.message = obj['message'] self.detail = obj.get('data') def __str__(self): return 'BrowserError<code={} message={}> {}'.format(self.code, self.message, self.detail) class CdpConnectionClosed(WsConnectionClosed):
MIT License
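A small illustration of catching the CdpConnectionClosed exception defined above and reading its reason attribute; the surrounding CDP session logic is elided and purely hypothetical:

from trio_cdp import CdpConnectionClosed

try:
    ...  # drive a CDP session here (omitted)
except CdpConnectionClosed as exc:
    print('CDP connection closed:', exc.reason)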
bohdon/maya-pulse
src/pulse/scripts/pulse/views/utils.py
getIcon
python
def getIcon(filename): return QtGui.QIcon(getIconPath(filename))
Return a QIcon for an icon by name

Args:
    filename: A string representing the icon's file name
https://github.com/bohdon/maya-pulse/blob/679bc1b858461694cc130c7b7ea0c6f1b3838fe1/src/pulse/scripts/pulse/views/utils.py#L153-L160
import logging import os import traceback from functools import partial import maya.cmds as cmds from ..vendor.Qt import QtCore, QtWidgets, QtGui LOG = logging.getLogger(__name__) ICON_DIR = os.path.join(os.path.dirname(__file__), 'icons') _REPEAT_COMMAND = 'python("{0}._repeatLastFunc()")'.format(__name__) _REPEATABLE_FUNC = None _DPI_SCALE = 1.0 if hasattr(cmds, "mayaDpiSetting"): _DPI_SCALE = cmds.mayaDpiSetting(q=True, realScaleValue=True) def _repeatLastFunc(): if _REPEATABLE_FUNC is not None: _REPEATABLE_FUNC() def _softUpdateWrapper(wrapper, wrapped): attrs = ['__name__', '__doc__'] for attr in attrs: if hasattr(wrapped, attr): setattr(wrapper, attr, getattr(wrapped, attr)) return wrapper def _softWraps(wrapped): return partial(_softUpdateWrapper, wrapped=wrapped) def repeatable(func): @_softWraps(func) def wrapper(*args, **kwargs): global _REPEATABLE_FUNC _REPEATABLE_FUNC = partial(func, *args, **kwargs) result = func(*args, **kwargs) try: cmds.repeatLast( ac=_REPEAT_COMMAND, acl=func.__name__) except RuntimeError: pass return result return wrapper def repeatPartial(func, *args, **kwargs): return partial(repeatable(func), *args, **kwargs) def undoable(func): @_softWraps(func) def wrapper(*args, **kwargs): cmds.undoInfo(openChunk=True) try: func(*args, **kwargs) except Exception as e: traceback.print_exc() cmds.error(e) finally: cmds.undoInfo(closeChunk=True) return wrapper def undoPartial(func, *args, **kwargs): return partial(undoable(func), *args, **kwargs) def undoAndRepeatable(func): return repeatable(undoable(func)) def undoAndRepeatPartial(func, *args, **kwargs): return partial(undoAndRepeatable(func), *args, **kwargs) def dpiScale(value): return value * _DPI_SCALE def getIconPath(filename): return os.path.join(ICON_DIR, filename) def getIconPixmap(filename): return QtGui.QPixmap(getIconPath(filename))
MIT License
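A minimal sketch of using getIcon from the record above inside Maya, where a Qt application already exists; both the import path and the icon file name are assumptions, not taken from the record:

from pulse.views.utils import getIcon   # assumed import path based on the module location above

icon = getIcon('copy.png')              # 'copy.png' is a hypothetical file under the icons dir
print(icon.isNull())                    # False if the file exists in the icons directory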
pandaproject/panda
client/utils.py
get_free_disk_space
python
def get_free_disk_space(p): s = os.statvfs(p) return s.f_frsize * s.f_bavail
Returns the number of free bytes on the drive that ``p`` is on
https://github.com/pandaproject/panda/blob/133baa47882a289773a30c9656e2ea4efe569387/client/utils.py#L12-L17
import os def get_total_disk_space(p): s = os.statvfs(p) return s.f_frsize * s.f_blocks
MIT License
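A tiny usage sketch for the get_free_disk_space helper above; it relies on os.statvfs and therefore only works on POSIX systems, and the path is illustrative:

from client.utils import get_free_disk_space   # module path as given in the record above

free_bytes = get_free_disk_space('/')
print('%.1f GiB free' % (free_bytes / 1024.0 ** 3))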
locustio/locust
locust/user/task.py
filter_tasks_by_tags
python
def filter_tasks_by_tags(task_holder, tags=None, exclude_tags=None, checked=None): new_tasks = [] if checked is None: checked = {} for task in task_holder.tasks: if task in checked: if checked[task]: new_tasks.append(task) continue passing = True if hasattr(task, "tasks"): filter_tasks_by_tags(task, tags, exclude_tags, checked) passing = len(task.tasks) > 0 else: if tags is not None: passing &= "locust_tag_set" in dir(task) and len(task.locust_tag_set & tags) > 0 if exclude_tags is not None: passing &= "locust_tag_set" not in dir(task) or len(task.locust_tag_set & exclude_tags) == 0 if passing: new_tasks.append(task) checked[task] = passing task_holder.tasks = new_tasks
Function used by Environment to recursively remove any tasks/TaskSets from a TaskSet/User that shouldn't be executed according to the tag options
https://github.com/locustio/locust/blob/58487b526075826584690cdd2ad4ab1218afa146/locust/user/task.py#L132-L161
import logging import random import sys import traceback from time import time from typing import Any, Callable, List, Union from typing_extensions import final import gevent from gevent import GreenletExit from locust.exception import InterruptTaskSet, RescheduleTask, RescheduleTaskImmediately, StopUser, MissingWaitTimeError logger = logging.getLogger(__name__) LOCUST_STATE_RUNNING, LOCUST_STATE_WAITING, LOCUST_STATE_STOPPING = ["running", "waiting", "stopping"] def task(weight=1): def decorator_func(func): if func.__name__ in ["on_stop", "on_start"]: logging.warning( "You have tagged your on_stop/start function with @task. This will make the method get called both as a task AND on stop/start." ) if func.__name__ == "run": raise Exception( "User.run() is a method used internally by Locust, and you must not override it or register it as a task" ) func.locust_task_weight = weight return func """ Check if task was used without parentheses (not called), like this:: @task def my_task() pass """ if callable(weight): func = weight weight = 1 return decorator_func(func) else: return decorator_func def tag(*tags): def decorator_func(decorated): if hasattr(decorated, "tasks"): decorated.tasks = list(map(tag(*tags), decorated.tasks)) else: if "locust_tag_set" not in decorated.__dict__: decorated.locust_tag_set = set() decorated.locust_tag_set |= set(tags) return decorated if len(tags) == 0 or callable(tags[0]): raise ValueError("No tag name was supplied") return decorator_func def get_tasks_from_base_classes(bases, class_dict): new_tasks = [] for base in bases: if hasattr(base, "tasks") and base.tasks: new_tasks += base.tasks if "tasks" in class_dict and class_dict["tasks"] is not None: tasks = class_dict["tasks"] if isinstance(tasks, dict): tasks = tasks.items() for task in tasks: if isinstance(task, tuple): task, count = task for _ in range(count): new_tasks.append(task) else: new_tasks.append(task) for item in class_dict.values(): if "locust_task_weight" in dir(item): for i in range(item.locust_task_weight): new_tasks.append(item) return new_tasks
MIT License
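An illustrative direct call of filter_tasks_by_tags from the record above; normally Locust's Environment applies this automatically from the --tags/--exclude-tags options, so calling it by hand is only for demonstration, and the user class is made up:

from locust import HttpUser, task, tag
from locust.user.task import filter_tasks_by_tags

class DemoUser(HttpUser):
    host = 'http://localhost'           # placeholder host

    @tag('smoke')
    @task
    def ping(self):
        self.client.get('/ping')

    @task
    def heavy(self):
        self.client.get('/heavy')

filter_tasks_by_tags(DemoUser, tags={'smoke'})
print([t.__name__ for t in DemoUser.tasks])   # only the 'smoke'-tagged task remains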
ktok07b6/polyphony
polyphony/compiler/veritestgen.py
VerilogTestGen.generate
python
def generate(self): clk_period = 10 self.hdlmodule.add_constant('CLK_PERIOD', clk_period) self.hdlmodule.add_constant('CLK_HALF_PERIOD', int(clk_period / 2)) self.hdlmodule.add_constant('INITIAL_RESET_SPAN', clk_period * 10) self.set_indent(2) self._generate_main() self.set_indent(-2) main_code = self.result() self.codes = [] self._generate_include() self._generate_module() self.set_indent(2) self._generate_monitor_task() self.set_indent(-2) self.emit(main_code) self.emit('endmodule\n')
output verilog module format:

module {module_name}
{params}
{portdefs}
{localparams}
{internal_regs}
{internal_wires}
{functions}
{fsm}
endmodule
https://github.com/ktok07b6/polyphony/blob/657c5c7440520db6b4985970bd50547407693ac4/polyphony/compiler/veritestgen.py#L18-L49
from .ahdl import * from .env import env from .vericodegen import VerilogCodeGen from .hdlmodule import RAMModule from .hdlinterface import * from logging import getLogger logger = getLogger(__name__) class VerilogTestGen(VerilogCodeGen): def __init__(self, hdlmodule): super().__init__(hdlmodule) clk = self.hdlmodule.gen_sig('clk', 1, {'reserved'}) rst = self.hdlmodule.gen_sig('rst', 1, {'reserved'}) self.hdlmodule.add_internal_reg(clk) self.hdlmodule.add_internal_reg(rst)
MIT License
voxel-fox-ltd/novus
discord/client.py
Client.get_channel
python
def get_channel(self, id: int, /) -> Optional[Union[GuildChannel, Thread, PrivateChannel]]: return self._connection.get_channel(id)
Returns a channel or thread with the given ID.

Parameters
-----------
id: :class:`int`
    The ID to search for.

Returns
--------
Optional[Union[:class:`.abc.GuildChannel`, :class:`.Thread`, :class:`.abc.PrivateChannel`]]
    The returned channel or ``None`` if not found.
https://github.com/voxel-fox-ltd/novus/blob/4b3a3f918b6212ef2167002c4dbfe910727c04b0/discord/client.py#L708-L721
from __future__ import annotations import asyncio import logging import signal import sys import traceback from typing import Any, Callable, Coroutine, Dict, Generator, List, Optional, Sequence, TYPE_CHECKING, Tuple, TypeVar, Union import aiohttp from .user import User, ClientUser from .invite import Invite from .template import Template from .widget import Widget from .guild import Guild from .emoji import Emoji from .channel import _threaded_channel_factory, PartialMessageable from .enums import ChannelType from .mentions import AllowedMentions from .errors import * from .enums import Status, VoiceRegion from .flags import ApplicationFlags, Intents from .gateway import * from .activity import ActivityTypes, BaseActivity, create_activity from .voice_client import VoiceClient from .http import HTTPClient from .state import ConnectionState from . import utils from .utils import MISSING from .object import Object from .backoff import ExponentialBackoff from .webhook import Webhook from .iterators import GuildIterator from .appinfo import AppInfo from .ui.action_row import MessageComponents from .stage_instance import StageInstance from .threads import Thread from .sticker import GuildSticker, StandardSticker, StickerPack, _sticker_factory from .application_commands import ApplicationCommand if TYPE_CHECKING: from .abc import SnowflakeTime, PrivateChannel, GuildChannel, Snowflake from .channel import DMChannel from .message import Message from .member import Member from .voice_client import VoiceProtocol __all__ = ( 'Client', ) Coro = TypeVar('Coro', bound=Callable[..., Coroutine[Any, Any, Any]]) _log = logging.getLogger(__name__) def _cancel_tasks(loop: asyncio.AbstractEventLoop) -> None: tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()} if not tasks: return _log.info('Cleaning up after %d tasks.', len(tasks)) for task in tasks: task.cancel() loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True)) _log.info('All tasks finished cancelling.') for task in tasks: if task.cancelled(): continue if task.exception() is not None: loop.call_exception_handler({ 'message': 'Unhandled exception during Client.run shutdown.', 'exception': task.exception(), 'task': task }) def _cleanup_loop(loop: asyncio.AbstractEventLoop) -> None: try: _cancel_tasks(loop) loop.run_until_complete(loop.shutdown_asyncgens()) finally: _log.info('Closing the event loop.') loop.close() class Client: def __init__( self, *, loop: Optional[asyncio.AbstractEventLoop] = None, **options: Any, ): self.ws: DiscordWebSocket = None self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop() if loop is None else loop self._listeners: Dict[str, List[Tuple[asyncio.Future, Callable[..., bool]]]] = {} self.shard_id: Optional[int] = options.get('shard_id') self.shard_count: Optional[int] = options.get('shard_count') connector: Optional[aiohttp.BaseConnector] = options.pop('connector', None) proxy: Optional[str] = options.pop('proxy', None) proxy_auth: Optional[aiohttp.BasicAuth] = options.pop('proxy_auth', None) unsync_clock: bool = options.pop('assume_unsync_clock', True) self.http: HTTPClient = HTTPClient(connector, proxy=proxy, proxy_auth=proxy_auth, unsync_clock=unsync_clock, loop=self.loop) self._handlers: Dict[str, Callable] = { 'ready': self._handle_ready } self._hooks: Dict[str, Callable] = { 'before_identify': self._call_before_identify_hook } self._enable_debug_events: bool = options.pop('enable_debug_events', False) self._connection: ConnectionState = self._get_state(**options) 
self._connection.shard_count = self.shard_count self._closed: bool = False self._ready: asyncio.Event = asyncio.Event() self._connection._get_websocket = self._get_websocket self._connection._get_client = lambda: self if VoiceClient.warn_nacl: VoiceClient.warn_nacl = False _log.warning("PyNaCl is not installed, voice will NOT be supported") def _get_websocket(self, guild_id: Optional[int] = None, *, shard_id: Optional[int] = None) -> DiscordWebSocket: return self.ws def _get_state(self, **options: Any) -> ConnectionState: return ConnectionState(dispatch=self.dispatch, handlers=self._handlers, hooks=self._hooks, http=self.http, loop=self.loop, **options) def _handle_ready(self) -> None: self._ready.set() @property def latency(self) -> float: ws = self.ws return float('nan') if not ws else ws.latency def is_ws_ratelimited(self) -> bool: if self.ws: return self.ws.is_ratelimited() return False @property def user(self) -> Optional[ClientUser]: return self._connection.user @property def guilds(self) -> List[Guild]: return self._connection.guilds @property def emojis(self) -> List[Emoji]: return self._connection.emojis @property def stickers(self) -> List[GuildSticker]: return self._connection.stickers @property def cached_messages(self) -> Sequence[Message]: return utils.SequenceProxy(self._connection._messages or []) @property def private_channels(self) -> List[PrivateChannel]: return self._connection.private_channels @property def voice_clients(self) -> List[VoiceProtocol]: return self._connection.voice_clients @property def application_id(self) -> Optional[int]: return self._connection.application_id @property def application_flags(self) -> ApplicationFlags: return self._connection.application_flags def is_ready(self) -> bool: return self._ready.is_set() async def _run_event(self, coro: Callable[..., Coroutine[Any, Any, Any]], event_name: str, *args: Any, **kwargs: Any) -> None: try: await coro(*args, **kwargs) except asyncio.CancelledError: pass except Exception: try: await self.on_error(event_name, *args, **kwargs) except asyncio.CancelledError: pass def _schedule_event(self, coro: Callable[..., Coroutine[Any, Any, Any]], event_name: str, *args: Any, **kwargs: Any) -> asyncio.Task: wrapped = self._run_event(coro, event_name, *args, **kwargs) return asyncio.create_task(wrapped, name=f'Novus: {event_name}') def dispatch(self, event: str, *args: Any, **kwargs: Any) -> None: _log.debug('Dispatching event %s', event) method = 'on_' + event listeners = self._listeners.get(event) if listeners: removed = [] for i, (future, condition) in enumerate(listeners): if future.cancelled(): removed.append(i) continue try: result = condition(*args) except Exception as exc: future.set_exception(exc) removed.append(i) else: if result: if len(args) == 0: future.set_result(None) elif len(args) == 1: future.set_result(args[0]) else: future.set_result(args) removed.append(i) if len(removed) == len(listeners): self._listeners.pop(event) else: for idx in reversed(removed): del listeners[idx] try: coro = getattr(self, method) except AttributeError: pass else: self._schedule_event(coro, method, *args, **kwargs) async def on_error(self, event_method: str, *args: Any, **kwargs: Any) -> None: print(f'Ignoring exception in {event_method}', file=sys.stderr) traceback.print_exc() async def _call_before_identify_hook(self, shard_id: Optional[int], *, initial: bool = False) -> None: await self.before_identify_hook(shard_id, initial=initial) async def before_identify_hook(self, shard_id: Optional[int], *, initial: bool = 
False) -> None: if not initial: await asyncio.sleep(5.0) async def login(self, token: str) -> None: _log.info('logging in using static token') data = await self.http.static_login(token.strip()) self._connection.user = ClientUser(state=self._connection, data=data) async def connect(self, *, reconnect: bool = True) -> None: backoff = ExponentialBackoff() ws_params = { 'initial': True, 'shard_id': self.shard_id, } while not self.is_closed(): try: coro = DiscordWebSocket.from_client(self, **ws_params) self.ws = await asyncio.wait_for(coro, timeout=60.0) ws_params['initial'] = False while True: await self.ws.poll_event() except ReconnectWebSocket as e: _log.info('Got a request to %s the websocket.', e.op) self.dispatch('disconnect') ws_params.update(sequence=self.ws.sequence, resume=e.resume, session=self.ws.session_id) continue except (OSError, HTTPException, GatewayNotFound, ConnectionClosed, aiohttp.ClientError, asyncio.TimeoutError) as exc: self.dispatch('disconnect') if not reconnect: await self.close() if isinstance(exc, ConnectionClosed) and exc.code == 1000: return raise if self.is_closed(): return if isinstance(exc, OSError) and exc.errno in (54, 10054): ws_params.update(sequence=self.ws.sequence, initial=False, resume=True, session=self.ws.session_id) continue if isinstance(exc, ConnectionClosed): if exc.code == 4014: raise PrivilegedIntentsRequired(exc.shard_id) from None if exc.code != 1000: await self.close() raise retry = backoff.delay() _log.exception("Attempting a reconnect in %.2fs", retry) await asyncio.sleep(retry) ws_params.update(sequence=self.ws.sequence, resume=True, session=self.ws.session_id) async def close(self) -> None: if self._closed: return self._closed = True for voice in self.voice_clients: try: await voice.disconnect(force=True) except Exception: pass if self.ws is not None and self.ws.open: await self.ws.close(code=1000) await self.http.close() self._ready.clear() def clear(self) -> None: self._closed = False self._ready.clear() self._connection.clear() self.http.recreate() async def start(self, token: str, *, reconnect: bool = True) -> None: await self.login(token) await self.connect(reconnect=reconnect) def run(self, *args: Any, **kwargs: Any) -> None: loop = self.loop try: loop.add_signal_handler(signal.SIGINT, lambda: loop.stop()) loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop()) except NotImplementedError: pass async def runner(): try: await self.start(*args, **kwargs) finally: if not self.is_closed(): await self.close() def stop_loop_on_completion(f): loop.stop() future = asyncio.ensure_future(runner(), loop=loop) future.add_done_callback(stop_loop_on_completion) try: loop.run_forever() except KeyboardInterrupt: _log.info('Received signal to terminate bot and event loop.') finally: future.remove_done_callback(stop_loop_on_completion) _log.info('Cleaning up tasks.') _cleanup_loop(loop) if not future.cancelled(): try: return future.result() except KeyboardInterrupt: return None def is_closed(self) -> bool: return self._closed @property def activity(self) -> Optional[ActivityTypes]: return create_activity(self._connection._activity) @activity.setter def activity(self, value: Optional[ActivityTypes]) -> None: if value is None: self._connection._activity = None elif isinstance(value, BaseActivity): self._connection._activity = value.to_dict() else: raise TypeError('activity must derive from BaseActivity.') @property def status(self): if self._connection._status in set(state.value for state in Status): return Status(self._connection._status) return 
Status.online @status.setter def status(self, value): if value is Status.offline: self._connection._status = 'invisible' elif isinstance(value, Status): self._connection._status = str(value) else: raise TypeError('status must derive from Status.') @property def allowed_mentions(self) -> Optional[AllowedMentions]: return self._connection.allowed_mentions @allowed_mentions.setter def allowed_mentions(self, value: Optional[AllowedMentions]) -> None: if value is None or isinstance(value, AllowedMentions): self._connection.allowed_mentions = value else: raise TypeError(f'allowed_mentions must be AllowedMentions not {value.__class__!r}') @property def intents(self) -> Intents: return self._connection.intents @property def users(self) -> List[User]: return list(self._connection._users.values())
MIT License
aziele/alfpy
alfpy/word_bool_distance.py
Distance.pwdist_rogerstanimoto
python
def pwdist_rogerstanimoto(self, seq1idx, seq2idx):
    u = self[seq1idx]
    v = self[seq2idx]
    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
    r = float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))
    return r
Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.

Returns: distance value (double)
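A minimal sketch of the underlying formula, using two hypothetical boolean word-presence vectors in place of a full `Distance` instance:

```python
import numpy as np

# Hypothetical boolean vectors standing in for self[seq1idx] and self[seq2idx].
u = np.array([True, False, True, True], dtype=bool)
v = np.array([True, True, False, True], dtype=bool)

nff = (~u & ~v).sum()  # both False
nft = (~u & v).sum()   # False in u, True in v
ntf = (u & ~v).sum()   # True in u, False in v
ntt = (u & v).sum()    # both True

r = float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
print(r)  # ~0.667 for these vectors
```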
https://github.com/aziele/alfpy/blob/e0782e9551458ef17ab29df8af13fc0f8925e894/alfpy/word_bool_distance.py#L97-L109
import numpy as np from .utils import distance def _nbool_correspond_ft_tf(u, v): not_u = ~u not_v = ~v nft = (not_u & v).sum() ntf = (u & not_v).sum() return (nft, ntf) def _nbool_correspond_all(u, v): not_u = ~u not_v = ~v nff = (not_u & not_v).sum() nft = (not_u & v).sum() ntf = (u & not_v).sum() ntt = (u & v).sum() return (nff, nft, ntf, ntt) class Distance(distance.Distance): def pwdist_dice(self, seq1idx, seq2idx): u = self[seq1idx] v = self[seq2idx] ntt = (u & v).sum() (nft, ntf) = _nbool_correspond_ft_tf(u, v) return float(ntf + nft) / float(2.0 * ntt + ntf + nft) def pwdist_yule(self, seq1idx, seq2idx): u = self[seq1idx] v = self[seq2idx] (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft)
MIT License
nervanasystems/neon
neon/data/datasets.py
Dataset.gen_iterators
python
def gen_iterators(self):
    raise NotImplementedError()
Method that generates the data set iterators for the train, test and validation data sets.
This method needs to set the instance data_set attribute to a dictionary of data iterators.

Returns:
    dict: dictionary with the various data set iterators
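A sketch of how a hypothetical subclass might satisfy this contract; the iterator constructors are placeholders, not part of neon's API:

```python
class MyDataset(Dataset):
    def gen_iterators(self):
        # build_train_iter / build_valid_iter are hypothetical helpers that
        # return neon data iterators for each split.
        self._data_dict = {
            'train': build_train_iter(),
            'valid': build_valid_iter(),
        }
        return self._data_dict
```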
https://github.com/nervanasystems/neon/blob/8c3fb8a93b4a89303467b25817c60536542d08bd/neon/data/datasets.py#L158-L168
from __future__ import division from future import standard_library standard_library.install_aliases() from future.moves.urllib.request import Request, urlopen import logging import os import sys import zipfile from neon import NervanaObject, logger as neon_logger from neon.util.compat import PY3 logger = logging.getLogger(__name__) class Dataset(NervanaObject): def __init__(self, filename, url, size, path='.', subset_pct=100): super(Dataset, self).__init__(name=None) self.filename = filename self.url = url self.size = size self.path = path self.subset_pct = subset_pct self._data_dict = None if subset_pct != 100: raise NotImplemented('subset percentage feature is not yet implemented') def serialize(self): return self.get_description() def load_zip(self, filename, size): workdir, filepath = self._valid_path_append(self.path, '', filename) if not os.path.exists(filepath): self.fetch_dataset(self.url, filename, filepath, size) if '.zip' in filepath: zip_ref = zipfile.ZipFile(filepath) zip_ref.extractall(workdir) zip_ref.close() filepath = filepath.split('.zip')[0] return filepath @staticmethod def _valid_path_append(path, *args): full_path = os.path.expanduser(path) res = [] if not os.path.exists(full_path): os.makedirs(full_path) if not os.path.isdir(full_path): raise ValueError("path: {0} is not a valid directory".format(path)) for suffix_path in args: res.append(os.path.join(full_path, suffix_path)) if len(res) == 0: return path elif len(res) == 1: return res[0] else: return res @staticmethod def fetch_dataset(url, sourcefile, destfile, totalsz): req = Request(os.path.join(url, sourcefile), headers={'User-Agent': 'neon'}) cloudfile = urlopen(req) neon_logger.display("Downloading file: {}".format(destfile)) blockchar = u'\u2588' with open(destfile, 'wb') as f: data_read = 0 chunksz = 1024**2 while 1: data = cloudfile.read(chunksz) if not data: break data_read = min(totalsz, data_read + chunksz) progress_string = u'Download Progress |{:<50}| '.format( blockchar * int(float(data_read) / totalsz * 50)) sys.stdout.write('\r') if PY3: sys.stdout.write(progress_string) else: sys.stdout.write(progress_string.encode("utf-8")) sys.stdout.flush() f.write(data) neon_logger.display("Download Complete")
Apache License 2.0
uber/doubles
doubles/patch.py
Patch.set_value
python
def set_value(self, value):
    self._value = value
    setattr(self.target, self._name, value)
Set the value of the target.

:param obj value: The value to set.
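A rough usage sketch, assuming a patch target path that exists in the caller's codebase:

```python
patch = Patch('mypackage.module.some_attribute')  # path is illustrative
replacement = object()
patch.set_value(replacement)
# mypackage.module.some_attribute now points at `replacement`;
# the original object was captured when the Patch was created.
```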
https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/patch.py#L25-L31
from doubles.exceptions import VerifyingDoubleError from doubles.utils import get_module, get_path_components class Patch(object): def __init__(self, target): module_path, self._name = get_path_components(target) self.target = get_module(module_path, target) self._capture_original_object() self.set_value(None) def _capture_original_object(self): try: self._doubles_target = getattr(self.target, self._name) except AttributeError: raise VerifyingDoubleError(self.target, self._name)
MIT License
gordonbrander/lettersmith_py
lettersmith/query.py
takes
python
def takes(n):
    def take_bound(iterable):
        return islice(iterable, n)
    return take_bound
Take `n` elements from `iterable`.
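For example, `takes` returns a reusable slicing function:

```python
take_three = takes(3)
list(take_three(range(10)))  # [0, 1, 2]
list(take_three('abcdef'))   # ['a', 'b', 'c']
```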
https://github.com/gordonbrander/lettersmith_py/blob/96ddaf1268ac53062b2e7b1d05d06dc092865666/lettersmith/query.py#L59-L68
from itertools import islice from random import sample def filters(predicate): def filter_bound(iterable): return filter(predicate, iterable) return filter_bound def rejects(predicate): def reject_bound(iterable): for item in iterable: if not predicate(item): yield item return reject_bound def maps(a2b): def map_bound(iterable): return map(a2b, iterable) return map_bound def sorts(key=None, reverse=False): def sort_bound(iterable): return sorted(iterable, key=key, reverse=reverse) return sort_bound
MIT License
c01db33f/reil
x86/misc.py
x86_setne
python
def x86_setne(ctx, i):
    conditional_set(ctx, i, conditional.NE)
set if not equal
https://github.com/c01db33f/reil/blob/3deec3a3bb69aae51cc0d728d5f83156cfba2ab6/x86/misc.py#L403-L405
import reil.error from reil.shorthand import * from reil.utilities import * import reil.x86.conditional as conditional import reil.x86.operand as operand from reil.x86.utilities import * def conditional_set(ctx, i, condition): c = conditional.condition(ctx, condition) operand.set(ctx, i, 0, c) def _convert(ctx, size): a = ctx.accumulator prev_a = a a = ctx.tmp(size) result = ctx.tmp(size * 2) high_word = ctx.tmp(size) low_word = ctx.tmp(size) ctx.emit( str_ (prev_a, a)) ctx.emit( sex_ (a, result)) ctx.emit( str_ (result, low_word)) ctx.emit( lshr_ (result, imm(size, 8), high_word)) ctx.emit( str_ (low_word, ctx.accumulator)) ctx.emit( str_ (high_word, ctx.data)) def _convert_2(ctx, size): a = ctx.accumulator prev_a = a a = ctx.tmp(size) result = ctx.tmp(size * 2) ctx.emit( str_ (prev_a, a)) ctx.emit( sex_ (a, result)) ctx.emit( str_ (result, ctx.accumulator)) def x86_arpl(ctx, i): dest_seg = operand.get(ctx, i, 0) src_seg = operand.get(ctx, i, 1) dest_rpl = ctx.tmp(16) src_rpl = ctx.tmp(16) tmp0 = ctx.tmp(32) tmp1 = ctx.tmp(8) result_seg = ctx.tmp(16) tmp2 = ctx.tmp(16) ctx.emit( lshr_ (dest_seg, imm(14, 8), dest_rpl)) ctx.emit( lshr_ (src_seg, imm(14, 8), src_rpl)) ctx.emit( sub_ (dest_seg, src_seg, tmp0)) ctx.emit( and_ (tmp0, imm(sign_bit(32), 32), tmp0)) ctx.emit( bisz_ (tmp0, tmp1)) ctx.emit( jcc_ ('check_passed')) ctx.emit( str_ (imm(1, 8), r('zf', 8))) ctx.emit( and_ (dest_seg, imm(0b0011111111111111, 16), result_seg)) ctx.emit( and_ (src_seg, imm(0b1100000000000000, 16), tmp2)) ctx.emit( or_ (dest_seg, tmp2, dest_seg)) operand.set(ctx, i, 0, result_seg) ctx.emit( jcc_ (imm(1, 8), 'done')) ctx.emit('check_passed') ctx.emit( str_ (imm(0, 8), r('zf', 8))) ctx.emit('done') ctx.emit( nop_()) def x86_bswap(ctx, i): a = operand.get(ctx, i, 0) bytes = unpack(ctx, a, 8) bytes.reverse() value = pack(ctx, bytes) operand.set(ctx, i, 0, value) def x86_clc(ctx, i): ctx.emit( str_ (imm(0, 8), r('cf', 8))) def x86_cld(ctx, i): ctx.emit( str_ (imm(0, 8), r('df', 8))) def x86_cwd(ctx, i): _convert(ctx, 16) def x86_cdq(ctx, i): _convert(ctx, 32) def x86_cqo(ctx, i): _convert(ctx, 64) def x86_cbw(ctx, i): _convert_2(ctx, 8) def x86_cwde(ctx, i): _convert_2(ctx, 16) def x86_cdqe(ctx, i): _convert_2(ctx, 32) def x86_cmc(ctx, i): ctx.emit( xor_ (r('cf', 8), imm(1, 8), r('cf', 8))) def x86_cmpxchg(ctx, i): a = ctx.accumulator b = operand.get(ctx, i, 0) c = operand.get(ctx, i, 1) if b.size != a.size: prev_a = a a = ctx.tmp(b.size) ctx.emit( str_ (prev_a, a)) tmp0 = ctx.tmp(8) ctx.emit( equ_ (a, b, tmp0)) ctx.emit( jcc_ (tmp0, 'equal')) ctx.emit('not-equal') ctx.emit( str_ (c, ctx.accumulator)) ctx.emit( str_ (imm(0, 8), r('zf', 8))) ctx.emit( jcc_ (imm(1, 8), 'done')) ctx.emit('equal') operand.set(ctx, i, 0, c) ctx.emit( str_ (imm(1, 8), r('zf', 8))) ctx.emit('done') ctx.emit( nop_()) def x86_cmpxchg8b(ctx, i): edx = operand.get_register(ctx, i, 'edx') eax = operand.get_register(ctx, i, 'eax') edx_eax = ctx.tmp(64) ecx = operand.get_register(ctx, i, 'ecx') ebx = operand.get_register(ctx, i, 'ebx') ecx_ebx = ctx.tmp(64) value = operand.get(ctx, i, 0) tmp0 = ctx.tmp(64) tmp1 = ctx.tmp(8) result_eax = ctx.tmp(32) result_edx = ctx.tmp(32) ctx.emit( lshl_ (edx, imm(32, 8), edx_eax)) ctx.emit( str_ (eax, tmp0)) ctx.emit( or_ (edx_eax, tmp0, edx_eax)) ctx.emit( equ_ (value, edx_eax, tmp1)) ctx.emit( jcc_ (tmp1, 'equal')) ctx.emit('not-equal') ctx.emit( str_ (value, result_eax)) ctx.emit( lshr_ (value, imm(32, 8), value)) ctx.emit( str_ (value, result_edx)) operand.set_register(ctx, i, 'edx', result_edx) 
operand.set_register(ctx, i, 'eax', result_eax) ctx.emit( str_ (imm(0, 8), r('zf', 8))) ctx.emit( jcc_ (imm(1, 8), 'done')) ctx.emit('equal') ctx.emit( lshl_ (ecx, imm(32, 8), ecx_ebx)) ctx.emit( str_ (ebx, tmp0)) ctx.emit( or_ (ecx_ebx, tmp0, ecx_ebx)) operand.set(ctx, i, 0, ecx_ebx) ctx.emit( str_ (imm(1, 8), r('zf', 8))) ctx.emit('done') ctx.emit( nop_()) def x86_cmpxchg16b(ctx, i): rdx = operand.get_register(ctx, i, 'rdx') rax = operand.get_register(ctx, i, 'rax') rdx_rax = ctx.tmp(128) rcx = operand.get_register(ctx, i, 'rcx') rbx = operand.get_register(ctx, i, 'rbx') rcx_rbx = ctx.tmp(128) value = operand.get(ctx, i, 0) tmp0 = ctx.tmp(128) tmp1 = ctx.tmp(8) result_rax = ctx.tmp(64) result_rdx = ctx.tmp(64) ctx.emit( lshl_ (rdx, imm(64, 8), rdx_rax)) ctx.emit( str_ (rax, tmp0)) ctx.emit( or_ (rdx_rax, tmp0, rdx_rax)) ctx.emit( equ_ (value, rdx_rax, tmp1)) ctx.emit( jcc_ (tmp1, 'equal')) ctx.emit('not-equal') ctx.emit( str_ (value, result_rax)) ctx.emit( lshr_ (value, imm(64, 8), value)) ctx.emit( str_ (value, result_rdx)) operand.set_register(ctx, i, 'rdx', result_rdx) operand.set_register(ctx, i, 'rax', result_rax) ctx.emit( str_ (imm(0, 8), r('zf', 8))) ctx.emit( jcc_ (imm(1, 8), 'done')) ctx.emit('equal') ctx.emit( lshl_ (rcx, imm(64, 8), rcx_rbx)) ctx.emit( str_ (rbx, tmp0)) ctx.emit( or_ (rcx_rbx, tmp0, rcx_rbx)) operand.set(ctx, i, 0, rcx_rbx) ctx.emit( str_ (imm(1, 8), r('zf', 8))) ctx.emit('done') ctx.emit( nop_()) def x86_cpuid(ctx, i): eax = operand.get_register(ctx, i, 'eax') flag = ctx.tmp(8) ctx.emit( equ_ (eax, imm(0, 32), flag)) ctx.emit( jcc_ (flag, 'cpuid_basic')) ctx.emit( jcc_ (imm(1, 8), 'done')) ctx.emit('cpuid_basic') operand.set_register(ctx, i, 'eax', imm(0, 32)) operand.set_register(ctx, i, 'ebx', imm(0x756e6547, 32)) operand.set_register(ctx, i, 'ecx', imm(0x49656e69, 32)) operand.set_register(ctx, i, 'edx', imm(0x6c65746e, 32)) ctx.emit( jcc_ (imm(1, 8), 'done')) ctx.emit('done') ctx.emit( nop_()) def x86_int(ctx, i): ctx.emit( sys_ (imm(0, 8))) def x86_int1(ctx, i): ctx.emit( sys_ (imm(1, 8))) def x86_int3(ctx, i): ctx.emit( sys_ (imm(3, 8))) def x86_into(ctx, i): ctx.emit( jcc_ (r('of', 8), 'do_interrupt')) ctx.emit( jcc_ (imm(1, 8), 'done')) ctx.emit('do_interrupt') ctx.emit( sys_ (imm(4, 8))) ctx.emit('done') ctx.emit( nop_ ()) def x86_lahf(ctx, i): result_ah = ctx.tmp(8) ctx.emit( str_ (imm(0, 8), result_ah)) ctx.emit( or_ (r('sf', 8), result_ah)) ctx.emit( lshl_ (result_ah, imm(1, 8), result_ah)) ctx.emit( or_ (r('zf', 8), result_ah)) ctx.emit( lshl_ (result_ah, imm(2, 8), result_ah)) ctx.emit( or_ (r('af', 8), result_ah)) ctx.emit( lshl_ (result_ah, imm(2, 8), result_ah)) ctx.emit( or_ (r('pf', 8), result_ah)) ctx.emit( lshl_ (result_ah, imm(1, 8), result_ah)) ctx.emit( or_ (imm(1, 8), result_ah)) ctx.emit( lshl_ (result_ah, imm(1, 8), result_ah)) ctx.emit( or_ (r('cf', 8), result_ah)) operand.set_register(ctx, i, 'ah', result_ah) def x86_nop(ctx, i): ctx.emit( nop_()) def x86_seta(ctx, i): conditional_set(ctx, i, conditional.A) def x86_setae(ctx, i): conditional_set(ctx, i, conditional.AE) def x86_setb(ctx, i): conditional_set(ctx, i, conditional.B) def x86_setbe(ctx, i): conditional_set(ctx, i, conditional.BE) def x86_setcxz(ctx, i): conditional_set(ctx, i, conditional.CXZ) def x86_setecxz(ctx, i): conditional_set(ctx, i, conditional.ECXZ) def x86_setrcxz(ctx, i): conditional_set(ctx, i, conditional.RCXZ) def x86_sete(ctx, i): conditional_set(ctx, i, conditional.E) def x86_setg(ctx, i): conditional_set(ctx, i, conditional.G) def x86_setge(ctx, i): 
conditional_set(ctx, i, conditional.GE) def x86_setl(ctx, i): conditional_set(ctx, i, conditional.L) def x86_setle(ctx, i): conditional_set(ctx, i, conditional.LE) def x86_setmp(ctx, i): conditional_set(ctx, i, conditional.UN)
Apache License 2.0
spotify/luigi
luigi/target.py
FileSystem.exists
python
def exists(self, path):
    pass
Return ``True`` if the file or directory at ``path`` exists, ``False`` otherwise.

:param str path: a path within the FileSystem to check for existence.
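In practice this abstract method is called through a concrete subclass; a sketch using luigi's local filesystem (assuming the usual `luigi.local_target` module path):

```python
from luigi.local_target import LocalFileSystem

fs = LocalFileSystem()
fs.exists('/tmp/some_output.txt')  # True if the path exists, else False
```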
https://github.com/spotify/luigi/blob/ad5ddc9875e54cca8209863a8ec7bcc5d13ece8a/luigi/target.py#L98-L104
import abc import io import os import random import tempfile import logging import warnings from contextlib import contextmanager logger = logging.getLogger('luigi-interface') class Target(metaclass=abc.ABCMeta): @abc.abstractmethod def exists(self): pass class FileSystemException(Exception): pass class FileAlreadyExists(FileSystemException): pass class MissingParentDirectory(FileSystemException): pass class NotADirectory(FileSystemException): pass class FileSystem(metaclass=abc.ABCMeta): @abc.abstractmethod
Apache License 2.0
schibum/sndlatr
gae/tests/__init__.py
BaseTestCase.set_next_response
python
def set_next_response(self, code, content, persist=False):
    self._response_queue.append((code, content, persist))
Set the response for the next ctx.urlfetch call. If persist is true, this response is always returned.
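A sketch of how a test might queue a canned response before exercising code that performs a urlfetch; the handler under test is hypothetical:

```python
import json

class ExampleTest(BaseTestCase):
    def test_remote_call(self):
        self.set_next_response(200, json.dumps({'ok': True}))
        # ... invoke the code under test here; its urlfetch call will
        # receive the queued (200, body) response via retrieve_mock.
```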
https://github.com/schibum/sndlatr/blob/6bbc3609ffd85bc3cfad18bee67b48b06be19038/gae/tests/__init__.py#L53-L57
import os import json import os from unittest import TestCase from contextlib import contextmanager import webapp2 from google.appengine.ext import testbed import mock import main test_data_dir = os.path.join(os.path.dirname(__file__), 'data') def fixture_file(filename): return os.path.join(test_data_dir, filename) def fixture_file_content(filename): with open(fixture_file(filename)) as fd: return fd.read() class BaseTestCase(TestCase): enable_xsrf = False def setUp(self): self.testbed = testbed.Testbed() self.testbed.activate() self.testbed.init_user_stub() self.testbed.init_datastore_v3_stub() self.testbed.init_memcache_stub() self.testbed.init_urlfetch_stub() self.testbed.init_mail_stub() self.testbed.init_taskqueue_stub( root_path=os.path.join(os.path.dirname(__file__), '..')) self.addCleanup(self.testbed.deactivate) self.taskqueue_stub = self.testbed.get_stub( testbed.TASKQUEUE_SERVICE_NAME) self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME) urlfetch = self.testbed.get_stub('urlfetch') urlfetch._RetrieveURL = self.retrieve_mock self._response_queue = [] self.patch_xsrf()
Apache License 2.0
opendilab/di-star
distar/model/alphastar/actor_critic.py
AlphaStarActorCritic.freeze_module
python
def freeze_module(self, freeze_targets=None):
    if freeze_targets is None:
        if self.freeze_targets is None:
            raise Exception("not provided arguments(freeze_targets)")
        else:
            freeze_targets = self.freeze_targets
    else:
        self.freeze_targets = freeze_targets

    def get_submodule(name):
        part = name.split('.')
        module = self
        for p in part:
            module = getattr(module, p)
        return module

    for name in freeze_targets:
        module = get_submodule(name)
        module.eval()
        for m in module.parameters():
            m.requires_grad_(False)
Note: must be called after model initialization and before the model's forward pass
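A usage sketch, assuming a constructed model and that the dotted names match real submodules:

```python
model = AlphaStarActorCritic(model_config=my_model_cfg)  # my_model_cfg is illustrative
# Freeze the encoder and policy heads; their parameters stop receiving gradients.
model.freeze_module(['encoder', 'policy'])
```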
https://github.com/opendilab/di-star/blob/f12d79403488e7df0498d7b116fc23a67506112b/distar/model/alphastar/actor_critic.py#L78-L104
import os.path as osp from collections import namedtuple, OrderedDict, defaultdict from functools import reduce import torch import torch.nn as nn from collections.abc import Sequence, Mapping from .encoder import Encoder from .policy import Policy from .value import ValueBaseline from ctools.model import ValueActorCriticBase from ctools.utils import read_config, deep_merge_dicts from distar.envs import AlphaStarEnv alphastar_model_default_config = read_config(osp.join(osp.dirname(__file__), "actor_critic_default_config.yaml")) def detach_grad(data): if isinstance(data, Sequence): for i in range(len(data)): data[i] = detach_grad(data[i]) elif isinstance(data, Mapping): for k in data.keys(): data[k] = detach_grad(data[k]) elif isinstance(data, torch.Tensor): data = data.detach() return data class AlphaStarActorCritic(ValueActorCriticBase): EvalInput = namedtuple( 'EvalInput', ['map_size', 'entity_raw', 'scalar_info', 'spatial_info', 'entity_info', 'prev_state'] ) EvalOutput = namedtuple('EvalOutput', ['actions', 'logits']) MimicOutput = namedtuple('MimicOutput', ['logits', 'next_state']) StepInput = namedtuple('StepInput', ['home', 'away']) StepOutput = namedtuple('StepOutput', ['actions', 'logits', 'baselines']) CriticInput = namedtuple( 'CriticInput', [ 'lstm_output_home', 'embeddings_entity_away', 'embeddings_spatial_away', 'baseline_feature_home', 'baseline_feature_away', 'score_embedding_home', 'score_embedding_away', 'cum_stat_home', 'immediate_cum_stat_home', 'immediate_cum_stat_away' ] ) CriticOutput = namedtuple('CriticOutput', ['winloss', 'build_order', 'built_unit', 'effect', 'upgrade', 'battle']) def __init__(self, model_config=None): super(AlphaStarActorCritic, self).__init__() cfg = deep_merge_dicts(alphastar_model_default_config["model"], model_config) self.cfg = self._merge_input_dim(cfg) self.encoder = Encoder(self.cfg.encoder) self.policy = Policy(self.cfg.policy) self.only_update_baseline = cfg.get('only_update_baseline', False) if self.cfg.use_value_network: self.value_networks = nn.ModuleDict() self.value_cum_stat_keys = OrderedDict() for k, v in self.cfg.value.items(): if k in self.cfg.enable_baselines: self.value_networks[v.name] = ValueBaseline(v.param) self.value_cum_stat_keys[v.name] = v.cum_stat_keys self.freeze_module(self.cfg.freeze_targets) def _merge_input_dim(self, cfg): env_info = AlphaStarEnv({}).info() cfg.encoder.obs_encoder.entity_encoder.input_dim = env_info.obs_space['entity'].shape[-1] cfg.encoder.obs_encoder.spatial_encoder.input_dim = env_info.obs_space['spatial'].shape[ 0] + cfg.encoder.scatter.output_dim handle = cfg.encoder.obs_encoder.scalar_encoder.module for k in handle.keys(): handle[k].input_dim = env_info.obs_space['scalar'].shape[k] cfg.encoder.score_cumulative.input_dim = env_info.obs_space['scalar'].shape['score_cumulative'] return cfg
Apache License 2.0
adamlwgriffiths/pyglet
contrib/wydget/wydget/element.py
Element.getRects
python
def getRects(self, view_clip=None, exclude=None):
    if not self.is_visible:
        return []
    r = util.Rect(self._x, self._y, self._width, self._height)
    if view_clip is not None:
        r = r.intersect(view_clip)
    if r is None:
        return []
    x, y, z = self._x, self._y, self._z
    clipped = self.rect
    clipped.x -= x
    clipped.y -= y
    if view_clip:
        view_clip = view_clip.copy()
        view_clip.x -= x
        view_clip.y -= y
        clipped = clipped.intersect(view_clip)
    if not clipped:
        return []
    rects = []
    if not self.is_transparent:
        rects.append((self, (x, y, z, clipped)))
    if not self.children:
        return rects
    pad = self._padding
    x += pad
    y += pad
    if view_clip is not None:
        view_clip = view_clip.copy()
        view_clip.x -= pad
        view_clip.y -= pad
    if self.view_clip is not None:
        if view_clip is not None:
            view_clip = view_clip.intersect(self.view_clip)
        else:
            view_clip = self.view_clip
        if not view_clip:
            return rects
    for child in self.children:
        if exclude is child:
            continue
        for obj, (ox, oy, oz, c) in child.getRects(view_clip, exclude):
            rects.append((obj, (ox+x, oy+y, oz+z, c)))
    return rects
Determine the drawing parameters for this element and its children.

Returns a list of (element, (x, y, z, clipped)) where:

    (x, y, z) is the screen location to render at
    clipped is the relative rectangle to render
https://github.com/adamlwgriffiths/pyglet/blob/18bd86a8f235e4f5edd94b0d38073d0e5477a366/contrib/wydget/wydget/element.py#L326-L386
import inspect import math from pyglet.gl import * from wydget import loadxml from wydget import event from wydget import util intceil = lambda i: int(math.ceil(i)) class Element(object): is_focusable = False classes = () view_clip = None _x = _y = _width = _height = None def __init__(self, parent, x, y, z, width, height, padding=0, border=None, bgcolor=None, is_visible=True, is_enabled=True, is_transparent=False, children=None, id=None, classes=()): self.parent = parent self.id = id or self.allocateID() self.classes = classes self.children = children or [] self.bgcolor = util.parse_color(bgcolor) self.border = util.parse_color(border) self._padding = util.parse_value(padding) if border: self._padding += 1 self.x_spec = util.Position(x, self, parent, 'width') self.y_spec = util.Position(y, self, parent, 'height') self._z = util.parse_value(z) or 0 self.width_spec = util.Dimension(width, self, parent, 'width') self.height_spec = util.Dimension(height, self, parent, 'height') if self.x_spec.is_fixed: self.x = self.x_spec.calculate() if self.y_spec.is_fixed: self.y = self.y_spec.calculate() if self.width_spec.is_fixed: self.width = self.width_spec.calculate() if self.height_spec.is_fixed: self.height = self.height_spec.calculate() self.is_visible = is_visible self.is_enabled = is_enabled self.is_modal = False self.is_transparent = is_transparent self._event_handlers = {} self.parent.addChild(self) self.resetGeometry() _next_id = 1 def allocateID(self): id = '%s-%d'%(self.__class__.__name__, Element._next_id) Element._next_id += 1 return id def set_x(self, value): self._x = value self.setDirty() x = property(lambda self: self._x and int(self._x), set_x) def set_y(self, value): self._y = value self.setDirty() y = property(lambda self: self._y and int(self._y), set_y) def set_z(self, value): self._z = value self.setDirty() z = property(lambda self: self._z and int(self._z), set_z) def set_width(self, value): self._width = value self.setDirty() width = property(lambda self: self._width and intceil(self._width), set_width) def set_height(self, value): self._height = value self.setDirty() height = property(lambda self: self._height and intceil(self._height), set_height) def set_padding(self, value): self._padding = value self.setDirty() padding = property(lambda self: self._padding and int(self._padding), set_padding) def get_rect(self): return util.Rect(int(self._x), int(self._y), self.width, self.height) rect = property(get_rect) def get_inner_rect(self): p = self._padding return util.Rect(int(p), int(p), intceil(self._width - p*2), intceil(self._height - p*2)) inner_rect = property(get_inner_rect) def get_inner_width(self): return intceil(self._width - self._padding*2) inner_width = property(get_inner_width) def get_inner_height(self): return intceil(self._height - self._padding*2) inner_height = property(get_inner_height) def get_min_width(self): if self.width_spec.value is not None: return self.width_spec.value width = self.width if width is None: width = self.intrinsic_width() return width min_width = property(get_min_width) def intrinsic_width(self): raise NotImplementedError('intrinsic_width on %r'%self.__class__) def get_min_height(self): if self.height_spec.value is not None: return self.height_spec.value height = self.height if height is None: height = self.intrinsic_height() return height min_height = property(get_min_height) def intrinsic_height(self): raise NotImplementedError('intrinsic_height on %r'%self.__class__) @classmethod def fromXML(cls, element, parent): kw = 
loadxml.parseAttributes(element) obj = cls(parent, **kw) for child in element.getchildren(): loadxml.getConstructor(child.tag)(child, obj) return obj def addChild(self, child): self.children.append(child) child.parent = self self.getGUI().register(child) self.resetGeometry() def getParent(self, selector): if isinstance(selector, str): if selector[0] in '.#': raise NotImplementedError( 'lookup by id and class not yet supported') selector = [s.strip() for s in selector.split(',')] if self.name in selector: return self return self.parent.getParent(selector) def holds(self, element): for child in self.children: if child is element: return True if child.holds(element): return True return False def resetGeometry(self): if not self.width_spec.is_fixed: self._width = None if not self.height_spec.is_fixed: self._height = None for child in self.children: child.resetGeometry() self.setDirty() def resize(self): ok = True if self._width is None: w = self.width_spec.calculate() if w is None: ok = False else: self.width = w if self._height is None: h = self.height_spec.calculate() if h is None: ok = False else: self.height = h return ok
BSD 3-Clause New or Revised License
rosenbrockc/aflow
aflow/control.py
search
python
def search(catalog=None, batch_size=100):
    return Query(catalog, batch_size)
Returns a :class:`aflow.control.Query` to help construct the search query.

Args:
    catalog (str): one of the catalogs supported on AFLOW: ['icsd', 'lib1', 'lib2', 'lib3'].
        Also supports a `list` of catalog names.
    batch_size (int): number of data entries to return per HTTP request.
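A minimal sketch of starting a query; further filtering uses the returned `Query` object's API:

```python
from aflow.control import search

q = search(catalog='icsd', batch_size=50)
# q is an aflow.control.Query; add filters and iterate it to pull entries
# in batches of 50 over HTTP.
```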
https://github.com/rosenbrockc/aflow/blob/785284fe144b0bc63a52470ad8fcb3e27b0ca683/aflow/control.py#L9-L18
server = "http://aflowlib.duke.edu/search/API/?" from aflow import msg
MIT License
numba/numba
numba/core/compiler_machinery.py
CompilerPass.name
python
def name(cls):
    return cls._name
Returns the name of the pass
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/core/compiler_machinery.py#L38-L42
import timeit from abc import abstractmethod, ABCMeta from collections import namedtuple, OrderedDict import inspect from numba.core.compiler_lock import global_compiler_lock from numba.core import errors, config, transforms, utils from numba.core.tracing import event from numba.core.postproc import PostProcessor from numba.core.ir_utils import enforce_no_dels, legalize_single_scope _termcolor = errors.termcolor() class SimpleTimer(object): def __enter__(self): self.ts = timeit.default_timer() return self def __exit__(self, *exc): self.elapsed = timeit.default_timer() - self.ts class CompilerPass(metaclass=ABCMeta): @abstractmethod def __init__(self, *args, **kwargs): self._analysis = None self._pass_id = None @classmethod
BSD 2-Clause Simplified License
lun-4/jose
unused/coins.py
Coins.empty_account
python
def empty_account(self, account_id, account_type, amount):
    if account_type == 'user':
        return {
            'type': 'user',
            'amount': str(decimal.Decimal(amount)),
            'id': account_id,
            'taxpaid': str(decimal.Decimal(0)),
            'times_stolen': 0,
            'success_steal': 0,
            'loaning_from': None,
            'interest_tbank': '',
        }
    elif account_type == 'taxbank':
        return {
            'type': 'taxbank',
            'id': account_id,
            'amount': str(decimal.Decimal(0)),
            'loans': {},
        }
Return an empty account object.

Parameters
----------
account_id: int
    ID of this account.
account_type: str
    Account's type, can be ``"user"`` or ``"taxbank"``.
amount: int or float or ``decimal.Decimal``
    Account's starting amount, only valid for user accounts.
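For illustration, a user account created this way would look roughly like the following; the cog instance and IDs are hypothetical:

```python
coins = Coins(bot)  # `bot` is the running discord bot instance
acct = coins.empty_account(123456789, 'user', 25)
# {
#   'type': 'user', 'amount': '25', 'id': 123456789, 'taxpaid': '0',
#   'times_stolen': 0, 'success_steal': 0,
#   'loaning_from': None, 'interest_tbank': '',
# }
```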
https://github.com/lun-4/jose/blob/fdafb121e0c5d7a731b52b3503f6b12e3538948a/unused/coins.py#L114-L151
import time import decimal import logging import collections import asyncio import pprint import math import discord from random import SystemRandom from discord.ext import commands from .common import Cog, CoinConverter log = logging.getLogger(__name__) random = SystemRandom() REWARD_COOLDOWN = 1800 TAX_CONSTANT = decimal.Decimal('1.0065') PROB_CONSTANT = decimal.Decimal('1.003384590736') COIN_BASE_PROBABILITY = decimal.Decimal('0.012') TAX_MULTIPLIER = decimal.Decimal('1.42') class TransferError(Exception): pass TRANSFER_OBJECTS = [ 'bananas', 'computers', 'dogs', 'memes', 'cats', 'coins', 'paintings', ] class Coins(Cog, requires=['config']): def __init__(self, bot): super().__init__(bot) self.jcoin_coll = self.config.jose_db['josecoin'] self.hidecoin_coll = self.config.jose_db['jcoin-hidecoin'] self.BASE_PROBABILITY = COIN_BASE_PROBABILITY self.INF = decimal.Decimal('inf') self.TransferError = TransferError self.bot.simple_exc.append(TransferError) self.reward_env = {} self.acct_cache = collections.defaultdict(list) self.gacct_locks = collections.defaultdict(asyncio.Lock) self.cache = {} self.transfer_lock = asyncio.Lock() self.delete_lock = asyncio.Lock() self.locked_accounts = [] self.gdp = None self.transfers_done = 0 def get_name(self, user_id, account=None): if isinstance(user_id, discord.Guild): return f'taxbank:{user_id.name}' elif isinstance(user_id, discord.User): return str(user_id) obj = self.bot.get_user(int(user_id)) if obj is None: obj = self.bot.get_guild(user_id) if obj is not None: obj = f'taxbank:{obj}' if obj is None: if account: if account['type'] == 'user': return f'Unfindable User {user_id}' elif account['type'] == 'guild': return f'Unfindable Guild {user_id}' else: return f'Unfindable Unknown {user_id}' else: return f'Unfindable ID {user_id}' return str(obj)
MIT License
mariacer/cl_in_rnns
sequential/pos_tagging/train_utils_pos.py
get_soft_trgt_acc_func
python
def get_soft_trgt_acc_func():
    def soft_trgt_acc_fct(config, X, Y_logits, T_soft_logits, data, in_seq_lens=None):
        if in_seq_lens is None:
            raise NotImplementedError('This soft accuracy is currently ' +
                'only implemented if sequence lengths are provided, as they ' +
                'can\'t be inferred easily.')
        assert np.all(np.equal(X.shape[:2], T_soft_logits.shape[:2]))
        assert np.all(np.equal(Y_logits.shape[:2], T_soft_logits.shape[:2]))
        num_correct = 0
        total_num_ts = 0
        for bid in range(X.shape[1]):
            sl = int(in_seq_lens[bid])
            total_num_ts += sl
            Y_logits_i = Y_logits[:sl, bid, :]
            T_soft_logits_i = T_soft_logits[:sl, bid, :]
            predicted = Y_logits_i.argmax(dim=1)
            label = T_soft_logits_i.argmax(dim=1)
            num_correct += (predicted == label).sum().cpu().item()
        return num_correct / total_num_ts * 100.
    return soft_trgt_acc_fct
Get the accuracy function that can deal with generated soft targets.

Returns:
    (func): A function handle.
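A sketch of calling the returned function on dummy tensors; `config` and `data` are unused inside it, so `None` is passed here:

```python
import numpy as np
import torch

acc_fct = get_soft_trgt_acc_func()

T_steps, batch_size, n_classes = 5, 2, 10
X = torch.zeros(T_steps, batch_size, 4)                 # only its shape is checked
Y_logits = torch.randn(T_steps, batch_size, n_classes)
T_soft = torch.randn(T_steps, batch_size, n_classes)
seq_lens = np.array([5, 3])                             # per-sample valid lengths

acc = acc_fct(None, X, Y_logits, T_soft, None, in_seq_lens=seq_lens)
print(acc)  # percentage of timesteps where argmax(Y) == argmax(T_soft)
```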
https://github.com/mariacer/cl_in_rnns/blob/333b8e03391600a8e3df7d684a3f171b135d273a/sequential/pos_tagging/train_utils_pos.py#L242-L279
from sklearn.metrics import f1_score import torch import numpy as np from data.timeseries.mud_data import get_mud_handlers from mnets.classifier_interface import Classifier from sequential.replay_utils import gauss_reconstruction_loss from sequential import train_utils_sequential as tuseq def generate_tasks(config, logger, writer=None): logger.info('Running PoS experiment.') dhandlers = get_mud_handlers('../../datasets', num_tasks=config.num_tasks) for t, d in enumerate(dhandlers): assert 'task_id' not in d._data.keys() d._data['task_id'] = t return dhandlers def get_loss_func(config, device, logger, ewc_loss=False): if hasattr(config, 'ts_weighting') or hasattr(config, 'ts_weighting_fisher'): raise NotImplementedError('The copy task dataset has a fixed loss ' + 'weighting scheme, which is not configurable.') ce_loss = tuseq.sequential_nll(loss_type='ce', reduction='sum') sample_loss_func = lambda Y, T, tsf, beta: ce_loss(Y, T, None, None, None, ts_factors=tsf, beta=beta) def task_loss_func(Y, T, data, allowed_outputs, empirical_fisher, batch_ids): tsf = torch.zeros(T.shape[0], T.shape[1]).to(T.device) seq_lengths = data.get_out_seq_lengths(batch_ids) for i in range(batch_ids.size): sl = int(seq_lengths[i]) tsf[:sl, i] = 1 return sample_loss_func(Y, T, tsf, None) return task_loss_func def get_accuracy_func(config): def get_accuracy(logit_outputs, targets, data, batch_ids): seq_lengths = data.get_out_seq_lengths(batch_ids) input_data = data._data['in_data'][batch_ids,:] predicted = logit_outputs.argmax(dim=2) targets = targets.argmax(dim=2) all_compared = predicted == targets num_correct = 0 num_total = 0 for i in range(batch_ids.size): comp_idx = np.arange(0, int(seq_lengths[i])) exclude_idx = np.where(input_data[i,:] == 0) comp_idx = np.setdiff1d(comp_idx, exclude_idx) num_correct += all_compared[comp_idx, i].sum().cpu().item() num_total += len(comp_idx) if num_total != 0: accuracy = 100. * num_correct / num_total else: accuracy = 0 return accuracy, None return get_accuracy def get_vae_rec_loss_func(): return gauss_reconstruction_loss def get_distill_loss_func(): def distill_loss_fct(config, X, Y_logits, T_soft_logits, data, in_seq_lens=None): if in_seq_lens is None: raise NotImplementedError('This distillation loss is currently ' + 'only implemented if sequence lengths are provided, as they ' + 'can\'t be inferred easily.') assert np.all(np.equal(X.shape[:2], T_soft_logits.shape[:2])) assert np.all(np.equal(Y_logits.shape[:2], T_soft_logits.shape[:2])) T=2. target_mapping = None if config.all_task_softmax: target_mapping = list(range(T_soft_logits.shape[2])) dloss = 0 total_num_ts = 0 for bid in range(X.shape[1]): sl = int(in_seq_lens[bid]) total_num_ts += sl Y_logits_i = Y_logits[:sl, bid, :] T_soft_logits_i = T_soft_logits[:sl, bid, :] dloss += Classifier.knowledge_distillation_loss(Y_logits_i, T_soft_logits_i, target_mapping=target_mapping, device=Y_logits.device, T=T) * sl return dloss / total_num_ts return distill_loss_fct
Apache License 2.0
docusign/docusign-python-client
docusign_esign/models/envelope_document.py
EnvelopeDocument.attachment_tab_id
python
def attachment_tab_id(self):
    return self._attachment_tab_id
Gets the attachment_tab_id of this EnvelopeDocument.  # noqa: E501

:return: The attachment_tab_id of this EnvelopeDocument.  # noqa: E501
:rtype: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/envelope_document.py#L178-L186
import pprint import re import six from docusign_esign.client.configuration import Configuration class EnvelopeDocument(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'added_recipient_ids': 'list[str]', 'attachment_tab_id': 'str', 'authoritative_copy': 'str', 'authoritative_copy_metadata': 'PropertyMetadata', 'available_document_types': 'list[SignatureType]', 'contains_pdf_form_fields': 'str', 'display': 'str', 'display_metadata': 'PropertyMetadata', 'document_base64': 'str', 'document_fields': 'list[NameValue]', 'document_id': 'str', 'document_id_guid': 'str', 'error_details': 'ErrorDetails', 'include_in_download': 'str', 'include_in_download_metadata': 'PropertyMetadata', 'name': 'str', 'name_metadata': 'PropertyMetadata', 'order': 'str', 'pages': 'list[Page]', 'signer_must_acknowledge': 'str', 'signer_must_acknowledge_metadata': 'PropertyMetadata', 'size_bytes': 'str', 'template_locked': 'str', 'template_required': 'str', 'type': 'str', 'uri': 'str' } attribute_map = { 'added_recipient_ids': 'addedRecipientIds', 'attachment_tab_id': 'attachmentTabId', 'authoritative_copy': 'authoritativeCopy', 'authoritative_copy_metadata': 'authoritativeCopyMetadata', 'available_document_types': 'availableDocumentTypes', 'contains_pdf_form_fields': 'containsPdfFormFields', 'display': 'display', 'display_metadata': 'displayMetadata', 'document_base64': 'documentBase64', 'document_fields': 'documentFields', 'document_id': 'documentId', 'document_id_guid': 'documentIdGuid', 'error_details': 'errorDetails', 'include_in_download': 'includeInDownload', 'include_in_download_metadata': 'includeInDownloadMetadata', 'name': 'name', 'name_metadata': 'nameMetadata', 'order': 'order', 'pages': 'pages', 'signer_must_acknowledge': 'signerMustAcknowledge', 'signer_must_acknowledge_metadata': 'signerMustAcknowledgeMetadata', 'size_bytes': 'sizeBytes', 'template_locked': 'templateLocked', 'template_required': 'templateRequired', 'type': 'type', 'uri': 'uri' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._added_recipient_ids = None self._attachment_tab_id = None self._authoritative_copy = None self._authoritative_copy_metadata = None self._available_document_types = None self._contains_pdf_form_fields = None self._display = None self._display_metadata = None self._document_base64 = None self._document_fields = None self._document_id = None self._document_id_guid = None self._error_details = None self._include_in_download = None self._include_in_download_metadata = None self._name = None self._name_metadata = None self._order = None self._pages = None self._signer_must_acknowledge = None self._signer_must_acknowledge_metadata = None self._size_bytes = None self._template_locked = None self._template_required = None self._type = None self._uri = None self.discriminator = None setattr(self, "_{}".format('added_recipient_ids'), kwargs.get('added_recipient_ids', None)) setattr(self, "_{}".format('attachment_tab_id'), kwargs.get('attachment_tab_id', None)) setattr(self, "_{}".format('authoritative_copy'), kwargs.get('authoritative_copy', None)) setattr(self, "_{}".format('authoritative_copy_metadata'), kwargs.get('authoritative_copy_metadata', None)) setattr(self, "_{}".format('available_document_types'), kwargs.get('available_document_types', None)) 
setattr(self, "_{}".format('contains_pdf_form_fields'), kwargs.get('contains_pdf_form_fields', None)) setattr(self, "_{}".format('display'), kwargs.get('display', None)) setattr(self, "_{}".format('display_metadata'), kwargs.get('display_metadata', None)) setattr(self, "_{}".format('document_base64'), kwargs.get('document_base64', None)) setattr(self, "_{}".format('document_fields'), kwargs.get('document_fields', None)) setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None)) setattr(self, "_{}".format('document_id_guid'), kwargs.get('document_id_guid', None)) setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None)) setattr(self, "_{}".format('include_in_download'), kwargs.get('include_in_download', None)) setattr(self, "_{}".format('include_in_download_metadata'), kwargs.get('include_in_download_metadata', None)) setattr(self, "_{}".format('name'), kwargs.get('name', None)) setattr(self, "_{}".format('name_metadata'), kwargs.get('name_metadata', None)) setattr(self, "_{}".format('order'), kwargs.get('order', None)) setattr(self, "_{}".format('pages'), kwargs.get('pages', None)) setattr(self, "_{}".format('signer_must_acknowledge'), kwargs.get('signer_must_acknowledge', None)) setattr(self, "_{}".format('signer_must_acknowledge_metadata'), kwargs.get('signer_must_acknowledge_metadata', None)) setattr(self, "_{}".format('size_bytes'), kwargs.get('size_bytes', None)) setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None)) setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None)) setattr(self, "_{}".format('type'), kwargs.get('type', None)) setattr(self, "_{}".format('uri'), kwargs.get('uri', None)) @property def added_recipient_ids(self): return self._added_recipient_ids @added_recipient_ids.setter def added_recipient_ids(self, added_recipient_ids): self._added_recipient_ids = added_recipient_ids @property
MIT License
societe-generale/aikit
aikit/cross_validation.py
create_cv
python
def create_cv(cv=3, y=None, classifier=False, shuffle=False, random_state=None):
    if cv is None:
        cv = 3
    if isinstance(cv, sklearn.model_selection._split.numbers.Integral):
        if (
            classifier
            and (y is not None)
            and (sklearn.model_selection._split.type_of_target(y) in ("binary", "multiclass"))
        ):
            return sklearn.model_selection.StratifiedKFold(cv, shuffle=shuffle, random_state=random_state)
        else:
            return sklearn.model_selection.KFold(cv, shuffle=shuffle, random_state=random_state)
    if not hasattr(cv, "split") or isinstance(cv, str):
        if not isinstance(cv, sklearn.model_selection._split.Iterable) or isinstance(cv, str):
            raise ValueError(
                "Expected cv as an integer, cross-validation "
                "object (from sklearn.model_selection) "
                "or an iterable. Got %s." % cv
            )
        return sklearn.model_selection._split._CVIterableWrapper(cv)
    return cv
Input checker utility for building a cross-validator.

Differences from sklearn.model_selection.check_cv:
    * shuffle and random_state params

Parameters
----------
cv : int, cross-validation generator or an iterable, optional
    Determines the cross-validation splitting strategy. Possible inputs for cv are:
    - None, to use the default 3-fold cross-validation,
    - integer, to specify the number of folds.
    - An object to be used as a cross-validation generator.
    - An iterable yielding train/test splits.
    For integer/None inputs, if classifier is True and ``y`` is either binary or
    multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used.
    Refer :ref:`User Guide <cross_validation>` for the various cross-validation
    strategies that can be used here.
y : array-like, optional
    The target variable for supervised learning problems.
classifier : boolean, optional, default False
    Whether the task is a classification task, in which case stratified KFold will be used.
shuffle : boolean, optional, default False
    if True will use shuffle = True from StratifiedKFold
random_state : int or None, default = None
    will be passed to the StratifiedKFold object

Returns
-------
checked_cv : a cross-validator instance.
    The return value is a cross-validator which generates the train/test splits
    via the ``split`` method.
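A usage sketch for a small classification problem:

```python
import numpy as np

y = np.array([0, 1, 0, 1, 0, 1, 0, 1])
cv = create_cv(4, y=y, classifier=True, shuffle=True, random_state=0)
# StratifiedKFold is returned because the task is classification and y is binary.
for train_idx, test_idx in cv.split(np.zeros((len(y), 1)), y):
    print(train_idx, test_idx)
```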
https://github.com/societe-generale/aikit/blob/17f6cbc97e7f91e57a1e8d7db39ad29990dfbb47/aikit/cross_validation.py#L50-L116
import numpy as np import pandas as pd import scipy.sparse as sp from collections import OrderedDict from time import time import numbers from joblib import Parallel, delayed import sklearn.model_selection from sklearn.model_selection._split import BaseCrossValidator, _num_samples, train_test_split try: from sklearn.utils.validation import _check_fit_params except ImportError: _check_fit_params = None if _check_fit_params is None: from sklearn.model_selection._validation import _index_param_value def _check_fit_params(X, fit_params, indices=None): return {k: _index_param_value(X, v, indices) for k,v in fit_params.items()} import sklearn.base from aikit.tools.helper_functions import function_has_named_argument def is_clusterer(estimator): return getattr(estimator, "_estimator_type", None) == "clusterer"
BSD 2-Clause Simplified License
chrisyounger/git_for_splunk
bin/git_for_splunk/aob_py2/splunktaucclib/rest_handler/admin_external.py
handle
python
def handle(
    endpoint,
    handler=AdminExternalHandler,
    context_info=admin.CONTEXT_APP_ONLY,
):
    real_handler = type(
        handler.__name__,
        (handler, ),
        {'endpoint': endpoint},
    )
    admin.init(real_handler, ctxInfo=context_info)
Handle request.

:param endpoint: REST endpoint
:param handler: REST handler
:param context_info:
:return:
https://github.com/chrisyounger/git_for_splunk/blob/c450f32069b5d1087d4e4ebb0803bf7a0f25c60d/bin/git_for_splunk/aob_py2/splunktaucclib/rest_handler/admin_external.py#L151-L169
from __future__ import absolute_import from functools import wraps from splunk import admin from solnlib.splunkenv import get_splunkd_uri from solnlib.utils import is_true from .eai import EAI_FIELDS from .handler import RestHandler __all__ = [ 'make_conf_item', 'build_conf_info', 'AdminExternalHandler', ] def make_conf_item(conf_item, content, eai): for key, val in list(content.items()): conf_item[key] = val for eai_field in EAI_FIELDS: conf_item.setMetadata(eai_field, eai.content[eai_field]) return conf_item def build_conf_info(meth): @wraps(meth) def wrapper(self, confInfo): result = meth(self, confInfo) for entity in result: make_conf_item( confInfo[entity.name], entity.content, entity.eai, ) return wrapper class AdminExternalHandler(admin.MConfigHandler, object): endpoint = None ACTION_CRED = '--cred--' def __init__(self, *args, **kwargs): admin.MConfigHandler.__init__( self, *args, **kwargs ) self.handler = RestHandler( get_splunkd_uri(), self.getSessionKey(), self.endpoint, ) self.payload = self._convert_payload() def setup(self): if self.requestedAction == admin.ACTION_LIST: self.supportedArgs.addOptArg(self.ACTION_CRED) actions = (admin.ACTION_LIST, admin.ACTION_REMOVE) if self.requestedAction in actions: return model = self.endpoint.model( self.callerArgs.id, self.payload, ) if self.requestedAction == admin.ACTION_CREATE: for field in model.fields: if field.required: self.supportedArgs.addReqArg(field.name) else: self.supportedArgs.addOptArg(field.name) if self.requestedAction == admin.ACTION_EDIT: for field in model.fields: self.supportedArgs.addOptArg(field.name) @build_conf_info def handleList(self, confInfo): decrypt = self.callerArgs.data.get( self.ACTION_CRED, [False], ) decrypt = is_true(decrypt[0]) if self.callerArgs.id: result = self.handler.get( self.callerArgs.id, decrypt=decrypt, ) else: result = self.handler.all( decrypt=decrypt, count=0, ) return result @build_conf_info def handleCreate(self, confInfo): return self.handler.create( self.callerArgs.id, self.payload, ) @build_conf_info def handleEdit(self, confInfo): disabled = self.payload.get('disabled') if disabled is None: return self.handler.update( self.callerArgs.id, self.payload, ) elif is_true(disabled): return self.handler.disable(self.callerArgs.id) else: return self.handler.enable(self.callerArgs.id) @build_conf_info def handleRemove(self, confInfo): return self.handler.delete(self.callerArgs.id) def _convert_payload(self): check_actions = (admin.ACTION_CREATE, admin.ACTION_EDIT) if self.requestedAction not in check_actions: return None payload = {} for filed, value in list(self.callerArgs.data.items()): payload[filed] = value[0] if value and value[0] else '' return payload
Apache License 2.0
unofficial-memsource/memsource-cli-client
memsource_cli/models/search_response_list_tm_dto_v3.py
SearchResponseListTmDtoV3.search_results
python
def search_results(self, search_results):
    self._search_results = search_results
Sets the search_results of this SearchResponseListTmDtoV3.

:param search_results: The search_results of this SearchResponseListTmDtoV3.  # noqa: E501
:type: list[SearchTMResponseDtoV3]
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/search_response_list_tm_dto_v3.py#L63-L71
import pprint import re import six from memsource_cli.models.search_tm_response_dto_v3 import SearchTMResponseDtoV3 class SearchResponseListTmDtoV3(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'search_results': 'list[SearchTMResponseDtoV3]' } attribute_map = { 'search_results': 'searchResults' } def __init__(self, search_results=None): self._search_results = None self.discriminator = None if search_results is not None: self.search_results = search_results @property def search_results(self): return self._search_results @search_results.setter
Apache License 2.0
hengyicai/adaptive_multi-curricula_learning_for_dialog
parlai/core/torch_generator_agent.py
TorchGeneratorModel.decode_greedy
python
def decode_greedy(self, encoder_states, bsz, maxlen):
    xs = self._starts(bsz)
    incr_state = None
    logits = []
    for _i in range(maxlen):
        scores, incr_state = self.decoder(xs, encoder_states, incr_state)
        scores = scores[:, -1:, :]
        scores = self.output(scores)
        _, preds = scores.max(dim=-1)
        logits.append(scores)
        xs = torch.cat([xs, preds], dim=1)
        all_finished = ((xs == self.END_IDX).sum(dim=1) > 0).sum().item() == bsz
        if all_finished:
            break
    logits = torch.cat(logits, 1)
    return logits, xs
Greedy search

:param int bsz: Batch size. Because encoder_states is model-specific, it cannot infer this automatically.
:param encoder_states: Output of the encoder model.
:type encoder_states: Model specific
:param int maxlen: Maximum decoding length
:return: pair (logits, choices) of the greedy decode
:rtype: (FloatTensor[bsz, maxlen, vocab], LongTensor[bsz, maxlen])
https://github.com/hengyicai/adaptive_multi-curricula_learning_for_dialog/blob/d1bc6f5348af628cc2e9c1655e75a2cf22ccc01b/parlai/core/torch_generator_agent.py#L67-L106
import os import math import tempfile from collections import defaultdict, Counter, namedtuple from operator import attrgetter import torch import torch.nn as nn import torch.nn.functional as F from parlai.core.torch_agent import TorchAgent, Batch, Output from parlai.core.utils import padded_tensor, round_sigfigs, warn_once, neginf from parlai.core.thread_utils import SharedTable from parlai.core.distributed_utils import is_distributed class TorchGeneratorModel(nn.Module): def __init__( self, padding_idx=0, start_idx=1, end_idx=2, unknown_idx=3, input_dropout=0, longest_label=1, ): super().__init__() self.NULL_IDX = padding_idx self.END_IDX = end_idx self.register_buffer('START', torch.LongTensor([start_idx])) self.longest_label = longest_label def _starts(self, bsz): return self.START.detach().expand(bsz, 1)
MIT License
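The loop above is the standard greedy-decoding pattern: feed the running sequence back in, take the argmax token each step, stop once every row has emitted END. A self-contained toy sketch of that pattern (not ParlAI code; `toy_scores` is a made-up stand-in for the model's decoder/output projection):

import torch

def toy_scores(xs, vocab_size=5):
    # stand-in for self.decoder + self.output: one score vector per sequence
    torch.manual_seed(0)
    return torch.randn(xs.size(0), 1, vocab_size)

END_IDX, START_IDX, bsz, maxlen = 2, 1, 3, 4
xs = torch.full((bsz, 1), START_IDX, dtype=torch.long)
for _ in range(maxlen):
    scores = toy_scores(xs)            # (bsz, 1, vocab)
    _, preds = scores.max(dim=-1)      # greedy pick per step
    xs = torch.cat([xs, preds], dim=1)  # append to the running sequence
    if ((xs == END_IDX).sum(dim=1) > 0).sum().item() == bsz:
        break                          # every row produced END
print(xs)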
microsoft/task_oriented_dialogue_as_dataflow_synthesis
src/dataflow/core/program_utils.py
mk_revise
python
def mk_revise(
    root_location_idx: Idx, old_location_idx: Idx, new_idx: Idx, idx: Idx,
) -> Tuple[Expression, Idx]:
    return mk_struct_op(
        schema=DataflowFn.Revise.value,
        args=[
            (ROOT_LOCATION, root_location_idx),
            (OLD_LOCATION, old_location_idx),
            (NEW, new_idx),
        ],
        idx=idx,
    )
Revises the salient constraint satisfying the constraint at `old_location_idx`,
in the salient computation satisfying the constraint at `root_location_idx`,
with the constraint at `new_idx`.
In Lispress:
```
(Revise
  :rootLocation {root_location}
  :oldLocation {old_location}
  :new {new})
```
https://github.com/microsoft/task_oriented_dialogue_as_dataflow_synthesis/blob/e848243ff88f4b0d1a383b8df2612a1f3edb5300/src/dataflow/core/program_utils.py#L113-L135
import re from enum import Enum from json import dumps from typing import Any, List, Optional, Tuple from dataflow.core.program import ( BuildStructOp, CallLikeOp, Expression, TypeName, ValueOp, ) from dataflow.core.sexp import Sexp ROOT_LOCATION = "rootLocation" OLD_LOCATION = "oldLocation" NEW = "new" NON_EMPTY_BASE = "nonEmptyBase" Idx = int class OpType(Enum): Call = "Call" Struct = "Struct" Value = "#" class DataflowFn(Enum): Find = "find" Abandon = "abandon" Revise = "ReviseConstraint" Refer = "refer" RoleConstraint = "roleConstraint" Get = "get" def idx_str(idx: Idx) -> str: return f"[{idx}]" def is_idx_str(s: str) -> bool: return s.startswith("[") and s.endswith("]") def unwrap_idx_str(s: str) -> int: return int(s[1:-1]) def is_struct_op_schema(name: str) -> bool: if len(name) == 0: return False return re.match(r"[A-Z]", name[0]) is not None def get_named_args(e: Expression) -> List[Tuple[str, Optional[str]]]: if isinstance(e.op, BuildStructOp): bso = e.op zeroth_field = [] if bso.empty_base else [NON_EMPTY_BASE] fields = zeroth_field + list(bso.op_fields) else: fields = [f"arg{i}" for i in range(len(e.arg_ids))] return list(zip(fields, e.arg_ids)) def mk_constraint( tpe: str, args: List[Tuple[Optional[str], int]], idx: Idx, ) -> Tuple[Expression, Idx]: return mk_struct_op(schema=f"Constraint[{tpe.capitalize()}]", args=args, idx=idx) def mk_equality_constraint(val: int, idx: Idx) -> Tuple[Expression, Idx]: return mk_call_op(name="?=", args=[val], idx=idx) def mk_unset_constraint(idx: Idx) -> Tuple[Expression, Idx]: return mk_struct_op(schema="EmptyConstraint", args=[], idx=idx) def mk_salience(tpe: str, idx: Idx) -> Tuple[List[Expression], Idx]: constraint_expr, constraint_idx = mk_constraint(tpe=tpe, args=[], idx=idx) salience_expr, idx = mk_call_op( name=DataflowFn.Refer.value, args=[constraint_idx], idx=constraint_idx ) return [constraint_expr, salience_expr], idx def mk_salient_action(idx: Idx) -> Tuple[List[Expression], Idx]: path_expr, path_idx = mk_value_op(schema="Path", value="output", idx=idx,) intension_expr, intension_idx = mk_call_op( name=DataflowFn.RoleConstraint.value, args=[path_idx], idx=path_idx, ) return [path_expr, intension_expr], intension_idx
MIT License
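A small usage sketch for `mk_revise`, assuming the module is importable as `dataflow.core.program_utils` (matching the record path). The integer arguments are placeholder expression indices; `idx` is the index of the last expression already built, following the convention of the other `mk_*` helpers shown in the context.

from dataflow.core.program_utils import mk_revise

# Revise the constraint at expression [2] inside the computation rooted at [1],
# replacing it with the constraint at [3].
revise_expr, next_idx = mk_revise(
    root_location_idx=1, old_location_idx=2, new_idx=3, idx=3,
)
print(revise_expr, next_idx)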
jazzband/sorl-thumbnail
sorl/thumbnail/shortcuts.py
delete
python
def delete(file_, delete_file=True):
    return default.backend.delete(file_, delete_file)
A shortcut for the Backend ``delete`` method
https://github.com/jazzband/sorl-thumbnail/blob/8f492eb17a77d9a3fc0e7204b02c06823364f50a/sorl/thumbnail/shortcuts.py#L11-L15
from sorl.thumbnail import default


def get_thumbnail(file_, geometry_string, **options):
    return default.backend.get_thumbnail(file_, geometry_string, **options)
BSD 3-Clause New or Revised License
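Usage sketch following sorl-thumbnail's documented shortcut style (the image path is a placeholder; if the shortcuts are not re-exported at package level in your version, import them from sorl.thumbnail.shortcuts instead):

from sorl.thumbnail import get_thumbnail, delete

im = get_thumbnail('myimage.jpg', '100x100', crop='center', quality=99)
print(im.url)

# Later: drop the cached thumbnails/key-value entries for the source image;
# with delete_file=False the original file itself is left in storage.
delete('myimage.jpg', delete_file=False)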
hopson97/android-apple-drop
scripts/projectile.py
update
python
def update(projectiles, projectileDirections, apples):
    removeMe = []
    for i in range(len(projectiles)):
        moveProjectile(projectileDirections[i], projectiles[i])
        testForAppleProjectileCollision(projectiles[i], apples)
        if isOutOfBounds(projectiles[i].getCenter()):
            removeMe.append(i)
    '''
    for x in removeMe:
        projectiles[i].undraw()
        projectileDirections.pop(x)
        projectiles.pop(x)
    '''
Updates the player's projectiles
https://github.com/hopson97/android-apple-drop/blob/643999a5dee8caacc8225b358067f5fff23228b9/scripts/projectile.py#L30-L45
import graphics as gfx import common import vector import apple as appleFuncs SPEED = 12 def testForAppleProjectileCollision(projectile, apples): for apple in apples[:]: appleCenter = apple.getCenter() projCenter = projectile.getCenter() if vector.distanceBetween(appleCenter, projCenter) < appleFuncs.DIAMETER: appleFuncs.removeApple(apples, apple) def moveProjectile(direction, projectile): dx = direction.getX() dy = direction.getY() projectile.move(dx, dy) def isOutOfBounds(centre): x = centre.getX() y = centre.getY() d = appleFuncs.DIAMETER return x - d > common.WINDOW_WIDTH or x + d < 0 or y - d > common.WINDOW_HEIGHT or y + d < 0
Apache License 2.0
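In a game loop, `update` would be called once per frame with parallel lists of projectile shapes and direction vectors plus the current apples. A hypothetical sketch only; the lists and their contents are placeholders and the drawing/input handling from the rest of the game is omitted:

import projectile

projectiles = []   # gfx.Circle-like shapes created when the player fires
directions = []    # one direction vector per projectile, same order
apples = []        # apple shapes managed by the apple module

while True:                                        # simplified frame loop
    projectile.update(projectiles, directions, apples)
    # ... draw, spawn apples, handle input, etc.
    break                                          # placeholder so the sketch terminates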
kristerw/spirv-tools
spirv_tools/read_spirv.py
parse_functions
python
def parse_functions(binary, module):
    while True:
        op_name, _ = binary.get_next_opcode(peek=True, accept_eol=True)
        if op_name is None:
            return
        if op_name != 'OpFunction':
            raise ParseError('Expected an "OpFunction" instruction')
        function = parse_function(binary, module)
        module.append_function(function)
Parse all functions (i.e. rest of the module).
https://github.com/kristerw/spirv-tools/blob/ed2a5aca2100d28f2c61d471c42bc3dc2603ea4f/spirv_tools/read_spirv.py#L313-L323
import array from operator import itemgetter from spirv_tools import spirv from spirv_tools import ir class ParseError(Exception): class SpirvBinary(object): def __init__(self, words): if len(words) < 5: raise ParseError('File length shorter than header size') magic = words[0] if magic != ir.MAGIC: words.byteswap() magic = words[0] if magic != ir.MAGIC: raise ParseError('Incorrect magic: ' + format(magic, '#x')) version = words[1] if version != ir.VERSION: raise ParseError('Unknown version ' + str(version)) self.words = words self.idx = 5 self.length = 0 def get_next_opcode(self, peek=False, accept_eol=False): if self.idx == len(self.words): if accept_eol: return None, None else: raise ParseError('Unexpected end of file') opcode = self.words[self.idx] & 0xFFFF self.length = (self.words[self.idx] >> 16) - 1 if not peek: self.idx += 1 if opcode not in ir.OPCODE_TO_OPNAME: raise ParseError('Invalid opcode ' + str(opcode)) op_name = ir.OPCODE_TO_OPNAME[opcode] return op_name, ir.INST_FORMAT[op_name] def get_next_word(self, peek=False, accept_eol=False): if self.idx == len(self.words): if accept_eol: return None else: raise ParseError('Unexpected end of file') if self.length == 0: if accept_eol: return None else: raise ParseError('Incorrect instruction length') word = self.words[self.idx] if not peek: self.idx += 1 self.length -= 1 return word def expect_eol(self): if self.length != 0: raise ParseError('Spurius words after parsing instruction') def parse_literal_string(binary): result = [] while True: word = binary.get_next_word() for _ in range(4): octet = word & 255 if octet == 0: return ''.join(result) result.append(chr(octet)) word >>= 8 raise ParseError('bad encoding') def parse_id(binary, module, accept_eol=False): word = binary.get_next_word(accept_eol=accept_eol) if word is not None: if word in module.value_to_id: return module.value_to_id[word] new_id = ir.Id(module, word) module.value_to_id[word] = new_id return new_id else: return None def expand_mask(kind, value): result = [] if value != 0: mask_values = zip(spirv.spv[kind].values(), spirv.spv[kind].keys()) mask_values = sorted(mask_values, key=itemgetter(0)) for mask_number, mask_token in mask_values: if (mask_number & value) != 0: result.append(mask_token) value = value ^ mask_number if value != 0: raise ParseError('Invalid mask value') return result def parse_operand(binary, module, kind): if kind == 'Id': return [parse_id(binary, module)] elif kind == 'LiteralNumber': return [binary.get_next_word()] elif kind == 'LiteralString': return [parse_literal_string(binary)] elif kind == 'OptionalLiteralString': word = binary.get_next_word(peek=True, accept_eol=True) if word is None: return [] return [parse_literal_string(binary)] elif kind == 'VariableLiteralNumber' or kind == 'OptionalLiteralNumber': operands = [] while True: word = binary.get_next_word(accept_eol=True) if word is None: return operands operands.append(word) elif kind in ['VariableId', 'OptionalId']: operands = [] while True: tmp_id = parse_id(binary, module, accept_eol=True) if tmp_id is None: return operands operands.append(tmp_id) elif kind == 'VariableIdLiteralPair': operands = [] while True: tmp_id = parse_id(binary, module, accept_eol=True) if tmp_id is None: return operands operands.append(tmp_id) word = binary.get_next_word() operands.append(word) elif kind == 'VariableLiteralIdPair': operands = [] while True: word = binary.get_next_word(accept_eol=True) if word is None: return operands operands.append(word) tmp_id = parse_id(binary, module) 
operands.append(tmp_id) elif kind == 'OptionalMemoryAccessMask': val = binary.get_next_word(accept_eol=True) if val is None: return [] result = expand_mask(kind[8:], val) try: aligned_idx = result.index('Aligned') except ValueError: pass else: result[aligned_idx] = ( 'Aligned', binary.get_next_word(accept_eol=False)) return [result] elif kind[:8] == 'Optional' and kind[-4:] == 'Mask': val = binary.get_next_word(accept_eol=True) if val is None: return [] return [expand_mask(kind[8:], val)] elif kind in ir.MASKS: val = binary.get_next_word() return [expand_mask(kind, val)] elif kind in spirv.spv: val = binary.get_next_word() constants = spirv.spv[kind] for name in constants: if constants[name] == val: return [name] raise ParseError('Unknown "' + kind + '" value' + str(val)) raise ParseError('Unknown kind "' + kind + '"') def parse_instruction(binary, module): op_name, op_format = binary.get_next_opcode() operands = [] inst_type_id = None if op_format['type']: inst_type_id = parse_id(binary, module) result_id = None if op_format['result']: result_id = parse_id(binary, module) if result_id.inst is not None: raise ParseError('ID ' + str(result_id) + ' is already defined') for kind in op_format['operands']: operands = operands + parse_operand(binary, module, kind) binary.expect_eol() if op_name == 'OpFunction': return ir.Function(module, operands[0], operands[1], result_id=result_id) else: return ir.Instruction(module, op_name, inst_type_id, operands, result_id=result_id) def parse_global_instructions(binary, module): while True: op_name, _ = binary.get_next_opcode(peek=True, accept_eol=True) if op_name is None: return if op_name == 'OpFunction': return inst = parse_instruction(binary, module) module.insert_global_inst(inst) def parse_basic_block(binary, module, function): binary.get_next_opcode() basic_block_id = parse_id(binary, module) binary.expect_eol() basic_block = ir.BasicBlock(module, basic_block_id) while True: inst = parse_instruction(binary, module) if not isinstance(inst, ir.Instruction): raise ParseError('Invalid opcode OpFunction in basic block') if inst.op_name == 'OpLabel': raise ParseError('Invalid opcode OpLabel in basic block') basic_block.append_inst(inst) if inst.op_name in ir.BRANCH_INSTRUCTIONS: function.append_basic_block(basic_block) return def parse_function(binary, module): function = parse_instruction(binary, module) while True: op_name, _ = binary.get_next_opcode(peek=True) if op_name == 'OpLabel': parse_basic_block(binary, module, function) elif op_name == 'OpFunctionEnd': binary.get_next_opcode() binary.expect_eol() return function elif op_name == 'OpFunctionParameter': inst = parse_instruction(binary, module) function.append_parameter(inst) else: raise ParseError('Invalid opcode ' + op_name)
MIT License
tensorflow/mesh
mesh_tensorflow/experimental/unet.py
get_input_mtf_shapes
python
def get_input_mtf_shapes(dataset_str):
  if dataset_str == 'train':
    batch_dim = mtf.Dimension('batch', FLAGS.batch_size_train)
  else:
    assert dataset_str == 'eval'
    batch_dim = mtf.Dimension('batch', FLAGS.batch_size_eval)
  image_nx_dim = mtf.Dimension('image_nx_block', FLAGS.image_nx_block)
  image_ny_dim = mtf.Dimension('image_ny_block', FLAGS.image_ny_block)
  image_sx_dim = mtf.Dimension('image_sx_block',
                               FLAGS.ct_resolution // FLAGS.image_nx_block)
  image_sy_dim = mtf.Dimension('image_sy_block',
                               FLAGS.ct_resolution // FLAGS.image_ny_block)

  batch_spatial_dims = [batch_dim,
                        image_nx_dim, image_sx_dim,
                        image_ny_dim, image_sy_dim]
  if not FLAGS.sampled_2d_slices:
    image_sz_dim = mtf.Dimension('image_sz_block', FLAGS.ct_resolution)
    batch_spatial_dims += [image_sz_dim]

  image_c_dim = mtf.Dimension('image_c', FLAGS.image_c)
  mtf_image_shape = mtf.Shape(batch_spatial_dims + [image_c_dim])

  label_c_dim = mtf.Dimension('label_c', FLAGS.label_c)
  mtf_label_shape = mtf.Shape(batch_spatial_dims + [label_c_dim])

  return [mtf_image_shape, mtf_label_shape]
Returns a list of mtf.Shapes of input tensors.
https://github.com/tensorflow/mesh/blob/52a2332c3bb0aa5898caba7efecc8cfa0486276e/mesh_tensorflow/experimental/unet.py#L279-L306
from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import mesh_tensorflow as mtf import numpy as np from six.moves import range import tensorflow.compat.v1 as tf from mesh_tensorflow.experimental import data_aug_lib from tensorflow.python.platform import flags FLAGS = flags.FLAGS flags.DEFINE_boolean('sampled_2d_slices', False, 'Whether to build model on 2D CT slices instead of 3D.') flags.DEFINE_integer('ct_resolution', 128, 'Resolution of CT images along depth, height and ' 'width dimensions.') flags.DEFINE_integer('n_dataset_read_interleave', 16, 'The number of interleave processes.') flags.DEFINE_integer('n_dataset_processes', 16, 'The number of data augmentation processes.') flags.DEFINE_integer('batch_size_train', 32, 'Training batch size.') flags.DEFINE_integer('batch_size_eval', 32, 'Evaluation batch size.') flags.DEFINE_integer('image_nx_block', 8, 'The number of x blocks.') flags.DEFINE_integer('image_ny_block', 8, 'The number of y blocks.') flags.DEFINE_integer('image_c', 1, 'The number of input image channels. ' 'If sampled_2d_slices is False, image_c should be 1.') flags.DEFINE_integer('label_c', 3, 'The number of output classes.') flags.DEFINE_integer('pred_downsample', 2, 'Down-sampling the results by the factor of ' '`pred_downsample`, before outputing the results.') flags.DEFINE_boolean('output_ground_truth', True, 'Whether to return the ground truth tensor in Unet, ' 'in addition to returning the prediction tensor.') flags.DEFINE_integer('n_base_filters', 32, 'The number of filters.') flags.DEFINE_integer('network_depth', 4, 'The number of pooling layers.') flags.DEFINE_integer('n_conv_per_block', 2, 'The number of conv layers between poolings.') flags.DEFINE_boolean('with_batch_norm', True, 'Whether to use batch norm.') flags.DEFINE_float('dropout_keep_p', 0.5, 'Probability to keep activations.') flags.DEFINE_float('xen_liver_weight', 8, 'The weight of liver region pixels, ' 'when computing the cross-entropy loss') flags.DEFINE_float('xen_lesion_weight', 16, 'The weight of lesion region pixels, ' 'when computing the cross-entropy loss') flags.DEFINE_float('dice_loss_weight', 0.2, 'The weight of dice loss, ranges from 0 to 1') flags.DEFINE_float('dice_epsilon', 0.1, 'A small value that prevents 0 dividing.') flags.DEFINE_float('image_translate_ratio', 0.0, 'How much you want to translate the image and label, ' 'for data augmentation.') flags.DEFINE_float('image_transform_ratio', 0.0, 'How much you want to sheer the image and label, ' 'for data augmentation.') flags.DEFINE_float('image_noise_probability', 0.0, 'Probability of adding noise during data augmentation.') flags.DEFINE_float('image_noise_ratio', 0.0, 'How much random noise you want to add to CT images.') flags.DEFINE_float('image_corrupt_ratio_mean', 0.0, 'How much non-liver area you want to block-out in average.') flags.DEFINE_float('image_corrupt_ratio_stddev', 0.0, 'Std-dev of how much non-liver area you want to block-out.') flags.DEFINE_float('per_class_intensity_scale', 0.0, 'How much to scale intensities of lesion/non-lesion areas.') flags.DEFINE_float('per_class_intensity_shift', 0.0, 'How much to shift intensities of lesion/non-lesion areas.') flags.DEFINE_string('mtf_dtype', 'bfloat16', 'dtype for MeshTensorflow.') flags.DEFINE_string('layout', 'batch:cores, image_nx_block:rows, image_ny_block:columns', 'layout rules') flags.DEFINE_string('train_file_pattern', '', 'Path to CT scan training data.') 
flags.DEFINE_string('eval_file_pattern', '', 'Path to CT scan evalutation data.') def get_layout(): return mtf.convert_to_layout_rules(FLAGS.layout) def get_dataset_creator(dataset_str): if dataset_str == 'train': data_file_pattern = FLAGS.train_file_pattern.format(FLAGS.ct_resolution) shuffle = True interleave = True else: assert dataset_str == 'eval' data_file_pattern = FLAGS.eval_file_pattern.format(FLAGS.ct_resolution) shuffle = False interleave = False def _dataset_creator(): def _get_stacked_2d_slices(image_3d, label_3d): image_stack = [] label_stack = [] for begin_idx in range(0, FLAGS.ct_resolution - FLAGS.image_c + 1): slice_begin = [0, 0, begin_idx] slice_size = [FLAGS.ct_resolution, FLAGS.ct_resolution, FLAGS.image_c] image = tf.slice(image_3d, slice_begin, slice_size) slice_begin = [0, 0, begin_idx + FLAGS.image_c // 2] slice_size = [FLAGS.ct_resolution, FLAGS.ct_resolution, 1] label = tf.slice(label_3d, slice_begin, slice_size) spatial_dims_w_blocks = [FLAGS.image_nx_block, FLAGS.ct_resolution // FLAGS.image_nx_block, FLAGS.image_ny_block, FLAGS.ct_resolution // FLAGS.image_ny_block] image = tf.reshape(image, spatial_dims_w_blocks + [FLAGS.image_c]) label = tf.reshape(label, spatial_dims_w_blocks) label = tf.cast(label, tf.int32) label = tf.one_hot(label, FLAGS.label_c) data_dtype = tf.as_dtype(FLAGS.mtf_dtype) image = tf.cast(image, data_dtype) label = tf.cast(label, data_dtype) image_stack.append(image) label_stack.append(label) return tf.stack(image_stack), tf.stack(label_stack) def _parser_fn(serialized_example): features = {} features['image/ct_image'] = tf.FixedLenFeature([], tf.string) features['image/label'] = tf.FixedLenFeature([], tf.string) parsed = tf.parse_single_example(serialized_example, features=features) spatial_dims = [FLAGS.ct_resolution] * 3 if FLAGS.sampled_2d_slices: noise_shape = [FLAGS.ct_resolution] * 2 + [FLAGS.image_c] else: noise_shape = [FLAGS.ct_resolution] * 3 image = tf.decode_raw(parsed['image/ct_image'], tf.float32) label = tf.decode_raw(parsed['image/label'], tf.float32) if dataset_str != 'train': image = tf.clip_by_value(image / 1024.0 + 0.5, 0, 1) image = tf.reshape(image, spatial_dims) label = tf.reshape(label, spatial_dims) if dataset_str == 'eval' and FLAGS.sampled_2d_slices: return _get_stacked_2d_slices(image, label) if FLAGS.sampled_2d_slices: begin_idx = tf.random_uniform( shape=[], minval=0, maxval=FLAGS.ct_resolution - FLAGS.image_c + 1, dtype=tf.int32) slice_begin = [0, 0, begin_idx] slice_size = [FLAGS.ct_resolution, FLAGS.ct_resolution, FLAGS.image_c] image = tf.slice(image, slice_begin, slice_size) label = tf.slice(label, slice_begin, slice_size) if dataset_str == 'train': for flip_axis in [0, 1, 2]: image, label = data_aug_lib.maybe_flip(image, label, flip_axis) image, label = data_aug_lib.maybe_rot180(image, label, static_axis=2) image = data_aug_lib.intensity_shift( image, label, FLAGS.per_class_intensity_scale, FLAGS.per_class_intensity_shift) image = data_aug_lib.image_corruption( image, label, FLAGS.ct_resolution, FLAGS.image_corrupt_ratio_mean, FLAGS.image_corrupt_ratio_stddev) image = data_aug_lib.maybe_add_noise( image, noise_shape, 1, 4, FLAGS.image_noise_probability, FLAGS.image_noise_ratio) image, label = data_aug_lib.projective_transform( image, label, FLAGS.ct_resolution, FLAGS.image_translate_ratio, FLAGS.image_transform_ratio, FLAGS.sampled_2d_slices) if FLAGS.sampled_2d_slices: label = tf.slice(label, [0, 0, FLAGS.image_c // 2], [FLAGS.ct_resolution, FLAGS.ct_resolution, 1]) spatial_dims_w_blocks = 
[FLAGS.image_nx_block, FLAGS.ct_resolution // FLAGS.image_nx_block, FLAGS.image_ny_block, FLAGS.ct_resolution // FLAGS.image_ny_block] if not FLAGS.sampled_2d_slices: spatial_dims_w_blocks += [FLAGS.ct_resolution] image = tf.reshape(image, spatial_dims_w_blocks + [FLAGS.image_c]) label = tf.reshape(label, spatial_dims_w_blocks) label = tf.cast(label, tf.int32) label = tf.one_hot(label, FLAGS.label_c) data_dtype = tf.as_dtype(FLAGS.mtf_dtype) image = tf.cast(image, data_dtype) label = tf.cast(label, data_dtype) return image, label dataset_fn = functools.partial(tf.data.TFRecordDataset, compression_type='GZIP') dataset = tf.data.Dataset.list_files(data_file_pattern, shuffle=shuffle).repeat() if interleave: dataset = dataset.apply( tf.data.experimental.parallel_interleave( lambda file_name: dataset_fn(file_name).prefetch(1), cycle_length=FLAGS.n_dataset_read_interleave, sloppy=True)) else: dataset = dataset.apply( tf.data.experimental.parallel_interleave( lambda file_name: dataset_fn(file_name).prefetch(1), cycle_length=1, sloppy=False)) if shuffle: dataset = dataset.shuffle(FLAGS.n_dataset_processes).map( _parser_fn, num_parallel_calls=FLAGS.n_dataset_processes) else: dataset = dataset.map(_parser_fn) if dataset_str == 'eval' and FLAGS.sampled_2d_slices: dataset = dataset.unbatch() return dataset return _dataset_creator
Apache License 2.0
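The function reads every size from the absl/TF flags defined in the context, so it is only meaningful after flags are parsed. A hedged usage sketch:

from mesh_tensorflow.experimental import unet

# Assumes the FLAGS declared in unet.py have been parsed (e.g. via
# tf.app.run / absl.app.run), so the defaults shown above are in effect.
image_shape, label_shape = unet.get_input_mtf_shapes('train')
print(image_shape)   # mtf.Shape with batch/block/spatial/channel dims
print(label_shape)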
openebs/elves
ansible/files/generate-inventory.py
appendRemoteKeyInLocal
python
def appendRemoteKeyInLocal(key, authorized_key_path, ip):
    key_append_cmd = 'echo "%s" >> %s' %(key, authorized_key_path)
    executeCmd(key_append_cmd)
    logging.debug ("******** Remote key for %s has been added into authorized_keys in LocalHost ********", ip)
Adds remote key as an authorized key into localhost
https://github.com/openebs/elves/blob/04fc973c8a7b6fcc88e3e3f3746f069c3b21e67a/ansible/files/generate-inventory.py#L38-L43
import os import sys from utils import sshToOtherClient, executeCmd import subprocess import logging import argparse import configparser import warnings import paramiko import re def getLocalKey(cmd, path): executeCmd(cmd) out = subprocess.Popen ("cat" + " " + path, shell=True, stdout=subprocess.PIPE) key = out.stdout.read().rstrip('\n') logging.debug ("******** Local key has been generated successfully : %s ********", key) return key def getRemoteKey(cmd, path, ip, user, passwd): remote_key_generation_result = sshToOtherClient (ip, user, passwd, cmd) showKeyCmd = 'cat %s' %(path) remote_key = sshToOtherClient (ip, user, passwd, showKeyCmd) logging.debug ("******** Remote key for %s has been generated successfully : %s ********", ip, remote_key) return remote_key def appendLocalKeyInRemote(key, authorized_key_path, ip, user, passwd): key_append_cmd = 'echo "%s" >> %s' %(key, authorized_key_path) sshToOtherClient (ip, user, passwd, key_append_cmd) logging.debug ("******** Local key has been added into authorized_keys in %s ********", ip)
Apache License 2.0
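Taken together with the helpers in the context, the intended flow inside the same script is roughly: generate a key pair on the remote host, read its public key back, and append it to the local authorized_keys. A sketch with placeholder host credentials, paths, and keygen command (none of these values come from the record):

keygen_cmd = "ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa <<< y"   # placeholder command
pub_key_path = '~/.ssh/id_rsa.pub'
authorized_keys = '~/.ssh/authorized_keys'

remote_key = getRemoteKey(keygen_cmd, pub_key_path, '10.0.0.5', 'ubuntu', 'secret')
appendRemoteKeyInLocal(remote_key, authorized_keys, '10.0.0.5')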
ucloud/ucloud-sdk-python2
ucloud/testing/utest.py
Step.__init__
python
def __init__(
        self,
        invoker,
        max_retries=0,
        retry_interval=0,
        startup_delay=0,
        retry_for=(CompareError, ValueNotFoundError),
        fast_fail=False,
        validators=None,
        **kwargs
    ):
        self.invoker = invoker
        self.max_retries = max_retries
        self.retry_interval = retry_interval
        self.startup_delay = startup_delay
        self.retry_for = retry_for
        self.fast_fail = fast_fail
        self.validators = validators or (lambda _: [])
        self.extras = kwargs
Step is the test step in a test scenario

:param invoker: invoker is a callable function
:param max_retries: the maximum retry number by the `retry_for` exception,
    it will resolve the flaky testing case
:param retry_interval: the interval between twice retrying
:param retry_for: the exceptions to retrying
:param startup_delay: the delay seconds before any action execution
:param fast_fail: if fast fail is true, the test will fail when got unexpected exception
:return:
https://github.com/ucloud/ucloud-sdk-python2/blob/90fb43198df73a78d64bbd98675dc7b302856057/ucloud/testing/utest.py#L13-L42
import logging
import time

from ucloud.client import Client
from ucloud.testing import op
from ucloud.testing.exc import ValueNotFoundError, CompareError, ValidateError

logger = logging.getLogger(__name__)


class Step(object):
Apache License 2.0
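A construction sketch for `Step`: the invoker is any callable, everything else is optional. The invoker below is hypothetical, and the exact call signature the scenario runner uses is not shown in this record:

from ucloud.testing.utest import Step

def create_vpc(*args, **kwargs):
    # hypothetical action; a real invoker would call the UCloud client
    # and return the API response
    return {"RetCode": 0}

step = Step(
    create_vpc,
    max_retries=3,
    retry_interval=5,
    startup_delay=1,
    fast_fail=True,
)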
mikedacre/fyrd
fyrd/run.py
normalize_imports
python
def normalize_imports(imports, prot=True):
    out_impts  = []
    prot_impts = []
    path_impts = []
    imports = listify(imports)

    if not imports:
        return []

    for imp in imports:
        if not isinstance(imp, _str):
            raise ValueError('All imports must be strings')
        if imp.startswith('try:'):
            prot_impts.append(imp.rstrip())
        elif imp.startswith('import') or imp.startswith('from'):
            out_impts.append(imp.rstrip())
        elif imp.startswith('sys.path.append') or imp.startswith('sys.path.insert'):
            path_impts.append(imp.rstrip())
        else:
            if imp.startswith('@'):
                continue
            out_impts.append('import {}'.format(imp))

    if prot:
        for imp in out_impts:
            prot_impts.append(PROT_IMPT.format(imp))
        out = prot_impts
    else:
        out = out_impts + prot_impts

    out = list(set(out))

    if path_impts:
        out = list(set(path_impts)) + out

    return out
Take a heterogeneous list of imports and normalize it.

Parameters
----------
imports : list
    A list of strings, formatted differently.
prot : bool
    Protect imports with try..except blocks

Returns
-------
list
    A list of strings that can be used for imports
https://github.com/mikedacre/fyrd/blob/6445719bfdcb3358a597300a25384acd8d3df80b/fyrd/run.py#L943-L993
from __future__ import with_statement import os as _os import re as _re import sys as _sys import inspect as _inspect import argparse as _argparse from collections import OrderedDict as _OD import bz2 import gzip from subprocess import Popen from subprocess import PIPE from time import sleep from glob import glob as _glob from six import text_type as _txt from six import string_types as _str from six import integer_types as _int from six.moves import input as _get_input from tqdm import tqdm, tqdm_notebook try: if str(type(get_ipython())) == "<class 'ipykernel.zmqshell.ZMQInteractiveShell'>": _pb = tqdm_notebook else: _pb = tqdm except NameError: _pb = tqdm from . import logme as _logme STRPRSR = _re.compile(r'{(.*?)}') def get_pbar(iterable, name=None, unit=None, **kwargs): from . import conf show_pb = bool(conf.get_option('queue', 'progressbar', True)) if 'desc' in kwargs: dname = kwargs.pop('desc') name = name if name else dname if 'disable' in kwargs: disable = kwargs['disable'] else: disable = False if show_pb else True return _pb(iterable, desc=name, unit=unit, disable=disable, **kwargs) class CustomFormatter(_argparse.ArgumentDefaultsHelpFormatter, _argparse.RawDescriptionHelpFormatter): pass class CommandError(Exception): pass def string_getter(string): locs = STRPRSR.findall(string) ints = {int(i) for i in locs if i.isdigit()} strs = {i for i in locs if not i.isdigit()} if ints and '{}' in string: raise ValueError('Cannot parse string with both numbered and ' 'unnumbered braces') return ints, strs def parse_glob(string, get_vars=None): get_vars = listify(get_vars) test_string = STRPRSR.sub('*', string) files = _glob(test_string) if not get_vars: return _OD([(f, None) for f in files]) results = _OD([(f, {}) for f in files]) int_vars, str_vars = string_getter(string) if '{}' in string or int_vars: raise ValueError('Cannot have numeric placeholders in file strings ', "i.e. 
no '{0}', '{1}', '{}', etc") for var in get_vars: if var not in str_vars: _logme.log('Variable {0} not in search string: {1}' .format(var, string), 'warn') continue test_var = var if var.startswith('{') else '{' + var + '}' test_string = STRPRSR.sub('.*?', string.replace(test_var, '(.*?)')) test_string = _re.sub(r'([^.])\*', r'\1.*', test_string) test_string = _re.sub(r'^\*', r'.*', test_string) if test_string.endswith('?'): test_string = test_string[:-1] if test_string.endswith('?)'): test_string = test_string[:-2] + ')' test_regex = _re.compile(test_string) for fl in files: vrs = test_regex.findall(fl) ulen = len(set(vrs)) if ulen != 1: _logme.log('File {0} has multiple values for {1}: {2}' .format(fl, test_var, vrs), 'critical') raise ValueError('Invalid file search string') if ulen == 0: _logme.log('File {0} has no results for {1}' .format(fl, test_var), 'error') continue results[fl][var] = vrs[0] return results def file_getter(file_strings, variables, extra_vars=None, max_count=None): extra_vars = listify(extra_vars) if extra_vars else [] files = [] count = 0 for file_string in file_strings: files.append(parse_glob(file_string, variables)) count += 1 if max_count and count == max_count: break var_vals = {i: [] for i in variables} empty = {} for f in files: for pvars in f.values(): if not pvars: continue for var, val in pvars.items(): if not val: if var in empty: empty[var].append(f) else: empty[var] = [f] else: var_vals[var].append(val) fail = False if empty: _logme.log('The following variables had no ' 'result in some files, cannot continue:\n' '\n'.join( ['{0} files: {1}'.format(i, j) for i, j in empty.items()] ), 'critical') fail = True bad = [] results = [] for file_info in zip(*[fl.items() for fl in files]): good = True final_dict = {} final_files = tuple([_os.path.abspath(fl[0]) for fl in file_info]) bad_dcts = [] for dct in [f[1] for f in file_info]: if not dct: continue final_dict.update(dct) bad_dcts.append(dct) for var, val in dct.items(): for fl in file_info: if var in fl[1] and val != fl[1][var]: good = False if not good: bad.append((final_files, bad_dcts)) break for extra_var in extra_vars: try: evari = extra_var.split(':') if len(evari) == 2: final_dict[evari[1]] = evari[2] continue var, orig_var, parse_str, sub_str = evari except ValueError: _logme.log( '{} is malformatted should be: '.format(extra_var) + 'either new_var:orig_var:regex:sub ' 'or variable:value', 'critical' ) raise if orig_var not in final_dict: raise ValueError( 'Extra variable {0} sets {1} as '.format(var, orig_var) + 'the original variable, but it is not in the dict for ' '{0}'.format(final_files) ) final_dict[var] = _re.sub(parse_str, sub_str, final_dict[orig_var]) results.append((final_files, final_dict)) if empty: _logme.log('The following file combinations had mismatched variables, ' 'cannot continue:\n' '\n'.join( ['{0} dicts: {1}'.format(i, j) for i, j in bad] ), 'critical') fail = True if fail: raise ValueError('File parsing failure') return results def listify(iterable): if isinstance(iterable, list): return iterable if isinstance(iterable, (_str, _txt, _int, float)): return [iterable] if not iterable: return [] try: iterable = list(iterable) except TypeError: iterable = [iterable] return iterable def merge_lists(lists): outlist = [] for lst in listify(lists): outlist += lst return outlist def write_iterable(iterable, outfile): with open_zipped(outfile, 'w') as fout: fout.write('\n'.join(iterable)) def indent(string, prefix=' '): out = '' for i in string.split('\n'): out += 
'{}{}\n'.format(prefix, i) return out def is_exc(x): return bool(isinstance(x, tuple) and len(x) == 3 and issubclass(BaseException, x[0])) def open_zipped(infile, mode='r'): mode = mode[0] + 't' if hasattr(infile, 'write'): return infile if isinstance(infile, _str): if infile.endswith('.gz'): return gzip.open(infile, mode) if infile.endswith('.bz2'): if hasattr(bz2, 'open'): return bz2.open(infile, mode) else: return bz2.BZ2File(infile, mode) return open(infile, mode) def exp_file(infile): return _os.path.expandvars( _re.sub( '~', '$HOME', infile ) ) def cmd_or_file(string): if _os.path.isfile(string): with open_zipped(string) as fin: command = fin.read().strip() else: command = string.strip() return command def block_read(files, size=65536): while True: b = files.read(size) if not b: break yield b def count_lines(infile, force_blocks=False): if which('wc') and not force_blocks: _logme.log('Using wc', 'debug') if infile.endswith('.gz'): cat = 'zcat' elif infile.endswith('.bz2'): cat = 'bzcat' else: cat = 'cat' command = "{cat} {infile} | wc -l | awk '{{print $1}}'".format( cat=cat, infile=infile ) return int(cmd(command)[1]) else: _logme.log('Using block read', 'debug') with open_zipped(infile) as fin: return sum(bl.count("\n") for bl in block_read(fin)) def split_file(infile, parts, outpath='', keep_header=False): _logme.log('Getting line count', 'debug') num_lines = int(count_lines(infile)/int(parts)) + 1 cnt = 0 currjob = 1 suffix = '.split_' + str(currjob).zfill(4) + '.' + infile.split('.')[-1] file_name = _os.path.basename(infile) run_file = _os.path.join(outpath, file_name + suffix) outfiles = [run_file] _logme.log('Splitting file', 'debug') with open_zipped(infile) as fin: header = fin.readline() if keep_header else '' sfile = open_zipped(run_file, 'w') sfile.write(header) for line in fin: cnt += 1 if cnt < num_lines: sfile.write(line) elif cnt == num_lines: sfile.write(line) sfile.close() currjob += 1 suffix = '.split_' + str(currjob).zfill(4) + '.' + infile.split('.')[-1] run_file = _os.path.join(outpath, file_name + suffix) sfile = open_zipped(run_file, 'w') outfiles.append(run_file) sfile.write(header) cnt = 0 sfile.close() _logme.log('Split files: {}'.format(outfiles), 'debug') return tuple(outfiles) def is_exe(fpath): return _os.path.isfile(fpath) and _os.access(fpath, _os.X_OK) def file_type(infile): name_parts = infile.split('.') if name_parts[-1] == 'gz' or name_parts[-1] == 'bz2': name_parts.pop() return name_parts[-1] def is_file_type(infile, types): if hasattr(infile, 'write'): infile = infile.name types = listify(types) for typ in types: if file_type(infile) == typ: return True return False def cmd(command, args=None, stdout=None, stderr=None, tries=1): tries = int(tries) assert tries > 0 count = 1 if isinstance(command, (list, tuple)): if args: raise ValueError('Cannot submit list/tuple command as ' + 'well as args argument') command = ' '.join(command) assert isinstance(command, _str) if args: if isinstance(args, (list, tuple)): args = ' '.join(args) args = command + args else: args = command _logme.log('Running {} as {}'.format(command, args), 'verbose') while True: try: pp = Popen(args, shell=True, universal_newlines=True, stdout=PIPE, stderr=PIPE) except FileNotFoundError: _logme.log('{} does not exist'.format(command), 'critical') raise out, err = pp.communicate() code = pp.returncode if code == 0 or count == tries: break _logme.log('Command {} failed with code {}, retrying.' 
.format(command, code), 'warn') sleep(1) count += 1 _logme.log('{} completed with code {}'.format(command, code), 'debug') if stdout: with open_zipped(stdout, 'w') as fout: fout.write(out) if stderr: with open_zipped(stderr, 'w') as fout: fout.write(err) return code, out.rstrip(), err.rstrip() def export_run(function, args, kwargs): kwargs['imports'] = export_imports(function, kwargs) return function(*args, **kwargs) def which(program): fpath, program = _os.path.split(program) if fpath: if is_exe(program): return _os.path.abspath(program) else: for path in _os.environ["PATH"].split(_os.pathsep): path = path.strip('"') exe_file = _os.path.join(path, program) if is_exe(exe_file): return _os.path.abspath(exe_file) return None def check_pid(pid): try: _os.kill(pid, 0) except OSError: return False else: return True def replace_argument(args, find_string, replace_string, error=True): double = False if isinstance(args, list): args, kwargs = args double = True elif isinstance(args, tuple): kwargs = None elif isinstance(args, dict): kwargs = args.copy() args = None else: raise ValueError('args must be list/tuple/dict, is {}\nval: {}' .format(type(args), args)) if not args and not kwargs: msg = 'No arguments or keyword arguments found' if error: raise ValueError(msg) else: _logme.log(msg, 'warn') if double: return None, None else: return None found = False newargs = tuple() if args: for arg in listify(args): if isinstance(arg, _str) and find_string in arg: arg = arg.format(**{find_string.strip('{}'): replace_string}) found = True newargs += (arg,) newkwds = {} if kwargs: for arg, value in kwargs.items(): if isinstance(value, _str) and find_string in value: value = replace_string found = True newkwds[arg] = value if found is not True: msg = 'Could not find {}'.format(find_string) if error: raise ValueError(msg) else: _logme.log(msg, 'warn') if double: return None, None else: return None if double: return [newargs, newkwds] else: if newargs: return newargs else: return newkwds def opt_split(opt, split_on): opt = listify(opt) split_on = listify(split_on) final_list = [] for o in opt: final_list += _re.split('[{}]'.format(''.join(split_on)), o) return list(set(final_list)) def get_yesno(message, default=None): if default: if default.lower().startswith('y'): tailstr = '[Y/n] ' elif default.lower().startswith('n'): tailstr = '[y/N] ' else: raise ValueError('Invalid default') else: tailstr = '[y/n] ' message = message + tailstr if message.endswith(' ') else message + ' ' + tailstr ans = get_input(message, 'yesno', default) if ans.lower().startswith('y'): return True elif ans.lower().startswith('n'): return False else: raise ValueError('Invalid response: {}'.format(ans)) def get_input(message, valid_answers=None, default=None): if not message.endswith(' '): message = message + ' ' if valid_answers: if isinstance(valid_answers, _str): if valid_answers.lower() == 'yesno': valid_answers = ['yes', 'no', 'y', 'n'] else: valid_answers = [valid_answers] if not isinstance(valid_answers, (list, tuple, set, frozenset)): _logme.log('valid_answers must be a list, is {}' .format(type(valid_answers)), 'critical') raise ValueError('Invalid argument') valid_answers = [i.lower() for i in valid_answers] while True: ans = _get_input(message) if not ans and default: return default if ans.lower() in valid_answers: return ans else: _logme.log('Invalid response to input question', 'debug') _sys.stderr.write('Invalid response: {}\n'.format(ans) + 'Valid responses: {}\n' .format(valid_answers) + 'Please try again.\n') else: return 
_get_input(message) def syspath_fmt(syspaths): outlist = [] for pth in listify(syspaths): if 'sys.path' in pth: outlist.append(pth) continue if _os.path.exists(pth): outlist.append("sys.path.append('{}')".format( _os.path.abspath(pth) )) else: raise OSError('Paths must exist, {} does not.' .format(pth)) return '\n'.join(outlist) PROT_IMPT = """\ try: {} except ImportError: pass """
MIT License
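Behavior sketch for `normalize_imports`, assuming `fyrd` is installed: with `prot=True` every plain import is wrapped in the try/except ImportError template (`PROT_IMPT` in the context), while `sys.path` lines pass through untouched; ordering is not guaranteed because duplicates are removed via a set.

from fyrd.run import normalize_imports

imports = ['numpy', 'from os import path', 'sys.path.append("/opt/libs")']
for line in normalize_imports(imports, prot=True):
    print(line)
# 'sys.path.append(...)' is placed first and unchanged; 'numpy' and
# 'from os import path' come back wrapped in the try/except template,
# in arbitrary order.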
marius-juston/autocnn
auto_cnn/cnn_structure.py
Layer.tensor_rep
python
def tensor_rep(self, inputs: tf.keras.layers.Layer) -> tf.keras.layers.Layer:
        pass
Returns the keras Layer representation of the object

:param inputs: The previous layer to be passed in as input for the next
:return: the keras Layer representation of the object
https://github.com/marius-juston/autocnn/blob/f5e8351a6d2021c9e9047959d6571ed9d7990a85/auto_cnn/cnn_structure.py#L11-L18
import os
from abc import abstractmethod, ABC
from typing import Iterable, Callable, Union, Sequence, Dict, Any, Tuple, List

import tensorflow as tf
from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2


class Layer(ABC):
    @abstractmethod
MIT License
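Since `Layer` is abstract, it is used by subclassing and implementing `tensor_rep`. A minimal hypothetical subclass (not part of the package):

import tensorflow as tf
from auto_cnn.cnn_structure import Layer

class ReluConv(Layer):
    """Hypothetical layer: a 3x3 convolution followed by ReLU."""

    def tensor_rep(self, inputs: tf.keras.layers.Layer) -> tf.keras.layers.Layer:
        return tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu')(inputs)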
opendilab/gobigger
gobigger/managers/base_manager.py
BaseManager.refresh
python
def refresh(self):
        raise NotImplementedError
Overview:
    Refresh. Used to refresh the balls in management. Such as replenishing eaten food balls
https://github.com/opendilab/gobigger/blob/2fae8678662a9be9d5d5797fd09815a9c920d215/gobigger/managers/base_manager.py#L36-L41
import math
import logging
from abc import ABC, abstractmethod

from easydict import EasyDict
from pygame.math import Vector2

from gobigger.utils import format_vector, Border
from gobigger.balls import FoodBall, ThornsBall, CloneBall, SporeBall


class BaseManager(ABC):

    def __init__(self, cfg, border):
        self.cfg = cfg
        self.border = border
        self.balls = {}
        self.ball_settings = self.cfg.ball_settings

    def get_balls(self):
        return self.balls.values()

    def add_balls(self, balls):
        raise NotImplementedError
Apache License 2.0
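Concrete managers are expected to override `refresh` (and `add_balls`). A hypothetical sketch of a food manager that tops the pool back up; the config key, the ball-identifier scheme, and the omitted FoodBall construction are all assumptions, not gobigger's real implementation:

import logging

from gobigger.managers.base_manager import BaseManager

class SimpleFoodManager(BaseManager):
    """Hypothetical subclass; the real food manager in gobigger differs."""

    def add_balls(self, balls):
        for ball in balls:
            # assumes id() is an acceptable key; adjust to the real ball API
            self.balls[id(ball)] = ball

    def refresh(self):
        # replenish eaten food balls up to a target count from the config;
        # creating real FoodBall instances is omitted in this sketch
        missing = self.cfg.get('num_init', 0) - len(self.balls)
        logging.debug('would spawn %d new food balls', max(missing, 0))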
karchinlab/2020plus
src/classify/python/generic_classifier.py
GenericClassifier.get_onco_pr_metrics
python
def get_onco_pr_metrics(self):
        return self.onco_precision_array, self.onco_recall_array, self.onco_mean_pr_auc
Simple get method for oncogene Precision-Recall metrics.
https://github.com/karchinlab/2020plus/blob/3a645e2dfedbb3857494e8e7f9cf30eb8f4e87cc/src/classify/python/generic_classifier.py#L464-L466
import src.utils.python.util as _utils import src.utils.python.math as mymath import src.features.python.feature_utils as futils import numpy as np from numpy import interp from sklearn import cross_validation import sklearn.metrics as metrics import pandas as pd class GenericClassifier(object): def __init__(self, total_iterations=5, classify_oncogene=True, classify_tsg=True, rseed=None): self.min_count = 0 self.prng = np.random.RandomState(rseed) self.rseed = rseed self.set_classes(oncogene=classify_oncogene, tsg=classify_tsg) self.set_total_iter(total_iterations) self._init_metrics() self.vogelsteins_oncogenes = _utils.oncogene_set self.vogelsteins_tsg = _utils.tsg_set def set_total_iter(self, myiterations): self.total_iter = myiterations def _filter_rows(self, df): row_sums = df.T.sum() filtered_df = df[row_sums > self.min_count] return filtered_df def train(self): self.x, self.y = futils.randomize(self.x, self.prng) futils.check_num_classes(self.y) self.clf.fit(self.x, self.y) def train_cv(self, k=10): self.num_pred = 0 self.test_fold_df = pd.DataFrame({l+1: 0 for l in range(self.total_iter)}, index=self.x.index) for i in range(self.total_iter): self.x, self.y = futils.randomize(self.x, self.prng) futils.check_num_classes(self.y) k_fold = cross_validation.StratifiedKFold(self.y, n_folds=k) for nfold, (train_ix, test_ix) in enumerate(k_fold): tmp_train_ix = self.x.iloc[train_ix].index tmp_test_ix = self.x.iloc[test_ix].index self.test_fold_df.loc[tmp_test_ix, i+1] = nfold + 1 if self.is_weighted_sample: num_train = len(train_ix) sample_weight = np.zeros(num_train) onco_ix = np.nonzero(self.y.ix[tmp_train_ix]==self.onco_num)[0] tsg_ix = np.nonzero(self.y.ix[tmp_train_ix]==self.tsg_num)[0] other_ix = np.nonzero(self.y.ix[tmp_train_ix]==self.other_num)[0] sample_weight[onco_ix] = 1. / len(onco_ix) sample_weight[tsg_ix] = 1. / len(tsg_ix) sample_weight[other_ix] = 1. 
/ len(other_ix) self.clf.fit(self.x.ix[tmp_train_ix].copy(), self.y.ix[tmp_train_ix].copy(), sample_weight=sample_weight) else: self.clf.fit(self.x.loc[tmp_train_ix].copy(), self.y.loc[tmp_train_ix].copy()) self.clf.append_fold_result() self.clf.append_cv_result() self.num_pred += 1 self.clf.set_cv_fold(self.test_fold_df) def predict(self): self.num_pred = 1 num_genes = len(self.y) proba_ = self.clf.predict_proba(self.x) onco_prob = proba_[:, self.onco_num] tsg_prob = proba_[:, self.tsg_num] other_prob = 1 - (onco_prob + tsg_prob) return onco_prob, tsg_prob, other_prob def predict_cv(self, k=10): self.num_pred = 0 prediction = pd.Series(index=self.x.index) onco_prob = pd.Series(index=self.x.index).fillna(0) tsg_prob = pd.Series(index=self.x.index).fillna(0) new_genes = self.x.index.difference(self.clf.cv_folds.index) for i in range(self.total_iter): test_feat = self.x.ix[new_genes] if not test_feat.empty: self.clf.set_model(i+1, 1) tmp_prob = self.clf.predict_proba(test_feat) onco_prob.ix[new_genes] += tmp_prob[:, self.onco_num] tsg_prob.ix[new_genes] += tmp_prob[:, self.tsg_num] col = 'X{0}'.format(i+1) for j in range(k): self.clf.set_model(i+1, j+1) good_ix = self.clf.cv_folds[self.clf.cv_folds[col]==j+1].index tmp_test_ix = self.x.index.intersection(good_ix) test_feat = self.x.ix[tmp_test_ix] if not test_feat.empty: tmp_prob = self.clf.predict_proba(test_feat) onco_prob.ix[tmp_test_ix] += tmp_prob[:, self.onco_num] tsg_prob.ix[tmp_test_ix] += tmp_prob[:, self.tsg_num] self.num_pred += 1 onco_prob /= self.num_pred tsg_prob /= self.num_pred other_prob = 1 - (onco_prob + tsg_prob) return onco_prob, tsg_prob, other_prob def kfold_validation(self, k=10): self.num_pred = 0 for i in range(self.total_iter): self.x, self.y = futils.randomize(self.x, self.prng) futils.check_num_classes(self.y) num_genes = len(self.y) onco_pred = np.zeros(num_genes) onco_prob = np.zeros(num_genes) tsg_pred = np.zeros(num_genes) tsg_prob = np.zeros(num_genes) overall_pred = np.zeros(num_genes) k_fold = cross_validation.StratifiedKFold(self.y, n_folds=k) for train_ix, test_ix in k_fold: if self.is_weighted_sample: num_train = len(train_ix) sample_weight = np.zeros(num_train) onco_ix = np.nonzero(self.y[train_ix]==self.onco_num)[0] tsg_ix = np.nonzero(self.y[train_ix]==self.tsg_num)[0] other_ix = np.nonzero(self.y[train_ix]==self.other_num)[0] sample_weight[onco_ix] = 1. / len(onco_ix) sample_weight[tsg_ix] = 1. / len(tsg_ix) sample_weight[other_ix] = 1. 
/ len(other_ix) self.clf.fit(self.x.iloc[train_ix].copy(), self.y.iloc[train_ix].copy(), sample_weight=sample_weight) else: self.clf.fit(self.x.iloc[train_ix].copy(), self.y.iloc[train_ix].copy()) y_pred = self.clf.predict(self.x.iloc[test_ix]) proba_ = self.clf.predict_proba(self.x.iloc[test_ix]) overall_pred[test_ix] = y_pred onco_pred[test_ix] = (y_pred==self.onco_num).astype(int) onco_prob[test_ix] = proba_[:, self.onco_num] tsg_pred[test_ix] = (y_pred==self.tsg_num).astype(int) tsg_prob[test_ix] = proba_[:, self.tsg_num] true_onco = (self.y==self.onco_num).astype(int) self._update_onco_metrics(true_onco, onco_pred, onco_prob) true_tsg = (self.y==self.tsg_num).astype(int) self._update_tsg_metrics(true_tsg, tsg_pred, tsg_prob) self._update_metrics(self.y, overall_pred, onco_prob, tsg_prob) self.num_pred += 1 self._on_finish() def kfold_prediction(self, k=10): self.num_pred = 0 prediction = pd.Series(index=self.y.index) onco_prob = pd.Series(index=self.y.index).fillna(0) tsg_prob = pd.Series(index=self.y.index).fillna(0) for i in range(self.total_iter): self.x, self.y = futils.randomize(self.x, self.prng) futils.check_num_classes(self.y) k_fold = cross_validation.StratifiedKFold(self.y, n_folds=k) for train_ix, test_ix in k_fold: tmp_train_ix = self.x.iloc[train_ix].index tmp_test_ix = self.x.iloc[test_ix].index if self.is_weighted_sample: num_train = len(train_ix) sample_weight = np.zeros(num_train) onco_ix = np.nonzero(self.y.ix[tmp_train_ix]==self.onco_num)[0] tsg_ix = np.nonzero(self.y.ix[tmp_train_ix]==self.tsg_num)[0] other_ix = np.nonzero(self.y.ix[tmp_train_ix]==self.other_num)[0] sample_weight[onco_ix] = 1. / len(onco_ix) sample_weight[tsg_ix] = 1. / len(tsg_ix) sample_weight[other_ix] = 1. / len(other_ix) self.clf.fit(self.x.ix[tmp_train_ix].copy(), self.y.ix[tmp_train_ix].copy(), sample_weight=sample_weight) else: self.clf.fit(self.x.ix[tmp_train_ix].copy(), self.y.ix[tmp_train_ix].copy()) tmp_prob = self.clf.predict_proba(self.x.ix[tmp_test_ix]) onco_prob.ix[tmp_test_ix] += tmp_prob[:, self.onco_num] tsg_prob.ix[tmp_test_ix] += tmp_prob[:, self.tsg_num] self.num_pred += 1 onco_prob /= self.num_pred tsg_prob /= self.num_pred other_prob = 1 - (onco_prob + tsg_prob) return onco_prob, tsg_prob, other_prob def _init_metrics(self): self.feature_importance = [] self.confusion_matrix = np.zeros((self.num_classes, self.num_classes)) self.num_pred = 0 num_points = 100 self.onco_f1_score = np.zeros(self.total_iter) self.onco_precision = np.zeros(self.total_iter) self.onco_recall = np.zeros(self.total_iter) self.onco_gene_count = np.zeros(self.total_iter) self.onco_tpr_array = np.zeros((self.total_iter, num_points)) self.onco_fpr_array = np.linspace(0, 1, num_points) self.onco_precision_array = np.zeros((self.total_iter, num_points)) self.onco_recall_array = np.linspace(0, 1, num_points) self.onco_threshold_array = np.zeros((self.total_iter, num_points)) self.tsg_f1_score = np.zeros(self.total_iter) self.tsg_precision = np.zeros(self.total_iter) self.tsg_recall = np.zeros(self.total_iter) self.tsg_gene_count = np.zeros(self.total_iter) self.tsg_tpr_array = np.zeros((self.total_iter, num_points)) self.tsg_fpr_array = np.linspace(0, 1, num_points) self.tsg_precision_array = np.zeros((self.total_iter, num_points)) self.tsg_recall_array = np.linspace(0, 1, num_points) self.driver_precision = np.zeros(self.total_iter) self.driver_recall = np.zeros(self.total_iter) self.driver_tpr_array = np.zeros((self.total_iter, num_points)) self.driver_fpr_array = np.linspace(0, 1, num_points) 
self.driver_precision_array = np.zeros((self.total_iter, num_points)) self.driver_recall_array = np.linspace(0, 1, num_points) self.driver_threshold_array = np.zeros((self.total_iter, num_points)) self.cancer_gene_count = np.zeros(self.total_iter) self.f1_score = np.zeros(self.total_iter) self.precision = np.zeros(self.total_iter) self.recall = np.zeros(self.total_iter) def _update_metrics(self, y_true, y_pred, onco_prob, tsg_prob): self.driver_gene_pred = pd.Series(y_pred, self.y.index) self.driver_gene_score = pd.Series(onco_prob+tsg_prob, self.y.index) prec, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred, average='macro') cancer_gene_pred = ((onco_prob + tsg_prob)>.5).astype(int) self.cancer_gene_count[self.num_pred] = np.sum(cancer_gene_pred) self.precision[self.num_pred] = prec self.recall[self.num_pred] = recall self.f1_score[self.num_pred] = fscore driver_prob = onco_prob + tsg_prob driver_true = (y_true > 0).astype(int) p, r, thresh = metrics.precision_recall_curve(driver_true, driver_prob) p, r, thresh = p[::-1], r[::-1], thresh[::-1] thresh = np.insert(thresh, 0, 1.0) self.driver_precision_array[self.num_pred, :] = interp(self.driver_recall_array, r, p) self.driver_threshold_array[self.num_pred, :] = interp(self.driver_recall_array, r, thresh) prec, recall, fscore, support = metrics.precision_recall_fscore_support(driver_true, cancer_gene_pred) self.driver_precision[self.num_pred] = prec[1] self.driver_recall[self.num_pred] = recall[1] fpr, tpr, thresholds = metrics.roc_curve(driver_true, driver_prob) self.driver_tpr_array[self.num_pred, :] = interp(self.driver_fpr_array, fpr, tpr) def _update_onco_metrics(self, y_true, y_pred, prob): self.onco_gene_pred = pd.Series(y_pred, self.y.index) self.onco_gene_score = pd.Series(prob, self.y.index) self.onco_gene_count[self.num_pred] = sum(y_pred) prec, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred) self.onco_precision[self.num_pred] = prec[self.onco_num] self.onco_recall[self.num_pred] = recall[self.onco_num] self.onco_f1_score[self.num_pred] = fscore[self.onco_num] self.logger.debug('Onco Iter %d: Precission=%s, Recall=%s, f1_score=%s' % ( self.num_pred + 1, str(prec), str(recall), str(fscore))) fpr, tpr, thresholds = metrics.roc_curve(y_true, prob) self.onco_tpr_array[self.num_pred, :] = interp(self.onco_fpr_array, fpr, tpr) p, r, thresh = metrics.precision_recall_curve(y_true, prob) p, r, thresh = p[::-1], r[::-1], thresh[::-1] thresh = np.insert(thresh, 0, 1.0) self.onco_precision_array[self.num_pred, :] = interp(self.onco_recall_array, r, p) self.onco_threshold_array[self.num_pred, :] = interp(self.onco_recall_array, r, thresh) def _update_tsg_metrics(self, y_true, y_pred, prob): self.tsg_gene_pred = pd.Series(y_pred, self.y.index) self.tsg_gene_score = pd.Series(prob, self.y.index) self.tsg_gene_count[self.num_pred] = sum(y_pred) prec, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred) tsg_col = 1 self.tsg_precision[self.num_pred] = prec[tsg_col] self.tsg_recall[self.num_pred] = recall[tsg_col] self.tsg_f1_score[self.num_pred] = fscore[tsg_col] self.logger.debug('Tsg Iter %d: Precission=%s, Recall=%s, f1_score=%s' % ( self.num_pred + 1, str(prec), str(recall), str(fscore))) fpr, tpr, thresholds = metrics.roc_curve(y_true, prob) self.tsg_tpr_array[self.num_pred, :] = interp(self.tsg_fpr_array, fpr, tpr) p, r, thresh = metrics.precision_recall_curve(y_true, prob) p, r, thresh = p[::-1], r[::-1], thresh[::-1] 
self.tsg_precision_array[self.num_pred, :] = interp(self.tsg_recall_array, r, p) def _on_finish(self): self.onco_mean_roc_auc = float(metrics.auc(self.onco_fpr_array, np.mean(self.onco_tpr_array, axis=0))) self.tsg_mean_roc_auc = float(metrics.auc(self.tsg_fpr_array, np.mean(self.tsg_tpr_array, axis=0))) self.driver_mean_roc_auc = float(metrics.auc(self.driver_fpr_array, np.mean(self.driver_tpr_array, axis=0))) self.onco_mean_pr_auc = float(metrics.auc(self.onco_recall_array, np.mean(self.onco_precision_array, axis=0))) self.tsg_mean_pr_auc = float(metrics.auc(self.tsg_recall_array, np.mean(self.tsg_precision_array, axis=0))) self.driver_mean_pr_auc = float(metrics.auc(self.driver_recall_array, np.mean(self.driver_precision_array, axis=0))) self.logger.debug('TSG: Precision=%s, Recall=%s, Fscore=%s' % ( np.mean(self.tsg_precision), np.mean(self.tsg_recall), np.mean(self.tsg_f1_score))) self.logger.debug('Oncogene: Precision=%s, Recall=%s, Fscore=%s' % ( np.mean(self.onco_precision), np.mean(self.onco_recall), np.mean(self.onco_f1_score))) self.logger.debug('Driver: Precision=%s, Recall=%s' % ( np.mean(self.driver_precision), np.mean(self.driver_recall))) def get_onco_roc_metrics(self): return self.onco_tpr_array, self.onco_fpr_array, self.onco_mean_roc_auc def get_tsg_roc_metrics(self): return self.tsg_tpr_array, self.tsg_fpr_array, self.tsg_mean_roc_auc
Apache License 2.0
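Usage sketch for the getter above: after cross-validation has filled the metric arrays, each row of the precision array is one iteration's curve, so a mean curve can be taken across iterations. `clf` is assumed to be a trained classifier built on GenericClassifier after running its validation loop; it is not constructed here.

import numpy as np

precisions, recalls, mean_pr_auc = clf.get_onco_pr_metrics()
mean_precision = np.mean(precisions, axis=0)   # average PR curve over iterations
print(mean_pr_auc, mean_precision.shape)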
microsoft/electionguard-python
src/electionguard_tools/factories/ballot_factory.py
BallotFactory.get_fake_ballot
python
def get_fake_ballot(
        self,
        internal_manifest: InternalManifest,
        ballot_id: str = None,
        with_trues=True,
    ) -> PlaintextBallot:
        if ballot_id is None:
            ballot_id = "some-unique-ballot-id-123"

        contests: List[PlaintextBallotContest] = []
        for contest in internal_manifest.get_contests_for(
            internal_manifest.ballot_styles[0].object_id
        ):
            contests.append(
                self.get_random_contest_from(contest, Random(), with_trues=with_trues)
            )

        fake_ballot = PlaintextBallot(
            ballot_id, internal_manifest.ballot_styles[0].object_id, contests
        )

        return fake_ballot
Get a single Fake Ballot object that is manually constructed with default values
https://github.com/microsoft/electionguard-python/blob/eb19846cd17ae73064586da8f0be11d97c565b43/src/electionguard_tools/factories/ballot_factory.py#L94-L119
from typing import TypeVar, Callable, List, Tuple import os from random import Random, randint import uuid from hypothesis.strategies import ( composite, booleans, integers, text, uuids, SearchStrategy, ) from electionguard.ballot import ( PlaintextBallot, PlaintextBallotContest, PlaintextBallotSelection, ) from electionguard.encrypt import selection_from from electionguard.manifest import ( ContestDescription, SelectionDescription, InternalManifest, ) from electionguard_tools.helpers.serialize import ( from_file_to_dataclass, from_list_in_file_to_dataclass, ) _T = TypeVar("_T") _DrawType = Callable[[SearchStrategy[_T]], _T] data = os.path.realpath(os.path.join(__file__, "../../../../data")) class BallotFactory: simple_ballot_filename = "ballot_in_simple.json" simple_ballots_filename = "plaintext_ballots_simple.json" @staticmethod def get_random_selection_from( description: SelectionDescription, random_source: Random, is_placeholder=False, ) -> PlaintextBallotSelection: selected = bool(random_source.randint(0, 1)) return selection_from(description, is_placeholder, selected) def get_random_contest_from( self, description: ContestDescription, random: Random, suppress_validity_check=False, with_trues=False, ) -> PlaintextBallotContest: if not suppress_validity_check: assert description.is_valid(), "the contest description must be valid" selections: List[PlaintextBallotSelection] = list() voted = 0 for selection_description in description.ballot_selections: selection = self.get_random_selection_from(selection_description, random) voted += selection.vote if voted <= 1 and selection.vote and with_trues: selections.append(selection) continue if voted <= description.number_elected and bool(random.randint(0, 1)) == 1: selections.append(selection) elif bool(random.randint(0, 1)) == 1: selections.append(selection_from(selection_description)) return PlaintextBallotContest( description.object_id, description.sequence_order, selections )
MIT License
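Usage sketch: given an `InternalManifest` built from an election manifest, the factory produces a plaintext ballot for the first ballot style. Obtaining the manifest itself is outside this record, so it is only assumed here:

from electionguard_tools.factories.ballot_factory import BallotFactory

factory = BallotFactory()
# `internal_manifest` must be an electionguard InternalManifest constructed
# elsewhere (e.g. from a test election description); it is assumed to exist.
ballot = factory.get_fake_ballot(internal_manifest, ballot_id="fake-ballot-001")
print(ballot.object_id)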
openstack/horizon
horizon/notifications.py
process_message_notification
python
def process_message_notification(request, messages_path):
    if not messages_path:
        return

    global _MESSAGES_CACHE
    global _MESSAGES_MTIME

    if (_MESSAGES_CACHE is None or
            _MESSAGES_MTIME != os.path.getmtime(messages_path)):
        _MESSAGES_CACHE = _get_processed_messages(messages_path)
        _MESSAGES_MTIME = os.path.getmtime(messages_path)

    for msg in _MESSAGES_CACHE:
        msg.send_message(request)
Process all the msg files found in the message directory
https://github.com/openstack/horizon/blob/5e405d71926764b8aa60c75794b62f668f4e8122/horizon/notifications.py#L132-L149
import glob import json import logging import os from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import messages LOG = logging.getLogger(__name__) _MESSAGES_CACHE = None _MESSAGES_MTIME = None class JSONMessage(object): INFO = messages.info SUCCESS = messages.success WARNING = messages.warning ERROR = messages.error MESSAGE_LEVELS = { 'info': INFO, 'success': SUCCESS, 'warning': WARNING, 'error': ERROR } def __init__(self, path, fail_silently=False): self._path = path self._data = '' self.failed = False self.fail_silently = fail_silently self.message = '' self.level = self.INFO self.level_name = 'info' def _read(self): with open(self._path, 'rb') as file_obj: self._data = file_obj.read() def _parse(self): attrs = {} try: data = self._data.decode('utf-8') attrs = json.loads(data) except ValueError as exc: self.failed = True params = {'path': self._path, 'exception': exc} if self.fail_silently: LOG.warning("Message json file '%(path)s' is malformed. " "%(exception)s", params) else: raise exceptions.MessageFailure( _("Message json file '%(path)s' is malformed. " "%(exception)s") % params) else: level_name = attrs.get('level', 'info') if level_name in self.MESSAGE_LEVELS: self.level_name = level_name self.level = self.MESSAGE_LEVELS.get(self.level_name, self.INFO) self.message = attrs.get('message', '') def load(self): try: self._read() self._parse() except Exception as exc: self.failed = True params = {'path': self._path, 'exception': exc} if self.fail_silently: LOG.warning("Error processing message json file '%(path)s': " "%(exception)s", params) else: raise exceptions.MessageFailure( _("Error processing message json file '%(path)s': " "%(exception)s") % params) def send_message(self, request): if self.failed: return self.level(request, mark_safe(self.message)) def _is_path(path): return os.path.exists(path) and os.path.isdir(path) def _get_processed_messages(messages_path): msgs = list() if not _is_path(messages_path): LOG.error('%s is not a valid messages path.', messages_path) return msgs for fname in glob.glob(os.path.join(messages_path, '*.json')): fpath = os.path.join(messages_path, fname) msg = JSONMessage(fpath, fail_silently=True) msg.load() if not msg.failed: msgs.append(msg) return msgs
Apache License 2.0
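A minimal usage sketch for process_message_notification from a Django view; the MESSAGES_PATH setting name, the view, and the sample JSON content are illustrative assumptions, not taken from the record above.

from django.conf import settings
from horizon import notifications

# A message file such as <messages_path>/maintenance.json could contain:
#   {"level": "warning", "message": "Scheduled maintenance at 22:00 UTC."}

def my_view(request):
    messages_path = getattr(settings, "MESSAGES_PATH", None)  # hypothetical setting name
    # Files are parsed once, cached by mtime, and re-sent on every request.
    notifications.process_message_notification(request, messages_path)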
databricks/koalas
databricks/koalas/utils.py
is_name_like_value
python
def is_name_like_value(
    value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
    if value is None:
        return allow_none
    elif isinstance(value, tuple):
        return allow_tuple and is_name_like_tuple(
            value, allow_none=allow_none, check_type=check_type
        )
    elif is_list_like(value) or isinstance(value, slice):
        return False
    elif check_type:
        return as_spark_type(type(value), raise_error=False) is not None
    else:
        return True
Check whether the given value is name-like.

Examples
--------
>>> is_name_like_value('abc')
True
>>> is_name_like_value(1)
True
>>> is_name_like_value(None)
True
>>> is_name_like_value(('abc',))
True
>>> is_name_like_value(1.0j)
True
>>> is_name_like_value(list('abc'))
False
>>> is_name_like_value(None, allow_none=False)
False
>>> is_name_like_value(('abc',), allow_tuple=False)
False
>>> is_name_like_value(1.0j, check_type=True)
False
https://github.com/databricks/koalas/blob/e971d6f37ede45297bbf9d509ae2a7b51717f322/databricks/koalas/utils.py#L673-L711
import functools from collections import OrderedDict from contextlib import contextmanager from distutils.version import LooseVersion import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union, TYPE_CHECKING import warnings import pyarrow import pyspark from pyspark import sql as spark from pyspark.sql import functions as F from pyspark.sql.types import DoubleType import pandas as pd from pandas.api.types import is_list_like from databricks import koalas as ks from databricks.koalas.typedef.typehints import ( as_spark_type, extension_dtypes, spark_type_to_pandas_dtype, ) if TYPE_CHECKING: from databricks.koalas.base import IndexOpsMixin from databricks.koalas.frame import DataFrame from databricks.koalas.internal import InternalFrame ERROR_MESSAGE_CANNOT_COMBINE = ( "Cannot combine the series or dataframe because it comes from a different dataframe. " "In order to allow this operation, enable 'compute.ops_on_diff_frames' option." ) if LooseVersion(pyspark.__version__) < LooseVersion("3.0"): SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.enabled" else: SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.pyspark.enabled" def same_anchor( this: Union["DataFrame", "IndexOpsMixin", "InternalFrame"], that: Union["DataFrame", "IndexOpsMixin", "InternalFrame"], ) -> bool: from databricks.koalas.base import IndexOpsMixin from databricks.koalas.frame import DataFrame from databricks.koalas.internal import InternalFrame if isinstance(this, InternalFrame): this_internal = this else: assert isinstance(this, (DataFrame, IndexOpsMixin)), type(this) this_internal = this._internal if isinstance(that, InternalFrame): that_internal = that else: assert isinstance(that, (DataFrame, IndexOpsMixin)), type(that) that_internal = that._internal return ( this_internal.spark_frame is that_internal.spark_frame and this_internal.index_level == that_internal.index_level and all( this_scol._jc.equals(that_scol._jc) for this_scol, that_scol in zip( this_internal.index_spark_columns, that_internal.index_spark_columns ) ) ) def combine_frames(this, *args, how="full", preserve_order_column=False): from databricks.koalas.config import get_option from databricks.koalas.frame import DataFrame from databricks.koalas.internal import ( InternalFrame, HIDDEN_COLUMNS, NATURAL_ORDER_COLUMN_NAME, SPARK_INDEX_NAME_FORMAT, ) from databricks.koalas.series import Series if all(isinstance(arg, Series) for arg in args): assert all( same_anchor(arg, args[0]) for arg in args ), "Currently only one different DataFrame (from given Series) is supported" assert not same_anchor(this, args[0]), "We don't need to combine. All series is in this." that = args[0]._kdf[list(args)] elif len(args) == 1 and isinstance(args[0], DataFrame): assert isinstance(args[0], DataFrame) assert not same_anchor( this, args[0] ), "We don't need to combine. `this` and `that` are same." 
that = args[0] else: raise AssertionError("args should be single DataFrame or " "single/multiple Series") if get_option("compute.ops_on_diff_frames"): def resolve(internal, side): rename = lambda col: "__{}_{}".format(side, col) internal = internal.resolved_copy sdf = internal.spark_frame sdf = internal.spark_frame.select( [ scol_for(sdf, col).alias(rename(col)) for col in sdf.columns if col not in HIDDEN_COLUMNS ] + list(HIDDEN_COLUMNS) ) return internal.copy( spark_frame=sdf, index_spark_columns=[ scol_for(sdf, rename(col)) for col in internal.index_spark_column_names ], data_spark_columns=[ scol_for(sdf, rename(col)) for col in internal.data_spark_column_names ], ) this_internal = resolve(this._internal, "this") that_internal = resolve(that._internal, "that") this_index_map = list( zip( this_internal.index_spark_column_names, this_internal.index_names, this_internal.index_dtypes, ) ) that_index_map = list( zip( that_internal.index_spark_column_names, that_internal.index_names, that_internal.index_dtypes, ) ) assert len(this_index_map) == len(that_index_map) join_scols = [] merged_index_scols = [] this_and_that_index_map = list(zip(this_index_map, that_index_map)) this_sdf = this_internal.spark_frame.alias("this") that_sdf = that_internal.spark_frame.alias("that") index_column_names = [] index_use_extension_dtypes = [] for ( i, ((this_column, this_name, this_dtype), (that_column, that_name, that_dtype)), ) in enumerate(this_and_that_index_map): if this_name == that_name: this_scol = scol_for(this_sdf, this_column) that_scol = scol_for(that_sdf, that_column) join_scol = this_scol == that_scol join_scols.append(join_scol) column_name = SPARK_INDEX_NAME_FORMAT(i) index_column_names.append(column_name) index_use_extension_dtypes.append( any(isinstance(dtype, extension_dtypes) for dtype in [this_dtype, that_dtype]) ) merged_index_scols.append( F.when(this_scol.isNotNull(), this_scol).otherwise(that_scol).alias(column_name) ) else: raise ValueError("Index names must be exactly matched currently.") assert len(join_scols) > 0, "cannot join with no overlapping index names" joined_df = this_sdf.join(that_sdf, on=join_scols, how=how) if preserve_order_column: order_column = [scol_for(this_sdf, NATURAL_ORDER_COLUMN_NAME)] else: order_column = [] joined_df = joined_df.select( merged_index_scols + [ scol_for(this_sdf, this_internal.spark_column_name_for(label)) for label in this_internal.column_labels ] + [ scol_for(that_sdf, that_internal.spark_column_name_for(label)) for label in that_internal.column_labels ] + order_column ) index_spark_columns = [scol_for(joined_df, col) for col in index_column_names] index_dtypes = [ spark_type_to_pandas_dtype(field.dataType, use_extension_dtypes=use_extension_dtypes) for field, use_extension_dtypes in zip( joined_df.select(index_spark_columns).schema, index_use_extension_dtypes ) ] index_columns = set(index_column_names) new_data_columns = [ col for col in joined_df.columns if col not in index_columns and col != NATURAL_ORDER_COLUMN_NAME ] data_dtypes = this_internal.data_dtypes + that_internal.data_dtypes level = max(this_internal.column_labels_level, that_internal.column_labels_level) def fill_label(label): if label is None: return ([""] * (level - 1)) + [None] else: return ([""] * (level - len(label))) + list(label) column_labels = [ tuple(["this"] + fill_label(label)) for label in this_internal.column_labels ] + [tuple(["that"] + fill_label(label)) for label in that_internal.column_labels] column_label_names = ( [None] * (1 + level - 
this_internal.column_labels_level) ) + this_internal.column_label_names return DataFrame( InternalFrame( spark_frame=joined_df, index_spark_columns=index_spark_columns, index_names=this_internal.index_names, index_dtypes=index_dtypes, column_labels=column_labels, data_spark_columns=[scol_for(joined_df, col) for col in new_data_columns], data_dtypes=data_dtypes, column_label_names=column_label_names, ) ) else: raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE) def align_diff_frames( resolve_func, this: "DataFrame", that: "DataFrame", fillna: bool = True, how: str = "full", preserve_order_column: bool = False, ) -> "DataFrame": from databricks.koalas.frame import DataFrame assert how == "full" or how == "left" or how == "inner" this_column_labels = this._internal.column_labels that_column_labels = that._internal.column_labels common_column_labels = set(this_column_labels).intersection(that_column_labels) combined = combine_frames(this, that, how=how, preserve_order_column=preserve_order_column) combined_column_labels = combined._internal.column_labels that_columns_to_apply = [] this_columns_to_apply = [] additional_that_columns = [] columns_to_keep = [] column_labels_to_keep = [] for combined_label in combined_column_labels: for common_label in common_column_labels: if combined_label == tuple(["this", *common_label]): this_columns_to_apply.append(combined_label) break elif combined_label == tuple(["that", *common_label]): that_columns_to_apply.append(combined_label) break else: if how == "left" and combined_label in [ tuple(["that", *label]) for label in that_column_labels ]: additional_that_columns.append(combined_label) elif fillna: columns_to_keep.append(F.lit(None).cast(DoubleType()).alias(str(combined_label))) column_labels_to_keep.append(combined_label) else: columns_to_keep.append(combined._kser_for(combined_label)) column_labels_to_keep.append(combined_label) that_columns_to_apply += additional_that_columns if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0: kser_set, column_labels_applied = zip( *resolve_func(combined, this_columns_to_apply, that_columns_to_apply) ) columns_applied = list(kser_set) column_labels_applied = list(column_labels_applied) else: columns_applied = [] column_labels_applied = [] applied = DataFrame( combined._internal.with_new_columns( columns_applied + columns_to_keep, column_labels=column_labels_applied + column_labels_to_keep, ) ) this_labels = OrderedDict() for this_label in this_column_labels: for new_label in applied._internal.column_labels: if new_label[1:] not in this_labels and this_label == new_label[1:]: this_labels[new_label[1:]] = new_label other_labels = OrderedDict() for new_label in applied._internal.column_labels: if new_label[1:] not in this_labels: other_labels[new_label[1:]] = new_label kdf = applied[list(this_labels.values()) + list(other_labels.values())] kdf.columns = kdf.columns.droplevel() return kdf def is_testing(): return "KOALAS_TESTING" in os.environ def default_session(conf=None): if conf is None: conf = dict() should_use_legacy_ipc = False if LooseVersion(pyarrow.__version__) >= LooseVersion("0.15") and LooseVersion( pyspark.__version__ ) < LooseVersion("3.0"): conf["spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT"] = "1" conf["spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT"] = "1" conf["spark.mesos.driverEnv.ARROW_PRE_0_15_IPC_FORMAT"] = "1" conf["spark.kubernetes.driverEnv.ARROW_PRE_0_15_IPC_FORMAT"] = "1" should_use_legacy_ipc = True builder = spark.SparkSession.builder.appName("Koalas") for key, value in 
conf.items(): builder = builder.config(key, value) builder.config("spark.sql.analyzer.failAmbiguousSelfJoin", False) if LooseVersion(pyspark.__version__) >= LooseVersion("3.0.1") and is_testing(): builder.config("spark.executor.allowSparkContext", False) session = builder.getOrCreate() if not should_use_legacy_ipc: is_legacy_ipc_set = any( v == "1" for v in [ session.conf.get("spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT", None), session.conf.get("spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT", None), session.conf.get("spark.mesos.driverEnv.ARROW_PRE_0_15_IPC_FORMAT", None), session.conf.get("spark.kubernetes.driverEnv.ARROW_PRE_0_15_IPC_FORMAT", None), ] ) if is_legacy_ipc_set: raise RuntimeError( "Please explicitly unset 'ARROW_PRE_0_15_IPC_FORMAT' environment variable in " "both driver and executor sides. Check your spark.executorEnv.*, " "spark.yarn.appMasterEnv.*, spark.mesos.driverEnv.* and " "spark.kubernetes.driverEnv.* configurations. It is required to set this " "environment variable only when you use pyarrow>=0.15 and pyspark<3.0." ) return session @contextmanager def sql_conf(pairs, *, spark=None): assert isinstance(pairs, dict), "pairs should be a dictionary." if spark is None: spark = default_session() keys = pairs.keys() new_values = pairs.values() old_values = [spark.conf.get(key, None) for key in keys] for key, new_value in zip(keys, new_values): spark.conf.set(key, new_value) try: yield finally: for key, old_value in zip(keys, old_values): if old_value is None: spark.conf.unset(key) else: spark.conf.set(key, old_value) def validate_arguments_and_invoke_function( pobj: Union[pd.DataFrame, pd.Series], koalas_func: Callable, pandas_func: Callable, input_args: Dict, ): import inspect args = input_args.copy() del args["self"] if "kwargs" in args: kwargs = args["kwargs"] del args["kwargs"] args = {**args, **kwargs} koalas_params = inspect.signature(koalas_func).parameters pandas_params = inspect.signature(pandas_func).parameters for param in koalas_params.values(): if param.name not in pandas_params: if args[param.name] == param.default: del args[param.name] else: raise TypeError( ( "The pandas version [%s] available does not support parameter '%s' " + "for function '%s'." 
) % (pd.__version__, param.name, pandas_func.__name__) ) args["self"] = pobj return pandas_func(**args) def lazy_property(fn): attr_name = "_lazy_" + fn.__name__ @property @functools.wraps(fn) def wrapped_lazy_property(self): if not hasattr(self, attr_name): setattr(self, attr_name, fn(self)) return getattr(self, attr_name) def deleter(self): if hasattr(self, attr_name): delattr(self, attr_name) return wrapped_lazy_property.deleter(deleter) def scol_for(sdf: spark.DataFrame, column_name: str) -> spark.Column: return sdf["`{}`".format(column_name)] def column_labels_level(column_labels: List[Tuple]) -> int: if len(column_labels) == 0: return 1 else: levels = set(1 if label is None else len(label) for label in column_labels) assert len(levels) == 1, levels return list(levels)[0] def name_like_string(name: Optional[Union[str, Tuple]]) -> str: if name is None: name = ("__none__",) elif is_list_like(name): name = tuple([str(n) for n in name]) else: name = (str(name),) return ("(%s)" % ", ".join(name)) if len(name) > 1 else name[0] def is_name_like_tuple(value: Any, allow_none: bool = True, check_type: bool = False) -> bool: if value is None: return allow_none elif not isinstance(value, tuple): return False elif len(value) == 0: return False elif not allow_none and any(v is None for v in value): return False elif any(is_list_like(v) or isinstance(v, slice) for v in value): return False elif check_type: return all( v is None or as_spark_type(type(v), raise_error=False) is not None for v in value ) else: return True
Apache License 2.0
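A short sketch of calling the helper directly, assuming a working Koalas/PySpark installation; the inputs mirror the doctest above.

from databricks.koalas.utils import is_name_like_value

assert is_name_like_value(("a", 1))                           # tuples of plain scalars are name-like
assert not is_name_like_value(["a", "b"])                     # list-like values are rejected
assert not is_name_like_value(("a", None), allow_none=False)  # None inside the tuple is refused
assert not is_name_like_value(1.0j, check_type=True)          # complex has no Spark type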
bourguet/operator_precedence_parsing
pratt_tdop_parser.py
Parser.LookupLeft
python
def LookupLeft(self, token):
    try:
        left_info = self.left_lookup[token]
    except KeyError:
        raise ParseError('Unexpected token %r' % token)
    return left_info
Get the parsing function and precedence for a left position token.
https://github.com/bourguet/operator_precedence_parsing/blob/1e73ec7989e142cb0bebef72d1127bdf67687bee/pratt_tdop_parser.py#L66-L72
import sys import lexer from lexer import Token from tree import Node, CompositeNode class ParseError(RuntimeError): pass def NullError(p, token, rbp): raise ParseError("%s can't be used in prefix position" % token) def LeftError(p, token, rbp, left): raise ParseError("%s can't be used in infix position" % token) MIN_BP = 0 MAX_BP = 10000 class LeftInfo(object): def __init__(self, led=None, lbp=MIN_BP, rbp=MIN_BP, nbp=MIN_BP): self.led = led or LeftError self.lbp = lbp self.rbp = rbp self.nbp = nbp class NullInfo(object): def __init__(self, nud=None, lbp=MIN_BP, rbp=MIN_BP, nbp=MIN_BP): self.nud = nud or NullError self.lbp = lbp self.rbp = rbp self.nbp = nbp class Parser(object): def __init__(self): self.lexer = None self.token = None self.null_lookup = {} self.left_lookup = {} """Specification for a TDOP parser.""" def LookupNull(self, token): try: null_info = self.null_lookup[token] except KeyError: raise ParseError('Unexpected token %r' % token) return null_info
BSD 2-Clause Simplified License
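A minimal sketch of how LookupLeft is consulted, assuming pratt_tdop_parser.py is on the import path; the '+' entry and its binding powers are illustrative, not part of the record above.

from pratt_tdop_parser import Parser, LeftInfo, ParseError

p = Parser()
p.left_lookup['+'] = LeftInfo(lbp=10, rbp=10, nbp=10)   # hypothetical binding powers

print(p.LookupLeft('+').lbp)        # 10
try:
    p.LookupLeft('@')               # no entry registered for '@'
except ParseError as e:
    print(e)                        # Unexpected token '@'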
ucb-sts/sts
sts/simulation_state.py
SimulationConfig.bootstrap
python
def bootstrap(self, sync_callback=None, boot_controllers=default_boot_controllers):
    if sync_callback is None:
        sync_callback = ReplaySyncCallback(None)

    def initialize_io_loop():
        _io_master = IOMaster()
        _io_master.monkey_time_sleep()
        msg.set_io_master(_io_master)
        return _io_master

    def wire_controller_patch_panel(controller_manager, create_io_worker):
        patch_panel = None
        if not self.interpose_on_controllers:
            return patch_panel
        remote_controllers = controller_manager.remote_controllers
        if len(remote_controllers) != 0:
            patch_panel = self.controller_patch_panel_class(create_io_worker)
            for c in remote_controllers:
                patch_panel.register_controller(c.cid, c.guest_eth_addr, c.host_device)
        return patch_panel

    def instantiate_topology(create_io_worker):
        log.info("Creating topology...")
        comma = "" if self._topology_params == "" else ","
        topology = eval("%s(%s%screate_io_worker=create_io_worker)" %
                        (self._topology_class.__name__, self._topology_params, comma))
        return topology

    def monkeypatch_select(multiplex_sockets, controller_manager):
        mux_select = None
        demuxers = []
        if multiplex_sockets:
            log.debug("Monkeypatching STS select")
            revert_select_monkeypatch()
            mux_select = MultiplexedSelect()
            for c in controller_manager.controller_configs:
                true_socket = connect_socket_with_backoff(address=c.address, port=c.port)
                true_socket.setblocking(0)
                io_worker = mux_select.create_worker_for_socket(true_socket)
                demux = STSSocketDemultiplexer(io_worker, c.server_info)
                demuxers.append(demux)
            select._old_select = select.select
            select.select = mux_select.select
        return (mux_select, demuxers)

    revert_select_monkeypatch()
    io_master = initialize_io_loop()
    sync_connection_manager = STSSyncConnectionManager(io_master, sync_callback)
    controller_manager = boot_controllers(self.controller_configs, self.snapshot_service,
                                          sync_connection_manager,
                                          multiplex_sockets=self.multiplex_sockets)
    controller_patch_panel = wire_controller_patch_panel(controller_manager,
                                                         io_master.create_worker_for_socket)
    topology = instantiate_topology(io_master.create_worker_for_socket)
    patch_panel = self._patch_panel_class(topology.switches, topology.hosts,
                                          topology.get_connected_port)
    openflow_buffer = OpenFlowBuffer()
    dataplane_trace = None
    if self._dataplane_trace_path is not None:
        dataplane_trace = Trace(self._dataplane_trace_path, topology)
    if self._violation_persistence_threshold is not None:
        violation_tracker = ViolationTracker(self._violation_persistence_threshold)
    else:
        violation_tracker = ViolationTracker()
    (mux_select, demuxers) = monkeypatch_select(self.multiplex_sockets, controller_manager)
    simulation = Simulation(topology, controller_manager, dataplane_trace,
                            openflow_buffer, io_master, controller_patch_panel, patch_panel,
                            sync_callback, mux_select, demuxers, violation_tracker,
                            self._kill_controllers_on_exit)
    if self.ignore_interposition:
        simulation.set_pass_through()
    self.current_simulation = simulation
    return simulation
Return a simulation object encapsulating the state of the system in its initial starting point:
- boots controllers
- connects switches to controllers

May be invoked multiple times!
https://github.com/ucb-sts/sts/blob/82190b7662523e3aaa21998a6a31d0878abe66c7/sts/simulation_state.py#L137-L236
from sts.util.io_master import IOMaster from sts.dataplane_traces.trace import Trace from entities import DeferredOFConnection from sts.controller_manager import ControllerManager, UserSpaceControllerPatchPanel from sts.util.deferred_io import DeferredIOWorker from sts.openflow_buffer import OpenFlowBuffer from sts.topology import * from sts.invariant_checker import ViolationTracker from sts.syncproto.sts_syncer import STSSyncConnectionManager import sts.snapshot as snapshot from sts.util.socket_mux.base import MultiplexedSelect from sts.util.socket_mux.sts_socket_multiplexer import STSSocketDemultiplexer, STSMockSocket from sts.util.convenience import find from pox.lib.util import connect_socket_with_backoff from sts.control_flow.base import ReplaySyncCallback import select import socket import logging import time log = logging.getLogger("simulation") def default_boot_controllers(controller_configs, snapshot_service, sync_connection_manager, multiplex_sockets=False): controllers = [] for c in controller_configs: controller = c.controller_class( c, sync_connection_manager=sync_connection_manager, snapshot_service=snapshot_service) controller.start(multiplex_sockets=multiplex_sockets) log.info("Launched controller %s: %s [PID %d]" % (str(c.cid), " ".join(c.expanded_start_cmd), controller.pid)) controllers.append(controller) return ControllerManager(controllers) revert_select_monkeypatch() def revert_select_monkeypatch(): if hasattr(select, "_old_select"): select.select = select._old_select class SimulationConfig(object): def __init__(self, controller_configs=None, topology_class=FatTree, topology_params="", patch_panel_class=BufferedPatchPanel, controller_patch_panel_class=UserSpaceControllerPatchPanel, dataplane_trace=None, snapshot_service=None, multiplex_sockets=False, violation_persistence_threshold=None, kill_controllers_on_exit=True, interpose_on_controllers=False, ignore_interposition=False): if controller_configs is None: controller_configs = [] self.controller_configs = controller_configs self._topology_class = topology_class self._topology_params = topology_params self._patch_panel_class = patch_panel_class self._dataplane_trace_path = dataplane_trace self._violation_persistence_threshold = violation_persistence_threshold self._kill_controllers_on_exit = kill_controllers_on_exit if snapshot_service is None: snapshot_service = snapshot.get_snapshotservice(controller_configs) self.snapshot_service = snapshot_service self.current_simulation = None self.multiplex_sockets = multiplex_sockets self.controller_patch_panel_class = controller_patch_panel_class self.interpose_on_controllers = interpose_on_controllers self.ignore_interposition = ignore_interposition if self.ignore_interposition: self.interpose_on_controllers = False
Apache License 2.0
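A hedged sketch of driving bootstrap(), assuming an STS checkout on the import path; the FatTree topology argument is a hypothetical example and no controllers are configured.

from sts.simulation_state import SimulationConfig

sim_config = SimulationConfig(controller_configs=[],           # no controllers in this sketch
                              topology_params="num_pods=4")    # hypothetical FatTree argument
simulation = sim_config.bootstrap()                            # boots controllers, wires the topology
print(sim_config.current_simulation is simulation)             # True: bootstrap records the live simulation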
daleal/asymmetric
asymmetric/openapi/core.py
get_parameters_amount
python
def get_parameters_amount(params: FullArgSpec) -> Dict[str, int]:
    parameters_amount = len(params.args)
    defaults_amount = 0 if params.defaults is None else len(params.defaults)
    no_defaults_amount = parameters_amount - defaults_amount
    return {"no_defaults": no_defaults_amount, "defaults": defaults_amount}
Gets a params object retrieved from a function with getfullargspec and returns a dictionary with the number of default and non-default arguments.
https://github.com/daleal/asymmetric/blob/8d1f5c4d9a5b9a9f0013952c847caff9d9958061/asymmetric/openapi/core.py#L19-L27
import functools
from inspect import FullArgSpec, getfullargspec
from typing import TYPE_CHECKING, Any, Dict, List

from asymmetric.callbacks.callback_object import CALLBACK_OBJECT_METADATA
from asymmetric.callbacks.utils import get_header_finders
from asymmetric.endpoints import Endpoint
from asymmetric.openapi.constants import ANY_TYPE
from asymmetric.openapi.helpers import type_to_string

if TYPE_CHECKING:
    from asymmetric.core import _Asymmetric
MIT License
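A small sketch of what get_parameters_amount reports for an ordinary function, assuming the package is importable; the endpoint function below is made up for illustration.

from inspect import getfullargspec
from asymmetric.openapi.core import get_parameters_amount

def endpoint(a, b, c=1, d=2):       # hypothetical handler: two required, two defaulted arguments
    return a + b + c + d

print(get_parameters_amount(getfullargspec(endpoint)))   # {'no_defaults': 2, 'defaults': 2}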
alliefitter/boto3_type_annotations
boto3_type_annotations_with_docs/boto3_type_annotations/athena/client.py
Client.delete_work_group
python
def delete_work_group(self, WorkGroup: str, RecursiveDeleteOption: bool = None) -> Dict:
    pass
Deletes the workgroup with the specified name. The primary workgroup cannot be deleted.

See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteWorkGroup>`_

**Request Syntax**
::

  response = client.delete_work_group(
      WorkGroup='string',
      RecursiveDeleteOption=True|False
  )

**Response Syntax**
::

  {}

**Response Structure**

- *(dict) --*

:type WorkGroup: string
:param WorkGroup: **[REQUIRED]**
  The unique name of the workgroup to delete.

:type RecursiveDeleteOption: boolean
:param RecursiveDeleteOption: The option to delete the workgroup and its contents even if the workgroup contains any named queries.

:rtype: dict
:returns:
https://github.com/alliefitter/boto3_type_annotations/blob/2a88aa562b1aee6e8a6cc30402980884b3707fbb/boto3_type_annotations_with_docs/boto3_type_annotations/athena/client.py#L371-L398
from typing import Optional from botocore.client import BaseClient from typing import Dict from botocore.paginate import Paginator from botocore.waiter import Waiter from typing import Union from typing import List class Client(BaseClient): def batch_get_named_query(self, NamedQueryIds: List) -> Dict: pass def batch_get_query_execution(self, QueryExecutionIds: List) -> Dict: pass def can_paginate(self, operation_name: str = None): pass def create_named_query(self, Name: str, Database: str, QueryString: str, Description: str = None, ClientRequestToken: str = None, WorkGroup: str = None) -> Dict: pass def create_work_group(self, Name: str, Configuration: Dict = None, Description: str = None, Tags: List = None) -> Dict: pass def delete_named_query(self, NamedQueryId: str) -> Dict: pass
MIT License
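The class above is only a type-annotation stub; at runtime the same call goes through a real boto3 Athena client. A hedged sketch with a placeholder workgroup name (running it genuinely deletes the workgroup and requires AWS credentials):

import boto3

athena = boto3.client("athena")
response = athena.delete_work_group(
    WorkGroup="my-temporary-workgroup",   # placeholder name
    RecursiveDeleteOption=True,
)
print(response)                           # empty response structure, per the docstring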
dcramer/kleenex
kleenex/utils.py
is_py_script
python
def is_py_script(filename):
    if filename.endswith(".py") and os.path.exists(filename):
        return True
    elif not os.access(filename, os.X_OK):
        return False
    else:
        try:
            with open(filename, "r") as fp:
                first_line = fp.readline().strip()
            return "#!" in first_line and "python" in first_line
        except StopIteration:
            return False
Returns True if a file is a python executable.
https://github.com/dcramer/kleenex/blob/e48460ed5630f3bc6b9727a844d39d3b844160f6/kleenex/utils.py#L5-L17
import os
import os.path
Apache License 2.0
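A quick sketch exercising is_py_script on two temporary files, assuming kleenex is importable; the files are created on the fly and the paths are arbitrary.

import os
import tempfile
from kleenex.utils import is_py_script

with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as fp:
    fp.write(b"print('hi')\n")
print(is_py_script(fp.name))      # True: .py extension and the file exists

script = tempfile.NamedTemporaryFile(delete=False)
script.write(b"#!/usr/bin/env python\nprint('hi')\n")
script.close()
os.chmod(script.name, 0o755)
print(is_py_script(script.name))  # True: executable with a python shebang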
natlibfi/skosify
skosify/rdftools/access.py
localname
python
def localname(uri):
    return uri.split('/')[-1].split('#')[-1]
Determine the presumable local name (after the namespace) of a URI.
https://github.com/natlibfi/skosify/blob/5e396d880046b03c9739088834fef8781b9e0527/skosify/rdftools/access.py#L5-L7
MIT License
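A tiny illustration of localname, assuming the import path mirrors the file path above; the URIs are arbitrary examples.

from skosify.rdftools.access import localname

print(localname("http://www.yso.fi/onto/yso/p1234"))   # p1234
print(localname("http://example.org/onto#Concept"))    # Concept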
mbj4668/pyang
pyang/translators/schemanode.py
SchemaNode.annot
python
def annot(self, node):
    self.annots.append(node)
    node.parent = self
Add `node` as an annotation of the receiver.
https://github.com/mbj4668/pyang/blob/376824f55677c9cd34c4e985fb3cf70deba699f1/pyang/translators/schemanode.py#L153-L156
from xml.sax.saxutils import escape class SchemaNode(object): @classmethod def element(cls, name, parent=None, interleave=None, occur=0): node = cls("element", parent, interleave=interleave) node.attr["name"] = name node.occur = occur return node @classmethod def leaf_list(cls, name, parent=None, interleave=None): node = cls("_list_", parent, interleave=interleave) node.attr["name"] = name node.keys = None node.minEl = "0" node.maxEl = None node.occur = 3 return node @classmethod def list(cls, name, parent=None, interleave=None): node = cls.leaf_list(name, parent, interleave=interleave) node.keys = [] node.keymap = {} return node @classmethod def choice(cls, parent=None, occur=0): node = cls("choice", parent) node.occur = occur node.default_case = None return node @classmethod def case(cls, parent=None): node = cls("case", parent) node.occur = 0 return node @classmethod def define(cls, name, parent=None, interleave=False): node = cls("define", parent, interleave=interleave) node.occur = 0 node.attr["name"] = name return node def __init__(self, name, parent=None, text="", interleave=None): self.name = name self.parent = parent if parent is not None: parent.children.append(self) self.text = text self.adjust_interleave(interleave) self.children = [] self.annots = [] self.attr = {} def rng_children(self): return [c for c in self.children if ":" not in c.name] def serialize_children(self): return ''.join([ch.serialize() for ch in self.children]) def serialize_annots(self): return ''.join([ch.serialize() for ch in self.annots]) def adjust_interleave(self, interleave): if interleave is None and self.parent: self.interleave = self.parent.interleave else: self.interleave = interleave def subnode(self, node): self.children.append(node) node.parent = self node.adjust_interleave(node.interleave)
ISC License
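A minimal sketch of attaching an annotation child to a schema node, using only constructors shown in the context above; the element and annotation names are illustrative.

from pyang.translators.schemanode import SchemaNode

elem = SchemaNode.element("address")                          # RELAX NG <element name="address">
doc = SchemaNode("a:documentation", text="Mailing address")   # annotation node
elem.annot(doc)

print(doc.parent is elem)      # True
print(len(elem.annots))        # 1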
wapm-packages/python
Python-3.6.7/Lib/_strptime.py
TimeRE.__init__
python
def __init__(self, locale_time=None):
    if locale_time:
        self.locale_time = locale_time
    else:
        self.locale_time = LocaleTime()
    base = super()
    base.__init__({
        'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
        'f': r"(?P<f>[0-9]{1,6})",
        'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
        'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
        'G': r"(?P<G>\d\d\d\d)",
        'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
        'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
        'M': r"(?P<M>[0-5]\d|\d)",
        'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
        'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
        'w': r"(?P<w>[0-6])",
        'u': r"(?P<u>[1-7])",
        'V': r"(?P<V>5[0-3]|0[1-9]|[1-4]\d|\d)",
        'y': r"(?P<y>\d\d)",
        'Y': r"(?P<Y>\d\d\d\d)",
        'z': r"(?P<z>[+-]\d\d[0-5]\d)",
        'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
        'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
        'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
        'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
        'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
        'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
                             for tz in tz_names),
                            'Z'),
        '%': '%'})
    base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
    base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
    base.__setitem__('x', self.pattern(self.locale_time.LC_date))
    base.__setitem__('X', self.pattern(self.locale_time.LC_time))
Create keys/values. Order of execution is important for dependency reasons.
https://github.com/wapm-packages/python/blob/658c1822f430f6d604ecf2bcc388e469cedb2238/Python-3.6.7/Lib/_strptime.py#L185-L229
import time import locale import calendar from re import compile as re_compile from re import IGNORECASE from re import escape as re_escape from datetime import (date as datetime_date, timedelta as datetime_timedelta, timezone as datetime_timezone) try: from _thread import allocate_lock as _thread_allocate_lock except ImportError: from _dummy_thread import allocate_lock as _thread_allocate_lock __all__ = [] def _getlang(): return locale.getlocale(locale.LC_TIME) class LocaleTime(object): def __init__(self): self.lang = _getlang() self.__calc_weekday() self.__calc_month() self.__calc_am_pm() self.__calc_timezone() self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") if time.tzname != self.tzname or time.daylight != self.daylight: raise ValueError("timezone changed during initialization") def __pad(self, seq, front): seq = list(seq) if front: seq.insert(0, '') else: seq.append('') return seq def __calc_weekday(self): a_weekday = [calendar.day_abbr[i].lower() for i in range(7)] f_weekday = [calendar.day_name[i].lower() for i in range(7)] self.a_weekday = a_weekday self.f_weekday = f_weekday def __calc_month(self): a_month = [calendar.month_abbr[i].lower() for i in range(13)] f_month = [calendar.month_name[i].lower() for i in range(13)] self.a_month = a_month self.f_month = f_month def __calc_am_pm(self): am_pm = [] for hour in (1, 22): time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0)) am_pm.append(time.strftime("%p", time_tuple).lower()) self.am_pm = am_pm def __calc_date_time(self): time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0)) date_time = [None, None, None] date_time[0] = time.strftime("%c", time_tuple).lower() date_time[1] = time.strftime("%x", time_tuple).lower() date_time[2] = time.strftime("%X", time_tuple).lower() replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'), (self.f_month[3], '%B'), (self.a_weekday[2], '%a'), (self.a_month[3], '%b'), (self.am_pm[1], '%p'), ('1999', '%Y'), ('99', '%y'), ('22', '%H'), ('44', '%M'), ('55', '%S'), ('76', '%j'), ('17', '%d'), ('03', '%m'), ('3', '%m'), ('2', '%w'), ('10', '%I')] replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone for tz in tz_values]) for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')): current_format = date_time[offset] for old, new in replacement_pairs: if old: current_format = current_format.replace(old, new) time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0)) if '00' in time.strftime(directive, time_tuple): U_W = '%W' else: U_W = '%U' date_time[offset] = current_format.replace('11', U_W) self.LC_date_time = date_time[0] self.LC_date = date_time[1] self.LC_time = date_time[2] def __calc_timezone(self): try: time.tzset() except AttributeError: pass self.tzname = time.tzname self.daylight = time.daylight no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()}) if self.daylight: has_saving = frozenset({self.tzname[1].lower()}) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) class TimeRE(dict):
Apache License 2.0
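TimeRE is an internal helper of the standard library, but a short sketch shows what the constructor above builds: a mapping from strptime directives to regular-expression fragments. It is not a public API, so behavior may differ between Python versions.

from _strptime import TimeRE

time_re = TimeRE()                   # uses the current LC_TIME locale
print(time_re['Y'])                  # (?P<Y>\d\d\d\d)
print(time_re.pattern('%Y-%m-%d'))   # full regex assembled from the fragments above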
googlecloudplatform/perfkitbenchmarker
perfkitbenchmarker/virtual_machine.py
BaseOsMixin.Suspend
python
def Suspend(self) -> float:
    self._BeforeSuspend()

    before_suspend_timestamp = time.time()

    self._Suspend()
    self._PostSuspend()

    return time.time() - before_suspend_timestamp
Suspends the vm.

Future plans and edge cases:
- checking if a vm is suspendable.
- Accidentally suspending a VM that is already suspending.
- Trying to resume a VM that is not already suspended.

Returns:
  The amount of time it takes to Suspend a VM that is suspendable.
https://github.com/googlecloudplatform/perfkitbenchmarker/blob/c14a122016d414351d41167029c79c9a19709384/perfkitbenchmarker/virtual_machine.py#L442-L459
import abc import contextlib import logging import os.path import socket import threading import time import typing from typing import List from absl import flags import jinja2 from perfkitbenchmarker import background_workload from perfkitbenchmarker import benchmark_lookup from perfkitbenchmarker import data from perfkitbenchmarker import disk from perfkitbenchmarker import errors from perfkitbenchmarker import events from perfkitbenchmarker import os_types from perfkitbenchmarker import package_lookup from perfkitbenchmarker import resource from perfkitbenchmarker import vm_util from perfkitbenchmarker.configs import option_decoders from perfkitbenchmarker.configs import spec import six FLAGS = flags.FLAGS DEFAULT_USERNAME = 'perfkit' QUOTA_EXCEEDED_MESSAGE = 'Creation failed due to quota exceeded: ' def ValidateVmMetadataFlag(options_list): for option in options_list: if ':' not in option[1:-1]: raise flags.ValidationError( '"%s" not in expected key:value format' % option) return True VM_METADATA = 'vm_metadata' flags.DEFINE_boolean( 'dedicated_hosts', False, 'If True, use hosts that only have VMs from the same ' 'benchmark running on them.') flags.DEFINE_integer( 'num_vms_per_host', None, 'The number of VMs per dedicated host. If None, VMs will be packed on a ' 'single host until no more can be packed at which point a new host will ' 'be created.') flags.DEFINE_integer( 'num_cpus_override', None, 'Rather than detecting the number of CPUs present on the machine, use this ' 'value if set. Some benchmarks will use this number to automatically ' 'scale their configurations; this can be used as a method to control ' 'benchmark scaling. It will also change the num_cpus metadata ' 'published along with the benchmark data.') flags.DEFINE_list(VM_METADATA, [], 'Metadata to add to the vm. It expects' 'key:value pairs.') flags.register_validator(VM_METADATA, ValidateVmMetadataFlag) flags.DEFINE_bool( 'skip_firewall_rules', False, 'If set, this run will not create firewall rules. This is useful if the ' 'user project already has all of the firewall rules in place and/or ' 'creating new ones is expensive') flags.DEFINE_bool( 'preprovision_ignore_checksum', False, 'Ignore checksum verification for preprovisioned data. ' 'Not recommended, please use with caution') flags.DEFINE_boolean( 'connect_via_internal_ip', False, 'Whether to use internal IP addresses for running commands on and pushing ' 'data to VMs. By default, PKB interacts with VMs using external IP ' 'addresses.') flags.DEFINE_boolean( 'ssh_via_internal_ip', False, 'Whether to use internal IP addresses for running commands on and pushing ' 'data to VMs. By default, PKB interacts with VMs using external IP ' 'addresses.') flags.DEFINE_boolean('retry_on_rate_limited', True, 'Whether to retry commands when rate limited.') GPU_K80 = 'k80' GPU_P100 = 'p100' GPU_V100 = 'v100' GPU_A100 = 'a100' GPU_P4 = 'p4' GPU_P4_VWS = 'p4-vws' GPU_T4 = 't4' VALID_GPU_TYPES = [ GPU_K80, GPU_P100, GPU_V100, GPU_A100, GPU_P4, GPU_P4_VWS, GPU_T4 ] flags.DEFINE_integer( 'gpu_count', None, 'Number of gpus to attach to the VM. Requires gpu_type to be ' 'specified.') flags.DEFINE_enum( 'gpu_type', None, VALID_GPU_TYPES, 'Type of gpus to attach to the VM. 
Requires gpu_count to be ' 'specified.') def GetVmSpecClass(cloud): return spec.GetSpecClass(BaseVmSpec, CLOUD=cloud) def GetVmClass(cloud, os_type): return resource.GetResourceClass(BaseVirtualMachine, CLOUD=cloud, OS_TYPE=os_type) class BaseVmSpec(spec.BaseSpec): SPEC_TYPE = 'BaseVmSpec' CLOUD = None def __init__(self, *args, **kwargs): self.machine_type = None super(BaseVmSpec, self).__init__(*args, **kwargs) @classmethod def _ApplyFlags(cls, config_values, flag_values): super(BaseVmSpec, cls)._ApplyFlags(config_values, flag_values) if flag_values['image'].present: config_values['image'] = flag_values.image if flag_values['install_packages'].present: config_values['install_packages'] = flag_values.install_packages if flag_values['machine_type'].present: config_values['machine_type'] = flag_values.machine_type if flag_values['background_cpu_threads'].present: config_values['background_cpu_threads'] = ( flag_values.background_cpu_threads) if flag_values['background_network_mbits_per_sec'].present: config_values['background_network_mbits_per_sec'] = ( flag_values.background_network_mbits_per_sec) if flag_values['background_network_ip_type'].present: config_values['background_network_ip_type'] = ( flag_values.background_network_ip_type) if flag_values['dedicated_hosts'].present: config_values['use_dedicated_host'] = flag_values.dedicated_hosts if flag_values['num_vms_per_host'].present: config_values['num_vms_per_host'] = flag_values.num_vms_per_host if flag_values['gpu_type'].present: config_values['gpu_type'] = flag_values.gpu_type if flag_values['gpu_count'].present: config_values['gpu_count'] = flag_values.gpu_count if flag_values['disable_interrupt_moderation'].present: config_values['disable_interrupt_moderation'] = ( flag_values.disable_interrupt_moderation) if flag_values['disable_rss'].present: config_values['disable_rss'] = flag_values.disable_rss if flag_values['vm_metadata'].present: config_values['vm_metadata'] = flag_values.vm_metadata if 'gpu_count' in config_values and 'gpu_type' not in config_values: raise errors.Config.MissingOption( 'gpu_type must be specified if gpu_count is set') if 'gpu_type' in config_values and 'gpu_count' not in config_values: raise errors.Config.MissingOption( 'gpu_count must be specified if gpu_type is set') @classmethod def _GetOptionDecoderConstructions(cls): result = super(BaseVmSpec, cls)._GetOptionDecoderConstructions() result.update({ 'disable_interrupt_moderation': (option_decoders.BooleanDecoder, { 'default': False}), 'disable_rss': (option_decoders.BooleanDecoder, {'default': False}), 'image': (option_decoders.StringDecoder, {'none_ok': True, 'default': None}), 'install_packages': (option_decoders.BooleanDecoder, {'default': True}), 'machine_type': (option_decoders.StringDecoder, {'none_ok': True, 'default': None}), 'gpu_type': (option_decoders.EnumDecoder, { 'valid_values': VALID_GPU_TYPES, 'default': None}), 'gpu_count': (option_decoders.IntDecoder, {'min': 1, 'default': None}), 'zone': (option_decoders.StringDecoder, {'none_ok': True, 'default': None}), 'cidr': (option_decoders.StringDecoder, {'none_ok': True, 'default': None}), 'use_dedicated_host': (option_decoders.BooleanDecoder, {'default': False}), 'num_vms_per_host': (option_decoders.IntDecoder, {'default': None}), 'background_network_mbits_per_sec': (option_decoders.IntDecoder, { 'none_ok': True, 'default': None}), 'background_network_ip_type': (option_decoders.EnumDecoder, { 'default': vm_util.IpAddressSubset.EXTERNAL, 'valid_values': [vm_util.IpAddressSubset.EXTERNAL, 
vm_util.IpAddressSubset.INTERNAL]}), 'background_cpu_threads': (option_decoders.IntDecoder, { 'none_ok': True, 'default': None}), 'vm_metadata': (option_decoders.ListDecoder, { 'item_decoder': option_decoders.StringDecoder(), 'default': []})}) return result class BaseOsMixin(six.with_metaclass(abc.ABCMeta, object)): IS_REBOOTABLE = True install_packages: bool is_static: bool scratch_disks: List[disk.BaseDisk] ssh_private_key: str user_name: str @abc.abstractmethod def GetConnectionIp(self): def __init__(self): super(BaseOsMixin, self).__init__() self._installed_packages = set() self.startup_script_output = None self.postrun_script_output = None self.bootable_time = None self.port_listening_time = None self.hostname = None self.remote_access_ports = [] self.primary_remote_access_port = None self._reachable = {} self._total_memory_kb = None self._num_cpus = None self._is_smt_enabled = None self.os_metadata = {} assert type( self).BASE_OS_TYPE in os_types.BASE_OS_TYPES, '%s is not in %s' % ( type(self).BASE_OS_TYPE, os_types.BASE_OS_TYPES) @property @classmethod @abc.abstractmethod def OS_TYPE(cls): raise NotImplementedError() @property @classmethod @abc.abstractmethod def BASE_OS_TYPE(cls): raise NotImplementedError() def GetOSResourceMetadata(self): return self.os_metadata def CreateRamDisk(self, disk_spec): raise NotImplementedError() @abc.abstractmethod def RemoteCommand(self, command, should_log=False, ignore_failure=False, suppress_warning=False, timeout=None, **kwargs): raise NotImplementedError() def TryRemoteCommand(self, command, **kwargs): try: self.RemoteCommand(command, **kwargs) return True except errors.VirtualMachine.RemoteCommandError: return False except: raise def Reboot(self): if not self.IS_REBOOTABLE: raise errors.VirtualMachine.VirtualMachineError( "Trying to reboot a VM that isn't rebootable.") vm_bootable_time = None if self.bootable_time is not None: vm_bootable_time = self.VMLastBootTime() before_reboot_timestamp = time.time() self._Reboot() while True: self.WaitForBootCompletion() if vm_bootable_time != self.VMLastBootTime(): break reboot_duration_sec = time.time() - before_reboot_timestamp self._AfterReboot() return reboot_duration_sec def _BeforeSuspend(self): pass def _PostSuspend(self): pass
Apache License 2.0
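A hedged sketch of calling Suspend from a benchmark's Run phase; vm stands for a concrete, already-provisioned cloud VM whose class supplies _Suspend, and the benchmark_spec layout and metric tuple are illustrative, not part of the snippet above.

def Run(benchmark_spec):
    vm = benchmark_spec.vms[0]                               # assumed benchmark_spec attribute
    suspend_seconds = vm.Suspend()                           # wall-clock seconds, per the docstring
    return [('suspend_time', suspend_seconds, 'seconds')]    # illustrative sample tuple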
spantaleev/roscraco
roscraco/response/wireless.py
WirelessSettings.ssid
python
def ssid(self):
    return self._ssid
The current SSID (wireless network name).
https://github.com/spantaleev/roscraco/blob/87a5a7c54931d5586fd7d30c8c67a699bef69c1f/roscraco/response/wireless.py#L103-L105
from roscraco.helper import validator from roscraco.exception import RouterSettingsError class WirelessSettings(object): SECURITY_TYPE_NONE = 'none' SECURITY_TYPE_WEP64 = 'wep64' SECURITY_TYPE_WEP128 = 'wep128' SECURITY_TYPE_WPA = 'wpa' SECURITY_TYPE_WPA2 = 'wpa2' PROPERTIES = ( 'security_type', 'ssid', 'is_enabled', 'is_broadcasting_ssid', 'channel', 'password' ) def __init__(self): self._supports_wireless = True self._ssid = None self._enabled_status = True self._ssid_broadcast_status = True self._channel = None self._password = None self._internal_params = {} self._supported_security_types = set([self.__class__.SECURITY_TYPE_NONE]) self._security_type = None self._supports_ascii_wep_passwords = True self._supports_auto_channel = True self._changes_require_reboot = True def set_auto_channel_support(self, value): self._supports_auto_channel = bool(value) @property def supports_auto_channel(self): return self._supports_auto_channel def add_security_support(self, security_type): self._supported_security_types.add(security_type) @property def supported_security_types(self): return self._supported_security_types def set_security_type(self, security_type): self._security_type = security_type @property def security_type_is_wep(self): return self._security_type in (self.__class__.SECURITY_TYPE_WEP64, self.__class__.SECURITY_TYPE_WEP128) @property def security_type_is_wpa(self): return self._security_type in (self.__class__.SECURITY_TYPE_WPA, self.__class__.SECURITY_TYPE_WPA2) @property def security_type(self): return self._security_type def set_reboot_requirement_status(self, value): self._changes_require_reboot = bool(value) @property def changes_require_reboot(self): return self._changes_require_reboot def set_support_status(self, value): self._supports_wireless = bool(value) @property def is_supported(self): return self._supports_wireless def set_ssid(self, value): self._ssid = value @property
BSD 3-Clause New or Revised License
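A small sketch of reading and updating the SSID on a settings object, using only attributes shown in the context above; the network name is a placeholder.

from roscraco.response.wireless import WirelessSettings

settings = WirelessSettings()
print(settings.ssid)          # None until a value is set
settings.set_ssid("home-net")
print(settings.ssid)          # home-net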
arcanitesolutions/truffe2
truffe2/generic/models.py
GenericStateModel.can_switch_to
python
def can_switch_to(self, user, dest_state):
    return (True, None)
Return (IfOk, Message) indicating whether someone can switch to a specific state.
https://github.com/arcanitesolutions/truffe2/blob/5406842eb9719d7fdae7137ebd1918f2de61459c/truffe2/generic/models.py#L420-L422
from django.db import models from django.conf import settings from django.conf.urls import patterns, url from django.core.urlresolvers import reverse from django.contrib.contenttypes.models import ContentType from django.forms import CharField, Textarea, Form from django.utils.timezone import now from django.utils.translation import ugettext_lazy as _ import json import copy import inspect import importlib import os from pytz import timezone from datetime import timedelta import mimetypes from haystack import indexes import textract from celery_haystack.indexes import CelerySearchIndex from users.models import TruffeUser from generic import views from generic.forms import GenericForm from generic.search import SearchableModel from app.utils import get_property from notifications.utils import notify_people, unotify_people from rights.utils import AutoVisibilityLevel moderable_things = [] copiable_things = [] GENERICS_MODELS = {} class FalseFK(): def __init__(self, model, *args, **kwargs): self.model = model self.args = args self.kwargs = kwargs def build_models_list_of(Class): retour = [] already_returned = [] for app in settings.INSTALLED_APPS: try: module = importlib.import_module(app) models_module = importlib.import_module('.models', app) views_module = importlib.import_module('.views', app) urls_module = importlib.import_module('.urls', app) forms_module = importlib.import_module('.forms', app) except Exception as e: if str(e) not in ["No module named urls", "No module named views", "No module named forms", "No module named models"]: raise try: search_indexes_module = importlib.import_module('.search_indexes', app) except: search_indexes_module = None clsmembers = inspect.getmembers(models_module, inspect.isclass) linecls = {} for cls in clsmembers: try: linecls[cls[0]] = inspect.getsourcelines(cls[1])[1] except: linecls[cls[0]] = -1 clsmembers = sorted(clsmembers, key=lambda cls: linecls[cls[0]]) for model_name, model_class in clsmembers: if issubclass(model_class, Class) and model_class != Class and model_class not in already_returned: data = (module, (views_module, urls_module, models_module, forms_module, search_indexes_module), model_class) if model_name in ['_Unit', '_Role', '_AccountingYear']: retour.insert(0, data) else: retour.append(data) already_returned.append(model_class) return retour class GenericModel(models.Model): deleted = models.BooleanField(default=False) @staticmethod def startup(): from accounting_core.utils import AccountingYearLinked, CostCenterLinked classes = build_models_list_of(GenericModel) cache = {} for module, (views_module, urls_module, models_module, forms_module, search_indexes_module), model_class in classes: if model_class.__name__[0] != '_': continue extra_data = {'__module__': models_module.__name__} for SpecificClass in [GenericStateModel, GenericExternalUnitAllowed, GenericDelayValidableInfo, AccountingYearLinked, AutoVisibilityLevel, CostCenterLinked]: if issubclass(model_class, SpecificClass): extra_data.update(SpecificClass.do(module, models_module, model_class, cache)) for key, value in model_class.__dict__.iteritems(): if hasattr(value, '__class__') and value.__class__ == FalseFK: extra_data.update({key: models.ForeignKey(cache[value.model], *value.args, **value.kwargs)}) real_model_class = type(model_class.__name__[1:], (model_class,), extra_data) setattr(models_module, real_model_class.__name__, real_model_class) cache['%s.%s' % (models_module.__name__, real_model_class.__name__)] = real_model_class logging_class = type('%sLogging' % 
(real_model_class.__name__,), (GenericLogEntry,), {'object': models.ForeignKey(real_model_class, related_name='logs'), '__module__': models_module.__name__}) setattr(models_module, logging_class.__name__, logging_class) views_class = type('%sViews' % (real_model_class.__name__,), (GenericObjectView,), {'object': models.ForeignKey(real_model_class, related_name='views'), '__module__': models_module.__name__}) setattr(models_module, views_class.__name__, views_class) setattr(real_model_class, "_t2_views_class", views_class) unikey = '{}.{}'.format(models_module.__name__, real_model_class.__name__) GENERICS_MODELS[unikey] = (real_model_class, logging_class) if issubclass(model_class, GenericModelWithFiles): file_class = type('%sFile' % (real_model_class.__name__,), (GenericFile,), {'object': models.ForeignKey(real_model_class, related_name='files', blank=True, null=True), 'file': models.FileField(upload_to='uploads/_generic/%s/' % (real_model_class.__name__,)), '__module__': models_module.__name__}) setattr(models_module, file_class.__name__, file_class) full_upload_path = '%s/uploads/_generic/%s/' % (settings.MEDIA_ROOT, real_model_class.__name__) if not os.path.isdir(full_upload_path): print "[!] %s need to be a folder for file uplodad ! (And don\'t forget the gitignore)" % (full_upload_path,) else: file_class = None if issubclass(model_class, GenericTaggableObject): tag_class = type('%sTag' % (real_model_class.__name__,), (GenericTag,), {'object': models.ForeignKey(real_model_class, related_name='tags'), '__module__': models_module.__name__}) setattr(models_module, tag_class.__name__, tag_class) else: tag_class = None def generate_meta(Model): class Meta(): model = Model exclude = ('deleted', 'status', 'accounting_year') class MetaNoUnit(): model = Model exclude = ('deleted', 'status', 'accounting_year', 'unit') class MetaNoUnitExternal(): model = Model exclude = ('deleted', 'status', 'accounting_year', 'unit', 'unit_blank_user') if hasattr(model_class.MetaData, 'has_unit') and model_class.MetaData.has_unit: if issubclass(model_class, GenericExternalUnitAllowed): return MetaNoUnitExternal return MetaNoUnit return Meta form_model_class = type(real_model_class.__name__ + 'Form', (GenericForm,), {'Meta': generate_meta(real_model_class)}) setattr(forms_module, form_model_class.__name__, form_model_class) base_views_name = real_model_class.__name__.lower() if not hasattr(views_module, base_views_name + '_list'): setattr(views_module, '%s_list' % (base_views_name,), views.generate_list(module, base_views_name, real_model_class, tag_class)) setattr(views_module, '%s_list_json' % (base_views_name,), views.generate_list_json(module, base_views_name, real_model_class, tag_class)) setattr(views_module, '%s_logs' % (base_views_name,), views.generate_logs(module, base_views_name, real_model_class)) setattr(views_module, '%s_logs_json' % (base_views_name,), views.generate_logs_json(module, base_views_name, real_model_class, logging_class)) setattr(views_module, '%s_edit' % (base_views_name,), views.generate_edit(module, base_views_name, real_model_class, form_model_class, logging_class, file_class, tag_class)) setattr(views_module, '%s_show' % (base_views_name,), views.generate_show(module, base_views_name, real_model_class, logging_class, tag_class)) setattr(views_module, '%s_delete' % (base_views_name,), views.generate_delete(module, base_views_name, real_model_class, logging_class)) setattr(views_module, '%s_deleted' % (base_views_name,), views.generate_deleted(module, base_views_name, 
real_model_class, logging_class)) setattr(views_module, '%s_mayi' % (base_views_name,), views.generate_mayi(module, base_views_name, real_model_class, logging_class)) urls_module.urlpatterns += patterns(views_module.__name__, url(r'^%s/$' % (base_views_name,), '%s_list' % (base_views_name,)), url(r'^%s/mayi$' % (base_views_name,), '%s_mayi' % (base_views_name,)), url(r'^%s/json$' % (base_views_name,), '%s_list_json' % (base_views_name,)), url(r'^%s/deleted$' % (base_views_name,), '%s_deleted' % (base_views_name,)), url(r'^%s/logs$' % (base_views_name,), '%s_logs' % (base_views_name,)), url(r'^%s/logs/json$' % (base_views_name,), '%s_logs_json' % (base_views_name,)), url(r'^%s/(?P<pk>[0-9~]+)/edit$' % (base_views_name,), '%s_edit' % (base_views_name,)), url(r'^%s/(?P<pk>[0-9,]+)/delete$' % (base_views_name,), '%s_delete' % (base_views_name,)), url(r'^%s/(?P<pk>[0-9]+)/$' % (base_views_name,), '%s_show' % (base_views_name,)), ) setattr(real_model_class, '_show_view', '%s.%s_show' % (views_module.__name__, base_views_name,)) if issubclass(model_class, GenericStateModel): setattr(views_module, '%s_switch_status' % (base_views_name,), views.generate_switch_status(module, base_views_name, real_model_class, logging_class)) urls_module.urlpatterns += patterns(views_module.__name__, url(r'^%s/(?P<pk>[0-9,]+)/switch_status$' % (base_views_name,), '%s_switch_status' % (base_views_name,)), ) if hasattr(model_class.MetaData, 'menu_id_calendar'): setattr(views_module, '%s_calendar' % (base_views_name,), views.generate_calendar(module, base_views_name, real_model_class)) setattr(views_module, '%s_calendar_json' % (base_views_name,), views.generate_calendar_json(module, base_views_name, real_model_class)) urls_module.urlpatterns += patterns(views_module.__name__, url(r'^%s/calendar/$' % (base_views_name,), '%s_calendar' % (base_views_name,)), url(r'^%s/calendar/json$' % (base_views_name,), '%s_calendar_json' % (base_views_name,)), ) if hasattr(model_class.MetaData, 'menu_id_calendar_related'): setattr(views_module, '%s_calendar_related' % (base_views_name,), views.generate_calendar_related(module, base_views_name, real_model_class)) setattr(views_module, '%s_calendar_related_json' % (base_views_name,), views.generate_calendar_related_json(module, base_views_name, real_model_class)) urls_module.urlpatterns += patterns(views_module.__name__, url(r'^%s/related/calendar/$' % (base_views_name,), '%s_calendar_related' % (base_views_name,)), url(r'^%s/related/calendar/json$' % (base_views_name,), '%s_calendar_related_json' % (base_views_name,)), ) if issubclass(model_class, GenericStateUnitValidable): setattr(views_module, '%s_list_related' % (base_views_name,), views.generate_list_related(module, base_views_name, real_model_class)) setattr(views_module, '%s_list_related_json' % (base_views_name,), views.generate_list_related_json(module, base_views_name, real_model_class)) setattr(views_module, '%s_calendar_specific' % (base_views_name,), views.generate_calendar_specific(module, base_views_name, real_model_class)) setattr(views_module, '%s_calendar_specific_json' % (base_views_name,), views.generate_calendar_specific_json(module, base_views_name, real_model_class)) setattr(views_module, '%s_directory' % (base_views_name,), views.generate_directory(module, base_views_name, real_model_class)) urls_module.urlpatterns += patterns(views_module.__name__, url(r'^%s/related/$' % (base_views_name,), '%s_list_related' % (base_views_name,)), url(r'^%s/related/json$' % (base_views_name,), '%s_list_related_json' % 
(base_views_name,)), url(r'^%s/specific/(?P<pk>[0-9~]+)/calendar/$' % (base_views_name,), '%s_calendar_specific' % (base_views_name,)), url(r'^%s/specific/(?P<pk>[0-9~]+)/calendar/json$' % (base_views_name,), '%s_calendar_specific_json' % (base_views_name,)), url(r'^%s/directory/$' % (base_views_name,), '%s_directory' % (base_views_name,)), ) if issubclass(model_class, GenericStateValidableOrModerable) and real_model_class not in moderable_things: moderable_things.append(real_model_class) if issubclass(model_class, AccountingYearLinked) and hasattr(model_class, 'MetaAccounting') and hasattr(model_class.MetaAccounting, 'copiable') and model_class.MetaAccounting.copiable and real_model_class not in copiable_things: copiable_things.append(real_model_class) if issubclass(model_class, GenericContactableModel): setattr(views_module, '%s_contact' % (base_views_name,), views.generate_contact(module, base_views_name, real_model_class, logging_class)) urls_module.urlpatterns += patterns(views_module.__name__, url(r'^%s/(?P<pk>[0-9]+)/contact/(?P<key>.+)$' % (base_views_name,), '%s_contact' % (base_views_name,)), ) if file_class: setattr(views_module, '%s_file_upload' % (base_views_name,), views.generate_file_upload(module, base_views_name, real_model_class, logging_class, file_class)) setattr(views_module, '%s_file_delete' % (base_views_name,), views.generate_file_delete(module, base_views_name, real_model_class, logging_class, file_class)) setattr(views_module, '%s_file_get' % (base_views_name,), views.generate_file_get(module, base_views_name, real_model_class, logging_class, file_class)) setattr(views_module, '%s_file_get_thumbnail' % (base_views_name,), views.generate_file_get_thumbnail(module, base_views_name, real_model_class, logging_class, file_class)) urls_module.urlpatterns += patterns(views_module.__name__, url(r'^%sfile/upload$' % (base_views_name,), '%s_file_upload' % (base_views_name,)), url(r'^%sfile/(?P<pk>[0-9]+)/delete$' % (base_views_name,), '%s_file_delete' % (base_views_name,)), url(r'^%sfile/(?P<pk>[0-9]+)/get/.*$' % (base_views_name,), '%s_file_get' % (base_views_name,)), url(r'^%sfile/(?P<pk>[0-9]+)/thumbnail$' % (base_views_name,), '%s_file_get_thumbnail' % (base_views_name,)), ) if tag_class: setattr(views_module, '%s_tag_search' % (base_views_name,), views.generate_tag_search(module, base_views_name, real_model_class, logging_class, tag_class)) urls_module.urlpatterns += patterns(views_module.__name__, url(r'^%stags/search$' % (base_views_name,), '%s_tag_search' % (base_views_name,)), ) if issubclass(model_class, SearchableModel): if not search_indexes_module: raise(Exception("{} need a search_indexes.py, please create it in {}/".format(model_class.__name__, module.__name__))) index = index_generator(real_model_class) setattr(search_indexes_module, index.__name__, index) def build_state(self): retour = {} opts = self._meta for f in sorted(opts.fields + opts.many_to_many): if isinstance(f, models.DateTimeField): if not getattr(self, f.name): retour[f.name] = None else: loc = getattr(self, f.name).astimezone(timezone(settings.TIME_ZONE)) retour[f.name] = loc.strftime("%Y-%m-%d %H:%M:%S") elif isinstance(f, models.ManyToManyField): retour[f.name] = u', '.join([unicode(x) for x in getattr(self, f.name).all()]) else: retour[f.name] = unicode(getattr(self, f.name)) return retour def last_log(self): return self.logs.order_by('-when').first() def get_creator(self): return getattr(self.logs.filter(what='created').first(), 'who', None) def get_creation_date(self): return 
getattr(self.logs.filter(what='created').first(), 'when', None) def display_url(self): return reverse(str(self.__class__._show_view), args=(self.pk,)) class Meta: abstract = True def get_full_class_name(self): return '{}.{}'.format(self.__class__.__module__, self.__class__.__name__) def is_new(self, user): try: view_obj = self.views.get(who=user) return view_obj.when <= self.last_log().when except self._t2_views_class.DoesNotExist: return True def user_has_seen_object(self, user): view_obj, __ = self._t2_views_class.objects.get_or_create(object=self, who=user) view_obj.when = now() view_obj.save() class GenericModelWithFiles(object): def get_images_files(self): retour = [] for file in self.files.all(): if file.is_picture(): retour.append(file) return retour def get_pdf_files(self): retour = [] for file in self.files.all(): if file.is_pdf(): retour.append(file) return retour class GenericFile(models.Model): upload_date = models.DateTimeField(auto_now_add=True) uploader = models.ForeignKey(TruffeUser) def basename(self): return os.path.basename(self.file.path) def is_picture(self): type, __ = mimetypes.guess_type(self.file.path) return type and type.startswith('image/') def is_pdf(self): type, __ = mimetypes.guess_type(self.file.path) return type == 'application/pdf' class Meta: abstract = True class GenericStateModel(object): @staticmethod def do(module, models_module, model_class, cache): return {'status': models.CharField(max_length=255, choices=model_class.MetaState.states.iteritems(), default=model_class.MetaState.default)} def status_color(self): return self.MetaState.states_colors.get(self.status, 'default') def status_icon(self): return self.MetaState.states_icons.get(self.status, '') def may_switch_to(self, user, dest_state): if self.status == dest_state: return False if user.is_superuser: return True return dest_state in self.MetaState.states_links[self.status]
BSD 2-Clause Simplified License
gaasedelen/prefix
plugin/ida_prefix.py
get_selected_funcs
python
def get_selected_funcs(): import sip twidget = idaapi.find_widget("Functions window") widget = sip.wrapinstance(int(twidget), QtWidgets.QWidget) if not widget: idaapi.warning("Unable to find 'Functions window'") return table = widget.findChild(QtWidgets.QTableView) selected_funcs = [str(s.data()) for s in table.selectionModel().selectedRows()] return match_funcs(selected_funcs)
Return the list of function names selected in the Functions window.
https://github.com/gaasedelen/prefix/blob/8ad9dc63b388e947136bfcd08a509512eeffeb27/plugin/ida_prefix.py#L533-L565
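Outside IDA, the sip/find_widget wrapping above is unavailable, but the selection-reading half of get_selected_funcs is plain Qt model/view code. A minimal PyQt5 sketch of just that step, using a made-up QStandardItemModel as a stand-in for the Functions window (not part of the plugin):

# Stand-in for the Functions window: a QTableView over a QStandardItemModel;
# reading the selected rows mirrors what get_selected_funcs does inside IDA.
from PyQt5 import QtGui, QtWidgets

app = QtWidgets.QApplication([])
model = QtGui.QStandardItemModel()
for name in ("sub_401000", "main", "init_cfg"):
    model.appendRow(QtGui.QStandardItem(name))

view = QtWidgets.QTableView()
view.setModel(model)
view.selectRow(1)  # pretend the user highlighted the second function

selected = [str(ix.data()) for ix in view.selectionModel().selectedRows()]
print(selected)  # ['main']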
import os import idc import idaapi import idautils from PyQt5 import QtGui, QtCore, QtWidgets VERSION = "v1.2" AUTHORS = ['Andrew Marumoto', 'Markus Gaasedelen'] def PLUGIN_ENTRY(): return prefix_t() class prefix_t(idaapi.plugin_t): flags = idaapi.PLUGIN_PROC | idaapi.PLUGIN_HIDE help = "" comment = "A plugin for easy function prefixing" wanted_name = "prefix" wanted_hotkey = "" def init(self): self._init_action_bulk() self._init_action_clear() self._init_action_recursive() self._init_hooks() idaapi.msg("%s %s initialized...\n" % (self.wanted_name, VERSION)) return idaapi.PLUGIN_KEEP def run(self, arg): idaapi.msg("%s cannot be run as a script.\n" % self.wanted_name) def term(self): self._hooks.unhook() self._del_action_bulk() self._del_action_clear() self._del_action_recursive() idaapi.msg("%s terminated...\n" % self.wanted_name) def _init_hooks(self): self._hooks = Hooks() self._hooks.ready_to_run = self._init_hexrays_hooks self._hooks.hook() def _init_hexrays_hooks(self): if idaapi.init_hexrays_plugin(): idaapi.install_hexrays_callback(self._hooks.hxe_callback) ACTION_BULK = "prefix:bulk" ACTION_CLEAR = "prefix:clear" ACTION_RECURSIVE = "prefix:recursive" def _init_action_bulk(self): self._bulk_icon_id = idaapi.load_custom_icon(plugin_resource("bulk.png")) action_desc = idaapi.action_desc_t( self.ACTION_BULK, "Prefix selected functions", IDACtxEntry(bulk_prefix), None, "Assign a user prefix to the selected functions", self._bulk_icon_id ) assert idaapi.register_action(action_desc), "Action registration failed" def _init_action_clear(self): self._clear_icon_id = idaapi.load_custom_icon(plugin_resource("clear.png")) action_desc = idaapi.action_desc_t( self.ACTION_CLEAR, "Clear prefixes", IDACtxEntry(clear_prefix), None, "Clear user prefixes from the selected functions", self._clear_icon_id ) assert idaapi.register_action(action_desc), "Action registration failed" def _init_action_recursive(self): self._recursive_icon_id = idaapi.load_custom_icon(plugin_resource("recursive.png")) action_desc = idaapi.action_desc_t( self.ACTION_RECURSIVE, "Recursive function prefix", IDACtxEntry(recursive_prefix_cursor), None, "Recursively prefix callees of this function", self._recursive_icon_id ) assert idaapi.register_action(action_desc), "Action registration failed" def _del_action_bulk(self): idaapi.unregister_action(self.ACTION_BULK) idaapi.free_custom_icon(self._bulk_icon_id) self._bulk_icon_id = idaapi.BADADDR def _del_action_clear(self): idaapi.unregister_action(self.ACTION_CLEAR) idaapi.free_custom_icon(self._clear_icon_id) self._clear_icon_id = idaapi.BADADDR def _del_action_recursive(self): idaapi.unregister_action(self.ACTION_RECURSIVE) idaapi.free_custom_icon(self._recursive_icon_id) self._recursive_icon_id = idaapi.BADADDR class Hooks(idaapi.UI_Hooks): def ready_to_run(self): pass def finish_populating_widget_popup(self, widget, popup): inject_prefix_actions(widget, popup, idaapi.get_widget_type(widget)) return 0 def hxe_callback(self, event, *args): if event == idaapi.hxe_populating_popup: form, popup, vu = args if get_cursor_func_ref() == idaapi.BADADDR: return 0 idaapi.attach_action_to_popup( form, popup, prefix_t.ACTION_RECURSIVE, "Rename global item", idaapi.SETMENU_APP ) return 0 def recursive_prefix_cursor(): target = get_cursor_func_ref() if target == idaapi.BADADDR: return recursive_prefix(target) def inject_prefix_actions(form, popup, form_type): if form_type == idaapi.BWN_DISASMS: if get_cursor_func_ref() == idaapi.BADADDR: return idaapi.attach_action_to_popup( form, popup, 
prefix_t.ACTION_RECURSIVE, "Rename", idaapi.SETMENU_APP ) elif form_type == idaapi.BWN_FUNCS: idaapi.attach_action_to_popup( form, popup, prefix_t.ACTION_BULK, "Delete function(s)...", idaapi.SETMENU_INS ) idaapi.attach_action_to_popup( form, popup, prefix_t.ACTION_CLEAR, "Delete function(s)...", idaapi.SETMENU_INS ) idaapi.attach_action_to_popup( form, popup, None, "Delete function(s)...", idaapi.SETMENU_INS ) return 0 PREFIX_DEFAULT = "MyPrefix" PREFIX_SEPARATOR = '%' def recursive_prefix(addr): func_addr = idaapi.get_name_ea(idaapi.BADADDR, idaapi.get_func_name(addr)) if func_addr == idaapi.BADADDR: idaapi.msg("Prefix: 0x%08X does not belong to a defined function\n" % addr) return tag = idaapi.ask_str(PREFIX_DEFAULT, 0, "Function Tag") if tag == None: return elif tag == '': idaapi.warning("[ERROR] Tag cannot be empty [ERROR]") return nodes_xref_down = graph_down(func_addr, path=set([])) tmp = [] tmp1 = '' for func_addr in nodes_xref_down: tmp1 = idaapi.get_func_name(func_addr) if tmp1: tmp.append(tmp1) nodes_xref_down = tmp for rename in nodes_xref_down: func_addr = idaapi.get_name_ea(idaapi.BADADDR, rename) if tag not in rename: idaapi.set_name(func_addr,'%s%s%s' % (str(tag), PREFIX_SEPARATOR, rename), idaapi.SN_NOWARN) refresh_views() def bulk_prefix(): tag = idaapi.ask_str(PREFIX_DEFAULT, 0, "Function Tag") if tag == None: return elif tag == '': idaapi.warning("[ERROR] Tag cannot be empty [ERROR]") return for func_name in get_selected_funcs(): if func_name.startswith(tag): continue new_name = '%s%s%s' % (str(tag), PREFIX_SEPARATOR, func_name) func_addr = idaapi.get_name_ea(idaapi.BADADDR, func_name) idaapi.set_name(func_addr, new_name, idaapi.SN_NOWARN) refresh_views() def clear_prefix(): for func_name in get_selected_funcs(): i = func_name.rfind(PREFIX_SEPARATOR) if i == -1: continue new_name = func_name[i+1:] func_addr = idaapi.get_name_ea(idaapi.BADADDR, func_name) idaapi.set_name(func_addr, new_name, idaapi.SN_NOWARN) refresh_views() def refresh_views(): idaapi.refresh_idaview_anyway() current_widget = idaapi.get_current_widget() vu = idaapi.get_widget_vdui(current_widget) if vu: vu.refresh_ctext() def get_all_funcs(): return set(idaapi.get_func_name(ea) for ea in idautils.Functions()) def get_cursor_func_ref(): current_widget = idaapi.get_current_widget() form_type = idaapi.get_widget_type(current_widget) vu = idaapi.get_widget_vdui(current_widget) if vu: cursor_addr = vu.item.get_ea() elif form_type == idaapi.BWN_DISASM: cursor_addr = idaapi.get_screen_ea() opnum = idaapi.get_opnum() if opnum != -1: op_addr = idc.get_operand_value(cursor_addr, opnum) op_func = idaapi.get_func(op_addr) if op_func and op_func.start_ea == op_addr: return op_addr else: return idaapi.BADADDR cursor_func = idaapi.get_func(cursor_addr) if cursor_func and cursor_func.start_ea == cursor_addr: return cursor_addr return idaapi.BADADDR
MIT License
jameskbowler/fxcollect
fx_collect/signals/time_signals.py
TimeSignals._merge_all_signals
python
def _merge_all_signals(self): base = np.arange( self.start_date, self.end_date, dtype='datetime64[m]' ) arr_list = [] for tf, d in self._time_frames.items(): if tf == 'M1': merged = self._find_monthly_signal() elif tf == 'W1': merged = self._find_weekly_signal() else: merged = self._find_else_signal(base, d, tf) arr_list.append(merged) _a = np.array( sorted(np.concatenate(arr_list), key=lambda x: x[0]) ) fs, fe = [], [] for s in _a[_a[:,0] == self.start_date]: fs.append([s[0],s[1]-timedelta(days=2),s[2],s[3]]) for e in _a[_a[:,2] == self.end_date]: fe.append([e[0],e[1],e[2]+timedelta(days=2),e[3]]) a = _a[_a[:,0] != self.start_date] arr1 = a[a[:,2] != self.end_date] arr2 = np.array(fs) arr3 = np.array(fe) return np.array(sorted(np.concatenate( [arr1, arr2, arr3]), key=lambda x: x[0]))
Encapsulate all signal creation methods and concatenate their results into a date-sorted NumPy array.
https://github.com/jameskbowler/fxcollect/blob/02d8a552aad55f5002eda864d674a9f0c21dff8f/fx_collect/signals/time_signals.py#L52-L85
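Two of the building blocks used by _merge_all_signals — a minute-resolution datetime64 base grid and a final date-sorted concatenation — can be sketched on their own. The dates and the toy signal rows below are fabricated for illustration:

import numpy as np

# Minute-resolution base grid between two dates, as used for the intraday frames.
start = np.datetime64('2017-01-02T00:00')
end = np.datetime64('2017-01-04T00:00')
base = np.arange(start, end, dtype='datetime64[m]')
print(base.shape)  # (2880,) -> one entry per minute over two days

# Toy signal rows [signal_time, start, end, frame]; concatenate and sort by date.
a = np.array([[base[120], base[0], base[119], 'H2']], dtype=object)
b = np.array([[base[60], base[0], base[59], 'H1']], dtype=object)
merged = np.array(sorted(np.concatenate([a, b]), key=lambda x: x[0]))
print(merged[:, 3])  # ['H1' 'H2'] -> the earlier signal comes first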
from datetime import datetime, timedelta from ..event import SignalEvent from ..utils.date_utils import ( end_of_last_month, end_of_month, end_of_next_month, new_york_offset ) import numpy as np class TimeSignals(object): def __init__(self, events_queue, start_date, end_date): self.events_queue = events_queue self._time_frames = { 'm1': 1, 'm5': 5, 'm15': 15, 'm30': 30, 'H1': 60, 'H2': 120, 'H4': 240, 'H8': 480, 'D1': 1440, 'W1': None , 'M1': None } self.cur_time = datetime.utcnow() self.start_date = start_date self.end_date = end_date self.signals = self._merge_all_signals() self.init_signals = self.get_init_signals()
MIT License
forslund/spotify-skill
__init__.py
SpotifySkill.query_song
python
def query_song(self, song, bonus): data = None by_word = ' {} '.format(self.translate('by')) if len(song.split(by_word)) > 1: song, artist = song.split(by_word) song_search = '*{}* artist:{}'.format(song, artist) else: song_search = song data = self.spotify.search(song_search, type='track') if data and len(data['tracks']['items']) > 0: tracks = [(best_confidence(d['name'], song), d) for d in data['tracks']['items']] tracks.sort(key=lambda x: x[0]) tracks.reverse() tracks = [t for t in tracks if t[0] > tracks[0][0] - 0.1] tracks.sort(key=lambda x: x[1]['popularity']) self.log.debug([(t[0], t[1]['name'], t[1]['artists'][0]['name']) for t in tracks]) data['tracks']['items'] = [tracks[-1][1]] return (tracks[-1][0] + bonus, {'data': data, 'name': None, 'type': 'track'}) else: return NOTHING_FOUND
Try to find a song. Searches Spotify for the song, and for the artist if one is provided. Arguments: song (str): Song to search for. bonus (float): Any bonus to apply to the confidence. Returns: Tuple with confidence and data, or NOTHING_FOUND.
https://github.com/forslund/spotify-skill/blob/eacff8ae9b0ae0a73c325a32eba2e1a48a03c990/__init__.py#L702-L737
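Running query_song for real needs an authenticated spotipy client, but its ranking step — score each returned track against the query, keep everything within 0.1 of the best score, then prefer the most popular — can be reproduced offline. The track dicts are fabricated and difflib.SequenceMatcher stands in for Mycroft's fuzzy_match:

from difflib import SequenceMatcher

def fuzzy(a, b):  # stand-in for mycroft.util.parse.fuzzy_match
    return SequenceMatcher(None, a.lower(), b.lower()).ratio()

query = "let it be"
items = [
    {"name": "Let It Be", "popularity": 80},
    {"name": "Let It Be - Remastered 2009", "popularity": 92},
    {"name": "Let It Bleed", "popularity": 70},
]

tracks = sorted(((fuzzy(t["name"], query), t) for t in items),
                key=lambda x: x[0], reverse=True)
tracks = [t for t in tracks if t[0] > tracks[0][0] - 0.1]  # keep near-best matches
tracks.sort(key=lambda x: x[1]["popularity"])              # most popular track last
print(tracks[-1][1]["name"])  # 'Let It Be'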
import re from mycroft.skills.core import intent_handler from mycroft.util.parse import match_one, fuzzy_match from mycroft.api import DeviceApi from mycroft.messagebus import Message from requests import HTTPError from adapt.intent import IntentBuilder import time from os.path import abspath, dirname, join from subprocess import call, Popen, DEVNULL import signal from socket import gethostname import spotipy from .spotify import (MycroftSpotifyCredentials, SpotifyConnect, get_album_info, get_artist_info, get_song_info, get_show_info, load_local_credentials) import random from mycroft.skills.common_play_skill import CommonPlaySkill, CPSMatchLevel from enum import Enum class DeviceType(Enum): MYCROFT = 1 DEFAULT = 2 DESKTOP = 3 FIRSTBEST = 4 NOTFOUND = 5 class SpotifyPlaybackError(Exception): pass class NoSpotifyDevicesError(Exception): pass class PlaylistNotFoundError(Exception): pass class SpotifyNotAuthorizedError(Exception): pass MANAGED_PLATFORMS = ['mycroft_mark_1', 'mycroft_mark_2pi'] NOTHING_FOUND = (None, 0.0) DIRECT_RESPONSE_CONFIDENCE = 0.8 MATCH_CONFIDENCE = 0.5 def best_result(results): if len(results) == 0: return NOTHING_FOUND else: results.reverse() return sorted(results, key=lambda x: x[0])[-1] def best_confidence(title, query): best = title.lower() best_stripped = re.sub(r'(\(.+\)|-.+)$', '', best).strip() return max(fuzzy_match(best, query), fuzzy_match(best_stripped, query)) def update_librespot(): try: call(["bash", join(dirname(abspath(__file__)), "requirements.sh")]) except Exception as e: print('Librespot Update failed, {}'.format(repr(e))) def status_info(status): try: artist = status['item']['artists'][0]['name'] except Exception: artist = 'unknown' try: track = status['item']['name'] except Exception: track = 'unknown' try: album = status['item']['album']['name'] except Exception: album = 'unknown' return track, artist, album class SpotifySkill(CommonPlaySkill): def __init__(self): super(SpotifySkill, self).__init__() self.index = 0 self.spotify = None self.process = None self.device_name = None self.dev_id = None self.idle_count = 0 self.ducking = False self.is_player_remote = False self.mouth_text = None self.librespot_starting = False self.librespot_failed = False self.__device_list = None self.__devices_fetched = 0 self.OAUTH_ID = 1 enclosure_config = self.config_core.get('enclosure') self.platform = enclosure_config.get('platform', 'unknown') self.DEFAULT_VOLUME = 80 if self.platform == 'mycroft_mark_1' else 100 self._playlists = None self.saved_tracks = None self.regexes = {} self.last_played_type = None self.is_playing = False self.__saved_tracks_fetched = 0 def translate_regex(self, regex): if regex not in self.regexes: path = self.find_resource(regex + '.regex') if path: with open(path) as f: string = f.read().strip() self.regexes[regex] = string return self.regexes[regex] def launch_librespot(self): self.librespot_starting = True path = self.settings.get('librespot_path', None) if self.platform in MANAGED_PLATFORMS and not path: path = 'librespot' if (path and self.device_name and 'user' in self.settings and 'password' in self.settings): log_level = self.config_core.get('log_level', '') if 'librespot_log' in self.settings or log_level == 'DEBUG': outs = None else: outs = DEVNULL self.process = Popen([path, '-n', self.device_name, '-u', self.settings['user'], '-p', self.settings['password']], stdout=outs, stderr=outs) time.sleep(3) if self.process and self.process.poll() is not None: self.log.error('librespot failed to start.') if 
self.settings.get('user'): self.librespot_failed = True self.process = None self.librespot_starting = False return dev = self.device_by_name(self.device_name) if dev: self.spotify.volume(dev['id'], self.DEFAULT_VOLUME) self.librespot_starting = False def initialize(self): super().initialize() self.cancel_scheduled_event('SpotifyLogin') self.add_event('mycroft.audio.service.next', self.next_track) self.add_event('mycroft.audio.service.prev', self.prev_track) self.add_event('mycroft.audio.service.pause', self.pause) self.add_event('mycroft.audio.service.resume', self.resume) self.settings_change_callback = self.on_websettings_changed self.schedule_repeating_event(self.on_websettings_changed, None, 5 * 60, name='SpotifyLogin') if self.platform in MANAGED_PLATFORMS: update_librespot() self.on_websettings_changed() def on_websettings_changed(self): if not self.spotify: try: self.load_credentials() except Exception as e: self.log.debug('Credentials could not be fetched. ' '({})'.format(repr(e))) if self.spotify: self.cancel_scheduled_event('SpotifyLogin') if 'user' in self.settings and 'password' in self.settings: if self.process: self.stop_librespot() self.launch_librespot() self.refresh_saved_tracks() def load_local_creds(self): try: creds = load_local_credentials(self.settings['user']) spotify = SpotifyConnect(client_credentials_manager=creds) except Exception: self.log.exception('Couldn\'t fetch credentials') spotify = None return spotify def load_remote_creds(self): try: creds = MycroftSpotifyCredentials(self.OAUTH_ID) spotify = SpotifyConnect(client_credentials_manager=creds) except HTTPError: self.log.info('Couldn\'t fetch credentials') spotify = None return spotify def load_credentials(self): self.spotify = self.load_local_creds() or self.load_remote_creds() if self.spotify: self.create_intents() self.device_name = DeviceApi().get().get('name') def failed_auth(self): if 'user' not in self.settings: self.log.error('Settings hasn\'t been received yet') self.speak_dialog('NoSettingsReceived') elif not self.settings.get("user"): self.log.error('User info has not been set.') self.speak_dialog('NotConfigured') else: self.log.error('User info has been set but Auth failed.') self.speak_dialog('NotAuthorized') def handle_listener_started(self, message): if (self.spotify.is_playing() and self.is_player_remote and self.settings.get('use_ducking', False)): self.__pause() self.ducking = True self.idle_count = 0 self.cancel_scheduled_event('IdleCheck') self.schedule_repeating_event(self.check_for_idle, None, 1, name='IdleCheck') def check_for_idle(self): if not self.ducking: self.cancel_scheduled_event('IdleCheck') return active = self.enclosure.display_manager.get_active() if not active == '' or active == 'SpotifySkill': self.idle_count += 1 if self.idle_count >= 5: self.cancel_scheduled_event('IdleCheck') self.ducking = False self.resume() else: self.idle_count = 0 def start_monitor(self): self.stop_monitor() self.schedule_repeating_event(self._update_display, None, 5, name='MonitorSpotify') self.add_event('recognizer_loop:record_begin', self.handle_listener_started) def stop_monitor(self): self.cancel_scheduled_event('MonitorSpotify') def _update_display(self, message): status = self.spotify.status() if self.spotify else {} self.is_playing = self.spotify.is_playing() if not status or not status.get('is_playing'): self.stop_monitor() self.mouth_text = None self.enclosure.mouth_reset() self.disable_playing_intents() return try: artist = status['item']['artists'][0]['name'] except Exception: artist = 
'' try: track = status['item']['name'] except Exception: track = '' try: image = status['item']['album']['images'][0]['url'] except Exception: image = '' self.CPS_send_status(artist=artist, track=track, image=image) if artist and track: text = '{}: {}'.format(artist, track) else: text = '' if text != self.mouth_text: self.mouth_text = text self.enclosure.mouth_text(text) def CPS_match_query_phrase(self, phrase): if not self.playback_prerequisits_ok(): self.log.debug('Spotify is not available to play') if 'spotify' in phrase: return phrase, CPSMatchLevel.GENERIC else: return None spotify_specified = 'spotify' in phrase bonus = 0.1 if spotify_specified else 0.0 phrase = re.sub(self.translate_regex('on_spotify'), '', phrase, re.IGNORECASE) confidence, data = self.continue_playback(phrase, bonus) if not data: confidence, data = self.specific_query(phrase, bonus) if not data: confidence, data = self.generic_query(phrase, bonus) if data: self.log.info('Spotify confidence: {}'.format(confidence)) self.log.info(' data: {}'.format(data)) if data.get('type') in ['saved_tracks', 'album', 'artist', 'track', 'playlist', 'show']: if spotify_specified: level = CPSMatchLevel.EXACT else: if confidence > 0.9: level = CPSMatchLevel.TITLE elif confidence < 0.5: level = CPSMatchLevel.GENERIC else: level = CPSMatchLevel.TITLE phrase += ' on spotify' elif data.get('type') == 'continue': if spotify_specified > 0: level = CPSMatchLevel.EXACT else: level = CPSMatchLevel.GENERIC phrase += ' on spotify' else: self.log.warning('Unexpected spotify type: ' '{}'.format(data.get('type'))) level = CPSMatchLevel.GENERIC return phrase, level, data else: self.log.debug('Couldn\'t find anything to play on Spotify') def continue_playback(self, phrase, bonus): if phrase.strip() == 'spotify': return (1.0, { 'data': None, 'name': None, 'type': 'continue' }) else: return NOTHING_FOUND def specific_query(self, phrase, bonus): match = re.match(self.translate_regex('saved_songs'), phrase, re.IGNORECASE) if match and self.saved_tracks: return (1.0, {'data': None, 'type': 'saved_tracks'}) match = re.match(self.translate_regex('playlist'), phrase, re.IGNORECASE) if match: return self.query_playlist(match.groupdict()['playlist']) match = re.match(self.translate_regex('album'), phrase, re.IGNORECASE) if match: bonus += 0.1 album = match.groupdict()['album'] return self.query_album(album, bonus) match = re.match(self.translate_regex('artist'), phrase, re.IGNORECASE) if match: artist = match.groupdict()['artist'] return self.query_artist(artist, bonus) match = re.match(self.translate_regex('song'), phrase, re.IGNORECASE) if match: song = match.groupdict()['track'] return self.query_song(song, bonus) match = re.match(self.translate_regex('podcast'), phrase, re.IGNORECASE) if match: return self.query_show(match.groupdict()['podcast']) return NOTHING_FOUND def generic_query(self, phrase, bonus): self.log.info('Handling "{}" as a genric query...'.format(phrase)) results = [] self.log.info('Checking users playlists') playlist, conf = self.get_best_user_playlist(phrase) if playlist: uri = self.playlists[playlist] data = { 'data': uri, 'name': playlist, 'type': 'playlist' } if conf and conf > DIRECT_RESPONSE_CONFIDENCE: return (conf, data) elif conf and conf > MATCH_CONFIDENCE: results.append((conf, data)) self.log.info('Checking artists') conf, data = self.query_artist(phrase, bonus) if conf and conf > DIRECT_RESPONSE_CONFIDENCE: return conf, data elif conf and conf > MATCH_CONFIDENCE: results.append((conf, data)) self.log.info('Checking tracks') 
conf, data = self.query_song(phrase, bonus) if conf and conf > DIRECT_RESPONSE_CONFIDENCE: return conf, data elif conf and conf > MATCH_CONFIDENCE: results.append((conf, data)) self.log.info('Checking albums') conf, data = self.query_album(phrase, bonus) if conf and conf > DIRECT_RESPONSE_CONFIDENCE: return conf, data elif conf and conf > MATCH_CONFIDENCE: results.append((conf, data)) self.log.info('Checking tracks') conf, data = self.get_best_public_playlist(phrase) if conf and conf > DIRECT_RESPONSE_CONFIDENCE: return conf, data elif conf and conf > MATCH_CONFIDENCE: results.append((conf, data)) return best_result(results) def query_artist(self, artist, bonus=0.0): bonus += 0.1 data = self.spotify.search(artist, type='artist') if data and data['artists']['items']: best = data['artists']['items'][0]['name'] confidence = fuzzy_match(best, artist.lower()) + bonus confidence = min(confidence, 1.0) return (confidence, { 'data': data, 'name': None, 'type': 'artist' }) else: return NOTHING_FOUND def query_album(self, album, bonus): data = None by_word = ' {} '.format(self.translate('by')) if len(album.split(by_word)) > 1: album, artist = album.split(by_word) album_search = '*{}* artist:{}'.format(album, artist) bonus += 0.1 else: album_search = album data = self.spotify.search(album_search, type='album') if data and data['albums']['items']: best = data['albums']['items'][0]['name'].lower() confidence = best_confidence(best, album) confidence = min(confidence + bonus, 1.0) self.log.info((album, best, confidence)) return (confidence, { 'data': data, 'name': None, 'type': 'album' }) return NOTHING_FOUND def query_playlist(self, playlist): result, conf = self.get_best_user_playlist(playlist) if playlist and conf > 0.5: uri = self.playlists[result] return (conf, {'data': uri, 'name': playlist, 'type': 'playlist'}) else: return self.get_best_public_playlist(playlist) def query_show(self, podcast): data = self.spotify.search(podcast, type='show') if data and data['shows']['items']: best = data['shows']['items'][0]['name'].lower() confidence = best_confidence(best, podcast) return (confidence, {'data': data, 'type': 'show'})
Apache License 2.0
pawamoy/git-changelog
scripts/update_changelog.py
write_changelog
python
def write_changelog(filepath: str, lines: List[str]) -> None: with open(filepath, "w") as changelog_file: changelog_file.write("\n".join(lines).rstrip("\n") + "\n")
Write the changelog file. Arguments: filepath: The path to the changelog file. lines: The lines to write to the file.
https://github.com/pawamoy/git-changelog/blob/aafa7793ec02af8b443576262af4e244901787dc/scripts/update_changelog.py#L65-L74
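A minimal usage sketch; the three-line function is repeated so the snippet is self-contained, and the target path is a temporary file rather than a real changelog:

import tempfile
from typing import List

def write_changelog(filepath: str, lines: List[str]) -> None:  # copied from above
    with open(filepath, "w") as changelog_file:
        changelog_file.write("\n".join(lines).rstrip("\n") + "\n")

with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as tmp:
    path = tmp.name
write_changelog(path, ["# Changelog", "", "## 0.1.0", "- initial release", ""])
print(open(path).read())  # trailing blank lines collapse to a single newline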
import re import sys from typing import List, Optional, Pattern import httpx from jinja2.sandbox import SandboxedEnvironment from git_changelog.build import Changelog, Version TEMPLATE_URL = "https://raw.githubusercontent.com/pawamoy/jinja-templates/master/keepachangelog.md" COMMIT_STYLE = "angular" def latest(lines: List[str], regex: Pattern) -> Optional[str]: for line in lines: match = regex.search(line) if match: return match.groupdict()["version"] return None def unreleased(versions: List[Version], last_release: str) -> List[Version]: for index, version in enumerate(versions): if version.tag == last_release: return versions[:index] return versions def read_changelog(filepath: str) -> List[str]: with open(filepath, "r") as changelog_file: return changelog_file.read().splitlines(keepends=False)
ISC License
geophysics-ubonn/reda
lib/reda/importers/legacy/eit40.py
apply_correction_factors
python
def apply_correction_factors(df, correction_file): if isinstance(correction_file, (list, tuple)): corr_data_raw = np.vstack( [np.loadtxt(x) for x in correction_file] ) else: corr_data_raw = np.loadtxt(correction_file) A = (corr_data_raw[:, 0] / 1e4).astype(int) B = (corr_data_raw[:, 0] % 1e4).astype(int) M = (corr_data_raw[:, 1] / 1e4).astype(int) N = (corr_data_raw[:, 1] % 1e4).astype(int) corr_data = np.vstack((A, B, M, N, corr_data_raw[:, 2])).T corr_data[:, 0:2] = np.sort(corr_data[:, 0:2], axis=1) corr_data[:, 2:4] = np.sort(corr_data[:, 2:4], axis=1) if 'frequency' not in df.columns: raise Exception( 'No frequency data found. Are you sure this is a seit data set?' ) gf = df.groupby(['a', 'b', 'm', 'n']) for key, item in gf.groups.items(): item_norm = np.hstack((np.sort(key[0:2]), np.sort(key[2:4]))) index = np.where( (corr_data[:, 0] == item_norm[0]) & (corr_data[:, 1] == item_norm[1]) & (corr_data[:, 2] == item_norm[2]) & (corr_data[:, 3] == item_norm[3]) )[0] if len(index) == 0: print(key) raise Exception( 'No correction factor found for this configuration' ) factor = corr_data[index, 4] for col in ('r', 'Zt', 'Vmn', 'rho_a'): if col in df.columns: df.iloc[item, df.columns.get_loc(col)] *= factor return corr_data
Apply correction factors for a pseudo-2D measurement setup. See Weigand and Kemna, 2017, Biogeosciences, for detailed information.
https://github.com/geophysics-ubonn/reda/blob/5be52ecb184f45f0eabb23451f039fec3d9537c5/lib/reda/importers/legacy/eit40.py#L337-L387
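A small synthetic example of the expected inputs, assuming the function is importable from the module above (the import path, the two quadrupole configurations and the factors are all made up for illustration). Each correction-file row encodes the electrodes as A*1e4 + B and M*1e4 + N, followed by the factor:

import tempfile
import numpy as np
import pandas as pd
from reda.importers.legacy.eit40 import apply_correction_factors  # assumed import path

# Two quadrupoles at one frequency, plus a matching correction file.
df = pd.DataFrame({
    'a': [1, 1], 'b': [2, 2], 'm': [3, 4], 'n': [4, 5],
    'frequency': [1.0, 1.0], 'r': [10.0, 20.0],
})
corr = np.array([[10002, 30004, 1.1],   # A=1 B=2 M=3 N=4 -> factor 1.1
                 [10002, 40005, 0.9]])  # A=1 B=2 M=4 N=5 -> factor 0.9
tmp = tempfile.NamedTemporaryFile(suffix='.dat', delete=False)
tmp.close()
np.savetxt(tmp.name, corr)

apply_correction_factors(df, tmp.name)  # scales df['r'] in place
print(df['r'].values)  # approximately [11., 18.]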
import numpy as np import scipy.io as sio import pandas as pd import datetime import reda.utils.geometric_factors as redaK def _add_rhoa(df, spacing): df['k'] = redaK.compute_K_analytical(df, spacing=spacing) df['rho_a'] = df['r'] * df['k'] if 'Zt' in df.columns: df['rho_a_complex'] = df['Zt'] * df['k'] return df def import_medusa_data(mat_filename, configs): df_emd = _read_mat_mnu0(mat_filename) if not isinstance(configs, np.ndarray): configs = np.loadtxt(configs).astype(int) quadpole_list = [] index = 0 for Ar, Br, M, N in configs: if np.unique((Ar, Br, M, N)).size != 4: print('ignoring', Ar, Br, M, N) continue print('constructing', Ar, Br, M, N) A = np.min((Ar, Br)) B = np.max((Ar, Br)) query_M = df_emd.query('a=={0} and b=={1} and p=={2}'.format( A, B, M )) query_N = df_emd.query('a=={0} and b=={1} and p=={2}'.format( A, B, N )) if query_M.size == 0 or query_N.size == 0: print( 'Could not find suitable injections', query_M.size, query_N.size ) continue index += 1 keep_cols = [ 'datetime', 'frequency', 'a', 'b', 'Zg1', 'Zg2', 'Zg3', 'Is', 'Il', 'Zg', 'Iab', ] df4 = pd.DataFrame() diff_cols = ['Zt', ] df4[keep_cols] = query_M[keep_cols] for col in diff_cols: df4[col] = query_M[col].values - query_N[col].values df4['m'] = query_M['p'].values df4['n'] = query_N['p'].values quadpole_list.append(df4) dfn = pd.concat(quadpole_list) Rsign = np.sign(dfn['Zt'].real) dfn['r'] = Rsign * np.abs(dfn['Zt']) dfn['Vmn'] = dfn['r'] * dfn['Iab'] dfn['rpha'] = np.arctan2( np.imag(dfn['Zt'].values), np.real(dfn['Zt'].values) ) * 1e3 df_final = dfn.reset_index() return df_final def _read_mat_mnu0(filename): print('read_mag_single_file') mat = sio.loadmat(filename) df_emd = _extract_emd(mat) return df_emd def _average_swapped_current_injections(df): AB = df[['a', 'b']].values abu = np.unique( AB.flatten().view(AB.dtype.descr * 2) ).view(AB.dtype).reshape(-1, 2) pairs = [] alone = [] abul = [x.tolist() for x in abu] for ab in abul: swap = list(reversed(ab)) if swap in abul: pair = (ab, swap) pair_r = (swap, ab) if pair not in pairs and pair_r not in pairs: pairs.append(pair) else: alone.append(ab) if len(pairs) * 2 + len(alone) != len(abul): print('len(pairs) * 2 == {0}'.format(len(pairs) * 2)) print(len(abul)) raise Exception( 'total numbers of unswapped-swapped matching do not match!' ) if len(pairs) > 0 and len(alone) > 0: print( 'WARNING: Found both swapped configurations and non-swapped ones!' 
) delete_slices = [] columns = [ 'frequency', 'a', 'b', 'p', 'Z1', 'Z2', 'Z3', 'Il1', 'Il2', 'Il3', 'Is1', 'Is2', 'Is3', 'Zg1', 'Zg2', 'Zg3', 'datetime', ] dtypes = {col: df.dtypes[col] for col in columns} X = df[columns].values for pair in pairs: index_a = np.where( (X[:, 1] == pair[0][0]) & (X[:, 2] == pair[0][1]) )[0] index_b = np.where( (X[:, 1] == pair[1][0]) & (X[:, 2] == pair[1][1]) )[0] A = X[index_a, :] B = X[index_b, :] diff = A[:, [0, 3]] - B[:, [0, 3]] if not np.all(diff) == 0: raise Exception('Wrong ordering') X[index_a, 4:10] = (A[:, 4:10] - B[:, 4:10]) / 2.0 X[index_a, 10:16] = (A[:, 10:16] + B[:, 10:16]) / 2.0 delete_slices.append( index_b ) X_clean = np.delete(X, np.vstack(delete_slices), axis=0) df_clean = pd.DataFrame(X_clean, columns=columns) df_clean = df_clean.astype(dtype=dtypes) return df_clean def _extract_emd(mat): emd = mat['EMD'].squeeze() epoch = datetime.datetime(1904, 1, 1) def convert_epoch(x): timestamp = epoch + datetime.timedelta(seconds=x.astype(float)) return timestamp dfl = [] for f_id in range(0, emd.size): fdata = emd[f_id] timestamp = np.atleast_2d( [convert_epoch(x) for x in fdata['Time'].squeeze()] ).T df = pd.DataFrame( np.hstack(( timestamp, fdata['ni'], fdata['nu'], fdata['Z3'], fdata['Is3'], fdata['Il3'], fdata['Zg3'], )), ) df.columns = ( 'datetime', 'a', 'b', 'p', 'Z1', 'Z2', 'Z3', 'Is1', 'Is2', 'Is3', 'Il1', 'Il2', 'Il3', 'Zg1', 'Zg2', 'Zg3', ) df['frequency'] = np.ones(df.shape[0]) * fdata['fm'].squeeze() df['datetime'] = pd.to_datetime(df['datetime']) df['a'] = df['a'].astype(int) df['b'] = df['b'].astype(int) df['p'] = df['p'].astype(int) df['Z1'] = df['Z1'].astype(complex) df['Z2'] = df['Z2'].astype(complex) df['Z3'] = df['Z3'].astype(complex) df['Zg1'] = df['Zg1'].astype(complex) df['Zg2'] = df['Zg2'].astype(complex) df['Zg3'] = df['Zg3'].astype(complex) df['Is1'] = df['Is1'].astype(complex) df['Is2'] = df['Is2'].astype(complex) df['Is3'] = df['Is3'].astype(complex) df['Il1'] = df['Il1'].astype(complex) df['Il2'] = df['Il2'].astype(complex) df['Il3'] = df['Il3'].astype(complex) dfl.append(df) df = pd.concat(dfl) df = _average_swapped_current_injections(df) condition = df['a'] > df['b'] df.loc[condition, ['a', 'b']] = df.loc[condition, ['b', 'a']].values df.loc[condition, ['Z1', 'Z2', 'Z3']] *= -1 df['Zt'] = np.mean(df[['Z1', 'Z2', 'Z3']].values, axis=1) sign_re = df['Zt'].real / np.abs(df['Zt'].real) df['r'] = np.abs(df['Zt']) * sign_re df['Is'] = np.mean(df[['Is1', 'Is2', 'Is3']].values, axis=1) df['Il'] = np.mean(df[['Il1', 'Il2', 'Il3']].values, axis=1) df['Zg'] = np.mean(df[['Zg1', 'Zg2', 'Zg3']].values, axis=1) df['Iab'] = np.abs(df['Is']) * 1e3 df['Iab'] = df['Iab'].astype(float) return df
MIT License
awslabs/aws-ops-automator
source/code/handlers/task_tracking_handler.py
TaskTrackingHandler.handle_request
python
def handle_request(self): def tasks_items_to_execute(): def table_name(rec): source_arn = rec["eventSourceARN"] return source_arn.split("/")[1] def from_tracking_table(rec): return table_name(rec) == os.getenv(handlers.ENV_ACTION_TRACKING_TABLE) def from_concurrency_table(rec): return table_name(rec) == os.getenv(handlers.ENV_CONCURRENCY_TABLE) def get_old_image(task_record): return task_record["dynamodb"].get("OldImage", {}) def get_new_image(task_record): return task_record["dynamodb"].get("NewImage", {}) def get_new_status(task_record): return get_new_image(task_record).get(handlers.TASK_TR_STATUS, {}).get("S") def get_old_status(task_record): return get_new_image(task_record).get(handlers.TASK_TR_STATUS, {}).get("S") def is_task_tracking_table_update(task_record): if not from_tracking_table(task_record): return False return task_record["eventName"] in ["UPDATE", "MODIFY"] def is_task_done(task_record): if not is_task_tracking_table_update(task_record): return False new_status = get_new_status(task_record) old_status = get_old_status(task_record) if old_status != new_status: return False return new_status in handlers.task_tracking_table.NOT_LONGER_ACTIVE_STATUSES def is_task_with_concurrency(task_record): return get_new_image(task_record).get(handlers.TASK_TR_CONCURRENCY_KEY, {}).get("S") is not None def get_old_last_update(task_record): return get_old_image(task_record).get(handlers.TASK_TR_LAST_WAIT_COMPLETION, {}).get("S") def get_new_last_update(task_record): return get_new_image(task_record).get(handlers.TASK_TR_LAST_WAIT_COMPLETION, {}).get("S") def is_delete_task(task_record): return from_tracking_table(r) and task_record["eventName"] == "REMOVE" def is_new_task(task_record): if from_tracking_table(r) and task_record["eventName"] == "INSERT": return get_new_status(task_record) == handlers.STATUS_PENDING return False def is_completed_with_concurrency(task_record): return is_task_done(task_record) and is_task_with_concurrency(task_record) def is_completed_without_concurrency(task_record): return is_task_done(task_record) and not is_task_with_concurrency(task_record) def is_wait_for_completion(task_record): if not is_task_tracking_table_update(task_record): return False if get_old_status(task_record) != handlers.STATUS_WAIT_FOR_COMPLETION or get_new_status(task_record) != handlers.STATUS_WAIT_FOR_COMPLETION: return False return get_old_last_update(task_record) != get_new_last_update(task_record) def is_concurrency_task_completed(concurrency_record): if not from_concurrency_table(concurrency_record): return False if concurrency_record["eventName"] == "REMOVE": return False return concurrency_record["dynamodb"].get("NewImage", {}).get("RunNext", {}).get("BOOL", False) def get_action_type(rec): if is_new_task(rec): return NEW_TASK if is_completed_without_concurrency(rec): return FINISHED_TASK if is_completed_with_concurrency(rec): return FINISHED_CONCURRENCY_TASK if is_wait_for_completion(rec): return CHECK_COMPLETION if is_delete_task(rec): return DELETE_ITEM if is_concurrency_task_completed(rec): return START_WAITING_ACTION return None for r in self._event.get("Records"): self._logger.debug("Record to process is {}", safe_json(r, indent=2)) if r.get("eventSource") == "aws:dynamodb": image_used = "NewImage" if "NewImage" in r["dynamodb"] else "OldImage" if r["dynamodb"].get("NewImage", {}).get(handlers.TASK_TR_ACTION) is None and r["dynamodb"].get("OldImage", {}).get(handlers.TASK_TR_ACTION) is not None: continue self._logger.debug_enabled = 
r["dynamodb"][image_used].get(handlers.TASK_TR_DEBUG, {}).get("BOOL", False) update_to_handle = get_action_type(r) if update_to_handle is not None: yield update_to_handle, r else: self._logger.debug("No action for record") try: start = datetime.now() task_handlers = [ self._handle_new_task_item, self._handle_finished_task_without_completion, self._handle_completed_concurrency_item, self._handle_check_completion, self._handle_deleted_item, self._handle_start_waiting_action ] for task_tracking_update_type, record in tasks_items_to_execute(): self.done_work = True used_image = "OldImage" if record["eventName"] == "REMOVE" else "NewImage" image = record["dynamodb"][used_image] handled_item = unpack_record(image) self._logger.debug_enabled = handled_item.get(handlers.TASK_TR_DEBUG, False) self._logger.debug("Executing handler function {} for type {} ({})", task_handlers[task_tracking_update_type].__name__, self.task_string(task_tracking_update_type), task_tracking_update_type) task_handlers[task_tracking_update_type](handled_item) if not self.done_work: self._logger.clear() running_time = float((datetime.now() - start).total_seconds()) if self.done_work: self._logger.debug(DEBUG_RESULT, running_time) return safe_dict({ "datetime": datetime.now().isoformat(), "waiting-for-execution": self.waiting_for_execution_tasks, "started-check-for-completion": self.started_completion_checks, "started-execution": self.started_tasks, "started-waiting": self.started_waiting_tasks, "completed-concurrency-tasks": self.finished_concurrency_tasks, "running-time": running_time }) finally: self._logger.flush()
Handles the event triggered by updates to the actions tracking table. :return: results of handling the selected updates
https://github.com/awslabs/aws-ops-automator/blob/362abd0717b48ecca7f20d8985ae7d76f045daf3/source/code/handlers/task_tracking_handler.py#L472-L654
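The classification closures above key off a handful of fields in each DynamoDB stream record. A fabricated record showing exactly those fields (the table name, the Status attribute and its values are placeholders; in the real code the attribute name comes from handlers.TASK_TR_STATUS):

record = {
    "eventSource": "aws:dynamodb",
    "eventName": "MODIFY",
    "eventSourceARN": ("arn:aws:dynamodb:us-east-1:111122223333:"
                       "table/ActionTrackingTable/stream/2020-01-01T00:00:00.000"),
    "dynamodb": {
        "OldImage": {"Status": {"S": "pending"}},
        "NewImage": {"Status": {"S": "completed"}},
    },
}

table_name = record["eventSourceARN"].split("/")[1]          # 'ActionTrackingTable'
old_status = record["dynamodb"]["OldImage"]["Status"]["S"]   # 'pending'
new_status = record["dynamodb"]["NewImage"]["Status"]["S"]   # 'completed'
print(record["eventName"], table_name, old_status, "->", new_status)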
import os import types from datetime import datetime import actions import boto_retry import handlers import handlers.task_tracking_table import services from handlers.task_tracking_table import TaskTrackingTable from helpers import safe_dict, safe_json, full_stack from helpers.dynamodb import unpack_record from main import lambda_handler from outputs.queued_logger import QueuedLogger from outputs.result_notifications import ResultNotifications ACTIVE_INSTANCES = "InstanceCount" CONCURRENCY_ID = handlers.TASK_TR_CONCURRENCY_ID ENV_DEBUG_TASK_TACKING_HANDLER = "DEBUG_TASK_TRACKING_HANDLER" NEW_TASK = 0 FINISHED_TASK = 1 FINISHED_CONCURRENCY_TASK = 2 CHECK_COMPLETION = 3 DELETE_ITEM = 4 START_WAITING_ACTION = 5 TASK_ACTION_STRINGS = [ "New task", "Finished task", "Finished task with concurrency handling", "Check task completion", "Delete task item", "Start waiting task" ] WARN_DELETING_RESOURCES = "Error deleting resources from bucket {} with key {}" DEBUG_ACTION = "Action is \"{}\" for task \"{}\", task-id is {}" DEBUG_DRYRUN = "Action will be executed in in dry-run mode" DEBUG_LAMBDA = "Lambda function invoked {}" DEBUG_ACTION_PARAMETERS = "Action parameters are {}" DEBUG_RUNNING_ECS_TASK = "Running {} step of task {} as ECS job" DEBUG_RESULT = "Handling actions tracking update took {:>.3f} seconds" DEBUG_MEMORY_SIZE = "Task memory allocation for executing lambda is {}" DEBUG_LAMBDA_FUNCTION_ = "Executing action with Lambda function {}, payload is {}" DEBUG_START_WAITING = "Waiting list count for ConcurrencyId \"{}\" is {}, action is \"{}\", starting waiting " "task \"{}\" with id {}" DEBUG_WAITING = "The waiting list for action \"{}\" with concurrency key \"{}\" is {}, the maximum number of concurrent " "running actions for this key is {}, action with id \"{}\" has been put in waiting state" DEBUG_DELETING_RESOURCES_FROM_S3 = "Deleting resource object {} from bucket {}, {}" ERR_RUNNING_TASK = "Error running task {}, {}, {}" LOG_STREAM = "{}-{:0>4d}{:0>2d}{:0>2d}" SCHEDULER_LAMBDA_FUNCTION_DEFAULT = "SchedulerDefault" SIZED_SCHEDULER_NAME_TEMPLATE = "Scheduler{:0>04d}" class TaskTrackingHandler(object): def __init__(self, event, context): self._context = context self._event = event self._tracking_table = None self._concurrency_table = None self.started_tasks = 0 self.started_waiting_tasks = 0 self.waiting_for_execution_tasks = 0 self.started_completion_checks = 0 self.finished_concurrency_tasks = 0 self.done_work = False self.invoked_lambda_functions = [] self.events_client = None self._s3_client = None self._db_client = None classname = self.__class__.__name__ dt = datetime.utcnow() logstream = LOG_STREAM.format(classname, dt.year, dt.month, dt.day) self._logger = QueuedLogger(logstream=logstream, context=self._context, buffersize=20, debug=os.getenv(ENV_DEBUG_TASK_TACKING_HANDLER, "false").lower() == "true") @classmethod def is_handling_request(cls, event, context): if handlers.running_local(context): return False if event.get("Records", [{}])[0].get("eventSource", "") != "aws:dynamodb": return False source_arn = event["Records"][0]["eventSourceARN"] table_name = source_arn.split("/")[1] return table_name in [os.getenv(handlers.ENV_ACTION_TRACKING_TABLE), os.getenv(handlers.ENV_CONCURRENCY_TABLE)] @classmethod def task_string(cls, action): return TASK_ACTION_STRINGS[action] if 0 <= action < len(TASK_ACTION_STRINGS) else "Unknown" @property def tracking_table(self): if self._tracking_table is None: self._tracking_table = TaskTrackingTable(self._context, self._logger) return 
self._tracking_table @property def s3_client(self): if self._s3_client is None: self._s3_client = boto_retry.get_client_with_retries("s3", ["delete_item"], logger=self._logger) return self._s3_client @property def db_client(self): if self._db_client is None: self._db_client = boto_retry.get_client_with_retries("dynamodb", ["delete_item"], logger=self._logger) return self._db_client def _get_action_concurrency_key(self, item): action = item[handlers.TASK_TR_ACTION] action_class = actions.get_action_class(action) concurrency_key_method = getattr(action_class, handlers.ACTION_CONCURRENCY_KEY_METHOD, None) if concurrency_key_method is not None: get_key_params = { actions.ACTION_PARAM_RESOURCES: handlers.get_item_resource_data(item, self._context), actions.ACTION_PARAM_ACCOUNT: item[handlers.TASK_TR_ACCOUNT], actions.ACTION_PARAM_STACK: os.getenv(handlers.ENV_STACK_NAME), actions.ACTION_PARAM_STACK_ID: os.getenv(handlers.ENV_STACK_ID), actions.ACTION_PARAM_TASK_ID: item[handlers.TASK_TR_ID], actions.ACTION_PARAM_TASK: item[handlers.TASK_TR_NAME] } get_key_params.update(item.get(handlers.TASK_TR_PARAMETERS)) return concurrency_key_method(get_key_params) else: return action def _enter_waiting_list(self, concurrency_key): if not handlers.running_local(self._context): resp = self.concurrency_table.update_item_with_retries(Key={CONCURRENCY_ID: concurrency_key}, UpdateExpression="ADD InstanceCount :one SET RunNext=:run", ExpressionAttributeValues={":one": 1, ":run": False}, ReturnValues="UPDATED_NEW") return int(resp["Attributes"].get("InstanceCount", 0)) else: resp = self.concurrency_table.get_item_with_retries(Key={CONCURRENCY_ID: concurrency_key}) return resp.get("Item", {}).get(ACTIVE_INSTANCES, 0) def _leave_waiting_list(self, task_id, concurrency_key): self.tracking_table.get_task_item(task_id) if not handlers.running_local(self._context): resp = self.concurrency_table.update_item_with_retries(Key={CONCURRENCY_ID: concurrency_key}, UpdateExpression="ADD InstanceCount :min_one SET RunNext=:run", ExpressionAttributeValues={":min_one": -1, ":run": True}, ReturnValues="UPDATED_NEW") count = max(0, int(resp["Attributes"].get(ACTIVE_INSTANCES, 0))) if count == 0: self.concurrency_table.delete_item_with_retries(Key={CONCURRENCY_ID: concurrency_key}) else: resp = self.concurrency_table.get_item_with_retries(Key={CONCURRENCY_ID: concurrency_key}) count = resp.get("Item", {}).get(ACTIVE_INSTANCES, 0) TaskTrackingTable._run_local_stream_event(os.getenv(handlers.ENV_CONCURRENCY_TABLE), "UPDATE", {"ConcurrencyId": concurrency_key, "InstanceCount": count}, {"ConcurrencyId": concurrency_key, "InstanceCount": count + 1}, self._context) return count @property def concurrency_table(self): if self._concurrency_table is None: tablename = os.getenv(handlers.ENV_CONCURRENCY_TABLE) self._logger.debug("Using concurrency table {}", tablename) self._concurrency_table = services.get_session().resource("dynamodb").Table(tablename) boto_retry.add_retry_methods_to_resource(self._concurrency_table, ["update_item", "get_item", "delete_item"], context=self._context) return self._concurrency_table def _is_wait_listed(self, item): action = item.get(handlers.TASK_TR_ACTION, None) if action is None: return False action_properties = actions.get_action_properties(action) max_action_concurrency = action_properties.get(actions.ACTION_MAX_CONCURRENCY) if max_action_concurrency in [None, 0]: return False if types.FunctionType == type(max_action_concurrency): parameters = item[handlers.TASK_TR_PARAMETERS] max_action_concurrency = 
max_action_concurrency(parameters) if max_action_concurrency in [None, 0]: return False concurrency_key = self._get_action_concurrency_key(item) count = int(self._enter_waiting_list(concurrency_key)) status = handlers.STATUS_WAITING if count >= int(max_action_concurrency) else None self.tracking_table.update_task(item[handlers.TASK_TR_ID], task=item[handlers.TASK_TR_NAME], task_metrics=item.get(handlers.TASK_TR_METRICS, False), status=status, status_data={ handlers.TASK_TR_CONCURRENCY_KEY: concurrency_key, handlers.TASK_TR_CONCURRENCY_ID: concurrency_key }) if count > max_action_concurrency: self._logger.debug(DEBUG_WAITING, item[handlers.TASK_TR_ACTION], concurrency_key, count, max_action_concurrency, item[handlers.TASK_TR_ID]) return True return False def _start_task_execution(self, task_item, action=handlers.HANDLER_ACTION_EXECUTE): try: self._logger.debug("Entering start_task_execution ({}) with task {}", action, safe_json(task_item, indent=3)) event = {i: task_item.get(i) for i in task_item} event[handlers.HANDLER_EVENT_ACTION] = action self._logger.debug(DEBUG_ACTION, task_item[handlers.TASK_TR_ACTION], task_item[handlers.TASK_TR_NAME], task_item[handlers.TASK_TR_ID]) self._logger.debug(DEBUG_ACTION_PARAMETERS, safe_json(task_item.get(handlers.TASK_TR_PARAMETERS, {}), indent=3)) lambda_size = handlers.TASK_TR_COMPLETION_SIZE if action == handlers.HANDLER_ACTION_TEST_COMPLETION else handlers.TASK_TR_EXECUTE_SIZE execute_lambda_size = task_item.get(lambda_size, actions.ACTION_SIZE_STANDARD) if execute_lambda_size == actions.ACTION_USE_ECS: ecs_memory = task_item.get(handlers.TASK_EXECUTE_ECS_MEMORY if action == handlers.HANDLER_ACTION_EXECUTE else handlers.TASK_COMPLETION_ECS_MEMORY, None) else: ecs_memory = None if not handlers.running_local(self._context): self._logger.debug(DEBUG_MEMORY_SIZE, execute_lambda_size) if execute_lambda_size != actions.ACTION_USE_ECS: payload = str.encode(safe_json(event)) function_name = "{}-{}-{}".format(os.getenv(handlers.ENV_STACK_NAME), os.getenv(handlers.ENV_LAMBDA_NAME), execute_lambda_size) self._logger.debug("Running execution of task on lambda function {}", function_name) self._logger.debug(DEBUG_LAMBDA_FUNCTION_, function_name, payload) lambda_client = boto_retry.get_client_with_retries("lambda", ["invoke"], context=self._context, logger=self._logger) resp = lambda_client.invoke_with_retries(FunctionName=function_name, InvocationType="Event", LogType="None", Payload=payload) task_info = { "id": task_item[handlers.TASK_TR_ID], "task": task_item[handlers.TASK_TR_NAME], "action": task_item[handlers.TASK_TR_ACTION], "payload": payload, "status-code": resp["StatusCode"] } self._logger.debug(DEBUG_LAMBDA, safe_json(task_info, indent=2)) self.invoked_lambda_functions.append(task_info) else: ecs_args = { "subnets": os.getenv('AWSVPC_SUBNETS'), "securitygroups": os.getenv('AWSVPC_SECURITYGROUPS'), "assignpublicip": os.getenv('AWSVPC_ASSIGNPUBLICIP'), handlers.HANDLER_EVENT_ACTION: action, handlers.TASK_NAME: task_item[handlers.TASK_TR_NAME], handlers.TASK_TR_ID: task_item[handlers.TASK_TR_ID]} self._logger.debug(DEBUG_RUNNING_ECS_TASK, action, task_item[handlers.TASK_TR_NAME]) handlers.run_as_ecs_job(ecs_args, ecs_memory_size=ecs_memory, context=self._context, logger=self._logger) else: lambda_handler(event, self._context) ResultNotifications(context=self._context, logger=self._logger).publish_started(task_item) except Exception as ex: self._logger.error(ERR_RUNNING_TASK, task_item, str(ex), full_stack()) def _handle_new_task_item(self, task_item): 
self._logger.debug("Handling new task logic") if self._is_wait_listed(task_item): self.waiting_for_execution_tasks += 1 return self.started_tasks += 1 self._start_task_execution(task_item) def _handle_completed_concurrency_item(self, task_item): self._logger.debug("Handling completed concurrency logic") concurrency_key = task_item[handlers.TASK_TR_CONCURRENCY_KEY] self._logger.debug("Handling completed task with ConcurrencyKey {}", concurrency_key) count = self._leave_waiting_list(task_item[handlers.TASK_TR_ID], concurrency_key) self._logger.debug("Concurrency count for ConcurrencyKey {} is {}", concurrency_key, count) self.finished_concurrency_tasks += 1 ResultNotifications(context=self._context, logger=self._logger).publish_ended(task_item) def _handle_finished_task_without_completion(self, task_item): ResultNotifications(context=self._context, logger=self._logger).publish_ended(task_item) def _handle_start_waiting_action(self, concurrency_item): self._logger.debug("Handling start waiting task logic") concurrency_id = concurrency_item[handlers.TASK_TR_CONCURRENCY_ID] self._logger.debug("Handling completed task with ConcurrencyId {}", concurrency_id) waiting_list = self.tracking_table.get_waiting_tasks(concurrency_id) self._logger.debug(" List of waiting tasks for ConcurrencyKey {} is {}", concurrency_id, safe_json(waiting_list, indent=3)) if len(waiting_list) > 0: count = concurrency_item.get(ACTIVE_INSTANCES, 0) oldest_waiting_task = sorted(waiting_list, key=lambda w: w[handlers.TASK_TR_CREATED_TS])[0] self._logger.debug(DEBUG_START_WAITING, concurrency_id, count, oldest_waiting_task[handlers.TASK_TR_ACTION], oldest_waiting_task[handlers.TASK_TR_NAME], oldest_waiting_task[handlers.TASK_TR_ID]) self.started_waiting_tasks += 1 self._start_task_execution(oldest_waiting_task) def _handle_check_completion(self, task_item): self._logger.debug("Handling test for completion logic") if task_item.get(handlers.TASK_TR_RUN_LOCAL, False) and not handlers.running_local(self._context): self._logger.debug("Item running in local mode skipped") return self.started_completion_checks += 1 self._start_task_execution(task_item=task_item, action=handlers.HANDLER_ACTION_TEST_COMPLETION) def _handle_deleted_item(self, task_item): if task_item.get(handlers.TASK_TR_S3_RESOURCES, False): bucket = os.getenv(handlers.ENV_RESOURCE_BUCKET) key = task_item[handlers.TASK_TR_ID] + ".json" try: self._logger.debug(DEBUG_DELETING_RESOURCES_FROM_S3, bucket, key) self.s3_client.delete_object_with_retries(Bucket=bucket, Key=key) except Exception as ex: self._logger.warning(WARN_DELETING_RESOURCES, bucket, key, ex)
Apache License 2.0
baumgach/acdc_segmenter
image_utils.py
normalise_image
python
def normalise_image(image): img_o = np.float32(image.copy()) m = np.mean(img_o) s = np.std(img_o) return np.divide((img_o - m), s)
Make the image zero mean and unit standard deviation.
https://github.com/baumgach/acdc_segmenter/blob/3cbc10c11142dfa00df7efb804adb95ede9cbd3f/image_utils.py#L34-L42
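A quick standalone check of the zero-mean / unit-variance property on random data; the function is repeated so the snippet runs on its own:

import numpy as np

def normalise_image(image):  # copied from above
    img_o = np.float32(image.copy())
    m = np.mean(img_o)
    s = np.std(img_o)
    return np.divide((img_o - m), s)

img = np.random.randint(0, 255, size=(212, 212)).astype(np.uint8)
out = normalise_image(img)
print(out.dtype, round(float(out.mean()), 5), round(float(out.std()), 5))
# float32, mean ~0.0, std ~1.0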
import numpy as np from skimage import measure import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') try: import cv2 except: logging.warning('Could not import opencv. Augmentation functions will be unavailable.') else: def rotate_image(img, angle, interp=cv2.INTER_LINEAR): rows, cols = img.shape[:2] rotation_matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1) return cv2.warpAffine(img, rotation_matrix, (cols, rows), flags=interp) def resize_image(im, size, interp=cv2.INTER_LINEAR): im_resized = cv2.resize(im, (size[1], size[0]), interpolation=interp) return im_resized def convert_to_uint8(image): image = image - image.min() image = 255.0*np.divide(image.astype(np.float32),image.max()) return image.astype(np.uint8)
Apache License 2.0
marzona/rig-remote
rig_remote/stmessenger.py
STMessenger.check_end_of_scan
python
def check_end_of_scan(self): return (self.mqueue.get_from_parent() == 1)
Check to see if the scanning thread has notified us of its termination. :returns: True if the termination signal was sent.
https://github.com/marzona/rig-remote/blob/05957aae40e80e5250fd6c10a514283a35ff56a9/rig_remote/stmessenger.py#L83-L90
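check_end_of_scan is one half of a parent/child handshake: the scanning thread calls notify_end_of_scan() (which pushes the value 1 through QueueComms) and the UI side polls check_end_of_scan(). A rough illustration of that handshake using the standard library's queue.Queue as a stand-in — this is not the project's QueueComms API, just the pattern:

import queue
import threading
import time

signals = queue.Queue()  # stand-in for the parent/child QueueComms channel

def scanning_thread():
    time.sleep(0.1)      # pretend to scan for a while
    signals.put(1)       # analogous to notify_end_of_scan()

threading.Thread(target=scanning_thread).start()

while True:              # analogous to polling check_end_of_scan()
    try:
        if signals.get(timeout=1) == 1:
            print("scan terminated")
            break
    except queue.Empty:
        pass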
from queue_comms import QueueComms import logging logger = logging.getLogger(__name__) class STMessenger (object): def __init__(self): self.mqueue = QueueComms() def send_event_update(self, event_list): if isinstance(event_list, tuple) and len(event_list) == 2: self.mqueue.send_to_child(event_list) else: logger.error("Event list: {}".format(event_list)) raise ValueError("Bad event update attempt.") def update_queued(self): return self.mqueue.queued_for_child() def get_event_update(self): event_list = [] try: event_list = self.mqueue.get_from_child() except Exception: logger.exception("Exception while accessing a child queue.") return event_list def notify_end_of_scan(self): self.mqueue.signal_parent(1)
MIT License
readthedocs/readthedocs.org
readthedocs/organizations/forms.py
OrganizationTeamMemberForm.validate_member_invite
python
def validate_member_invite(self, invite): queryset = TeamMember.objects.filter( Q( team=self.team, invite__team=self.team, invite__email=invite.email, ), ) if queryset.exists(): raise forms.ValidationError( _('An invitation was already sent to this email'), ) return invite
Verify that the team member and team invite don't already exist. The query searches for duplicate :py:cls:`TeamMember` instances, and also for existing :py:cls:`TeamInvite` instances, sharing the team and email address of the given ``invite``. :param invite: :py:cls:`TeamInvite` instance
https://github.com/readthedocs/readthedocs.org/blob/2cff8376f0ef8f25ae6d8763bdbec86f47e33ab9/readthedocs/organizations/forms.py#L270-L291
from django import forms from django.contrib.auth.models import User from django.core.exceptions import NON_FIELD_ERRORS, ValidationError from django.core.validators import EmailValidator from django.db.models import Q from django.utils.translation import ugettext_lazy as _ from readthedocs.core.history import SimpleHistoryModelForm from readthedocs.core.utils import slugify from readthedocs.core.utils.extend import SettingsOverrideObject from readthedocs.organizations.constants import ADMIN_ACCESS, READ_ONLY_ACCESS from readthedocs.organizations.models import ( Organization, OrganizationOwner, Team, TeamInvite, TeamMember, ) class OrganizationForm(SimpleHistoryModelForm): name = forms.CharField(max_length=32) class Meta: model = Organization fields = ['name', 'email', 'description', 'url'] labels = { 'name': _('Organization Name'), 'email': _('Billing Email'), } url = forms.URLField( widget=forms.TextInput(attrs={'placeholder': 'http://'}), label=_('Site URL'), required=False, ) def __init__(self, *args, **kwargs): try: self.user = kwargs.pop('user') except KeyError: raise TypeError( 'OrganizationForm expects a `user` keyword argument', ) super().__init__(*args, **kwargs) def clean_name(self): name = self.cleaned_data['name'] if self.instance and self.instance.name and name == self.instance.name: return name potential_slug = slugify(name) if not potential_slug: raise forms.ValidationError(_('Invalid organization name: no slug generated')) if Organization.objects.filter(slug=potential_slug).exists(): raise forms.ValidationError( _('Organization %(name)s already exists'), params={'name': name}, ) return name class OrganizationSignupFormBase(OrganizationForm): class Meta: model = Organization fields = ['name', 'email'] labels = { 'name': _('Organization Name'), 'email': _('Billing Email'), } url = None @staticmethod def _create_default_teams(organization): organization.teams.create(name='Admins', access=ADMIN_ACCESS) organization.teams.create(name='Read Only', access=READ_ONLY_ACCESS) def save(self, commit=True): org = super().save(commit) if not commit: return org OrganizationOwner.objects.create( owner=self.user, organization=org, ) self._create_default_teams(org) return org class OrganizationSignupForm(SettingsOverrideObject): _default_class = OrganizationSignupFormBase class OrganizationOwnerForm(forms.ModelForm): class Meta: model = OrganizationOwner fields = ['owner'] owner = forms.CharField() def __init__(self, *args, **kwargs): self.organization = kwargs.pop('organization', None) super().__init__(*args, **kwargs) def clean_owner(self): username = self.cleaned_data['owner'] owner = User.objects.filter(username=username).first() if owner is None: raise forms.ValidationError( _('User %(username)s does not exist'), params={'username': username}, ) if self.organization.owners.filter(username=username).exists(): raise forms.ValidationError( _('User %(username)s is already an owner'), params={'username': username}, ) return owner class OrganizationTeamBasicFormBase(SimpleHistoryModelForm): class Meta: model = Team fields = ['name', 'access', 'organization'] error_messages = { NON_FIELD_ERRORS: { 'unique_together': _('Team already exists'), }, } organization = forms.CharField(widget=forms.HiddenInput(), required=False) def __init__(self, *args, **kwargs): self.organization = kwargs.pop('organization', None) super().__init__(*args, **kwargs) def clean_organization(self): return self.organization class OrganizationTeamBasicForm(SettingsOverrideObject): _default_class = 
OrganizationTeamBasicFormBase class OrganizationTeamProjectForm(forms.ModelForm): class Meta: model = Team fields = ['projects'] def __init__(self, *args, **kwargs): self.organization = kwargs.pop('organization', None) super().__init__(*args, **kwargs) self.fields['projects'] = forms.ModelMultipleChoiceField( queryset=self.organization.projects, widget=forms.CheckboxSelectMultiple, ) class OrganizationTeamMemberForm(forms.ModelForm): class Meta: model = TeamMember fields = [] member = forms.CharField(label=_('Email address or username')) def __init__(self, *args, **kwargs): self.team = kwargs.pop('team', None) super().__init__(*args, **kwargs) def clean_member(self): lookup = self.cleaned_data['member'] try: validator = EmailValidator(code='lookup not an email') validator(lookup) member = ( User.objects.filter( emailaddress__verified=True, emailaddress__email=lookup, is_active=True, ).first() ) if member is not None: return self.validate_member_user(member) invite = TeamInvite( organization=self.team.organization, team=self.team, email=lookup, ) return self.validate_member_invite(invite) except ValidationError as error: if error.code != 'lookup not an email': raise try: member = User.objects.get(username=lookup, is_active=True) return self.validate_member_user(member) except User.DoesNotExist: raise forms.ValidationError('User not found') def validate_member_user(self, member): if TeamMember.objects.filter(team=self.team, member=member).exists(): raise forms.ValidationError(_('User is already a team member'),) return member
MIT License
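Usage sketch for the Read the Docs organization forms record above. This is hypothetical view-side code, not part of the repository: `request_user` stands in for an authenticated Django user, and the form only runs inside a configured Django project.

from readthedocs.organizations.forms import OrganizationForm

form = OrganizationForm(
    data={"name": "My Org", "email": "billing@example.com"},
    user=request_user,   # required keyword; __init__ raises TypeError without it
)
if form.is_valid():      # clean_name() rejects names whose slug already exists
    organization = form.save()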
danfeix/scene-graph-tf-release
lib/roi_data_layer/minibatch.py
_sample_graph
python
def _sample_graph(roidb, num_fg_rois, num_rois, num_neg_rels=128): gt_rels = roidb['gt_relations'] fg_gt_ind_assignments = roidb['fg_gt_ind_assignments'] gt_to_fg_roi_inds = {} all_fg_roi_inds = [] for ind, gt_ind in fg_gt_ind_assignments.items(): if gt_ind not in gt_to_fg_roi_inds: gt_to_fg_roi_inds[gt_ind] = [] gt_to_fg_roi_inds[gt_ind].append(ind) all_fg_roi_inds.append(ind) all_fg_roi_inds = np.array(list(set(all_fg_roi_inds))) pos_rels = [] for rel in gt_rels: for sub_i in gt_to_fg_roi_inds[rel[0]]: for obj_i in gt_to_fg_roi_inds[rel[1]]: pos_rels.append([sub_i, obj_i, rel[2]]) rels = [] rels_inds = [] roi_inds = [] if len(pos_rels) > 0: _, indices = np.unique(["{} {}".format(i, j) for i,j,k in pos_rels], return_index=True) pos_rels = np.array(pos_rels)[indices, :] for rel in pos_rels: roi_inds += rel[:2].tolist() roi_inds = list(set(roi_inds)) rels.append(rel) rels_inds.append(rel[:2].tolist()) if len(roi_inds) >= num_fg_rois: break roi_candidates = np.setdiff1d(all_fg_roi_inds, roi_inds) num_rois_to_sample = min(num_fg_rois - len(roi_inds), len(roi_candidates)) if num_rois_to_sample > 0: roi_sample = npr.choice(roi_candidates, size=num_rois_to_sample, replace=False) roi_inds = np.hstack([roi_inds, roi_sample]) sample_rels = [] sample_rels_inds = [] for i in roi_inds: for j in roi_inds: if i != j and [i, j] not in rels_inds: sample_rels.append([i,j,0]) sample_rels_inds.append([i,j]) if len(sample_rels) > 0: num_neg_rels = np.minimum(len(sample_rels), num_neg_rels) inds = npr.choice(np.arange(len(sample_rels)), size=num_neg_rels, replace=False) rels += [sample_rels[i] for i in inds] rels_inds += [sample_rels_inds[i] for i in inds] num_rois_to_sample = num_rois - len(roi_inds) if num_rois_to_sample > 0: bg_roi_inds = _sample_bg_rois(roidb, num_rois_to_sample) roi_inds = np.hstack([roi_inds, bg_roi_inds]) roi_inds = np.array(roi_inds).astype(np.int64) return roi_inds.astype(np.int64), np.array(rels).astype(np.int64)
Sample a graph from the foreground RoIs of an image. roidb: roidb entry of an image. num_fg_rois / num_rois: maximum number of foreground and total RoIs to sample per image. num_neg_rels: maximum number of negative relations to sample.
https://github.com/danfeix/scene-graph-tf-release/blob/4c9e3c6a5cb0e6a241a92dc9b786f74e69ca35c4/lib/roi_data_layer/minibatch.py#L142-L232
import numpy as np import numpy.random as npr from fast_rcnn.config import cfg from utils.blob import prep_im_for_blob, im_list_to_blob import data_utils from IPython import embed from utils.timer import Timer def get_minibatch(roidb, num_classes): num_images = len(roidb) random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images) assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), 'num_images ({}) must divide BATCH_SIZE ({})'. format(num_images, cfg.TRAIN.BATCH_SIZE) rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image) im_timer = Timer() im_timer.tic() im_blob, im_scales = _get_image_blob(roidb, random_scale_inds) im_timer.toc() blobs = {'ims': im_blob} rois_blob = np.zeros((0, 5), dtype=np.float32) labels_blob = np.zeros((0), dtype=np.float32) rels_blob = np.zeros((0, 3), dtype=np.int32) bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32) bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32) all_overlaps = [] box_idx_offset = 0 d_timer = Timer() d_timer.tic() for im_i in xrange(num_images): roi_inds, rels = _sample_graph(roidb[im_i], fg_rois_per_image, rois_per_image, num_neg_rels=cfg.TRAIN.NUM_NEG_RELS) if rels.size == 0: print('batch skipped') return None rels, labels, overlaps, im_rois, bbox_targets, bbox_inside_weights = _gather_samples(roidb[im_i], roi_inds, rels, num_classes) rois = _project_im_rois(im_rois, im_scales[im_i]) batch_ind = im_i * np.ones((rois.shape[0], 1)) rois_blob_this_image = np.hstack((batch_ind, rois)) rois_blob = np.vstack((rois_blob, rois_blob_this_image)) labels_blob = np.hstack((labels_blob, labels)) bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets)) bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights)) all_overlaps = np.hstack((all_overlaps, overlaps)) rels_offset = rels.copy() rels_offset[:, :2] += box_idx_offset rels_blob = np.vstack([rels_blob, rels_offset]) box_idx_offset += rois.shape[0] blobs['rois'] = rois_blob.copy() blobs['labels'] = labels_blob.copy().astype(np.int32) blobs['relations'] = rels_blob[:,:2].copy().astype(np.int32) blobs['predicates'] = rels_blob[:,2].copy().astype(np.int32) blobs['bbox_targets'] = bbox_targets_blob.copy() blobs['bbox_inside_weights'] = bbox_inside_blob.copy() blobs['bbox_outside_weights'] = np.array(bbox_inside_blob > 0).astype(np.float32).copy() num_roi = rois_blob.shape[0] num_rel = rels_blob.shape[0] blobs['rel_rois'] = data_utils.compute_rel_rois(num_rel, rois_blob, rels_blob) d_timer.toc() graph_dict = data_utils.create_graph_data(num_roi, num_rel, rels_blob[:, :2]) for k in graph_dict: blobs[k] = graph_dict[k] return blobs def _gather_samples(roidb, roi_inds, rels, num_classes): rois = roidb['boxes'] labels = roidb['max_classes'] overlaps = roidb['max_overlaps'] bg_inds = np.where(overlaps < cfg.TRAIN.FG_THRESH)[0] labels = labels.copy() labels[bg_inds] = 0 labels = labels[roi_inds] overlaps = overlaps[roi_inds] rois = rois[roi_inds] roi_ind_map = {} for i, roi_i in enumerate(roi_inds): roi_ind_map[roi_i] = i for i, rel in enumerate(rels): rels[i] = [roi_ind_map[rel[0]], roi_ind_map[rel[1]], rel[2]] bbox_targets, bbox_inside_weights = _get_bbox_regression_labels( roidb['bbox_targets'][roi_inds, :], num_classes) return rels, labels, overlaps, rois, bbox_targets, bbox_inside_weights
MIT License
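The negative-relation step of `_sample_graph` above pairs up sampled RoIs that carry no ground-truth predicate and keeps a random subset of them. A self-contained numpy sketch of that idea (toy inputs, not the repository's code; predicate 0 stands for "no relation"):

import numpy as np

rng = np.random.default_rng(0)
roi_inds = [0, 1, 2, 3]          # sampled foreground RoIs
pos_pairs = {(0, 1), (2, 3)}     # pairs that already have a GT predicate

# every ordered pair (i, j), i != j, without a GT predicate is a negative candidate
neg_candidates = [(i, j) for i in roi_inds for j in roi_inds
                  if i != j and (i, j) not in pos_pairs]

num_neg = min(4, len(neg_candidates))   # cap the number of negatives, as the layer does
chosen = rng.choice(len(neg_candidates), size=num_neg, replace=False)
neg_rels = [list(neg_candidates[k]) + [0] for k in chosen]
print(neg_rels)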
andrewannex/spiceypy
spiceypy/spiceypy.py
bodfnd
python
def bodfnd(body: int, item: str) -> bool: body = ctypes.c_int(body) item = stypes.string_to_char_p(item) return bool(libspice.bodfnd_c(body, item))
Determine whether values exist for some item for any body in the kernel pool. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodfnd_c.html :param body: ID code of body. :param item: Item to find ("RADII", "NUT_AMP_RA", etc.). :return: True if the item is in the kernel pool, False if it is not.
https://github.com/andrewannex/spiceypy/blob/73bba459ef858671cfa6c9bc921980f3be3c7932/spiceypy/spiceypy.py#L524-L537
import warnings from contextlib import contextmanager from datetime import datetime, timezone import functools import ctypes from typing import Callable, Iterator, Iterable, Optional, Tuple, Union, Sequence import numpy from numpy import ndarray, str_ from .utils import support_types as stypes from .utils.libspicehelper import libspice from . import config from .utils.callbacks import ( UDFUNC, UDFUNS, UDFUNB, UDSTEP, UDREFN, UDREPI, UDREPU, UDREPF, UDBAIL, SpiceUDFUNS, SpiceUDFUNB, ) from .utils.support_types import ( Cell_Char, Cell_Bool, Cell_Time, Cell_Double, Cell_Int, Ellipse, Plane, SpiceCell, SpiceCellPointer, SpiceDLADescr, SpiceDSKDescr, SpiceEKAttDsc, SpiceEKSegSum, ) __author__ = "AndrewAnnex" OptionalInt = Optional[int] _default_len_out = 256 _SPICE_EK_MAXQSEL = 100 _SPICE_EK_EKRCEX_ROOM_DEFAULT = 100 def warn_deprecated_args(**kwargs) -> None: keys = list(kwargs.keys()) values = list(kwargs.values()) if any(values): varnames = ", ".join(keys) warnings.warn( f"Specifying any of: {varnames} will be deprecated as of SpiceyPy 5.0.0", DeprecationWarning, stacklevel=2, ) pass def check_for_spice_error(f: Optional[Callable]) -> None: if failed(): short = getmsg("SHORT", 26) explain = getmsg("EXPLAIN", 100).strip() long = getmsg("LONG", 1841).strip() traceback = qcktrc(200) reset() raise stypes.dynamically_instantiate_spiceyerror( short=short, explain=explain, long=long, traceback=traceback ) def spice_error_check(f): @functools.wraps(f) def with_errcheck(*args, **kwargs): try: res = f(*args, **kwargs) check_for_spice_error(f) return res except BaseException: raise return with_errcheck def spice_found_exception_thrower(f: Callable) -> Callable: @functools.wraps(f) def wrapper(*args, **kwargs): res = f(*args, **kwargs) if config.catch_false_founds: found = res[-1] if isinstance(found, bool) and not found: raise stypes.NotFoundError( "Spice returns not found for function: {}".format(f.__name__), found=found, ) elif stypes.is_iterable(found) and not all(found): raise stypes.NotFoundError( "Spice returns not found in a series of calls for function: {}".format( f.__name__ ), found=found, ) else: actualres = res[0:-1] if len(actualres) == 1: return actualres[0] else: return actualres else: return res return wrapper @contextmanager def no_found_check() -> Iterator[None]: current_catch_state = config.catch_false_founds config.catch_false_founds = False yield config.catch_false_founds = current_catch_state @contextmanager def found_check() -> Iterator[None]: current_catch_state = config.catch_false_founds config.catch_false_founds = True yield config.catch_false_founds = current_catch_state def found_check_off() -> None: config.catch_false_founds = False def found_check_on() -> None: config.catch_false_founds = True def get_found_catch_state() -> bool: return config.catch_false_founds def cell_double(cell_size: int) -> SpiceCell: return stypes.SPICEDOUBLE_CELL(cell_size) def cell_int(cell_size: int) -> SpiceCell: return stypes.SPICEINT_CELL(cell_size) def cell_char(cell_size: int, length: int) -> SpiceCell: return stypes.SPICECHAR_CELL(cell_size, length) def cell_bool(cell_size: int) -> SpiceCell: return stypes.SPICEBOOL_CELL(cell_size) def cell_time(cell_size) -> SpiceCell: return stypes.SPICETIME_CELL(cell_size) @spice_error_check def appndc( item: Union[str, Iterable[str], ndarray, str_], cell: Union[Cell_Char, SpiceCell], ) -> None: assert isinstance(cell, stypes.SpiceCell) if stypes.is_iterable(item): for c in item: libspice.appndc_c(stypes.string_to_char_p(c), cell) else: item = 
stypes.string_to_char_p(item) libspice.appndc_c(item, cell) @spice_error_check def appndd( item: Union[float, Iterable[float]], cell: Union[SpiceCell, Cell_Double] ) -> None: assert isinstance(cell, stypes.SpiceCell) if hasattr(item, "__iter__"): for d in item: libspice.appndd_c(ctypes.c_double(d), cell) else: item = ctypes.c_double(item) libspice.appndd_c(item, cell) @spice_error_check def appndi(item: Union[Iterable[int], int], cell: Union[SpiceCell, Cell_Int]) -> None: assert isinstance(cell, stypes.SpiceCell) if hasattr(item, "__iter__"): for i in item: libspice.appndi_c(ctypes.c_int(i), cell) else: item = ctypes.c_int(item) libspice.appndi_c(item, cell) @spice_error_check def axisar(axis: Union[ndarray, Iterable[float]], angle: float) -> ndarray: axis = stypes.to_double_vector(axis) angle = ctypes.c_double(angle) r = stypes.empty_double_matrix() libspice.axisar_c(axis, angle, r) return stypes.c_matrix_to_numpy(r) @spice_error_check def b1900() -> float: return libspice.b1900_c() @spice_error_check def b1950() -> float: return libspice.b1950_c() @spice_error_check def badkpv( caller: str, name: str, comp: str, insize: int, divby: int, intype: str ) -> bool: caller = stypes.string_to_char_p(caller) name = stypes.string_to_char_p(name) comp = stypes.string_to_char_p(comp) insize = ctypes.c_int(insize) divby = ctypes.c_int(divby) intype = ctypes.c_char(intype.encode(encoding="UTF-8")) return bool(libspice.badkpv_c(caller, name, comp, insize, divby, intype)) @spice_error_check def bltfrm(frmcls: int, out_cell: Optional[SpiceCell] = None) -> SpiceCell: frmcls = ctypes.c_int(frmcls) if not out_cell: out_cell = stypes.SPICEINT_CELL(1000) libspice.bltfrm_c(frmcls, out_cell) return out_cell @spice_error_check def bodeul(body: int, et: float) -> Tuple[float, float, float, float]: body = ctypes.c_int(body) et = ctypes.c_double(et) ra = ctypes.c_double() dec = ctypes.c_double() w = ctypes.c_double() lam = ctypes.c_double() libspice.bodeul_( ctypes.byref(body), ctypes.byref(et), ctypes.byref(ra), ctypes.byref(dec), ctypes.byref(w), ctypes.byref(lam), ) return ra.value, dec.value, w.value, lam.value @spice_error_check @spice_found_exception_thrower def bodc2n(code: int, lenout: int = _default_len_out) -> Tuple[str, bool]: code = ctypes.c_int(code) name = stypes.string_to_char_p(" " * lenout) lenout = ctypes.c_int(lenout) found = ctypes.c_int() libspice.bodc2n_c(code, lenout, name, ctypes.byref(found)) return stypes.to_python_string(name), bool(found.value) @spice_error_check def bodc2s(code: int, lenout: int = _default_len_out) -> str: code = ctypes.c_int(code) name = stypes.string_to_char_p(" " * lenout) lenout = ctypes.c_int(lenout) libspice.bodc2s_c(code, lenout, name) return stypes.to_python_string(name) @spice_error_check def boddef(name: str, code: int) -> None: name = stypes.string_to_char_p(name) code = ctypes.c_int(code) libspice.boddef_c(name, code) @spice_error_check
MIT License
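A usage sketch for the `bodfnd` record above. The kernel path is a placeholder; any planetary constants kernel that defines radii (e.g. pck00010.tpc) will do.

import spiceypy as spice

spice.furnsh("./kernels/pck00010.tpc")   # placeholder path to a text PCK
if spice.bodfnd(399, "RADII"):           # 399 = Earth
    dim, radii = spice.bodvrd("EARTH", "RADII", 3)
    print(dim, radii)
spice.kclear()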
d-x-y/nats-bench
nats_bench/api_utils.py
NASBenchMetaAPI.statistics
python
def statistics(self, dataset: Text, hp: Union[Text, int]) -> Dict[int, int]: if self.verbose: print("Call the statistics function with dataset={:} and hp={:}.".format(dataset, hp)) valid_datasets = ["cifar10-valid", "cifar10", "cifar100", "ImageNet16-120"] if dataset not in valid_datasets: raise ValueError("{:} not in {:}".format(dataset, valid_datasets)) nums, hp = collections.defaultdict(lambda: 0), str(hp) for index in self.evaluated_indexes: arch_info = self.arch2infos_dict[index][hp] dataset_seed = arch_info.dataset_seed if dataset not in dataset_seed: nums[0] += 1 else: nums[len(dataset_seed[dataset])] += 1 return dict(nums)
Count, for the given dataset and hyper-parameter setting, how many evaluated architectures were run with each number of trials (seeds); returns a dict mapping trial count to number of architectures.
https://github.com/d-x-y/nats-bench/blob/a2690238c34145f379ff2233480178da5f59fb79/nats_bench/api_utils.py#L656-L672
import abc import bz2 import collections import copy import os import pickle import random import time from typing import Any, Dict, Optional, Text, Union import warnings import numpy as np _FILE_SYSTEM = "default" PICKLE_EXT = "pickle.pbz2" def time_string(): iso_time_format = "%Y-%m-%d %X" string = "[{:}]".format(time.strftime(iso_time_format, time.gmtime(time.time()))) return string def reset_file_system(lib: Text = "default"): global _FILE_SYSTEM _FILE_SYSTEM = lib def get_file_system(): return _FILE_SYSTEM def get_torch_home(): if "TORCH_HOME" in os.environ: return os.environ["TORCH_HOME"] elif "HOME" in os.environ: return os.path.join(os.environ["HOME"], ".torch") else: raise ValueError( "Did not find HOME in os.environ. " "Please at least setup the path of HOME or TORCH_HOME " "in the environment." ) def nats_is_dir(file_path): if _FILE_SYSTEM == "default": return os.path.isdir(file_path) elif _FILE_SYSTEM == "google": import tensorflow as tf return tf.io.gfile.isdir(file_path) else: raise ValueError("Unknown file system lib: {:}".format(_FILE_SYSTEM)) def nats_is_file(file_path): if _FILE_SYSTEM == "default": return os.path.isfile(file_path) elif _FILE_SYSTEM == "google": import tensorflow as tf return tf.io.gfile.exists(file_path) and not tf.io.gfile.isdir(file_path) else: raise ValueError("Unknown file system lib: {:}".format(_FILE_SYSTEM)) def pickle_save(obj, file_path, ext=".pbz2", protocol=4): if _FILE_SYSTEM == "default": with bz2.BZ2File(str(file_path) + ext, "wb") as cfile: pickle.dump(obj, cfile, protocol=protocol) else: raise ValueError("Unknown file system lib: {:}".format(_FILE_SYSTEM)) def pickle_load(file_path, ext=".pbz2"): if nats_is_file(str(file_path)): xfile_path = str(file_path) else: xfile_path = str(file_path) + ext if _FILE_SYSTEM == "default": with bz2.BZ2File(xfile_path, "rb") as cfile: return pickle.load(cfile) elif _FILE_SYSTEM == "google": import tensorflow as tf file_content = tf.io.gfile.GFile(file_path, mode="rb").read() byte_content = bz2.decompress(file_content) return pickle.loads(byte_content) else: raise ValueError("Unknown file system lib: {:}".format(_FILE_SYSTEM)) def remap_dataset_set_names(dataset, metric_on_set, verbose=False): if verbose: print( "Call internal function _remap_dataset_set_names with dataset={:} " "and metric_on_set={:}".format(dataset, metric_on_set) ) if dataset == "cifar10" and metric_on_set == "valid": dataset, metric_on_set = "cifar10-valid", "x-valid" elif dataset == "cifar10" and metric_on_set == "test": dataset, metric_on_set = "cifar10", "ori-test" elif dataset == "cifar10" and metric_on_set == "train": dataset, metric_on_set = "cifar10", "train" elif (dataset == "cifar100" or dataset == "ImageNet16-120") and metric_on_set == "valid": metric_on_set = "x-valid" elif (dataset == "cifar100" or dataset == "ImageNet16-120") and metric_on_set == "test": metric_on_set = "x-test" if verbose: print(" return dataset={:} and metric_on_set={:}".format(dataset, metric_on_set)) return dataset, metric_on_set class NASBenchMetaAPI(metaclass=abc.ABCMeta): @abc.abstractmethod def __init__( self, file_path_or_dict: Optional[Union[Text, Dict[Text, Any]]] = None, fast_mode: bool = False, verbose: bool = True, ): self.meta_archs = None self.verbose = None self.evaluated_indexes = None self.arch2infos_dict = None self.filename = None self._fast_mode = None self._archive_dir = None self._avaliable_hps = None self.archstr2index = None def __getitem__(self, index: int): return copy.deepcopy(self.meta_archs[index]) def arch(self, index: 
int): if self.verbose: print("Call the arch function with index={:}".format(index)) if index < 0 or index >= len(self.meta_archs): raise ValueError("invalid index : {:} vs. {:}.".format(index, len(self.meta_archs))) return copy.deepcopy(self.meta_archs[index]) def __len__(self): return len(self.meta_archs) def __repr__(self): return "{name}({num}/{total} architectures, fast_mode={fast_mode}, " "file={filename})".format( name=self.__class__.__name__, num=len(self.evaluated_indexes), total=len(self.meta_archs), fast_mode=self.fast_mode, filename=self.filename, ) @property def avaliable_hps(self): return list(copy.deepcopy(self._avaliable_hps)) @property def used_time(self): return self._used_time @property def search_space_name(self): return self._search_space_name @property def fast_mode(self): return self._fast_mode @property def archive_dir(self): return self._archive_dir @property def full_train_epochs(self): return self._full_train_epochs def reset_archive_dir(self, archive_dir): self._archive_dir = archive_dir def reset_fast_mode(self, fast_mode): self._fast_mode = fast_mode def reset_time(self): self._used_time = 0 @abc.abstractmethod def get_more_info(self, index, dataset, iepoch=None, hp: Text = "12", is_random: bool = True): def simulate_train_eval(self, arch, dataset, iepoch=None, hp="12", account_time=True): index = self.query_index_by_arch(arch) all_names = ("cifar10", "cifar100", "ImageNet16-120") if dataset not in all_names: raise ValueError("Invalid dataset name : {:} vs {:}".format(dataset, all_names)) if dataset == "cifar10": info = self.get_more_info(index, "cifar10-valid", iepoch=iepoch, hp=hp, is_random=True) else: info = self.get_more_info(index, dataset, iepoch=iepoch, hp=hp, is_random=True) valid_acc, time_cost = info["valid-accuracy"], info["train-all-time"] + info["valid-per-time"] latency = self.get_latency(index, dataset) if account_time: self._used_time += time_cost return valid_acc, latency, time_cost, self._used_time def random(self): return random.randint(0, len(self.meta_archs) - 1) def reload(self, archive_root: Text = None, index: int = None): if self.verbose: print( "{:} Call clear_params with archive_root={:} and index={:}".format(time_string(), archive_root, index) ) if archive_root is None: archive_root = os.path.join(os.environ["TORCH_HOME"], "{:}-full".format(self._all_base_names[-1])) if not nats_is_dir(archive_root): warnings.warn( "The input archive_root is None and the default " "archive_root path ({:}) does not exist, try to use " "self.archive_dir.".format(archive_root) ) archive_root = self.archive_dir if archive_root is None or not nats_is_dir(archive_root): raise ValueError("Invalid archive_root : {:}".format(archive_root)) if index is None: indexes = list(range(len(self))) else: indexes = [index] for idx in indexes: if not (0 <= idx < len(self.meta_archs)): raise ValueError("invalid index of {:}".format(idx)) xfile_path = os.path.join(archive_root, "{:06d}.{:}".format(idx, PICKLE_EXT)) if not nats_is_file(xfile_path): xfile_path = os.path.join(archive_root, "{:d}.{:}".format(idx, PICKLE_EXT)) assert nats_is_file(xfile_path), "invalid data path : {:}".format(xfile_path) xdata = pickle_load(xfile_path) assert isinstance(xdata, dict), "invalid format of data in {:}".format(xfile_path) self.evaluated_indexes.add(idx) hp2archres = collections.OrderedDict() for hp_key, results in xdata.items(): hp2archres[hp_key] = ArchResults.create_from_state_dict(results) self._avaliable_hps.add(hp_key) self.arch2infos_dict[idx] = hp2archres def 
query_index_by_arch(self, arch): if self.verbose: print("{:} Call query_index_by_arch with arch={:}".format(time_string(), arch)) if isinstance(arch, int): if 0 <= arch < len(self): return arch else: raise ValueError("Invalid architecture index {:} vs [{:}, {:}].".format(arch, 0, len(self))) elif isinstance(arch, str): if arch in self.archstr2index: arch_index = self.archstr2index[arch] else: arch_index = -1 elif hasattr(arch, "tostr"): if arch.tostr() in self.archstr2index: arch_index = self.archstr2index[arch.tostr()] else: arch_index = -1 else: arch_index = -1 return arch_index def query_by_arch(self, arch, hp): return self.query_info_str_by_arch(arch, hp) def _prepare_info(self, index): if index not in self.arch2infos_dict: if self.fast_mode and self.archive_dir is not None: self.reload(self.archive_dir, index) elif not self.fast_mode: if self.verbose: print( "{:} Call _prepare_info with index={:} skip because it is not" "the fast mode.".format(time_string(), index) ) else: raise ValueError( "Invalid status: fast_mode={:} and " "archive_dir={:}".format(self.fast_mode, self.archive_dir) ) else: if index not in self.evaluated_indexes: raise ValueError( "The index of {:} is not in self.evaluated_indexes, " "there must be something wrong.".format(index) ) if self.verbose: print( "{:} Call _prepare_info with index={:} skip because it is in " "arch2infos_dict".format(time_string(), index) ) def clear_params(self, index: int, hp: Optional[Text] = None): if self.verbose: print("{:} Call clear_params with index={:} and hp={:}".format(time_string(), index, hp)) if index not in self.arch2infos_dict: warnings.warn( "The {:}-th architecture is not in the benchmark data yet, " "no need to clear params.".format(index) ) elif hp is None: for key, result in self.arch2infos_dict[index].items(): result.clear_params() else: if str(hp) not in self.arch2infos_dict[index]: raise ValueError( "The {:}-th architecture only has hyper-parameters " "of {:} instead of {:}.".format(index, list(self.arch2infos_dict[index].keys()), hp) ) self.arch2infos_dict[index][str(hp)].clear_params() @abc.abstractmethod def query_info_str_by_arch(self, arch, hp: Text = "12"): def _query_info_str_by_arch(self, arch, hp: Text = "12", print_information=None): arch_index = self.query_index_by_arch(arch) self._prepare_info(arch_index) if arch_index in self.arch2infos_dict: if hp not in self.arch2infos_dict[arch_index]: raise ValueError( "The {:}-th architecture only has hyper-parameters of " "{:} instead of {:}.".format(arch_index, list(self.arch2infos_dict[arch_index].keys()), hp) ) info = self.arch2infos_dict[arch_index][hp] strings = print_information(info, "arch-index={:}".format(arch_index)) return "\n".join(strings) else: warnings.warn("Find this arch-index : {:}, but this arch is not " "evaluated.".format(arch_index)) return None def query_meta_info_by_index(self, arch_index, hp: Text = "12"): if self.verbose: print("Call query_meta_info_by_index with arch_index={:}, hp={:}".format(arch_index, hp)) self._prepare_info(arch_index) if arch_index in self.arch2infos_dict: if str(hp) not in self.arch2infos_dict[arch_index]: raise ValueError( "The {:}-th architecture only has hyper-parameters of " "{:} instead of {:}.".format(arch_index, list(self.arch2infos_dict[arch_index].keys()), hp) ) info = self.arch2infos_dict[arch_index][str(hp)] else: raise ValueError("arch_index [{:}] does not in arch2infos".format(arch_index)) return copy.deepcopy(info) def query_by_index(self, arch_index: int, dataname: Union[None, Text] = None, hp: Text = 
"12"): if self.verbose: print( "{:} Call query_by_index with arch_index={:}, dataname={:}, " "hp={:}".format(time_string(), arch_index, dataname, hp) ) info = self.query_meta_info_by_index(arch_index, str(hp)) if dataname is None: return info else: if dataname not in info.get_dataset_names(): raise ValueError("invalid dataset-name : {:} vs. {:}".format(dataname, info.get_dataset_names())) return info.query(dataname) def find_best( self, dataset, metric_on_set, flop_max=None, param_max=None, hp: Text = "12", enforce_all: bool = True ): if self.verbose: print( "{:} Call find_best with dataset={:}, metric_on_set={:}, hp={:} " "| with #FLOPs < {:} and #Params < {:}".format( time_string(), dataset, metric_on_set, hp, flop_max, param_max ) ) dataset, metric_on_set = remap_dataset_set_names(dataset, metric_on_set, self.verbose) best_index, highest_accuracy = -1, None if enforce_all: assert ( self.fast_mode ), "enforce_all can only be set when fast_mode=True; if you are using non-fast-mode, please set it as False" evaluated_indexes = list(range(len(self))) else: evaluated_indexes = sorted(list(self.evaluated_indexes)) for arch_index in evaluated_indexes: self._prepare_info(arch_index) arch_info = self.arch2infos_dict[arch_index][hp] info = arch_info.get_compute_costs(dataset) flop, param, latency = info["flops"], info["params"], info["latency"] if flop_max is not None and flop > flop_max: continue if param_max is not None and param > param_max: continue xinfo = arch_info.get_metrics(dataset, metric_on_set) loss, accuracy = xinfo["loss"], xinfo["accuracy"] if best_index == -1: best_index, highest_accuracy = arch_index, accuracy elif highest_accuracy < accuracy: best_index, highest_accuracy = arch_index, accuracy del latency, loss if self.verbose: if not evaluated_indexes: print("The evaluated_indexes is empty, please fill it before call find_best.") else: print( " the best architecture : [{:}] {:} with accuracy={:.3f}%".format( best_index, self.arch(best_index), highest_accuracy ) ) return best_index, highest_accuracy def get_net_param(self, index, dataset, seed: Optional[int], hp: Text = "12"): if self.verbose: print( "{:} Call the get_net_param function with index={:}, dataset={:}, " "seed={:}, hp={:}".format(time_string(), index, dataset, seed, hp) ) info = self.query_meta_info_by_index(index, hp) return info.get_net_param(dataset, seed) def get_net_config(self, index: int, dataset: Text): if self.verbose: print( "{:} Call the get_net_config function with index={:}, " "dataset={:}.".format(time_string(), index, dataset) ) self._prepare_info(index) if index in self.arch2infos_dict: info = self.arch2infos_dict[index] else: raise ValueError("The arch_index={:} is not in arch2infos_dict.".format(index)) info = next(iter(info.values())) results = info.query(dataset, None) results = next(iter(results.values())) return results.get_config(None) def get_cost_info(self, index: int, dataset: Text, hp: Text = "12") -> Dict[Text, float]: if self.verbose: print( "{:} Call the get_cost_info function with index={:}, " "dataset={:}, and hp={:}.".format(time_string(), index, dataset, hp) ) self._prepare_info(index) info = self.query_meta_info_by_index(index, hp) return info.get_compute_costs(dataset) def get_latency(self, index: int, dataset: Text, hp: Text = "12") -> float: if self.verbose: print( "{:} Call the get_latency function with index={:}, " "dataset={:}, and hp={:}.".format(time_string(), index, dataset, hp) ) cost_dict = self.get_cost_info(index, dataset, hp) return cost_dict["latency"] 
@abc.abstractmethod def show(self, index=-1): def _show(self, index=-1, print_information=None) -> None: if index < 0: print(self) evaluated_indexes = sorted(list(self.evaluated_indexes)) for i, idx in enumerate(evaluated_indexes): print( "\n" + "-" * 10 + " The ({:5d}/{:5d}) {:06d}-th " "architecture! ".format(i, len(evaluated_indexes), idx) + "-" * 10 ) print("arch : {:}".format(self.meta_archs[idx])) for unused_key, result in self.arch2infos_dict[index].items(): strings = print_information(result) print(">" * 40 + " {:03d} epochs ".format(result.get_total_epoch()) + ">" * 40) print("\n".join(strings)) print("<" * 40 + "------------" + "<" * 40) else: if 0 <= index < len(self.meta_archs): if index not in self.evaluated_indexes: self._prepare_info(index) if index not in self.evaluated_indexes: print("The {:}-th architecture has not been evaluated " "or not saved.".format(index)) else: for unused_key, result in self.arch2infos_dict[index].items(): strings = print_information(result) print(">" * 40 + " {:03d} epochs ".format(result.get_total_epoch()) + ">" * 40) print("\n".join(strings)) print("<" * 40 + "------------" + "<" * 40) else: print("This index ({:}) is out of range (0~{:}).".format(index, len(self.meta_archs)))
MIT License
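A usage sketch for the `statistics` record above, assuming the NATS-Bench topology benchmark file has already been downloaded; the file path here is only a placeholder for wherever it lives on disk.

from nats_bench import create

api = create("NATS-tss-v1_0-3ffb9.pickle.pbz2", "tss", fast_mode=True, verbose=False)
counts = api.statistics("cifar10", hp="12")
print(counts)   # dict mapping number of trials (seeds) to number of architectures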
azure/shrike
shrike/compliant_logging/data_conversions.py
is_pandas_series
python
def is_pandas_series(obj: Any) -> bool: try: import pandas as pd except Exception: return False return isinstance(obj, pd.Series)
Checks if the given object is a pandas Series. Args: obj (Any): Object to check. Returns: bool: True if the object is a Series, otherwise False.
https://github.com/azure/shrike/blob/4621f932b6d9fe9b6768732337e63842fd425e8b/shrike/compliant_logging/data_conversions.py#L257-L271
from typing import Any, Optional from .exceptions import PublicRuntimeError def is_spark_dataframe(obj: Any) -> bool: try: from pyspark.sql import DataFrame except Exception: return False return isinstance(obj, DataFrame) def get_spark_dataframe_info(df: Any) -> str: try: from pyspark.sql import DataFrame except Exception: raise PublicRuntimeError( "Spark DataFrame is not supported in current environment" ) try: return "Spark DataFrame (Row Count: {} / Column Count: {})".format( df.count(), len(df.columns) ) except Exception: return "Failed to extract Spark DataFrame info" def spark_dataframe_schema(df: Any, schema_map: dict = None) -> dict: try: from pyspark.sql import DataFrame except Exception: raise PublicRuntimeError( "Spark DataFrame is not supported in current environment" ) if schema_map: return dict([(schema_map.get(n, n), str(t)) for n, t in df.dtypes]) else: return dict([(n, str(t)) for n, t in df.dtypes]) def collect_spark_dataframe(df: Any) -> Optional[dict]: try: from pyspark.sql import DataFrame except Exception: raise PublicRuntimeError( "Spark DataFrame is not supported in current environment" ) try: return df.toPandas().to_dict("list") except Exception: return None def is_vaex_dataframe(obj: Any) -> bool: try: from vaex.dataframe import DataFrame as VaexDataFrame except Exception: return False return isinstance(obj, VaexDataFrame) def get_vaex_dataframe_info(df: Any) -> str: try: from vaex.dataframe import DataFrame as VaexDataFrame except Exception: raise PublicRuntimeError("Vaex DataFrame not supported in current environment") return "Vaex DataFrame (Row Count: {} / Column Count: {})".format( df.count(), len(df.get_column_names()) ) def vaex_dataframe_schema( df: Any, schema_map: dict = None ) -> dict: try: from vaex.dataframe import DataFrame as VaexDataFrame except Exception: raise PublicRuntimeError("Vaex DataFrame not supported in current environment") if schema_map: return { schema_map.get(col, col): df[col].dtype.name for col in df.get_column_names() } else: return { column_name: df.data_type(column_name) for column_name in df.get_column_names() } def collect_vaex_dataframe(df: Any) -> Optional[dict]: try: from vaex.dataframe import DataFrame as VaexDataFrame except Exception: raise PublicRuntimeError("Vaex DataFrame not supported in current environment") try: return df.to_pandas_df().to_dict("list") except Exception: return None def is_numpy_array(obj: Any) -> bool: try: import numpy as np except Exception: return False return isinstance(obj, np.ndarray) def get_numpy_array_info(arr: Any) -> str: try: import numpy as np except Exception: raise PublicRuntimeError( "Numpy Array not supported in the current environment." ) return "Numpy Array (Shape: {})".format(arr.shape) def numpy_array_to_list(arr: Any) -> list: try: import numpy as np except Exception: raise PublicRuntimeError( "Numpy Array not supported in the current environment." ) return arr.tolist()
MIT License
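A quick check of `is_pandas_series` from the record above (pandas must be installed, since the helper deliberately returns False when the import fails):

import pandas as pd
from shrike.compliant_logging.data_conversions import is_pandas_series

print(is_pandas_series(pd.Series([1, 2, 3])))   # True
print(is_pandas_series([1, 2, 3]))              # False: a plain list is not a Series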
python-zk/kazoo
kazoo/recipe/partitioner.py
SetPartitioner._child_watching
python
def _child_watching(self, func=None, client_handler=False): watcher = PatientChildrenWatch(self._client, self._party_path, self._time_boundary) asy = watcher.start() if func is not None: if client_handler: func = partial(self._client.handler.spawn, func) asy.rawlink(func) return asy
Called when children are being watched to stabilize. This actually returns immediately; the child watcher spins up a new thread/greenlet and waits for the children to stabilize before any callbacks might run. :param client_handler: If True, deliver the result using the client's event handler.
https://github.com/python-zk/kazoo/blob/6337fd6f72b59fb20886f980f2e0d6d41525dc35/kazoo/recipe/partitioner.py#L379-L399
from functools import partial import logging import os import socket from kazoo.exceptions import KazooException, LockTimeout from kazoo.protocol.states import KazooState from kazoo.recipe.watchers import PatientChildrenWatch log = logging.getLogger(__name__) class PartitionState(object): ALLOCATING = "ALLOCATING" ACQUIRED = "ACQUIRED" RELEASE = "RELEASE" FAILURE = "FAILURE" class SetPartitioner(object): def __init__(self, client, path, set, partition_func=None, identifier=None, time_boundary=30, max_reaction_time=1, state_change_event=None): self.state_id = 0 self.state = PartitionState.ALLOCATING self.state_change_event = state_change_event or client.handler.event_object() self._client = client self._path = path self._set = set self._partition_set = [] self._partition_func = partition_func or self._partitioner self._identifier = identifier or '%s-%s' % ( socket.getfqdn(), os.getpid()) self._locks = [] self._lock_path = '/'.join([path, 'locks']) self._party_path = '/'.join([path, 'party']) self._time_boundary = time_boundary self._max_reaction_time = max_reaction_time self._acquire_event = client.handler.event_object() client.ensure_path(path) client.ensure_path(self._lock_path) client.ensure_path(self._party_path) self._party = client.ShallowParty(self._party_path, identifier=self._identifier) self._party.join() self._state_change = client.handler.rlock_object() client.add_listener(self._establish_sessionwatch) self._child_watching(self._allocate_transition, client_handler=True) def __iter__(self): for partition in self._partition_set: yield partition @property def failed(self): return self.state == PartitionState.FAILURE @property def release(self): return self.state == PartitionState.RELEASE @property def allocating(self): return self.state == PartitionState.ALLOCATING @property def acquired(self): return self.state == PartitionState.ACQUIRED def wait_for_acquire(self, timeout=30): self._acquire_event.wait(timeout) def release_set(self): self._release_locks() if self._locks: self._fail_out() return else: with self._state_change: if self.failed: return self._set_state(PartitionState.ALLOCATING) self._child_watching(self._allocate_transition, client_handler=True) def finish(self): self._release_locks() self._fail_out() def _fail_out(self): with self._state_change: self._set_state(PartitionState.FAILURE) if self._party.participating: try: self._party.leave() except KazooException: pass def _allocate_transition(self, result): if result.exception: self._fail_out() return children, async_result = result.get() children_changed = self._client.handler.event_object() def updated(result): with self._state_change: children_changed.set() if self.acquired: self._set_state(PartitionState.RELEASE) with self._state_change: if not self.allocating: return state_id = self.state_id async_result.rawlink(updated) def abort_if_needed(): if self.state_id == state_id: if children_changed.is_set(): self._abort_lock_acquisition() return True else: return False else: if self.allocating or self.acquired: with self._state_change: self._set_state(PartitionState.RELEASE) return True partition_set = self._partition_func( self._identifier, list(self._party), self._set) for member in partition_set: lock = self._client.Lock(self._lock_path + '/' + str(member)) while True: try: lock.acquire(timeout=self._max_reaction_time) except LockTimeout: if abort_if_needed(): return except KazooException: return self.finish() else: break self._locks.append(lock) if abort_if_needed(): return with self._state_change: if self.state_id == 
state_id and not children_changed.is_set(): self._partition_set = partition_set self._set_state(PartitionState.ACQUIRED) self._acquire_event.set() return if not abort_if_needed(): self._fail_out() def _release_locks(self): self._acquire_event.clear() for lock in self._locks[:]: try: lock.release() except KazooException: pass else: self._locks.remove(lock) def _abort_lock_acquisition(self): self._release_locks() if self._locks: self._fail_out() return self._child_watching(self._allocate_transition, client_handler=True)
Apache License 2.0
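A usage sketch of the partitioner whose internals appear in the kazoo record above, following the standard SetPartitioner state loop; the ZooKeeper address, path and member set are placeholders.

from kazoo.client import KazooClient

client = KazooClient(hosts="127.0.0.1:2181")   # placeholder ensemble address
client.start()

partitioner = client.SetPartitioner(path="/app/partitions", set=("a", "b", "c"))
while True:
    if partitioner.failed:
        raise RuntimeError("lost session or failed to acquire a partition set")
    elif partitioner.release:
        partitioner.release_set()        # party membership changed; re-allocate
    elif partitioner.acquired:
        for member in partitioner:       # work only on the members this node owns
            print("owning", member)
        break
    elif partitioner.allocating:
        partitioner.wait_for_acquire()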
byceps/byceps
byceps/services/user/service.py
find_db_user_by_screen_name
python
def find_db_user_by_screen_name( screen_name: str, *, case_insensitive=False ) -> Optional[DbUser]: query = db.session.query(DbUser) if case_insensitive: query = query.filter( db.func.lower(DbUser.screen_name) == screen_name.lower() ) else: query = query.filter_by(screen_name=screen_name) return query.one_or_none()
Return the user with that screen name, or `None` if not found.
https://github.com/byceps/byceps/blob/138f928e98fd1e3d79943e1a8744ea04cef465b5/byceps/services/user/service.py#L194-L207
from __future__ import annotations from datetime import datetime, timedelta from typing import Optional from sqlalchemy import select from ...database import db, paginate, Pagination, Query from ...typing import UserID from ..user_avatar.dbmodels import ( Avatar as DbAvatar, AvatarSelection as DbAvatarSelection, ) from .dbmodels.detail import UserDetail as DbUserDetail from .dbmodels.user import User as DbUser from .transfer.models import ( User, UserDetail, UserEmailAddress, UserForAdmin, UserForAdminDetail, UserStateFilter, ) class UserIdRejected(Exception): def find_active_user( user_id: UserID, *, include_avatar: bool = False, ) -> Optional[User]: query = _get_user_query(include_avatar) row = query .filter(DbUser.initialized == True) .filter(DbUser.suspended == False) .filter(DbUser.deleted == False) .filter(DbUser.id == user_id) .one_or_none() if row is None: return None return _user_row_to_dto(row) def find_user( user_id: UserID, *, include_avatar: bool = False, ) -> Optional[User]: row = _get_user_query(include_avatar) .filter(DbUser.id == user_id) .one_or_none() if row is None: return None return _user_row_to_dto(row) def get_user(user_id: UserID, *, include_avatar: bool = False) -> User: user = find_user(user_id, include_avatar=include_avatar) if user is None: raise ValueError(f"Unknown user ID '{user_id}'") return user def get_users( user_ids: set[UserID], *, include_avatars: bool = False, ) -> set[User]: if not user_ids: return set() query = _get_user_query(include_avatars) rows = query .filter(DbUser.id.in_(frozenset(user_ids))) .all() return {_user_row_to_dto(row) for row in rows} def _get_user_query( include_avatar: bool, ) -> Query: query = db.session .query( DbUser.id, DbUser.screen_name, DbUser.suspended, DbUser.deleted, DbUser.locale, DbAvatar if include_avatar else db.null(), ) if include_avatar: query = query .outerjoin(DbAvatarSelection, DbUser.avatar_selection) .outerjoin(DbAvatar) return query def _user_row_to_dto( row: tuple[UserID, str, bool, bool, Optional[str], Optional[DbAvatar]] ) -> User: user_id, screen_name, suspended, deleted, locale, avatar = row avatar_url = avatar.url if (avatar is not None) else None return User( id=user_id, screen_name=screen_name, suspended=suspended, deleted=deleted, locale=locale, avatar_url=avatar_url, ) def find_user_by_email_address(email_address: str) -> Optional[User]: user = db.session .query(DbUser) .filter( db.func.lower(DbUser.email_address) == email_address.lower() ) .one_or_none() if user is None: return None return _db_entity_to_user(user) def find_user_by_screen_name( screen_name: str, *, case_insensitive=False ) -> Optional[User]: user = find_db_user_by_screen_name( screen_name, case_insensitive=case_insensitive ) if user is None: return None return _db_entity_to_user(user)
BSD 3-Clause New or Revised License
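A minimal sketch of calling the BYCEPS service function above; it only works inside the application's Flask app context with a populated database, so treat it as indicative rather than standalone.

from byceps.services.user import service as user_service

# inside an app context with a configured database session
user = user_service.find_db_user_by_screen_name("Admin", case_insensitive=True)
print(user is not None)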
pelioniot/mbed-cloud-sdk-python
src/mbed_cloud/_backends/statistics/models/successful_response.py
SuccessfulResponse.has_more
python
def has_more(self): return self._has_more
Gets the has_more of this SuccessfulResponse. Indicates whether there are more results for you to fetch in the next page. :return: The has_more of this SuccessfulResponse. :rtype: bool
https://github.com/pelioniot/mbed-cloud-sdk-python/blob/71dc67fc2a8d1aff31e35ec781fb328e6a60639c/src/mbed_cloud/_backends/statistics/models/successful_response.py#L109-L117
from pprint import pformat from six import iteritems import re class SuccessfulResponse(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'after': 'str', 'data': 'list[Metric]', 'has_more': 'bool', 'limit': 'int', 'object': 'str', 'total_count': 'int' } attribute_map = { 'after': 'after', 'data': 'data', 'has_more': 'has_more', 'limit': 'limit', 'object': 'object', 'total_count': 'total_count' } def __init__(self, after=None, data=None, has_more=None, limit=None, object=None, total_count=None): self._after = after self._data = data self._has_more = has_more self._limit = limit self._object = object self._total_count = total_count self.discriminator = None @property def after(self): return self._after @after.setter def after(self, after): self._after = after @property def data(self): return self._data @data.setter def data(self, data): self._data = data @property
Apache License 2.0
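The `has_more` property above is what drives pagination against the statistics API; a small sketch with a hand-constructed response object (real code receives these from the generated client, and the `after` token here is a made-up placeholder).

from mbed_cloud._backends.statistics.models.successful_response import SuccessfulResponse

page = SuccessfulResponse(data=[], has_more=True, after="0123456789abcdef", limit=50)
if page.has_more:
    # pass page.after as the `after` query parameter of the next request
    print("fetch next page starting after", page.after)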
ofir-reich/seir-graph
seir.py
SimulationResults.analyze_results_df
python
def analyze_results_df(self): results_df = self.df self.duration = results_df['day'].iloc[-1] self.peak_infected_time = results_df['day'].iloc[results_df['infected'].idxmax()] self.peak_infected = results_df['infected'].max() self.peak_fraction_infected = results_df['infected'].max() / self.N self.peak_exposed_time = results_df['day'].iloc[results_df['exposed'].idxmax()] self.peak_exposed = results_df['exposed'].max() self.doubling_days = self.calculate_doubling_time() self.halving_days = self.calculate_halving_time() self.fraction_infected = results_df['recovered'].iloc[-1] / self.N fraction_quarantine_steps = results_df['quarantined'].sum() / self.N self.fraction_quarantine_time = fraction_quarantine_steps / self.steps_per_day total_tests = results_df['test_rate'].sum() / self.steps_per_day self.fraction_tests = total_tests / self.N self.peak_test_rate = results_df['test_rate'].max() / self.N
Calculate summary statistics from the results DataFrame: peak infected/exposed sizes and times, doubling and halving times, and the fractions infected, quarantined, and tested.
https://github.com/ofir-reich/seir-graph/blob/3bde8f4025bf100529494fbca255b10eddab05f5/seir.py#L199-L220
import matplotlib as mpl import matplotlib.pyplot as plt import networkx as nx import numpy as np import pandas as pd import statsmodels.api as sm mpl.rcParams['figure.figsize'] = [25, 10] N = 10000 MIN_DEGREE = 2 MEAN_DEGREE = 20 GAMMA = 0.2 STEPS_PER_DAY = 5 MAX_STEPS = 3000 INITIAL_INFECTED_NUM = 10 PROB_INFECT = 0.027 PROB_INFECT_EXPOSED_FACTOR = 0.5 RELATIVE_INFECTIOUSNESS_ASYMPTOMATIC = 0.5 DURATION_EXPOSED_INFECTS = 2 INCUBATION_DURATION_MEAN = 5.1 INCUBATION_DURATION_STD = 4.38 PROB_ASYMPTOMATIC = 0.40 PROB_RECOVER = 1 / 3.5 DAYS_IN_QUARANTINE = 14 PROB_INFECTED_DETECTED = 0 PROB_NEIGHBOR_TRACED = 0 PROB_EXPOSED_DETECTED = 0 QUARANTINE_NEIGHBORS = False TEST_NEIGHBORS = False TEST_DELAY_TIME = 0 _EPSILON = 1e-10 DOUBLING_DAYS = float((28 - 4) / np.log2(2220 / 11)) MAIN_GROUPS = ['susceptible', 'exposed', 'recovered', 'infected', 'quarantined'] ALL_COLUMNS = MAIN_GROUPS + ['test_rate'] GROUP2COLOR = dict(susceptible='blue', exposed='orange', recovered='green', quarantined='purple', infected='red', test_rate='brown') SUMMARY_ATTRS = ['duration', 'fraction_infected', 'doubling_days', 'fraction_quarantine_time', 'peak_infected_time', 'peak_fraction_infected', 'fraction_tests', 'peak_test_rate'] class SimulationResults(object): def __init__(self, results_df, G=None, **kwargs): self.df = results_df self.hyperparams = kwargs for name, value in kwargs.items(): setattr(self, name, value) if G is None: self.N = results_df[['susceptible', 'exposed', 'infected', 'recovered']].iloc[0].sum() else: self.N = len(G) if G.name.startswith('power_law'): self.G_attrs = dict(zip(['gamma', 'min_degree', 'mean_degree'], map(float, G.name.split('_')[-3:]))) self.G_attrs['N'] = self.N if not hasattr(self, 'steps_per_day'): self.steps_per_day = ((results_df['step'].iloc[1] - results_df['step'].iloc[0]) / (results_df['day'].iloc[1] - results_df['day'].iloc[0])) self.analyze_results_df() def calculate_doubling_time(self): results_df = self.df idx_end = (results_df['exposed'] > results_df['exposed'].max() * 0.5).to_numpy().nonzero()[0][0] if self.peak_exposed_time < 3 or idx_end == 0: return np.inf exposed_min = results_df['exposed'][:idx_end].min() idx_min = results_df['exposed'][:idx_end].idxmin() start_candidates = ((results_df.index >= idx_min) & (results_df.index < idx_end) & (results_df['exposed'] > exposed_min * 2)).to_numpy().nonzero()[0] if not start_candidates.size: return np.inf idx_start = start_candidates[0] try: X = sm.add_constant(results_df[idx_start:idx_end][['day']], prepend=False) log2_exposed = np.log2(results_df[idx_start:idx_end]['exposed']) regression_results = sm.OLS(log2_exposed, X).fit() doubling_days = 1 / regression_results.params['day'] except ValueError: doubling_days = None return doubling_days def calculate_halving_time(self): results_df = self.df idx_peak = results_df['exposed'].idxmax() end_candidates = ((results_df.index >= idx_peak) & (results_df['exposed'] < self.peak_exposed / 5) & (results_df['exposed'] > 5)).to_numpy().nonzero()[0] if not end_candidates.size: return None idx_end = end_candidates[0] idx_start = idx_peak if idx_end - idx_start < 20: return None try: X = sm.add_constant(results_df[idx_start:idx_end][['day']], prepend=False) log2_exposed = np.log2(results_df[idx_start:idx_end]['exposed']) regression_results = sm.OLS(log2_exposed, X).fit() halving_days = -1 / regression_results.params['day'] except ValueError: halving_days = None return halving_days
Apache License 2.0
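The peak statistics in `analyze_results_df` above are plain pandas reductions over the results frame; a toy illustration with a hand-made DataFrame (not the simulator's real output):

import pandas as pd

df = pd.DataFrame({
    "day":      [0, 1, 2, 3, 4],
    "infected": [1, 4, 9, 6, 2],
})
N = 100
peak_infected_time = df["day"].iloc[df["infected"].idxmax()]   # day of the peak
peak_fraction_infected = df["infected"].max() / N
print(peak_infected_time, peak_fraction_infected)              # 2 0.09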
wq/django-rest-pandas
rest_pandas/serializers.py
PandasBoxplotSerializer.get_group_field
python
def get_group_field(self): return self.get_meta_option('boxplot_group')
Categorical field to group datasets by.
https://github.com/wq/django-rest-pandas/blob/53109d9bd97fac9b82ec44ad54e7c3d8911847c6/rest_pandas/serializers.py#L352-L356
from rest_framework import serializers from pandas import DataFrame from pandas.api.types import is_numeric_dtype from django.core.exceptions import ImproperlyConfigured import datetime from collections import OrderedDict class PandasSerializer(serializers.ListSerializer): read_only = True index_none_value = None wq_chart_type = None def get_index(self, dataframe): return self.get_index_fields() def get_dataframe(self, data): dataframe = DataFrame(data) index = self.get_index(dataframe) if index: if self.index_none_value is not None: for key in index: try: dataframe[key].fillna( self.index_none_value, inplace=True ) except ValueError: pass dataframe.set_index(index, inplace=True) else: dataframe.index.name = 'row' return dataframe def transform_dataframe(self, dataframe): view = self.context.get('view', None) if view and hasattr(view, 'transform_dataframe'): return self.context['view'].transform_dataframe(dataframe) return dataframe @property def data(self): data = super(serializers.ListSerializer, self).data if isinstance(data, DataFrame) or data: dataframe = self.get_dataframe(data) return self.transform_dataframe(dataframe) else: return DataFrame([]) def to_representation(self, data): if isinstance(data, DataFrame): return data return super(PandasSerializer, self).to_representation(data) @property def model_serializer(self): serializer = type(self.child) if serializer.__name__ == 'SerializerWithListSerializer': return serializer.__bases__[0] return serializer @property def model_serializer_meta(self): return getattr(self.model_serializer, 'Meta', object()) def get_index_fields(self): index_fields = self.get_meta_option('index', []) if index_fields: return index_fields model = getattr(self.model_serializer_meta, 'model', None) if model: pk_name = model._meta.pk.name if pk_name in self.child.get_fields(): return [pk_name] return [] def get_meta_option(self, name, default=None): meta_name = 'pandas_' + name value = getattr(self.model_serializer_meta, meta_name, None) if value is None: if default is not None: return default else: raise ImproperlyConfigured( "%s should be specified on %s.Meta" % (meta_name, self.model_serializer.__name__) ) return value class PandasUnstackedSerializer(PandasSerializer): index_none_value = '-' wq_chart_type = 'timeSeries' def get_index(self, dataframe): return self.get_index_fields() + self.get_header_fields() def transform_dataframe(self, dataframe): dataframe.columns.name = "" for i in range(len(self.get_header_fields())): dataframe = dataframe.unstack() dataframe = dataframe.dropna( axis=0, how='all' ).dropna( axis=1, how='all' ) return dataframe def get_header_fields(self): return self.get_meta_option('unstacked_header') class PandasScatterSerializer(PandasSerializer): index_none_value = '-' wq_chart_type = 'scatter' def get_index(self, dataframe): return ( self.get_index_fields() + self.get_header_fields() + self.get_coord_fields() ) def transform_dataframe(self, dataframe): coord_fields = self.get_coord_fields() header_fields = self.get_header_fields() for i in range(len(coord_fields)): dataframe = dataframe.unstack() dataframe = dataframe.dropna(axis=1, how='all') dataframe = dataframe.dropna(axis=0, how='any') for i in range(len(header_fields)): dataframe = dataframe.unstack() columns = [] for i in range(len(header_fields) + 1): columns.append([]) for col in dataframe.columns: value_name = col[0] coord_names = list(col[1:len(coord_fields) + 1]) header_names = list(col[len(coord_fields) + 1:]) coord_name = '' for name in coord_names: if name != 
self.index_none_value: coord_name += name + '-' coord_name += value_name columns[0].append(coord_name) for i, header_name in enumerate(header_names): columns[1 + i].append(header_name) dataframe.columns = columns dataframe.columns.names = [''] + header_fields return dataframe def get_coord_fields(self): return self.get_meta_option('scatter_coord') def get_header_fields(self): return self.get_meta_option('scatter_header', []) class PandasBoxplotSerializer(PandasSerializer): index_none_value = '-' wq_chart_type = 'boxplot' def get_index(self, dataframe): group_field = self.get_group_field() date_field = self.get_date_field() header_fields = self.get_header_fields() extra_index_fields = self.get_extra_index_fields() index = [] if date_field: index.append(date_field) index += extra_index_fields index.append(group_field) index += header_fields return index def transform_dataframe(self, dataframe): grouping = self.get_grouping(dataframe) group_field = self.get_group_field() header_fields = self.get_header_fields() if "series" in grouping: for i in range(len(header_fields) + 1): dataframe = dataframe.unstack() groups = { col: dataframe[col] for col in dataframe.columns } if "year" in grouping: interval = "year" elif "month" in grouping: interval = "month" else: interval = None series_infos = OrderedDict() for header, series in groups.items(): if interval: series_stats = self.boxplots_for_interval(series, interval) else: series_stats = [self.compute_boxplot(series)] for series_stat in series_stats: if isinstance(header, tuple): value_name = header[0] col_values = header[1:] else: value_name = header col_values = [] col_names = tuple(zip(dataframe.columns.names[1:], col_values)) if interval in series_stat: col_names += ((interval, series_stat[interval]),) series_infos.setdefault(col_names, dict(col_names)) series_info = series_infos[col_names] for stat_name, val in series_stat.items(): if stat_name != interval: series_info[value_name + '-' + stat_name] = val dataframe = DataFrame(list(series_infos.values())) if 'series' in grouping: index = header_fields + [group_field] unstack = len(header_fields) if interval: index = [interval] + index unstack += 1 else: index = [interval] unstack = 0 dataframe.set_index(index, inplace=True) dataframe.columns.name = '' for i in range(unstack): dataframe = dataframe.unstack() dataframe = dataframe.dropna(axis=1, how='all') return dataframe def get_grouping(self, dataframe): request = self.context.get('request', None) datasets = len(dataframe.columns) if request: group = request.GET.get('group', None) if group: return group return default_grouping(datasets, self.get_date_field()) def boxplots_for_interval(self, series, interval): def get_interval_name(date): if isinstance(date, tuple): date = date[0] if hasattr(date, 'count') and date.count('-') == 2: date = datetime.datetime.strptime(date, "%Y-%m-%d") return getattr(date, interval) interval_stats = [] groups = series.groupby(get_interval_name).groups for interval_name, group in groups.items(): stats = self.compute_boxplot(series[group]) stats[interval] = interval_name interval_stats.append(stats) return interval_stats def compute_boxplot(self, series): from matplotlib.cbook import boxplot_stats series = series[series.notnull()] if len(series.values) == 0: return {} elif not is_numeric_dtype(series): return self.non_numeric_stats(series) stats = boxplot_stats(list(series.values))[0] stats['count'] = len(series.values) stats['fliers'] = "|".join(map(str, stats['fliers'])) return stats def non_numeric_stats(self, 
series): return { 'count': len(series), 'mode': series.mode()[0], }
MIT License
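`get_group_field` above reads the `pandas_boxplot_group` attribute from the model serializer's Meta (via `get_meta_option`, which prepends `pandas_`). A configuration sketch; the Measurement model and its field names are invented, and `pandas_boxplot_date` is assumed to follow the same prefix convention for `get_date_field()`.

from rest_framework import serializers
from rest_pandas.serializers import PandasBoxplotSerializer

class MeasurementSerializer(serializers.ModelSerializer):
    class Meta:
        model = Measurement                    # hypothetical Django model
        fields = ["site", "date", "value"]
        list_serializer_class = PandasBoxplotSerializer
        pandas_boxplot_group = "site"          # read by get_group_field()
        pandas_boxplot_date = "date"           # assumed option for get_date_field()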
wimlds-trojmiasto/detect-waste
detr/util/laprop.py
LaProp.step
python
def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] centered = group['centered'] state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p.data) state['exp_avg_lr_1'] = 0.; state['exp_avg_lr_2'] = 0. state['exp_avg_sq'] = torch.zeros_like(p.data) if centered: state['exp_mean_avg_beta2'] = torch.zeros_like(p.data) if amsgrad: state['max_exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] if centered: exp_mean_avg_beta2 = state['exp_mean_avg_beta2'] if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) state['exp_avg_lr_1'] = state['exp_avg_lr_1'] * beta1 + (1 - beta1) * group['lr'] state['exp_avg_lr_2'] = state['exp_avg_lr_2'] * beta2 + (1 - beta2) bias_correction1 = state['exp_avg_lr_1'] / group['lr'] if group['lr']!=0. else 1. step_size = 1 / bias_correction1 bias_correction2 = state['exp_avg_lr_2'] denom = exp_avg_sq if centered: exp_mean_avg_beta2.mul_(beta2).add_(1 - beta2, grad) if state['step'] > self.steps_before_using_centered: mean = exp_mean_avg_beta2 ** 2 denom = denom - mean if amsgrad: if not (centered and state['step'] <= self.steps_before_using_centered): torch.max(max_exp_avg_sq, denom, out=max_exp_avg_sq) denom = max_exp_avg_sq denom = denom.div(bias_correction2).sqrt_().add_(group['eps']) step_of_this_grad = grad / denom exp_avg.mul_(beta1).add_( (1 - beta1) * group['lr'], step_of_this_grad) p.data.add_(-step_size, exp_avg ) if group['weight_decay'] != 0: p.data.add_( - group['weight_decay'], p.data) return loss
Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss.
https://github.com/wimlds-trojmiasto/detect-waste/blob/75f47fda5f918a06c51e372af19626675a9a3bca/detr/util/laprop.py#L26-L107
from torch.optim import Optimizer import math import torch class LaProp(Optimizer): def __init__(self, params, lr=4e-4, betas=(0.9, 0.999), eps=1e-15, weight_decay=0, amsgrad=False, centered=False): self.steps_before_using_centered = 10 if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, centered=centered) super(LaProp, self).__init__(params, defaults)
MIT License