repository_name: string, lengths 7-107
function_path: string, lengths 4-190
function_identifier: string, lengths 1-236
language: string, 1 distinct value
function: string, lengths 9-647k
docstring: string, lengths 5-488k
function_url: string, lengths 71-285
context: string, lengths 0-2.51M
license: string, 5 distinct values
jlengrand/ivolution
ivolution/Guy.py
Guy.find_date
python
def find_date(self, date):
    try:
        my_date = time.strptime(date, "%Y:%m:%d %H:%M:%S")
    except Exception:
        my_logger = logging.getLogger('IvolutionFile.Guy')
        my_logger.error("Impossible to parse date for %s" % (self.name))
        my_date = time.time()
    return my_date
Takes a date as a string and returns a date object, used afterwards to sort images chronologically.

:param date: The date when the image was taken
:type date: string
:returns: time.struct_time -- a date object from the time library.
:raises: In case of a parsing error, the date is set to the current time instead.
https://github.com/jlengrand/ivolution/blob/2753d7120b11fb94a5ce84bfe4a134b5a437a5c7/ivolution/Guy.py#L85-L101
import time import logging import cv class Guy(object): def __init__(self, image_id, date, source): self.in_x = None self.in_y = None self.name = image_id self.date = self.find_date(date) self.source = source self.faces = [] image = self.load_image() (self.in_x, self.in_y) = cv.GetSize(image) self.in_channels = image.nChannels self.depth = image.depth self.x_center = 0 self.y_center = 0 self.ratio = 1.0 def resized_dims(self): inx = int(self.ratio * self.in_x) iny = int(self.ratio * self.in_y) return (inx, iny) def resized_center(self): xc = int(self.ratio * self.x_center) yc = int(self.ratio * self.y_center) return (xc, yc) def load_image(self): image = cv.LoadImage(self.source) return image
BSD 3-Clause New or Revised License
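A minimal sketch of the parsing behaviour find_date implements, using only the standard-library time module. Note that a successful parse yields a time.struct_time while the error fallback (time.time()) yields a float, so code that later sorts on this value has to tolerate both:

import time

EXIF_FORMAT = "%Y:%m:%d %H:%M:%S"  # the timestamp format find_date expects

good = time.strptime("2021:06:01 12:30:00", EXIF_FORMAT)
print(good.tm_year, good.tm_mon, good.tm_mday)  # 2021 6 1

try:
    time.strptime("not a date", EXIF_FORMAT)
except ValueError:
    fallback = time.time()  # what find_date returns when parsing fails
    print(type(fallback))   # <class 'float'>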
alvinwan/quupod
quupod/models.py
Base.setting
python
def setting(
        self,
        name: str,
        dynamic: bool=False,
        default=Nil) -> db.Model:
    assert name in self.__defaultsettings__ or dynamic, 'Not a valid setting'
    key = {'%s_id' % self.entity: self.id}
    setting = self.__settingclass__.query.filter_by(
        name=name, **key).one_or_none()
    if not setting:
        setting = self.load_setting(name, default)
    return setting
Get a Setting by name.

:param name: The name of the setting to fetch.
:param dynamic: Set to True if the setting is not expected to exist in the default settings.
https://github.com/alvinwan/quupod/blob/45f0d7d49c957dee15d237e2a1364764a18ca085/quupod/models.py#L138-L157
from sqlalchemy import asc from sqlalchemy import desc from flask import current_app from werkzeug.local import LocalProxy from flask_debugtoolbar import DebugToolbarExtension from flask_sqlalchemy import SQLAlchemy from sqlalchemy_utils import PasswordType from sqlalchemy_utils import ArrowType from sqlalchemy_utils.types.choice import ChoiceType from passlib.context import CryptContext from quupod.defaults import default_queue_settings import flask_login import arrow from flask_script import Manager from flask_migrate import Migrate from flask import g, request from quupod.views import url_for from quupod.utils import strfdelta from quupod.utils import Nil from quupod.utils import str2lst from wtforms import Form db = SQLAlchemy() migrate = Migrate() migration_manager = Manager() toolbar = DebugToolbarExtension() class Base(db.Model): __abstract__ = True __access_token = None __context = CryptContext(schemes=['pbkdf2_sha512']) id = db.Column(db.Integer, primary_key=True) updated_at = db.Column(ArrowType) updated_by = db.Column(db.Integer) created_at = db.Column(ArrowType, default=arrow.now('US/Pacific')) created_by = db.Column(db.Integer) is_active = db.Column(db.Boolean, default=True) @property def entity(self) -> str: return self.__class__.__name__.lower() @staticmethod def random_hash() -> str: return Base.hash(str(arrow.utcnow())) @staticmethod def hash(value: str) -> str: return Base.__context.encrypt(value) @classmethod def from_request(cls): return cls(**dict(request.form.items())).save() def modify_time(self, *fields, act=lambda t: t) -> db.Model: for field in fields: setattr(self, field, act(getattr(self, field))) return self def to_local(self, *fields) -> db.Model: return self.modify_time( *fields, act=lambda t: t.to(current_app.config['TZ'] or 'local')) def to_utc(self, *fields) -> db.Model: return self.modify_time(*fields, act=lambda t: t.to('utc')) def set_tz(self, *fields, tz: str) -> db.Model: return self.modify_time( *fields, act=lambda t: t.replace(tzinfo=tz)) def set_local(self, *fields) -> db.Model: from dateutil import tz as t return self.set_tz( *fields, tz=t.gettz(current_app.config['TZ']) or t.tzlocal()) def update(self, **kwargs) -> db.Model: for k, v in kwargs.items(): setattr(self, k, v) return self def save(self) -> db.Model: try: db.session.add(self) db.session.commit() return self except: db.session.rollback() return self.save()
Apache License 2.0
mindspore-ai/mindinsight
mindinsight/datavisual/data_transform/data_manager.py
DataManager._load_data
python
def _load_data(self):
    # Check-and-set the status under the lock so only one caller loads at a time.
    with self._status_mutex:
        if self.status == DataManagerStatus.LOADING.value:
            logger.debug("Current status is %s , will ignore to load data.", self.status)
            return
        self.status = DataManagerStatus.LOADING.value

    with ComputingResourceManager.get_instance().get_executor(
            max_processes_cnt=settings.MAX_PROCESSES_COUNT) as executor:
        self._brief_cache.update_cache(executor)
        brief_cache_update = time.time()
        for _ in self._detail_cache.update_cache(executor):
            update_interval = time.time() - brief_cache_update
            logger.debug('Loading one round of detail cache taking %ss.', update_interval)
            # Refresh the brief cache whenever a round of detail loading takes longer than 3s.
            if update_interval > 3:
                self._brief_cache.update_cache(executor)
                brief_cache_update += update_interval

    with self._status_mutex:
        if not self._brief_cache.has_content() and not self._detail_cache.has_content():
            self.status = DataManagerStatus.INVALID.value
        else:
            self.status = DataManagerStatus.DONE.value
        logger.info("Load brief data end, and loader pool size is %r.",
                    self._detail_cache.loader_pool_size())
Load data once; if the current status is already LOADING, the call is ignored.
https://github.com/mindspore-ai/mindinsight/blob/253a210719dbb1e55b826f2e489322f402d66676/mindinsight/datavisual/data_transform/data_manager.py#L925-L949
import abc import datetime import threading import time import os from typing import Iterable, Optional from mindinsight.datavisual.data_transform.summary_watcher import SummaryWatcher from mindinsight.conf import settings from mindinsight.datavisual.common import exceptions from mindinsight.datavisual.common.enums import CacheStatus from mindinsight.datavisual.common.log import logger from mindinsight.datavisual.common.enums import DataManagerStatus from mindinsight.datavisual.common.enums import PluginNameEnum from mindinsight.datavisual.common.exceptions import TrainJobNotExistError from mindinsight.datavisual.data_transform.loader_generators.loader_generator import MAX_DATA_LOADER_SIZE from mindinsight.datavisual.data_transform.loader_generators.data_loader_generator import DataLoaderGenerator from mindinsight.utils.computing_resource_mgr import ComputingResourceManager from mindinsight.utils.exceptions import MindInsightException from mindinsight.utils.exceptions import ParamValueError from mindinsight.utils.exceptions import UnknownError from mindinsight.datavisual.utils.tools import exception_wrapper class _BasicTrainJob: def __init__(self, abs_summary_base_dir, entry): self._abs_summary_base_dir = abs_summary_base_dir self._entry = entry @property def abs_summary_dir(self): return os.path.realpath(os.path.join(self._abs_summary_base_dir, self._entry['relative_path'])) @property def summary_base_dir(self): return self._abs_summary_base_dir @property def train_id(self): return self._entry['relative_path'] @property def profiler_dir(self): if self._entry['profiler'] is not None: return self._entry['profiler']['directory'] return None @property def create_time(self): return self._entry['create_time'] @property def update_time(self): return self._entry['update_time'] @property def profiler_type(self): if self._entry['profiler'] is not None: return self._entry['profiler']['profiler_type'] return '' @property def summary_files(self): return self._entry['summary_files'] @property def graph_files(self): return self._entry['graph_files'] @property def lineage_files(self): return self._entry['lineage_files'] @property def dump_dir(self): return self._entry.get('dump_dir', None) class CachedTrainJob: def __init__(self, basic_info: _BasicTrainJob): self._basic_info = basic_info self._last_access_time = datetime.datetime.utcnow() self._content = {} self._cache_status = CacheStatus.NOT_IN_CACHE self._key_locks = {} @property def cache_status(self): return self._cache_status @cache_status.setter def cache_status(self, value): self._cache_status = value def update_access_time(self): self._last_access_time = datetime.datetime.utcnow() @property def last_access_time(self): return self._last_access_time @property def abs_summary_dir(self): return self._basic_info.abs_summary_dir @property def summary_base_dir(self): return self._basic_info.summary_base_dir def set(self, key, value): self._content[key] = value def delete(self, key, raise_exception=True): try: self._content.pop(key) except KeyError: if raise_exception: raise ParamValueError("Delete failed. 
Invalid cache key({}).".format(key)) def get(self, key, raise_exception=True): try: return self._content[key] except KeyError: if raise_exception: raise ParamValueError("Invalid cache key({}).".format(key)) return None @property def basic_info(self): return self._basic_info @basic_info.setter def basic_info(self, value): self._basic_info = value def lock_key(self, key): return self._key_locks.setdefault(key, threading.Lock()) @property def train_id(self): return self._basic_info.train_id class TrainJob: def __init__(self, brief_train_job: CachedTrainJob, detail_train_job: Optional[CachedTrainJob] = None): self._brief = brief_train_job self._detail = detail_train_job if self._detail is None: self._cache_status = CacheStatus.NOT_IN_CACHE else: self._cache_status = self._detail.cache_status def has_detail(self): return bool(self._detail is not None) def get_detail(self, key): if not self.has_detail(): raise exceptions.TrainJobDetailNotInCacheError() return self._detail.get(key) def get_brief(self, key): return self._brief.get(key) def get_basic_info(self): return self._brief.basic_info @property def cache_status(self): return self._cache_status @cache_status.setter def cache_status(self, cache_status): self._cache_status = cache_status class BaseCacheItemUpdater(abc.ABC): def update_item(self, cache_item: CachedTrainJob): raise NotImplementedError() class _BaseCacheManager: def __init__(self, summary_base_dir): self._summary_base_dir = summary_base_dir self._updaters = {} self._lock = threading.Lock() self._cache_items = {} def size(self): return len(self._cache_items) def register_cache_item_updater(self, updater: BaseCacheItemUpdater): self._updaters[updater.__class__.__qualname__] = updater def get_train_jobs(self): copied_train_jobs = dict(self._cache_items) return copied_train_jobs def get_train_job(self, train_id): try: return self._cache_items[train_id] except KeyError: raise TrainJobNotExistError(train_id) def cache_train_job(self, train_id) -> bool: raise NotImplementedError() def delete_train_job(self, train_id): if train_id in self._cache_items: del self._cache_items[train_id] def has_content(self): return bool(self._cache_items) def update_cache(self, executor): raise NotImplementedError() class _BriefCacheManager(_BaseCacheManager): def __init__(self, summary_base_dir): super(_BriefCacheManager, self).__init__(summary_base_dir) self._summary_watcher = SummaryWatcher() def cache_train_job(self, train_id): if train_id in self._cache_items: self._cache_items[train_id].update_access_time() return False def update_cache(self, executor): logger.info('Start to update BriefCacheManager.') summaries_info = self._summary_watcher.list_summary_directories(self._summary_base_dir) basic_train_jobs = [] for info in summaries_info: basic_train_jobs.append(_BasicTrainJob( abs_summary_base_dir=self._summary_base_dir, entry=info )) with self._lock: new_cache_items = self._merge_with_disk(basic_train_jobs) self._cache_items = new_cache_items for updater in self._updaters.values(): for cache_item in self._cache_items.values(): updater.update_item(cache_item) def _merge_with_disk(self, disk_train_jobs: Iterable[_BasicTrainJob]): new_cache_items = {} for train_job in disk_train_jobs: if train_job.train_id not in self._cache_items: new_cache_items[train_job.train_id] = CachedTrainJob(train_job) else: reused_train_job = self._cache_items[train_job.train_id] reused_train_job.basic_info = train_job new_cache_items[train_job.train_id] = reused_train_job return new_cache_items def 
register_folder_analyzer(self, analyzer): self._summary_watcher.register_folder_analyzer(analyzer) @property def cache_items(self): return self._cache_items DATAVISUAL_PLUGIN_KEY = "tag_mapping" DATAVISUAL_CACHE_KEY = "datavisual" class _DetailCacheManager(_BaseCacheManager): def __init__(self, summary_base_dir): super().__init__(summary_base_dir) self._loader_pool = {} self._deleted_id_list = [] self._loader_pool_mutex = threading.Lock() self._loader_generators = [DataLoaderGenerator(summary_base_dir)] self._loading_mutex = threading.Lock() def has_content(self): return bool(self._loader_pool) def register_folder_analyzer(self, analyzer): for generator in self._loader_generators: generator.register_folder_analyzer(analyzer) def size(self): raise NotImplementedError() def loader_pool_size(self): return len(self._loader_pool) def update_cache(self, executor): with self._loading_mutex: load_in_cache = exception_wrapper(self._execute_load_data) try: while not load_in_cache(executor): yield except UnknownError as ex: logger.warning("Load event data failed. Detail: %s.", str(ex)) def cache_train_job(self, train_id): loader = None need_reload = False with self._loader_pool_mutex: if self._is_loader_in_loader_pool(train_id, self._loader_pool): loader = self._loader_pool.get(train_id) if loader is None: for generator in self._loader_generators: tmp_loader = generator.generate_loader_by_train_id(train_id) if loader and loader.latest_update_time > tmp_loader.latest_update_time: continue loader = tmp_loader if loader is None: raise TrainJobNotExistError(train_id) self._add_loader(loader) need_reload = True self._update_loader_latest_update_time(loader.loader_id) return need_reload def get_train_jobs(self): def _add_loader(self, loader): if len(self._loader_pool) >= MAX_DATA_LOADER_SIZE: delete_number = len(self._loader_pool) - MAX_DATA_LOADER_SIZE + 1 sorted_loaders = sorted(self._loader_pool.items(), key=lambda loader: loader[1].latest_update_time) for index in range(delete_number): delete_loader_id = sorted_loaders[index][0] self._delete_loader(delete_loader_id) self._loader_pool.update({loader.loader_id: loader}) def _delete_loader(self, loader_id): if self._loader_pool.get(loader_id) is not None: logger.debug("delete loader %s", loader_id) self._loader_pool.pop(loader_id) def _execute_loader(self, loader_id, executor): try: with self._loader_pool_mutex: loader = self._loader_pool.get(loader_id, None) if loader is None: logger.debug("Loader %r has been deleted, will not load data.", loader_id) return True loader.cache_status = CacheStatus.CACHING if loader.data_loader.load(executor): loader.cache_status = CacheStatus.CACHED return True return False except MindInsightException as ex: logger.warning("Data loader %r load data failed. " "Delete data_loader. 
Detail: %s", loader_id, ex) with self._loader_pool_mutex: self._delete_loader(loader_id) return True def _generate_loaders(self): loader_dict = {} for generator in self._loader_generators: loader_dict.update(generator.generate_loaders(self._loader_pool)) sorted_loaders = sorted(loader_dict.items(), key=lambda loader: loader[1].latest_update_time) latest_loaders = sorted_loaders[-MAX_DATA_LOADER_SIZE:] self._deal_loaders(latest_loaders) def _deal_loaders(self, latest_loaders): with self._loader_pool_mutex: for loader_id, loader in latest_loaders: if self._loader_pool.get(loader_id, None) is None: self._add_loader(loader) continue if self._loader_pool[loader_id].latest_update_time < loader.latest_update_time: self._update_loader_latest_update_time(loader_id, loader.latest_update_time) def _execute_load_data(self, executor): self._generate_loaders() loader_pool = self._get_snapshot_loader_pool() loaded = True for loader_id in loader_pool: loaded = self._execute_loader(loader_id, executor) and loaded return loaded def delete_train_job(self, train_id): with self._loader_pool_mutex: self._delete_loader(train_id) def list_tensors(self, train_id, tag): loader_pool = self._get_snapshot_loader_pool() if not self._is_loader_in_loader_pool(train_id, loader_pool): raise TrainJobNotExistError("Can not find the given train job in cache.") data_loader = loader_pool[train_id].data_loader tensors = [] try: events_data = data_loader.get_events_data() tensors = events_data.tensors(tag) except KeyError: error_msg = "Can not find any data in this train job by given tag." raise ParamValueError(error_msg) except AttributeError: logger.debug("Train job %r has been deleted or it has not loaded data, " "and set tags to empty list.", train_id) return tensors def _check_train_job_exist(self, train_id, loader_pool): is_exist = False if train_id in loader_pool: return for generator in self._loader_generators: if generator.check_train_job_exist(train_id): is_exist = True break if not is_exist: raise TrainJobNotExistError("Can not find the train job in data manager.") def _is_loader_in_loader_pool(self, train_id, loader_pool): if train_id in loader_pool: return True return False def _get_snapshot_loader_pool(self): with self._loader_pool_mutex: return dict(self._loader_pool) def get_train_job(self, train_id): self._check_train_job_exist(train_id, self._loader_pool) loader = self._get_loader(train_id) if loader is None: logger.info("No valid summary log in train job %s, or it is not in the cache.", train_id) return None train_job = loader.to_dict() train_job.pop('data_loader') plugin_data = {} for plugin_name in PluginNameEnum.list_members(): job = self.get_train_job_by_plugin(train_id, plugin_name=plugin_name) if job is None: plugin_data[plugin_name] = [] else: plugin_data[plugin_name] = job['tags'] train_job.update({DATAVISUAL_PLUGIN_KEY: plugin_data}) train_job_obj = CachedTrainJob(basic_info=None) train_job_obj.set(DATAVISUAL_CACHE_KEY, train_job) train_job_obj.cache_status = loader.cache_status return train_job_obj def _get_loader(self, train_id): loader = None with self._loader_pool_mutex: if self._is_loader_in_loader_pool(train_id, self._loader_pool): loader = self._loader_pool.get(train_id) return loader def _update_loader_latest_update_time(self, loader_id, latest_update_time=None): if latest_update_time is None: latest_update_time = time.time() self._loader_pool[loader_id].latest_update_time = latest_update_time def get_train_job_by_plugin(self, train_id, plugin_name): self._check_train_job_exist(train_id, 
self._loader_pool) loader = self._get_loader(train_id) if loader is None: logger.warning("No valid summary log in train job %s, " "or it is not in the cache.", train_id) return None name = loader.name data_loader = loader.data_loader tags = [] try: events_data = data_loader.get_events_data() tags = events_data.list_tags_by_plugin(plugin_name) except KeyError: logger.debug("Plugin name %r does not exist " "in train job %r, and set tags to empty list.", plugin_name, name) except AttributeError: logger.debug("Train job %r has been deleted or it has not loaded data, " "and set tags to empty list.", name) result = dict(id=train_id, name=name, tags=tags) return result class DataManager: def __init__(self, summary_base_dir): self._summary_base_dir = os.path.realpath(summary_base_dir) self._status = DataManagerStatus.INIT.value self._status_mutex = threading.Lock() self._detail_cache = _DetailCacheManager(self._summary_base_dir) self._brief_cache = _BriefCacheManager(self._summary_base_dir) self._load_data_lock = threading.Lock() @property def summary_base_dir(self): return self._summary_base_dir def start_load_data(self, reload_interval=0): logger.info("Start to load data") DataManager.check_reload_interval(reload_interval) thread = threading.Thread(target=self._load_data_in_thread, name='start_load_data_thread', args=(reload_interval,), daemon=True) thread.start() return thread @staticmethod def check_reload_interval(reload_interval): if not isinstance(reload_interval, int): raise ParamValueError("The value of reload interval should be integer.") if reload_interval < 0: raise ParamValueError("The value of reload interval should be >= 0.") def _load_data_in_thread(self, reload_interval): if self._load_data_lock.locked(): return with self._load_data_lock: while True: try: exception_wrapper(self._load_data)() except UnknownError as exc: logger.warning(exc.message) finally: self._status = DataManagerStatus.DONE.value if not reload_interval: break time.sleep(reload_interval)
Apache License 2.0
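The heart of _load_data above is a check-and-set guard under a mutex so concurrent callers cannot both start loading. A stripped-down sketch of that idiom, with illustrative names that are not MindInsight's API:

import threading
from enum import Enum

class Status(Enum):
    INIT = "INIT"
    LOADING = "LOADING"
    DONE = "DONE"

class Loader:
    def __init__(self):
        self._status = Status.INIT
        self._status_mutex = threading.Lock()

    def load(self):
        # Check-and-set under the lock: only one caller wins the LOADING slot.
        with self._status_mutex:
            if self._status is Status.LOADING:
                return  # another thread is already loading; ignore this call
            self._status = Status.LOADING
        try:
            pass  # ... the actual (slow) loading happens outside the lock ...
        finally:
            with self._status_mutex:
                self._status = Status.DONE

Loader().load()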
rocheio/wiki-table-scrape
wikitablescrape/parse.py
csv_filename
python
def csv_filename(text):
    text = text.lower()
    text = re.sub(r"[,|'|\"/]", "", text)
    text = re.sub(r"[\(|\)|-]", " ", text)
    joined = "_".join(text.split())
    if len(joined) > MAX_FILENAME_LEN:
        joined = joined[: joined.rindex("_", 0, MAX_FILENAME_LEN)]
    return joined + ".csv"
Return a normalized filename from a table header for outputting CSV.
https://github.com/rocheio/wiki-table-scrape/blob/86796c7373bc280d509513bcae86354e18bd95da/wikitablescrape/parse.py#L231-L239
import csv import logging import os import re import sys import bs4 LOGGER = logging.getLogger(__name__) MAX_FILENAME_LEN = os.getenv("MAX_FILENAME_LEN", 250) class Error(Exception): pass class RowspanCounter: def __init__(self, tag): self.rows_left = int(tag["rowspan"]) - 1 del tag["rowspan"] self.value = tag def pop(self): self.rows_left -= 1 return self.value class HtmlTable: def __init__(self, tag): self.tag = tag def parse_header(self): caption = self.tag.find("caption") if caption: return clean_cell(caption) h2 = self.tag.findPrevious("h2") if h2: header = clean_cell(h2) h3 = self.tag.findPrevious("h3") if h3: header += f" - {clean_cell(h3)}" return header return None def parse_rows(self): saved_rowspans = [] for row in self.tag.findAll("tr"): cells = row.findAll(["th", "td"]) for index, cell in reverse_enum(cells): if cell.has_attr("colspan"): for _ in range(int(cell["colspan"]) - 1): cells.insert(index, cell) if len(saved_rowspans) == 0: saved_rowspans = [None for _ in cells] elif len(cells) != len(saved_rowspans): for index, rowspan_data in enumerate(saved_rowspans): if not rowspan_data or not rowspan_data.rows_left: continue cells.insert(index, rowspan_data.pop()) for index, cell in enumerate(cells): if cell.has_attr("rowspan"): saved_rowspans[index] = RowspanCounter(cell) cleaned = [clean_cell(cell) for cell in cells] columns_missing = len(saved_rowspans) - len(cleaned) if columns_missing: cleaned += [""] * columns_missing yield cleaned def write_to_file(self, path): with open(path, mode="w", newline="", encoding="utf-8") as output: self.write(output) def write(self, output=sys.stdout): writer = csv.writer(output, quoting=csv.QUOTE_ALL, lineterminator="\n") for row in self.parse_rows(): writer.writerow(row) class Parser: def __init__(self, text): self.tables = [HtmlTable(tag) for tag in get_tables_from_html(text)] def write_to_dir(self, dir): os.makedirs(dir, exist_ok=True) for index, table in enumerate(self.tables): filename = f"table_{index+1}" header = table.parse_header() if header: filename += "_" + header filepath = os.path.join(dir, csv_filename(filename)) LOGGER.info(f"Writing table {index+1} to {filepath}") table.write_to_file(filepath) def find_table_by_header(self, search): all_headers = [] matches = [] for table in self.tables: header = table.parse_header() if not header: continue all_headers += [header] needle = re.sub(r"[^a-z0-9]", "", search.lower()) haystack = re.sub(r"[^a-z0-9]", "", header.lower()) if needle == haystack: return table if needle in haystack: matches += [table] if len(matches) == 1: return matches[0] if len(matches) > 1: raise Error( f"{len(matches)} matches for '{search}', specify further from: {all_headers}" ) raise Error( f"no matches found for '{search}', specify further from: {all_headers}" ) def get_tables_from_html(text): soup = bs4.BeautifulSoup(text, "lxml") tables = soup.findAll("table") tables = [tbl for tbl in tables if len(tbl.findAll("tr")) > 1] return tables def clean_cell(cell): to_remove = ( {"name": "sup", "class": "reference"}, {"name": "sup", "class": "sortkey"}, {"name": "span", "class": "mw-editsection"}, ) for definition in to_remove: for tag in cell.findAll(**definition): tag.extract() linebreaks = cell.findAll("br") if linebreaks: for linebreak in linebreaks: linebreak.replace_with(new_span(" ")) tags = cell.findAll() if len(tags) == 1 and tags[0].name == "img": return spaces_only(tags[0]["alt"]) tags = [tag for tag in cell.findAll(text=True) if not tag.startswith("[")] return spaces_only("".join(tags)) def 
spaces_only(text): return re.sub(r"\s+", " ", text).strip() def new_span(text): return bs4.BeautifulSoup(f"<span>{text}</span>", "lxml").html.body.span def reverse_enum(iterable): return zip(range(len(iterable) - 1, -1, -1), reversed(iterable))
MIT License
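A quick demonstration of what csv_filename produces for a typical Wikipedia table header; the function is restated locally so the example runs standalone, with MAX_FILENAME_LEN assumed to be 250 as in the module above:

import re

MAX_FILENAME_LEN = 250

def csv_filename(text):
    text = text.lower()
    text = re.sub(r"[,|'|\"/]", "", text)
    text = re.sub(r"[\(|\)|-]", " ", text)
    joined = "_".join(text.split())
    if len(joined) > MAX_FILENAME_LEN:
        joined = joined[: joined.rindex("_", 0, MAX_FILENAME_LEN)]
    return joined + ".csv"

print(csv_filename("List of countries (by population)"))
# -> list_of_countries_by_population.csv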
explosion/thinc
thinc/layers/strings2arrays.py
strings2arrays
python
def strings2arrays() -> Model[InT, OutT]:
    return Model("strings2arrays", forward)
Transform a sequence of string sequences to a list of arrays.
https://github.com/explosion/thinc/blob/a9b220da045619d12ef3c859e5e947d56cf9064e/thinc/layers/strings2arrays.py#L14-L16
from typing import Tuple, List, Callable, Sequence from murmurhash import hash_unicode from ..model import Model from ..config import registry from ..types import Ints2d InT = Sequence[Sequence[str]] OutT = List[Ints2d] @registry.layers("strings2arrays.v1")
MIT License
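A hedged usage sketch for the layer above: it has no weights, so it can be used directly for prediction. The thinc.api import path is assumed from thinc's usual top-level re-exports, and the exact array shape and dtype are version-dependent:

from thinc.api import strings2arrays

model = strings2arrays()
seqs = [["give", "it", "to", "me"], ["thinc"]]
arrays = model.predict(seqs)

# One integer array per input sequence; each value is derived from the
# murmur hash of the corresponding string (typically shape (len(seq), 1)).
for seq, arr in zip(seqs, arrays):
    print(len(seq), arr.shape)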
hunch/hunch-gift-app
django/utils/text.py
smart_split
python
def smart_split(text):
    text = force_unicode(text)
    for bit in smart_split_re.finditer(text):
        yield bit.group(0)
r""" Generator that splits a string by spaces, leaving quoted phrases together. Supports both single and double quotes, and supports escaping quotes with backslashes. In the output, strings will keep their initial and trailing quote marks and escaped quotes will remain escaped (the results can then be further processed with unescape_string_literal()). >>> list(smart_split(r'This is "a person\'s" test.')) [u'This', u'is', u'"a person\\\'s"', u'test.'] >>> list(smart_split(r"Another 'person\'s' test.")) [u'Another', u"'person\\'s'", u'test.'] >>> list(smart_split(r'A "\"funky\" style" test.')) [u'A', u'"\\"funky\\" style"', u'test.']
https://github.com/hunch/hunch-gift-app/blob/8c7cad24cc0d9900deb4175e6b768c64a3d7adcf/django/utils/text.py#L220-L237
import re from django.utils.encoding import force_unicode from django.utils.functional import allow_lazy from django.utils.translation import ugettext_lazy from htmlentitydefs import name2codepoint capfirst = lambda x: x and force_unicode(x)[0].upper() + force_unicode(x)[1:] capfirst = allow_lazy(capfirst, unicode) def wrap(text, width): text = force_unicode(text) def _generator(): it = iter(text.split(' ')) word = it.next() yield word pos = len(word) - word.rfind('\n') - 1 for word in it: if "\n" in word: lines = word.split('\n') else: lines = (word,) pos += len(lines[0]) + 1 if pos > width: yield '\n' pos = len(lines[-1]) else: yield ' ' if len(lines) > 1: pos = len(lines[-1]) yield word return u''.join(_generator()) wrap = allow_lazy(wrap, unicode) def truncate_words(s, num, end_text='...'): s = force_unicode(s) length = int(num) words = s.split() if len(words) > length: words = words[:length] if not words[-1].endswith(end_text): words.append(end_text) return u' '.join(words) truncate_words = allow_lazy(truncate_words, unicode) def truncate_html_words(s, num, end_text='...'): s = force_unicode(s) length = int(num) if length <= 0: return u'' html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input') re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U) re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>') pos = 0 end_text_pos = 0 words = 0 open_tags = [] while words <= length: m = re_words.search(s, pos) if not m: break pos = m.end(0) if m.group(1): words += 1 if words == length: end_text_pos = pos continue tag = re_tag.match(m.group(0)) if not tag or end_text_pos: continue closing_tag, tagname, self_closing = tag.groups() tagname = tagname.lower() if self_closing or tagname in html4_singlets: pass elif closing_tag: try: i = open_tags.index(tagname) except ValueError: pass else: open_tags = open_tags[i+1:] else: open_tags.insert(0, tagname) if words <= length: return s out = s[:end_text_pos] if end_text: out += ' ' + end_text for tag in open_tags: out += '</%s>' % tag return out truncate_html_words = allow_lazy(truncate_html_words, unicode) def get_valid_filename(s): s = force_unicode(s).strip().replace(' ', '_') return re.sub(r'(?u)[^-\w.]', '', s) get_valid_filename = allow_lazy(get_valid_filename, unicode) def get_text_list(list_, last_word=ugettext_lazy(u'or')): if len(list_) == 0: return u'' if len(list_) == 1: return force_unicode(list_[0]) return u'%s %s %s' % (', '.join([force_unicode(i) for i in list_][:-1]), force_unicode(last_word), force_unicode(list_[-1])) get_text_list = allow_lazy(get_text_list, unicode) def normalize_newlines(text): return force_unicode(re.sub(r'\r\n|\r|\n', '\n', text)) normalize_newlines = allow_lazy(normalize_newlines, unicode) def recapitalize(text): text = force_unicode(text).lower() capsRE = re.compile(r'(?:^|(?<=[\.\?\!] 
))([a-z])') text = capsRE.sub(lambda x: x.group(1).upper(), text) return text recapitalize = allow_lazy(recapitalize) def phone2numeric(phone): letters = re.compile(r'[A-Z]', re.I) char2number = lambda m: {'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9', }.get(m.group(0).lower()) return letters.sub(char2number, phone) phone2numeric = allow_lazy(phone2numeric) def compress_string(s): import cStringIO, gzip zbuf = cStringIO.StringIO() zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf) zfile.write(s) zfile.close() return zbuf.getvalue() ustring_re = re.compile(u"([\u0080-\uffff])") def javascript_quote(s, quote_double_quotes=False): def fix(match): return r"\u%04x" % ord(match.group(1)) if type(s) == str: s = s.decode('utf-8') elif type(s) != unicode: raise TypeError(s) s = s.replace('\\', '\\\\') s = s.replace('\r', '\\r') s = s.replace('\n', '\\n') s = s.replace('\t', '\\t') s = s.replace("'", "\\'") if quote_double_quotes: s = s.replace('"', '&quot;') return str(ustring_re.sub(fix, s)) javascript_quote = allow_lazy(javascript_quote, unicode) smart_split_re = re.compile(r""" ((?: [^\s'"]* (?: (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*') [^\s'"]* )+ ) | \S+) """, re.VERBOSE)
MIT License
angr/angr
angr/analyses/cfg/cfg_emulated.py
CFGEmulated._job_queue_empty
python
def _job_queue_empty(self):
    self._iteratively_clean_pending_exits()

    while self._pending_jobs:
        pending_job = self._get_one_pending_job()
        if pending_job is None:
            continue
        self._insert_job(pending_job)
        self._register_analysis_job(pending_job.func_addr, pending_job)
        break
A callback method called when the job queue is empty. :return: None
https://github.com/angr/angr/blob/94de0f468df0c0d27428301dae93d94f935ade9b/angr/analyses/cfg/cfg_emulated.py#L926-L943
import itertools import logging import sys from collections import defaultdict from functools import reduce from typing import Dict import claripy import networkx import pyvex from archinfo import ArchARM from ... import BP, BP_BEFORE, BP_AFTER, SIM_PROCEDURES, procedures from ... import options as o from ...engines.procedure import ProcedureEngine from ...exploration_techniques.loop_seer import LoopSeer from ...exploration_techniques.slicecutor import Slicecutor from ...exploration_techniques.explorer import Explorer from ...exploration_techniques.lengthlimiter import LengthLimiter from ...errors import AngrCFGError, AngrError, AngrSkipJobNotice, SimError, SimValueError, SimSolverModeError, SimFastPathError, SimIRSBError, AngrExitError, SimEmptyCallStackError from ...sim_state import SimState from ...state_plugins.callstack import CallStack from ...state_plugins.sim_action import SimActionData from ...knowledge_plugins.cfg import CFGENode, IndirectJump from ...utils.constants import DEFAULT_STATEMENT from ..forward_analysis import ForwardAnalysis from .cfg_base import CFGBase from .cfg_job_base import BlockID, CFGJobBase from .cfg_utils import CFGUtils l = logging.getLogger(name=__name__) class CFGJob(CFGJobBase): def __init__(self, *args, **kwargs): super(CFGJob, self).__init__(*args, **kwargs) if self.jumpkind is None: self.jumpkind = 'Ijk_Boring' if self.state.history.jumpkind is None else self.state.history.jumpkind self.call_stack_suffix = None self.current_function = None self.cfg_node = None self.sim_successors = None self.exception_info = None self.successor_status = None self.extra_info = None @property def block_id(self): if self._block_id is None: self._block_id = CFGEmulated._generate_block_id( self.call_stack.stack_suffix(self._context_sensitivity_level), self.addr, self.is_syscall) return self._block_id @property def is_syscall(self): return self.jumpkind is not None and self.jumpkind.startswith('Ijk_Sys') def __hash__(self): return hash(self.block_id) class PendingJob: def __init__(self, caller_func_addr, returning_source, state, src_block_id, src_exit_stmt_idx, src_exit_ins_addr, call_stack): self.caller_func_addr = caller_func_addr self.returning_source = returning_source self.state = state self.src_block_id = src_block_id self.src_exit_stmt_idx = src_exit_stmt_idx self.src_exit_ins_addr = src_exit_ins_addr self.call_stack = call_stack def __repr__(self): return "<PendingJob to %s, from function %s>" % (self.state.ip, hex( self.returning_source) if self.returning_source is not None else 'Unknown') def __hash__(self): return hash((self.caller_func_addr, self.returning_source, self.src_block_id, self.src_exit_stmt_idx, self.src_exit_ins_addr ) ) def __eq__(self, other): if not isinstance(other, PendingJob): return False return self.caller_func_addr == other.caller_func_addr and self.returning_source == other.returning_source and self.src_block_id == other.src_block_id and self.src_exit_stmt_idx == other.src_exit_stmt_idx and self.src_exit_ins_addr == other.src_exit_ins_addr class CFGEmulated(ForwardAnalysis, CFGBase): tag = "CFGEmulated" def __init__(self, context_sensitivity_level=1, start=None, avoid_runs=None, enable_function_hints=False, call_depth=None, call_tracing_filter=None, initial_state=None, starts=None, keep_state=False, indirect_jump_target_limit=100000, resolve_indirect_jumps=True, enable_advanced_backward_slicing=False, enable_symbolic_back_traversal=False, indirect_jump_resolvers=None, additional_edges=None, no_construct=False, normalize=False, 
max_iterations=1, address_whitelist=None, base_graph=None, iropt_level=None, max_steps=None, state_add_options=None, state_remove_options=None, model=None, ): ForwardAnalysis.__init__(self, order_jobs=True if base_graph is not None else False) CFGBase.__init__(self, 'emulated', context_sensitivity_level, normalize=normalize, resolve_indirect_jumps=resolve_indirect_jumps, indirect_jump_resolvers=indirect_jump_resolvers, indirect_jump_target_limit=indirect_jump_target_limit, model=model, ) if start is not None: l.warning("`start` is deprecated. Please consider using `starts` instead in your code.") self._starts = (start,) else: if isinstance(starts, (list, set)): self._starts = tuple(starts) elif isinstance(starts, tuple) or starts is None: self._starts = starts else: raise AngrCFGError('Unsupported type of the `starts` argument.') if enable_advanced_backward_slicing or enable_symbolic_back_traversal: l.warning("`advanced backward slicing` and `symbolic back traversal` are deprecated.") l.warning("Please use `resolve_indirect_jumps` to resolve indirect jumps using different resolvers instead.") self._iropt_level = iropt_level self._avoid_runs = avoid_runs self._enable_function_hints = enable_function_hints self._call_depth = call_depth self._call_tracing_filter = call_tracing_filter self._initial_state = initial_state self._keep_state = keep_state self._advanced_backward_slicing = enable_advanced_backward_slicing self._enable_symbolic_back_traversal = enable_symbolic_back_traversal self._additional_edges = additional_edges if additional_edges else {} self._max_steps = max_steps self._state_add_options = state_add_options if state_add_options is not None else set() self._state_remove_options = state_remove_options if state_remove_options is not None else set() self._state_add_options.update([o.SYMBOL_FILL_UNCONSTRAINED_MEMORY, o.SYMBOL_FILL_UNCONSTRAINED_REGISTERS]) if self._enable_function_hints and o.TRACK_MEMORY_ACTIONS not in self._state_add_options: self._state_add_options.add(o.TRACK_MEMORY_ACTIONS) self._symbolic_function_initial_state = {} self._function_input_states = None self._unresolvable_runs = set() self._quasi_topological_order = {} self._entry_points = [] self._max_iterations = max_iterations self._address_whitelist = set(address_whitelist) if address_whitelist is not None else None self._base_graph = base_graph self._node_addr_visiting_order = [ ] if self._base_graph: sorted_nodes = CFGUtils.quasi_topological_sort_nodes(self._base_graph) self._node_addr_visiting_order = [ n.addr for n in sorted_nodes ] self._sanitize_parameters() self._executable_address_ranges = [] self._executable_address_ranges = self._executable_memory_regions() self._function_input_states = {} self._loop_back_edges = [] self._overlapped_loop_headers = [] self._pending_function_hints = set() self._edge_map = defaultdict(list) self._model._iropt_level = self._iropt_level self._start_keys = [ ] self._pending_jobs = defaultdict(list) self._traced_addrs = defaultdict(lambda: defaultdict(int)) self._block_artifacts = {} self._analyzed_addrs = set() self._non_returning_functions = set() self._pending_edges = defaultdict(list) if not no_construct: self._initialize_cfg() self._analyze() def copy(self): new_cfg = CFGEmulated.__new__(CFGEmulated) super(CFGEmulated, self).make_copy(new_cfg) new_cfg._indirect_jump_target_limit = self._indirect_jump_target_limit new_cfg.named_errors = dict(self.named_errors) new_cfg.errors = list(self.errors) new_cfg._fail_fast = self._fail_fast new_cfg._max_steps = self._max_steps 
new_cfg.project = self.project new_cfg._edge_map = self._edge_map.copy() new_cfg._loop_back_edges = self._loop_back_edges[::] new_cfg._executable_address_ranges = self._executable_address_ranges[::] new_cfg._unresolvable_runs = self._unresolvable_runs.copy() new_cfg._overlapped_loop_headers = self._overlapped_loop_headers[::] new_cfg._thumb_addrs = self._thumb_addrs.copy() new_cfg._keep_state = self._keep_state return new_cfg def resume(self, starts=None, max_steps=None): self._should_abort = False self._starts = starts self._max_steps = max_steps if self._starts is None: self._starts = [ ] if self._starts: self._sanitize_starts() self._analyze() def remove_cycles(self): if not self._loop_back_edges: l.debug("Detecting loops...") self._detect_loops() l.debug("Removing cycles...") l.debug("There are %d loop back edges.", len(self._loop_back_edges)) l.debug("And there are %d overlapping loop headers.", len(self._overlapped_loop_headers)) for b1, b2 in self._loop_back_edges: if self._graph.has_edge(b1, b2): l.debug("Removing loop back edge %s -> %s", b1, b2) self._graph.remove_edge(b1, b2) for b in self._overlapped_loop_headers: successors = self._graph.successors(b) for succ in successors: self._graph.remove_edge(b, succ) l.debug("Removing partial loop header edge %s -> %s", b, succ) def downsize(self): for cfg_node in self._nodes.values(): cfg_node.downsize() def unroll_loops(self, max_loop_unrolling_times): if not isinstance(max_loop_unrolling_times, int) or max_loop_unrolling_times < 0: raise AngrCFGError('Max loop unrolling times must be set to an integer greater than or equal to 0 if ' + 'loop unrolling is enabled.') def _unroll(graph, loop): for back_edge in loop.continue_edges: loop_body_addrs = {n.addr for n in loop.body_nodes} src_blocknode = back_edge[0] dst_blocknode = back_edge[1] for src in self.get_all_nodes(src_blocknode.addr): for dst in graph.successors(src): if dst.addr != dst_blocknode.addr: continue new_dst = dst.copy() new_dst.looping_times = dst.looping_times + 1 if (new_dst not in graph and new_dst.looping_times <= max_loop_unrolling_times ): dst_successors = graph.successors(dst) edge_data = graph.get_edge_data(src, dst) graph.add_edge(src, new_dst, **edge_data) for ds in dst_successors: if ds.looping_times == 0 and ds.addr not in loop_body_addrs: edge_data = graph.get_edge_data(dst, ds) graph.add_edge(new_dst, ds, **edge_data) graph.remove_edge(src, dst) self._detect_loops(loop_callback=_unroll) def force_unroll_loops(self, max_loop_unrolling_times): if not isinstance(max_loop_unrolling_times, int) or max_loop_unrolling_times < 0: raise AngrCFGError('Max loop unrolling times must be set to an integer greater than or equal to 0 if ' + 'loop unrolling is enabled.') loop_backedges = [] start = self._starts[0] if isinstance(start, tuple): start, _ = start start_node = self.get_any_node(start) if start_node is None: raise AngrCFGError('Cannot find start node when trying to unroll loops. 
The CFG might be empty.') graph_copy = networkx.DiGraph(self.graph) while True: cycles_iter = networkx.simple_cycles(graph_copy) try: cycle = next(cycles_iter) except StopIteration: break loop_backedge = (None, None) for n in networkx.dfs_preorder_nodes(graph_copy, source=start_node): if n in cycle: idx = cycle.index(n) if idx == 0: loop_backedge = (cycle[-1], cycle[idx]) else: loop_backedge = (cycle[idx - 1], cycle[idx]) break if loop_backedge not in loop_backedges: loop_backedges.append(loop_backedge) end_nodes = [n for n in graph_copy.nodes() if graph_copy.out_degree(n) == 0] new_end_node = "end_node" if not end_nodes: cycles = sorted(networkx.simple_cycles(graph_copy), key=len) first_cycle = cycles[0] if len(first_cycle) == 1: graph_copy.remove_edge(first_cycle[0], first_cycle[0]) else: graph_copy.remove_edge(first_cycle[0], first_cycle[1]) end_nodes = [n for n in graph_copy.nodes() if graph_copy.out_degree(n) == 0] for en in end_nodes: graph_copy.add_edge(en, new_end_node) graph_copy.remove_node(new_end_node) src, dst = loop_backedge if graph_copy.has_edge(src, dst): new_dst = dst.copy() new_dst.looping_times = dst.looping_times + 1 if ( new_dst not in graph_copy and new_dst.looping_times <= max_loop_unrolling_times): dst_successors = list(graph_copy.successors(dst)) edge_data = graph_copy.get_edge_data(src, dst) graph_copy.add_edge(src, new_dst, **edge_data) for ds in dst_successors: if ds.looping_times == 0 and ds not in cycle: edge_data = graph_copy.get_edge_data(dst, ds) graph_copy.add_edge(new_dst, ds, **edge_data) graph_copy.remove_edge(src, dst) self._loop_back_edges = loop_backedges self.model.graph = graph_copy def immediate_dominators(self, start, target_graph=None): return self._immediate_dominators(start, target_graph=target_graph, reverse_graph=False) def immediate_postdominators(self, end, target_graph=None): return self._immediate_dominators(end, target_graph=target_graph, reverse_graph=True) def remove_fakerets(self): fakeret_edges = [ (src, dst) for src, dst, data in self.graph.edges(data=True) if data['jumpkind'] == 'Ijk_FakeRet' ] self.graph.remove_edges_from(fakeret_edges) def get_topological_order(self, cfg_node): if not self._quasi_topological_order: self._quasi_topological_sort() return self._quasi_topological_order.get(cfg_node, None) def get_subgraph(self, starting_node, block_addresses): graph = networkx.DiGraph() if starting_node not in self.graph: raise AngrCFGError('get_subgraph(): the specified "starting_node" %s does not exist in the current CFG.' 
% starting_node ) addr_set = set(block_addresses) graph.add_node(starting_node) queue = [ starting_node ] while queue: node = queue.pop() for _, dst, data in self.graph.out_edges([node], data=True): if dst not in graph and dst.addr in addr_set: graph.add_edge(node, dst, **data) queue.append(dst) cfg = self.copy() cfg._graph = graph cfg._starts = (starting_node.addr, ) return cfg def get_function_subgraph(self, start, max_call_depth=None): start_node = self.get_any_node(start) node_wrapper = (start_node, 0) stack = [node_wrapper] traversed_nodes = {start_node} subgraph_nodes = set([start_node]) while stack: nw = stack.pop() n, call_depth = nw[0], nw[1] edges = self.graph.out_edges(n, data=True) for _, dst, data in edges: if dst not in traversed_nodes: traversed_nodes.add(dst) if data['jumpkind'] == 'Ijk_Call': if max_call_depth is None or (max_call_depth is not None and call_depth < max_call_depth): subgraph_nodes.add(dst) new_nw = (dst, call_depth + 1) stack.append(new_nw) elif data['jumpkind'] == 'Ijk_Ret': if call_depth > 0: subgraph_nodes.add(dst) new_nw = (dst, call_depth - 1) stack.append(new_nw) else: subgraph_nodes.add(dst) new_nw = (dst, call_depth) stack.append(new_nw) subgraph = self.graph.subgraph(subgraph_nodes).copy() subcfg = self.copy() subcfg._graph = subgraph subcfg._starts = (start,) return subcfg @property def context_sensitivity_level(self): return self._context_sensitivity_level def __setstate__(self, s): self.project = s['project'] self.indirect_jumps: Dict[int,IndirectJump] = s['indirect_jumps'] self._loop_back_edges = s['_loop_back_edges'] self._thumb_addrs = s['_thumb_addrs'] self._unresolvable_runs = s['_unresolvable_runs'] self._executable_address_ranges = s['_executable_address_ranges'] self._iropt_level = s['_iropt_level'] self._model = s['_model'] def __getstate__(self): s = { 'project': self.project, "indirect_jumps": self.indirect_jumps, '_loop_back_edges': self._loop_back_edges, '_nodes_by_addr': self._nodes_by_addr, '_thumb_addrs': self._thumb_addrs, '_unresolvable_runs': self._unresolvable_runs, '_executable_address_ranges': self._executable_address_ranges, '_iropt_level': self._iropt_level, '_model': self._model } return s @property def graph(self): return self._model.graph @property def unresolvables(self): return self._unresolvable_runs @property def deadends(self): if self.graph is None: raise AngrCFGError('CFG hasn\'t been generated yet.') deadends = [i for i in self.graph if self.graph.out_degree(i) == 0] return deadends def _sanitize_parameters(self): if isinstance(self._additional_edges, (list, set, tuple)): new_dict = defaultdict(list) for s, d in self._additional_edges: new_dict[s].append(d) self._additional_edges = new_dict elif isinstance(self._additional_edges, dict): pass else: raise AngrCFGError('Additional edges can only be a list, set, tuple, or a dict.') if self._advanced_backward_slicing and self._enable_symbolic_back_traversal: raise AngrCFGError('Advanced backward slicing and symbolic back traversal cannot both be enabled.') if self._advanced_backward_slicing and not self._keep_state: raise AngrCFGError('Keep state must be enabled if advanced backward slicing is enabled.') self._avoid_runs = [ ] if self._avoid_runs is None else self._avoid_runs if not isinstance(self._avoid_runs, (list, set)): raise AngrCFGError('"avoid_runs" must either be None, or a list or a set.') self._sanitize_starts() def _sanitize_starts(self): if self._starts is None: self._starts = ((self.project.entry, None),) else: new_starts = [ ] for item in 
self._starts: if isinstance(item, tuple): if len(item) != 2: raise AngrCFGError('Unsupported item in "starts": %s' % str(item)) new_starts.append(item) elif isinstance(item, int): new_starts.append((item, None)) elif isinstance(item, SimState): new_starts.append(item) else: raise AngrCFGError('Unsupported item type in "starts": %s' % type(item)) self._starts = new_starts if not self._starts: raise AngrCFGError("At least one start must be provided") def _job_key(self, job): return job.block_id def _job_sorting_key(self, job): if self._base_graph is None: return 0 MAX_JOBS = 1000000 if job.addr not in self._node_addr_visiting_order: return MAX_JOBS return self._node_addr_visiting_order.index(job.addr) def _pre_analysis(self): for item in self._starts: callstack = None if isinstance(item, tuple): ip = item[0] state = self._create_initial_state(item[0], item[1]) elif isinstance(item, SimState): state = item.copy() ip = state.solver.eval_one(state.ip) self._reset_state_mode(state, 'fastpath') else: raise AngrCFGError('Unsupported CFG start type: %s.' % str(type(item))) self._symbolic_function_initial_state[ip] = state path_wrapper = CFGJob(ip, state, self._context_sensitivity_level, None, None, call_stack=callstack) key = path_wrapper.block_id if key not in self._start_keys: self._start_keys.append(key) self._insert_job(path_wrapper) self._register_analysis_job(path_wrapper.func_addr, path_wrapper) def _intra_analysis(self): if self._pending_function_hints: self._process_hints(self._analyzed_addrs)
BSD 2-Clause Simplified License
san089/optimizing-public-transportation
producers/models/line.py
Line.run
python
def run(self, timestamp, time_step):
    self._advance_turnstiles(timestamp, time_step)
    self._advance_trains()
Advances trains between stations in the simulation. Runs turnstiles.
https://github.com/san089/optimizing-public-transportation/blob/8bf4fe163b76ea1e91afae53b9330e369e6683a7/producers/models/line.py#L68-L71
import collections from enum import IntEnum import logging from models import Station, Train logger = logging.getLogger(__name__) class Line: colors = IntEnum("colors", "blue green red", start=0) num_directions = 2 def __init__(self, color, station_data, num_trains=10): self.color = color self.num_trains = num_trains self.stations = self._build_line_data(station_data) self.num_stations = len(self.stations) - 1 self.trains = self._build_trains() def _build_line_data(self, station_df): stations = station_df["station_name"].unique() station_data = station_df[station_df["station_name"] == stations[0]] line = [ Station(station_data["station_id"].unique()[0], stations[0], self.color) ] prev_station = line[0] for station in stations[1:]: station_data = station_df[station_df["station_name"] == station] new_station = Station( station_data["station_id"].unique()[0], station, self.color, prev_station, ) prev_station.dir_b = new_station prev_station = new_station line.append(new_station) return line def _build_trains(self): trains = [] curr_loc = 0 b_dir = True for train_id in range(self.num_trains): tid = str(train_id).zfill(3) train = Train( f"{self.color.name[0].upper()}L{tid}", Train.status.in_service ) trains.append(train) if b_dir: self.stations[curr_loc].arrive_b(train, None, None) else: self.stations[curr_loc].arrive_a(train, None, None) curr_loc, b_dir = self._get_next_idx(curr_loc, b_dir) return trains
MIT License
canonical/charmcraft
charmcraft/utils.py
get_templates_environment
python
def get_templates_environment(templates_dir):
    templates_dir = os.path.join("templates", templates_dir)
    if getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"):
        logger.debug("Bundle directory: %s", sys._MEIPASS)
        loader = FileSystemLoader(os.path.join(sys._MEIPASS, templates_dir))
    else:
        loader = PackageLoader("charmcraft", templates_dir)

    env = Environment(
        loader=loader,
        autoescape=False,
        keep_trailing_newline=True,
        optimized=False,
        undefined=StrictUndefined,
    )
    return env
Create and return a Jinja environment to deal with the templates.
https://github.com/canonical/charmcraft/blob/70ba8e28b303ba1d4a3c0b563c2c528f8cdf8526/charmcraft/utils.py#L79-L99
import logging import os import pathlib import platform import sys from collections import namedtuple from dataclasses import dataclass from stat import S_IRGRP, S_IROTH, S_IRUSR, S_IXGRP, S_IXOTH, S_IXUSR import yaml from jinja2 import Environment, FileSystemLoader, PackageLoader, StrictUndefined from charmcraft.cmdbase import CommandError from charmcraft.env import is_charmcraft_running_in_managed_mode logger = logging.getLogger("charmcraft.commands") OSPlatform = namedtuple("OSPlatform", "system release machine") S_IXALL = S_IXUSR | S_IXGRP | S_IXOTH S_IRALL = S_IRUSR | S_IRGRP | S_IROTH ARCH_TRANSLATIONS = { "aarch64": "arm64", "armv7l": "armhf", "i686": "i386", "ppc": "powerpc", "ppc64le": "ppc64el", "x86_64": "amd64", "AMD64": "amd64", } def make_executable(fh): fileno = fh.fileno() mode = os.fstat(fileno).st_mode mode_r = mode & S_IRALL mode_x = mode_r >> 2 mode = mode | mode_x os.fchmod(fileno, mode) def load_yaml(fpath): if not fpath.is_file(): logger.debug("Couldn't find config file %r", str(fpath)) return try: with fpath.open("rb") as fh: content = yaml.safe_load(fh) except (yaml.error.YAMLError, OSError) as err: logger.error("Failed to read/parse config file %r: %r", str(fpath), err) return return content
Apache License 2.0
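A small sketch of how an environment configured like the one above is typically consumed. It swaps in an in-memory DictLoader so the example runs without charmcraft's packaged template data, and the template name 'charm.py.j2' is made up for illustration:

from jinja2 import DictLoader, Environment, StrictUndefined

env = Environment(
    loader=DictLoader({"charm.py.j2": "name: {{ name }}\n"}),
    autoescape=False,
    keep_trailing_newline=True,
    optimized=False,
    undefined=StrictUndefined,
)

template = env.get_template("charm.py.j2")
print(template.render(name="my-charm"))  # -> name: my-charm
# With StrictUndefined, rendering without `name` raises UndefinedError
# instead of silently producing an empty string.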
google/openhtf
openhtf/output/callbacks/mfg_inspector.py
MfgInspector.from_json
python
def from_json(cls, json_data):
    return cls(
        user=json_data['client_email'],
        keydata=json_data['private_key'],
        token_uri=json_data['token_uri'])
Create an uploader given (parsed) JSON data.

Note that this is a JSON-formatted key file downloaded from Google when the
service account key is created, *NOT* a json-encoded
oauth2client.client.SignedJwtAssertionCredentials object.

Args:
  json_data: Dict containing the loaded JSON key data.

Returns:
  A MfgInspectorCallback with credentials.
https://github.com/google/openhtf/blob/4646aa6b9ba67532ce7e8743ce16d7bd4369ad3d/openhtf/output/callbacks/mfg_inspector.py#L178-L194
import copy import json import logging import threading import time from typing import Any, Dict import zlib import httplib2 import oauth2client.client from openhtf import util from openhtf.core import test_record from openhtf.output import callbacks from openhtf.output.proto import guzzle_pb2 from openhtf.output.proto import mfg_event_pb2 from openhtf.output.proto import test_runs_converter import six from six.moves import range class UploadFailedError(Exception): class InvalidTestRunError(Exception): def _send_mfg_inspector_request(envelope_data, credentials, destination_url): logging.info('Uploading result...') http = httplib2.Http() if credentials.access_token_expired: credentials.refresh(http) credentials.authorize(http) resp, content = http.request(destination_url, 'POST', envelope_data) try: result = json.loads(content) except Exception: logging.warning('Upload failed with response %s: %s', resp, content) raise UploadFailedError(resp, content) if resp.status == 200: return result message = '%s: %s' % (result.get('error', 'UNKNOWN_ERROR'), result.get('message')) if resp.status == 400: raise InvalidTestRunError(message) else: raise UploadFailedError(message) def send_mfg_inspector_data(inspector_proto, credentials, destination_url, payload_type): envelope = guzzle_pb2.TestRunEnvelope() envelope.payload = zlib.compress(inspector_proto.SerializeToString()) envelope.payload_type = payload_type envelope_data = envelope.SerializeToString() for _ in range(5): try: result = _send_mfg_inspector_request(envelope_data, credentials, destination_url) return result except UploadFailedError: time.sleep(1) logging.critical( 'Could not upload to mfg-inspector after 5 attempts. Giving up.') return {} class _MemStorage(oauth2client.client.Storage): def __init__(self): self._lock = threading.Lock() self._credentials = None def acquire_lock(self): self._lock.acquire(True) def release_lock(self): self._lock.release() def locked_get(self): return self._credentials def locked_put(self, credentials): self._credentials = credentials class MfgInspector(object): TOKEN_URI = 'https://accounts.google.com/o/oauth2/token' SCOPE_CODE_URI = 'https://www.googleapis.com/auth/glass.infra.quantum_upload' DESTINATION_URL = ('https://clients2.google.com/factoryfactory/' 'uploads/quantum_upload/?json') PARAMS = ['dut_id', 'end_time_millis', 'start_time_millis', 'station_id'] _converter = None _default_filename_pattern = None _cached_partial_proto = None _partial_proto_upload_complete = False def __init__(self, user=None, keydata=None, token_uri=TOKEN_URI, destination_url=DESTINATION_URL): self.user = user self.keydata = keydata self.token_uri = token_uri self.destination_url = destination_url if user and keydata: self.credentials = oauth2client.client.SignedJwtAssertionCredentials( service_account_name=self.user, private_key=six.ensure_binary(self.keydata), scope=self.SCOPE_CODE_URI, user_agent='OpenHTF Guzzle Upload Client', token_uri=self.token_uri) self.credentials.set_store(_MemStorage()) else: self.credentials = None self.upload_result = None self._cached_proto = None self._cached_params = dict.fromkeys(self.PARAMS) @classmethod
Apache License 2.0
bbj-dev/bbj
clients/urwid/main.py
App.get_quotes
python
def get_quotes(self, msg_object, value_type=int): quotes = [] if msg_object["send_raw"]: return quotes for paragraph in msg_object["body"]: [quotes.append(cdr) for car, cdr in paragraph if car == "quote"] return [value_type(q) for q in quotes]
Returns the post_ids that msg_object is quoting. The result is a list and may be empty. IDs are ints by default, but `str` may be passed as value_type to get strings instead.
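A hedged usage sketch for get_quotes. The shape of msg_object below (a dict with "send_raw" and a "body" made of paragraphs of (directive, payload) pairs) is inferred from the function body itself, and `app` stands for an already-constructed App instance, which in practice requires a running BBJ server.

# Illustrative message object; the quoted post ids are "3" and "7".
msg_object = {
    "send_raw": False,   # raw messages are never scanned for quotes
    "body": [            # one list of (directive, payload) pairs per paragraph
        [("quote", "3"), ("text", " I agree with this post.")],
        [("text", "Also see "), ("quote", "7")],
    ],
}

quote_ids = app.get_quotes(msg_object)        # -> [3, 7]
quote_strs = app.get_quotes(msg_object, str)  # -> ['3', '7']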
https://github.com/bbj-dev/bbj/blob/9488ade7fb64a58076b69cdae1b9ade0e9bb6241/clients/urwid/main.py#L693-L705
from network import BBJ, URLError from string import punctuation from datetime import datetime from sys import argv, version from time import time, sleep from getpass import getpass from subprocess import call from random import choice from code import interact import rlcompleter import readline import tempfile import urwid import json import os import re def get_arg(key, default=None, get_value=True): try: spec = argv.index("--" + key) value = argv[spec + 1] if get_value else True except ValueError: value = default except IndexError: exit("invalid format for --" + key) return value if get_arg("help", False, False): print("""BBJ Urwid Client Available options: --help: this message --https: enable use of https, requires host support --host <hostname>: the ip address/hostname/website/server to connect to --port <port>: the port to use when connecting to the host --user <username>: automatically connect with the given username Available environment variables: BBJ_USER: set your username to log in automatically. If --user is passed, this is ignored. BBJ_PASSWORD: set your password to log in automatically. if the password is wrong, will prompt you as normal. Please note that these environment variables need to be exported, and are visible to all other programs run from your shell.""") exit() try: network = BBJ(get_arg("host", "127.0.0.1"), get_arg("port", 7099), get_arg("https", False, False)) except URLError as e: exit("\033[0;31m%s\033[0m" % repr(e)) obnoxious_logo = """ % _ * ! * % 8 888888888o % 8 888888888o . 8 8888 8 8888 `88. 8 8888 `88. _ ! 8 8888 & ^ 8 8888 `88 8 8888 `88 * 8 8888 _ 8 8888 ,88 8 8888 ,88 8 8888 * 8 8888. ,88' 8 8888. ,88' ! 8 8888 " 8 8888888888 8 8888888888 8 8888 = ! 8 8888 `88. 8 8888 `88. 88. 8 8888 8 8888 88 8 8888 88 `88. | 8 888' ' > 8 8888. ,88' 8 8888. ,88' `88o. .8 88' . 8 888888888P 8 888888888P `Y888888 ' . % %""" welcome = """>>> Welcome to Bulletin Butter & Jelly! ------------------@ | BBJ is a persistent, chronologically ordered text | | discussion board for tildes. You may log in, | | register as a new user, or participate anonymously. | |---------------------------------------------------------| | \033[1;31mTo go anon, just press enter. Otherwise, give me a name\033[0m | | \033[1;31m(registered or not)\033[0m | @_________________________________________________________@ """ anon_warn = """>>> \033[1;31mJust a reminder!\033[0m ----------------------------------@ | You are not logged into BBJ, and are posting this | | message anonymously. If you do not log in, you will | | not be able to edit or delete this message. This | | warning can be disabled in the settings. | |------------------------------------------------------| """ format_help = [ "Quick reminder: \[rainbow: expressions work like this]. You may scroll " "this message, or press Q or escape to close it.\n\n" "BBJ supports **bolding**, __underlining__, and [rainbow: coloring] text " "using markdown-style symbols as well as tag-like expressions. Markdown " "is **NOT** fully implemented, but several of the more obvious concepts " "have been brought over. Additionally, we have chan-style greentext and " "numeric post referencing, ala >>3 for the third reply.", "[red: Whitespace]", "When you're composing, it is desirable to not include your own linebreaks " "into paragraphs of your post, because clients handle text wrapping on their " "own. Adding them yourself can cause your posts to look very strange. 
You can " "always edit your posts after submitting them if you do this by accident, as " "long as you are not anonymous.", "In previous versions of BBJ, linebreaks were joined into sentences if they " "occured in the same paragraph, however this confused many users and has been " "reverted to just use whatever was submitted, as-is.", "[red: Colors, Bold, Underline & Expressions]", "You can use [rainbow: rainbow], [red: red], [yellow: yellow], [green: green], " "[blue: blue], [cyan: cyan], [magenta: and magenta], [dim: dim], **bold**, and __underline__ " "inside of your posts. \**bold works like this\**, \__and underlines like this\__. " "You can escape these expressions \\\**like this\\\**. They can span up to the full width " "of the same line. They are best used on shorter phrases. " "However, you can use a different syntax for it, which is also required to use " "colors: these expressions \[bold: look like this] and have less restrictions.", "The colon and the space following it are important. When you use these " "expressions, the __first__ space is not part of the content, but any characters, " "including spaces, that follow it are included in the body. The formatting will " "apply until the closing ]. You can escape such an expression \\\[cyan: like this] " "and can also \\[blue: escape \\\] other closing brackets] inside of it. Only " "closing brackets need to be escaped within an expression. Any backslashes used " "for escaping will not show in the body unless you use two slashes.", "This peculiar syntax elimiates false positives. You never have to escape [normal] " "brackets when using the board. Only expressions with **valid and defined** directives " "will be affected. [so: this is totally valid and requires no escapes] because 'so' is " "not a directive. [red this will pass too] because the colon is missing.", "The following directives may be used in this form: red, yellow, green, blue, cyan, " "magenta, bold, underline, dim, and rainbow. Nesting expressions into eachother will " "override the parent directives until the innermost expression closes. Thus, nesting " "is valid but doesn't produce layered results on the command line client.", "[red: Quotes & Greentext]", "You can refer to a post number using two angle brackets pointing into a number. >>432 " "like this. You can color a whole line green by proceeding it with a '>'. Note that " "this violates the sentence structure outlined in the **Whitespace** section above, " "so you may introduce >greentext without splicing into seperate paragraphs. The '>' " "must be the first character on the line with no whitespace before it.\n>it looks like this\n" "and the paragraph doesnt have to break on either side. The formatter is smart enough to " "differentiate between >>greentext with multiple arrows and numeric quotes (outlined below) " "given that the text doesn't start with any numbers.", "When using numeric quotes, they are highlighted and the author's name will show " "next to them in the thread. You can press enter when focused on a message to view " "the parent posts. You may insert these directives manually or use the <Reply> function " "on post menus.", "Quoting directives cannot be escaped." 
] general_help = [ ("bold", "use the arrow keys, j/k, or n/p to scroll down this menu\n\n"), ("bold", "use q or escape to close dialogs and menus (including this one)\n\n"), ("10", "use q, escape, or a left directional key to go back at any point" " from just about anywhere.\n\n"), ("20", "use the o key to change your settings when this dialog is closed\n\n"), "You may use the arrow keys, or use ", ("button", "jk/np/Control-n|p"), " to move up and down by " "an element. If an element is overflowing the screen, it will scroll only one line. " "To make scrolling faster, ", ("button", "hold shift"), " when using a control: it " "will repeat 5 times by default, and you can change this number in your settings.\n\n" "In threads, The ", ("button", "<"), " and ", ("button", ">"), " keys will jump by " "a chosen number of post headers. You can see the count inside of the footer line at " "the far right side: press ", ("button", "x"), " to cycle it upwards or ", ("button", "X"), " to cycle it downwards.\n\n" "In the thread index and any open thread, the ", ("button", "b"), " and ", ("button", "t "), "keys may be used to go to very top or bottom.\n\n" "To go back and forth between threads, you may also use the left/right arrow keys, " "or ", ("button", "h"), "/", ("button", "l"), " to do it vi-style.\n\n" "Aside from those, primary controls are shown on the very bottom of the screen " "in the footer line, or may be placed in window titles for other actions like " "dialogs or composers." ] colors = [ "\033[1;31m", "\033[1;33m", "\033[1;33m", "\033[1;32m", "\033[1;34m", "\033[1;35m" ] colornames = ["none", "red", "yellow", "green", "blue", "cyan", "magenta"] editors = ["nano", "vim", "emacs", "vim -u NONE", "emacs -Q", "micro", "ed", "joe"] default_prefs = { "editor": os.getenv("EDITOR") or "nano", "mouse_integration": False, "jump_count": 1, "shift_multiplier": 5, "integrate_external_editor": True, "index_spacing": False, "dramatic_exit": True, "date": "%Y/%m/%d", "time": "%H:%M", "frame_theme": "tilde", "custom_divider_char": False, "frame_title": "BBJ", "use_custom_frame_title": False, "max_text_width": 80, "confirm_anon": True, "edit_escapes": { "abort": "f1", "focus": "f2", "fhelp": "f3" } } bars = { "index": "[RET]Open [/]Search [C]ompose [R]efresh [*]Bookmark [O]ptions [?]Help [Q]uit", "thread": "[Q]Back [RET]Menu [C]ompose [^R]eply [R]efresh [0-9]Goto [B/T]End [</>]Jump[X]%d [/]Search", "edit_window": "[{}]Abort [{}]Swap [{}]Formatting Help [save/quit to send] {}" } colormap = [ ("default", "default", "default"), ("bar", "light magenta", "default"), ("button", "light red", "default"), ("quote", "brown", "default"), ("opt_prompt", "black", "light gray"), ("opt_header", "light cyan", "default"), ("hover", "light cyan", "default"), ("dim", "dark gray", "default"), ("bold", "default,bold", "default"), ("underline", "default,underline", "default"), ("0", "default", "default"), ("1", "dark red", "default"), ("2", "brown", "default"), ("3", "dark green", "default"), ("4", "dark blue", "default"), ("5", "dark cyan", "default"), ("6", "dark magenta", "default"), ("10", "light red", "default"), ("20", "yellow", "default"), ("30", "light green", "default"), ("40", "light blue", "default"), ("50", "light cyan", "default"), ("60", "light magenta", "default") ] escape_map = { key: urwid.vterm.ESC + sequence for sequence, key in urwid.escape.input_sequences if len(key) > 1 } themes = { "tilde": { "divider": "-", "frame": { "tlcorner": "@", "trcorner": "@", "blcorner": "@", "brcorner": "@", "tline": "=", 
"bline": "=", "lline": "|", "rline": "|", } }, "urwid": { "divider": "─", "frame": { "tlcorner": "┌", "trcorner": "┐", "blcorner": "└", "brcorner": "┘", "tline": "─", "bline": "─", "lline": "│", "rline": "│", } }, "none": { "divider": " ", "frame": { "tlcorner": "", "trcorner": "", "blcorner": "", "brcorner": "", "tline": "", "bline": "", "lline": "", "rline": "", } } } rcpath = os.path.join(os.getenv("HOME"), ".bbjrc") markpath = os.path.join(os.getenv("HOME"), ".bbjmarks") pinpath = os.path.join(os.getenv("HOME"), ".bbjpins") class App(object): def __init__(self): self.prefs = bbjrc("load") self.client_pinned_threads = load_client_pins() self.usermap = {} self.match_data = { "query": "", "matches": [], "position": 0, } try: self.theme = themes[self.prefs["frame_theme"]].copy() if isinstance(self.prefs["custom_divider_char"], str): self.theme["divider"] = self.prefs["custom_divider_char"] except KeyError: exit("Selected theme does not exist. Please check " "the `frame_theme` value in ~/.bbjrc") self.mode = None self.thread = None self.window_split = False self.last_index_pos = None self.last_alarm = None if self.prefs["use_custom_frame_title"]: self.frame_title = self.prefs["frame_title"] else: self.frame_title = network.instance_info["instance_name"] self.walker = urwid.SimpleFocusListWalker([]) self.box = ActionBox(self.walker) self.body = urwid.AttrMap( urwid.LineBox(self.box, **self.frame_theme(self.frame_title)), "default" ) self.loop = urwid.MainLoop( urwid.Frame(self.body), palette=colormap, handle_mouse=self.prefs["mouse_integration"]) def frame_theme(self, title=""): theme = self.theme["frame"].copy() if theme["tline"] != "": theme.update({"title": title}) return theme def set_header(self, text, *format_specs): header = ("{}@bbj | " + text).format( (network.user_name or "anonymous"), *format_specs ) self.loop.widget.header = urwid.AttrMap(urwid.Text(header), "bar") def set_footer(self, string): widget = urwid.AttrMap(urwid.Text(string), "bar") self.loop.widget.footer = widget def set_default_header(self): if self.mode == "thread": name = self.usermap[self.thread["author"]]["user_name"] self.set_header("~{}: {}", name, self.thread["title"]) else: self.set_header("{} threads", len(self.walker)) def set_default_footer(self, clobber_composer=False): if self.window_split and not clobber_composer: return elif self.mode == "thread": footer = bars["thread"] % self.prefs["jump_count"] if self.match_data["matches"]: footer += " [@#] Search Control" else: footer = bars["index"] self.set_footer(footer) def set_bars(self, clobber_composer=False): self.set_default_header() self.set_default_footer(clobber_composer) def close_editor(self): if self.window_split: self.window_split = False self.loop.widget.focus_position = "body" self.set_footer(bars["thread"]) else: self.loop.widget = self.loop.widget[0] self.set_default_header() def overlay_p(self): return isinstance(self.loop.widget, urwid.Overlay) def remove_overlays(self, *_): while True: try: self.loop.widget = self.loop.widget[0] except: break def switch_editor(self): pos = self.loop.widget.focus_position attr = ["bar" if pos == "body" else "30", "dim"] if not self.window_split: return elif pos == "body": self.loop.widget.focus_position = "footer" focus = "[focused on editor]" else: self.loop.widget.focus_position = "body" focus = "[focused on thread]" attr.reverse() self.loop.widget.footer[0].set_text( bars["edit_window"].format( self.prefs["edit_escapes"]["abort"].upper(), self.prefs["edit_escapes"]["focus"].upper(), 
self.prefs["edit_escapes"]["fhelp"].upper(), focus) ) self.loop.widget.footer.contents[1][0].original_widget.attr_map = self.loop.widget.footer.contents[0][0].attr_map = {None: attr[0]} self.loop.widget.header.attr_map = {None: attr[1]} self.body.attr_map = {None: attr[1]} def readable_delta(self, modified): delta = time() - modified hours, remainder = divmod(delta, 3600) if hours > 48: return self.timestring(modified) elif hours > 1: return "%d hours ago" % hours elif hours == 1: return "about an hour ago" minutes, remainder = divmod(remainder, 60) if minutes > 1: return "%d minutes ago" % minutes return "less than a minute ago" def quote_view_action(self, button, message): widget = OptionsMenu( ActionBox(urwid.SimpleFocusListWalker(self.make_message_body(message))), **self.frame_theme(">>%d" % message["post_id"]) ) self.loop.widget = urwid.Overlay( widget, self.loop.widget, align=("relative", 50), valign=("relative", 50), width=("relative", 98), height=("relative", 60) ) def quote_view_menu(self, button, post_ids): buttons = [] for pid in post_ids: try: message = self.thread["messages"][pid] if len(post_ids) == 1: return self.quote_view_action(button, message) author = self.usermap[message["author"]] label = [ ("button", ">>%d " % pid), "(", (str(author["color"]), author["user_name"]), ")" ] buttons.append(cute_button(label, self.quote_view_action, message)) except IndexError: continue widget = OptionsMenu( urwid.ListBox(urwid.SimpleFocusListWalker(buttons)), **self.frame_theme("View a Quote") ) self.loop.widget = urwid.Overlay( widget, self.loop.widget, align=("relative", 50), valign=("relative", 50), height=len(buttons) + 3, width=30 ) def edit_post(self, button, message): post_id = message["post_id"] thread_id = message["thread_id"] try: message = network.edit_query(thread_id, post_id) except UserWarning as e: self.remove_overlays() return self.temp_footer_message(e.description) self.remove_overlays() self.compose(init_body=message["body"], edit=message) def reply(self, button, message): self.remove_overlays() self.compose(init_body=">>%d\n\n" % message["post_id"]) def deletion_dialog(self, button, message): op = message["post_id"] == 0 buttons = [ urwid.Text(("bold", "Delete this %s?" 
% ("whole thread" if op else "post"))), urwid.Divider(), cute_button(("10" , ">> Yes"), lambda _: [ network.message_delete(message["thread_id"], message["post_id"]), self.remove_overlays(), self.index() if op else self.refresh() ]), cute_button(("30", "<< No"), self.remove_overlays) ] popup = OptionsMenu( urwid.ListBox(urwid.SimpleFocusListWalker(buttons)), **self.frame_theme()) self.loop.widget = urwid.Overlay( popup, self.loop.widget, align=("relative", 50), valign=("relative", 50), width=30, height=6) def toggle_formatting(self, button, message): self.remove_overlays() raw = not message["send_raw"] network.set_post_raw(message["thread_id"], message["post_id"], raw) return self.refresh() def on_post(self, button, message): quotes = self.get_quotes(message) author = self.usermap[message["author"]] buttons = [] if not self.window_split: buttons.append(urwid.Button("Reply", self.reply, message)) if quotes and message["post_id"] != 0: buttons.append(urwid.Button( "View %sQuote" % ("a " if len(quotes) != 1 else ""), self.quote_view_menu, quotes)) if network.can_edit(message["thread_id"], message["post_id"]) and not self.window_split: if message["post_id"] == 0: msg = "Thread" else: msg = "Post" raw = message["send_raw"] buttons.insert(0, urwid.Button("Delete %s" % msg, self.deletion_dialog, message)) buttons.insert(0, urwid.Button( "Enable Formatting" if raw else "Disable Formatting", self.toggle_formatting, message)) buttons.insert(0, urwid.Button("Edit Post", self.edit_post, message)) if network.user["is_admin"]: buttons.insert(0, urwid.Text(("20", "Reminder: You're an admin!"))) if not buttons: return widget = OptionsMenu( urwid.ListBox(urwid.SimpleFocusListWalker(buttons)), **self.frame_theme(">>%d (%s)" % (message["post_id"], author["user_name"])) ) size = self.loop.screen_size self.loop.widget = urwid.Overlay( urwid.AttrMap(widget, str(author["color"]*10)), self.loop.widget, align=("relative", 50), valign=("relative", 50), width=30, height=len(buttons) + 2 )
MIT License
pyqode/pyqode.core
pyqode/core/widgets/filesystem_treeview.py
FileSystemHelper.copy_path_to_clipboard
python
def copy_path_to_clipboard(self): path = self.get_current_path() QtWidgets.QApplication.clipboard().setText(path) debug('path copied: %s' % path)
Copies the file path to the clipboard
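A hedged sketch of driving the helper from application code. The import paths mirror this record's module, the root directory is hypothetical, and a real application would keep the Qt event loop running rather than exiting immediately.

import sys

from pyqode.qt import QtWidgets
from pyqode.core.widgets.filesystem_treeview import (
    FileSystemTreeView,
    FileSystemHelper,
)

app = QtWidgets.QApplication(sys.argv)
tree = FileSystemTreeView()
tree.set_root_path("/tmp")     # hypothetical root; applied once the view is shown
tree.show()

helper = FileSystemHelper(tree)
helper.copy_path_to_clipboard()  # copies the selected path, or the root path if nothing is selected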
https://github.com/pyqode/pyqode.core/blob/0ffabebe4f0397d53429024f6f44db3fe97b0828/pyqode/core/widgets/filesystem_treeview.py#L494-L500
import sys import fnmatch import locale import logging import os import platform import shutil import subprocess from pyqode.qt import QtCore, QtGui, QtWidgets from pyqode.core import icons def _logger(): return logging.getLogger(__name__) def debug(msg, *args): return _logger().log(5, msg, *args) class FileSystemTreeView(QtWidgets.QTreeView): class FilterProxyModel(QtCore.QSortFilterProxyModel): def __init__(self): super(FileSystemTreeView.FilterProxyModel, self).__init__() self.ignored_patterns = [ '*.pyc', '*.pyo', '*.coverage', '.DS_Store', '__pycache__'] self._ignored_unused = [] def set_root_path(self, path): self._ignored_unused[:] = [] self._root = path parent_dir = os.path.dirname(path) for item in os.listdir(parent_dir): item_path = os.path.join(parent_dir, item) if item_path != path: self._ignored_unused.append(os.path.normpath(item_path)) def filterAcceptsRow(self, row, parent): index0 = self.sourceModel().index(row, 0, parent) finfo = self.sourceModel().fileInfo(index0) fn = finfo.fileName() fp = os.path.normpath(finfo.filePath()) if os.path.ismount(self._root): return True if fp in self._ignored_unused: return False for ptrn in self.ignored_patterns: if fnmatch.fnmatch(fn, ptrn): return False debug('accepting %s', finfo.filePath()) return True file_deleted = QtCore.Signal(str) files_deleted = QtCore.Signal(list) file_renamed = QtCore.Signal(str, str) files_renamed = QtCore.Signal(list) file_created = QtCore.Signal(str) about_to_show_context_menu = QtCore.Signal(str) def __init__(self, parent=None): super(FileSystemTreeView, self).__init__(parent) self._path_to_set = None self._path_to_select = None self.context_menu = None self._root_path = None self.root_path = '' self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) self.customContextMenuRequested.connect(self._show_context_menu) self.helper = FileSystemHelper(self) self.setSelectionMode(self.ExtendedSelection) self._ignored_patterns = [] self._icon_provider = QtWidgets.QFileIconProvider() self._hide_extra_colums = True from pyqode.core.widgets import FileIconProvider self.set_icon_provider(FileIconProvider()) def showEvent(self, event): super(FileSystemTreeView, self).showEvent(event) if self._path_to_set: self.set_root_path(self._path_to_set, self._hide_extra_colums) self._path_to_set = None def set_icon_provider(self, icon_provider): self._icon_provider = icon_provider def ignore_directories(self, *directories): for d in directories: self.add_ignore_patterns(d) def ignore_extensions(self, *extensions): for ext in extensions: self.add_ignore_patterns('*%s' % ext) def clear_ignore_patterns(self): self._ignored_patterns[:] = [] def add_ignore_patterns(self, *patterns): for ptrn in patterns: if isinstance(ptrn, list): for p in ptrn: self._ignored_patterns.append(p) else: self._ignored_patterns.append(ptrn) def set_context_menu(self, context_menu): self.context_menu = context_menu self.context_menu.tree_view = self self.context_menu.init_actions() for action in self.context_menu.actions(): self.addAction(action) def set_root_path(self, path, hide_extra_columns=True): if not self.isVisible(): self._path_to_set = path self._hide_extra_colums = hide_extra_columns return if sys.platform == 'win32' and os.path.splitunc(path)[0]: mdl = QtGui.QStandardItemModel(1, 1) item = QtGui.QStandardItem( QtGui.QIcon.fromTheme( 'dialog-warning', QtGui.QIcon(':/pyqode-icons/rc/dialog-warning.png')), 'UNC pathnames not supported.') mdl.setItem(0, 0, item) self.setModel(mdl) self.root_path = None return self._hide_extra_colums = 
hide_extra_columns if os.path.isfile(path): path = os.path.abspath(os.path.join(path, os.pardir)) self._fs_model_source = QtWidgets.QFileSystemModel() self._fs_model_source.setFilter(QtCore.QDir.Dirs | QtCore.QDir.Files | QtCore.QDir.NoDotAndDotDot | QtCore.QDir.Hidden) self._fs_model_source.setIconProvider(self._icon_provider) self._fs_model_proxy = self.FilterProxyModel() for item in self._ignored_patterns: self._fs_model_proxy.ignored_patterns.append(item) self._fs_model_proxy.setSourceModel(self._fs_model_source) self._fs_model_proxy.set_root_path(path) self._root_path = os.path.dirname(path) self.root_path = path self._fs_model_source.directoryLoaded.connect(self._on_path_loaded) self._fs_model_source.setRootPath(self._root_path) def _on_path_loaded(self, path): if os.path.normpath(path) != self._root_path: return try: self.setModel(self._fs_model_proxy) file_root_index = self._fs_model_source.setRootPath( self._root_path) root_index = self._fs_model_proxy.mapFromSource(file_root_index) self.setRootIndex(root_index) if not os.path.ismount(self._root_path): self.expandToDepth(0) if self._hide_extra_colums: self.setHeaderHidden(True) for i in range(1, 4): self.hideColumn(i) if self._path_to_select: self.select_path(self._path_to_select) self._path_to_select = None except RuntimeError: return def filePath(self, index): return self._fs_model_source.filePath( self._fs_model_proxy.mapToSource(index)) def fileInfo(self, index): return self._fs_model_source.fileInfo( self._fs_model_proxy.mapToSource(index)) def _show_context_menu(self, point): if self.context_menu: self.about_to_show_context_menu.emit( FileSystemHelper(self).get_current_path()) self.context_menu.exec_(self.mapToGlobal(point)) def select_path(self, path): if not self.isVisible(): self._path_to_select = path else: self.setCurrentIndex(self._fs_model_proxy.mapFromSource( self._fs_model_source.index(path))) class FileSystemHelper: class _UrlListMimeData(QtCore.QMimeData): def __init__(self, copy=True): super(FileSystemHelper._UrlListMimeData, self).__init__() self.copy = copy def set_list(self, urls): lst = [] for url in urls: lst.append(bytes(url, encoding=locale.getpreferredencoding())) self.setData(self.format(self.copy), b'\n'.join(lst)) @classmethod def list_from(cls, mime_data, copy=True): string = bytes(mime_data.data(cls.format(copy))).decode('utf-8') lst = string.split('\n') urls = [] for val in lst: urls.append(val) return urls def formats(self): return [self.format(self.copy)] @classmethod def format(cls, copy=True): return 'text/tv-copy-url-list' if copy else 'text/tv-cut-url-list' def __init__(self, treeview): self.tree_view = treeview def copy_to_clipboard(self, copy=True): urls = self.selected_urls() if not urls: return mime = self._UrlListMimeData(copy) mime.set_list(urls) clipboard = QtWidgets.QApplication.clipboard() clipboard.setMimeData(mime) def selected_urls(self): urls = [] debug('gettings urls') for proxy_index in self.tree_view.selectedIndexes(): finfo = self.tree_view.fileInfo(proxy_index) urls.append(finfo.canonicalFilePath()) debug('selected urls %r' % [str(url) for url in urls]) return urls def paste_from_clipboard(self): to = self.get_current_path() if os.path.isfile(to): to = os.path.abspath(os.path.join(to, os.pardir)) mime = QtWidgets.QApplication.clipboard().mimeData() paste_operation = None if mime.hasFormat(self._UrlListMimeData.format(copy=True)): paste_operation = True elif mime.hasFormat(self._UrlListMimeData.format(copy=False)): paste_operation = False if paste_operation is not None: 
self._paste( self._UrlListMimeData.list_from(mime, copy=paste_operation), to, copy=paste_operation) def _paste(self, sources, destination, copy): for src in sources: debug('%s <%s> to <%s>' % ( 'copying' if copy else 'cutting', src, destination)) perform_copy = True ext = os.path.splitext(src)[1] original = os.path.splitext(os.path.split(src)[1])[0] filename, status = QtWidgets.QInputDialog.getText( self.tree_view, _('Copy'), _('New name:'), QtWidgets.QLineEdit.Normal, original) if filename == '' or not status: return filename = filename + ext final_dest = os.path.join(destination, filename) if os.path.exists(final_dest): rep = QtWidgets.QMessageBox.question( self.tree_view, _('File exists'), _('File <%s> already exists. Do you want to erase it?') % final_dest, QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No) if rep == QtWidgets.QMessageBox.No: perform_copy = False if not perform_copy: continue try: if os.path.isfile(src): shutil.copy(src, final_dest) else: shutil.copytree(src, final_dest) except (IOError, OSError) as e: QtWidgets.QMessageBox.warning( self.tree_view, _('Copy failed'), _('Failed to copy "%s" to "%s".\n\n%s' % (src, destination, str(e)))) _logger().exception('failed to copy "%s" to "%s', src, destination) else: debug('file copied %s', src) if not copy: debug('removing source (cut operation)') if os.path.isfile(src): os.remove(src) else: shutil.rmtree(src) self.tree_view.files_renamed.emit([(src, final_dest)]) @staticmethod def _get_files(path): ret_val = [] for root, _, files in os.walk(path): for f in files: ret_val.append(os.path.join(root, f)) return ret_val def delete(self): urls = self.selected_urls() rep = QtWidgets.QMessageBox.question( self.tree_view, _('Confirm delete'), _('Are you sure about deleting the selected files/directories?'), QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes) if rep == QtWidgets.QMessageBox.Yes: deleted_files = [] for fn in urls: try: if os.path.isfile(fn): os.remove(fn) deleted_files.append(fn) else: files = self._get_files(fn) shutil.rmtree(fn) deleted_files += files except OSError as e: QtWidgets.QMessageBox.warning( self.tree_view, _('Delete failed'), _('Failed to remove "%s".\n\n%s') % (fn, str(e))) _logger().exception('failed to remove %s', fn) self.tree_view.files_deleted.emit(deleted_files) for d in deleted_files: debug('%s removed', d) self.tree_view.file_deleted.emit(os.path.normpath(d)) def get_current_path(self): path = self.tree_view.fileInfo( self.tree_view.currentIndex()).filePath() if not path: path = self.tree_view.root_path return path
MIT License
unofficial-memsource/memsource-cli-client
memsource_cli/models/reference_file_reference.py
ReferenceFileReference.note
python
def note(self): return self._note
Gets the note of this ReferenceFileReference. # noqa: E501 :return: The note of this ReferenceFileReference. # noqa: E501 :rtype: str
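A tiny usage sketch for the generated model above; the field values are hypothetical, while the constructor keywords and property come straight from this record's context.

from memsource_cli.models.reference_file_reference import ReferenceFileReference

ref = ReferenceFileReference(id="42", filename="glossary.pdf", note="Client-supplied glossary")
print(ref.note)       # -> 'Client-supplied glossary'
print(ref.filename)   # -> 'glossary.pdf'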
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/reference_file_reference.py#L103-L110
import pprint import re import six class ReferenceFileReference(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'str', 'filename': 'str', 'note': 'str' } attribute_map = { 'id': 'id', 'filename': 'filename', 'note': 'note' } def __init__(self, id=None, filename=None, note=None): self._id = None self._filename = None self._note = None self.discriminator = None if id is not None: self.id = id if filename is not None: self.filename = filename if note is not None: self.note = note @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def filename(self): return self._filename @filename.setter def filename(self, filename): self._filename = filename @property
Apache License 2.0
alfredfrancis/ai-chatbot-framework
app/nlu/entity_extractor.py
EntityExtractor.extract_features
python
def extract_features(self, sent, i): word = sent[i][0] postag = sent[i][1] features = [ 'bias', 'word.lower=' + word.lower(), 'word[-3:]=' + word[-3:], 'word[-2:]=' + word[-2:], 'word.isupper=%s' % word.isupper(), 'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit(), 'postag=' + postag, 'postag[:2]=' + postag[:2], ] if i > 0: word1 = sent[i - 1][0] postag1 = sent[i - 1][1] features.extend([ '-1:word.lower=' + word1.lower(), '-1:word.istitle=%s' % word1.istitle(), '-1:word.isupper=%s' % word1.isupper(), '-1:postag=' + postag1, '-1:postag[:2]=' + postag1[:2], ]) else: features.append('BOS') if i < len(sent) - 1: word1 = sent[i + 1][0] postag1 = sent[i + 1][1] features.extend([ '+1:word.lower=' + word1.lower(), '+1:word.istitle=%s' % word1.istitle(), '+1:word.isupper=%s' % word1.isupper(), '+1:postag=' + postag1, '+1:postag[:2]=' + postag1[:2], ]) else: features.append('EOS') return features
Extract features for the token at position i of a POS-tagged sentence. :param sent: list of (word, postag) pairs :param i: index of the token to featurize :return: list of feature strings
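A hedged sketch of the expected input shape. The sentence and POS tags are made up, and the import path simply mirrors this record's function_path; in the real project, importing the app package also wires up Flask and the spaCy tokenizer.

from app.nlu.entity_extractor import EntityExtractor

extractor = EntityExtractor()

# A POS-tagged sentence: a list of (token, tag) pairs (tags here are illustrative).
sent = [("book", "VB"), ("a", "DT"), ("flight", "NN"), ("to", "TO"), ("Paris", "NNP")]

features = extractor.extract_features(sent, 4)
# Yields strings such as 'word.lower=paris', 'word.istitle=True',
# '-1:postag=TO', and 'EOS' because index 4 is the last token.
print(features)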
https://github.com/alfredfrancis/ai-chatbot-framework/blob/6e60c0a66f580c9df3866ee48a599bdafdbc381c/app/nlu/entity_extractor.py#L32-L78
import pycrfsuite from flask import current_app as app from app import spacy_tokenizer class EntityExtractor: def __init__(self, synonyms=[]): self.synonyms = synonyms def replace_synonyms(self, entities): for entity in entities.keys(): entity_value = str(entities[entity]) if entity_value.lower() in self.synonyms: entities[entity] = self.synonyms[entity_value.lower()] return entities
MIT License
oncleben31/home-assistant-config
config/custom_components/variable/__init__.py
async_setup
python
async def async_setup(hass, config): component = EntityComponent(_LOGGER, DOMAIN, hass) entities = [] for variable_id, variable_config in config[DOMAIN].items(): if not variable_config: variable_config = {} name = variable_config.get(CONF_NAME) value = variable_config.get(CONF_VALUE) attributes = variable_config.get(CONF_ATTRIBUTES) restore = variable_config.get(CONF_RESTORE, False) force_update = variable_config.get(CONF_FORCE_UPDATE, False) entities.append( Variable(variable_id, name, value, attributes, restore, force_update) ) @asyncio.coroutine def async_set_variable_service(call): entity_id = ENTITY_ID_FORMAT.format(call.data.get(ATTR_VARIABLE)) entity = component.get_entity(entity_id) if entity: target_variables = [entity] tasks = [ variable.async_set_variable( call.data.get(ATTR_VALUE), call.data.get(ATTR_ATTRIBUTES), call.data.get(ATTR_REPLACE_ATTRIBUTES, False), ) for variable in target_variables ] if tasks: yield from asyncio.wait(tasks, loop=hass.loop) else: _LOGGER.warning( f"Failed to set unknown variable: {entity_id}" ) hass.services.async_register( DOMAIN, SERVICE_SET_VARIABLE, async_set_variable_service, schema=SERVICE_SET_VARIABLE_SCHEMA, ) await component.async_add_entities(entities) return True
Set up variables.
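A hedged sketch of exercising the component once async_setup has registered the service. It uses the module-level set_variable helper shown in the context below; `hass` stands for the running Home Assistant instance, and the entity name and values are hypothetical.

from custom_components.variable import set_variable

# Fires the variable.set_variable service registered by async_setup, targeting
# variable.countdown (hypothetical entity) and merging the given attributes.
set_variable(hass, "countdown", 41, {"icon": "mdi:timer"}, False)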
https://github.com/oncleben31/home-assistant-config/blob/11b40fe08e0dc11a6e66d735bb0b4e01b3d5f17a/config/custom_components/variable/__init__.py#L81-L134
import asyncio import logging import json import voluptuous as vol from homeassistant.const import CONF_NAME, ATTR_ICON from homeassistant.helpers import config_validation as cv from homeassistant.loader import bind_hass from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.restore_state import RestoreEntity _LOGGER = logging.getLogger(__name__) DOMAIN = "variable" ENTITY_ID_FORMAT = DOMAIN + ".{}" CONF_ATTRIBUTES = "attributes" CONF_VALUE = "value" CONF_RESTORE = "restore" CONF_FORCE_UPDATE = "force_update" ATTR_VARIABLE = "variable" ATTR_VALUE = "value" ATTR_ATTRIBUTES = "attributes" ATTR_REPLACE_ATTRIBUTES = "replace_attributes" SERVICE_SET_VARIABLE = "set_variable" SERVICE_SET_VARIABLE_SCHEMA = vol.Schema( { vol.Required(ATTR_VARIABLE): cv.string, vol.Optional(ATTR_VALUE): cv.match_all, vol.Optional(ATTR_ATTRIBUTES): dict, vol.Optional(ATTR_REPLACE_ATTRIBUTES): cv.boolean, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { cv.slug: vol.Any( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_VALUE): cv.match_all, vol.Optional(CONF_ATTRIBUTES): dict, vol.Optional(CONF_RESTORE): cv.boolean, vol.Optional(CONF_FORCE_UPDATE): cv.boolean, }, None, ) } ) }, extra=vol.ALLOW_EXTRA, ) @bind_hass def set_variable( hass, variable, value, attributes, replace_attributes, ): hass.services.call( DOMAIN, SERVICE_SET_VARIABLE, { ATTR_VARIABLE: variable, ATTR_VALUE: value, ATTR_ATTRIBUTES: attributes, ATTR_REPLACE_ATTRIBUTES: replace_attributes, }, )
MIT License
awslabs/mxboard
python/mxboard/summary.py
_compute_curve
python
def _compute_curve(labels, predictions, num_thresholds, weights=None): if weights is None: weights = 1.0 bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1))) float_labels = labels.astype(np.float) histogram_range = (0, num_thresholds - 1) tp_buckets, _ = np.histogram( bucket_indices, bins=num_thresholds, range=histogram_range, weights=float_labels * weights) fp_buckets, _ = np.histogram( bucket_indices, bins=num_thresholds, range=histogram_range, weights=(1.0 - float_labels) * weights) tp = np.cumsum(tp_buckets[::-1])[::-1] fp = np.cumsum(fp_buckets[::-1])[::-1] tn = fp[0] - fp fn = tp[0] - tp precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp) recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn) return np.stack((tp, fp, tn, fn, precision, recall))
This function is another implementation of functions in https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py
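A small numeric sketch of the helper above. The labels and scores are made up, the import path assumes the package is installed as mxboard (which also requires MXNet, per the module header), and _compute_curve is a private helper imported purely for illustration.

import numpy as np

from mxboard.summary import _compute_curve   # private helper, used here for illustration only

labels = np.array([0, 1, 1, 0, 1])                  # ground-truth classes
predictions = np.array([0.1, 0.9, 0.6, 0.4, 0.8])   # scores in [0, 1]

data = _compute_curve(labels, predictions, num_thresholds=5)
print(data.shape)   # (6, 5): rows are tp, fp, tn, fn, precision, recall
print(data[4])      # precision at each of the 5 implicit thresholds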
https://github.com/awslabs/mxboard/blob/432d4df2489ecf6dbb251d7f96f1ccadb368997a/python/mxboard/summary.py#L328-L357
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import io import wave import struct import json import re as _re import numpy as np try: import mxnet from distutils.version import LooseVersion if LooseVersion(mxnet.__version__) < LooseVersion('1.2.0'): logging.warning('The currently installed MXNet version %s is less than 1.2.0.' ' Some functionality of MXBoard may not work.', mxnet.__version__) except ImportError: raise ImportError('MXBoard requires MXNet with version >= 1.2.0.' ' Please follow the instruction here to install MXNet first.' ' http://mxnet.incubator.apache.org/install/index.html') from mxnet.ndarray import NDArray from mxnet.symbol import Symbol from mxnet.gluon import HybridBlock from .proto.summary_pb2 import Summary from .proto.summary_pb2 import HistogramProto from .proto.summary_pb2 import SummaryMetadata from .proto.tensor_pb2 import TensorProto from .proto.tensor_shape_pb2 import TensorShapeProto from .proto.plugin_pr_curve_pb2 import PrCurvePluginData from .proto.node_def_pb2 import NodeDef from .proto.graph_pb2 import GraphDef from .proto.attr_value_pb2 import AttrValue from .proto.versions_pb2 import VersionDef from .utils import _make_numpy_array, _prepare_image try: from PIL import Image except ImportError: Image = None _INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]') def _clean_tag(name): if name is not None: new_name = _INVALID_TAG_CHARACTERS.sub('_', name) new_name = new_name.lstrip('/') if new_name != name: logging.warning('Summary name %s is illegal; using %s instead.', name, new_name) name = new_name return name def scalar_summary(tag, scalar): tag = _clean_tag(tag) scalar = _make_numpy_array(scalar) assert(scalar.squeeze().ndim == 0), 'scalar should be 0D' scalar = float(scalar) return Summary(value=[Summary.Value(tag=tag, simple_value=scalar)]) def histogram_summary(tag, values, bins): tag = _clean_tag(tag) values = _make_numpy_array(values) hist = _make_histogram(values.astype(float), bins) return Summary(value=[Summary.Value(tag=tag, histo=hist)]) def _make_histogram(values, bins): values = values.reshape(-1) counts, limits = np.histogram(values, bins=bins) limits = limits[1:] sum_sq = values.dot(values) return HistogramProto(min=values.min(), max=values.max(), num=len(values), sum=values.sum(), sum_squares=sum_sq, bucket_limit=limits, bucket=counts) def image_summary(tag, image): tag = _clean_tag(tag) image = _prepare_image(image) image = _make_image(image) return Summary(value=[Summary.Value(tag=tag, image=image)]) def _make_image(tensor): assert isinstance(tensor, NDArray) if Image is None: raise ImportError('need to install PIL for visualizing images') height, width, channel = tensor.shape tensor = _make_numpy_array(tensor) image = Image.fromarray(tensor) output = io.BytesIO() image.save(output, format='PNG') image_string = output.getvalue() output.close() return Summary.Image(height=height, width=width, colorspace=channel, encoded_image_string=image_string) def audio_summary(tag, audio, sample_rate=44100): audio = audio.squeeze() if audio.ndim != 1: raise ValueError('input audio must be squeezable to 1D, input audio squeezed ' 'shape is {}'.format(audio.shape)) audio = _make_numpy_array(audio) tensor_list = [int(32767.0 * x) for x in audio] fio = io.BytesIO() wave_writer = wave.open(fio, 'wb') wave_writer.setnchannels(1) wave_writer.setsampwidth(2) wave_writer.setframerate(sample_rate) tensor_enc = b'' for v in tensor_list: tensor_enc += struct.pack('<h', v) 
wave_writer.writeframes(tensor_enc) wave_writer.close() audio_string = fio.getvalue() fio.close() audio = Summary.Audio(sample_rate=sample_rate, num_channels=1, length_frames=len(tensor_list), encoded_audio_string=audio_string, content_type='audio/wav') return Summary(value=[Summary.Value(tag=tag, audio=audio)]) def text_summary(tag, text): plugin_data = [SummaryMetadata.PluginData(plugin_name='text')] smd = SummaryMetadata(plugin_data=plugin_data) tensor = TensorProto(dtype='DT_STRING', string_val=[text.encode(encoding='utf_8')], tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)])) return Summary(value=[Summary.Value(node_name=tag, metadata=smd, tensor=tensor)]) def pr_curve_summary(tag, labels, predictions, num_thresholds, weights=None): if num_thresholds > 127: logging.warning('num_thresholds>127 would result in failure of creating pr_curve protobuf,' ' clipping it at 127') num_thresholds = 127 labels = _make_numpy_array(labels) predictions = _make_numpy_array(predictions) if weights is not None: weights = _make_numpy_array(weights) data = _compute_curve(labels, predictions, num_thresholds=num_thresholds, weights=weights) pr_curve_plugin_data = PrCurvePluginData(version=0, num_thresholds=num_thresholds).SerializeToString() plugin_data = [SummaryMetadata.PluginData(plugin_name='pr_curves', content=pr_curve_plugin_data)] smd = SummaryMetadata(plugin_data=plugin_data) tensor = TensorProto(dtype='DT_FLOAT', float_val=data.reshape(-1).tolist(), tensor_shape=TensorShapeProto( dim=[TensorShapeProto.Dim(size=data.shape[0]), TensorShapeProto.Dim(size=data.shape[1])])) return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)]) _MINIMUM_COUNT = 1e-7
Apache License 2.0
johnbywater/eventsourcing
eventsourcing/domain.py
Aggregate.created_on
python
def created_on(self) -> datetime: return self._created_on
The date and time when the aggregate was created.
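A hedged sketch that exercises the property through the metaclass machinery shown in the context: a trivial Aggregate subclass is created and its creation timestamp read back. The class is made up, and modified_on is assumed to be exposed alongside created_on (the full property list is truncated in this record's context).

from eventsourcing.domain import Aggregate

class Order(Aggregate):          # hypothetical aggregate class
    def __init__(self, sku):
        self.sku = sku

order = Order(sku="ABC-123")
print(order.created_on)          # timestamp of the auto-generated Created event
# Immediately after creation nothing else has happened to the aggregate, so the
# modification timestamp should equal the creation timestamp.
print(order.created_on == order.modified_on)   # -> True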
https://github.com/johnbywater/eventsourcing/blob/95196be59d9e92e0eecbe2fe7744339daa4117e3/eventsourcing/domain.py#L860-L864
import inspect import os from abc import ABC, ABCMeta from dataclasses import dataclass from datetime import datetime, tzinfo from types import FunctionType, WrapperDescriptorType from typing import ( Any, Callable, Dict, Generic, Iterable, List, Optional, Type, TypeVar, Union, cast, ) from uuid import UUID, uuid4 from eventsourcing.utils import get_method_name, get_topic, resolve_topic TZINFO: tzinfo = resolve_topic(os.getenv("TZINFO_TOPIC", "datetime:timezone.utc")) TAggregate = TypeVar("TAggregate", bound="Aggregate") class MetaDomainEvent(ABCMeta): def __new__(mcs, name: str, bases: tuple, cls_dict: dict) -> "MetaDomainEvent": event_cls = ABCMeta.__new__(mcs, name, bases, cls_dict) event_cls = dataclass(frozen=True)(event_cls) return event_cls def __init__(cls, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) class DomainEvent(ABC, metaclass=MetaDomainEvent): originator_id: UUID originator_version: int timestamp: datetime class AggregateEvent(DomainEvent, Generic[TAggregate]): def mutate(self, obj: Optional[TAggregate]) -> Optional[TAggregate]: assert obj is not None next_version = obj.version + 1 if self.originator_version != next_version: raise VersionError(self.originator_version, next_version) if self.apply(obj) is not None: raise TypeError( f"Unexpected value returned from " f"{type(self).apply.__qualname__}(). Values " f"returned from 'apply' methods are discarded." ) obj.version = self.originator_version obj.modified_on = self.timestamp return obj def apply(self, aggregate: TAggregate) -> None: class AggregateCreated(AggregateEvent["Aggregate"]): originator_topic: str def mutate(self, obj: Optional[TAggregate]) -> TAggregate: assert obj is None kwargs = self.__dict__.copy() aggregate_class: Type[TAggregate] = resolve_topic( kwargs.pop("originator_topic") ) agg: TAggregate = aggregate_class.__new__(aggregate_class) base_kwargs = { "id": kwargs.pop("originator_id"), "version": kwargs.pop("originator_version"), "timestamp": kwargs.pop("timestamp"), } Aggregate.__base_init__(agg, **base_kwargs) init_method = agg.__init__ if aggregate_class._init_mentions_id: kwargs["id"] = base_kwargs["id"] init_method(**kwargs) return agg class CommandMethodDecorator: def __init__(self, arg: Union[Callable, str, Type[AggregateEvent]]): self.is_name_inferred_from_method = False self.given_event_cls: Optional[Type[AggregateEvent]] = None self.event_cls_name: Optional[str] = None self.is_property_setter = False self.property_setter_arg_name: Optional[str] = None self.is_decorating_a_property = False self.decorated_property: Optional[property] = None self.original_method: Optional[FunctionType] = None if isinstance(arg, str): self.initialise_from_explicit_name(event_cls_name=arg) elif isinstance(arg, type) and issubclass(arg, AggregateEvent): self.initialise_from_event_cls(event_cls=arg) elif isinstance(arg, FunctionType): self.initialise_from_decorated_method(original_method=arg) elif isinstance(arg, property): method_name = arg.fset.__name__ raise TypeError( f"@event on {method_name}() property setter requires event class name" ) elif isinstance(arg, staticmethod): raise TypeError( f"{arg.__func__.__name__}() staticmethod can't be " f"used to update aggregate state" ) elif isinstance(arg, classmethod): raise TypeError( f"{arg.__func__.__name__}() classmethod can't be " f"used to update aggregate state" ) else: raise TypeError(f"Unsupported usage: {type(arg)} is not a str or function") def initialise_from_decorated_method(self, original_method: FunctionType) -> None: 
self.original_method = original_method original_method_name = original_method.__name__ if original_method_name != "__init__": self.is_name_inferred_from_method = True self.event_cls_name = "".join( [s.capitalize() for s in original_method_name.split("_")] ) _check_no_variable_params(self.original_method) def initialise_from_event_cls(self, event_cls: Type[AggregateEvent]) -> None: self.given_event_cls = event_cls def initialise_from_explicit_name(self, event_cls_name: str) -> None: if event_cls_name == "": raise ValueError("Can't use empty string as name of event class") self.event_cls_name = event_cls_name def __call__(self, *args: Any, **kwargs: Any) -> Any: if self.original_method is None: assert len(kwargs) == 0, "Unsupported usage" assert len(args) == 1, "Unsupported usage" arg = args[0] if isinstance(arg, FunctionType): self.original_method = arg _check_no_variable_params(self.original_method) elif isinstance(arg, property): self.is_decorating_a_property = True self.decorated_property = arg if arg.fset is None: assert arg.fget is not None method_name = arg.fget.__name__ raise TypeError( f"@event can't decorate {method_name}() property getter" ) assert isinstance(arg.fset, FunctionType) self.original_method = arg.fset assert self.original_method setter_arg_names = list(inspect.signature(arg.fset).parameters) assert len(setter_arg_names) == 2 self.property_setter_arg_name = setter_arg_names[1] _check_no_variable_params(self.original_method) else: raise ValueError( f"Unsupported usage: {type(arg)} is not a str or a FunctionType" ) if self.given_event_cls: if self.given_event_cls in original_methods: name = self.given_event_cls.__name__ raise TypeError( f"{name} event class used in more than one decorator" ) if "apply" in self.given_event_cls.__dict__: name = self.given_event_cls.__name__ raise TypeError(f"{name} event class has unexpected apply() method") setattr( self.given_event_cls, "apply", DecoratedEvent.apply ) original_methods[self.given_event_cls] = self.original_method return self else: assert self.is_property_setter assert self.property_setter_arg_name assert len(args) == 2 assert len(kwargs) == 0 assert isinstance(args[0], Aggregate) aggregate_instance = args[0] bound = BoundCommandMethodDecorator(self, aggregate_instance) property_setter_arg_value = args[1] kwargs = {self.property_setter_arg_name: property_setter_arg_value} bound.trigger(**kwargs) def __get__( self, instance: Optional[TAggregate], owner: "MetaAggregate" ) -> Union["BoundCommandMethodDecorator", "UnboundCommandMethodDecorator"]: if self.is_decorating_a_property: assert self.decorated_property return self.decorated_property.__get__(instance, owner) else: if instance is None: return UnboundCommandMethodDecorator(self) else: return BoundCommandMethodDecorator(self, instance) def __set__(self, instance: TAggregate, value: Any) -> None: assert self.is_decorating_a_property b = BoundCommandMethodDecorator(self, instance) assert self.property_setter_arg_name kwargs = {self.property_setter_arg_name: value} b.trigger(**kwargs) def event( arg: Optional[Union[FunctionType, str, Type[AggregateEvent]]] = None ) -> CommandMethodDecorator: if arg is None: return event else: return CommandMethodDecorator(arg) triggers = event class UnboundCommandMethodDecorator: def __init__(self, event_decorator: CommandMethodDecorator): self.event_decorator = event_decorator assert event_decorator.original_method self.__qualname__ = event_decorator.original_method.__qualname__ self.__name__ = event_decorator.original_method.__name__ class 
BoundCommandMethodDecorator: def __init__(self, event_decorator: CommandMethodDecorator, aggregate: TAggregate): assert event_decorator.original_method self.event_decorator = event_decorator self.__qualname__ = event_decorator.original_method.__qualname__ self.__name__ = event_decorator.original_method.__name__ self.aggregate = aggregate def trigger(self, *args: Any, **kwargs: Any) -> None: assert isinstance(self.event_decorator, CommandMethodDecorator) assert self.event_decorator.original_method kwargs = _coerce_args_to_kwargs( self.event_decorator.original_method, args, kwargs ) if self.event_decorator.given_event_cls: event_cls = self.event_decorator.given_event_cls else: assert self.event_decorator.event_cls_name event_cls = getattr(self.aggregate, self.event_decorator.event_cls_name) self.aggregate.trigger_event(event_cls, **kwargs) def __call__(self, *args: Any, **kwargs: Any) -> None: self.trigger(*args, **kwargs) original_methods: Dict[MetaDomainEvent, FunctionType] = {} class DecoratedEvent(AggregateEvent): def apply(self, aggregate: TAggregate) -> None: event_obj_dict = dict(self.__dict__) event_obj_dict.pop("originator_id") event_obj_dict.pop("originator_version") event_obj_dict.pop("timestamp") original_method = original_methods[type(self)] method_signature = inspect.signature(original_method) for name in method_signature.parameters: if name == "self": continue returned_value = original_method(aggregate, **event_obj_dict) if returned_value is not None: raise TypeError( f"Unexpected value returned from " f"{original_method.__qualname__}(). Values " f"returned from 'apply' methods are discarded." ) TDomainEvent = TypeVar("TDomainEvent", bound=DomainEvent) TAggregateEvent = TypeVar("TAggregateEvent", bound=AggregateEvent) TAggregateCreated = TypeVar("TAggregateCreated", bound=AggregateCreated) def _check_no_variable_params( method: Union[FunctionType, WrapperDescriptorType] ) -> None: assert isinstance(method, (FunctionType, WrapperDescriptorType)), type(method) for param in inspect.signature(method).parameters.values(): if param.kind is param.VAR_POSITIONAL: raise TypeError("variable positional parameters not supported") elif param.kind is param.VAR_KEYWORD: raise TypeError("variable keyword parameters not supported") def _coerce_args_to_kwargs( method: Union[FunctionType, WrapperDescriptorType], args: Iterable[Any], kwargs: Dict[str, Any], expects_id: bool = False, ) -> Dict[str, Any]: assert isinstance(method, (FunctionType, WrapperDescriptorType)) method_signature = inspect.signature(method) copy_kwargs = dict(kwargs) args = tuple(args) positional_names = [] keyword_defaults = {} required_positional = [] required_keyword_only = [] if expects_id: positional_names.append("id") required_positional.append("id") for name, param in method_signature.parameters.items(): if name == "self": continue if param.kind is param.KEYWORD_ONLY: required_keyword_only.append(name) if param.kind is param.POSITIONAL_OR_KEYWORD: positional_names.append(name) if param.default == param.empty: required_positional.append(name) if param.default != param.empty: keyword_defaults[name] = param.default for name in kwargs: if name not in required_keyword_only and name not in positional_names: raise TypeError( f"{get_method_name(method)}() got an unexpected " f"keyword argument '{name}'" ) counter = 0 len_args = len(args) if len_args > len(positional_names): msg = ( f"{get_method_name(method)}() takes {len(positional_names) + 1} " f"positional argument{'' if len(positional_names) + 1 == 1 else 's'} " f"but 
{len_args + 1} were given" ) raise TypeError(msg) required_positional_not_in_kwargs = [ n for n in required_positional if n not in kwargs ] num_missing = len(required_positional_not_in_kwargs) - len_args if num_missing > 0: missing_names = [ f"'{name}'" for name in required_positional_not_in_kwargs[len_args:] ] msg = ( f"{get_method_name(method)}() missing {num_missing} required positional " f"argument{'' if num_missing == 1 else 's'}: " ) raise_missing_names_type_error(missing_names, msg) for name in positional_names: if counter + 1 > len_args: break if name not in kwargs: copy_kwargs[name] = args[counter] counter += 1 else: raise TypeError( f"{get_method_name(method)}() got multiple values for argument '{name}'" ) missing_keyword_only_arguments = [] for name in required_keyword_only: if name not in kwargs: missing_keyword_only_arguments.append(name) if missing_keyword_only_arguments: missing_names = [f"'{name}'" for name in missing_keyword_only_arguments] msg = ( f"{get_method_name(method)}() missing {len(missing_names)} " f"required keyword-only argument" f"{'' if len(missing_names) == 1 else 's'}: " ) raise_missing_names_type_error(missing_names, msg) for name, value in keyword_defaults.items(): if name not in copy_kwargs: copy_kwargs[name] = value return copy_kwargs def raise_missing_names_type_error(missing_names: List[str], msg: str) -> None: msg += missing_names[0] if len(missing_names) == 2: msg += f" and {missing_names[1]}" elif len(missing_names) > 2: msg += ", " + ", ".join(missing_names[1:-1]) msg += f", and {missing_names[-1]}" raise TypeError(msg) class MetaAggregate(ABCMeta): _annotations_mention_id = False _init_mentions_id = False INITIAL_VERSION = 1 def __new__(mcs, *args: Any, **kwargs: Any) -> "MetaAggregate": try: args[2]["__annotations__"].pop("id") except KeyError: pass else: args[2]["_annotations_mention_id"] = True cls = ABCMeta.__new__(mcs, *args) cls = dataclass(eq=False, repr=False)(cls) return cast(MetaAggregate, cls) def __init__( cls, *args: Any, created_event_name: Optional[str] = None, ) -> None: super().__init__(*args) created_event_classes = {} try: created_event_class = cls.__dict__["_created_event_class"] if created_event_name: raise TypeError( "Can't use both '_created_event_class' and 'created_event_name'" ) except KeyError: created_event_class = None if isinstance(cls.__dict__["__init__"], CommandMethodDecorator): init_decorator: CommandMethodDecorator = cls.__dict__["__init__"] init_method = init_decorator.original_method if created_event_name: raise TypeError( "Can't use both 'created_event_name' and __init__ @event decorator" ) elif created_event_class: raise TypeError( "Can't use both '_created_event_class' and __init__ @event " "decorator" ) elif init_decorator.event_cls_name: created_event_name = init_decorator.event_cls_name elif init_decorator.given_event_cls: created_event_class = init_decorator.given_event_cls else: raise TypeError( "Neither name nor class given to __init__ @event decorator" ) cls.__init__ = init_method else: init_method = cls.__dict__["__init__"] assert isinstance(init_method, FunctionType) for name, value in tuple(cls.__dict__.items()): if isinstance(value, type) and issubclass(value, AggregateCreated): created_event_classes[name] = value if created_event_name in created_event_classes: created_event_class = created_event_classes[created_event_name] elif created_event_class is None: if len(created_event_classes) == 0 or created_event_name: if not created_event_name: created_event_name = "Created" created_cls_annotations = 
{} _check_no_variable_params(init_method) method_signature = inspect.signature(init_method) for param_name in method_signature.parameters: if param_name == "self": continue if param_name == "id": cls._init_mentions_id = True continue created_cls_annotations[param_name] = "typing.Any" created_event_class = type( created_event_name, (AggregateCreated,), { "__annotations__": created_cls_annotations, "__module__": cls.__module__, "__qualname__": ".".join( [cls.__qualname__, created_event_name] ), }, ) setattr(cls, created_event_name, created_event_class) elif len(created_event_classes) == 1: created_event_class = list(created_event_classes.values())[0] cls._created_event_class = created_event_class for attribute in tuple(cls.__dict__.values()): if isinstance(attribute, property) and isinstance( attribute.fset, CommandMethodDecorator ): attribute = attribute.fset if attribute.is_name_inferred_from_method: method_name = attribute.original_method.__name__ raise TypeError( f"@event under {method_name}() property setter requires event " f"class name" ) attribute.is_property_setter = True if isinstance(attribute, CommandMethodDecorator): original_method = attribute.original_method assert isinstance(original_method, FunctionType) method_signature = inspect.signature(original_method) annotations = {} for param_name in method_signature.parameters: if param_name == "self": continue elif attribute.is_property_setter: assert len(method_signature.parameters) == 2 attribute.property_setter_arg_name = param_name annotations[param_name] = "typing.Any" if not attribute.given_event_cls: assert attribute.event_cls_name event_cls_name = attribute.event_cls_name if event_cls_name in cls.__dict__: raise TypeError( f"{event_cls_name} event already defined on {cls.__name__}" ) event_cls_qualname = ".".join([cls.__qualname__, event_cls_name]) event_cls_dict = { "__annotations__": annotations, "__module__": cls.__module__, "__qualname__": event_cls_qualname, } event_cls = MetaDomainEvent( event_cls_name, (DecoratedEvent,), event_cls_dict ) original_methods[event_cls] = original_method setattr(cls, event_cls_name, event_cls) cls._create_id_param_names = [] for name, param in inspect.signature(cls.create_id).parameters.items(): if param.kind in [param.KEYWORD_ONLY, param.POSITIONAL_OR_KEYWORD]: cls._create_id_param_names.append(name) def __call__(cls: "MetaAggregate", *args: Any, **kwargs: Any) -> TAggregate: self_init: WrapperDescriptorType = cls.__init__ kwargs = _coerce_args_to_kwargs( self_init, args, kwargs, expects_id=cls._annotations_mention_id ) if cls._created_event_class is None: raise TypeError("attribute '_created_event_class' not set on class") else: new_aggregate: TAggregate = cls._create( event_class=cls._created_event_class, **kwargs, ) return new_aggregate @staticmethod def create_id(**kwargs: Any) -> UUID: return uuid4() def _create( cls, event_class: Type[TAggregateCreated], *, id: Optional[UUID] = None, **kwargs: Any, ) -> TAggregate: create_id_kwargs = { k: v for k, v in kwargs.items() if k in cls._create_id_param_names } try: created_event: TAggregateCreated = event_class( originator_topic=get_topic(cls), originator_id=id or cls.create_id(**create_id_kwargs), originator_version=cls.INITIAL_VERSION, timestamp=datetime.now(tz=TZINFO), **kwargs, ) except TypeError as e: msg = ( f"Unable to construct 'aggregate created' " f"event with class {event_class.__qualname__} " f"and keyword args {kwargs}: {e}" ) raise TypeError(msg) agg: TAggregate = created_event.mutate(None) 
agg.pending_events.append(created_event) return agg class Aggregate(ABC, metaclass=MetaAggregate): class Event(AggregateEvent): pass class Created(AggregateCreated): pass def __new__(cls, *args: Any, **kwargs: Any) -> Any: return object.__new__(cls) def __eq__(self, other: Any) -> bool: return type(self) == type(other) and self.__dict__ == other.__dict__ def __repr__(self) -> str: attrs = [ f"{k.lstrip('_')}={v!r}" for k, v in self.__dict__.items() if k != "_pending_events" ] return f"{type(self).__name__}({', '.join(attrs)})" def __base_init__(self, id: UUID, version: int, timestamp: datetime) -> None: self._id = id self._version = version self._created_on = timestamp self._modified_on = timestamp self._pending_events: List[AggregateEvent] = [] @property def id(self) -> UUID: return self._id @property def version(self) -> int: return self._version @version.setter def version(self, version: int) -> None: self._version = version @property
BSD 3-Clause New or Revised License
rapid7/vm-console-client-python
rapid7vmconsole/models/settings.py
Settings.__eq__
python
def __eq__(self, other):
    if not isinstance(other, Settings):
        return False

    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/settings.py#L470-L475
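A minimal usage sketch of this equality check, assuming the generated client is installed and that Settings is importable from rapid7vmconsole.models.settings (import path inferred from the file layout above; the attribute values are hypothetical):

from rapid7vmconsole.models.settings import Settings

a = Settings(uuid="abc", directory="/opt/nexpose")   # hypothetical values
b = Settings(uuid="abc", directory="/opt/nexpose")
c = Settings(uuid="xyz")

assert a == b          # identical attribute dictionaries -> equal
assert not (a == c)    # differing attributes (or a non-Settings object) -> not equal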
import pprint import re import six class Settings(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'asset_linking': 'bool', 'authentication': 'AuthenticationSettings', 'database': 'DatabaseSettings', 'directory': 'str', 'insight_platform': 'bool', 'insight_platform_region': 'str', 'links': 'list[Link]', 'risk': 'RiskSettings', 'scan': 'ScanSettings', 'serial_number': 'str', 'smtp': 'SmtpSettings', 'updates': 'UpdateSettings', 'uuid': 'str', 'web': 'WebSettings' } attribute_map = { 'asset_linking': 'assetLinking', 'authentication': 'authentication', 'database': 'database', 'directory': 'directory', 'insight_platform': 'insightPlatform', 'insight_platform_region': 'insightPlatformRegion', 'links': 'links', 'risk': 'risk', 'scan': 'scan', 'serial_number': 'serialNumber', 'smtp': 'smtp', 'updates': 'updates', 'uuid': 'uuid', 'web': 'web' } def __init__(self, asset_linking=None, authentication=None, database=None, directory=None, insight_platform=None, insight_platform_region=None, links=None, risk=None, scan=None, serial_number=None, smtp=None, updates=None, uuid=None, web=None): self._asset_linking = None self._authentication = None self._database = None self._directory = None self._insight_platform = None self._insight_platform_region = None self._links = None self._risk = None self._scan = None self._serial_number = None self._smtp = None self._updates = None self._uuid = None self._web = None self.discriminator = None if asset_linking is not None: self.asset_linking = asset_linking if authentication is not None: self.authentication = authentication if database is not None: self.database = database if directory is not None: self.directory = directory if insight_platform is not None: self.insight_platform = insight_platform if insight_platform_region is not None: self.insight_platform_region = insight_platform_region if links is not None: self.links = links if risk is not None: self.risk = risk if scan is not None: self.scan = scan if serial_number is not None: self.serial_number = serial_number if smtp is not None: self.smtp = smtp if updates is not None: self.updates = updates if uuid is not None: self.uuid = uuid if web is not None: self.web = web @property def asset_linking(self): return self._asset_linking @asset_linking.setter def asset_linking(self, asset_linking): self._asset_linking = asset_linking @property def authentication(self): return self._authentication @authentication.setter def authentication(self, authentication): self._authentication = authentication @property def database(self): return self._database @database.setter def database(self, database): self._database = database @property def directory(self): return self._directory @directory.setter def directory(self, directory): self._directory = directory @property def insight_platform(self): return self._insight_platform @insight_platform.setter def insight_platform(self, insight_platform): self._insight_platform = insight_platform @property def insight_platform_region(self): return self._insight_platform_region @insight_platform_region.setter def insight_platform_region(self, insight_platform_region): self._insight_platform_region = insight_platform_region @property def links(self): return self._links @links.setter def links(self, links): self._links = links @property def risk(self): return self._risk @risk.setter def risk(self, risk): self._risk = risk 
@property def scan(self): return self._scan @scan.setter def scan(self, scan): self._scan = scan @property def serial_number(self): return self._serial_number @serial_number.setter def serial_number(self, serial_number): self._serial_number = serial_number @property def smtp(self): return self._smtp @smtp.setter def smtp(self, smtp): self._smtp = smtp @property def updates(self): return self._updates @updates.setter def updates(self, updates): self._updates = updates @property def uuid(self): return self._uuid @uuid.setter def uuid(self, uuid): self._uuid = uuid @property def web(self): return self._web @web.setter def web(self, web): self._web = web def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Settings, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
MIT License
aws-quickstart/quickstart-redhat-openshift
functions/source/KeyGen/asn1crypto/keys.py
PrivateKeyInfo.wrap
python
def wrap(cls, private_key, algorithm):
    if not isinstance(private_key, byte_cls) and not isinstance(private_key, Asn1Value):
        raise TypeError(unwrap(
            '''
            private_key must be a byte string or Asn1Value, not %s
            ''',
            type_name(private_key)
        ))

    if algorithm == 'rsa':
        if not isinstance(private_key, RSAPrivateKey):
            private_key = RSAPrivateKey.load(private_key)
        params = Null()
    elif algorithm == 'dsa':
        if not isinstance(private_key, DSAPrivateKey):
            private_key = DSAPrivateKey.load(private_key)
        params = DSAParams()
        params['p'] = private_key['p']
        params['q'] = private_key['q']
        params['g'] = private_key['g']
        public_key = private_key['public_key']
        private_key = private_key['private_key']
    elif algorithm == 'ec':
        if not isinstance(private_key, ECPrivateKey):
            private_key = ECPrivateKey.load(private_key)
        else:
            private_key = private_key.copy()
        params = private_key['parameters']
        del private_key['parameters']
    else:
        raise ValueError(unwrap(
            '''
            algorithm must be one of "rsa", "dsa", "ec", not %s
            ''',
            repr(algorithm)
        ))

    private_key_algo = PrivateKeyAlgorithm()
    private_key_algo['algorithm'] = PrivateKeyAlgorithmId(algorithm)
    private_key_algo['parameters'] = params

    container = cls()
    container._algorithm = algorithm
    container['version'] = Integer(0)
    container['private_key_algorithm'] = private_key_algo
    container['private_key'] = private_key

    if algorithm == 'dsa':
        container._public_key = public_key

    return container
Wraps a private key in a PrivateKeyInfo structure

:param private_key:
    A byte string or Asn1Value object of the private key

:param algorithm:
    A unicode string of "rsa", "dsa" or "ec"

:return:
    A PrivateKeyInfo object
https://github.com/aws-quickstart/quickstart-redhat-openshift/blob/2b87dd38b72e7e4c439a606c5a9ea458d72da612/functions/source/KeyGen/asn1crypto/keys.py#L521-L586
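A short, hedged example of wrapping a PKCS#1 RSA key into a PKCS#8 PrivateKeyInfo structure with this classmethod; the key file name is hypothetical:

from asn1crypto.keys import PrivateKeyInfo, RSAPrivateKey

with open('rsa_key.der', 'rb') as f:          # hypothetical PKCS#1 DER file
    pkcs1_der = f.read()

rsa_key = RSAPrivateKey.load(pkcs1_der)       # parse the raw RSA structure
pkcs8 = PrivateKeyInfo.wrap(rsa_key, 'rsa')   # wrap it with algorithm metadata

print(pkcs8['private_key_algorithm']['algorithm'].native)  # 'rsa'
pkcs8_der = pkcs8.dump()                      # re-serialize as PKCS#8 DER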
from __future__ import unicode_literals, division, absolute_import, print_function import hashlib import math from ._elliptic_curve import ( SECP192R1_BASE_POINT, SECP224R1_BASE_POINT, SECP256R1_BASE_POINT, SECP384R1_BASE_POINT, SECP521R1_BASE_POINT, PrimeCurve, PrimePoint, ) from ._errors import unwrap from ._types import type_name, str_cls, byte_cls from .algos import _ForceNullParameters, DigestAlgorithm, EncryptionAlgorithm, RSAESOAEPParams from .core import ( Any, Asn1Value, BitString, Choice, Integer, IntegerOctetString, Null, ObjectIdentifier, OctetBitString, OctetString, ParsableOctetString, ParsableOctetBitString, Sequence, SequenceOf, SetOf, ) from .util import int_from_bytes, int_to_bytes class OtherPrimeInfo(Sequence): _fields = [ ('prime', Integer), ('exponent', Integer), ('coefficient', Integer), ] class OtherPrimeInfos(SequenceOf): _child_spec = OtherPrimeInfo class RSAPrivateKeyVersion(Integer): _map = { 0: 'two-prime', 1: 'multi', } class RSAPrivateKey(Sequence): _fields = [ ('version', RSAPrivateKeyVersion), ('modulus', Integer), ('public_exponent', Integer), ('private_exponent', Integer), ('prime1', Integer), ('prime2', Integer), ('exponent1', Integer), ('exponent2', Integer), ('coefficient', Integer), ('other_prime_infos', OtherPrimeInfos, {'optional': True}) ] class RSAPublicKey(Sequence): _fields = [ ('modulus', Integer), ('public_exponent', Integer) ] class DSAPrivateKey(Sequence): _fields = [ ('version', Integer), ('p', Integer), ('q', Integer), ('g', Integer), ('public_key', Integer), ('private_key', Integer), ] class _ECPoint(): @classmethod def from_coords(cls, x, y): x_bytes = int(math.ceil(math.log(x, 2) / 8.0)) y_bytes = int(math.ceil(math.log(y, 2) / 8.0)) num_bytes = max(x_bytes, y_bytes) byte_string = b'\x04' byte_string += int_to_bytes(x, width=num_bytes) byte_string += int_to_bytes(y, width=num_bytes) return cls(byte_string) def to_coords(self): data = self.native first_byte = data[0:1] if first_byte == b'\x04': remaining = data[1:] field_len = len(remaining) // 2 x = int_from_bytes(remaining[0:field_len]) y = int_from_bytes(remaining[field_len:]) return (x, y) if first_byte not in set([b'\x02', b'\x03']): raise ValueError(unwrap( ''' Invalid EC public key - first byte is incorrect ''' )) raise ValueError(unwrap( ''' Compressed representations of EC public keys are not supported due to patent US6252960 ''' )) class ECPoint(OctetString, _ECPoint): pass class ECPointBitString(OctetBitString, _ECPoint): pass class SpecifiedECDomainVersion(Integer): _map = { 1: 'ecdpVer1', 2: 'ecdpVer2', 3: 'ecdpVer3', } class FieldType(ObjectIdentifier): _map = { '1.2.840.10045.1.1': 'prime_field', '1.2.840.10045.1.2': 'characteristic_two_field', } class CharacteristicTwoBasis(ObjectIdentifier): _map = { '1.2.840.10045.1.2.1.1': 'gn_basis', '1.2.840.10045.1.2.1.2': 'tp_basis', '1.2.840.10045.1.2.1.3': 'pp_basis', } class Pentanomial(Sequence): _fields = [ ('k1', Integer), ('k2', Integer), ('k3', Integer), ] class CharacteristicTwo(Sequence): _fields = [ ('m', Integer), ('basis', CharacteristicTwoBasis), ('parameters', Any), ] _oid_pair = ('basis', 'parameters') _oid_specs = { 'gn_basis': Null, 'tp_basis': Integer, 'pp_basis': Pentanomial, } class FieldID(Sequence): _fields = [ ('field_type', FieldType), ('parameters', Any), ] _oid_pair = ('field_type', 'parameters') _oid_specs = { 'prime_field': Integer, 'characteristic_two_field': CharacteristicTwo, } class Curve(Sequence): _fields = [ ('a', OctetString), ('b', OctetString), ('seed', OctetBitString, {'optional': True}), ] 
class SpecifiedECDomain(Sequence): _fields = [ ('version', SpecifiedECDomainVersion), ('field_id', FieldID), ('curve', Curve), ('base', ECPoint), ('order', Integer), ('cofactor', Integer, {'optional': True}), ('hash', DigestAlgorithm, {'optional': True}), ] class NamedCurve(ObjectIdentifier): _map = { '1.2.840.10045.3.0.1': 'c2pnb163v1', '1.2.840.10045.3.0.2': 'c2pnb163v2', '1.2.840.10045.3.0.3': 'c2pnb163v3', '1.2.840.10045.3.0.4': 'c2pnb176w1', '1.2.840.10045.3.0.5': 'c2tnb191v1', '1.2.840.10045.3.0.6': 'c2tnb191v2', '1.2.840.10045.3.0.7': 'c2tnb191v3', '1.2.840.10045.3.0.8': 'c2onb191v4', '1.2.840.10045.3.0.9': 'c2onb191v5', '1.2.840.10045.3.0.10': 'c2pnb208w1', '1.2.840.10045.3.0.11': 'c2tnb239v1', '1.2.840.10045.3.0.12': 'c2tnb239v2', '1.2.840.10045.3.0.13': 'c2tnb239v3', '1.2.840.10045.3.0.14': 'c2onb239v4', '1.2.840.10045.3.0.15': 'c2onb239v5', '1.2.840.10045.3.0.16': 'c2pnb272w1', '1.2.840.10045.3.0.17': 'c2pnb304w1', '1.2.840.10045.3.0.18': 'c2tnb359v1', '1.2.840.10045.3.0.19': 'c2pnb368w1', '1.2.840.10045.3.0.20': 'c2tnb431r1', '1.2.840.10045.3.1.2': 'prime192v2', '1.2.840.10045.3.1.3': 'prime192v3', '1.2.840.10045.3.1.4': 'prime239v1', '1.2.840.10045.3.1.5': 'prime239v2', '1.2.840.10045.3.1.6': 'prime239v3', '1.3.132.0.1': 'sect163k1', '1.3.132.0.15': 'sect163r2', '1.2.840.10045.3.1.1': 'secp192r1', '1.3.132.0.33': 'secp224r1', '1.3.132.0.26': 'sect233k1', '1.2.840.10045.3.1.7': 'secp256r1', '1.3.132.0.27': 'sect233r1', '1.3.132.0.16': 'sect283k1', '1.3.132.0.17': 'sect283r1', '1.3.132.0.34': 'secp384r1', '1.3.132.0.36': 'sect409k1', '1.3.132.0.37': 'sect409r1', '1.3.132.0.35': 'secp521r1', '1.3.132.0.38': 'sect571k1', '1.3.132.0.39': 'sect571r1', } class ECDomainParameters(Choice): _alternatives = [ ('specified', SpecifiedECDomain), ('named', NamedCurve), ('implicit_ca', Null), ] class ECPrivateKeyVersion(Integer): _map = { 1: 'ecPrivkeyVer1', } class ECPrivateKey(Sequence): _fields = [ ('version', ECPrivateKeyVersion), ('private_key', IntegerOctetString), ('parameters', ECDomainParameters, {'explicit': 0, 'optional': True}), ('public_key', ECPointBitString, {'explicit': 1, 'optional': True}), ] class DSAParams(Sequence): _fields = [ ('p', Integer), ('q', Integer), ('g', Integer), ] class Attribute(Sequence): _fields = [ ('type', ObjectIdentifier), ('values', SetOf, {'spec': Any}), ] class Attributes(SetOf): _child_spec = Attribute class PrivateKeyAlgorithmId(ObjectIdentifier): _map = { '1.2.840.113549.1.1.1': 'rsa', '1.2.840.10040.4.1': 'dsa', '1.2.840.10045.2.1': 'ec', } class PrivateKeyAlgorithm(_ForceNullParameters, Sequence): _fields = [ ('algorithm', PrivateKeyAlgorithmId), ('parameters', Any, {'optional': True}), ] _oid_pair = ('algorithm', 'parameters') _oid_specs = { 'dsa': DSAParams, 'ec': ECDomainParameters, } class PrivateKeyInfo(Sequence): _fields = [ ('version', Integer), ('private_key_algorithm', PrivateKeyAlgorithm), ('private_key', ParsableOctetString), ('attributes', Attributes, {'implicit': 0, 'optional': True}), ] def _private_key_spec(self): algorithm = self['private_key_algorithm']['algorithm'].native return { 'rsa': RSAPrivateKey, 'dsa': Integer, 'ec': ECPrivateKey, }[algorithm] _spec_callbacks = { 'private_key': _private_key_spec } _algorithm = None _bit_size = None _public_key = None _fingerprint = None @classmethod
Apache License 2.0
scholi/pyspm
pySPM/utils/plot.py
sp
python
def sp(M, N=1, W=21, ravel=True, fig=False):
    if N < 0:
        tot = -N
        N = 1 + (tot - 1) // M
    f, ax = plt.subplots(N, M, figsize=(W, N * W / M))
    if ravel:
        if fig:
            return np.ravel(ax), f
        return np.ravel(ax)
    if fig:
        return ax, f
    return ax
Shortcut for creating subplots with max width = W (default 21, which seems to correspond to 100% of the width in jupyter). Height is calculated in order to have square subplots. The layout is given by M columns and N lines. If N is negative, abs(N) is interpreted as the number of elements and the number of lines is calculated automatically as a function of M.
https://github.com/scholi/pyspm/blob/cd31fcd6567322eb92a4b8ae6c5b3706d5ab52bb/pySPM/utils/plot.py#L126-L144
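A brief sketch of how sp() is typically called, assuming it is importable from pySPM.utils.plot (path taken from the entry above):

import numpy as np
import matplotlib.pyplot as plt
from pySPM.utils.plot import sp

# Ask for 7 plots in 3 columns: N=-7 expands to 3 rows, so 9 raveled axes come back.
axs = sp(3, -7, W=12)
x = np.linspace(0, 2 * np.pi, 200)
for i, ax in enumerate(axs):
    ax.plot(x, np.sin((i + 1) * x))
plt.show()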
import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from .misc import dec_debug, do_debug, alias @alias("plot_mask") def plotMask(ax, mask, color, **kargs): import copy m = np.ma.masked_array(mask, ~mask) palette = copy.copy(plt.cm.gray) palette.set_over(color, 1.0) ax.imshow(m, cmap=palette, vmin=0, vmax=0.5, **kargs) def offset_coord(xy, offset=(0,0), ax=None, fig=None, unit='px'): if ax is None: ax = plt.gca() if fig is None: fig = plt.gcf() tr = ax.transData tri = tr.inverted() if unit is 'px': offset = np.array(offset) elif unit is 'ax': offset = ax.transAxes.transform(offset) elif unit is 'fig': offset = fig.transFigure.transform(offset) return tri.transform(tr.transform(xy)+offset) def Xdist(ax,left, right, y, color='r', linestyle=':', fmt="{dist:.1f}{unit}", xtransf=lambda x: x, va='bottom', ha='center', offset=(0,2), **kargs): ax.axvline(left, color=color, linestyle=linestyle) ax.axvline(right, color=color, linestyle=linestyle) ann = dict(va=va, ha=ha, color=color) ann.update({k[3:]:kargs[k] for k in kargs if k.startswith('an_')}) ax.annotate(fmt.format(dist=xtransf(right-left),unit=kargs.get('unit','')), ({'center':.5*(left+right),'left':right,'right':left}[ann['ha']], y), offset, textcoords='offset pixels', **ann) arr = dict(arrowstyle='<->',color=color) arr.update({k[4:]:kargs[k] for k in kargs if k.startswith('arr_')}) ax.annotate("", (left, y), (right, y), arrowprops=arr) def Ydist(ax, down, up, x, color='r', linestyle=':', fmt="{dist:.2f}{unit}", ytransf=lambda y: y, rotation=90, va='center', ha='right', offset=(-2,0), **kargs): ax.axhline(down, color=color, linestyle=linestyle) ax.axhline(up, color=color, linestyle=linestyle) ann = dict(va=va, ha=ha, color=color) ann.update({k[3:]:kargs[k] for k in kargs if k.startswith('an_')}) ax.annotate(fmt.format(dist=ytransf(up-down),unit=kargs.get('unit','')), (x,{'center':.5*(down+up),'top':up,'bottom':down}[ann['va']]), offset, textcoords='offset pixels', rotation=rotation, **ann) arr = dict(arrowstyle='<->',color=color) arr.update({k[4:]:kargs[k] for k in kargs if k.startswith('arr_')}) ax.annotate("", (x, down), (x, up), arrowprops=arr) @alias("DualPlot") def dual_plot(ax, col1='C0',col2='C1'): axb = ax.twinx() axb.spines['left'].set_color(col1) axb.spines['right'].set_color(col2) ax.yaxis.label.set_color(col1) axb.yaxis.label.set_color(col2) ax.tick_params(axis='y', colors=col1) axb.tick_params(axis='y', colors=col2) return axb
Apache License 2.0
jatinchowdhury18/audio_dspy
audio_dspy/eq.py
EQ.process_block
python
def process_block(self, block):
    out = np.copy(block)
    for filter in self.filters:
        out = filter.process_block(out)
    return out
Process a block of samples.

Parameters
----------
block : array-like
    The block of samples to process

Returns
-------
output : array-like
    Block of output samples
https://github.com/jatinchowdhury18/audio_dspy/blob/5d39eba11f56ac5619173fe8f3f233e682bc1d60/audio_dspy/eq.py#L234-L252
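A hedged usage sketch, assuming the EQ class is exposed at package level as adsp.EQ, the same way the module's own code imports audio_dspy as adsp:

import numpy as np
import audio_dspy as adsp

fs = 44100
eq = adsp.EQ(fs)
eq.add_lowshelf(100.0, 0.707, 2.0)   # fc, Q, linear gain
eq.add_bell(1000.0, 2.0, 0.5)

block = np.random.randn(1024)        # a block of test samples
out = eq.process_block(block)        # filters are applied in the order they were added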
import audio_dspy as adsp import numpy as np import scipy.signal as signal class Filter: def __init__(self, order, fs, type='Other'): self.fs = fs self.order = order self.type = type self.b_coefs = np.zeros(order + 1) self.a_coefs = np.zeros(order + 1) self.__z = np.zeros(order + 1) def reset(self): self.__z = np.zeros(self.order + 1) def has_been_reset(self): return np.sum(self.__z) == 0 def set_coefs(self, b, a): assert np.size(b) == ( self.order + 1), 'b coefficients size is not the same as filter order' assert np.size(a) == ( self.order + 1), 'a coefficients size is not the same as filter order' self.b_coefs = np.copy(b) self.a_coefs = np.copy(a) def process_sample(self, x): y = self.__z[1] + x * self.b_coefs[0] for n in range(self.order): self.__z[n] = self.__z[n+1] + x * self.b_coefs[n] - y * self.a_coefs[n] self.__z[self.order] = x * self.b_coefs[self.order] - y * self.a_coefs[self.order] return y def process_block(self, block): out = np.copy(block) for n, _ in enumerate(block): out[n] = self.process_sample(block[n]) return out class EQ: def __init__(self, fs): self.fs = fs self.filters = [] def add_filter(self, filter): assert isinstance(filter, Filter), 'filter must be of adsp.Filter type' self.filters.append(filter) def add_LPF(self, fc, Q): string = 'LPF, Freq: {}, Q: {}'.format(fc, Q) filter = adsp.Filter(2, self.fs, type=string) b, a = adsp.design_LPF2(fc, Q, self.fs) filter.set_coefs(b, a) self.add_filter(filter) def add_HPF(self, fc, Q): string = 'HPF, Freq: {}, Q: {}'.format(fc, Q) filter = adsp.Filter(2, self.fs, type=string) b, a = adsp.design_HPF2(fc, Q, self.fs) filter.set_coefs(b, a) self.add_filter(filter) def add_bell(self, fc, Q, gain): string = 'Bell, Freq: {}, Q: {}, gain: {}'.format(fc, Q, gain) filter = adsp.Filter(2, self.fs, type=string) b, a = adsp.design_bell(fc, Q, gain, self.fs) filter.set_coefs(b, a) self.add_filter(filter) def add_notch(self, fc, Q): string = 'Notch, Freq: {}, Q: {}'.format(fc, Q) filter = adsp.Filter(2, self.fs, type=string) b, a = adsp.design_notch(fc, Q, self.fs) filter.set_coefs(b, a) self.add_filter(filter) def add_highshelf(self, fc, Q, gain): string = 'High Shelf, Freq: {}, Q: {}, gain: {}'.format(fc, Q, gain) filter = adsp.Filter(2, self.fs, type=string) b, a = adsp.design_highshelf(fc, Q, gain, self.fs) filter.set_coefs(b, a) self.add_filter(filter) def add_lowshelf(self, fc, Q, gain): string = 'Low Shelf, Freq: {}, Q: {}, gain: {}'.format(fc, Q, gain) filter = adsp.Filter(2, self.fs, type=string) b, a = adsp.design_lowshelf(fc, Q, gain, self.fs) filter.set_coefs(b, a) self.add_filter(filter) def reset(self): for filter in self.filters: filter.reset()
MIT License
monaen/lightfieldreconstruction
utils/utils.py
shaveLF
python
def shaveLF(inLF, border=(3, 3)):
    h_border, w_border = border
    if (h_border != 0) and (w_border != 0):
        shavedLF = inLF[h_border:-h_border, w_border:-w_border, ...]
    elif (h_border != 0) and (w_border == 0):
        shavedLF = inLF[h_border:-h_border, :, ...]
    elif (h_border == 0) and (w_border != 0):
        shavedLF = inLF[:, w_border:-w_border, ...]
    else:
        shavedLF = inLF
    return shavedLF
Shave the input light field in terms of a given border.

:param inLF: input light field of size: [H, W, S, T, C]
:param border: border values
:return: shaved light field
https://github.com/monaen/lightfieldreconstruction/blob/5bc51c93d320a1a74e2050257ed16d95cb6edb5e/utils/utils.py#L298-L316
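For illustration, trimming a spatial border from a synthetic light field (a sketch; shaveLF is assumed importable from utils.utils as the path above suggests):

import numpy as np
from utils.utils import shaveLF

lf = np.random.rand(64, 64, 8, 8, 3)        # [H, W, S, T, C]
shaved = shaveLF(lf, border=(3, 3))
print(shaved.shape)                          # (58, 58, 8, 8, 3)

rows_only = shaveLF(lf, border=(5, 0))       # shave rows only
print(rows_only.shape)                       # (54, 64, 8, 8, 3)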
import tensorflow as tf import numpy as np import math from tqdm import tqdm from scipy.ndimage import gaussian_filter from scipy.signal import convolve2d def count_number_trainable_params(): def params_bytes(vtype): if vtype == "float32_ref": return 32 / 8 if vtype == "float64_ref": return 64 / 8 def get_nb_params_shape(shape): nb_params = 1 for dim in shape: nb_params = nb_params * int(dim) return nb_params total_nb_params = 0 total_btypes = 0 for trainable_variable in tf.trainable_variables(): shape = trainable_variable.get_shape() current_nb_params = get_nb_params_shape(shape) variable_type = trainable_variable.dtype total_nb_params = total_nb_params + current_nb_params total_btypes = total_btypes + current_nb_params * params_bytes(variable_type) info = "Model size: {0}K, Space usage: {1}KB ({2:6.2f}MB)".format(total_nb_params/1000, total_btypes/1000, total_btypes/1000000.0) print(info) return def get_gauss_filter(shape=(7, 7), sigma=1.2): m, n = [(ss-1.)/2. for ss in shape] y, x = np.ogrid[-m:m+1, -n:n+1] h = np.exp(-(x*x + y*y) / (2.*sigma*sigma)) h[h < np.finfo(h.dtype).eps*h.max()] = 0 sumh = h.sum() if sumh != 0: h /= sumh return h def blur(hrlf, psf): blurred_lfimgs = np.zeros_like(hrlf) ws = psf.shape[0] t = int((ws-1) / 2) hrlf = np.concatenate([hrlf[:, :t, :], hrlf, hrlf[:, -t:, :]], axis=1) hrlf = np.concatenate([hrlf[:t, :, :], hrlf, hrlf[-t:, :, :]], axis=0) if hrlf.shape[2] == 3: blurred_lfimgs[:, :, 0] = convolve2d(hrlf[:, :, 0], psf, 'valid') blurred_lfimgs[:, :, 1] = convolve2d(hrlf[:, :, 1], psf, 'valid') blurred_lfimgs[:, :, 2] = convolve2d(hrlf[:, :, 2], psf, 'valid') else: blurred_lfimgs = convolve2d(np.squeeze(hrlf), psf, 'valid') blurred_lfimgs = np.expand_dims(blurred_lfimgs, axis=2) return blurred_lfimgs def downsampling(data, rs=1, ra=1, nSig=1.2): def spatial_downsampling(GT, rate=2): b, h, w, s, t, c = GT.shape psf = get_gauss_filter(shape=(7, 7), sigma=nSig) downsampled = np.zeros_like(GT) for n in range(b): for i in range(s): for j in range(t): downsampled[n, :, :, i, j, :] = blur(GT[n, :, :, i, j, :], psf) downsampled = downsampled[:, ::rate, ::rate, :, :, :] return downsampled def angular_downsampling(GT, rate=2): downsampled = None if rate == 4: GT = GT[:, :, :, :-1, :-1, :] downsampled = GT[:, :, :, 0:8:7, 0:8:7, :] elif rate == 3: downsampled = GT[:, :, :, ::(rate+1), ::(rate+1), :] elif rate == 2: downsampled = GT[:, :, :, ::rate, ::rate, :] elif rate == 0: GT = GT[:, :, :, 1:-1, 1:-1, :] downsampled = GT[:, :, :, ::3, ::3, :] else: assert False, "Unsupported angular downsampling rate: {}.".format(rate) return GT, downsampled if rs != 1 and ra == 1: downsampled = spatial_downsampling(data, rate=rs) return downsampled elif ra != 1 and rs == 1: label, downsampled = angular_downsampling(data, rate=ra) return label, downsampled elif ra != 1 and rs != 1: label, downsampled = angular_downsampling(data, rate=ra) downsampled = spatial_downsampling(downsampled, rate=rs) return label, downsampled else: assert False, "Both spatial and angular downsampling rates are 1." def psnr(img1, img2): img1 = img1.astype(np.float64) img2 = img2.astype(np.float64) mse = np.mean((img1 - img2) ** 2) if mse == 0: return float("inf") PIXEL_MAX = 255.0 return 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) def lfpsnrs(truth4d, recons4d): assert truth4d.shape == recons4d.shape, "The prediction and label should be same size." 
assert truth4d.dtype == "uint8", "The ground truth should be uint8 format within range (0, 255)" assert recons4d.dtype == "uint8", "The inputs should be uint8 format within range (0, 255)" h, w, s, t = np.squeeze(truth4d).shape lfpsnr = np.zeros([s, t]) for i in range(s): for j in range(t): truth = truth4d[:, :, i, j] truth = np.squeeze(truth) recons = recons4d[:, :, i, j] recons = np.squeeze(recons) lfpsnr[i, j] = psnr(truth, recons) meanpsnr = np.mean(lfpsnr) return lfpsnr, meanpsnr def batchmeanpsnr(truth, pred): batchmean_psnr = 0 for i in range(len(pred)): _, meanpsnr = lfpsnrs(np.uint8(truth[i]*255.), np.uint8(pred[i]*255.)) batchmean_psnr += meanpsnr batchmean_psnr = batchmean_psnr / len(pred) return batchmean_psnr def ssim_exact(img1, img2, sd=1.5, C1=0.01**2, C2=0.03**2): mu1 = gaussian_filter(img1, sd) mu2 = gaussian_filter(img2, sd) mu1_sq = mu1 * mu1 mu2_sq = mu2 * mu2 mu1_mu2 = mu1 * mu2 sigma1_sq = gaussian_filter(img1 * img1, sd) - mu1_sq sigma2_sq = gaussian_filter(img2 * img2, sd) - mu2_sq sigma12 = gaussian_filter(img1 * img2, sd) - mu1_mu2 ssim_num = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) ssim_den = ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) ssim_map = ssim_num / ssim_den return np.mean(ssim_map) def lfssims(truth4d, recons4d): assert truth4d.shape == recons4d.shape, 'The prediction and label should be same size.' h, w, s, t, c = truth4d.shape lfssim = np.zeros([s, t]) for i in range(s): for j in range(t): truth = truth4d[:, :, i, j, :] truth = np.squeeze(truth) recons = recons4d[:, :, i, j, :] recons = np.squeeze(recons) lfssim[i, j] = ssim_exact(truth/255., recons/255.) meanssim = np.mean(lfssim) return lfssim, meanssim def batchmeanssim(truth, pred): batchmean_ssim = 0 for i in range(len(pred)): _, meanssim = lfssims(np.uint8(truth[i]*255.), np.uint8(pred[i]*255.)) batchmean_ssim += meanssim batchmean_ssim = batchmean_ssim / len(pred) return batchmean_ssim
MIT License
yoseflab/cassiopeia
cassiopeia/simulator/BirthDeathFitnessSimulator.py
BirthDeathFitnessSimulator.update_fitness
python
def update_fitness(self, birth_scale: float) -> float:
    base_selection_coefficient = 1
    if self.mutation_distribution:
        num_mutations = int(self.mutation_distribution())
        if num_mutations < 0:
            raise TreeSimulatorError(
                "Negative number of mutations detected"
            )
        for _ in range(num_mutations):
            base_selection_coefficient *= (
                self.fitness_base ** self.fitness_distribution()
            )
    return birth_scale * base_selection_coefficient
Updates a lineage birth scale, which represents its fitness.

At each division event, the fitness is updated by sampling from a distribution
determining the number of mutations. The birth scale parameter of the lineage is
then scaled by the total multiplicative coefficient across all mutations and
passed on to the descendant nodes. The multiplicative factor of each mutation is
determined by exponentiating a base parameter by a value drawn from another
'fitness' distribution. Therefore, negative values from the fitness distribution
are valid and down-scale the birth scale parameter. The base determines the base
strength of the mutations in either direction and the fitness distribution
determines how the mutations are distributed.

Args:
    birth_scale: The birth_scale to be updated

Returns:
    The updated birth_scale

Raises:
    TreeSimulatorError if a negative number of mutations is sampled
https://github.com/yoseflab/cassiopeia/blob/6a4479e260a5fbefc663e0cecb7dfd51a4a01376/cassiopeia/simulator/BirthDeathFitnessSimulator.py#L402-L437
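A hedged sketch of how the fitness update plays out, assuming BirthDeathFitnessSimulator is importable from cassiopeia.simulator as the file path suggests; the distributions below are illustrative choices, not the library's defaults:

import numpy as np
from cassiopeia.simulator import BirthDeathFitnessSimulator

sim = BirthDeathFitnessSimulator(
    birth_waiting_distribution=lambda scale: np.random.exponential(scale),
    initial_birth_scale=0.5,
    mutation_distribution=lambda: np.random.poisson(1),   # mutations per division
    fitness_distribution=lambda: np.random.normal(0, 1),  # per-mutation exponent
    fitness_base=2,
    num_extant=16,
)

# Each call rescales the parent's birth scale by fitness_base ** x for every sampled mutation x.
child_scale = sim.update_fitness(0.5)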
from typing import Callable, Dict, Generator, List, Optional, Union import networkx as nx import numpy as np from queue import PriorityQueue from cassiopeia.data.CassiopeiaTree import CassiopeiaTree from cassiopeia.mixins import TreeSimulatorError from cassiopeia.simulator.TreeSimulator import TreeSimulator class BirthDeathFitnessSimulator(TreeSimulator): def __init__( self, birth_waiting_distribution: Callable[[float], float], initial_birth_scale: float, death_waiting_distribution: Optional[ Callable[[], float] ] = lambda: np.inf, mutation_distribution: Optional[Callable[[], int]] = None, fitness_distribution: Optional[Callable[[], float]] = None, fitness_base: float = np.e, num_extant: Optional[int] = None, experiment_time: Optional[float] = None, collapse_unifurcations: bool = True, random_seed: int = None, ): if num_extant is None and experiment_time is None: raise TreeSimulatorError( "Please specify at least one stopping condition" ) if mutation_distribution is not None and fitness_distribution is None: raise TreeSimulatorError( "Please specify a fitness strength distribution" ) if num_extant is not None and num_extant <= 0: raise TreeSimulatorError( "Please specify number of extant lineages greater than 0" ) if num_extant is not None and type(num_extant) is not int: raise TreeSimulatorError( "Please specify an integer number of extant tips" ) if experiment_time is not None and experiment_time <= 0: raise TreeSimulatorError( "Please specify an experiment time greater than 0" ) self.birth_waiting_distribution = birth_waiting_distribution self.initial_birth_scale = initial_birth_scale self.death_waiting_distribution = death_waiting_distribution self.mutation_distribution = mutation_distribution self.fitness_distribution = fitness_distribution self.fitness_base = fitness_base self.num_extant = num_extant self.experiment_time = experiment_time self.collapse_unifurcations = collapse_unifurcations self.random_seed = random_seed def simulate_tree( self, ) -> CassiopeiaTree: def node_name_generator() -> Generator[str, None, None]: i = 0 while True: yield str(i) i += 1 names = node_name_generator() if self.random_seed: np.random.seed(self.random_seed) tree = nx.DiGraph() root = next(names) tree.add_node(root) tree.nodes[root]["birth_scale"] = self.initial_birth_scale tree.nodes[root]["time"] = 0 current_lineages = PriorityQueue() observed_nodes = [] starting_lineage = { "id": root, "birth_scale": self.initial_birth_scale, "total_time": 0, "active": True, } self.sample_lineage_event( starting_lineage, current_lineages, tree, names, observed_nodes ) while not current_lineages.empty(): if self.num_extant: if current_lineages.qsize() == self.num_extant: remaining_lineages = [] while not current_lineages.empty(): _, _, lineage = current_lineages.get() remaining_lineages.append(lineage) min_total_time = remaining_lineages[0]["total_time"] for lineage in remaining_lineages: parent = list(tree.predecessors(lineage["id"]))[0] tree.nodes[lineage["id"]]["time"] += ( min_total_time - lineage["total_time"] ) tree.nodes[lineage["id"]]["birth_scale"] = tree.nodes[ parent ]["birth_scale"] observed_nodes.append(lineage["id"]) break _, _, lineage = current_lineages.get() if lineage["active"]: for _ in range(2): self.sample_lineage_event( lineage, current_lineages, tree, names, observed_nodes ) cassiopeia_tree = CassiopeiaTree(tree=tree) time_dictionary = {} for i in tree.nodes: time_dictionary[i] = tree.nodes[i]["time"] cassiopeia_tree.set_times(time_dictionary) to_remove = list(set(cassiopeia_tree.leaves) - 
set(observed_nodes)) cassiopeia_tree.remove_leaves_and_prune_lineages(to_remove) if self.collapse_unifurcations and len(cassiopeia_tree.nodes) > 1: cassiopeia_tree.collapse_unifurcations(source="1") if len(cassiopeia_tree.nodes) == 1: raise TreeSimulatorError( "All lineages died before stopping condition" ) return cassiopeia_tree def sample_lineage_event( self, lineage: Dict[str, Union[int, float]], current_lineages: PriorityQueue, tree: nx.DiGraph, names: Generator, observed_nodes: List[str], ) -> None: if not lineage["active"]: raise TreeSimulatorError( "Cannot sample event for non-active lineage" ) unique_id = next(names) birth_waiting_time = self.birth_waiting_distribution( lineage["birth_scale"] ) death_waiting_time = self.death_waiting_distribution() if birth_waiting_time <= 0 or death_waiting_time <= 0: raise TreeSimulatorError("0 or negative waiting time detected") if ( self.experiment_time and lineage["total_time"] + birth_waiting_time >= self.experiment_time and lineage["total_time"] + death_waiting_time >= self.experiment_time ): tree.add_node(unique_id) tree.nodes[unique_id]["birth_scale"] = lineage["birth_scale"] tree.add_edge(lineage["id"], unique_id) tree.nodes[unique_id]["time"] = self.experiment_time current_lineages.put( ( self.experiment_time, unique_id, { "id": unique_id, "birth_scale": lineage["birth_scale"], "total_time": self.experiment_time, "active": False, }, ) ) observed_nodes.append(unique_id) else: if birth_waiting_time < death_waiting_time: updated_birth_scale = self.update_fitness( lineage["birth_scale"] ) tree.add_node(unique_id) tree.nodes[unique_id]["birth_scale"] = updated_birth_scale tree.add_edge(lineage["id"], unique_id) tree.nodes[unique_id]["time"] = ( birth_waiting_time + lineage["total_time"] ) current_lineages.put( ( birth_waiting_time + lineage["total_time"], unique_id, { "id": unique_id, "birth_scale": updated_birth_scale, "total_time": birth_waiting_time + lineage["total_time"], "active": True, }, ) ) else: tree.add_node(unique_id) tree.nodes[unique_id]["birth_scale"] = lineage["birth_scale"] tree.add_edge(lineage["id"], unique_id) tree.nodes[unique_id]["time"] = ( death_waiting_time + lineage["total_time"] ) current_lineages.put( ( death_waiting_time + lineage["total_time"], unique_id, { "id": unique_id, "birth_scale": lineage["birth_scale"], "total_time": death_waiting_time + lineage["total_time"], "active": False, }, ) )
MIT License
molecule-one/megan
src/datasets/__init__.py
Dataset.key
python
def key(self) -> str:
    raise NotImplementedError("Abstract method")
:return: key of the dataset as a string. Defines dataset directory.
https://github.com/molecule-one/megan/blob/db2cbcb7baed49dde6b549ffa25a1776958f002d/src/datasets/__init__.py#L43-L47
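A minimal, hypothetical subclass showing how key drives the dataset directory (other abstract or overridable members of Dataset are omitted here):

class ToyReactions(Dataset):          # hypothetical dataset
    @property
    def key(self) -> str:
        return 'toy_reactions'        # data would live under DATA_DIR/toy_reactions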
import os from abc import ABCMeta, abstractmethod import pandas as pd from src import DATA_DIR class Dataset(metaclass=ABCMeta): def __init__(self): super(Dataset, self).__init__() self._create_directories() def _create_directories(self): if not os.path.exists(DATA_DIR): os.mkdir(DATA_DIR) if not os.path.exists(self.dir): os.mkdir(self.dir) if not os.path.exists(self.feat_dir): os.mkdir(self.feat_dir) @property def meta_info(self) -> dict: return {} @property @abstractmethod
MIT License
mingtzge/2019-ccf-bdci-ocr-mczj-ocr-identificationidelement
pytorch-CycleGAN-and-pix2pix/scripts/eval_cityscapes/cityscapes.py
cityscapes.preprocess
python
def preprocess(self, im):
    in_ = np.array(im, dtype=np.float32)
    in_ = in_[:, :, ::-1]
    in_ -= self.mean
    in_ = in_.transpose((2, 0, 1))
    return in_
Preprocess loaded image (by load_image) for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
https://github.com/mingtzge/2019-ccf-bdci-ocr-mczj-ocr-identificationidelement/blob/42ad9686f3c3bde0d29a8bc6bcb0e3afb35fb3c3/pytorch-CycleGAN-and-pix2pix/scripts/eval_cityscapes/cityscapes.py#L66-L78
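A hedged example of preparing one frame for a Caffe-style network, assuming the cityscapes class above is in scope; the dataset root and frame id are hypothetical:

ds = cityscapes('/data/cityscapes')                        # hypothetical root
im = ds.load_image('val', 'frankfurt', 'frankfurt_000000_000294')
blob = ds.preprocess(im)
print(blob.shape, blob.dtype)                              # (3, H, W) float32, BGR, mean-subtracted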
import sys import os import glob import numpy as np from PIL import Image class cityscapes: def __init__(self, data_path): self.dir = data_path self.classes = ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle'] self.mean = np.array((72.78044, 83.21195, 73.45286), dtype=np.float32) sys.path.insert(0, '{}/scripts/helpers/'.format(self.dir)) labels = __import__('labels') self.id2trainId = {label.id: label.trainId for label in labels.labels} self.trainId2color = {label.trainId: label.color for label in labels.labels} def get_dset(self, split): if split == 'train': dataset = open('{}/ImageSets/segFine/train.txt'.format(self.dir)).read().splitlines() else: dataset = open('{}/ImageSets/segFine/val.txt'.format(self.dir)).read().splitlines() return [(item.split('/')[0], item.split('/')[1]) for item in dataset] def load_image(self, split, city, idx): im = Image.open('{}/leftImg8bit_sequence/{}/{}/{}_leftImg8bit.png'.format(self.dir, split, city, idx)) return im def assign_trainIds(self, label): label = np.array(label, dtype=np.float32) if sys.version_info[0] < 3: for k, v in self.id2trainId.iteritems(): label[label == k] = v else: for k, v in self.id2trainId.items(): label[label == k] = v return label def load_label(self, split, city, idx): label = Image.open('{}/gtFine/{}/{}/{}_gtFine_labelIds.png'.format(self.dir, split, city, idx)) label = self.assign_trainIds(label) label = np.array(label, dtype=np.uint8) label = label[np.newaxis, ...] return label
MIT License
guofei9987/scikit-opt
sko/operators/crossover.py
crossover_pmx
python
def crossover_pmx(self):
    Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom
    for i in range(0, size_pop, 2):
        Chrom1, Chrom2 = self.Chrom[i], self.Chrom[i + 1]
        cxpoint1, cxpoint2 = np.random.randint(0, self.len_chrom - 1, 2)
        if cxpoint1 >= cxpoint2:
            cxpoint1, cxpoint2 = cxpoint2, cxpoint1 + 1
        pos1_recorder = {value: idx for idx, value in enumerate(Chrom1)}
        pos2_recorder = {value: idx for idx, value in enumerate(Chrom2)}
        for j in range(cxpoint1, cxpoint2):
            value1, value2 = Chrom1[j], Chrom2[j]
            pos1, pos2 = pos1_recorder[value2], pos2_recorder[value1]
            Chrom1[j], Chrom1[pos1] = Chrom1[pos1], Chrom1[j]
            Chrom2[j], Chrom2[pos2] = Chrom2[pos2], Chrom2[j]
            pos1_recorder[value1], pos1_recorder[value2] = pos1, j
            pos2_recorder[value1], pos2_recorder[value2] = j, pos2
        self.Chrom[i], self.Chrom[i + 1] = Chrom1, Chrom2
    return self.Chrom
Executes a partially matched crossover (PMX) on Chrom.
For more details see [Goldberg1985]_.

:param self:
:return:

.. [Goldberg1985] Goldberg and Lingel, "Alleles, loci, and the
   traveling salesman problem", 1985.
https://github.com/guofei9987/scikit-opt/blob/d9582ef59b75906890b64cba7c7f01a812236177/sko/operators/crossover.py#L73-L102
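Inside scikit-opt this operator is normally attached to a GA instance, but its behavior can be sketched with a small stand-in object (a sketch, not the library's intended entry point):

import numpy as np
from types import SimpleNamespace
from sko.operators.crossover import crossover_pmx

pop = np.array([np.random.permutation(8) for _ in range(4)])   # 4 tours of 8 cities
ga = SimpleNamespace(Chrom=pop, size_pop=4, len_chrom=8)        # minimal GA-like stand-in

children = crossover_pmx(ga)
# PMX keeps every chromosome a valid permutation of 0..7
assert all(sorted(row.tolist()) == list(range(8)) for row in children)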
import numpy as np __all__ = ['crossover_1point', 'crossover_2point', 'crossover_2point_bit', 'crossover_pmx', 'crossover_2point_prob'] def crossover_1point(self): Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom for i in range(0, size_pop, 2): n = np.random.randint(0, self.len_chrom) seg1, seg2 = self.Chrom[i, n:].copy(), self.Chrom[i + 1, n:].copy() self.Chrom[i, n:], self.Chrom[i + 1, n:] = seg2, seg1 return self.Chrom def crossover_2point(self): Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom for i in range(0, size_pop, 2): n1, n2 = np.random.randint(0, self.len_chrom, 2) if n1 > n2: n1, n2 = n2, n1 seg1, seg2 = self.Chrom[i, n1:n2].copy(), self.Chrom[i + 1, n1:n2].copy() self.Chrom[i, n1:n2], self.Chrom[i + 1, n1:n2] = seg2, seg1 return self.Chrom def crossover_2point_bit(self): Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom half_size_pop = int(size_pop / 2) Chrom1, Chrom2 = Chrom[:half_size_pop], Chrom[half_size_pop:] mask = np.zeros(shape=(half_size_pop, len_chrom), dtype=int) for i in range(half_size_pop): n1, n2 = np.random.randint(0, self.len_chrom, 2) if n1 > n2: n1, n2 = n2, n1 mask[i, n1:n2] = 1 mask2 = (Chrom1 ^ Chrom2) & mask Chrom1 ^= mask2 Chrom2 ^= mask2 return self.Chrom def crossover_2point_prob(self, crossover_prob): Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom for i in range(0, size_pop, 2): if np.random.rand() < crossover_prob: n1, n2 = np.random.randint(0, self.len_chrom, 2) if n1 > n2: n1, n2 = n2, n1 seg1, seg2 = self.Chrom[i, n1:n2].copy(), self.Chrom[i + 1, n1:n2].copy() self.Chrom[i, n1:n2], self.Chrom[i + 1, n1:n2] = seg2, seg1 return self.Chrom
MIT License
tulsawebdevs/django-multi-gtfs
multigtfs/models/fields/seconds.py
SecondsField.value_to_string
python
def value_to_string(self, obj):
    value = self.value_from_object(obj)
    return value.__str__()
Convert to HH:MM:SS format.
https://github.com/tulsawebdevs/django-multi-gtfs/blob/345b39eb7f575961fe4f8d8f1fc83044ab789080/multigtfs/models/fields/seconds.py#L124-L127
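The serialized form comes from Seconds.__str__, which this field method delegates to; a small sketch:

from multigtfs.models.fields.seconds import Seconds

print(str(Seconds(90)))                                        # '00:01:30'
print(str(Seconds.from_hms(hours=25, minutes=4, seconds=5)))   # '25:04:05' (GTFS times can pass midnight)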
from __future__ import unicode_literals from django.db.models import Field from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Seconds(object): def __init__(self, seconds=0): self.seconds = int(seconds) if self.seconds < 0: raise ValueError('seconds must be positive') @classmethod def from_hms(cls, hours=0, minutes=0, seconds=0): return Seconds((hours * 60 * 60) + (minutes * 60) + seconds) def __str__(self): minutes, seconds = divmod(self.seconds, 60) hours, minutes = divmod(minutes, 60) return "%02d:%02d:%02d" % (hours, minutes, seconds) def _compare(self, other, method): try: return method(self.seconds, other.seconds) except (AttributeError, TypeError): return NotImplemented def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) class SecondsField(Field): description = 'Seconds since start of the day' def from_db_value(self, value, expression, connection, context): if value is None: return value return self.parse_seconds(value) def to_python(self, value): if isinstance(value, Seconds): return value if value in self.empty_values: return None return self.parse_seconds(value) @staticmethod def parse_seconds(value): svalue = str(value) colons = svalue.count(':') if colons == 2: hours, minutes, seconds = [int(v) for v in svalue.split(':')] elif colons == 1: hours, minutes = [int(v) for v in svalue.split(':')] seconds = 0 elif colons == 0: hours = 0 minutes = 0 seconds = int(svalue) else: raise ValueError('Must be in seconds or HH:MM:SS format') return Seconds.from_hms(hours, minutes, seconds) def get_prep_value(self, value): if isinstance(value, Seconds): return value.seconds elif value: return self.parse_seconds(value).seconds else: return None def get_internal_type(self): return 'IntegerField'
Apache License 2.0
thesimj/envyaml
envyaml/envyaml.py
EnvYAML.__read_env_file
python
def __read_env_file(file_path, strict):
    config = dict()
    defined = set()

    if file_path:
        with io.open(file_path, encoding="utf8") as f:
            content = f.read()
            for entry in RE_DOT_ENV.finditer(content):
                name = entry.group("name")
                value = entry.group("value")

                if name in config:
                    defined.add(name)

                config[name] = value

    if strict and defined:
        raise ValueError(
            "Strict mode enabled, variables "
            + ", ".join(["$" + v for v in defined])
            + " defined several times!"
        )

    return config
Read and parse an env file.

:param str file_path: path to file
:param bool strict: strict mode
:return: dict
https://github.com/thesimj/envyaml/blob/571cc41811e0ca0b352433ee15ace1152d23fdd7/envyaml/envyaml.py#L161-L195
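This is a private helper; from the outside the .env parsing is exercised through the EnvYAML constructor. A hedged sketch with hypothetical file names and contents:

from envyaml import EnvYAML

# .env (hypothetical contents):
#   DB_HOST=localhost
#   DB_PASS='s3cret'
config = EnvYAML('config.yaml', env_file='.env')   # hypothetical file names
print(config.get('DB_HOST'))                        # 'localhost'

# With strict mode enabled (the default), defining the same variable twice in .env
# raises ValueError("Strict mode enabled, variables $DB_HOST defined several times!").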
import io import os import re try: from yaml import safe_load except ImportError: safe_load = None RE_COMMENTS = re.compile(r"(^#.*\n)", re.MULTILINE | re.UNICODE | re.IGNORECASE) RE_DOT_ENV = re.compile( r"^(?!\d+)(?P<name>[\w\-\.]+)\=[\"\']?(?P<value>(.*?))[\"\']?$", re.MULTILINE | re.UNICODE | re.IGNORECASE, ) RE_PATTERN = re.compile( r"(?P<pref>[\"\'])?" r"(\$(?:(?P<escaped>(\$|\d+))|" r"{(?P<braced>(.*?))(\|(?P<braced_default>.*?))?}|" r"(?P<named>[\w\-\.]+)(\|(?P<named_default>.*))?))" r"(?P<post>[\"\'])?", re.MULTILINE | re.UNICODE | re.IGNORECASE | re.VERBOSE, ) __version__ = "1.9.210927" class EnvYAML: __version__ = __version__ ENVYAML_STRICT_DISABLE = "ENVYAML_STRICT_DISABLE" DEFAULT_ENV_YAML_FILE = "env.yaml" DEFAULT_ENV_FILE = ".env" __env_file = None __yaml_file = None __cfg = None __strict = True def __init__( self, yaml_file=None, env_file=None, include_environment=True, strict=True, flatten=True, **kwargs ): if safe_load is None: raise ModuleNotFoundError( 'EnvYAML require "pyyaml >= 5" module to work. ' "Consider install this module into environment!" ) self.__cfg = dict(os.environ) if include_environment else {} self.__strict = False if self.ENVYAML_STRICT_DISABLE in self.__cfg else strict self.__env_file = env_file self.__yaml_file = yaml_file self.__cfg.update( self.__read_env_file( self.__get_file_path(env_file, "ENV_FILE", self.DEFAULT_ENV_FILE), self.__strict, ) ) self.__cfg.update(kwargs) yaml_config = self.__read_yaml_file( self.__get_file_path( yaml_file, "ENV_YAML_FILE", self.DEFAULT_ENV_YAML_FILE ), self.__cfg, self.__strict, ) if isinstance(yaml_config, list): self.__cfg.update({k: v for k, v in enumerate(yaml_config)}) else: self.__cfg.update(yaml_config) if flatten: self.__cfg = self.__flat(self.__cfg) def get(self, key, default=None): return self.__cfg.get(key, default) def export(self): return self.__cfg.copy() @staticmethod def environ(): return os.environ @staticmethod
MIT License
mbk-dev/okama
okama/api/data_queries.py
QueryData.get_nav
python
def get_nav(
    symbol: str,
    first_date: str = "1913-01-01",
    last_date: str = "2100-01-01",
    period="M",
) -> pd.Series:
    csv_input = API.get_nav(
        symbol=symbol, first_date=first_date, last_date=last_date, period=period
    )
    return QueryData.csv_to_series(csv_input, period=period)
NAV time series for funds (works for PIF namespace only).
https://github.com/mbk-dev/okama/blob/3e6589784644d05448654cd8580311d8d1eac9bf/okama/api/data_queries.py#L68-L80
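A hedged call sketch; the fund ticker below is hypothetical and must belong to the PIF namespace, as the docstring notes:

from okama.api.data_queries import QueryData

nav = QueryData.get_nav(
    "0165-70287767.PIF",               # hypothetical open-end fund symbol
    first_date="2015-01-01",
    last_date="2020-12-31",
    period="M",
)
print(nav.tail())                      # monthly NAV values indexed by pandas Period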
from typing import Dict from io import StringIO import json import pandas as pd import numpy as np from .api_methods import API from .namespaces import no_dividends_namespaces class QueryData: @staticmethod def get_symbol_info(symbol: str) -> Dict[str, str]: json_input = API.get_symbol_info(symbol) return json.loads(json_input) @staticmethod def csv_to_series(csv_input: str, period: str) -> pd.Series: ts = pd.read_csv( StringIO(csv_input), delimiter=",", index_col=0, parse_dates=[0], dtype={1: np.float64}, engine="python", ) if not ts.empty: ts.index = ts.index.to_period(period.upper()) ts = ts.squeeze("columns") return ts @staticmethod def get_macro_ts( symbol: str, first_date: str = "1913-01-01", last_date: str = "2100-01-01" ) -> pd.Series: csv_input = API.get_macro( symbol=symbol, first_date=first_date, last_date=last_date ) return QueryData.csv_to_series(csv_input, period="M") @staticmethod def get_ror( symbol: str, first_date: str = "1913-01-01", last_date: str = "2100-01-01", period="M", ) -> pd.Series: csv_input = API.get_ror( symbol=symbol, first_date=first_date, last_date=last_date, period=period ) return QueryData.csv_to_series(csv_input, period) @staticmethod
MIT License
tlc-pack/tenset
python/tvm/relay/op/strategy/generic.py
multibox_prior_strategy
python
def multibox_prior_strategy(attrs, inputs, out_type, target):
    strategy = _op.OpStrategy()
    strategy.add_implementation(
        wrap_compute_multibox_prior(topi.vision.ssd.multibox_prior),
        wrap_topi_schedule(topi.generic.schedule_multibox_prior),
        name="multibox_prior.generic",
    )
    return strategy
multibox_prior generic strategy
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/python/tvm/relay/op/strategy/generic.py#L935-L943
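Backend-specific strategy files override this generic function per target; the following CUDA-flavored sketch mirrors that pattern, with the topi function names assumed rather than verified here:

@multibox_prior_strategy.register(["cuda", "gpu"])
def multibox_prior_strategy_cuda(attrs, inputs, out_type, target):
    strategy = _op.OpStrategy()
    strategy.add_implementation(
        wrap_compute_multibox_prior(topi.cuda.multibox_prior),       # assumed compute
        wrap_topi_schedule(topi.cuda.schedule_multibox_prior),       # assumed schedule
        name="multibox_prior.cuda",
    )
    return strategy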
import logging import re from tvm import topi, _ffi, te, ir from tvm.topi.utils import get_const_int, get_const_float, get_const_tuple, get_float_tuple from tvm.target import generic_func, override_native_generic_func from .. import op as _op logger = logging.getLogger("strategy") def naive_schedule(_, outs, target): if "gpu" in target.keys: logger.debug( "Cannot compile for GPU targets if no tuned schedule is found. " "Please see the warning messages above for more information about the failed workloads." ) return te.create_schedule(outs[-1].op) def wrap_topi_schedule(topi_schedule): def wrapper(attrs, outs, target): with target: return topi_schedule(outs) return wrapper def get_conv2d_in_channels(data_shape, data_layout): data_shape = get_const_tuple(data_shape) if len(data_shape) == 4: idx = data_layout.find("C") assert idx >= 0, "Invalid conv2d data layout {}".format(data_layout) return data_shape[idx] if re.match(r"NCHW\d*c", data_layout): return data_shape[1] * data_shape[4] raise ValueError("Unknown conv2d data layout {}".format(data_layout)) def get_conv2d_out_channels(kernel_shape, kernel_layout): kernel_shape = get_const_tuple(kernel_shape) if len(kernel_shape) == 4: idx = kernel_layout.find("O") assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout) return kernel_shape[idx] if re.match(r"OIHW\d*i\d*o", kernel_layout): return kernel_shape[0] * kernel_shape[5] if re.match(r"OIHW\d*o", kernel_layout): return kernel_shape[0] * kernel_shape[4] raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout)) def is_depthwise_conv2d(data_shape, data_layout, kernel_shape, kernel_layout, groups): if len(kernel_shape) > 4: return False ic = get_conv2d_in_channels(data_shape, data_layout) oc = get_conv2d_out_channels(kernel_shape, kernel_layout) return ic == oc == groups @generic_func def schedule_injective(attrs, outs, target): with target: return topi.generic.schedule_injective(outs) @generic_func def schedule_reduce(attrs, outs, target): with target: return topi.generic.schedule_reduce(outs) _op._schedule_injective = schedule_injective _op._schedule_reduce = schedule_reduce @generic_func def schedule_concatenate(attrs, outs, target): with target: return topi.generic.schedule_injective(outs) @generic_func def schedule_pool(attrs, outs, target): with target: return topi.generic.schedule_pool(outs, attrs.layout) @generic_func def schedule_pool_grad(attrs, outs, target): with target: return topi.generic.schedule_pool_grad(outs) @generic_func def schedule_adaptive_pool(attrs, outs, target): with target: return topi.generic.schedule_adaptive_pool(outs) def wrap_compute_softmax(topi_compute): def _compute_softmax(attrs, inputs, out_type): axis = attrs.get_int("axis") return [topi_compute(inputs[0], axis)] return _compute_softmax @override_native_generic_func("softmax_strategy") def softmax_strategy(attrs, inputs, out_type, target): strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_softmax(topi.nn.softmax), wrap_topi_schedule(topi.generic.schedule_softmax), name="softmax.generic", ) return strategy @override_native_generic_func("fast_softmax_strategy") def fast_softmax_strategy(attrs, inputs, out_type, target): strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_softmax(topi.nn.fast_softmax), naive_schedule, name="fast_softmax.generic", ) return strategy @generic_func def schedule_log_softmax(attrs, outs, target): with target: return topi.generic.schedule_softmax(outs) @generic_func def schedule_lrn(attrs, outs, target): with 
target: return topi.generic.schedule_lrn(outs) @generic_func def schedule_bitpack(attrs, outs, target): with target: return topi.generic.schedule_bitpack(outs) get_auto_scheduler_rewritten_layout = _ffi.get_global_func( "relay.attrs.get_auto_scheduler_rewritten_layout" ) def wrap_compute_conv2d( topi_compute, need_data_layout=False, need_out_layout=False, has_groups=False, need_auto_scheduler_layout=False, ): def _compute_conv2d(attrs, inputs, out_type): padding = get_const_tuple(attrs.padding) strides = get_const_tuple(attrs.strides) dilation = get_const_tuple(attrs.dilation) data_layout = attrs.get_str("data_layout") out_layout = attrs.get_str("out_layout") out_dtype = attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype args = [inputs[0], inputs[1], strides, padding, dilation] if has_groups: args.append(attrs.groups) if need_data_layout: args.append(data_layout) if need_out_layout: args.append(out_layout) args.append(out_dtype) if need_auto_scheduler_layout: args.append(get_auto_scheduler_rewritten_layout(attrs)) return [topi_compute(*args)] return _compute_conv2d @override_native_generic_func("conv2d_strategy") def conv2d_strategy(attrs, inputs, out_type, target): logger.warning("conv2d is not optimized for this platform.") strategy = _op.OpStrategy() data, kernel = inputs dilation = get_const_tuple(attrs.dilation) groups = attrs.groups layout = attrs.data_layout kernel_layout = attrs.kernel_layout (dilation_h, dilation_w) = dilation if dilation_h < 1 or dilation_w < 1: raise ValueError("dilation should be positive value") if groups == 1: if layout == "NCHW": assert kernel_layout == "OIHW" strategy.add_implementation( wrap_compute_conv2d(topi.nn.conv2d_nchw), wrap_topi_schedule(topi.generic.schedule_conv2d_nchw), name="conv2d_nchw.generic", ) elif layout == "NHWC": assert kernel_layout == "HWIO" strategy.add_implementation( wrap_compute_conv2d(topi.nn.conv2d_nhwc), wrap_topi_schedule(topi.generic.schedule_conv2d_nhwc), name="conv2d_nhwc.generic", ) elif layout == "HWCN": assert kernel_layout == "HWIO" strategy.add_implementation( wrap_compute_conv2d(topi.nn.conv2d_hwcn), wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn), name="conv2d_hwcn.generic", ) else: raise RuntimeError("Unsupported conv2d layout {}".format(layout)) elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): if layout == "NCHW": assert kernel_layout == "OIHW" strategy.add_implementation( wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw), wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw), name="depthwise_conv2d_nchw.generic", ) elif layout == "NHWC": assert kernel_layout == "HWOI" strategy.add_implementation( wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc), wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc), name="depthwise_conv2d_nhwc.generic", ) else: raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) else: if layout == "NCHW": assert kernel_layout == "OIHW" strategy.add_implementation( wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True), wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw), name="group_conv2d_nchw.generic", ) elif layout == "NHWC": assert kernel_layout == "HWIO" strategy.add_implementation( wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True), wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc), name="group_conv2d_nhwc.generic", ) else: raise RuntimeError("Unsupported group_conv2d layout {}".format(layout)) return strategy 
@override_native_generic_func("conv2d_NCHWc_strategy") def conv2d_NCHWc_strategy(attrs, inputs, out_type, target): logger.warning("conv2d_NCHWc is not optimized for this platform.") strategy = _op.OpStrategy() if inputs[0].dtype == "int8" or inputs[0].dtype == "uint8": strategy.add_implementation( wrap_compute_conv2d(topi.nn.conv2d_NCHWc_int8, True, True), wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc_int8), name="conv2d_NCHWc_int8.generic", ) else: strategy.add_implementation( wrap_compute_conv2d(topi.nn.conv2d_NCHWc, True, True), wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc), name="conv2d_NCHWc.generic", ) return strategy @override_native_generic_func("depthwise_conv2d_NCHWc_strategy") def depthwise_conv2d_NCHWc_strategy(attrs, inputs, out_type, target): logger.warning("depthwise_conv2d_NCHWc is not optimized for this platform.") strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_conv2d(topi.nn.depthwise_conv2d_NCHWc, True, True), wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_NCHWc), name="depthwise_conv2d_NCHWc.generic", ) return strategy @override_native_generic_func("conv2d_winograd_without_weight_transform_strategy") def conv2d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target): raise ValueError("No generic implemenation for conv2d_winograd_without_weight_transform") @override_native_generic_func("conv2d_gemm_without_weight_transform_strategy") def conv2d_gemm_without_weight_transform_strategy(attrs, inputs, out_type, target): raise ValueError("No generic implemenation for conv2d_gemm_without_weight_transform") @generic_func def schedule_conv2d_winograd_weight_transform(attrs, outs, target): with target: return topi.generic.schedule_conv2d_winograd_weight_transform(outs) @generic_func def schedule_conv2d_winograd_nnpack_weight_transform(attrs, outs, target): with target: return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs) @generic_func def schedule_conv2d_gemm_weight_transform(attrs, outs, target): with target: return topi.generic.schedule_conv2d_gemm_weight_transform(outs) def wrap_compute_deformable_conv2d(topi_compute): def _compute_deformable_conv2d(attrs, inputs, out_dtype): padding = get_const_tuple(attrs.padding) strides = get_const_tuple(attrs.strides) dilation = get_const_tuple(attrs.dilation) deformable_groups = attrs.deformable_groups groups = attrs.groups out_dtype = attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype out = topi_compute( inputs[0], inputs[1], inputs[2], strides, padding, dilation, deformable_groups, groups, out_dtype, ) return [out] return _compute_deformable_conv2d @override_native_generic_func("deformable_conv2d_strategy") def deformable_conv2d_strategy(attrs, inputs, out_type, target): layout = attrs.data_layout strategy = _op.OpStrategy() if layout == "NCHW": strategy.add_implementation( wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nchw), wrap_topi_schedule(topi.generic.schedule_deformable_conv2d_nchw), name="deformable_conv2d_nchw.generic", ) elif layout == "NHWC": strategy.add_implementation( wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc), naive_schedule, name="deformable_conv2d_nhwc.generic", ) else: raise RuntimeError("Layout %s is not supported in deformable conv2d" % layout) return strategy def wrap_compute_conv2d_transpose(topi_compute): def compute_conv2d_transpose(attrs, inputs, out_dtype): padding = get_const_tuple(attrs.padding) strides = get_const_tuple(attrs.strides) out_dtype = 
attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype output_padding = get_const_tuple(attrs.output_padding) out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding) return [out] return compute_conv2d_transpose @override_native_generic_func("conv2d_transpose_strategy") def conv2d_transpose_strategy(attrs, inputs, out_type, target): logger.warning("conv2d_transpose is not optimized for this platform.") layout = attrs.data_layout dilation = get_const_tuple(attrs.dilation) groups = attrs.groups assert layout == "NCHW", "only support nchw for now" assert dilation == (1, 1), "not support dilate now" assert groups == 1, "only support groups == 1 for now" strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw), wrap_topi_schedule(topi.generic.schedule_conv2d_transpose_nchw), name="conv2d_transpose_nchw.generic", ) return strategy def wrap_compute_conv3d_transpose(topi_compute): def compute_conv3d_transpose(attrs, inputs, out_dtype): padding = get_const_tuple(attrs.padding) strides = get_const_tuple(attrs.strides) output_padding = get_const_tuple(attrs.output_padding) out_dtype = attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding) return [out] return compute_conv3d_transpose @override_native_generic_func("conv3d_transpose_strategy") def conv3d_transpose_strategy(attrs, inputs, out_type, target): logger.warning("conv3d_transpose is not optimized for this platform.") layout = attrs.data_layout dilation = get_const_tuple(attrs.dilation) groups = attrs.groups assert layout == "NCDHW", "only support ncdhw for now" assert dilation == (1, 1, 1), "not support dilate now" assert groups == 1, "only support groups == 1 for now" strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_conv3d_transpose(topi.nn.conv3d_transpose_ncdhw), wrap_topi_schedule(topi.generic.schedule_conv3d_transpose_ncdhw), name="conv3d_transpose_ncdhw.generic", ) return strategy def wrap_compute_conv3d(topi_compute, need_layout=False, need_auto_scheduler_layout=False): def _compute_conv3d(attrs, inputs, out_type): padding = get_const_tuple(attrs.padding) strides = get_const_tuple(attrs.strides) dilation = get_const_tuple(attrs.dilation) groups = attrs.groups layout = attrs.data_layout out_dtype = attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype (dilation_d, dilation_h, dilation_w) = dilation if dilation_d < 1 or dilation_h < 1 or dilation_w < 1: raise ValueError("Dilation should be positive value") if groups != 1: raise ValueError("Not support arbitrary group number for conv3d") args = [inputs[0], inputs[1], strides, padding, dilation] if need_layout: args.append(layout) args.append(out_dtype) if need_auto_scheduler_layout: args.append(get_auto_scheduler_rewritten_layout(attrs)) return [topi_compute(*args)] return _compute_conv3d @override_native_generic_func("conv3d_strategy") def conv3d_strategy(attrs, inputs, out_type, target): logger.warning("conv3d is not optimized for this platform.") strategy = _op.OpStrategy() layout = attrs.data_layout if layout == "NCDHW": strategy.add_implementation( wrap_compute_conv3d(topi.nn.conv3d_ncdhw), wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw), name="conv3d_ncdhw.generic", ) elif layout == "NDHWC": strategy.add_implementation( wrap_compute_conv3d(topi.nn.conv3d_ndhwc), 
wrap_topi_schedule(topi.generic.schedule_conv3d_ndhwc), name="conv3d_ndhwc.generic", ) else: raise ValueError("Not support this layout {} yet".format(layout)) return strategy @override_native_generic_func("conv3d_winograd_without_weight_transform_strategy") def conv3d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target): raise ValueError("No generic implemenation for conv3d_winograd_without_weight_transform") @generic_func def schedule_conv3d_winograd_weight_transform(attrs, outs, target): with target: return topi.generic.schedule_conv3d_winograd_weight_transform(outs) def wrap_compute_conv1d(topi_compute): def _compute_conv1d(attrs, inputs, out_type): strides = get_const_tuple(attrs.strides) padding = get_const_tuple(attrs.padding) dilation = get_const_tuple(attrs.dilation) out_dtype = attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype return [topi_compute(inputs[0], inputs[1], strides, padding, dilation, out_dtype)] return _compute_conv1d @override_native_generic_func("conv1d_strategy") def conv1d_strategy(attrs, inputs, out_type, target): logger.warning("conv1d is not optimized for this platform.") layout = attrs.data_layout dilation = get_const_tuple(attrs.dilation) if dilation[0] < 1: raise ValueError("dilation should be a positive value") strategy = _op.OpStrategy() if layout == "NCW": strategy.add_implementation( wrap_compute_conv1d(topi.nn.conv1d_ncw), wrap_topi_schedule(topi.generic.schedule_conv1d_ncw), name="conv1d_ncw.generic", ) elif layout == "NWC": strategy.add_implementation( wrap_compute_conv1d(topi.nn.conv1d_nwc), wrap_topi_schedule(topi.generic.schedule_conv1d_nwc), name="conv1d_nwc.generic", ) else: raise ValueError("Unsupported conv1d layout {}".format(layout)) return strategy def wrap_compute_conv1d_transpose(topi_compute): def _compute_conv1d_tranpsoe(attrs, inputs, out_type): padding = get_const_tuple(attrs.padding) strides = get_const_tuple(attrs.strides) out_dtype = attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype output_padding = get_const_tuple(attrs.output_padding) out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding) return [out] return _compute_conv1d_tranpsoe @override_native_generic_func("conv1d_transpose_strategy") def conv1d_transpose_strategy(attrs, inputs, out_type, target): logger.warning("conv1d_transpose is not optimized for this platform.") strategy = _op.OpStrategy() layout = attrs.data_layout dilation = get_const_tuple(attrs.dilation) groups = attrs.groups assert layout == "NCW", "conv1d_transpose ncw only supported" assert dilation == (1,), "conv1d_transpose dilation is not supported" assert groups == 1, "conv1d_transpose groups == 1 only supported" strategy.add_implementation( wrap_compute_conv1d_transpose(topi.nn.conv1d_transpose_ncw), wrap_topi_schedule(topi.generic.schedule_conv1d_transpose_ncw), name="conv1d_transpose_ncw.generic", ) return strategy def wrap_compute_dilation2d(topi_compute, need_data_layout=False): def _compute_dilation2d(attrs, inputs, out_type): padding = get_const_tuple(attrs.padding) strides = get_const_tuple(attrs.strides) dilations = get_const_tuple(attrs.dilations) data_layout = attrs.get_str("data_layout") out_dtype = attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype args = [inputs[0], inputs[1], strides, padding, dilations] if need_data_layout: args.append(data_layout) args.append(out_dtype) return [topi_compute(*args)] return _compute_dilation2d 
@override_native_generic_func("dilation2d_strategy") def dilation2d_strategy(attrs, inputs, out_type, target): logger.warning("dilation2d_strategy is not optimized for this platform.") strategy = _op.OpStrategy() dilations = get_const_tuple(attrs.dilations) layout = attrs.data_layout kernel_layout = attrs.kernel_layout assert layout in ["NCHW", "NHWC"] (dilation_h, dilation_w) = dilations if dilation_h < 1 or dilation_w < 1: raise ValueError("dilation should be positive value") if layout == "NCHW": assert kernel_layout == "IHW" strategy.add_implementation( wrap_compute_dilation2d(topi.image.dilation2d_nchw), wrap_topi_schedule(topi.generic.schedule_dilation2d_nchw), name="dilation2d_nchw.generic", ) elif layout == "NHWC": assert kernel_layout == "HWI" strategy.add_implementation( wrap_compute_dilation2d(topi.image.dilation2d_nhwc), wrap_topi_schedule(topi.generic.schedule_dilation2d_nhwc), name="dilation2d_nhwc.generic", ) else: raise RuntimeError("Unsupported dilation2d layout {}".format(layout)) return strategy def wrap_compute_dense(topi_compute, need_auto_scheduler_layout=False): def _compute_dense(attrs, inputs, out_type): out_dtype = attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype args = [inputs[0], inputs[1], None, out_dtype] if need_auto_scheduler_layout: args.append(get_auto_scheduler_rewritten_layout(attrs)) return [topi_compute(*args)] return _compute_dense @override_native_generic_func("dense_strategy") def dense_strategy(attrs, inputs, out_type, target): logger.warning("dense is not optimized for this platform.") strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_dense(topi.nn.dense), wrap_topi_schedule(topi.generic.schedule_dense), name="dense.generic", ) return strategy @override_native_generic_func("dense_pack_strategy") def dense_pack_strategy(attrs, inputs, out_type, target): logger.warning("dense_pack is not optimized for this platform.") strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_dense(topi.nn.dense_pack), wrap_topi_schedule(topi.generic.schedule_dense), name="dense_pack.generic", ) return strategy def wrap_compute_batch_matmul(topi_compute, need_auto_scheduler_layout=False): def _compute_batch_matmul(attrs, inputs, out_type): args = [inputs[0], inputs[1], out_type.shape] if need_auto_scheduler_layout: args.append(get_auto_scheduler_rewritten_layout(attrs)) return [topi_compute(*args)] return _compute_batch_matmul @override_native_generic_func("batch_matmul_strategy") def batch_matmul_strategy(attrs, inputs, out_type, target): logger.warning("batch_matmul is not optimized for this platform.") strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_batch_matmul(topi.nn.batch_matmul), wrap_topi_schedule(topi.generic.schedule_batch_matmul), name="batch_matmul.generic", ) return strategy def wrap_compute_sparse_dense(topi_compute): def _compute_sparse_dense(attrs, inputs, out_type): return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3], attrs["sparse_lhs"])] return _compute_sparse_dense @override_native_generic_func("sparse_dense_strategy") def sparse_dense_strategy(attrs, inputs, out_type, target): logger.warning("sparse dense is not optimized for this platform.") strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_sparse_dense(topi.nn.sparse_dense), wrap_topi_schedule(topi.generic.schedule_sparse_dense), name="sparse_dense.generic", ) return strategy @override_native_generic_func("sparse_dense_padded_strategy") def sparse_dense_padded_strategy(attrs, 
inputs, out_type, target): raise NotImplementedError("sparse_dense_padded is only implemented for cuda") def wrap_compute_sparse_add(topi_compute): def _compute_sparse_add(attrs, inputs, out_type): return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3])] return _compute_sparse_add @override_native_generic_func("sparse_add_strategy") def sparse_add_strategy(attrs, inputs, out_type, target): logger.warning("sparse add is not optimized for this platform.") strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_sparse_add(topi.nn.sparse_add), wrap_topi_schedule(topi.generic.schedule_extern), name="sparse_add.generic", ) return strategy @generic_func def schedule_sparse_transpose(attrs, outs, target): with target: return topi.generic.schedule_sparse_transpose(outs) def wrap_compute_sort(topi_compute): def _compute_sort(attrs, inputs, _): axis = get_const_int(attrs.axis) is_ascend = bool(get_const_int(attrs.is_ascend)) return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend)] return _compute_sort @override_native_generic_func("sort_strategy") def sort_strategy(attrs, inputs, out_type, target): strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_sort(topi.sort), wrap_topi_schedule(topi.generic.schedule_sort), name="sort.generic", ) return strategy def wrap_compute_argsort(topi_compute): def _compute_argsort(attrs, inputs, _): axis = get_const_int(attrs.axis) is_ascend = bool(get_const_int(attrs.is_ascend)) dtype = attrs.dtype return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend, dtype=dtype)] return _compute_argsort @override_native_generic_func("argsort_strategy") def argsort_strategy(attrs, inputs, out_type, target): strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_argsort(topi.argsort), wrap_topi_schedule(topi.generic.schedule_argsort), name="argsort.generic", ) return strategy def wrap_compute_topk(topi_compute): def _compute_topk(attrs, inputs, out_type): if attrs.k is not None: k = attrs.k else: k = inputs[1] axis = get_const_int(attrs.axis) ret_type = attrs.ret_type is_ascend = bool(get_const_int(attrs.is_ascend)) dtype = attrs.dtype out = topi_compute(inputs[0], k, axis, ret_type, is_ascend, dtype) out = out if isinstance(out, list) else [out] return out return _compute_topk @override_native_generic_func("topk_strategy") def topk_strategy(attrs, inputs, out_type, target): strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_topk(topi.topk), wrap_topi_schedule(topi.generic.schedule_topk), name="topk.generic", ) return strategy def wrap_compute_multibox_prior(topi_compute): def _compute_multibox_prior(attrs, inputs, _): sizes = get_float_tuple(attrs.sizes) ratios = get_float_tuple(attrs.ratios) steps = get_float_tuple(attrs.steps) offsets = get_float_tuple(attrs.offsets) clip = bool(get_const_int(attrs.clip)) return [topi_compute(inputs[0], sizes, ratios, steps, offsets, clip)] return _compute_multibox_prior @override_native_generic_func("multibox_prior_strategy")
Apache License 2.0
basnijholt/miflora
miflora/miflora_poller.py
MiFloraPoller.parameter_value
python
def parameter_value(self, parameter, read_cached=True):
    if parameter == MI_BATTERY:
        return self.battery_level()

    with self.lock:
        if (
            (read_cached is False)
            or (self._last_read is None)
            or (datetime.now() - self._cache_timeout > self._last_read)
        ):
            self.fill_cache()
        else:
            _LOGGER.debug(
                "Using cache (%s < %s)",
                datetime.now() - self._last_read,
                self._cache_timeout,
            )

        if self.cache_available() and (len(self._cache) == 16):
            return self._parse_data()[parameter]
        if self.cache_available() and (self.is_ropot()):
            if parameter == MI_LIGHT:
                return False
            return self._parse_data()[parameter]
        raise BluetoothBackendException(
            "Could not read data from Mi Flora sensor %s" % self._mac
        )
Return the value of one of the monitored parameters. This method will try to retrieve the data from the cache and only request it over Bluetooth if no cached value is stored or the cache has expired. This behaviour can be overridden with the "read_cached" parameter.
https://github.com/basnijholt/miflora/blob/c231bfd3f9aa624195dd39ce610ad175229b5712/miflora/miflora_poller.py#L156-L191
import logging import time from datetime import datetime, timedelta from struct import unpack from threading import Lock from btlewrap.base import BluetoothBackendException, BluetoothInterface _HANDLE_READ_VERSION_BATTERY = 0x38 _HANDLE_READ_NAME = 0x03 _HANDLE_READ_SENSOR_DATA = 0x35 _HANDLE_WRITE_MODE_CHANGE = 0x33 _DATA_MODE_CHANGE = bytes([0xA0, 0x1F]) MI_TEMPERATURE = "temperature" MI_LIGHT = "light" MI_MOISTURE = "moisture" MI_CONDUCTIVITY = "conductivity" MI_BATTERY = "battery" _LOGGER = logging.getLogger(__name__) BYTEORDER = "little" _HANDLE_DEVICE_TIME = 0x41 _HANDLE_HISTORY_CONTROL = 0x3E _HANDLE_HISTORY_READ = 0x3C _CMD_HISTORY_READ_INIT = b"\xa0\x00\x00" _CMD_HISTORY_READ_SUCCESS = b"\xa2\x00\x00" _CMD_HISTORY_READ_FAILED = b"\xa3\x00\x00" _INVALID_HISTORY_DATA = [ b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff", b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", b"\xaa\xbb\xcc\xdd\xee\xff\x99\x88\x77\x66\x55\x44\x33\x22\x11\x10", ] def format_bytes(raw_data): if raw_data is None: return "None" return " ".join([format(c, "02x") for c in raw_data]).upper() class MiFloraPoller: def __init__(self, mac, backend, cache_timeout=600, adapter="hci0"): self._mac = mac self._bt_interface = BluetoothInterface(backend, adapter=adapter) self._cache = None self._cache_timeout = timedelta(seconds=cache_timeout) self._last_read = None self._fw_last_read = None self.lock = Lock() self._firmware_version = None self.battery = None def name(self): with self._bt_interface.connect(self._mac) as connection: name = connection.read_handle( _HANDLE_READ_NAME ) if not name: raise BluetoothBackendException( "Could not read data from Mi Flora sensor %s" % self._mac ) return "".join(chr(n) for n in name) def fill_cache(self): _LOGGER.debug("Filling cache with new sensor data.") try: firmware_version = self.firmware_version() except BluetoothBackendException: self._last_read = ( datetime.now() - self._cache_timeout + timedelta(seconds=300) ) raise with self._bt_interface.connect(self._mac) as connection: if firmware_version >= "2.6.6": try: connection.write_handle( _HANDLE_WRITE_MODE_CHANGE, _DATA_MODE_CHANGE ) except BluetoothBackendException: self._last_read = ( datetime.now() - self._cache_timeout + timedelta(seconds=300) ) return self._cache = connection.read_handle( _HANDLE_READ_SENSOR_DATA ) _LOGGER.debug( "Received result for handle %s: %s", _HANDLE_READ_SENSOR_DATA, format_bytes(self._cache), ) self._check_data() if self.cache_available(): self._last_read = datetime.now() else: self._last_read = ( datetime.now() - self._cache_timeout + timedelta(seconds=300) ) def battery_level(self): self.firmware_version() return self.battery def firmware_version(self): if (self._firmware_version is None) or ( datetime.now() - timedelta(hours=24) > self._fw_last_read ): self._fw_last_read = datetime.now() with self._bt_interface.connect(self._mac) as connection: res = connection.read_handle( _HANDLE_READ_VERSION_BATTERY ) _LOGGER.debug( "Received result for handle %s: %s", _HANDLE_READ_VERSION_BATTERY, format_bytes(res), ) if res is None: self.battery = 0 self._firmware_version = None else: self.battery = res[0] self._firmware_version = "".join(map(chr, res[2:])) return self._firmware_version
MIT License
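A minimal usage sketch for MiFloraPoller.parameter_value above, not taken from the repository: the MAC address is a placeholder and GatttoolBackend is just one of the btlewrap backends the poller can be constructed with.

from btlewrap import GatttoolBackend
from miflora.miflora_poller import MiFloraPoller, MI_TEMPERATURE, MI_MOISTURE

poller = MiFloraPoller("C4:7C:8D:00:00:00", GatttoolBackend)  # placeholder MAC
# The first call fills the cache over Bluetooth; later calls reuse it until it expires.
print(poller.parameter_value(MI_TEMPERATURE))
print(poller.parameter_value(MI_MOISTURE, read_cached=False))  # force a fresh read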
forseti-security/forseti-security
google/cloud/forseti/common/gcp_api/_base_repository.py
GCPRepository._build_next_request
python
def _build_next_request(self, verb, prior_request, prior_response):
    method = getattr(self._component, verb + '_next')
    return method(prior_request, prior_response)
Builds pagination-aware request object. More details: https://developers.google.com/api-client-library/python/guide/pagination Args: verb (str): Request verb (ex. insert, update, delete). prior_request (httplib2.HttpRequest): Request that may trigger paging. prior_response (dict): Potentially partial response. Returns: httplib2.HttpRequest: HttpRequest or None. None is returned when there is nothing more to fetch - request completed.
https://github.com/forseti-security/forseti-security/blob/de5d0f4d047c293a2a72545a76c3783980865551/google/cloud/forseti/common/gcp_api/_base_repository.py#L394-L411
from builtins import str from builtins import object import json import logging import os import threading from urllib.parse import urljoin from future import standard_library import google_auth_httplib2 import pkg_resources import uritemplate from googleapiclient import discovery from ratelimiter import RateLimiter from retrying import retry import google.auth from google.auth.credentials import with_scopes_if_required from google.cloud.forseti.common.gcp_api import _supported_apis from google.cloud.forseti.common.gcp_api import errors as api_errors from google.cloud.forseti.common.util import http_helpers from google.cloud.forseti.common.util import logger from google.cloud.forseti.common.util import replay from google.cloud.forseti.common.util import retryable_exceptions import google.oauth2.credentials standard_library.install_aliases() CLOUD_SCOPES = frozenset(['https://www.googleapis.com/auth/cloud-platform']) LOCAL_THREAD = threading.local() LOGGER = logger.get_logger(__name__) NUM_HTTP_RETRIES = 5 SUPPORT_DISCOVERY_CACHE = ( pkg_resources.get_distribution( 'google-api-python-client').version >= '1.4.2') REQUEST_RECORDER = dict() REQUEST_REPLAYER = dict() DISCOVERY_DOCS_BASE_DIR = os.path.join(os.path.abspath( os.path.dirname(__file__)), 'discovery_documents') @retry(retry_on_exception=retryable_exceptions.is_retryable_exception, wait_exponential_multiplier=1000, wait_exponential_max=10000, stop_max_attempt_number=5) def _create_service_api(credentials, service_name, version, is_private_api, developer_key=None, cache_discovery=False, cache=None, use_versioned_discovery_doc=False): try: if LOGGER.getEffectiveLevel() > logging.DEBUG: logging.getLogger(discovery.__name__).setLevel(logging.WARNING) except Exception as e: LOGGER.debug('Logging cannot be set: %s', e) if is_private_api: if use_versioned_discovery_doc: service_json = '{}_{}.json'.format(service_name, version) service_path = os.path.join(DISCOVERY_DOCS_BASE_DIR, service_json) return _build_service_from_document( credentials, service_path) discovery_kwargs = { 'serviceName': service_name, 'version': version, 'developerKey': developer_key, 'credentials': credentials} if SUPPORT_DISCOVERY_CACHE: discovery_kwargs['cache_discovery'] = cache_discovery discovery_kwargs['cache'] = cache return discovery.build(**discovery_kwargs) def _build_service_from_document(credentials, document_path): with open(document_path, 'r') as f: discovery_data = json.load(f) return discovery.build_from_document( service=discovery_data, credentials=credentials ) class BaseRepositoryClient(object): def __init__(self, api_name, versions=None, credentials=None, quota_max_calls=None, quota_period=None, use_rate_limiter=False, read_only=False, use_versioned_discovery_doc=False, cache_discovery=False, cache=None, **kwargs): self._use_cached_http = False if not credentials: self._use_cached_http = True credentials, _ = google.auth.default() self._credentials = with_scopes_if_required(credentials, list(CLOUD_SCOPES)) self._repository_lock = threading.RLock() if use_rate_limiter: self._rate_limiter = RateLimiter(max_calls=quota_max_calls, period=quota_period) else: self._rate_limiter = None self._read_only = read_only self.name = api_name supported_api = _supported_apis.SUPPORTED_APIS.get(api_name) if not supported_api: LOGGER.warning('API "%s" is not formally supported in Forseti, ' 'proceed at your own risk.', api_name) if not versions and supported_api: versions = [supported_api.get('default_version')] self.versions = versions if supported_api: for version in 
versions: if version not in supported_api.get('supported_versions', []): LOGGER.warning('API "%s" version %s is not formally ' 'supported in Forseti, proceed at your ' 'own risk.', api_name, version) self.is_private_api = None if supported_api: self.is_private_api = ( _supported_apis.SUPPORTED_APIS.get(api_name) .get('is_private_api')) self.gcp_services = {} for version in versions: self.gcp_services[version] = _create_service_api( self._credentials, self.name, version, self.is_private_api, kwargs.get('developer_key'), cache_discovery, cache, use_versioned_discovery_doc) def __repr__(self): return 'API: name=%s, versions=%s' % (self.name, self.versions) def _init_repository(self, repository_class, version=None): if not version: version = ( _supported_apis.SUPPORTED_APIS.get(self.name, {}) .get('default_version')) if not version or version not in self.gcp_services: version = sorted(self.gcp_services.keys())[0] with self._repository_lock: return repository_class(gcp_service=self.gcp_services[version], credentials=self._credentials, rate_limiter=self._rate_limiter, use_cached_http=self._use_cached_http, read_only=self._read_only) class GCPRepository(object): def __init__(self, gcp_service, credentials, component, num_retries=NUM_HTTP_RETRIES, key_field='project', entity_field=None, list_key_field=None, get_key_field=None, max_results_field='maxResults', search_query_field='query', resource_path_template=None, rate_limiter=None, use_cached_http=True, read_only=False): self.gcp_service = gcp_service self.read_only = read_only self._credentials = credentials components = component.split('.') self._component = getattr( self.gcp_service, components.pop(0))() for nested_component in components: self._component = getattr( self._component, nested_component)() self._entity_field = entity_field self._num_retries = num_retries if list_key_field: self._list_key_field = list_key_field else: self._list_key_field = key_field if get_key_field: self._get_key_field = get_key_field else: self._get_key_field = key_field self._max_results_field = max_results_field self._search_query_field = search_query_field self._resource_path_template = resource_path_template self._rate_limiter = rate_limiter self._use_cached_http = use_cached_http self._local = LOCAL_THREAD @property def http(self): if self._use_cached_http and hasattr(self._local, 'http'): return self._local.http authorized_http = google_auth_httplib2.AuthorizedHttp( self._credentials, http=http_helpers.build_http()) if self._use_cached_http: self._local.http = authorized_http return authorized_http def _build_request(self, verb, verb_arguments): method = getattr(self._component, verb) method_args = {str(k): v for k, v in verb_arguments.items()} return method(**method_args)
Apache License 2.0
ryanbhayward/games-puzzles-algorithms
old/lib/games_puzzles_algorithms/players/minimax/minimax_agent.py
MinimaxAgent.value
python
def value(self, game_state, time_allowed_s=-1, time_used=0, tree=None):
    if tree is None:
        self._tree = {}
        tree = self._tree
    tree['value'] = -INF
    if game_state.is_terminal():
        tree['value'] = -game_state.score(
            game_state.player_who_acted_last())
    elif time_allowed_s > 0 and not(time_used < time_allowed_s):
        raise self.TimeIsUp
    else:
        tree['children'] = []
        start_time = time.clock() if time_allowed_s > 0 else 0
        for action in game_state.legal_actions():
            tree['children'].append({'action': action})
            with game_state.play(action):
                action_value = -self.value(
                    game_state,
                    time_allowed_s=time_allowed_s,
                    time_used=time.clock() - start_time + time_used,
                    tree=tree['children'][-1])
                if action_value > tree['value']:
                    tree['value'] = action_value
    return tree['value']
Return the game theoretic value of game state, `game_state`. If the remaining time, `time_allowed_s` - `time_used` is insufficient, then return the best value encountered so far. A non-positive `time_allowed_s` implies no time limit.
https://github.com/ryanbhayward/games-puzzles-algorithms/blob/53b12cf37324f8757024cec5839e8cb2625cc4e2/old/lib/games_puzzles_algorithms/players/minimax/minimax_agent.py#L15-L49
from __future__ import division import time import random import games_puzzles_algorithms.debug as debug import logging INF = float('inf') class MinimaxAgent(object): class TimeIsUp(Exception): pass
MIT License
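A hedged sketch of driving MinimaxAgent.value; MyGameState is a hypothetical stand-in for any state object providing the interface the method relies on (is_terminal, score, player_who_acted_last, legal_actions, and play(action) as a context manager).

agent = MinimaxAgent()
game_state = MyGameState()  # hypothetical state implementing the interface above
try:
    v = agent.value(game_state, time_allowed_s=5)
    print('game value for the player to act:', v)
except MinimaxAgent.TimeIsUp:
    print('search ran out of time')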
michaelaye/pyciss
pyciss/ringcube.py
RingCube.imshow
python
def imshow( self, data=None, save=False, ax=None, interpolation="none", extra_title=None, show_resonances="some", set_extent=True, equalized=False, rmin=None, rmax=None, savepath=".", **kwargs, ): if data is None: data = self.img if self.resonance_axis is not None: logger.debug("removing resonance_axis") self.resonance_axis.remove() if equalized: data = np.nan_to_num(data) data[data < 0] = 0 data = exposure.equalize_hist(data) self.plotted_data = data extent_val = self.extent if set_extent else None min_, max_ = self.plot_limits self.min_ = min_ self.max_ = max_ if ax is None: if not _SEABORN_INSTALLED: fig, ax = plt.subplots(figsize=calc_4_3(8)) else: fig, ax = plt.subplots() else: fig = ax.get_figure() with quantity_support(): im = ax.imshow( data, extent=extent_val, cmap="gray", vmin=min_, vmax=max_, interpolation=interpolation, origin="lower", aspect="auto", **kwargs, ) if any([rmin is not None, rmax is not None]): ax.set_ylim(rmin, rmax) self.mpl_im = im ax.set_xlabel("Longitude [deg]") ax.set_ylabel("Radius [Mm]") ax.ticklabel_format(useOffset=False) title = self.plot_title if extra_title: title += ", " + extra_title ax.set_title(title, fontsize=12) if show_resonances: self.set_resonance_axis(ax, show_resonances, rmin, rmax) if save: savename = self.plotfname if extra_title: savename = savename[:-4] + "_" + extra_title + ".png" p = Path(savename) fullpath = Path(savepath) / p.name fig.savefig(fullpath, dpi=150) logging.info("Created %s", fullpath) self.im = im return im
Powerful default display. show_resonances can be True, a list, 'all', or 'some'
https://github.com/michaelaye/pyciss/blob/c8bcf30882dc9f8cff2240dc5a7febe89c98f2f8/pyciss/ringcube.py#L259-L335
import logging import warnings from pathlib import Path import holoviews as hv import hvplot.pandas import hvplot.xarray import matplotlib.pyplot as plt import numpy as np import pandas as pd import xarray as xr from astropy import units as u from astropy.visualization import quantity_support from matplotlib.ticker import FormatStrFormatter from skimage import exposure from pysis import CubeFile from ._utils import which_epi_janus_resonance from .index import ring_summary_index from .io import PathManager from .meta import get_all_resonances from .opusapi import MetaData try: import seaborn as sns _SEABORN_INSTALLED = True except ImportError: _SEABORN_INSTALLED = False else: sns.set_style("white", {"xtick.bottom": True, "ytick.left": True}) logger = logging.getLogger(__name__) resonances = get_all_resonances() meta_df = ring_summary_index() meta_df["file_id"] = meta_df.FILE_SPECIFICATION_NAME.map( lambda x: Path(x).stem.split("_")[0] ) def calc_4_3(width): return (width, 3 * width / 4) def mad(arr, relative=True): with warnings.catch_warnings(): warnings.simplefilter("ignore") med = np.nanmedian(arr, axis=1) mad = np.nanmedian(np.abs(arr - med[:, np.newaxis]), axis=1) if relative: return mad / med else: return mad def calc_offset(cube): i = 0 while pd.Series(cube.img[:, i]).count() < 200: i += 1 return max(i, 20) class RingCube(CubeFile): def __init__( self, fname, plot_limits=(0.1, 99), destriped=True, pixres=None, litstatus=None, **kwargs, ): p = Path(fname) self.pm = PathManager(fname) if not p.is_absolute(): if self.pm.cubepath.exists() and destriped is True: fname = str(self.pm.cubepath) else: fname = str(self.pm.undestriped) super().__init__(str(fname), **kwargs) try: q = f"file_id == '{self.pm.img_id}'" self.meta = meta_df.query(q) if self.meta.size == 0: logging.warn("Image ID not found in meta-data index.") except KeyError: self.meta = None self._meta_pixres = pixres self._meta_litstatus = litstatus self.resonance_axis = None self.pmin, self.pmax = plot_limits self._plotted_data = None self._xarray = self.to_xarray() @property def xarray(self): return self._xarray def get_opus_meta_data(self): print("Getting metadata from the online OPUS database.") self.opusmeta = MetaData(self.pm._id) print("Done.") return self.opusmeta @property def plotted_data(self): return self.img if self._plotted_data is None else self._plotted_data @plotted_data.setter def plotted_data(self, value): self._plotted_data = value @property def meta_pixres(self): if self._meta_pixres is None: meta = self.meta cols = ["FINEST_RADIAL_RESOLUTION", "COARSEST_RADIAL_RESOLUTION"] if meta is not None and meta.size != 0: mean_radial_res = meta[cols].mean(axis=1) self._meta_pixres = int(mean_radial_res * 1000) * u.m / u.pix else: self._meta_pixres = np.nan return self._meta_pixres @meta_pixres.setter def meta_pixres(self, value): self._meta_pixres = value @property def meta_litstatus(self): if self._meta_litstatus is None: if self.meta is not None: emang = self.meta.filter(regex="RING_EMISSION_ANGLE").mean(axis=1) self._meta_litstatus = "LIT" if emang.iat[0] < 90.0 else "UNLIT" else: self._meta_litstatus = "UNKNOWN" return self._meta_litstatus @meta_litstatus.setter def meta_litstatus(self, value): self._meta_litstatus = value @property def mapping_label(self): return self.label["IsisCube"]["Mapping"] @property def minrad(self): return self.mapping_label["MinimumRingRadius"] / 1e6 * u.Mm @property def minrad_km(self): return self.minrad.to(u.km) @property def midrad(self): return (self.minrad + self.maxrad) / 2 
@property def maxrad(self): return self.mapping_label["MaximumRingRadius"] / 1e6 * u.Mm @property def maxrad_km(self): return self.maxrad.to(u.km) @property def minlon(self): return self.mapping_label["MinimumRingLongitude"] * u.degree @property def maxlon(self): return self.mapping_label["MaximumRingLongitude"] * u.degree @property def img(self): return self.apply_numpy_specials()[0] @property def extent(self): return [i.value for i in [self.minlon, self.maxlon, self.minrad, self.maxrad]] @property def resolution_val(self): return self.mapping_label["PixelResolution"].value * u.m / u.pixel @property def image_id(self): return Path(self.filename).stem.split(".")[0] @property def plotfname(self): return self.filename.split(".")[0] + ".png" def calc_clim(self, data): from numpy import inf if np.nanmin(data) == -inf: data[data == -inf] = np.nan data[data == inf] = np.nan return np.percentile(data[~np.isnan(data)], (self.pmin, self.pmax)) @property def plot_limits(self): return self.calc_clim(self.plotted_data) def to_xarray(self, subtracted=False): radii = np.linspace(self.minrad, self.maxrad, self.img.shape[0]) azimuths = np.linspace(self.minlon, self.maxlon, self.img.shape[1]) if subtracted: imgdata = self.density_wave_median_subtracted.T else: imgdata = self.img.T data = xr.DataArray( imgdata, coords={"azimuth": azimuths, "radius": radii}, dims=("azimuth", "radius"), ) if not subtracted: return data vmin, vmax = self.plot_limits min_filtered = data.where(data > vmin, vmin) return min_filtered.where(min_filtered < vmax) else: return data
ISC License
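A short usage sketch for RingCube.imshow; the cube filename is a placeholder, and it assumes a calibrated, reprojected ISIS cube is available locally.

from pyciss.ringcube import RingCube

cube = RingCube("N1467345444_2_cal.map.dst.cub")  # placeholder file name
cube.imshow(show_resonances="some", equalized=True, save=False)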
scy-phy/minicps
minicps/devices.py
Device._start
python
def _start(self):
    print "TODO _start: please override me"
Start a device.
https://github.com/scy-phy/minicps/blob/e1c9c9c9344d0a5c7afcbea3229c6277c587280b/minicps/devices.py#L198-L201
import time from os.path import splitext from minicps.states import SQLiteState, RedisState from minicps.protocols import EnipProtocol, ModbusProtocol class Device(object): def __init__(self, name, protocol, state, disk={}, memory={}): self._validate_inputs(name, protocol, state, disk, memory) self.name = name self.state = state self.protocol = protocol self.memory = memory self.disk = disk self._init_state() self._init_protocol() self._start() self._stop() def _validate_inputs(self, name, protocol, state, disk, memory): if type(name) is not str: raise TypeError('Name must be a string.') elif not name: raise ValueError('Name string cannot be empty.') if type(state) is not dict: raise TypeError('State must be a dict.') else: state_keys = state.keys() if (not state_keys) or (len(state_keys) != 2): raise KeyError('State must contain 2 keys.') else: for key in state_keys: if (key != 'path') and (key != 'name'): raise KeyError('%s is an invalid key.' % key) state_values = state.values() for val in state_values: if type(val) is not str: raise TypeError('state values must be strings.') subpath, extension = splitext(state['path']) if (extension != '.redis') and (extension != '.sqlite'): raise ValueError('%s extension not supported.' % extension) if type(state['name']) is not str: raise TypeError('State name must be a string.') if type(protocol) is not dict: if protocol is not None: raise TypeError('Protocol must be either None or a dict.') else: protocol_keys = protocol.keys() if (not protocol_keys) or (len(protocol_keys) != 3): raise KeyError('Protocol must contain 3 keys.') else: for key in protocol_keys: if ((key != 'name') and (key != 'mode') and (key != 'server')): raise KeyError('%s is an invalid key.' % key) if type(protocol['name']) is not str: raise TypeError('Protocol name must be a string.') else: name = protocol['name'] if (name != 'enip' and name != 'modbus'): raise ValueError('%s protocol not supported.' % protocol) if type(protocol['mode']) is not int: raise TypeError('Protocol mode must be a int.') else: mode = protocol['mode'] if (mode < 0): raise ValueError('Protocol mode must be positive.') def _init_state(self): subpath, extension = splitext(self.state['path']) if extension == '.sqlite': self._state = SQLiteState(self.state) elif extension == '.redis': self._state = RedisState(self.state) else: print 'ERROR: %s backend not supported.' % self.state def _init_protocol(self): if self.protocol is None: print 'DEBUG: %s has no networking capabilities.' % self.name pass else: name = self.protocol['name'] if name == 'enip': self._protocol = EnipProtocol(self.protocol) elif name == 'modbus': self._protocol = ModbusProtocol(self.protocol) else: print 'ERROR: %s protocol not supported.' % self.protocol
MIT License
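A sketch of a concrete device overriding the _start hook; it assumes the SQLite state database at the given path has already been initialised, and passes protocol=None so the device has no networking capabilities.

from minicps.devices import Device

class ToyDevice(Device):

    def _start(self):
        # hook called at the end of Device.__init__
        print 'starting %s' % self.name

toy = ToyDevice(
    name='toy1',
    protocol=None,
    state={'name': 'toy_state', 'path': '/tmp/toy_state.sqlite'})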
mattvonrocketstein/smash
smashlib/ipy3x/testing/decorators.py
make_label_dec
python
def make_label_dec(label, ds=None):
    if isinstance(label, string_types):
        labels = [label]
    else:
        labels = label
    tmp = lambda: None
    for label in labels:
        setattr(tmp, label, True)

    def decor(f):
        for label in labels:
            setattr(f, label, True)
        return f

    if ds is None:
        ds = "Labels a test as %r." % label
    decor.__doc__ = ds
    return decor
Factory function to create a decorator that applies one or more labels. Parameters ---------- label : string or sequence One or more labels that will be applied by the decorator to the functions it decorates. Labels are attributes of the decorated function with their value set to True. ds : string An optional docstring for the resulting decorator. If not given, a default docstring is auto-generated. Returns ------- A decorator. Examples -------- A simple labeling decorator: >>> slow = make_label_dec('slow') >>> slow.__doc__ "Labels a test as 'slow'." And one that uses multiple labels and a custom docstring: >>> rare = make_label_dec(['slow','hard'], ... "Mix labels 'slow' and 'hard' for rare tests.") >>> rare.__doc__ "Mix labels 'slow' and 'hard' for rare tests." Now, let's test using this one: >>> @rare ... def f(): pass ... >>> >>> f.slow True >>> f.hard True
https://github.com/mattvonrocketstein/smash/blob/98acdc27ab72ca80d9a7f63a54c0d52f126a8009/smashlib/ipy3x/testing/decorators.py#L104-L171
import sys import os import tempfile import unittest from IPython.external.decorator import decorator from .ipunittest import ipdoctest, ipdocstring from IPython.external.decorators import * from IPython.utils.process import is_cmd_found from IPython.utils.py3compat import string_types def as_unittest(func): class Tester(unittest.TestCase): def test(self): func() Tester.__name__ = func.__name__ return Tester def apply_wrapper(wrapper, func): import nose.tools return decorator(wrapper, nose.tools.make_decorator(func)(wrapper))
MIT License
abhinavkashyap/sciwing
sciwing/api/routers/citation_intent_clf.py
classify_citation_intent
python
def classify_citation_intent(citation: str):
    global citation_intent_clf_model
    if citation_intent_clf_model is None:
        citation_intent_clf_model = CitationIntentClassification()
    predictions = citation_intent_clf_model.predict_for_text(citation)
    return {"tags": predictions, "citation": citation}
Endpoint to classify a citation intent into ``Background``, ``Method``, or ``Result Comparison`` Parameters ---------- citation : str String containing the citation to another work Returns ------- JSON ``{"tags": Predicted tag for the citation, "citation": the citation itself}``
https://github.com/abhinavkashyap/sciwing/blob/5337e846c761165cf53fa8c26910589f7a03c3e9/sciwing/api/routers/citation_intent_clf.py#L10-L27
from fastapi import APIRouter from sciwing.models.citation_intent_clf import CitationIntentClassification router = APIRouter() citation_intent_clf_model = None @router.get("/cit_int_clf/{citation}")
MIT License
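A sketch of exercising the route above with FastAPI's test client; the way the router is mounted on an application is an assumption, and the first request instantiates (and may download) the pretrained model.

from fastapi import FastAPI
from fastapi.testclient import TestClient
from sciwing.api.routers import citation_intent_clf

app = FastAPI()
app.include_router(citation_intent_clf.router)

client = TestClient(app)
response = client.get("/cit_int_clf/Recent work uses BERT for this task")
print(response.json())  # {"tags": [...], "citation": "Recent work uses BERT for this task"}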
nuagenetworks/vspk-python
vspk/v5_0/nueventlog.py
NUEventLog.entity_parent_type
python
def entity_parent_type(self):
    return self._entity_parent_type
Get entity_parent_type value. Notes: Event parent entity type. Generally reported against enterprise. This attribute is named `entityParentType` in VSD API.
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v5_0/nueventlog.py#L268-L278
from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from bambou import NURESTObject class NUEventLog(NURESTObject): __rest_name__ = "eventlog" __resource_name__ = "eventlogs" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" def __init__(self, **kwargs): super(NUEventLog, self).__init__() self._request_id = None self._diff = None self._enterprise = None self._entities = None self._entity_id = None self._entity_parent_id = None self._entity_parent_type = None self._entity_scope = None self._entity_type = None self._user = None self._event_received_time = None self._external_id = None self._type = None self.expose_attribute(local_name="request_id", remote_name="requestID", attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name="diff", remote_name="diff", attribute_type=dict, is_required=False, is_unique=False) self.expose_attribute(local_name="enterprise", remote_name="enterprise", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entities", remote_name="entities", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_id", remote_name="entityID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_parent_id", remote_name="entityParentID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_parent_type", remote_name="entityParentType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="entity_type", remote_name="entityType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="user", remote_name="user", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="event_received_time", remote_name="eventReceivedTime", attribute_type=float, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False) self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) @property def request_id(self): return self._request_id @request_id.setter def request_id(self, value): self._request_id = value @property def diff(self): return self._diff @diff.setter def diff(self, value): self._diff = value @property def enterprise(self): return self._enterprise @enterprise.setter def enterprise(self, value): self._enterprise = value @property def entities(self): return self._entities @entities.setter def entities(self, value): self._entities = value @property def entity_id(self): return self._entity_id @entity_id.setter def entity_id(self, value): self._entity_id = value @property def entity_parent_id(self): return self._entity_parent_id @entity_parent_id.setter def entity_parent_id(self, value): self._entity_parent_id = value @property
BSD 3-Clause New or Revised License
paulscherrerinstitute/pcaspy
pcaspy/driver.py
Driver.setParamStatus
python
def setParamStatus(self, reason, alarm=None, severity=None):
    if alarm is not None and self.pvDB[reason].alarm != alarm:
        self.pvDB[reason].alarm = alarm
        self.pvDB[reason].mask |= cas.DBE_ALARM
        self.pvDB[reason].flag = True
    if severity is not None and self.pvDB[reason].severity != severity:
        self.pvDB[reason].severity = severity
        self.pvDB[reason].mask |= cas.DBE_ALARM
        self.pvDB[reason].flag = True
Set PV alarm status and severity and request an update :param str reason: PV base name :param alarm: alarm state :param severity: severity state The PVs' alarm status and severity are automatically set in :meth:`setParam`. If the status and severity need to be set explicitly to override the defaults, :meth:`setParamStatus` must be called *after* :meth:`setParam`. The new alarm status/severity will be pushed to registered clients the next time :meth:`updatePVs` is called.
https://github.com/paulscherrerinstitute/pcaspy/blob/3dee3ddc8e7144ce66b059e0e45e8cc0ebe39ab7/pcaspy/driver.py#L169-L189
from . import cas from .alarm import Severity, Alarm import collections import operator import threading import time import sys import logging if sys.hexversion >= 0x02070000: from logging import NullHandler else: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger('pcaspy').addHandler(NullHandler()) class Manager(object): pvs = {} pvf = {} driver = {} manager = Manager() def registerDriver(driver_init_func): def wrap(*args, **kargs): driver_instance = args[0] port = driver_instance.port driver_init_func(*args, **kargs) manager.driver[port] = driver_instance return wrap class DriverType(type): def __init__(cls, name, bases, dct): if name != 'Driver': cls.__init__ = registerDriver(cls.__init__) type.__init__(cls, name, bases, dct) class Data(object): def __init__(self): self.value = 0 self.flag = False self.severity = Severity.INVALID_ALARM self.alarm = Alarm.UDF_ALARM self.udf = True self.mask = 0 self.time = cas.epicsTimeStamp() def __repr__(self): return "value=%s alarm=%s severity=%s flag=%s mask=%s time=%s" % (self.value, Alarm.nameOf(self.alarm), Severity.nameOf(self.severity), self.flag, self.mask, self.time) DriverBase = DriverType(str('DriverBase'), (), { '__doc__': 'Driver base class' }) class Driver(DriverBase): port = 'default' def __init__(self): self.pvDB = {} for reason, pv in manager.pvs[self.port].items(): data = Data() data.value = pv.info.value self.pvDB[reason] = data def read(self, reason): return self.getParam(reason) def write(self, reason, value): self.setParam(reason, value) return True def setParam(self, reason, value): if isinstance(value, list): value = value[:] elif 'numpy.ndarray' in str(type(value)): value = value.copy() pv = manager.pvs[self.port][reason] self.pvDB[reason].mask |= pv.info.checkValue(value) self.pvDB[reason].value = value self.pvDB[reason].time = cas.epicsTimeStamp() if self.pvDB[reason].mask: self.pvDB[reason].flag = True alarm, severity = pv.info.checkAlarm(value) self.setParamStatus(reason, alarm, severity) logging.getLogger('pcaspy.Driver.setParam') .debug('%s: %s', reason, self.pvDB[reason])
BSD 3-Clause New or Revised License
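A sketch of overriding the default alarm after setParam, as the docstring describes; the PV name and threshold are hypothetical and the usual pcaspy imports are assumed.

from pcaspy import Driver, SimpleServer, Alarm, Severity

prefix = 'TEST:'
pvdb = {'TEMP': {'prec': 1}}

class MyDriver(Driver):

    def update_temperature(self, value):
        self.setParam('TEMP', value)  # sets the default status/severity
        if value > 80:
            # explicit override must come *after* setParam
            self.setParamStatus('TEMP', Alarm.HIHI_ALARM, Severity.MAJOR_ALARM)
        self.updatePVs()  # push the change to registered clients

server = SimpleServer()
server.createPV(prefix, pvdb)
driver = MyDriver()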
morganloomis/ml_tools
scripts/ml_arcTracer.py
ui
python
def ui():
    globalScale = 1
    if mc.optionVar(exists='ml_arcTracer_brushGlobalScale'):
        globalScale = mc.optionVar(query='ml_arcTracer_brushGlobalScale')

    with utl.MlUi('ml_arcTracer', 'Arc Tracer', width=400, height=180,
                  info='''Select objects to trace. Choose camera space or worldspace arc. Press clear to delete the arcs, or retrace to redo the last arc.''') as win:

        win.buttonWithPopup(label='Trace Camera', command=traceCamera,
                            annotation='Trace an arc as an overlay over the current camera.',
                            shelfLabel='cam', shelfIcon='flowPathObj')
        win.buttonWithPopup(label='Trace World', command=traceWorld,
                            annotation='Trace an arc in world space.',
                            shelfLabel='world', shelfIcon='flowPathObj')
        win.buttonWithPopup(label='Retrace Previous', command=retraceArc,
                            annotation='Retrace the previously traced arc.',
                            shelfLabel='retrace', shelfIcon='flowPathObj')
        win.buttonWithPopup(label='Clear Arcs', command=clearArcs,
                            annotation='Clear all arcs.',
                            shelfLabel='clear', shelfIcon='flowPathObj')

        fsg = mc.floatSliderGrp(label='Line Width', minValue=0.1, maxValue=5,
                                value=globalScale)
        mc.floatSliderGrp(fsg, edit=True,
                          dragCommand=partial(setLineWidthCallback, fsg))
User interface for arc tracer
https://github.com/morganloomis/ml_tools/blob/857daff9e7c2b3e34b5e92e70d2c7fcd90a14ad0/scripts/ml_arcTracer.py#L112-L134
__author__ = 'Morgan Loomis' __license__ = 'MIT' __revision__ = 11 __category__ = 'animation' shelfButton = {'annotation': 'Open a UI to trace the animation of a node across the screen.', 'menuItem': [['Trace Camera','import ml_arcTracer;ml_arcTracer.traceCamera()'], ['Trace World','import ml_arcTracer;ml_arcTracer.traceWorld()'], ['Retrace', 'import ml_arcTracer;ml_arcTracer.retraceArc()'], ['Clear Arcs','import ml_arcTracer;ml_arcTracer.clearArcs()']], 'order': 2} import maya.cmds as mc import maya.mel as mm from maya import OpenMaya import random, math from functools import partial try: import ml_utilities as utl utl.upToDateCheck(32) except ImportError: result = mc.confirmDialog( title='Module Not Found', message='This tool requires the ml_utilities module. Once downloaded you will need to restart Maya.', button=['Download Module','Cancel'], defaultButton='Cancel', cancelButton='Cancel', dismissString='Cancel' ) if result == 'Download Module': mc.showHelp('http://morganloomis.com/tool/ml_utilities/',absolute=True)
MIT License
dragonfly/dragonfly
dragonfly/distributions/continuous.py
Beta.__str__
python
def __str__(self):
    return 'Beta: alpha=%0.3f, beta=%.3f' % (self.alpha, self.beta)
Returns a string representation.
https://github.com/dragonfly/dragonfly/blob/a579b5eadf452e23b07d4caf27b402703b0012b7/dragonfly/distributions/continuous.py#L254-L256
from __future__ import absolute_import from __future__ import division import numpy as np from .distribution import Continuous from ..exd import domains class Normal(Continuous): def __init__(self, mean, var): super(Normal, self).__init__() self.mean = float(mean) self.var = float(var) self.dim = 1 self.domain = domains.EuclideanDomain([[-np.inf, np.inf]]) def pdf(self, x): return np.asscalar(np.exp(-((x-self.mean)**2)/(2*self.var))/ (np.sqrt(2*np.pi*self.var))) def draw_samples_random(self, size=None): return np.random.normal(self.mean, np.sqrt(self.var), size) def logp(self, x): return np.asscalar(-0.5*(((x-self.mean)*(x-self.mean)/self.var) + np.log(2*np.pi*self.var))) def grad_logp(self, x): return np.asscalar(-(x - self.mean)/self.var) def get_mean(self): return self.mean def get_variance(self): return self.var def __str__(self): return 'Univariate Normal: mean=%0.3f, variance=%.3f' % (self.mean, self.var) class MultivariateGaussian(Continuous): def __init__(self, mean, cov): super(MultivariateGaussian, self).__init__() self.mean = np.array(mean, dtype=float) self.cov = np.array(cov, dtype=float) self.pre = np.linalg.inv(self.cov) self.det = np.linalg.det(self.cov) self.dim = len(mean) self.domain = domains.EuclideanDomain(np.tile(np.array([-np.inf, np.inf]), (len(mean), 1))) def pdf(self, x): value = -0.5*np.dot(np.transpose(x - self.mean), np.dot(self.pre, x - self.mean)) return np.asscalar(np.exp(value)*np.power(2*np.pi*self.det, -0.5)) def draw_samples_random(self, size): return np.random.multivariate_normal(self.mean, self.cov, size) def logp(self, x): value = np.dot(np.transpose(x - self.mean), np.dot(self.pre, x - self.mean)) return -0.5*(value + np.log(2*np.pi) + np.log(self.det)) def grad_logp(self, x): return -np.dot(self.pre, x - self.mean) def get_mean(self): return self.mean def get_variance(self): return self.cov def __str__(self): return 'Multivariate Normal' class ContinuousUniform(Continuous): def __init__(self, lower, upper): super(ContinuousUniform, self).__init__() self.lower = float(lower) self.upper = float(upper) self.dim = 1 self.domain = domains.EuclideanDomain([[self.lower, self.upper]]) def pdf(self, x): if x < self.lower or x > self.upper: return 0 return np.asscalar(1/(self.upper - self.lower)) def draw_samples_random(self, size=None): return np.random.uniform(self.lower, self.upper, size) def logp(self, x): if x < self.lower or x > self.upper: return -np.inf return -np.log(self.upper - self.lower) def grad_logp(self, x): return 0 def get_parameters(self): return self.lower, self.upper def get_mean(self): return (self.lower + self.upper)/2 def get_variance(self): return (np.power((self.upper - self.lower), 2))/12 def __str__(self): return 'Continuous Uniform: lower=%0.3f, upper=%.3f' % (self.lower, self.upper) class Exponential(Continuous): def __init__(self, lam): super(Exponential, self).__init__() self.lam = float(lam) self.dim = 1 self.domain = domains.EuclideanDomain([[0, np.inf]]) def pdf(self, x): if x < 0: return 0 return np.asscalar(self.lam*np.exp(-self.lam*x)) def draw_samples_random(self, size=None): return np.random.exponential(1/self.lam, size) def logp(self, x): if x < 0: return -np.inf return np.log(self.lam) - self.lam*x def grad_logp(self, x): return -self.lam def get_lambda(self): return self.lam def get_mean(self): return 1/self.lam def get_variance(self): return np.power(self.lam, -2) def __str__(self): return 'Exponential: lambda=%0.3f' % (self.lam) class Beta(Continuous): def __init__(self, alpha, beta): super(Beta, self).__init__() 
self.alpha = float(alpha) self.beta = float(beta) self.dim = 1 self.B = (np.math.factorial(self.alpha - 1)*np.math.factorial(self.beta - 1))/ (float(np.math.factorial(self.alpha+self.beta-1))) self.domain = domains.EuclideanDomain([[0, 1]]) def pdf(self, x): if x < 0 or x > 1: return 0 return np.asscalar((np.power(x, self.alpha - 1)* np.power(1 - x, self.beta - 1))/self.B) def draw_samples_random(self, *args): return np.random.beta(self.alpha, self.beta, *args) def logp(self, x): if x < 0 or x > 1: return -np.inf return (self.alpha - 1)*np.log(x) + (self.beta - 1)*np.log(1-x) - np.log(self.B) def grad_logp(self, x): if x < 0 or x > 1: return 0 return (self.alpha - 1)/x - (self.beta - 1)/(1 - x) def get_parameters(self): return self.alpha, self.beta def get_mean(self): return self.alpha/(self.alpha + self.beta) def get_variance(self): return (self.alpha*self.beta)/ ((np.power(self.alpha+self.beta, 2))*(self.alpha+self.beta+1))
MIT License
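A small example grounded in the Beta methods shown above.

beta = Beta(2, 3)
print(str(beta))        # Beta: alpha=2.000, beta=3.000
print(beta.get_mean())  # 2 / (2 + 3) = 0.4
print(beta.pdf(0.5))    # density at x = 0.5
samples = beta.draw_samples_random(5)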
google/clusterfuzz
src/appengine/handlers/base_handler.py
check_redirect_url
python
def check_redirect_url(url): if not _SAFE_URL_PATTERN.match(url): raise helpers.EarlyExitException('Invalid redirect.', 403)
Check redirect URL is safe.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/appengine/handlers/base_handler.py#L127-L130
import base64 import cgi import datetime import json import logging import os import re import sys import traceback import urllib.parse from flask import redirect as flask_redirect from flask import request from flask import Response from flask.views import MethodView from google.cloud import ndb import jinja2 from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import db_config from clusterfuzz._internal.config import local_config from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.system import environment from libs import auth from libs import form from libs import helpers _SAFE_URL_PATTERN = re.compile( r'^(?:(?:https?|mailto|ftp):|[^:/?#]*(?:[/?#]|$))', flags=re.IGNORECASE) def add_jinja2_filter(name, fn): _JINJA_ENVIRONMENT.filters[name] = fn class JsonEncoder(json.JSONEncoder): _EPOCH = datetime.datetime.utcfromtimestamp(0) def default(self, obj): if isinstance(obj, ndb.Model): dict_obj = obj.to_dict() dict_obj['id'] = obj.key.id() return dict_obj if isinstance(obj, datetime.datetime): return int((obj - self._EPOCH).total_seconds()) if hasattr(obj, 'to_dict'): return obj.to_dict() if isinstance(obj, cgi.FieldStorage): return str(obj) if isinstance(obj, bytes): return obj.decode('utf-8') return json.JSONEncoder.default(self, obj) def format_time(dt): return '{t.day} {t:%b} {t:%y} {t:%X} PDT'.format(t=dt) def splitlines(text): return text.splitlines() def split_br(text): return re.split(r'\s*<br */>\s*', text, flags=re.IGNORECASE) def encode_json(value): return base64.b64encode(json.dumps( value, cls=JsonEncoder).encode('utf-8')).decode('utf-8') _JINJA_ENVIRONMENT = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(os.path.dirname(__file__), '..', 'templates')), extensions=['jinja2.ext.autoescape'], autoescape=True) _MENU_ITEMS = [] add_jinja2_filter('json', encode_json) add_jinja2_filter('format_time', format_time) add_jinja2_filter('splitlines', splitlines) add_jinja2_filter('split_br', split_br) add_jinja2_filter('polymer_tag', lambda v: '{{%s}}' % v) def add_menu(name, href): _MENU_ITEMS.append(_MenuItem(name, href)) def make_login_url(dest_url): return '/login?' + urllib.parse.urlencode({'dest': dest_url}) def make_logout_url(dest_url): return '/logout?' + urllib.parse.urlencode({ 'csrf_token': form.generate_csrf_token(), 'dest': dest_url, })
Apache License 2.0
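A brief illustration of check_redirect_url with hypothetical URLs: relative paths and plain http(s) URLs satisfy _SAFE_URL_PATTERN, while scheme tricks such as javascript: raise the 403 EarlyExitException.

check_redirect_url('/testcase-detail/123')      # ok: relative path
check_redirect_url('https://example.com/page')  # ok: explicit http(s) scheme
try:
    check_redirect_url('javascript:alert(1)')   # disallowed scheme
except helpers.EarlyExitException:
    print('rejected unsafe redirect')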
ncullen93/pybn
pyBN/utils/graph.py
topsort
python
def topsort(edge_dict, root=None): queue = [] if root is not None: queue = [root] else: for rv in edge_dict.keys(): prior=True for p in edge_dict.keys(): if rv in edge_dict[p]: prior=False if prior==True: queue.append(rv) visited = [] while queue: vertex = queue.pop(0) if vertex not in visited: visited.append(vertex) for nbr in edge_dict[vertex]: queue.append(nbr) return visited
List of nodes in topological sort order from edge dict where key = rv and value = list of rv's children
https://github.com/ncullen93/pybn/blob/58bf684b4ac0bbfa7e2aa394ba3dd302d3dd22db/pyBN/utils/graph.py#L31-L56
import networkx as nx
import numpy as np
from copy import copy


def would_cause_cycle(e, u, v, reverse=False):
    G = nx.DiGraph(e)
    if reverse:
        G.remove_edge(v, u)
    G.add_edge(u, v)
    try:
        nx.find_cycle(G, source=u)
        return True
    except:
        return False
MIT License
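Usage sketch for the topsort entry above, on a tiny hand-built edge dict (the graph is made up for illustration); it assumes the topsort function shown above is in scope, e.g. via pyBN.utils.graph as named in the record's path.

# Hypothetical edge dict: key = variable, value = list of its children.
edges = {
    'A': ['B', 'C'],
    'B': ['D'],
    'C': ['D'],
    'D': [],
}

order = topsort(edges)
print(order)  # ['A', 'B', 'C', 'D'] -- parents always precede their children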
gr4ph0s/c4d_redshiftwrapper_api
RedshiftWrapper/Redshift.py
Redshift.GetAllNodes
python
def GetAllNodes(self, removeMasterGroup=True, gvNode=None, nodeList=None):
    self._CheckMatIsValid()

    if nodeList is None:
        nodeList = list()
        gvNode = self._gvMaster.GetRoot()

    while gvNode:
        nodeList.append(Node(gvNode, self.doUndo))
        self.GetAllNodes(False, gvNode.GetDown(), nodeList)
        gvNode = gvNode.GetNext()

    if len(nodeList) > 1 and removeMasterGroup:
        nodeList = nodeList[1:]

    return nodeList
Get all nodes inside the material. Call GetAllNodes() with only removeMasterGroup; the other parameters are for internal use. :param removeMasterGroup: True to remove the Shader Group that holds all other Nodes, otherwise False. :type removeMasterGroup: Bool. :return: All Nodes inside the material. :rtype: List of :class:`.Node`
https://github.com/gr4ph0s/c4d_redshiftwrapper_api/blob/666385b535139d876091da3c33e012d76eab1d3a/RedshiftWrapper/Redshift.py#L74-L96
import os import sys import c4d try: import redshift except: pass if os.path.dirname(__file__) not in sys.path: sys.path.append(os.path.dirname(__file__)) from ImportTester import ImportTester from Node import Node from MetaName import MetaName __author__ = 'Adam Maxime - Graphos <gr4ph0s(at)hotmail.fr>' __project__ = "https://github.com/gr4ph0s/C4D_RedshiftWrapper_API" __version__ = '1.1' class Redshift(MetaName, metaclass=ImportTester): doUndo = True _mat = None _gvMaster = None @staticmethod def RedhisftIsInstalled(): return ImportTester._CheckImport("redshift") def SetMat(self, mat): global redshift if not isinstance(mat, c4d.BaseMaterial): raise TypeError('material is not a c4d.BaseMaterial') if not mat.IsInstanceOf(redshift.Mrsmaterial): raise TypeError('material is not a redshift material') self._mat = mat self._gvMaster = redshift.GetRSMaterialNodeMaster(mat) if not self._gvMaster: raise TypeError('can\'t get GvMaster from mat') def _CheckMatIsValid(self): global redshift if self._mat is None: raise TypeError('Mat is not define') if not isinstance(self._mat, c4d.BaseMaterial): raise TypeError('material is not a c4d.BaseMaterial') if not self._mat.IsInstanceOf(redshift.Mrsmaterial): raise TypeError('material is not a redshift material')
MIT License
rlowrance/re-avm
samples-train-analysis.py
make_transactions_mapper
python
def make_transactions_mapper(info):
    verbose = False
    if verbose:
        print 'make_transactions_mapper begin', info.apn_date, 'pid', os.getpid()
    apn_date, df, column = info.apn_date, info.df, info.column
    # select all transaction rows for this (apn, date) pair
    mask_apn = df[column.apn] == apn_date.apn
    mask_date = df[column.date] == apn_date.date
    mask = mask_apn & mask_date
    df_apn_date = df[mask]
    sequence_number = 0
    result_duplicates = set()
    result_df = pd.DataFrame()
    for label, row in df_apn_date.iterrows():
        if verbose:
            print 'make_transactions_mapper iter row label', info.apn_date, label
        if sequence_number > 0:
            # more than one row for the same (apn, date) means a duplicate
            result_duplicates.add(apn_date)
        new_df = create_new_row(apn_date.apn, apn_date.date, sequence_number, row, column)
        result_df = result_df.append(new_df, ignore_index=True, verify_integrity=True)
        sequence_number += 1
    if verbose:
        print 'make_transactions_mapper end', len(result_df), len(result_duplicates)
    return MappedItem(
        transactions=result_df,
        duplicates=result_duplicates,
    )
return (df of transactions, set of duplicates)
https://github.com/rlowrance/re-avm/blob/d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2/samples-train-analysis.py#L125-L152
import argparse import collections import math import multiprocessing import numpy as np import os import pandas as pd import pdb import random import sys import Bunch import dirutility import Logger import Month import Path import Timer def make_control(argv): print 'argv', argv parser = argparse.ArgumentParser() parser.add_argument('invocation') parser.add_argument('--test', action='store_true', help='if present, truncated input and enable test code') parser.add_argument('--trace', action='store_true', help='if present, call pdb.set_trace() early in run') arg = parser.parse_args(argv) arg.me = arg.invocation.split('.')[0] if arg.trace: pdb.set_trace() random_seed = 123 random.seed(random_seed) dir_working = Path.Path().dir_working() path_out_dir = dirutility.assure_exists(dir_working + arg.me + ('-test' if arg.test else '') + '/') return Bunch.Bunch( arg=arg, path_in_samples=dir_working + 'samples-train.csv', path_out_log=path_out_dir + '0log.txt', path_out_csv=path_out_dir + 'transactions.csv', random_seed=random_seed, test=arg.test, timer=Timer.Timer(), ) def make_index(apn, date, sequence_number): return '%d-%d-%d' % (apn, date, sequence_number) APN_Date = collections.namedtuple('APN_Date', 'apn date') ColumnName = collections.namedtuple('ColumnName', 'apn date actual_price') def column_names(): return ColumnName( apn='APN UNFORMATTED_deed', date='SALE DATE_deed', actual_price='SALE AMOUNT_deed', ) def create_new_row(apn, date, sequence_number, row, column): date = int(date) date_year = int(date / 10000) date_month = int((date - date_year * 10000) / 100) date_day = int(date - date_year * 10000 - date_month * 100) assert date == date_year * 10000 + date_month * 100 + date_day, date new_df = pd.DataFrame( data={ 'apn': int(apn), 'date': date, 'year': date_year, 'month': date_month, 'day': date_day, 'sequence_number': sequence_number, 'actual_price': row[column.actual_price], }, index=[make_index(apn, date, sequence_number)], ) return new_df class DuplicateFinder(object): def __init__(self, df, column): self.df = df self.column = column def find_duplicates_method(self, apn_date): mask_apn = self.df[self.column.apn] == apn_date.apn mask_date = self.df[self.column.date] == apn_date.date mask = mask_apn & mask_date df_apn_date = self.df[mask] sequence_number = 0 result_duplicates = set() result_df = None for i, row in df_apn_date.iterrows(): if sequence_number > 0: result_duplicates.add(apn_date) new_df = create_new_row(apn_date.apn, apn_date.date, sequence_number, row, self.column) result_df = new_df if result_df is None else result_df.append(new_df, verify_integrity=True) sequence_number += 1 return result_df, result_duplicates Info = collections.namedtuple('Info', 'apn_date, df, column') MappedItem = collections.namedtuple('MappedItem', 'transactions duplicates')
BSD 3-Clause New or Revised License
timkpaine/tdameritrade
tdameritrade/client.py
TDClient.cancelOrder
python
def cancelOrder(self, accountId, orderId):
    return self._request(
        CANCEL_ORDER.format(accountId=accountId, orderId=orderId), method="DELETE"
    )
cancel the given order Args: accountId (int): account id the order is under orderId (int): order id of order to cancel
https://github.com/timkpaine/tdameritrade/blob/4873bbce5362ed45d72e9b3fea4b0827a154c2bd/tdameritrade/client.py#L528-L537
import pandas as pd import os from .session import TDASession from .exceptions import handle_error_response, TDAAPIError from .urls import ( CANCEL_ORDER, GET_ORDER_BY_QUERY, PLACE_ORDER, REPLACE_ORDER, STATUS_VALUES, CREATE_SAVED_ORDER, DELETE_SAVED_ORDER, GET_SAVED_ORDER, GET_SAVED_ORDER_BY_PATH, REPLACE_SAVED_ORDER, GET_ACCOUNT, GET_ACCOUNTS, SEARCH_INSTRUMENTS, SEARCH_INSTRUMENT_PROJECTION, GET_INSTRUMENT, GET_HOURS_FOR_MULTIPLE_MARKETS, MARKETS_VALUES, GET_HOURS_FOR_SINGLE_MARKET, MOVERS, DIRECTION_VALUES, CHANGE_VALUES, GET_OPTION_CHAIN, CONTRACT_TYPE_VALUES, STRATEGY_VALUES, RANGE_VALUES, OPTION_TYPE_VALUES, OPTION_EXPMONTH_VALUES, GET_PRICE_HISTORY, PERIOD_TYPE_VALUES, FREQUENCY_TYPE_VALUES, GET_QUOTES, GET_TRANSCATION_TYPE_VALUES, GET_PREFERENCES, UPDATE_PREFERENCES, CREATE_WATCHLIST, DELETE_WATCHLIST, GET_WATCHLIST, GET_WATCHLISTS, GET_WATCHLISTS_MULTIPLE_ACCOUNTS, REPLACE_WATCHLIST, UPDATE_WATCHLIST, ) def response_is_valid(resp): return resp.status_code in (200, 201, 204) class TDClient(object): def __init__(self, client_id=None, refresh_token=None, account_ids=None): self._clientId = client_id or os.environ["TDAMERITRADE_CLIENT_ID"] self._refreshToken = refresh_token or os.environ["TDAMERITRADE_REFRESH_TOKEN"] self.accountIds = account_ids or [] self.session = TDASession(self._refreshToken, self._clientId) def _request(self, url, method="GET", params=None, *args, **kwargs): resp = self.session.request(method, url, params=params, *args, **kwargs) if not response_is_valid(resp): handle_error_response(resp) return resp def accounts(self, positions=False, orders=False): ret = {} if positions or orders: params = {"fields": []} if positions: params["fields"].append("positions") if orders: params["fields"].append("orders") params["fields"] = ",".join(params["fields"]) else: params = {} if self.accountIds: for acc in self.accountIds: resp = self._request(GET_ACCOUNT.format(accountId=acc), params=params) ret[acc] = resp.json() else: resp = self._request(GET_ACCOUNTS, params=params) for account in resp.json(): ret[account["securitiesAccount"]["accountId"]] = account self.accountIds = [int(accountId) for accountId in ret] return ret def accountsDF(self): data = self.accounts() account_dataframes = [] for accountId, value in data.items(): account_dataframes.append(pd.io.json.json_normalize(value)) account_dataframes[-1].columns = [ c.replace("securitiesAccount.", "") for c in account_dataframes[-1].columns ] return pd.concat(account_dataframes) def transactions( self, accountId=None, type=None, symbol=None, startDate=None, endDate=None ): if accountId: accounts = [accountId] else: accounts = self.accountIds if type not in GET_TRANSCATION_TYPE_VALUES: raise TDAAPIError( "Transaction type must be in {}".format(GET_TRANSCATION_TYPE_VALUES) ) ret = {} for account in accounts: transactions = GET_ACCOUNT.format(accountId=account) + "/transactions" ret[account] = self._request( transactions, params={ "type": type, "symbol": symbol, "startDate": startDate, "endDate": endDate, }, ).json() return ret def transactionsDF( self, accountId=None, type=None, symbol=None, startDate=None, endDate=None ): return pd.json_normalize( self.transactions( accountId=accountId, type=type, symbol=symbol, startDate=startDate, endDate=endDate, ) ) def search(self, symbol, projection="symbol-search"): if projection not in SEARCH_INSTRUMENT_PROJECTION: raise TDAAPIError( "Projection must be in {}".format(SEARCH_INSTRUMENT_PROJECTION) ) return self._request( SEARCH_INSTRUMENTS, params={"symbol": symbol, "projection": 
projection} ).json() def searchDF(self, symbol, projection="symbol-search"): ret = [] dat = self.search(symbol, projection) for symbol in dat: ret.append(dat[symbol]) return pd.DataFrame(ret) def fundamentalSearch(self, symbol): return self.search(symbol, "fundamental") def fundamentalSearchDF(self, symbol): return self.searchDF(symbol, "fundamental") def instrument(self, cusip): return self._request(GET_INSTRUMENT.format(cusip=cusip)).json() def instrumentDF(self, cusip): return pd.DataFrame(self.instrument(cusip)) def quote(self, symbol): if not isinstance(symbol, list): symbol = [symbol] return self._request( GET_QUOTES, params={"symbol": [s.upper() for s in symbol]} ).json() def quoteDF(self, symbol): x = self.quote(symbol) return pd.DataFrame(x).T.reset_index(drop=True) def history( self, symbol, periodType=None, period=None, frequencyType=None, frequency=None, endDate=None, startDate=None, needExtendedHoursData=True, ): params = {} if periodType or period: if periodType not in PERIOD_TYPE_VALUES: raise TDAAPIError( "Period type must be in {}".format(PERIOD_TYPE_VALUES) ) params["period"] = period params["periodType"] = periodType if frequencyType or frequency: if frequencyType not in FREQUENCY_TYPE_VALUES: raise TDAAPIError( "Frequency type must be in {}".format(FREQUENCY_TYPE_VALUES) ) params["frequency"] = frequency params["frequencyType"] = frequencyType if startDate: params["startDate"] = startDate if endDate: params["endDate"] = endDate params["needExtendedHoursData"] = needExtendedHoursData return self._request( GET_PRICE_HISTORY.format(symbol=symbol), params=params ).json() def historyDF(self, symbol, **kwargs): x = self.history(symbol, **kwargs) df = pd.DataFrame(x["candles"]) df["datetime"] = pd.to_datetime(df["datetime"], unit="ms") return df def options( self, symbol, contractType="ALL", strikeCount=-1, includeQuotes=False, strategy="SINGLE", interval=None, strike=None, range="ALL", fromDate=None, toDate=None, volatility=None, underlyingPrice=None, interestRate=None, daysToExpiration=None, expMonth="ALL", optionType="ALL", ): params = {"symbol": symbol} if contractType not in CONTRACT_TYPE_VALUES: raise TDAAPIError( "Contract type must be in {}".format(CONTRACT_TYPE_VALUES) ) params["contractType"] = contractType if strikeCount: params["strikeCount"] = strikeCount params["includeQuotes"] = includeQuotes if strategy not in STRATEGY_VALUES: raise TDAAPIError("Strategy must be in {}".format(STRATEGY_VALUES)) params["strategy"] = strategy if interval: params["interval"] = interval if strike: params["strike"] = strike if range not in RANGE_VALUES: raise TDAAPIError("Range must be in {}".format(RANGE_VALUES)) params["range"] = range if fromDate: params["fromDate"] = fromDate if toDate: params["toDate"] = toDate if strategy == "ANALYTICAL": if volatility: params["volatility"] = volatility if underlyingPrice: params["underlyingPrice"] = underlyingPrice if interestRate: params["interestRate"] = interestRate if daysToExpiration: params["daysToExpiration"] = daysToExpiration if expMonth not in OPTION_EXPMONTH_VALUES: raise TDAAPIError( "Expiration month must be in {}".format(OPTION_EXPMONTH_VALUES) ) params["expMonth"] = expMonth if optionType not in OPTION_TYPE_VALUES: raise TDAAPIError("Option type must be in {}".format(OPTION_TYPE_VALUES)) return self._request(GET_OPTION_CHAIN, params=params).json() def optionsDF( self, symbol, contractType="ALL", strikeCount=-1, includeQuotes=False, strategy="SINGLE", interval=None, strike=None, range="ALL", fromDate=None, toDate=None, 
volatility=None, underlyingPrice=None, interestRate=None, daysToExpiration=None, expMonth="ALL", optionType="ALL", ): ret = [] dat = self.options( symbol=symbol, contractType=contractType, strikeCount=strikeCount, includeQuotes=includeQuotes, strategy=strategy, interval=interval, strike=strike, range=range, fromDate=fromDate, toDate=toDate, volatility=volatility, underlyingPrice=underlyingPrice, interestRate=interestRate, daysToExpiration=daysToExpiration, expMonth=expMonth, optionType=optionType, ) for date in dat["callExpDateMap"]: for strike in dat["callExpDateMap"][date]: ret.extend(dat["callExpDateMap"][date][strike]) for date in dat["putExpDateMap"]: for strike in dat["putExpDateMap"][date]: ret.extend(dat["putExpDateMap"][date][strike]) df = pd.DataFrame(ret) for col in ( "tradeTimeInLong", "quoteTimeInLong", "expirationDate", "lastTradingDay", ): df[col] = pd.to_datetime(df[col], unit="ms") return df def movers(self, index, direction="up", change="percent"): params = {} if direction not in DIRECTION_VALUES: raise TDAAPIError("Direction must be in {}".format(DIRECTION_VALUES)) params["direction"] = direction if change not in CHANGE_VALUES: raise TDAAPIError("Change mus be in {}".format(CHANGE_VALUES)) params["change"] = change return self._request(MOVERS.format(index=index), params=params).json() def orders( self, accountId=None, orderId=None, maxResults=-1, fromEnteredTime=None, toEnteredTime=None, status=None, ): params = {} if status and status not in STATUS_VALUES: raise TDAAPIError("Status must be in {}".format(STATUS_VALUES)) elif status: params["status"] = status if accountId: params["accountId"] = accountId if orderId: params["orderId"] = orderId if maxResults: params["maxResults"] = maxResults if fromEnteredTime: params["fromEnteredTime"] = fromEnteredTime if toEnteredTime: params["toEnteredTime"] = toEnteredTime return self._request(GET_ORDER_BY_QUERY, json=params).json()
Apache License 2.0
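Usage sketch for the cancelOrder entry above. The account and order ids are placeholders, and valid TD Ameritrade OAuth credentials (or the environment variables read in TDClient.__init__ shown in the context) are assumed.

from tdameritrade import TDClient  # assumes the package is installed

# Credentials are read from TDAMERITRADE_CLIENT_ID / TDAMERITRADE_REFRESH_TOKEN
# when not passed explicitly (see TDClient.__init__ in the context above).
client = TDClient()

account_id = 123456789   # placeholder account id
order_id = 987654321     # placeholder order id

# Issues a DELETE against the cancel-order endpoint via _request.
client.cancelOrder(account_id, order_id)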
openstack/cinder
cinder/volume/drivers/hitachi/hbsd_rest_api.py
ResponseData.is_locked
python
def is_locked(self):
    if not self['errobj']:
        return False
    message_id = self['errobj'].get('messageId')
    retcode = self['errobj'].get('errorCode', {}).get('errorCode')
    return (message_id in _MSGID_LOCK_FAILURE or
            self.get_err_code() in _REST_LOCKED_ERRORS or
            retcode == 'EX_EACCES')
Check whether the response indicates a lock-related error.
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/volume/drivers/hitachi/hbsd_rest_api.py#L146-L154
from http import client as httpclient import threading from eventlet import greenthread from keystoneauth1.session import TCPKeepAliveAdapter from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import timeutils import requests from cinder.volume.drivers.hitachi import hbsd_utils as utils from cinder.volume import volume_utils _LOCK_WAITTIME = 2 * 60 * 60 _EXEC_MAX_WAITTIME = 30 _EXTEND_WAITTIME = 10 * 60 _EXEC_RETRY_INTERVAL = 5 _DEFAULT_CONNECT_TIMEOUT = 30 _RESPONSE_TIMEOUT_TOLERANCE = 30 _JOB_API_RESPONSE_TIMEOUT = 30 * 60 _GET_API_RESPONSE_TIMEOUT = 30 * 60 _REST_SERVER_BUSY_TIMEOUT = 2 * 60 * 60 _REST_SERVER_RESTART_TIMEOUT = 10 * 60 _REST_SERVER_ERROR_TIMEOUT = 10 * 60 _KEEP_SESSION_LOOP_INTERVAL = 3 * 60 _HTTPS = 'https://' _REST_LOCKED_ERRORS = [ ('2E11', '2205'), ('2E11', '2207'), ] LDEV_ALREADY_DEFINED = ('2E22', '0001') NO_AVAILABLE_LDEV_ID = ('2E11', '2209') INVALID_SNAPSHOT_POOL = ('2E30', '600E') _MSGID_REST_SERVER_BUSY = ('KART00003-E',) _MSGID_LOCK_FAILURE = ('KART40050-E', 'KART40051-E', 'KART40052-E') EXCEED_WWN_MAX = ('B957', '4184') ANOTHER_LDEV_MAPPED = ('B958', '0947') REST_NO_RETRY_ERRORS = [ ('2E10', '9705'), ('2E10', '9706'), ('2E10', '9707'), ('2E11', '8303'), ('2E30', '0007'), ('B956', '3173'), ('B956', '31D7'), ('B956', '31D9'), ('B957', '4188'), ('B958', '015A'), ('B958', '015E'), LDEV_ALREADY_DEFINED, NO_AVAILABLE_LDEV_ID, EXCEED_WWN_MAX, INVALID_SNAPSHOT_POOL, ] MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST = 'KART30013-E' _REST_NO_RETRY_MESSAGEIDS = [ MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST ] LOG = logging.getLogger(__name__) MSG = utils.HBSDMsg def _build_base_url(ip_addr, ip_port): return '%(https)s%(ip)s:%(port)s/ConfigurationManager' % { 'https': _HTTPS, 'ip': ip_addr, 'port': ip_port, } class ResponseData(dict): def is_json(self): return (self['rsp'].content and 'json' in self['rsp'].headers['Content-Type']) def _init_content(self): if self.is_json(): self['rsp_body'] = self['rsp'].json() elif self['rsp'].content: self['rsp_body'] = self['rsp'].text else: self['rsp_body'] = None def _init_error(self): if self['rsp_body'] and 'errorSource' in self['rsp_body']: self['errobj'] = self['rsp_body'] elif self['rsp_body'] and 'error' in self['rsp_body']: self['errobj'] = self['rsp_body']['error'] else: self['errobj'] = {} def __init__(self, rsp): super(ResponseData, self).__init__() self['rsp'] = rsp self['status_code'] = rsp.status_code self._init_content() self._init_error() def job_succeeded(self): return (self.is_json() and self['rsp_body'].get('status') == 'Completed' and self['rsp_body'].get('state') == 'Succeeded') def get_err_code(self): return utils.safe_get_err_code(self['errobj']) def get_return_code(self): return utils.safe_get_return_code(self['errobj']) def is_success(self, ignore_error, ignore_message_id, ignore_return_code, ignore_all_errors=False): return (ignore_all_errors or self['status_code'] == httpclient.OK or (self['status_code'] == httpclient.ACCEPTED and self.job_succeeded()) or self.get_err_code() in ignore_error or self['errobj'].get('messageId') in ignore_message_id or self.get_return_code() in ignore_return_code)
Apache License 2.0
thingsboard/python_tb_rest_client
tb_rest_client/models/models_pe/white_labeling_params.py
WhiteLabelingParams.custom_css
python
def custom_css(self, custom_css):
    self._custom_css = custom_css
Sets the custom_css of this WhiteLabelingParams. :param custom_css: The custom_css of this WhiteLabelingParams. # noqa: E501 :type: str
https://github.com/thingsboard/python_tb_rest_client/blob/87c6a3703974fc8a86e4c72c444168ee2b758ecb/tb_rest_client/models/models_pe/white_labeling_params.py#L142-L150
import pprint import re import six class WhiteLabelingParams(object): swagger_types = { 'app_title': 'str', 'custom_css': 'str', 'enable_help_links': 'bool', 'favicon': 'Favicon', 'favicon_checksum': 'str', 'help_link_base_url': 'str', 'logo_image_checksum': 'str', 'logo_image_height': 'int', 'logo_image_url': 'str', 'palette_settings': 'PaletteSettings', 'platform_name': 'str', 'platform_version': 'str', 'show_name_version': 'bool', 'white_labeling_enabled': 'bool' } attribute_map = { 'app_title': 'appTitle', 'custom_css': 'customCss', 'enable_help_links': 'enableHelpLinks', 'favicon': 'favicon', 'favicon_checksum': 'faviconChecksum', 'help_link_base_url': 'helpLinkBaseUrl', 'logo_image_checksum': 'logoImageChecksum', 'logo_image_height': 'logoImageHeight', 'logo_image_url': 'logoImageUrl', 'palette_settings': 'paletteSettings', 'platform_name': 'platformName', 'platform_version': 'platformVersion', 'show_name_version': 'showNameVersion', 'white_labeling_enabled': 'whiteLabelingEnabled' } def __init__(self, app_title=None, custom_css=None, enable_help_links=None, favicon=None, favicon_checksum=None, help_link_base_url=None, logo_image_checksum=None, logo_image_height=None, logo_image_url=None, palette_settings=None, platform_name=None, platform_version=None, show_name_version=None, white_labeling_enabled=None): self._app_title = None self._custom_css = None self._enable_help_links = None self._favicon = None self._favicon_checksum = None self._help_link_base_url = None self._logo_image_checksum = None self._logo_image_height = None self._logo_image_url = None self._palette_settings = None self._platform_name = None self._platform_version = None self._show_name_version = None self._white_labeling_enabled = None self.discriminator = None if app_title is not None: self.app_title = app_title if custom_css is not None: self.custom_css = custom_css if enable_help_links is not None: self.enable_help_links = enable_help_links if favicon is not None: self.favicon = favicon if favicon_checksum is not None: self.favicon_checksum = favicon_checksum if help_link_base_url is not None: self.help_link_base_url = help_link_base_url if logo_image_checksum is not None: self.logo_image_checksum = logo_image_checksum if logo_image_height is not None: self.logo_image_height = logo_image_height if logo_image_url is not None: self.logo_image_url = logo_image_url if palette_settings is not None: self.palette_settings = palette_settings if platform_name is not None: self.platform_name = platform_name if platform_version is not None: self.platform_version = platform_version if show_name_version is not None: self.show_name_version = show_name_version if white_labeling_enabled is not None: self.white_labeling_enabled = white_labeling_enabled @property def app_title(self): return self._app_title @app_title.setter def app_title(self, app_title): self._app_title = app_title @property def custom_css(self): return self._custom_css @custom_css.setter
Apache License 2.0
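A small sketch of how the generated custom_css setter above is used; the CSS string and app title are arbitrary examples.

from tb_rest_client.models.models_pe.white_labeling_params import WhiteLabelingParams

params = WhiteLabelingParams(app_title="My Platform")
# The property setter shown above simply stores the value on the instance.
params.custom_css = ".tb-toolbar { background-color: #305680; }"
print(params.custom_css)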
luciferjack/python-mysql-pool
PyMysqlPool/mysql/connector/fabric/caching.py
FabricCache.group_search
python
def group_search(self, group_name):
    entry_hash = CacheGroup.hash_index(group_name)
    entry = None
    try:
        entry = self._groups[entry_hash]
        if entry.invalid:
            _LOGGER.debug("{0} invalidated".format(entry))
            self.remove_group(entry_hash)
            return None
    except KeyError:
        return None
    return entry
Search cache for a group based on its name
https://github.com/luciferjack/python-mysql-pool/blob/7b812c6fc7f04255620cb86f272a2d8900c2240d/PyMysqlPool/mysql/connector/fabric/caching.py#L259-L274
import bisect import logging import threading from datetime import datetime, timedelta from hashlib import sha1 from . import FabricShard _LOGGER = logging.getLogger('myconnpy-fabric') _CACHE_TTL = 1 * 60 def insort_right_rev(alist, new_element, low=0, high=None): if low < 0: raise ValueError('low must be non-negative') if high is None: high = len(alist) while low < high: middle = (low + high) // 2 if new_element > alist[middle]: high = middle else: low = middle + 1 alist.insert(low, new_element) class CacheEntry(object): def __init__(self, version=None, fabric_uuid=None, ttl=_CACHE_TTL): self.version = version self.fabric_uuid = fabric_uuid self.last_updated = datetime.utcnow() self._ttl = ttl @classmethod def hash_index(cls, part1, part2=None): raise NotImplementedError @property def invalid(self): if not self.last_updated: return False atime = self.last_updated + timedelta(seconds=self._ttl) return datetime.utcnow() > atime def reset_ttl(self): self.last_updated = datetime.utcnow() def invalidate(self): self.last_updated = None class CacheShardTable(CacheEntry): def __init__(self, shard, version=None, fabric_uuid=None): if not isinstance(shard, FabricShard): raise ValueError("shard argument must be a FabricShard instance") super(CacheShardTable, self).__init__(version=version, fabric_uuid=fabric_uuid) self.partitioning = {} self._shard = shard self.keys = [] self.keys_reversed = [] if shard.key and shard.group: self.add_partition(shard.key, shard.group) def __getattr__(self, attr): return getattr(self._shard, attr) def add_partition(self, key, group): if self.shard_type == 'RANGE': key = int(key) elif self.shard_type == 'RANGE_DATETIME': try: if ':' in key: key = datetime.strptime(key, "%Y-%m-%d %H:%M:%S") else: key = datetime.strptime(key, "%Y-%m-%d").date() except: raise ValueError( "RANGE_DATETIME key could not be parsed, was: {0}".format( key )) elif self.shard_type == 'RANGE_STRING': pass elif self.shard_type == "HASH": pass else: raise ValueError("Unsupported sharding type {0}".format( self.shard_type )) self.partitioning[key] = { 'group': group, } self.reset_ttl() bisect.insort_right(self.keys, key) insort_right_rev(self.keys_reversed, key) @classmethod def hash_index(cls, part1, part2=None): return sha1(part1.encode('utf-8') + part2.encode('utf-8')).hexdigest() def __repr__(self): return "{class_}({database}.{table}.{column})".format( class_=self.__class__, database=self.database, table=self.table, column=self.column ) class CacheGroup(CacheEntry): def __init__(self, group_name, servers): super(CacheGroup, self).__init__(version=None, fabric_uuid=None) self.group_name = group_name self.servers = servers @classmethod def hash_index(cls, part1, part2=None): return sha1(part1.encode('utf-8')).hexdigest() def __repr__(self): return "{class_}({group})".format( class_=self.__class__, group=self.group_name, ) class FabricCache(object): def __init__(self, ttl=_CACHE_TTL): self._ttl = ttl self._sharding = {} self._groups = {} self.__sharding_lock = threading.Lock() self.__groups_lock = threading.Lock() def remove_group(self, entry_hash): with self.__groups_lock: try: del self._groups[entry_hash] except KeyError: pass else: _LOGGER.debug("Group removed from cache") def remove_shardtable(self, entry_hash): with self.__sharding_lock: try: del self._sharding[entry_hash] except KeyError: pass def sharding_cache_table(self, shard, version=None, fabric_uuid=None): entry_hash = CacheShardTable.hash_index(shard.database, shard.table) with self.__sharding_lock: try: entry = 
self._sharding[entry_hash] entry.add_partition(shard.key, shard.group) except KeyError: entry = CacheShardTable(shard, version=version, fabric_uuid=fabric_uuid) self._sharding[entry_hash] = entry def cache_group(self, group_name, servers): entry_hash = CacheGroup.hash_index(group_name) with self.__groups_lock: try: entry = self._groups[entry_hash] entry.servers = servers entry.reset_ttl() _LOGGER.debug("Recaching group {0} with {1}".format( group_name, servers)) except KeyError: entry = CacheGroup(group_name, servers) self._groups[entry_hash] = entry _LOGGER.debug("Caching group {0} with {1}".format( group_name, servers)) def sharding_search(self, database, table): entry_hash = CacheShardTable.hash_index(database, table) entry = None try: entry = self._sharding[entry_hash] if entry.invalid: _LOGGER.debug("{0} invalidated".format(entry)) self.remove_shardtable(entry_hash) return None except KeyError: return None return entry
MIT License
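An illustrative sketch for the group_search entry above (group and server names are made up, and the PyMysqlPool package is assumed importable): cache a group, look it up, and note that unknown or expired entries return None.

from PyMysqlPool.mysql.connector.fabric.caching import FabricCache

cache = FabricCache(ttl=60)

# Cache a hypothetical HA group and its servers.
cache.cache_group("group-east", ["server-1:3306", "server-2:3306"])

entry = cache.group_search("group-east")
if entry is not None:
    print(entry.group_name, entry.servers)

# Unknown (or TTL-expired) groups simply return None.
print(cache.group_search("no-such-group"))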
shtalinberg/django-el-pagination
el_pagination/tests/templatetags/test_el_pagination_tags.py
TemplateTagsTestMixin.request
python
def request(self, url='/', page=None, data=None, **kwargs):
    querydict = {} if data is None else data
    querydict.update(kwargs)
    if page is not None:
        querydict[settings.PAGE_LABEL] = page
    return self.factory.get(url, querydict)
Return a Django request for the given *page*.
https://github.com/shtalinberg/django-el-pagination/blob/4273dbfccd46d58a92e90b1b979f88eb8daab064/el_pagination/tests/templatetags/test_el_pagination_tags.py#L48-L54
from __future__ import unicode_literals import string import sys import unittest import xml.etree.ElementTree as etree from django.http import Http404 from django.template import Context, Template, TemplateSyntaxError from django.template.context import make_context from django.test import TestCase from django.test.client import RequestFactory from el_pagination import settings from el_pagination.exceptions import PaginationError from el_pagination.models import PageList from project.models import make_model_instances skip_if_old_etree = unittest.skipIf( sys.version_info < (2, 7), 'XPath not supported by this Python version.') class TemplateTagsTestMixin(object): def setUp(self): self.factory = RequestFactory() def render(self, request, contents, **kwargs): template = Template('{% load el_pagination_tags %}' + contents) context_data = kwargs.copy() if kwargs else {'objects': range(47)} context_data['request'] = request context = Context(context_data) if isinstance(context, dict): context = make_context(context, request, autoescape=self.backend.engine.autoescape) html = template.render(context) return html.strip(), context
MIT License
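What the request helper above boils down to, as a hedged sketch: it builds a GET request whose querystring carries the page number under settings.PAGE_LABEL. The snippet assumes a configured Django settings module (as in the test suite shown in the context) and that the page label is the default "page".

from django.test.client import RequestFactory

# Equivalent of self.request('/entries/', page=3) when PAGE_LABEL == 'page'
# (an assumption about the default setting).
factory = RequestFactory()
request = factory.get('/entries/', {'page': 3})
print(request.GET['page'])  # '3'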
terrance/immp
immp/plug/slack.py
SlackUser.from_bot
python
def from_bot(cls, slack, json):
    bot = _Schema.bot(json)
    return cls(id_=(bot["app_id"] or bot["id"]),
               plug=slack,
               real_name=bot["name"],
               avatar=cls._best_image(bot["icons"]),
               bot_id=bot["id"],
               app=True,
               raw=json)
Convert an API bot :class:`dict` to a :class:`.User`. Args: slack (.SlackPlug): Related plug instance that provides the user. json (dict): Slack API bot object. Returns: .SlackUser: Parsed user object.
https://github.com/terrance/immp/blob/e710648a3d3b47107fba7659c9b4b20823a23cb7/immp/plug/slack.py#L304-L325
from asyncio import CancelledError, ensure_future, gather, Lock, sleep from copy import copy from collections import defaultdict from datetime import datetime, timezone from functools import partial from json import dumps as json_dumps import logging import re import time from aiohttp import ClientResponseError, FormData from emoji import emojize import immp log = logging.getLogger(__name__) class _Schema: image_sizes = ("original", "512", "192", "72", "48", "32", "24") _images = {immp.Optional("image_{}".format(size)): immp.Nullable(str) for size in image_sizes} config = immp.Schema({"token": str, immp.Optional("app-token"): immp.Nullable(str), immp.Optional("fallback-image"): immp.Nullable(str), immp.Optional("thread-broadcast", False): bool, immp.Optional("real-names", True): bool}) user = immp.Schema({"id": str, "name": str, "profile": {immp.Optional("real_name"): immp.Nullable(str), immp.Optional("bot_id"): immp.Nullable(str), **_images}}) bot = immp.Schema({"id": str, immp.Optional("app_id"): immp.Nullable(str), "name": str, "icons": _images}) _channel = {"id": str, immp.Optional("name"): immp.Nullable(str), immp.Optional("is_im", False): bool} direct = immp.Schema({"id": str, "user": str}) _shares = {str: [{"ts": str}]} file = immp.Schema(immp.Any({"id": str, "name": immp.Nullable(str), "pretty_type": str, "url_private": str, immp.Optional("mode"): immp.Nullable(str), immp.Optional("shares", dict): {immp.Optional("public", dict): _shares, immp.Optional("private", dict): _shares}}, {"id": str, "mode": "tombstone"})) attachment = immp.Schema({immp.Optional("fallback"): immp.Nullable(str), immp.Optional("title"): immp.Nullable(str), immp.Optional("image_url"): immp.Nullable(str), immp.Optional("is_msg_unfurl", False): bool}) msg_unfurl = immp.Schema({"channel_id": str, "ts": str}, attachment) _base_msg = {"ts": str, "type": "message", immp.Optional("hidden", False): bool, immp.Optional("channel"): immp.Nullable(str), immp.Optional("edited", dict): {immp.Optional("ts"): immp.Nullable(str), immp.Optional("user"): immp.Nullable(str)}, immp.Optional("thread_ts"): immp.Nullable(str), immp.Optional("files", list): [file], immp.Optional("attachments", list): [attachment], immp.Optional("is_ephemeral", False): bool} _plain_msg = {immp.Optional("user"): immp.Nullable(str), immp.Optional("bot_id"): immp.Nullable(str), immp.Optional("username"): immp.Nullable(str), immp.Optional("icons", dict): dict, "text": str, **_base_msg} message = immp.Schema(immp.Any({"subtype": "file_comment", **_base_msg}, {"subtype": "message_changed", **_base_msg}, {"subtype": "message_deleted", "deleted_ts": str, **_base_msg}, {"subtype": immp.Any("channel_name", "group_name"), "name": str, **_plain_msg}, {immp.Optional("subtype"): immp.Nullable(str), **_plain_msg})) message.raw.choices[1].update({"message": message, "previous_message": message}) event = immp.Schema(immp.Any(message, {"type": "team_pref_change", "name": str, "value": immp.Any()}, {"type": immp.Any("team_join", "user_change"), "user": user}, {"type": immp.Any("channel_created", "channel_joined", "channel_rename", "group_created", "group_joined", "group_rename"), "channel": {"id": str, "name": str}}, {"type": "im_created", "channel": {"id": str}}, {"type": immp.Any("member_joined_channel", "member_left_channel"), "user": str, "channel": str}, {"type": "message", immp.Optional("subtype"): immp.Nullable(str)}, {"type": str})) socket_event = immp.Schema(immp.Any({"type": "events_api", immp.Optional("envelope_id"): immp.Nullable(str), "payload": {"type": 
"event_callback", "event": event}}, {"type": str, immp.Optional("envelope_id"): immp.Nullable(str)})) def _api(nested={}): return immp.Schema(immp.Any({"ok": True, immp.Optional("response_metadata", dict): {immp.Optional("next_cursor", ""): str}, **nested}, {"ok": False, "error": str})) socket_open = _api({"url": str}) auth_test = _api({"user_id": str}) team_info = _api({"team": {"id": str, "name": str, "domain": str}}) users_list = _api({"members": [user]}) bot_info = _api({"bot": bot}) convs_list = _api({"channels": [_channel]}) conv_open = _api({"channel": direct}) conv_members = _api({"members": [str]}) conv_history = _api({"messages": [message]}) chat_post = _api({"channel": str, "message": message}) file_upload = _api({"file": file}) api = _api() class SlackAPIError(immp.PlugError): class MessageNotFound(Exception): pass class SlackUser(immp.User): def __init__(self, id_=None, plug=None, display_name=None, real_name=None, avatar=None, bot_id=None, app=False, raw=None): super().__init__(id_=id_, plug=plug, avatar=avatar, raw=raw) self._display_name = display_name self._real_name = real_name self._real_name_override = None self.bot_id = bot_id self.app = app @property def real_name(self): if self._real_name_override: return self._real_name_override elif self.plug.config["real-names"]: return self._real_name or self._display_name else: return self._display_name or self._real_name @real_name.setter def real_name(self, value): self._real_name_override = value @property def link(self): return "https://{}.slack.com/{}/{}".format(self.plug._team["domain"], "apps" if self.app else "team", self.id) @link.setter def link(self, value): pass @classmethod def _best_image(cls, profile): for size in _Schema.image_sizes: if "image_{}".format(size) in profile: return profile["image_{}".format(size)] return None @classmethod def from_member(cls, slack, json): member = _Schema.user(json) return cls(id_=member["id"], plug=slack, display_name=member["profile"]["display_name"], real_name=member["profile"]["real_name"], avatar=cls._best_image(member["profile"]), bot_id=member["profile"]["bot_id"], raw=json) @classmethod
BSD 3-Clause New or Revised License
microsoft/distributeddeeplearning
{{cookiecutter.project_name}}/tasks.py
tensorboard
python
def tensorboard(c, experiment, runs=None):
    cmd = f"tmux neww -d -n tensorboard python control/src/aml_compute.py tensorboard --experiment {experiment} "
    if runs:
        cmd = cmd + f"--runs {runs}"
    c.run(cmd)
Runs tensorboard in a separate tmux session Note: If no runs are specified it will simply look for the runs that are still running. To see runs that completed or failed, also include the run identifiers Args: experiment (string): The name of the experiment you wish to display the logged information for runs (list[string], optional): The list of run identifiers you want to display in tensorboard from the experiment. Defaults to None.
https://github.com/microsoft/distributeddeeplearning/blob/2f407881b49415188ca2e38e5331781962939251/{{cookiecutter.project_name}}/tasks.py#L121-L134
import logging import logging.config from invoke import task, Collection from dotenv import find_dotenv, set_key from invoke.exceptions import Failure from config import load_config import os import tensorflow_experiment import tensorflow_benchmark import tfrecords import tensorflow_imagenet import pytorch_imagenet import storage import image import pytorch_experiment import pytorch_benchmark from invoke.executor import Executor logging.config.fileConfig(os.getenv("LOG_CONFIG", "logging.conf")) env_values = load_config() _USE_IMAGENET = False _USE_IMAGENET = True def _is_loged_in(c): try: result = c.run("az account show", hide="both") return "Please run 'az login'" not in result.stdout except Failure: return False def _prompt_sub_id_selection(c): from tabulate import tabulate from toolz import pipe import json from prompt_toolkit import prompt results = c.run(f"az account list", pty=True, hide="out") parsestr = "["+results.stdout[1:-7]+"]" sub_dict = json.loads(parsestr) sub_list = [ {"Index": i, "Name": sub["name"], "id": sub["id"]} for i, sub in enumerate(sub_dict) ] pipe(sub_list, tabulate, print) prompt_result = prompt("Please type in index of subscription you want to use: ") sub_id = sub_list[int(prompt_result)]["id"] print(f"You selected index {prompt_result} sub id {sub_id}") return sub_id @task def select_subscription(c, sub_id=env_values.get("SUBSCRIPTION_ID", None)): env_file = find_dotenv(raise_error_if_not_found=True) if sub_id is None or sub_id == "": sub_id = _prompt_sub_id_selection(c) set_key(env_file, "SUBSCRIPTION_ID", sub_id) c.run(f"az account set -s {sub_id}", pty=True) @task(post=[select_subscription]) def login(c): if _is_loged_in(c): return None c.run("az login -o table", pty=True) @task(aliases=("i")) def interactive(c): c.run("python /workspace/control/src/aml_compute.py -- --interactive", pty=True) @task def delete(c, resource_group=env_values.get("RESOURCE_GROUP")): c.run(f"az group delete --resource-group {resource_group} --no-wait --yes") @task def setup(c, use_imagenet=_USE_IMAGENET, use_tfrecords=True): logger = logging.getLogger(__name__) c.invoke_execute(c, "login") if use_imagenet: logger.info("Preparing Imagenet data") image.prepare_imagenet(c) c.invoke_execute(c, "storage.image.upload_data") if use_tfrecords: tfrecords.generate_tf_records(c) c.invoke_execute(c, "storage.tfrecords.upload_data") logger.info("Setup complete") @task
MIT License
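A small sketch of the command string the tensorboard task above assembles before handing it to tmux (the experiment and run names are placeholders); in practice the task is typically launched with Invoke, e.g. `invoke tensorboard --experiment <name>` from the project root.

# Rebuild the same command the task constructs (placeholder values).
experiment = "resnet50-distributed"
runs = "run_01 run_02"
cmd = (f"tmux neww -d -n tensorboard python control/src/aml_compute.py "
       f"tensorboard --experiment {experiment} ")
if runs:
    cmd = cmd + f"--runs {runs}"
print(cmd)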
google/clusterfuzz
src/clusterfuzz/_internal/bot/fuzzers/ml/rnn/generator.py
prepare_model_directory
python
def prepare_model_directory(fuzzer_name):
  temp_directory = environment.get_value('BOT_TMPDIR')
  model_directory = os.path.join(temp_directory, fuzzer_name)
  shell.remove_directory(model_directory, recreate=True)
  if not download_model_from_gcs(model_directory, fuzzer_name):
    return None
  return os.path.join(model_directory, constants.RNN_MODEL_NAME)
Prepare model directory, and return model path. Args: fuzzer_name: Name of the fuzzer to which this model belongs. Returns: Model path. For example, if `/tmp/model` is the directory containing model files(e.g. rnn.index), the path should be '/tmp/model/rnn'.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/clusterfuzz/_internal/bot/fuzzers/ml/rnn/generator.py#L88-L109
try: from clusterfuzz._internal.base import modules modules.fix_module_search_paths() except ImportError: pass import os import sys from clusterfuzz._internal.base import utils from clusterfuzz._internal.bot.fuzzers.ml.rnn import constants from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import new_process from clusterfuzz._internal.system import shell ML_RNN_SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) GENERATION_MAX_COUNT = 5000 def download_model_from_gcs(local_model_directory, fuzzer_name): gcs_corpus_bucket = environment.get_value('CORPUS_BUCKET') if not gcs_corpus_bucket: logs.log('Corpus bucket is not set. Skip generation.') return False gcs_model_directory = 'gs://%s/%s/%s' % ( gcs_corpus_bucket, constants.RNN_MODEL_NAME, fuzzer_name) logs.log('GCS model directory for fuzzer %s is %s.' % (fuzzer_name, gcs_model_directory)) data_filename = constants.RNN_MODEL_NAME + constants.MODEL_DATA_SUFFIX index_filename = constants.RNN_MODEL_NAME + constants.MODEL_INDEX_SUFFIX gcs_data_path = '%s/%s' % (gcs_model_directory, data_filename) gcs_index_path = '%s/%s' % (gcs_model_directory, index_filename) if not (storage.exists(gcs_data_path) and storage.exists(gcs_index_path)): logs.log('ML RNN model for fuzzer %s does not exist. Skip generation.' % fuzzer_name) return False local_data_path = os.path.join(local_model_directory, data_filename) local_index_path = os.path.join(local_model_directory, index_filename) result = ( storage.copy_file_from(gcs_data_path, local_data_path) and storage.copy_file_from(gcs_index_path, local_index_path)) if not result: logs.log('Failed to download RNN model for fuzzer %s. Skip generation.' % fuzzer_name) return False return True
Apache License 2.0
iexcloud/pyex
pyEX/studies/technicals/momentum.py
minus_dm
python
def minus_dm(client, symbol, timeframe="6m", highcol="high", lowcol="low", period=14):
    df = client.chartDF(symbol, timeframe)
    x = t.MINUS_DM(
        df[highcol].values.astype(float), df[lowcol].values.astype(float), period
    )
    return pd.DataFrame(
        {highcol: df[highcol].values, lowcol: df[lowcol].values, "minus_dm": x}
    )
This will return a dataframe of Minus Directional Movement for the given symbol across the given timeframe Args: client (pyEX.Client): Client symbol (string): Ticker timeframe (string): timeframe to use, for pyEX.chart highcol (string): column to use to calculate lowcol (string): column to use to calculate period (int): period to calculate across Returns: DataFrame: result
https://github.com/iexcloud/pyex/blob/48223a046d120703e8cc8f6c57f8a1450ee3f835/pyEX/studies/technicals/momentum.py#L486-L508
import pandas as pd import talib as t def adx( client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close", period=14, ): df = client.chartDF(symbol, timeframe) adx = t.ADX( df[highcol].values.astype(float), df[lowcol].values.astype(float), df[closecol].values.astype(float), period, ) return pd.DataFrame( { highcol: df[highcol].values, lowcol: df[lowcol].values, closecol: df[closecol].values, "adx": adx, } ) def adxr( client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close", period=14, ): df = client.chartDF(symbol, timeframe) adx = t.ADXR( df[highcol].values.astype(float), df[lowcol].values.astype(float), df[closecol].values.astype(float), period, ) return pd.DataFrame( { highcol: df[highcol].values, lowcol: df[lowcol].values, closecol: df[closecol].values, "adx": adx, } ) def apo( client, symbol, timeframe="6m", col="close", fastperiod=12, slowperiod=26, matype=0 ): df = client.chartDF(symbol, timeframe) apo = t.APO(df[col].values.astype(float), fastperiod, slowperiod, matype) return pd.DataFrame({col: df[col].values, "apo": apo}) def aroon(client, symbol, timeframe="6m", highcol="high", lowcol="low", period=14): df = client.chartDF(symbol, timeframe) aroondown, aroonup = t.AROON( df[highcol].values.astype(float), df[lowcol].values.astype(float), period ) return pd.DataFrame( { highcol: df[highcol].values, lowcol: df[lowcol].values, "aroonup": aroonup, "aroondown": aroondown, } ) def aroonosc(client, symbol, timeframe="6m", highcol="high", lowcol="low", period=14): df = client.chartDF(symbol, timeframe) x = t.AROONOSC( df[highcol].values.astype(float), df[lowcol].values.astype(float), period ) return pd.DataFrame( {highcol: df[highcol].values, lowcol: df[lowcol].values, "aroonosc": x} ) def bop( client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close", volumecol="volume", ): df = client.chartDF(symbol, timeframe) x = t.BOP( df[highcol].values.astype(float), df[lowcol].values.astype(float), df[closecol].values.astype(float), df[volumecol].values.astype(float), ) return pd.DataFrame( { highcol: df[highcol].values, lowcol: df[lowcol].values, closecol: df[closecol].values, volumecol: df[volumecol].values, "bop": x, } ) def cci( client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close", period=14, ): df = client.chartDF(symbol, timeframe) x = t.CCI( df[highcol].values.astype(float), df[lowcol].values.astype(float), df[closecol].values.astype(float), period, ) return pd.DataFrame( { highcol: df[highcol].values, lowcol: df[lowcol].values, closecol: df[closecol].values, "cci": x, } ) def cmo(client, symbol, timeframe="6m", col="close", period=14): df = client.chartDF(symbol, timeframe) return pd.DataFrame( {col: df[col].values, "cmo": t.CMO(df[col].values.astype(float), period)} ) def dx( client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close", period=14, ): df = client.chartDF(symbol, timeframe) x = t.DX( df[highcol].values.astype(float), df[lowcol].values.astype(float), df[closecol].values.astype(float), period, ) return pd.DataFrame( { highcol: df[highcol].values, lowcol: df[lowcol].values, closecol: df[closecol].values, "dx": x, } ) def macd( client, symbol, timeframe="6m", col="close", fastperiod=12, slowperiod=26, signalperiod=9, ): df = client.chartDF(symbol, timeframe) macd, macdsignal, macdhist = t.MACD( df[col].values.astype(float), fastperiod, slowperiod, signalperiod ) return pd.DataFrame( { col: df[col].values, "macd": macd, "macdsignal": macdsignal, "macdhist": macdhist, } ) 
def macdext( client, symbol, timeframe="6m", col="close", fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0, ): df = client.chartDF(symbol, timeframe) macd, macdsignal, macdhist = t.MACDEXT( df[col].values.astype(float), fastperiod, slowperiod, signalperiod ) return pd.DataFrame( { col: df[col].values, "macd": macd, "macdsignal": macdsignal, "macdhist": macdhist, } ) def mfi( client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close", volumecol="volume", period=14, ): df = client.chartDF(symbol, timeframe) x = t.MFI( df[highcol].values.astype(float), df[lowcol].values.astype(float), df[closecol].values.astype(float), df[volumecol].values.astype(float), period, ) return pd.DataFrame( { highcol: df[highcol].values, lowcol: df[lowcol].values, closecol: df[closecol].values, volumecol: df[volumecol].values, "mfi": x, } ) def minus_di( client, symbol, timeframe="6m", highcol="high", lowcol="low", closecol="close", period=14, ): df = client.chartDF(symbol, timeframe) x = t.MINUS_DI( df[highcol].values.astype(float), df[lowcol].values.astype(float), df[closecol].values.astype(float), period, ) return pd.DataFrame( { highcol: df[highcol].values, lowcol: df[lowcol].values, closecol: df[closecol].values, "minus_di": x, } )
Apache License 2.0
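A hedged usage sketch for the minus_dm entry above. It assumes a pyEX client configured with a valid IEX Cloud token (for example via the IEX_TOKEN environment variable) and a locally installed TA-Lib, both of which the study functions require; the ticker is just an example.

import pyEX
from pyEX.studies.technicals.momentum import minus_dm

# Assumes the API token is picked up from the environment.
client = pyEX.Client(version="sandbox")

# Minus Directional Movement for AAPL over the last 6 months.
df = minus_dm(client, "AAPL", timeframe="6m", period=14)
print(df.tail())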
andrewdarmawan/tncontract
tncontract/tncon.py
con
python
def con(*args):
    tensor_list = []
    contract_list = []
    # sort the arguments into tensors and contraction (index-pair) specs;
    # lists of either kind are also accepted
    for x in args:
        if isinstance(x, list):
            if isinstance(x[0], tn.Tensor):
                tensor_list.extend(x)
            else:
                contract_list.extend(x)
        elif isinstance(x, tn.Tensor):
            tensor_list.append(x)
        else:
            contract_list.append(x)
    tensor_list = [t.copy() for t in tensor_list]
    all_tensor_indices = [t.labels for t in tensor_list]
    contracted_indices = [item for pair in contract_list for item in pair]
    if len(set(contracted_indices)) != len(contracted_indices):
        raise ValueError("Index found in more than one contraction pair.")
    # map each index label to the tensor it lives on
    index_lookup = {}
    for i, labels in enumerate(all_tensor_indices):
        for lab in labels:
            if lab in index_lookup.keys():
                raise ValueError("Index label "+lab+" found in two tensors."+
                                 " Tensors must have unique index labelling.")
            index_lookup[lab] = i
    internal_contract = []
    pairwise_contract = []
    tensor_pairs = []
    tensors_involved = set()
    for c in contract_list:
        if index_lookup[c[0]] == index_lookup[c[1]]:
            internal_contract.append(c)
        else:
            if (tuple(np.sort((index_lookup[c[0]], index_lookup[c[1]])))
                    in tensor_pairs):
                idx = tensor_pairs.index((index_lookup[c[0]],
                                          index_lookup[c[1]]))
                if not isinstance(pairwise_contract[idx][0], list):
                    pairwise_contract[idx][0] = [pairwise_contract[idx][0]]
                    pairwise_contract[idx][1] = [pairwise_contract[idx][1]]
                pairwise_contract[idx][0].append(c[0])
                pairwise_contract[idx][1].append(c[1])
            else:
                pairwise_contract.append(list(c))
                tensor_pairs.append(tuple(np.sort((index_lookup[c[0]],
                                                   index_lookup[c[1]]))))
            tensors_involved.add(index_lookup[c[0]])
            tensors_involved.add(index_lookup[c[1]])
    # contract internal edges (both ends on the same tensor) first
    for c in internal_contract:
        tensor_list[index_lookup[c[0]]].trace(c[0], c[1])
    connected_component = [i for i in range(len(tensor_list))]
    # contract edges between distinct tensors, tracking connected components
    for c in pairwise_contract:
        if isinstance(c[0], list):
            d = index_lookup[c[0][0]]
            e = index_lookup[c[1][0]]
        else:
            d = index_lookup[c[0]]
            e = index_lookup[c[1]]
        if d == e:
            tensor_list[d].trace(c[0], c[1])
        else:
            if d < e:
                tensor_list[d] = tn.contract(tensor_list[d], tensor_list[e],
                                             c[0], c[1])
                connected_component[e] = d
            else:
                tensor_list[e] = tn.contract(tensor_list[e], tensor_list[d],
                                             c[1], c[0])
                connected_component[d] = e
            for lab in tensor_list[min(d, e)].labels:
                index_lookup[lab] = min(d, e)
    # tensor product of whatever disconnected components remain
    return tn.tensor_product(*[tensor_list[connected_component.index(x)]
                               for x in set(connected_component)])
Contract a network of tensors. Similar purpose to NCON, described in arxiv.org/abs/1402.0939, but designed to work with the Tensor objects of tncontract. Examples -------- >>> import tncontract as tn For the examples below, we define three tensors >>> A = tn.Tensor(np.random.rand(3,2,4), labels=["a", "b", "c"]) >>> B = tn.Tensor(np.random.rand(3,4), labels=["d", "e"]) >>> C = tn.Tensor(np.random.rand(5,5,2), labels=["f", "g", "h"]) Contract a pair of indices between two tensors ------------------------------------------- The following contracts pairs of indices "a","d" and "c","e" of tensors `A` and `B`. It is identical to A["a", "c"]*B["d", "e"] >>> tn.con(A, B, ("a", "d" ), ("c", "e")) Tensor object: shape = (2), labels = ["b"] Contract a pair of indices belonging to one tensor (internal edges) ------------------------------------------------------------------ The following contracts the "f" and "g" indices of tensor `C` >>> tn.con(C, ("f", "g")) Tensor object: shape = (2), labels = ["h"] Return the tensor product of a pair of tensors ---------------------------------------------- After all indices have been contracted, `con` will return the tensor product of the disconnected components of the tensor contraction. The following example returns the tensor product of `A` and `B`. >>> tn.con(A, B) Tensor object: shape = (3, 2, 4, 3, 4), labels = ["a", "b", "c", "d", "e"] Contract a network of several tensors ------------------------------------- It is possible to contract a network of several tensors. Internal edges are contracted first, then edges connecting separate tensors, and then the tensor product is taken of the disconnected components resulting from the contraction. Edges between separate tensors are contracted in the order they appear in the argument list. The result of the example below is a scalar (since all indices will be contracted). >>> tn.con(A, B, C, ("a", "d" ), ("c", "e"), ("f", "g"), ("h", "b")) Notes ----- Lists of tensors and index pairs for contraction may be used as arguments. The following example contracts 100 rank 2 tensors in a ring with periodic boundary conditions. >>> N=100 >>> A = tn.Tensor(np.random.rand(2,2), labels=["left","right"]) >>> tensor_list = [A.suf(str(i)) for i in range(N)] >>> idx_pairs = [("right"+str(j), "left"+str(j+1)) for j in range(N-1)] >>> tn.con(tensor_list, idx_pairs, ("right"+str(N-1), "left0"))
https://github.com/andrewdarmawan/tncontract/blob/a65b5663fe8ec24f2170cf6d2e27fe6a1882834d/tncontract/tncon.py#L4-L159
import tncontract as tn
import numpy as np
MIT License
microsoft/qlib
scripts/check_dump_bin.py
CheckBin.check
python
def check(self):
    logger.info("start check......")

    error_list = []
    not_in_features = []
    compare_false = []
    with tqdm(total=len(self.csv_files)) as p_bar:
        with ProcessPoolExecutor(max_workers=self.max_workers) as executor:
            for file_path, _check_res in zip(self.csv_files, executor.map(self._compare, self.csv_files)):
                symbol = file_path.name.strip(self.file_suffix)
                if _check_res == self.NOT_IN_FEATURES:
                    not_in_features.append(symbol)
                elif _check_res == self.COMPARE_ERROR:
                    error_list.append(symbol)
                elif _check_res == self.COMPARE_FALSE:
                    compare_false.append(symbol)
                p_bar.update()

    logger.info("end of check......")
    if error_list:
        logger.warning(f"compare error: {error_list}")
    if not_in_features:
        logger.warning(f"not in features: {not_in_features}")
    if compare_false:
        logger.warning(f"compare False: {compare_false}")
    logger.info(
        f"total {len(self.csv_files)}, {len(error_list)} errors, {len(not_in_features)} not in features, {len(compare_false)} compare false"
    )
Check whether the bin files produced by ``dump_bin.py`` are consistent with the original csv file data
https://github.com/microsoft/qlib/blob/7c31012b507a3823117bddcc693fc64899460b2a/scripts/check_dump_bin.py#L111-L139
from pathlib import Path from concurrent.futures import ProcessPoolExecutor import qlib from qlib.data import D import fire import datacompy import pandas as pd from tqdm import tqdm from loguru import logger class CheckBin: NOT_IN_FEATURES = "not in features" COMPARE_FALSE = "compare False" COMPARE_TRUE = "compare True" COMPARE_ERROR = "compare error" def __init__( self, qlib_dir: str, csv_path: str, check_fields: str = None, freq: str = "day", symbol_field_name: str = "symbol", date_field_name: str = "date", file_suffix: str = ".csv", max_workers: int = 16, ): self.qlib_dir = Path(qlib_dir).expanduser() bin_path_list = list(self.qlib_dir.joinpath("features").iterdir()) self.qlib_symbols = sorted(map(lambda x: x.name.lower(), bin_path_list)) qlib.init( provider_uri=str(self.qlib_dir.resolve()), mount_path=str(self.qlib_dir.resolve()), auto_mount=False, redis_port=-1, ) csv_path = Path(csv_path).expanduser() self.csv_files = sorted(csv_path.glob(f"*{file_suffix}") if csv_path.is_dir() else [csv_path]) if check_fields is None: check_fields = list(map(lambda x: x.name.split(".")[0], bin_path_list[0].glob(f"*.bin"))) else: check_fields = check_fields.split(",") if isinstance(check_fields, str) else check_fields self.check_fields = list(map(lambda x: x.strip(), check_fields)) self.qlib_fields = list(map(lambda x: f"${x}", self.check_fields)) self.max_workers = max_workers self.symbol_field_name = symbol_field_name self.date_field_name = date_field_name self.freq = freq self.file_suffix = file_suffix def _compare(self, file_path: Path): symbol = file_path.name.strip(self.file_suffix) if symbol.lower() not in self.qlib_symbols: return self.NOT_IN_FEATURES qlib_df = D.features([symbol], self.qlib_fields, freq=self.freq) qlib_df.rename(columns={_c: _c.strip("$") for _c in qlib_df.columns}, inplace=True) origin_df = pd.read_csv(file_path) origin_df[self.date_field_name] = pd.to_datetime(origin_df[self.date_field_name]) if self.symbol_field_name not in origin_df.columns: origin_df[self.symbol_field_name] = symbol origin_df.set_index([self.symbol_field_name, self.date_field_name], inplace=True) origin_df.index.names = qlib_df.index.names origin_df = origin_df.reindex(qlib_df.index) try: compare = datacompy.Compare( origin_df, qlib_df, on_index=True, abs_tol=1e-08, rel_tol=1e-05, df1_name="Original", df2_name="New", ) _r = compare.matches(ignore_extra_columns=True) return self.COMPARE_TRUE if _r else self.COMPARE_FALSE except Exception as e: logger.warning(f"{symbol} compare error: {e}") return self.COMPARE_ERROR
MIT License
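A minimal usage sketch for the CheckBin.check entry above; the directory paths are placeholders and assume a qlib data directory produced by dump_bin.py plus the folder holding the original csv files:

checker = CheckBin(
    qlib_dir="~/.qlib/qlib_data/cn_data",   # placeholder: output directory of dump_bin.py
    csv_path="~/csv_data",                  # placeholder: folder with the source csv files
    freq="day",
    max_workers=4,
)
checker.check()   # logs which symbols are missing, failed to compare, or compared False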
modalseoul/weeb.fm
scrobbles/views.py
ScrobbleView.by_song
python
def by_song(self, request, pk=None): if pk is not None: queryset = Scrobble.objects.filter(song__title__iexact=pk) serializer = ScrobbleSerializer(instance=queryset, many=True) return Response(serializer.data)
Lists all scrobbles for one song (pk)
https://github.com/modalseoul/weeb.fm/blob/1393c076a878ef4a05a13fa58243e404bed7e85c/scrobbles/views.py#L199-L204
import json import datetime from datetime import timedelta from django.db.models import Count, F from django.shortcuts import render from django.utils import timezone from rest_framework import viewsets, generics, permissions from rest_framework.filters import BaseFilterBackend from rest_framework.decorators import action from rest_framework.response import Response from songs.models import Song from songs.serializers import SongSerializer from users.serializers import MemberSerializer from users.models import Member from artists.models import Artist from albums.models import Album from .models import Scrobble from .serializers import ScrobbleSerializer, CreateScrobbleSerializer from WeebFM.permissions import IsOwnerOrReadOnly def song_exists(title, artist): check = Song.objects.filter( title__iexact=title, artist__name__iexact=artist) if len(check) != 0: return True else: return False def album_exists(title): check = Album.objects.filter(title__iexact=title) if len(check) != 0: return True else: return False def artist_exists(name): check = Artist.objects.filter(name__iexact=name) if len(check) != 0: return True else: return False class ScrobbleView(viewsets.ModelViewSet): queryset = Scrobble.objects.all() serializer_class = ScrobbleSerializer permission_classes = ( permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly) def get_queryset(self): data = self.request.query_params query = Scrobble.objects.all() user = self.request.user if 'active' in data: query = Scrobble.objects.filter( member__nick_name__iexact=data.get('active')) query = query.order_by('-date_scrobbled')[:1] if 'last_played' in data: query = Scrobble.objects.latest('date_scrobbled') elif 'past' in data: query = Scrobble.objects.order_by('-id')[:int(data.get('past'))] elif 'artist' in data: query = Scrobble.objects.filter( song__artist__name=data.get('artist')) elif 'song' in data: query = Scrobble.objects.filter(song__title=data.get('song')) elif 'by_user' in data: start, end = int(data.get('start')), int(data.get('end')) query = Scrobble.objects.filter( member__nick_name__iexact=data.get('by_user')) query = query.order_by('-date_scrobbled')[start:end] return query def get_serializer_class(self): if self.action == 'create': return CreateScrobbleSerializer return ScrobbleSerializer def create(self, request): data = self.request.data creator = self.request.user serializer = self.get_serializer_class() if artist_exists(data['artist']): artist = Artist.objects.get(name__iexact=data['artist']) artist.scrobble_count += 1 artist.save() else: if isinstance(data['artist'], bytes): data['artist'] = data['artist'].decode('UTF-8') artist = Artist.objects.create(name=data['artist']) if 'album' in data: if album_exists(data['album']): album = Album.objects.get(title__iexact=data['album']) album.scrobble_count += 1 album.save() else: if isinstance(data['album'], bytes): data['album'] = data['album'].decode('UTF-8') album = Album.objects.create( title=data['album'], artist=artist, scrobble_count=1) if len(data['song']) > 0 and not data['song'].isspace(): if song_exists(data['song'], data['artist']): song = Song.objects.get( title__iexact=data['song'], artist__id=artist.id) song.scrobble_count += 1 song.save() else: if 'album' in data: if isinstance(data['song'], bytes): data['song'] = data['song'].decode('UTF-8') song = Song.objects.create( title=data['song'], artist=artist, album=album) else: if isinstance(data['song'], bytes): data['song'] = data['song'].decode('UTF-8') song = Song.objects.create( title=data['song'], artist=artist) 
creator.listened_to.add(song) creator.save() if 'timestamp' in data: date_s = data.get('timestamp') else: date_s = timezone.now() obj = Scrobble.objects.create(song=song, member=creator, date_scrobbled=date_s) created = serializer(instance=obj) return Response(created.data) return Response('Song was either only whitespace or < 1 char') @action(detail=True, methods=['GET']) def by_artist(self, request, pk=None): if pk is not None: queryset = Scrobble.objects.filter(artist__name__iexact=pk) serializer = ScrobbleSerializer(instance=queryset, many=True) return Response(serializer.data) @action(detail=True, methods=['GET']) def by_album(self, request, pk=None): if pk is not None: queryset = Scrobble.objects.filter(song__album__title__iexact=pk) serializer = ScrobbleSerializer(instance=queryset, many=True) return Response(serializer.data) @action(detail=False, methods=['GET']) def wiltweek(self, request): q = request.query_params.get queryset = Scrobble.objects.all() if q('user'): pk_scrobbles = queryset.filter(member__nick_name__iexact=q('user')) week_scrobbles = pk_scrobbles.filter( date_scrobbled__range=[ timezone.now() - timedelta(days=6), timezone.now()]) b = week_scrobbles.values('song__artist__name').annotate( count=Count('song__artist__name'), artist=F('song__artist__name')).order_by('-count').values('artist', 'count') return Response(b) @action(detail=False, methods=['GET']) def count(self, request): return Response(Scrobble.objects.all().count()) @action(detail=True, methods=['GET'])
MIT License
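A hedged sketch of exercising the by_song action above with DRF's request factory; it assumes a configured Django test settings module and database, and the song title is a placeholder:

from rest_framework.test import APIRequestFactory

factory = APIRequestFactory()
view = ScrobbleView.as_view({'get': 'by_song'})
request = factory.get('/scrobbles/Duvet/by_song/')
response = view(request, pk='Duvet')   # serialized scrobbles for the song titled "Duvet"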
mitre/cascade-server
app/api.py
query_session_hosts
python
def query_session_hosts(session, user=None): session = Session.objects.with_id(session) if not session: return None, HTTPStatus.NOT_FOUND host_ids = set() for event in DataModelEvent.objects(sessions=session): event.update_host() if event.host: host_ids.add(event.host.id) host_list = [Host.objects.with_id(_) for _ in host_ids] return host_list, HTTPStatus.OK
:type session: Session :type user: User
https://github.com/mitre/cascade-server/blob/60399ab517b021b8d7e205fd95aa0b94a0b38100/app/api.py#L662-L678
import json import traceback import datetime from collections import defaultdict from functools import wraps from http import HTTPStatus from itertools import chain import bson import mongoengine from mongoengine.queryset.base import BaseQuerySet from flask import request, Response from app import utils from app.cascade.jobs import AnalyticJob, Job, TuningJob, CustomQueryJob, InvestigateJob from app.cascade.analytics import AnalyticResult, Analytic, CascadeAnalytic, AnalyticBaseline, ExternalAnalytic, AnalyticConfigurationList, AnalyticConfiguration from app.cascade.data_model import Host, parser from app.cascade.data_model.event import DataModelEvent, InvalidFieldError, InvalidActionError, InvalidObjectError, DataModelQuery from app.cascade.data_model import event_lookup, pivot_lookup from app.cascade.database import DateRange, AbsoluteRange, RelativeRange from app.cascade.query_layers import mappings, DataModelQueryLayer from app.cascade.session import Session, SessionState from app.cascade.cluster import HierarchicalCluster, ClusterKey from app.cascade.attack import AttackTactic, AttackTechnique, refresh_attack, TacticSet from app.cascade.query_layers import DatabaseInfo from app.server import flask_app from app import users from app.utils import json_default, bson_default from app import settings api_endpoints = {} def jsonify(obj, indent=2): return json.dumps(obj, sort_keys=True, indent=indent, default=json_default) def bsonify(obj, indent=2): return json.dumps(obj, indent=2, sort_keys=True, default=bson_default) class JSONResponse(Response): _messages = { HTTPStatus.NOT_FOUND: 'resource not found', HTTPStatus.UNAUTHORIZED: 'login required', HTTPStatus.INTERNAL_SERVER_ERROR: 'exception while handling request', HTTPStatus.NOT_IMPLEMENTED: 'function not yet implemented' } def __init__(self, json_obj=None, status=HTTPStatus.OK): if status in self._messages: json_obj = {'error': self._messages[status]} super(JSONResponse, self).__init__(jsonify(json_obj), content_type='application/json', status=status) def rest_doc(api_function): doc = api_function.__doc__ if doc is not None: return "\n".join(_.strip() for _ in doc.strip().splitlines()) def api(uri, login=False, **kwargs): def decorator(f): @wraps(f) def wrapped_f(*func_args, **func_kwargs): if login: user_token = request.cookies.get('user-token') if user_token is not None: try: func_kwargs['user'] = users.validate_token(user_token) except utils.AuthenticationError: return JSONResponse(status=HTTPStatus.UNAUTHORIZED) else: return JSONResponse(status=HTTPStatus.UNAUTHORIZED) try: results, status_code = f(*func_args, **func_kwargs) if 'count' in request.args: if isinstance(results, BaseQuerySet): results = results.count() else: results = len(results) except mongoengine.ValidationError: traceback.print_exc() status_code = HTTPStatus.BAD_REQUEST results = {"error": "invalid input"} except mongoengine.NotUniqueError: traceback.print_exc() status_code = HTTPStatus.BAD_REQUEST results = {"error": "not unique"} except Exception as e: traceback.print_exc() status_code = HTTPStatus.INTERNAL_SERVER_ERROR results = None if request.args.get('format', 'json') == 'bson': output = bsonify(results) return Response(output, status=status_code, content_type='application/json') else: return JSONResponse(results, status=status_code) endpoint = kwargs.pop('endpoint', f.__name__) flask_app.add_url_rule(uri, endpoint, wrapped_f, **kwargs) assert endpoint not in api_endpoints api_endpoints[endpoint] = f, uri, kwargs.get('methods', ['GET']), rest_doc(f) return f 
return decorator @api('/api', methods=['GET']) def query_api(): endpoints = [{'name': name, 'uri': uri.replace('<', '{').replace('>', '}'), 'methods': methods, 'doc': doc} for name, (f, uri, methods, doc) in api_endpoints.items()] endpoints.sort(key=lambda x: x['uri']) return endpoints, HTTPStatus.OK @api('/api/debug', methods=['GET', 'POST'], login=True) def debug(user=None): try: import pydevd pydevd.settrace() except ImportError: pass return None, HTTPStatus.OK @api('/api/login', methods=['POST']) def login(): if request.args.get('action') == 'reset_password': reset_token = request.json.get('token') password = request.json.get('password') try: user = users.reset_password(reset_token, password) except users.PasswordPolicyError as error: regex, rules = error.args return {'violation': {'regex': regex, 'rules': rules}}, HTTPStatus.BAD_REQUEST if user is not None: return {'username': user.username}, HTTPStatus.OK else: return None, HTTPStatus.BAD_REQUEST elif request.args.get('action') == 'forgot_password': email = request.json.get('email') if email: user = users.User.objects(email=email).first() if user is not None: user.send_reset_email() return None, HTTPStatus.OK persistent = request.args.get('persistent', 'true').lower() == 'true' try: if isinstance(request.json, dict): if request.json.get('api_token') is not None: token = request.json['api_token'] user = users.validate_token(token) if user is not None: info = {'api_token': token, 'username': user.username, 'full_name': user.full_name, 'email': user.email} return info, HTTPStatus.OK elif request.json.get('user') is not None and request.json.get('password') is not None: user = users.login(request.json['user'], request.json['password']) return {'api_token': user.generate_token(persistent=persistent), 'username': user.username, 'full_name': user.full_name, 'email': user.email}, HTTPStatus.OK if request.cookies.get('user-token'): user_token = request.cookies.get('user-token') user = users.validate_token(user_token) return {'api_token': user.generate_token(persistent=persistent), 'username': user.username, 'full_name': user.full_name, 'email': user.email}, HTTPStatus.OK except (utils.AuthenticationError, users.AuthenticationError): pass return None, HTTPStatus.UNAUTHORIZED @api('/api/attack', methods=['GET']) def query_attack(): if 'refresh' in request.args: refresh_attack() attack = {'tactics': attack_tactics()[0], 'techniques': attack_techniques()[0]} return attack, HTTPStatus.OK @api('/api/attack', methods=['POST'], login=True) def update_attack(user): if 'refresh' in request.args: refresh_attack() return query_attack(), HTTPStatus.OK @api('/api/attack/tactics', methods=['GET']) def attack_tactics(): return AttackTactic.objects().order_by('order'), HTTPStatus.OK @api('/api/attack/techniques', methods=['GET']) def attack_techniques(): return AttackTechnique.objects(), HTTPStatus.OK @api('/api/attack/tactic_sets', methods=['GET', 'POST']) def tactic_sets(): if request.method == 'GET': return TacticSet.objects, HTTPStatus.OK elif request.method == 'POST': if isinstance(request.json, dict): tactic_set = TacticSet(tactics=request.json['tactics']).save() return tactic_set.id, HTTPStatus.OK @api('/api/attack/tactic_sets/<set_id>', methods=['GET', 'DELETE']) def tactic_set_query(set_id): tactic_set = TacticSet.objects.with_id(set_id) if tactic_set is None: return {}, HTTPStatus.NOT_FOUND if request.method == 'GET': return tactic_set, HTTPStatus.OK elif request.method == 'DELETE': return tactic_set.delete(), HTTPStatus.OK @api('/api/databases', 
methods=['GET', 'POST'], login=True) def query_databases(user=None): if request.method == 'GET': return DatabaseInfo.objects(), HTTPStatus.OK elif request.method == 'POST': db_cls = mongoengine.base.get_document(request.json.get('_cls')) if db_cls and issubclass(db_cls, DatabaseInfo): database = db_cls(**request.json) database.save() return database.id, HTTPStatus.OK else: return None, HTTPStatus.BAD_REQUEST @api('/api/schemas/databases', methods=['GET'], login=True) def query_database_schemas(user=None): if request.method == 'GET': return DatabaseInfo.get_schemas(), HTTPStatus.OK @api('/api/databases/<database_id>', methods=['GET', 'PUT', 'DELETE'], login=True) def query_database(database_id, user=None): database = DatabaseInfo.objects.with_id(database_id) if database is None and request.method != 'PUT': return None, HTTPStatus.NOT_FOUND if request.method == 'GET': return database, HTTPStatus.OK elif request.method == 'PUT': db_info = dict(request.json) db_info['id'] = database_id return DatabaseInfo(**db_info), HTTPStatus.OK elif request.method == 'DELETE': return database.delete(), HTTPStatus.OK @api('/api/user', methods=['GET'], login=True) def query_user(user): user_info = user.to_mongo().to_dict() user_info.pop('sha256_hash', None) user_info.pop('databases', None) return user_info, HTTPStatus.OK @flask_app.route('/api/user', methods=['POST']) def create_user(): if not settings.load()['config'].get('allow_account_creation', False): return JSONResponse(status=HTTPStatus.FORBIDDEN) """ This API route is used by the create new account template to add a new user into Mongo """ if isinstance(request.json, dict): args = request.json if args.get('username') and args.get('password'): try: user = users.create_user(args['username'], args['password'], args.get('email'), args.get('full_name')) except users.PasswordPolicyError as error: regex, rules = error.args return JSONResponse({'violation': {'regex': regex, 'rules': rules}}, HTTPStatus.BAD_REQUEST) if user is not None: response = Response(status=HTTPStatus.CREATED) response.set_cookie('user-token', user.generate_token(), max_age=datetime.timedelta(days=7)) return response else: return JSONResponse({'message': 'Username already exists!'}, status=HTTPStatus.BAD_REQUEST) return JSONResponse({'message': 'Username, email and password are required'}, status=HTTPStatus.BAD_REQUEST) @api('/api/user/databases', methods=['GET', 'POST'], login=True) def user_databases(user=None): if request.method == 'GET': user_layers = [{'name': user_db_info.database.name, 'username': user_db_info.username, '_id': user_db_info.database.id} for user_db_info in user.databases] return user_layers, HTTPStatus.OK else: if not isinstance(request.json, dict) or 'database' not in request.json or 'action' not in request.args: return None, HTTPStatus.BAD_REQUEST action = request.args['action'] database_info = DatabaseInfo.objects.with_id(request.json.pop('database')) if database_info is None: return None, HTTPStatus.BAD_REQUEST if action == 'remove': status = user.remove_layer(database_info.id) return status, (HTTPStatus.OK if status else HTTPStatus.BAD_REQUEST) elif action == 'add' and database_info.id: user_db_info = database_info.add_user(**request.json) try: user_db_info.login() user.add_layer(user_db_info) return True, HTTPStatus.OK except utils.AuthenticationError: return {'error': 'login'}, HTTPStatus.BAD_REQUEST else: return {}, HTTPStatus.BAD_REQUEST @api('/api/sessions', methods=['GET', 'POST'], login=True) def all_sessions(user=None): if request.method == 'GET': 
query = {} if 'name' in request.args: query['name'] = request.args['name'] sessions = Session.objects(**query).order_by('name') return sessions, HTTPStatus.OK elif request.method == 'POST': if 'clone' in request.args: original = Session.objects.with_id(request.args.get('clone')) original_id = original.id if original is None: return {'error': 'source session could not be found'}, HTTPStatus.BAD_REQUEST session = original session.id = None session.name = request.json['name'] session.save(validate=True) DataModelEvent.objects(sessions=original_id).update(add_to_set__sessions=session.id) for result in AnalyticResult.objects(session=original_id): result.id = None result.session = session result.uuid = result.get_uuid() result.save() else: info = request.json if info.get('range') is not None and info.get('name') is not None: time_range = DateRange.get_range(info['range']) session = Session(range=time_range, name=info['name']) session.save(validate=True) return session.id, HTTPStatus.OK @api('/api/sessions/<session>', methods=['GET', 'PUT', 'POST', 'DELETE'], login=True) def query_session(session, user=None): session_id = session session = Session.objects.with_id(session) if request.method == 'GET': if session: return session, HTTPStatus.OK return None, HTTPStatus.NOT_FOUND elif request.method == 'PUT': if not session: session = Session(id=session_id) http_status = HTTPStatus.CREATED else: http_status = HTTPStatus.OK try: session.update(**request.json) session.validate() except mongoengine.ValidationError: return {'error': 'schema validation error'}, HTTPStatus.BAD_REQUEST session.save() return None, http_status elif request.method == 'POST': if 'reset' in request.args: DataModelEvent.objects(sessions=session).update(pull__sessions=session) AnalyticResult.objects(session=session).delete() Job.objects(session=session).delete() session.update(state=SessionState()) return None, HTTPStatus.RESET_CONTENT elif 'refresh' in request.args: for analytic_state in session.state.analytics: job = AnalyticJob.update_existing(analytic=analytic_state.analytic, mode=analytic_state.mode, user=user, session=session) job.submit() return None, HTTPStatus.RESET_CONTENT elif request.method == 'DELETE': DataModelEvent.objects(sessions=session).update(pull__sessions=session) AnalyticResult.objects(session=session).delete() Job.objects(session=session).delete() session.delete() return None, HTTPStatus.NO_CONTENT @api('/api/sessions/<session>/results', methods=['GET'], login=True) def session_results(session, user=None): if request.method == 'GET': session = Session.objects.with_id(session) if not session: return None, HTTPStatus.NOT_FOUND if isinstance(session.range, RelativeRange): status = AnalyticResult.objects(session=session, time__lt=session.range.start_time).delete() results = AnalyticResult.objects(session=session) if request.args.get('format') == 'tree': analytic_index = defaultdict(list) for analytic_result in results: for event in analytic_result.events: state = event.state.to_mongo().to_dict() analytic_index[analytic_result.analytic.id].append(state) results = [] for analytic, result_list in analytic_index.items(): baseline = HierarchicalCluster() baseline.keys = [ClusterKey(name=k, status=True) for k in Analytic.objects.with_id(analytic).fields] baseline.cluster_events(result_list, min_size=1) results.append({'analytic': analytic, 'root': baseline.root, 'keys': baseline.keys}) return results, HTTPStatus.OK @api('/api/sessions/<session>/results/<analytic>', methods=['GET'], login=True) def 
session_analytic_results(session, analytic, user=None): if request.method == 'GET': session = Session.objects.with_id(session) analytic = Analytic.objects.with_id(analytic) if not session or not analytic: return None, HTTPStatus.NOT_FOUND if isinstance(session.range, RelativeRange): status = AnalyticResult.objects(session=session, analytic=analytic, time__lt=session.range.start_time).delete() results = AnalyticResult.objects(session=session, analytic=analytic) if request.args.get('format') == 'tree': result_states = [] for analytic_result in results: for event in analytic_result.events: state = event.state.to_mongo().to_dict() result_states.append(state) cluster = HierarchicalCluster() keys = request.args.getlist('key') cluster.keys = [ClusterKey(name=f, status=not len(keys) or f in keys) for f in analytic.fields] cluster.cluster_events(result_states, min_size=1) return {'root': cluster.root, 'keys': cluster.keys}, HTTPStatus.OK return results, HTTPStatus.OK @api('/api/sessions/<session>/graphs/alerts', methods=['GET'], login=True) def alert_graph(session, user=None): if request.method == 'GET': session = Session.objects.with_id(session) if not session: return None, HTTPStatus.NOT_FOUND events = {e.id: e for e in DataModelEvent.objects(sessions=session)} results = list(AnalyticResult.objects(session=session)) edges = set() result_lookup = defaultdict(list) for result in results: for event in result.events: result_lookup[event.id].append(result.id) def descendant_analytics(_event): children = [] for child_event in _event.links: if child_event.id in result_lookup: children.extend(result_lookup[child_event.id]) elif child_event.id in events: children.extend(descendant_analytics(events[child_event.id])) return children for i, result in enumerate(results): for event in result.events: for similar_result in result_lookup[event.id]: if similar_result is not result: pass if event.id in events: for edge in descendant_analytics(events[event.id]): if edge != result.id: edges.add((result.id, edge)) return {'nodes': results, 'edges': list(edges)}, HTTPStatus.OK @api('/api/sessions/<session>/attack_timeline', methods=['GET'], login=True) def attack_timeline(session, user=None): timeline = [] results = AnalyticResult.objects(session=session) for result in results: for event in result.events: for coverage in result.analytic.coverage: attack_event = { "technique": coverage.technique.id, "tactics": [] if coverage.tactics is None else [_.id for _ in coverage.tactics], "discovered_time": event.discovered_time, "event_id": event.id } timeline.append(attack_event) return sorted(timeline, key=lambda k: k['discovered_time']), HTTPStatus.OK @api('/api/sessions/<session>/graphs/technique', methods=['GET'], login=True) def technique_graph(session, user=None): orig_graph, status = alert_graph(session) old_edges = defaultdict(list) if status != HTTPStatus.OK: return None, status edges = set() nodes = [] incoming_edges = defaultdict(set) outgoing_edges = defaultdict(set) analytics = {_.id: _ for _ in Analytic.objects()} for result in orig_graph['nodes']: coverage = analytics[result.analytic.id].coverage if coverage is None or len(coverage) == 0: continue for mapping in coverage: if mapping.technique is None: continue technique = mapping.technique technique_node = {'technique': technique.id, 'group': 'technique', 'id': hash((technique.id, result.id))} nodes.append(technique_node) incoming_edges[result.id].add(technique_node['id']) if not mapping.tactics: outgoing_edges[result.id].add(technique_node['id']) else: for 
tactic in mapping.tactics: tactic_node = {'tactic': tactic.id, 'group': 'tactic', 'id': hash((technique.id, result.id, tactic.id))} nodes.append(tactic_node) edges.add((technique_node['id'], tactic_node['id'])) outgoing_edges[result.id].add(tactic_node['id']) for node in orig_graph['nodes']: nodes.append({'_id': node.id, 'group': 'event'}) edges.update([(node.id, next_id) for next_id in incoming_edges[node.id]]) for source_id, target_id in orig_graph['edges']: edges.update([(middle_id, target_id) for middle_id in outgoing_edges[source_id]]) return {'nodes': nodes, 'edges': list(edges)}, HTTPStatus.OK @api('/api/sessions/<session>/events', methods=['GET'], login=True) def session_events(session, user=None): if request.method == 'GET': session = Session.objects.with_id(session) if isinstance(session.range, RelativeRange): DataModelEvent.objects(sessions=session, time__lt=session.range.start_time).update(pull__sessions=session) return DataModelEvent.objects(sessions=session).order_by('time'), HTTPStatus.OK @api('/api/sessions/<session>/hosts', methods=['GET'], login=True)
Apache License 2.0
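Because the @api decorator registers the route and then returns the original function, query_session_hosts can also be called directly; a sketch assuming an established mongoengine connection, with a placeholder session id string:

from http import HTTPStatus

hosts, status = query_session_hosts("5d0c6a0b2f3e4a0012345678")   # placeholder ObjectId string
if status == HTTPStatus.OK:
    print(f"{len(hosts)} host(s) touched by this session")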
vsantiago113/readwritememory
ReadWriteMemory/__init__.py
Process.write
python
def write(self, lp_base_address: int, value: int) -> bool: try: write_buffer = ctypes.c_uint(value) lp_buffer = ctypes.byref(write_buffer) n_size = ctypes.sizeof(write_buffer) lp_number_of_bytes_written = ctypes.c_ulong(0) ctypes.windll.kernel32.WriteProcessMemory(self.handle, ctypes.c_void_p(lp_base_address), lp_buffer, n_size, lp_number_of_bytes_written) return True except (BufferError, ValueError, TypeError) as error: if self.handle: self.close() self.error_code = self.get_last_error() error = {'msg': str(error), 'Handle': self.handle, 'PID': self.pid, 'Name': self.name, 'ErrorCode': self.error_code} ReadWriteMemoryError(error)
Write data to the process's memory. :param lp_base_address: The pointer (base address) in the process's memory to write to. :param value: The data to be written to the process's memory. :return: Returns True on success; otherwise an exception is raised.
https://github.com/vsantiago113/readwritememory/blob/49d228064ca8349b6acdfcb0828107bee9984f7a/ReadWriteMemory/__init__.py#L148-L171
from typing import Any, List import os.path import ctypes import ctypes.wintypes PROCESS_QUERY_INFORMATION = 0x0400 PROCESS_VM_OPERATION = 0x0008 PROCESS_VM_READ = 0x0010 PROCESS_VM_WRITE = 0x0020 PROCESS_ALL_ACCESS = 0x1f0fff MAX_PATH = 260 class ReadWriteMemoryError(Exception): pass class Process(object): def __init__(self, name: [str, bytes] = '', pid: int = -1, handle: int = -1, error_code: [str, bytes] = None): self.name = name self.pid = pid self.handle = handle self.error_code = error_code def __repr__(self) -> str: return f'{self.__class__.__name__}: "{self.name}"' def open(self): dw_desired_access = (PROCESS_QUERY_INFORMATION | PROCESS_VM_OPERATION | PROCESS_VM_READ | PROCESS_VM_WRITE) b_inherit_handle = True self.handle = ctypes.windll.kernel32.OpenProcess(dw_desired_access, b_inherit_handle, self.pid) if not self.handle: raise ReadWriteMemoryError(f'Unable to open process <{self.name}>') def close(self) -> int: ctypes.windll.kernel32.CloseHandle(self.handle) return self.get_last_error() def get_all_access_handle(self): b_inherit_handle = True self.handle = ctypes.windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, b_inherit_handle, self.pid) @staticmethod def get_last_error() -> int: return ctypes.windll.kernel32.GetLastError() def get_pointer(self, lp_base_address: hex, offsets: List[hex] = ()) -> int: temp_address = self.read(lp_base_address) pointer = 0x0 if not offsets: return lp_base_address else: for offset in offsets: pointer = int(str(temp_address), 0) + int(str(offset), 0) temp_address = self.read(pointer) return pointer def read(self, lp_base_address: int) -> Any: try: read_buffer = ctypes.c_uint() lp_buffer = ctypes.byref(read_buffer) n_size = ctypes.sizeof(read_buffer) lp_number_of_bytes_read = ctypes.c_ulong(0) ctypes.windll.kernel32.ReadProcessMemory(self.handle, ctypes.c_void_p(lp_base_address), lp_buffer, n_size, lp_number_of_bytes_read) return read_buffer.value except (BufferError, ValueError, TypeError) as error: if self.handle: self.close() self.error_code = self.get_last_error() error = {'msg': str(error), 'Handle': self.handle, 'PID': self.pid, 'Name': self.name, 'ErrorCode': self.error_code} ReadWriteMemoryError(error) def readString(self, lp_base_address: int, length: int) -> Any: try: read_buffer = ctypes.create_string_buffer(length) lp_number_of_bytes_read = ctypes.c_ulong(0) ctypes.windll.kernel32.ReadProcessMemory(self.handle, lp_base_address, read_buffer, length, lp_number_of_bytes_read) bufferArray = bytearray(read_buffer) found_terminator = bufferArray.find(b'\x00') if found_terminator != -1: return bufferArray[:found_terminator].decode('utf-8') print("[ReadMemory/Error]: terminator not found.\naddress: %s" % hex(lp_base_address)) return "" except (BufferError, ValueError, TypeError) as error: if self.handle: self.close() self.error_code = self.get_last_error() error = {'msg': str(error), 'Handle': self.handle, 'PID': self.pid, 'Name': self.name, 'ErrorCode': self.error_code} ReadWriteMemoryError(error)
MIT License
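A Windows-only sketch using just the Process class shown above; the process name, pid, and address are placeholders:

p = Process(name='notepad.exe', pid=1234)   # placeholder pid of a running process
p.open()                                    # acquires a handle with read/write access
ok = p.write(0x00ABCDEF, 100)               # placeholder address; writes the uint value 100
p.close()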
mozilla/mozillians
mozillians/users/models.py
UserProfile._api_alternate_emails
python
def _api_alternate_emails(self): legacy_emails_qs = self._alternate_emails idp_qs = self._identity_profiles e_exclude = [e.id for e in legacy_emails_qs if idp_qs.filter(email=e.identifier, privacy__gte=e.privacy).exists()] legacy_emails_qs = legacy_emails_qs.exclude(id__in=e_exclude) idp_exclude = [i.id for i in idp_qs if legacy_emails_qs.filter(identifier=i.email, privacy__gte=i.privacy).exists()] idp_qs = idp_qs.exclude(id__in=idp_exclude) return chain(legacy_emails_qs, idp_qs)
Helper private property that creates a compatibility layer for API results in alternate emails. Combines both IdpProfile and ExternalAccount objects. In conflicts/duplicates it returns the minimum privacy level defined.
https://github.com/mozilla/mozillians/blob/bd5da47fef01e4e09d3bb8cb0799735bdfbeb3f9/mozillians/users/models.py#L290-L309
import logging import os import uuid from itertools import chain from django.conf import settings from django.contrib.auth.models import User from django.core.files.storage import default_storage from django.core.mail import send_mail from django.db import models from django.db.models import Q, Manager, ManyToManyField from django.utils.encoding import iri_to_uri from django.utils.http import urlquote from django.utils.timezone import now from django.utils.translation import ugettext as _, ugettext_lazy as _lazy from django.template.loader import get_template from product_details import product_details from PIL import Image from pytz import common_timezones from sorl.thumbnail import ImageField, get_thumbnail from waffle import switch_is_active from mozillians.common import utils from mozillians.common.templatetags.helpers import absolutify, gravatar from mozillians.common.templatetags.helpers import offset_of_timezone from mozillians.common.urlresolvers import reverse from mozillians.groups.models import (Group, GroupAlias, GroupMembership, Invite, Skill, SkillAlias) from mozillians.phonebook.validators import (validate_email, validate_twitter, validate_website, validate_username_not_url, validate_phone_number, validate_linkedin, validate_discord) from mozillians.users import get_languages_for_locale from mozillians.users.managers import (EMPLOYEES, MOZILLIANS, PRIVACY_CHOICES, PRIVACY_CHOICES_WITH_PRIVATE, PRIVATE, PUBLIC, PUBLIC_INDEXABLE_FIELDS, UserProfileQuerySet) from mozillians.users.tasks import send_userprofile_to_cis COUNTRIES = product_details.get_regions('en-US') AVATAR_SIZE = (300, 300) logger = logging.getLogger(__name__) ProfileManager = Manager.from_queryset(UserProfileQuerySet) def _calculate_photo_filename(instance, filename): return os.path.join(settings.USER_AVATAR_DIR, str(uuid.uuid4()) + '.jpg') class PrivacyField(models.PositiveSmallIntegerField): def __init__(self, *args, **kwargs): myargs = {'default': MOZILLIANS, 'choices': PRIVACY_CHOICES} myargs.update(kwargs) super(PrivacyField, self).__init__(*args, **myargs) class UserProfilePrivacyModel(models.Model): _privacy_level = None privacy_photo = PrivacyField() privacy_full_name = PrivacyField() privacy_full_name_local = PrivacyField() privacy_ircname = PrivacyField() privacy_email = PrivacyField(choices=PRIVACY_CHOICES_WITH_PRIVATE, default=MOZILLIANS) privacy_bio = PrivacyField() privacy_geo_city = PrivacyField() privacy_geo_region = PrivacyField() privacy_geo_country = PrivacyField() privacy_city = PrivacyField() privacy_region = PrivacyField() privacy_country = PrivacyField() privacy_groups = PrivacyField() privacy_skills = PrivacyField() privacy_languages = PrivacyField() privacy_date_mozillian = PrivacyField() privacy_timezone = PrivacyField() privacy_tshirt = PrivacyField(choices=((PRIVATE, _lazy(u'Private')),), default=PRIVATE) privacy_title = PrivacyField() privacy_story_link = PrivacyField() CACHED_PRIVACY_FIELDS = None class Meta: abstract = True @classmethod def clear_privacy_fields_cache(cls): cls.CACHED_PRIVACY_FIELDS = None @classmethod def privacy_fields(cls): if cls.CACHED_PRIVACY_FIELDS is None: privacy_fields = {} field_names = list(set(chain.from_iterable( (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) for field in cls._meta.get_fields() if not (field.many_to_one and field.related_model is None) ))) for name in field_names: if name.startswith('privacy_') or not 'privacy_%s' % name in field_names: continue field = cls._meta.get_field(name) if isinstance(field, 
ManyToManyField): default = field.remote_field.model.objects.none() else: default = field.get_default() privacy_fields[name] = default privacy_fields['email'] = u'' cls.CACHED_PRIVACY_FIELDS = privacy_fields return cls.CACHED_PRIVACY_FIELDS class UserProfile(UserProfilePrivacyModel): REFERRAL_SOURCE_CHOICES = ( ('direct', 'Mozillians'), ('contribute', 'Get Involved'), ) objects = ProfileManager() user = models.OneToOneField(User) full_name = models.CharField(max_length=255, default='', blank=False, verbose_name=_lazy(u'Full Name')) full_name_local = models.CharField(max_length=255, blank=True, default='', verbose_name=_lazy(u'Name in local language')) is_vouched = models.BooleanField( default=False, help_text='You can edit vouched status by editing invidual vouches') can_vouch = models.BooleanField( default=False, help_text='You can edit can_vouch status by editing invidual vouches') last_updated = models.DateTimeField(auto_now=True) groups = models.ManyToManyField(Group, blank=True, related_name='members', through=GroupMembership) skills = models.ManyToManyField(Skill, blank=True, related_name='members') bio = models.TextField(verbose_name=_lazy(u'Bio'), default='', blank=True) photo = ImageField(default='', blank=True, upload_to=_calculate_photo_filename) ircname = models.CharField(max_length=63, verbose_name=_lazy(u'IRC Nickname'), default='', blank=True) geo_country = models.ForeignKey('geo.Country', blank=True, null=True, on_delete=models.SET_NULL) geo_region = models.ForeignKey('geo.Region', blank=True, null=True, on_delete=models.SET_NULL) geo_city = models.ForeignKey('geo.City', blank=True, null=True, on_delete=models.SET_NULL) lat = models.FloatField(_lazy(u'Latitude'), blank=True, null=True) lng = models.FloatField(_lazy(u'Longitude'), blank=True, null=True) city = models.ForeignKey('cities_light.City', blank=True, null=True, on_delete=models.SET_NULL) region = models.ForeignKey('cities_light.Region', blank=True, null=True, on_delete=models.SET_NULL) country = models.ForeignKey('cities_light.Country', blank=True, null=True, on_delete=models.SET_NULL) basket_token = models.CharField(max_length=1024, default='', blank=True) date_mozillian = models.DateField('When was involved with Mozilla', null=True, blank=True, default=None) timezone = models.CharField(max_length=100, blank=True, default='', choices=zip(common_timezones, common_timezones)) tshirt = models.IntegerField( _lazy(u'T-Shirt'), blank=True, null=True, default=None, choices=( (1, _lazy(u'Fitted Small')), (2, _lazy(u'Fitted Medium')), (3, _lazy(u'Fitted Large')), (4, _lazy(u'Fitted X-Large')), (5, _lazy(u'Fitted XX-Large')), (6, _lazy(u'Fitted XXX-Large')), (7, _lazy(u'Straight-cut Small')), (8, _lazy(u'Straight-cut Medium')), (9, _lazy(u'Straight-cut Large')), (10, _lazy(u'Straight-cut X-Large')), (11, _lazy(u'Straight-cut XX-Large')), (12, _lazy(u'Straight-cut XXX-Large')) )) title = models.CharField(_lazy(u'What do you do for Mozilla?'), max_length=70, blank=True, default='') story_link = models.URLField( _lazy(u'Link to your contribution story'), help_text=_lazy(u'If you have created something public that ' u'tells the story of how you came to be a ' u'Mozillian, specify that link here.'), max_length=1024, blank=True, default='') referral_source = models.CharField(max_length=32, choices=REFERRAL_SOURCE_CHOICES, default='direct') auth0_user_id = models.CharField(max_length=1024, default='', blank=True) is_staff = models.BooleanField(default=False) def __unicode__(self): return self.display_name def 
get_absolute_url(self): return reverse('phonebook:profile_view', args=[self.user.username]) class Meta: db_table = 'profile' ordering = ['full_name'] def __getattribute__(self, attrname): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) privacy_fields = UserProfile.privacy_fields() privacy_level = _getattr('_privacy_level') special_functions = { 'accounts': '_accounts', 'alternate_emails': '_alternate_emails', 'email': '_primary_email', 'is_public_indexable': '_is_public_indexable', 'languages': '_languages', 'vouches_made': '_vouches_made', 'vouches_received': '_vouches_received', 'vouched_by': '_vouched_by', 'websites': '_websites', 'identity_profiles': '_identity_profiles' } if attrname in special_functions: return _getattr(special_functions[attrname]) if not privacy_level or attrname not in privacy_fields: return _getattr(attrname) field_privacy = _getattr('privacy_%s' % attrname) if field_privacy < privacy_level: return privacy_fields.get(attrname) return _getattr(attrname) def _filter_accounts_privacy(self, accounts): if self._privacy_level: return accounts.filter(privacy__gte=self._privacy_level) return accounts @property def _accounts(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) excluded_types = [ExternalAccount.TYPE_WEBSITE, ExternalAccount.TYPE_EMAIL] accounts = _getattr('externalaccount_set').exclude(type__in=excluded_types) return self._filter_accounts_privacy(accounts) @property def _alternate_emails(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) accounts = _getattr('externalaccount_set').filter(type=ExternalAccount.TYPE_EMAIL) return self._filter_accounts_privacy(accounts) @property
BSD 3-Clause New or Revised License
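A simplified, pure-Python illustration of the conflict rule in _api_alternate_emails above, using plain dicts as stand-ins for the ORM rows: an entry is excluded when the other source holds the same address at an equal or higher privacy level.

legacy = [{"identifier": "a@example.org", "privacy": 3},
          {"identifier": "b@example.org", "privacy": 4}]
idp = [{"email": "a@example.org", "privacy": 4}]

legacy_kept = [e for e in legacy
               if not any(i["email"] == e["identifier"] and i["privacy"] >= e["privacy"]
                          for i in idp)]
idp_kept = [i for i in idp
            if not any(e["identifier"] == i["email"] and e["privacy"] >= i["privacy"]
                       for e in legacy_kept)]
combined = legacy_kept + idp_kept
# combined keeps b@example.org from the legacy list and a@example.org from the IdP list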
google-research/language
language/mentionmemory/tasks/ultra_fine_entity_typing_task.py
get_predictions
python
def get_predictions(logit_per_label): num_labels = logit_per_label.shape[1] predictions = (logit_per_label > 0).astype(jnp.int32) single_best_prediction = jnp.argmax(logit_per_label, axis=-1) single_best_prediction = jax.nn.one_hot( single_best_prediction, num_labels, dtype=jnp.int32) predictions_exists = predictions.sum(axis=1, keepdims=True) > 0 predictions_exists = predictions_exists.astype(jnp.int32) final_predictions = ( predictions_exists * predictions + (1 - predictions_exists) * single_best_prediction) return final_predictions
Prediction according to https://www.aclweb.org/anthology/P18-1009.pdf.
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/mentionmemory/tasks/ultra_fine_entity_typing_task.py#L54-L68
import flax.linen as nn import jax import jax.numpy as jnp from language.mentionmemory.tasks import mention_classifier_task from language.mentionmemory.tasks import task_registry from language.mentionmemory.utils.custom_types import Array, MetricGroups import ml_collections NUM_CLASSES = 10331 COARSE_CLASSES_START = 0 COARSE_CLASSES_END = 9 FINE_CLASSES_START = COARSE_CLASSES_END FINE_CLASSES_END = 130 ULTRA_FINE_CLASSES_START = FINE_CLASSES_END ULTRA_FINE_CLASSES_END = NUM_CLASSES _SMALL_NUMBER = 1e-10 def get_weight_per_group(labels, group_start, group_end): label_per_group_exists = labels[:, group_start:group_end].sum(1) > 0 label_per_group_exists = label_per_group_exists.astype(jnp.float32) return label_per_group_exists def get_loss_per_group(loss_per_label, weight_per_group, group_start, group_end): loss_per_group = loss_per_label[:, group_start:group_end].sum(1) loss_per_group *= weight_per_group return loss_per_group
Apache License 2.0
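A small worked example for get_predictions above (requires only jax); row 0 has logits above the threshold, while row 1 falls back to the single best label:

import jax.numpy as jnp

logits = jnp.array([[2.0, -1.0, 0.5],
                    [-3.0, -0.2, -1.0]])
preds = get_predictions(logits)
# preds -> [[1, 0, 1],    labels 0 and 2 exceed the 0 threshold
#           [0, 1, 0]]    nothing is positive, so the argmax (label 1) is used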
magicstack/contextvars
tests/test_basics.py
isolated_context
python
def isolated_context(func): @functools.wraps(func) def wrapper(*args, **kwargs): ctx = contextvars.Context() return ctx.run(func, *args, **kwargs) return wrapper
Needed to make reftracking test mode work.
https://github.com/magicstack/contextvars/blob/d13d478ee17a61b1f28bea67097357556ffb942b/tests/test_basics.py#L15-L21
import concurrent.futures import functools import random import time import unittest import contextvars
Apache License 2.0
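A short sketch of what the isolated_context decorator above buys: context-variable writes made inside the wrapped call do not leak into the caller's context (the variable name is a placeholder):

import contextvars

my_var = contextvars.ContextVar('my_var', default=0)

@isolated_context
def mutate():
    my_var.set(42)
    return my_var.get()

assert mutate() == 42        # the change is visible inside the fresh Context
assert my_var.get() == 0     # but not in the caller's context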
upb-lea/gym-electric-motor
tests/test_physical_systems/test_mechanical_loads.py
defaultMechanicalLoad
python
def defaultMechanicalLoad(): return MechanicalLoad()
pytest fixture that returns a default MechanicalLoad object :return: MechanicalLoad object initialized with default values
https://github.com/upb-lea/gym-electric-motor/blob/f091d6b4a754d4fa2439fea64e2c89b9e86d683a/tests/test_physical_systems/test_mechanical_loads.py#L32-L37
import pytest import gym_electric_motor as gem from gym_electric_motor.physical_systems import PolynomialStaticLoad, MechanicalLoad, ConstantSpeedLoad, ExternalSpeedLoad from gym.spaces import Box import numpy as np from scipy import signal import math load_parameter1 = {'j_load': 0.2, 'state_names': ['omega'], 'j_rot_load': 0.25, 'omega_range': (0, 1), 'parameter': dict(a=0.12, b=0.13, c=0.4, j_load=0.2)} test_const_initializer = {'states': {'omega': 15.0}, 'interval': None, 'random_init': None, 'random_params': (None, None)} test_rand_initializer = { 'interval': None, 'random_init': None, 'random_params': (None, None)} test_amp = 20 test_bias = 10 test_freq = 2 def speed_profile_(t, amp, freq, bias): return amp*signal.sawtooth(2*np.pi*freq*t, width=0.5)+bias @pytest.fixture
MIT License
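A minimal sketch of how a test consumes the fixture above; pytest injects the return value by parameter name, and MechanicalLoad is already imported in the test module:

def test_default_mechanical_load(defaultMechanicalLoad):
    # the fixture builds the object with default parameters
    assert isinstance(defaultMechanicalLoad, MechanicalLoad)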
stencila/hub
manager/accounts/ui/views/accounts.py
billing
python
def billing(request: HttpRequest, *args, **kwargs) -> HttpResponse: viewset = AccountsViewSet.init("update_plan", request, args, kwargs) account = viewset.get_object() session = account.get_customer_portal_session(request) return redir(session.url)
Allow users to manage their billing. Currently implemented as a redirect to Stripe Customer Portal (https://stripe.com/docs/billing/subscriptions/customer-portal). Creates a new `account.customer` if necessary and then redirects them to a portal session.
https://github.com/stencila/hub/blob/e696c39213156bb43a098f81286197e919379cdf/manager/accounts/ui/views/accounts.py#L174-L186
from django.contrib.auth.decorators import login_required from django.http import Http404, HttpRequest, HttpResponse from django.shortcuts import redirect as redir from django.shortcuts import render from accounts.api.views import AccountsViewSet from accounts.models import AccountTier from accounts.quotas import AccountQuotas from accounts.ui.forms import AccountImageForm from projects.api.views.projects import ProjectsViewSet def redirect(request: HttpRequest, *args, **kwargs) -> HttpResponse: viewset = AccountsViewSet.init("retrieve", request, args, kwargs) account = viewset.get_object() return redir("/{0}{1}".format(account.name, kwargs["rest"])) def list_orgs(request: HttpRequest, *args, **kwargs) -> HttpResponse: viewset = AccountsViewSet.init("list", request, args, kwargs) accounts = viewset.get_queryset().filter(user__isnull=True) return render(request, "accounts/list_orgs.html", dict(accounts=accounts)) def list_users(request: HttpRequest, *args, **kwargs) -> HttpResponse: viewset = AccountsViewSet.init("list", request, args, kwargs) accounts = viewset.get_queryset().filter(user__isnull=False) return render(request, "accounts/list_users.html", dict(accounts=accounts)) @login_required def create(request: HttpRequest, *args, **kwargs) -> HttpResponse: viewset = AccountsViewSet.init("create", request, args, kwargs) serializer = viewset.get_serializer() return render(request, "accounts/create.html", dict(serializer=serializer)) def retrieve(request: HttpRequest, *args, **kwargs) -> HttpResponse: account_viewset = AccountsViewSet.init("retrieve", request, args, kwargs) account = account_viewset.get_object() projects_viewset = ProjectsViewSet.init("retrieve", request, args, kwargs) projects = projects_viewset.get_queryset() if account.is_personal: projects = projects.filter(agents__user=account.user_id) else: projects = projects.filter(account=account) return render( request, "accounts/retrieve.html", dict( account=account, role=account.role, projects=projects, meta=account.get_meta(), ), ) @login_required def profile(request: HttpRequest, *args, **kwargs) -> HttpResponse: viewset = AccountsViewSet.init("partial_update", request, args, kwargs) account = viewset.get_object() serializer = viewset.get_serializer(account) update_image_form = AccountImageForm() return render( request, "accounts/profile.html", dict( account=account, role=account.role, serializer=serializer, update_image_form=update_image_form, ), ) @login_required def profile_image(request: HttpRequest, *args, **kwargs) -> HttpResponse: if request.method == "POST": viewset = AccountsViewSet.init("partial_update", request, args, kwargs) account = viewset.get_object() form = AccountImageForm(request.POST, request.FILES, instance=account) if form.is_valid(): form.save() if account.is_personal: if request.session and "user" in request.session: request.session["user"][ "image" ] = request.user.personal_account.image.medium request.session.modified = True return redir("ui-accounts-profile", account.name) raise RuntimeError("Error attempting to save the account image.") else: raise Http404 @login_required def publishing(request: HttpRequest, *args, **kwargs) -> HttpResponse: viewset = AccountsViewSet.init("partial_update", request, args, kwargs) account = viewset.get_object() serializer = viewset.get_serializer(account) return render( request, "accounts/publishing.html", dict(account=account, role=account.role, serializer=serializer), ) @login_required def plan(request: HttpRequest, *args, **kwargs) -> HttpResponse: viewset = 
AccountsViewSet.init("update_plan", request, args, kwargs) account = viewset.get_object() usage = AccountQuotas.usage(account) tiers = AccountTier.active_tiers() fields = AccountTier.fields() return render( request, "accounts/plan.html", dict( account=account, role=account.role, usage=usage, tier=account.tier, tiers=tiers, fields=fields, ), ) @login_required
Apache License 2.0
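A hypothetical urls.py entry showing how a view like billing is typically wired; the URL pattern, the kwarg name "account", and the route name are assumptions, not taken from the repository:

from django.urls import re_path

urlpatterns = [
    # hypothetical: the account is derived from the leading path segment
    re_path(r"^(?P<account>[^/]+)/billing/$", billing, name="ui-accounts-billing"),
]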
borda/pyimsegm
imsegm/utilities/data_io.py
io_image_decorate
python
def io_image_decorate(func): @wraps(func) def wrap(*args, **kwargs): log_level = logging.getLogger().getEffectiveLevel() logging.getLogger().setLevel(logging.INFO) with warnings.catch_warnings(): warnings.simplefilter("ignore") response = func(*args, **kwargs) logging.getLogger().setLevel(log_level) return response return wrap
Custom decorator to suppress debug messages emitted by the PIL functions, e.g. PIL debug logging - DEBUG:PIL.PngImagePlugin:STREAM b'IHDR' 16 13 :param func: :return:
https://github.com/borda/pyimsegm/blob/7463cfc7aad8781564dc84c8780f291cc3c17fe3/imsegm/utilities/data_io.py#L295-L314
import glob import logging import os import re import warnings from functools import wraps import nibabel import numpy as np import pandas as pd from PIL import Image from scipy import ndimage from skimage import color, exposure, io, measure from imsegm.utilities import ImageDimensionError from imsegm.utilities.read_zvi import load_image as load_zvi COLUMNS_COORDS = ['X', 'Y'] DICT_CONVERT_COLOR_FROM_RGB = { 'hsv': color.rgb2hsv, 'luv': color.rgb2luv, 'lab': color.rgb2lab, 'hed': color.rgb2hed, 'xyz': color.rgb2xyz } DICT_CONVERT_COLOR_TO_RGB = { 'hsv': color.hsv2rgb, 'luv': color.luv2rgb, 'lab': color.lab2rgb, 'hed': color.hed2rgb, 'xyz': color.xyz2rgb } def convert_img_color_from_rgb(image, color_space): im_dims = image.ndim == 3 and image.shape[-1] in (3, 4) if im_dims and color_space in DICT_CONVERT_COLOR_FROM_RGB: image = DICT_CONVERT_COLOR_FROM_RGB[color_space](image) return image def convert_img_color_to_rgb(image, color_space): im_dims = image.ndim == 3 and image.shape[-1] == 3 if im_dims and color_space in DICT_CONVERT_COLOR_TO_RGB: image = DICT_CONVERT_COLOR_TO_RGB[color_space](image) return image def update_path(path_file, lim_depth=5, absolute=True): if path_file.startswith('/'): return path_file if path_file.startswith('~'): path_file = os.path.expanduser(path_file) else: tmp_path = path_file for _ in range(lim_depth): if os.path.exists(tmp_path): path_file = tmp_path break tmp_path = os.path.join('..', tmp_path) if absolute: path_file = os.path.abspath(path_file) return path_file def swap_coord_x_y(points): points = np.array(points) if not points.size: return points.tolist() if points.shape[1] != 2: raise ValueError points_new = points[:, [1, 0]] return points_new.tolist() def load_landmarks_txt(path_file): path_file = os.path.abspath(os.path.expanduser(path_file)) if not os.path.isfile(path_file): raise FileNotFoundError('missing "%s"' % path_file) with open(path_file, 'r') as f: lines = f.readlines() landmarks = [] for line in lines[2:]: match_obj = re.match('(.*) (.*)', line) vals = match_obj.groups() vals = [int(float(i)) for i in vals] landmarks.append(vals) logging.debug(' load_landmarks_txt (%i): \n%r', len(landmarks), landmarks) return landmarks def load_landmarks_csv(path_file): path_file = os.path.abspath(os.path.expanduser(path_file)) if not os.path.isfile(path_file): raise FileNotFoundError('missing "%s"' % path_file) df = pd.read_csv(path_file, index_col=0) landmarks = df[COLUMNS_COORDS].values.tolist() logging.debug(' load_landmarks_csv (%i): \n%r', len(landmarks), np.asarray(landmarks).astype(int).tolist()) return landmarks def save_landmarks_txt(path_file, landmarks): if not os.path.isdir(os.path.dirname(path_file)): raise FileNotFoundError('missing "%s"' % os.path.dirname(path_file)) path_file = os.path.splitext(path_file)[0] + '.txt' logging.info(' save_landmarks_txt: -> creating TXT file: %s', path_file) logging.info(' save_landmarks_txt: -> creating TXT file: %s', path_file) with open(path_file, 'w') as f: f.write('point\n') f.write('%i\n' % len(landmarks)) for el in landmarks: f.write("{} {}\n".format(int(el[0]), int(el[1]))) return path_file def save_landmarks_csv(path_file, landmarks, dtype=float): if not os.path.isdir(os.path.dirname(path_file)): raise FileNotFoundError('missing "%s"' % os.path.dirname(path_file)) path_file = os.path.splitext(path_file)[0] + '.csv' logging.debug(' save_landmarks_csv: -> creating CSV file: %s', path_file) landmarks = np.array(landmarks, dtype=dtype) if not landmarks.size: logging.warning('empty set of landmarks') 
landmarks = np.zeros((0, 2), dtype=dtype) df = pd.DataFrame(landmarks, columns=COLUMNS_COORDS) df.to_csv(path_file) return path_file def scale_image_vals_in_range(img, im_range=1.): img = (img - np.min(img)) / float(np.max(img) - np.min(img)) if im_range == 255: img = (img * im_range).astype(np.uint8) return img def scale_image_intensity(img, im_range=1., quantiles=(2, 98)): p_low = np.percentile(img, quantiles[0]) p_high = np.percentile(img, quantiles[1]) img = exposure.rescale_intensity(img.astype(float), in_range=(p_low, p_high), out_range='float') if im_range == 255: img = np.array(img * im_range).astype(np.uint8) return img
BSD 3-Clause New or Revised License
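A short sketch of applying io_image_decorate above to a PIL-backed loader (the file name is a placeholder); PIL's chunk-level DEBUG records are silenced while the wrapped call runs:

import numpy as np
from PIL import Image

@io_image_decorate
def load_png(path):
    return np.asarray(Image.open(path))

img = load_png('sample.png')   # placeholder path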
jmafc/database-ui-tutorial
werkzeug/film.py
FilmHandler.save
python
def save(self, request, id=None): form = FilmForm(**request.form) form.validate() errors = form.errors if not errors: film = self.relvar.tuple(int(id), form.title, int(form.release_year)) film._tuple_version = int(form.rowver) try: self.relvar.update_one(film, self.relvar.key_tuple(int(id))) except Exception as exc: errors = {None: self.db_error(exc)} self.db.commit() if errors: return render('film/edit.html', id=id, film=film, errors=errors) return redirect('/films')
Saves the film data submitted from 'edit'
https://github.com/jmafc/database-ui-tutorial/blob/bb20bd5714403cff44d36e40f35bdb2c591d9dc2/werkzeug/film.py#L125-L141
from werkzeug.routing import Map, Rule from werkzeug.exceptions import HTTPException, NotFound from werkzeug.utils import redirect from templating import render from bl.film import Film_RV, Film_List def film_repr(tup): return "%s - %d" % (tup.title, tup.release_year) class FilmForm(object): def __init__(self, **data): for attr in ['rowver', 'id', 'title', 'release_year']: if attr in data: setattr(self, attr, data.get(attr, 0)[0]) else: setattr(self, attr, '') self.errors = None def validate(self): self.errors = {} if not self.title: self.errors['title'] = "Title cannot be an empty string" if not self.release_year.isdigit() or int(self.release_year) < 1888: self.errors['release_year'] = "Release year must be a number greater than 1887" class FilmHandler(object): def __init__(self, dbconn): self.db = dbconn self.relvar = Film_RV self.relvar.connect(dbconn) self.relation = Film_List self.relation.connect(self.db) self.url_map = Map([ Rule('/films', endpoint='index'), Rule('/film/new', endpoint='new'), Rule('/film/create', endpoint='create'), Rule('/film/<int:id>', endpoint='edit'), Rule('/film/<int:id>/save', endpoint='save'), Rule('/film/<int:id>/delete', endpoint='delete')]) def dispatch(self, request): adapter = self.url_map.bind_to_environ(request.environ) try: endpoint, values = adapter.match() return getattr(self, endpoint)(request, **values) except NotFound as exc: raise exc except HTTPException as exc: return exc def db_error(self, exc): return 'A database error has occurred: %s' % exc.args[0] def new(self, request): return render('film/new.html', film=self.relvar.default_tuple()) def create(self, request): form = FilmForm(**request.form) form.validate() errors = form.errors if not errors: film = self.relvar.tuple(title=form.title, release_year=int(form.release_year)) try: self.relvar.insert_one(film) except Exception as exc: errors = {None: self.db_error(exc)} self.db.commit() if errors: return render('film/new.html', film=form, errors=errors) return redirect('/films') def add_args(self, names, req_args): args = {} for name in names: if name in req_args and req_args[name]: args.update({name: req_args[name]}) return args def index(self, request): errors = {} p = int(request.args.get('p', 1)) qry_args = self.add_args(['title', 'release_year'], request.args) maxlines = 10 try: numrows = self.relation.count(qry_args) film_list = self.relation.subset(maxlines, (p - 1) * maxlines, qry_args) except KeyError as exc: numrows = 0 film_list = [] errors = {None: exc} except Exception as exc: numrows = 0 film_list = [] errors = {None: self.db_error(exc)} more = 1 if (numrows % maxlines) else 0 return render('film/list.html', films=film_list, curr_page=p, numrows=numrows, numpages=numrows // maxlines + more, qry_args=qry_args, errors=errors) def edit(self, request, id): if id < 1: raise NotFound("Film id must be a positive integer: %s" % id) try: row = self.relvar.get_one(self.relvar.key_tuple(id)) except Exception as exc: return render('film/edit.html', id=id, errors={None: self.db_error(exc)}) if not row: raise NotFound("Film %d not found " % id) return render('film/edit.html', film=row)
BSD 3-Clause New or Revised License
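A small, database-free sketch of the FilmForm validation that FilmHandler.save relies on; note the form expects list-valued fields because the data normally comes from a werkzeug form multidict:

form = FilmForm(id=['5'], rowver=['1'], title=['Metropolis'], release_year=['1927'])
form.validate()
assert form.errors == {}

bad = FilmForm(id=['5'], rowver=['1'], title=[''], release_year=['1800'])
bad.validate()
# bad.errors now flags both the empty title and the pre-1888 release year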
ssfdust/full-stack-flask-smorest
app/utils/formatters.py
celery_worker_formatter
python
def celery_worker_formatter(worker_info): from flask_babel import get_timezone result = [] for _, task_info in worker_info.items(): for _, details in task_info.items(): state, info_dict = details tmp = {} tmp["state"] = state tmp.update(info_dict) tmp["time_start"] = arrow.get(tmp["time_start"]).to(str(get_timezone())) _id = uuid.UUID(tmp.pop("id")) tmp["id"] = _id result.append(tmp) return result
e.g. {'celery@RedLotus': { '61421e6b-b933-412b-8f62-65425f312b69': [ 'active', { 'id': '61421e6b-b933-412b-8f62-65425f312b69', 'name': 'send_mail', 'args': '()', 'kwargs': '{}', 'type': 'send_mail', 'hostname': 'celery@RedLotus', 'time_start': 1565752238.579593, 'acknowledged': False, 'delivery_info': { 'exchange': '', 'routing_key': 'celery', 'priority': 0, 'redelivered': None }, 'worker_pid': 19696 } ] } }
https://github.com/ssfdust/full-stack-flask-smorest/blob/a0bdbd3a7d314b82bb43b265578aba7bbd175e51/app/utils/formatters.py#L29-L73
import uuid import arrow from pprint import pformat from loguru import logger def mongon_opts_str(opts): url = "mongodb://{username}:{password}@{host}:{port}/{db}".format(**opts) return url
Apache License 2.0
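celery_worker_formatter above flattens Celery's per-worker inspect() payload into rows with a localized timestamp and a UUID id. A small standalone sketch of the same transformation on the docstring's example payload; a fixed "UTC" zone stands in for flask_babel.get_timezone(), which needs a Flask app context:

import uuid
import arrow

worker_info = {
    'celery@RedLotus': {
        '61421e6b-b933-412b-8f62-65425f312b69': [
            'active',
            {'id': '61421e6b-b933-412b-8f62-65425f312b69',
             'name': 'send_mail',
             'time_start': 1565752238.579593},
        ]
    }
}

result = []
for task_info in worker_info.values():
    for details in task_info.values():
        state, info = details
        row = {"state": state, **info}
        row["time_start"] = arrow.get(row["time_start"]).to("UTC")  # assumption: UTC instead of the babel timezone
        row["id"] = uuid.UUID(row.pop("id"))
        result.append(row)

print(result[0]["state"], result[0]["time_start"], result[0]["id"])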
googleads/google-ads-python
google/ads/googleads/v7/services/services/distance_view_service/client.py
DistanceViewServiceClient.parse_distance_view_path
python
def parse_distance_view_path(path: str) -> Dict[str, str]:
    m = re.match(
        r"^customers/(?P<customer_id>.+?)/distanceViews/(?P<placeholder_chain_id>.+?)~(?P<distance_bucket>.+?)$",
        path,
    )
    return m.groupdict() if m else {}
Parse a distance_view path into its component segments.
https://github.com/googleads/google-ads-python/blob/6794993e146abcfe21292677144c66cb546446bc/google/ads/googleads/v7/services/services/distance_view_service/client.py#L170-L176
from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union from google.api_core import client_options as client_options_lib from google.api_core import exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials from google.auth.transport import mtls from google.auth.transport.grpc import SslCredentials from google.auth.exceptions import MutualTLSChannelError from google.oauth2 import service_account from google.ads.googleads.v7.resources.types import distance_view from google.ads.googleads.v7.services.types import distance_view_service from .transports.base import DistanceViewServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import DistanceViewServiceGrpcTransport class DistanceViewServiceClientMeta(type): _transport_registry = ( OrderedDict() ) _transport_registry["grpc"] = DistanceViewServiceGrpcTransport def get_transport_class( cls, label: str = None, ) -> Type[DistanceViewServiceTransport]: if label: return cls._transport_registry[label] return next(iter(cls._transport_registry.values())) class DistanceViewServiceClient(metaclass=DistanceViewServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "googleads.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): credentials = service_account.Credentials.from_service_account_info( info ) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): credentials = service_account.Credentials.from_service_account_file( filename ) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> DistanceViewServiceTransport: return self._transport @staticmethod def distance_view_path( customer_id: str, placeholder_chain_id: str, distance_bucket: str, ) -> str: return "customers/{customer_id}/distanceViews/{placeholder_chain_id}~{distance_bucket}".format( customer_id=customer_id, placeholder_chain_id=placeholder_chain_id, distance_bucket=distance_bucket, ) @staticmethod
Apache License 2.0
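parse_distance_view_path is the inverse of the distance_view_path() builder shown in the context. A quick check of the same regex on a sample path (the customer id, chain id, and bucket value are illustrative placeholders):

import re

path = "customers/123/distanceViews/456~WITHIN_10KM"
pattern = (r"^customers/(?P<customer_id>.+?)/distanceViews/"
           r"(?P<placeholder_chain_id>.+?)~(?P<distance_bucket>.+?)$")
m = re.match(pattern, path)
print(m.groupdict() if m else {})
# {'customer_id': '123', 'placeholder_chain_id': '456', 'distance_bucket': 'WITHIN_10KM'}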
fidelity/stoke
stoke/io.py
DeepspeedIO.load
python
def load(
    self,
    model: torch.nn.Module,
    optimizer: Union[torch.optim.Optimizer, OSS],
    gpu: bool,
    path: str,
    tag: str,
    scaler_dict_fn: Optional[Callable] = None,
    strict: bool = True,
):
    map_loc = f"cuda:{self.device_id}"
    self._print_device(f"Load is mapping to {map_loc}")
    try:
        _, client_sd = model.load_checkpoint(
            path, tag, load_module_strict=strict, load_optimizer_states=True
        )
    except OSError as e:
        self._print_device(f"Unable to load model from given path: {path}/{tag}")
        raise e
    return (
        client_sd["backward_step"],
        client_sd["grad_accum_step"],
        client_sd["optimizer_step"],
        client_sd["extras"],
    )
Deepspeed override implementation for loading a PyTorch model checkpoint

https://www.deepspeed.ai/getting-started/#model-checkpointing

Parameters
----------
model: torch.nn.Module
    current model object
optimizer: Union[torch.optim.Optimizer, OSS]
    current optimizer object
gpu: bool
    if using gpu device or not
path: str
    path to directory that the model checkpoint was saved
    (prefer absolute paths over relative paths)
tag: str
    full tag name the model checkpoint was saved as
scaler_dict_fn: Callable, default: None
    callable function to load the scaler state dict
strict: bool
    ignore non-matching keys

Returns
-------
backward_step: int
    current number of backward calls (for resuming training correctly)
grad_accum_step: int,
    current step of gradient accumulation (for resuming training correctly)
optimizer_step: int
    current number of optimizer calls (for resuming training correctly)
extras: dict
    a dictionary of any extra things that were saved
https://github.com/fidelity/stoke/blob/5aae84bcafe1890d50a3d9e2e5366a367e31729d/stoke/io.py#L485-L544
from abc import ABC from enum import Enum from typing import Callable, Dict, Optional, Union import horovod.torch as hvd import torch from fairscale.nn.data_parallel import FullyShardedDataParallel from fairscale.optim.oss import OSS from stoke.utils import make_folder class BaseStokeIO(ABC): def __init__(self, save_rank: int = 0, verbose: bool = True, **kwargs): self._save_rank = save_rank self._prefix = "stoke" self._verbose = verbose def _make_tag(self, name: str, backward_step: int): return f"{self._prefix}-{name}-backward-step-{backward_step}" def _make_full_save_path( self, path: str, name: str, backward_step: int, extension: str ): return f"{path}/{self._make_tag(name=name, backward_step=backward_step)}.{extension}" def save( self, model: torch.nn.Module, optimizer: Union[torch.optim.Optimizer, OSS], path: str, backward_step: int, grad_accum_step: int, optimizer_step: int, name: str, status: dict, scaler_dict: Optional[dict] = None, extension: str = "pt", create_directory: bool = True, extras: Optional[dict] = None, ): out_path, tag = self._save( model_dict=model.state_dict(), optimizer_dict=optimizer.state_dict(), path=path, backward_step=backward_step, optimizer_step=optimizer_step, name=name, scaler_dict=scaler_dict, extension=extension, create_directory=create_directory, extras=extras, grad_accum_step=grad_accum_step, status=status, ) return out_path, tag def _save( self, model_dict: Dict, optimizer_dict: Dict, path: str, backward_step: int, grad_accum_step: int, optimizer_step: int, name: str, status: Dict, scaler_dict: Optional[Dict], extension: str, create_directory: bool, extras: Optional[Dict], ): save_path = self._make_full_save_path( path=path, name=name, backward_step=backward_step, extension=extension ) if self._verbose: self._print_device(f"Attempting to save model checkpoint to {save_path}") try: if create_directory: make_folder(path) torch.save( { "backward_step": backward_step, "grad_accum_step": grad_accum_step, "optimizer_step": optimizer_step, "stoke_status": status, "model_state_dict": model_dict, "optimizer_state_dict": optimizer_dict, "scaler_state_dict": scaler_dict, "extras": extras, }, save_path, ) except OSError as e: self._print_device(f"Unable to save model to given path: {save_path}") raise e return ( path, f"{self._make_tag(name=name, backward_step=backward_step)}.{extension}", ) def _load( self, model: torch.nn.Module, optimizer: Union[torch.optim.Optimizer, OSS], map_loc: str, path: str, tag: str, scaler_dict_fn: Optional[Callable] = None, strict: bool = True, ): try: load_dict = torch.load(f"{path}/{tag}", map_location=map_loc) model.load_state_dict( state_dict=load_dict["model_state_dict"], strict=strict ) if isinstance(model, FullyShardedDataParallel): self._print_device( "Handling loading of correct optimizer sharded state for Fairscale FSDP" ) optimizer.load_state_dict( state_dict=model.get_shard_from_optim_state_dict( load_dict["optimizer_state_dict"] ) ) else: optimizer.load_state_dict(state_dict=load_dict["optimizer_state_dict"]) if scaler_dict_fn is not None: scaler_dict_fn(load_dict["scaler_state_dict"]) except OSError as e: self._print_device(f"Unable to load model from given path: {path}/{tag}") raise e return ( load_dict["backward_step"], load_dict["grad_accum_step"], load_dict["optimizer_step"], load_dict["extras"], ) def load( self, model: torch.nn.Module, optimizer: Union[torch.optim.Optimizer, OSS], gpu: bool, path: str, tag: str, scaler_dict_fn: Optional[Callable] = None, strict: bool = True, ): map_loc = f"cuda:{self.device_id}" if 
gpu else self.device_id self._print_device(f"Load is mapping to {map_loc}") backward_step, grad_accum_step, optimizer_step, extras = self._load( model=model, optimizer=optimizer, map_loc=map_loc, path=path, tag=tag, scaler_dict_fn=scaler_dict_fn, strict=strict, ) return backward_step, grad_accum_step, optimizer_step, extras class DeepspeedIO(BaseStokeIO): def __init__(self, save_rank: int = 0, **kwargs): super(DeepspeedIO, self).__init__(save_rank=save_rank, **kwargs) def save( self, model: torch.nn.Module, optimizer: Union[torch.optim.Optimizer, OSS], path: str, backward_step: int, grad_accum_step: int, optimizer_step: int, name: str, status: dict, scaler_dict: Optional[dict] = None, extension: str = "pt", create_directory: bool = True, extras: Optional[dict] = None, ): tag = self._make_tag(name=name, backward_step=backward_step) save_path = self._make_full_save_path( path=path, name=name, backward_step=backward_step, extension=extension ) if self._verbose: self._print_device(f"Attempting to save model checkpoint to {save_path}") torch.distributed.barrier() try: client_sd = { "backward_step": backward_step, "grad_accum_step": grad_accum_step, "optimizer_step": optimizer_step, "stoke_status": status, "extras": extras, } _ = model.save_checkpoint( path, tag, client_state=client_sd, save_latest=False ) except OSError as e: self._print_device(f"Unable to save model to given path: {path}") raise e torch.distributed.barrier() return path, tag
Apache License 2.0
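The tag expected by load() is the string produced by BaseStokeIO._make_tag in the context; the Deepspeed save path passes it without a file extension, while the base loader's _make_full_save_path appends one. A tiny sketch of how those strings are composed, with an illustrative run name and step count:

prefix, name, backward_step = "stoke", "resnet-run", 12000
tag = f"{prefix}-{name}-backward-step-{backward_step}"
print(tag)                   # stoke-resnet-run-backward-step-12000
print(f"/ckpts/{tag}.pt")    # file name form used by the non-Deepspeed loader (path is illustrative)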
ucb-sts/sts
sts/control_flow/peeker.py
SnapshotPeeker.peek
python
def peek(self, dag):
    if dag.input_events == []:
        return dag

    unsupported_types = [ProcessFlowMod, DataplaneDrop]
    if find(lambda e: type(e) in unsupported_types, dag.events) is not None:
        raise ValueError('''Delayed flow_mods not yet supported. Please '''
                         '''implement the TODO near the sleep() call in play_forward()''')

    if not isinstance(dag.events[0], ConnectToControllers):
        raise ValueError("First event must be ConnectToControllers")

    simulation = None
    try:
        inferred_events = []
        (simulation, controller) = self.setup_simulation()

        assert(dag.input_events[0] != dag.events[0])
        snapshot_inputs = [dag.events[0]] + dag.input_events

        events_inferred_last_iteration = []

        for inject_input_idx in xrange(0, len(snapshot_inputs)):
            inject_input = get_inject_input(inject_input_idx, snapshot_inputs)
            following_input = get_following_input(inject_input_idx, snapshot_inputs)
            expected_internal_events = get_expected_internal_events(inject_input, following_input, dag.events)

            log.debug("peek()'ing after input %d (%s)" % (inject_input_idx, str(inject_input)))

            inferred_events += events_inferred_last_iteration

            fencepost = NOPInput(time=inject_input.time, round=inject_input.round)
            dag_interval = EventDag(events_inferred_last_iteration + [fencepost])

            if expected_internal_events == []:
                log.debug("Optimization: no expected internal events")
                Peeker.ambiguous_counts[0.0] += 1
                self.replay_interval(simulation, dag_interval, 0)
                events_inferred_last_iteration = [inject_input]
                continue

            wait_time_seconds = self.get_wait_time_seconds(inject_input, following_input)

            (found_events, snapshotter) = self.find_internal_events(simulation, controller,
                                                                    dag_interval, inject_input,
                                                                    wait_time_seconds)
            events_inferred_last_iteration = [inject_input]
            events_inferred_last_iteration += match_and_filter(found_events, expected_internal_events)

            snapshotter.snapshot_proceed()

        inferred_events += events_inferred_last_iteration
    finally:
        if simulation is not None:
            simulation.clean_up()

    return EventDag(inferred_events)
If dag.events == [], returns immediately. If dag.events != [], assumes that isinstance(dag.events[0], ConnectToControllers)
https://github.com/ucb-sts/sts/blob/82190b7662523e3aaa21998a6a31d0878abe66c7/sts/control_flow/peeker.py#L111-L190
import logging import time import abc from collections import Counter from sts.replay_event import * from sts.event_dag import EventDag from sts.control_flow.replayer import Replayer from sts.control_flow.base import ReplaySyncCallback from sts.control_flow.snapshot_utils import * from sts.replay_event import InternalEvent, NOPInput from sts.util.rpc_forker import LocalForker from sts.util.convenience import find from sts.input_traces.log_parser import parse log = logging.getLogger("peeker") class Peeker(object): __metaclass__ = abc.ABCMeta ambiguous_counts = Counter() ambiguous_events = Counter() def __init__(self, simulation_cfg, default_wait_time_seconds=0.05, epsilon_time=0.05): self.simulation_cfg = simulation_cfg self.default_wait_time_seconds = default_wait_time_seconds self.epsilon_time = epsilon_time @abc.abstractmethod def peek(self, dag): pass def get_wait_time_seconds(self, first_event, second_event): if first_event is None or second_event is None: return self.default_wait_time_seconds else: return second_event.time.as_float() - first_event.time.as_float() + self.epsilon_time class SnapshotPeeker(Peeker): def __init__(self, simulation_cfg, default_wait_time_seconds=0.05, epsilon_time=0.05, **kwargs): if len(simulation_cfg.controller_configs) != 1: raise ValueError("Only one controller supported for snapshotting") if simulation_cfg.controller_configs[0].sync is not None: raise ValueError("STSSyncProto currently incompatible with snapshotting") super(SnapshotPeeker, self).__init__(simulation_cfg, default_wait_time_seconds=default_wait_time_seconds, epsilon_time=epsilon_time) self.forker = LocalForker() if 'default_dp_permit' in kwargs and not kwargs['default_dp_permit']: raise ValueError('''Non-default DP Permit not currently supported ''' '''Please implement the TODO near the sleep() call ''' '''in play_forward()''') kwargs['default_dp_permit'] = True if 'pass_through_whitelisted_messages' not in kwargs: kwargs['pass_through_whitelisted_messages'] = False self.kwargs = kwargs unknown_kwargs = [ k for k in kwargs.keys() if k not in Replayer.kwargs ] if unknown_kwargs != []: raise ValueError("Unknown kwargs %s" % str(unknown_kwargs)) def setup_simulation(self): simulation = self.simulation_cfg.bootstrap(ReplaySyncCallback(None)) simulation.openflow_buffer.pass_through_whitelisted_packets = self.kwargs['pass_through_whitelisted_messages'] controller = simulation.controller_manager.controllers[0] return (simulation, controller)
Apache License 2.0
wheatoncs/lexos
lexos/models/content_analysis_model.py
Document.name
python
def name(self) -> str:
    return self._name
:return: document name.
https://github.com/wheatoncs/lexos/blob/994be4e403053ebbef18e5758a100af616195706/lexos/models/content_analysis_model.py#L506-L508
import random from copy import deepcopy from typing import Optional import pandas as pd from lexos.helpers.definitions import count_phrase_in_text from lexos.receivers.content_analysis_receiver import ContentAnalysisReceiver, ContentAnalysisOption class ContentAnalysisModel(object): def __init__(self, test_options: Optional[ContentAnalysisOption] = None): self._test_options = test_options self._dictionaries = [] self._corpus = [] self._counters = [] self._formulas = [] self._scores = [] self._averages = [] self._formula = "" self._toggle_all = True def add_file(self, file_name: str, label: str, content: str): content = content.strip() total_word_counts = len(str(content).split(" ")) self._corpus.append(File(content=content, file_name=file_name, label=label, total_word_counts=total_word_counts)) def add_dictionary(self, file_name: str, label: str, content: str): new_list = str(content).split(", ") new_list.sort(key=lambda x: len(x.split()), reverse=True) self._dictionaries.append(Dictionary(content=new_list, file_name=file_name, label=label)) def get_active_dicts(self) -> list: return [dictionary for dictionary in self.dictionaries if dictionary.active] def count(self) -> list: self._counters = [] dictionaries = self.join_active_dicts() for file in deepcopy(self._corpus): dictionaries = count_phrases(dictionary=dictionaries, file=file) self.get_dictionary_counts(dictionaries) return dictionaries def generate_corpus_results(self, dictionaries: list) -> list: corpus_results = [] for phrase in dictionaries: count = 0 for i in phrase.file_counts: count += phrase.file_counts[i] corpus_results.append([phrase.dict_label, phrase.content, str(count)]) return corpus_results def generate_document_results(self, dictionaries: list) -> list: document_results = [] for file in self._corpus: result = {"name": file.label, "table": []} for phrase in dictionaries: result["table"].append([phrase.dict_label, phrase.content, str(phrase.file_counts[file.label])]) document_results.append(result) return document_results def get_dictionary_counts(self, dictionaries: list): counter = [] active_dicts = self.get_active_dicts() for dictionary in active_dicts: count = 0 for phrase in dictionaries: if phrase.dict_label == dictionary.label: count += phrase.count counter.append(count) if len(counter) == len(active_dicts): self._counters.append(counter) def generate_scores(self): self._scores = [] self._formulas = [] active_dicts = self.get_active_dicts() result = 0 for corpus_index, file in enumerate(self._corpus): new_formula = self._formula for active_dict_index, active_dict in enumerate(active_dicts): new_formula = new_formula.replace( "[" + active_dict.label + "]", str(self._counters[corpus_index][active_dict_index])) new_formula = new_formula.replace("()", "") try: result = eval(new_formula) except (ValueError, SyntaxError): pass self._scores.append(round( float(result) / file.total_word_count, ndigits=3)) self._formulas.append(result) def generate_averages(self): self._averages = [] scores_sum = 0 total_word_counts_sum = 0 formulas_sum = 0 active_dicts = self.get_active_dicts() for index, (score, formula, file) in enumerate(zip(self.scores, self._formulas, self._corpus)): scores_sum += score total_word_counts_sum += file.total_word_count formulas_sum += formula if len(self.scores) != 0: scores_avg = round( (float(scores_sum) / len(self.scores)), ndigits=3) else: scores_avg = 0 if len(self._corpus) != 0: average = (float(total_word_counts_sum) / (len(self._corpus))) total_word_counts_avg = round(average, ndigits=1) 
else: total_word_counts_avg = 0 if len(self._formulas) != 0: sums_avg = round((float(formulas_sum) / len(self._formulas)), ndigits=1) else: sums_avg = 0 for dict_index, _ in enumerate(active_dicts): cat_count = sum([counter[dict_index] for counter in self._counters]) if len(self._counters) != 0: self._averages.append(round( float(cat_count) / len(self._counters), ndigits=1)) else: self._averages.append(0) self._averages.append(sums_avg) self._averages.append(total_word_counts_avg) self._averages.append(scores_avg) def join_active_dicts(self) -> list: active_dicts = self.get_active_dicts() dictionaries = [Phrase(content=phrase, dict_label=dictionary.label) for dictionary in active_dicts for phrase in dictionary.content if phrase != ''] dictionaries.sort(key=lambda x: len(x.content.split()), reverse=True) return dictionaries def to_data_frame(self) -> pd.DataFrame: columns = ["Document Name"] + [dictionary.label for dictionary in self.get_active_dicts()] + ["Formula", "Word Count", "Score"] dataframe = pd.DataFrame(columns=columns) avg_column = pd.Series(["Averages"] + self._averages, index=columns) dataframe = dataframe.append(avg_column, ignore_index=True) for index, (file, formula, score, counters) in enumerate( zip(self._corpus, self._formulas, self._scores, self._counters)): column = pd.Series( [file.label] + counters + [formula] + [file.total_word_count] + [score], index=columns) dataframe = dataframe.append(column, ignore_index=True) return dataframe def is_secure(self) -> bool: formula = self._formula allowed_input = ["[" + dictionary.label + "]" for dictionary in self.get_active_dicts()] + ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", " ", "+", "-", "*", "/", "sin", "cos", "tan", "log", "sqrt", "(", ")"] for item in allowed_input: formula = formula.replace(item, "") if len(formula) == 0: return True return False def save_formula(self): if self._test_options is not None: formula = self._test_options.formula else: formula = self.content_analysis_option.formula if len(formula) == 0: self._formula = "0" else: formula = formula.replace("√", "sqrt").replace("^", "**") self._formula = formula return self.check_formula() def check_formula(self) -> str: error_msg = "Formula errors:<br>" is_error = False if self._formula.count("(") != self._formula.count(")"): error_msg += "Mismatched parenthesis<br>" is_error = True if "sin()" in self._formula: error_msg += "sin takes exactly one argument (0 given)<br>" is_error = True if "cos()" in self._formula: error_msg += "cos takes exactly one argument (0 given)<br>" is_error = True if "tan()" in self._formula: error_msg += "tan takes exactly one argument (0 given)<br>" is_error = True if "log()" in self._formula: error_msg += "log takes exactly one argument (0 given)<br>" is_error = True if is_error: return error_msg return "" def get_top_results(self, dataframe) -> list: dataframe.Count = pd.to_numeric(dataframe.Count, errors="coerce") dataframe = dataframe.sort_values(by="Count", ascending=False) return dataframe.head(100).values.tolist() def analyze(self) -> (Optional[str], Optional[str]): dictionaries = self.count() if self.is_secure(): formula_errors = self.save_formula() self.generate_scores() self.generate_averages() dataframe_unsorted = self.to_data_frame() dataframe = dataframe_unsorted.sort_values( by=[dataframe_unsorted.columns [self.content_analysis_option.sort_column]], ascending=self.content_analysis_option.sort_ascending) overview = dataframe.values.tolist() overview.insert(0, dataframe.columns.values.tolist()) overview_csv = 
dataframe.to_csv() corpus_dataframe = pd.DataFrame(self.generate_corpus_results( dictionaries=dictionaries), columns=["Dictionary", "Phrase", "Count"]) corpus_results = self.get_top_results(corpus_dataframe) corpus_csv = corpus_dataframe.to_csv() document_results = [] for document_result in self.generate_document_results( dictionaries=dictionaries): dataframe = pd.DataFrame( document_result["table"], columns=["Dictionary", "Phrase", "Count"]) document_results.append({ "name": document_result["name"], "data": self.get_top_results(dataframe), "csv": dataframe.to_csv() }) else: formula_errors = "Formula error: Invalid input" overview = "" overview_csv = "" corpus_results = "" corpus_csv = "" document_results = "" return overview, overview_csv, corpus_results, corpus_csv, document_results, formula_errors @property def dictionaries(self) -> list: return self._dictionaries @dictionaries.setter def dictionaries(self, dictionaries: list): self._dictionaries = dictionaries @property def corpus(self) -> list: return self._corpus @property def counters(self) -> list: return self._counters @property def scores(self) -> list: return self._scores @property def averages(self) -> list: return self._averages @property def dictionary_colors(self) -> dict: colors = {} for dictionary in self._dictionaries: colors[dictionary.label] = ''.join( [random.choice('0123456789ABCD') for x in range(6)]) return colors @property def content_analysis_option(self) -> ContentAnalysisOption: if self._test_options is not None: if self._test_options.formula is not None: self.save_formula() return self._test_options return ContentAnalysisReceiver().options_from_front_end() @property def test_option(self): return self._test_options @test_option.setter def test_option(self, options): self._test_options = options def count_phrases(dictionary: list, file: object) -> list: for phrase in dictionary: phrase.count = count_phrase_in_text(phrase=phrase.content, text=file.content) phrase.file_counts[file.label] = count_phrase_in_text( phrase=phrase.content, text=file.content) if ' ' in phrase.content: file.content = file.content.replace(phrase.content, ' ') return dictionary class Document(object): def __init__(self): self._active = True self._label = "" self._name = "" @property def active(self) -> bool: return self._active @active.setter def active(self, active: bool): self._active = active @property def label(self) -> str: return self._label @label.setter def label(self, label: str): self._label = label @property
MIT License
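In the lexos context the accessor above sits under a @property decorator (the context ends with the decorator, and the sibling label attribute uses the same pattern with a setter). A minimal standalone sketch of that read-only property idiom, with an illustrative document name:

class Document:
    def __init__(self, name: str):
        self._name = name

    @property
    def name(self) -> str:
        # read-only accessor: there is no corresponding setter
        return self._name


doc = Document("sonnet18.txt")
print(doc.name)        # sonnet18.txt
# doc.name = "other"   # would raise AttributeError because no setter is defined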
deepinsight/insightface
recognition/partial_fc/mxnet/symbol/resnet.py
residual_unit_v3_x
python
def residual_unit_v3_x(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs):
    assert (bottle_neck)
    use_se = kwargs.get('version_se', 1)
    bn_mom = kwargs.get('bn_mom', 0.9)
    workspace = kwargs.get('workspace', 256)
    memonger = kwargs.get('memonger', False)
    act_type = kwargs.get('version_act', 'prelu')
    num_group = 32

    bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
    conv1 = Conv(data=bn1, num_group=num_group, num_filter=int(num_filter * 0.5), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv1')
    bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
    act1 = Act(data=bn2, act_type=act_type, name=name + '_relu1')
    conv2 = Conv(data=act1, num_group=num_group, num_filter=int(num_filter * 0.5), kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2')
    bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
    act2 = Act(data=bn3, act_type=act_type, name=name + '_relu2')
    conv3 = Conv(data=act2, num_filter=num_filter, kernel=(1, 1), stride=stride, pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv3')
    bn4 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')

    if use_se:
        body = mx.sym.Pooling(data=bn4, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1')
        body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv1", workspace=workspace)
        body = Act(data=body, act_type=act_type, name=name + '_se_relu1')
        body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv2", workspace=workspace)
        body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid")
        bn4 = mx.symbol.broadcast_mul(bn4, body)

    if dim_match:
        shortcut = data
    else:
        conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_conv1sc')
        shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc')
    if memonger:
        shortcut._set_attr(mirror_stage='True')
    return bn4 + shortcut
Return ResNeXt Unit symbol for building ResNeXt

Parameters
----------
data : str
    Input data
num_filter : int
    Number of output channels
bnf : int
    Bottle neck channels factor with regard to num_filter
stride : tuple
    Stride used in convolution
dim_match : Boolean
    True means channel number between input and output is the same, otherwise means differ
name : str
    Base name of the operators
workspace : int
    Workspace used in convolution operator
https://github.com/deepinsight/insightface/blob/6baaa7bcaf1a1624feec75270022e2dafeb6883b/recognition/partial_fc/mxnet/symbol/resnet.py#L827-L949
from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import os import mxnet as mx import numpy as np from symbol import symbol_utils sys.path.append(os.path.join(os.path.dirname(__file__), '../..')) from default import config def Conv(**kwargs): body = mx.sym.Convolution(**kwargs) return body def Act(data, act_type, name): if act_type == 'prelu': body = mx.sym.LeakyReLU(data=data, act_type='prelu', name=name) else: body = mx.symbol.Activation(data=data, act_type=act_type, name=name) return body def residual_unit_v1(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs): use_se = kwargs.get('version_se', 1) bn_mom = kwargs.get('bn_mom', 0.9) workspace = kwargs.get('workspace', 256) memonger = kwargs.get('memonger', False) act_type = kwargs.get('version_act', 'prelu') if bottle_neck: conv1 = Conv(data=data, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=stride, pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv1') bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1') act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1') conv2 = Conv(data=act1, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2') bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2') act2 = Act(data=bn2, act_type=act_type, name=name + '_relu2') conv3 = Conv(data=act2, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv3') bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3') if use_se: body = mx.sym.Pooling(data=bn3, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1') body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv1", workspace=workspace) body = Act(data=body, act_type=act_type, name=name + '_se_relu1') body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv2", workspace=workspace) body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid") bn3 = mx.symbol.broadcast_mul(bn3, body) if dim_match: shortcut = data else: conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_conv1sc') shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc') if memonger: shortcut._set_attr(mirror_stage='True') return Act(data=bn3 + shortcut, act_type=act_type, name=name + '_relu3') else: conv1 = Conv(data=data, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv1') bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1') act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1') conv2 = Conv(data=act1, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2') bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2') if use_se: body = mx.sym.Pooling(data=bn2, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1') body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv1", 
workspace=workspace) body = Act(data=body, act_type=act_type, name=name + '_se_relu1') body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv2", workspace=workspace) body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid") bn2 = mx.symbol.broadcast_mul(bn2, body) if dim_match: shortcut = data else: conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_conv1sc') shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_sc') if memonger: shortcut._set_attr(mirror_stage='True') return Act(data=bn2 + shortcut, act_type=act_type, name=name + '_relu3') def residual_unit_v1_L(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs): use_se = kwargs.get('version_se', 1) bn_mom = kwargs.get('bn_mom', 0.9) workspace = kwargs.get('workspace', 256) memonger = kwargs.get('memonger', False) act_type = kwargs.get('version_act', 'prelu') if bottle_neck: conv1 = Conv(data=data, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv1') bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1') act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1') conv2 = Conv(data=act1, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2') bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2') act2 = Act(data=bn2, act_type=act_type, name=name + '_relu2') conv3 = Conv(data=act2, num_filter=num_filter, kernel=(1, 1), stride=stride, pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv3') bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3') if use_se: body = mx.sym.Pooling(data=bn3, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1') body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv1", workspace=workspace) body = Act(data=body, act_type=act_type, name=name + '_se_relu1') body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv2", workspace=workspace) body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid") bn3 = mx.symbol.broadcast_mul(bn3, body) if dim_match: shortcut = data else: conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_conv1sc') shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc') if memonger: shortcut._set_attr(mirror_stage='True') return Act(data=bn3 + shortcut, act_type=act_type, name=name + '_relu3') else: conv1 = Conv(data=data, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv1') bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1') act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1') conv2 = Conv(data=act1, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2') bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2') if use_se: body = mx.sym.Pooling(data=bn2, global_pool=True, 
kernel=(7, 7), pool_type='avg', name=name + '_se_pool1') body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv1", workspace=workspace) body = Act(data=body, act_type=act_type, name=name + '_se_relu1') body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv2", workspace=workspace) body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid") bn2 = mx.symbol.broadcast_mul(bn2, body) if dim_match: shortcut = data else: conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_conv1sc') shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_sc') if memonger: shortcut._set_attr(mirror_stage='True') return Act(data=bn2 + shortcut, act_type=act_type, name=name + '_relu3') def residual_unit_v2(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs): use_se = kwargs.get('version_se', 1) bn_mom = kwargs.get('bn_mom', 0.9) workspace = kwargs.get('workspace', 256) memonger = kwargs.get('memonger', False) act_type = kwargs.get('version_act', 'prelu') if bottle_neck: bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1') act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1') conv1 = Conv(data=act1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv1') bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2') act2 = Act(data=bn2, act_type=act_type, name=name + '_relu2') conv2 = Conv(data=act2, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=stride, pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2') bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3') act3 = Act(data=bn3, act_type=act_type, name=name + '_relu3') conv3 = Conv(data=act3, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv3') if use_se: body = mx.sym.Pooling(data=conv3, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1') body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv1", workspace=workspace) body = Act(data=body, act_type=act_type, name=name + '_se_relu1') body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv2", workspace=workspace) body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid") conv3 = mx.symbol.broadcast_mul(conv3, body) if dim_match: shortcut = data else: shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_sc') if memonger: shortcut._set_attr(mirror_stage='True') return conv3 + shortcut else: bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1') act1 = Act(data=bn1, act_type=act_type, name=name + '_relu1') conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv1') bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2') act2 = Act(data=bn2, act_type=act_type, name=name + '_relu2') conv2 = Conv(data=act2, num_filter=num_filter, kernel=(3, 3), 
stride=(1, 1), pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2') if use_se: body = mx.sym.Pooling(data=conv2, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1') body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv1", workspace=workspace) body = Act(data=body, act_type=act_type, name=name + '_se_relu1') body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv2", workspace=workspace) body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid") conv2 = mx.symbol.broadcast_mul(conv2, body) if dim_match: shortcut = data else: shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_sc') if memonger: shortcut._set_attr(mirror_stage='True') return conv2 + shortcut def residual_unit_v3(data, num_filter, stride, dim_match, name, bottle_neck, **kwargs): use_se = kwargs.get('version_se', 1) bn_mom = kwargs.get('bn_mom', 0.9) workspace = kwargs.get('workspace', 256) memonger = kwargs.get('memonger', False) act_type = kwargs.get('version_act', 'prelu') if bottle_neck: bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1') conv1 = Conv(data=bn1, num_filter=int(num_filter * 0.25), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv1') bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2') act1 = Act(data=bn2, act_type=act_type, name=name + '_relu1') conv2 = Conv(data=act1, num_filter=int(num_filter * 0.25), kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2') bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3') act2 = Act(data=bn3, act_type=act_type, name=name + '_relu2') conv3 = Conv(data=act2, num_filter=num_filter, kernel=(1, 1), stride=stride, pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv3') bn4 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4') if use_se: body = mx.sym.Pooling(data=bn4, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1') body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv1", workspace=workspace) body = Act(data=body, act_type=act_type, name=name + '_se_relu1') body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv2", workspace=workspace) body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid") bn4 = mx.symbol.broadcast_mul(bn4, body) if dim_match: shortcut = data else: conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_conv1sc') shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc') if memonger: shortcut._set_attr(mirror_stage='True') return bn4 + shortcut else: bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1') conv1 = Conv(data=bn1, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv1') bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2') act1 = Act(data=bn2, act_type=act_type, name=name + '_relu1') 
conv2 = Conv(data=act1, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2') bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3') if use_se: body = mx.sym.Pooling(data=bn3, global_pool=True, kernel=(7, 7), pool_type='avg', name=name + '_se_pool1') body = Conv(data=body, num_filter=num_filter // 16, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv1", workspace=workspace) body = Act(data=body, act_type=act_type, name=name + '_se_relu1') body = Conv(data=body, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=name + "_se_conv2", workspace=workspace) body = mx.symbol.Activation(data=body, act_type='sigmoid', name=name + "_se_sigmoid") bn3 = mx.symbol.broadcast_mul(bn3, body) if dim_match: shortcut = data else: conv1sc = Conv(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride, no_bias=True, workspace=workspace, name=name + '_conv1sc') shortcut = mx.sym.BatchNorm(data=conv1sc, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_sc') if memonger: shortcut._set_attr(mirror_stage='True') return bn3 + shortcut
MIT License
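The use_se branch in residual_unit_v3_x is a squeeze-and-excitation gate: global average pooling, a bottleneck of num_filter // 16 channels, and a sigmoid that rescales each channel of bn4. A small numpy sketch of that channel-wise gating; the weights are random, ReLU stands in for the PReLU activation, and the 1x1 convolutions are written as matrix multiplies:

import numpy as np

n, c, h, w = 1, 64, 7, 7
feat = np.random.rand(n, c, h, w)

squeeze = feat.mean(axis=(2, 3))                 # global average pool -> (n, c)
w1 = np.random.rand(c, c // 16)                  # se_conv1: reduce to c // 16 channels
w2 = np.random.rand(c // 16, c)                  # se_conv2: expand back to c channels
hidden = np.maximum(squeeze @ w1, 0)             # ReLU in place of the Act()/PReLU call
gate = 1.0 / (1.0 + np.exp(-(hidden @ w2)))      # sigmoid, one scale factor per channel
out = feat * gate[:, :, None, None]              # broadcast_mul over the spatial dims

print(out.shape)   # (1, 64, 7, 7)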
sectorlabs/django-postgres-extra
psqlextra/backend/migrations/operations/add_range_partition.py
PostgresAddRangePartition.__init__
python
def __init__(self, model_name: str, name: str, from_values, to_values):
    super().__init__(model_name, name)

    self.from_values = from_values
    self.to_values = to_values
Initializes new instance of :see:AddRangePartition.

Arguments:
    model_name:
        The name of the :see:PartitionedPostgresModel.

    name:
        The name to give to the new partition table.

    from_values:
        Start of the partitioning key range of values
        that need to be stored in this partition.

    to_values:
        End of the partitioning key range of values
        that need to be stored in this partition.
https://github.com/sectorlabs/django-postgres-extra/blob/e8280404d5bf9ac714d8523ca71a370b8dec1065/psqlextra/backend/migrations/operations/add_range_partition.py#L9-L33
from psqlextra.backend.migrations.state import PostgresRangePartitionState

from .partition import PostgresPartitionOperation


class PostgresAddRangePartition(PostgresPartitionOperation):
MIT License
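A sketch of how this operation would typically appear inside a Django migration file, assuming django-postgres-extra is installed; the model name, partition name, and date bounds are illustrative, and a real migration would also declare its dependencies:

from django.db import migrations
from psqlextra.backend.migrations.operations.add_range_partition import PostgresAddRangePartition


class Migration(migrations.Migration):
    operations = [
        PostgresAddRangePartition(
            model_name="order",
            name="order_2021",
            from_values="2021-01-01",
            to_values="2022-01-01",
        ),
    ]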
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/tts/__init__.py
async_setup
python
async def async_setup(hass, config):
    tts = SpeechManager(hass)

    try:
        conf = config[DOMAIN][0] if config.get(DOMAIN, []) else {}

        use_cache = conf.get(CONF_CACHE, DEFAULT_CACHE)
        cache_dir = conf.get(CONF_CACHE_DIR, DEFAULT_CACHE_DIR)
        time_memory = conf.get(CONF_TIME_MEMORY, DEFAULT_TIME_MEMORY)

        await tts.async_init_cache(use_cache, cache_dir, time_memory)
    except (HomeAssistantError, KeyError) as err:
        _LOGGER.error("Error on cache init %s", err)
        return False

    hass.http.register_view(TextToSpeechView(tts))
    hass.http.register_view(TextToSpeechUrlView(tts))

    async def async_setup_platform(p_type, p_config, disc_info=None):
        platform = await async_prepare_setup_platform(
            hass, config, DOMAIN, p_type)
        if platform is None:
            return

        try:
            if hasattr(platform, 'async_get_engine'):
                provider = await platform.async_get_engine(
                    hass, p_config)
            else:
                provider = await hass.async_add_job(
                    platform.get_engine, hass, p_config)

            if provider is None:
                _LOGGER.error("Error setting up platform %s", p_type)
                return

            tts.async_register_engine(p_type, provider, p_config)
        except Exception:
            _LOGGER.exception("Error setting up platform %s", p_type)
            return

        async def async_say_handle(service):
            entity_ids = service.data.get(ATTR_ENTITY_ID)
            message = service.data.get(ATTR_MESSAGE)
            cache = service.data.get(ATTR_CACHE)
            language = service.data.get(ATTR_LANGUAGE)
            options = service.data.get(ATTR_OPTIONS)

            try:
                url = await tts.async_get_url(
                    p_type, message, cache=cache, language=language,
                    options=options
                )
            except HomeAssistantError as err:
                _LOGGER.error("Error on init tts: %s", err)
                return

            data = {
                ATTR_MEDIA_CONTENT_ID: url,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
            }

            if entity_ids:
                data[ATTR_ENTITY_ID] = entity_ids

            await hass.services.async_call(
                DOMAIN_MP, SERVICE_PLAY_MEDIA, data, blocking=True)

        hass.services.async_register(
            DOMAIN, "{}_{}".format(p_type, SERVICE_SAY), async_say_handle,
            schema=SCHEMA_SERVICE_SAY)

    setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
                   in config_per_platform(config, DOMAIN)]

    if setup_tasks:
        await asyncio.wait(setup_tasks, loop=hass.loop)

    async def async_clear_cache_handle(service):
        await tts.async_clear_cache()

    hass.services.async_register(
        DOMAIN, SERVICE_CLEAR_CACHE, async_clear_cache_handle,
        schema=SCHEMA_SERVICE_CLEAR_CACHE)

    return True
Set up TTS.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/tts/__init__.py#L81-L169
import asyncio import ctypes import functools as ft import hashlib import io import logging import mimetypes import os import re from aiohttp import web import voluptuous as vol from homeassistant.components.http import HomeAssistantView from homeassistant.components.media_player import ( ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, MEDIA_TYPE_MUSIC, SERVICE_PLAY_MEDIA) from homeassistant.components.media_player import DOMAIN as DOMAIN_MP from homeassistant.const import ATTR_ENTITY_ID from homeassistant.core import callback from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import config_per_platform import homeassistant.helpers.config_validation as cv from homeassistant.setup import async_prepare_setup_platform REQUIREMENTS = ['mutagen==1.40.0'] _LOGGER = logging.getLogger(__name__) ATTR_CACHE = 'cache' ATTR_LANGUAGE = 'language' ATTR_MESSAGE = 'message' ATTR_OPTIONS = 'options' ATTR_PLATFORM = 'platform' CONF_CACHE = 'cache' CONF_CACHE_DIR = 'cache_dir' CONF_LANG = 'language' CONF_TIME_MEMORY = 'time_memory' DEFAULT_CACHE = True DEFAULT_CACHE_DIR = 'tts' DEFAULT_TIME_MEMORY = 300 DEPENDENCIES = ['http'] DOMAIN = 'tts' MEM_CACHE_FILENAME = 'filename' MEM_CACHE_VOICE = 'voice' SERVICE_CLEAR_CACHE = 'clear_cache' SERVICE_SAY = 'say' _RE_VOICE_FILE = re.compile( r"([a-f0-9]{40})_([^_]+)_([^_]+)_([a-z_]+)\.[a-z0-9]{3,4}") KEY_PATTERN = '{0}_{1}_{2}_{3}' PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({ vol.Optional(CONF_CACHE, default=DEFAULT_CACHE): cv.boolean, vol.Optional(CONF_CACHE_DIR, default=DEFAULT_CACHE_DIR): cv.string, vol.Optional(CONF_TIME_MEMORY, default=DEFAULT_TIME_MEMORY): vol.All(vol.Coerce(int), vol.Range(min=60, max=57600)), }) SCHEMA_SERVICE_SAY = vol.Schema({ vol.Required(ATTR_MESSAGE): cv.string, vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Optional(ATTR_CACHE): cv.boolean, vol.Optional(ATTR_LANGUAGE): cv.string, vol.Optional(ATTR_OPTIONS): dict, }) SCHEMA_SERVICE_CLEAR_CACHE = vol.Schema({})
MIT License
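SCHEMA_SERVICE_SAY in the context validates the tts.<engine>_say service payload, and async_setup registers one such service per platform via "{}_{}".format(p_type, SERVICE_SAY). A standalone voluptuous sketch of the same payload shape; plain str/bool stand in for Home Assistant's cv.string and cv.boolean helpers, and the message text is illustrative:

import voluptuous as vol

schema = vol.Schema({
    vol.Required("message"): str,
    vol.Optional("entity_id"): [str],
    vol.Optional("cache"): bool,
    vol.Optional("language"): str,
    vol.Optional("options"): dict,
})

call = schema({"message": "Door is open", "language": "en"})
print(call)
# For a platform named "google" the registered service would be "google_say"
# inside the tts domain, i.e. "tts.google_say".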
jeeftor/alfredtoday
src/lib/requests/packages/urllib3/connectionpool.py
HTTPConnectionPool._make_request
python
def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
                  **httplib_request_kw):
    self.num_requests += 1

    timeout_obj = self._get_timeout(timeout)
    timeout_obj.start_connect()
    conn.timeout = timeout_obj.connect_timeout

    try:
        self._validate_conn(conn)
    except (SocketTimeout, BaseSSLError) as e:
        self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
        raise

    if chunked:
        conn.request_chunked(method, url, **httplib_request_kw)
    else:
        conn.request(method, url, **httplib_request_kw)

    read_timeout = timeout_obj.read_timeout

    if getattr(conn, 'sock', None):
        if read_timeout == 0:
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % read_timeout)
        if read_timeout is Timeout.DEFAULT_TIMEOUT:
            conn.sock.settimeout(socket.getdefaulttimeout())
        else:
            conn.sock.settimeout(read_timeout)

    try:
        try:
            httplib_response = conn.getresponse(buffering=True)
        except TypeError:
            httplib_response = conn.getresponse()
    except (SocketTimeout, BaseSSLError, SocketError) as e:
        self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
        raise

    http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
    log.debug("\"%s %s %s\" %s %s", method, url, http_version,
              httplib_response.status, httplib_response.length)

    try:
        assert_header_parsing(httplib_response.msg)
    except HeaderParsingError as hpe:
        log.warning(
            'Failed to parse headers (url=%s): %s',
            self._absolute_url(url), hpe, exc_info=True)

    return httplib_response
Perform a request on a given urllib connection object taken from our
pool.

:param conn:
    a connection from one of our connection pools

:param timeout:
    Socket timeout in seconds for the request. This can be a
    float or integer, which will set the same timeout value for the
    socket connect and the socket read, or an instance of
    :class:`urllib3.util.Timeout`, which gives you more fine-grained
    control over your timeouts.
https://github.com/jeeftor/alfredtoday/blob/f6e2c2228caa71015e654e1fdbf552e2ca4f90ad/src/lib/requests/packages/urllib3/connectionpool.py#L327-L404
from __future__ import absolute_import import errno import logging import sys import warnings from socket import error as SocketError, timeout as SocketTimeout import socket try: from queue import LifoQueue, Empty, Full except ImportError: from Queue import LifoQueue, Empty, Full import Queue as _unused_module_Queue from .exceptions import ( ClosedPoolError, ProtocolError, EmptyPoolError, HeaderParsingError, HostChangedError, LocationValueError, MaxRetryError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, InsecureRequestWarning, NewConnectionError, ) from .packages.ssl_match_hostname import CertificateError from .packages import six from .connection import ( port_by_scheme, DummyConnection, HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, HTTPException, BaseSSLError, ) from .request import RequestMethods from .response import HTTPResponse from .util.connection import is_connection_dropped from .util.response import assert_header_parsing from .util.retry import Retry from .util.timeout import Timeout from .util.url import get_host, Url xrange = six.moves.xrange log = logging.getLogger(__name__) _Default = object() class ConnectionPool(object): scheme = None QueueCls = LifoQueue def __init__(self, host, port=None): if not host: raise LocationValueError("No host specified.") self.host = host.strip('[]') self.port = port def __str__(self): return '%s(host=%r, port=%r)' % (type(self).__name__, self.host, self.port) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() return False def close(): pass _blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) class HTTPConnectionPool(ConnectionPool, RequestMethods): scheme = 'http' ConnectionCls = HTTPConnection def __init__(self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, **conn_kw): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) self.strict = strict if not isinstance(timeout, Timeout): timeout = Timeout.from_float(timeout) if retries is None: retries = Retry.DEFAULT self.timeout = timeout self.retries = retries self.pool = self.QueueCls(maxsize) self.block = block self.proxy = _proxy self.proxy_headers = _proxy_headers or {} for _ in xrange(maxsize): self.pool.put(None) self.num_connections = 0 self.num_requests = 0 self.conn_kw = conn_kw if self.proxy: self.conn_kw.setdefault('socket_options', []) def _new_conn(self): self.num_connections += 1 log.info("Starting new HTTP connection (%d): %s", self.num_connections, self.host) conn = self.ConnectionCls(host=self.host, port=self.port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw) return conn def _get_conn(self, timeout=None): conn = None try: conn = self.pool.get(block=self.block, timeout=timeout) except AttributeError: raise ClosedPoolError(self, "Pool is closed.") except Empty: if self.block: raise EmptyPoolError(self, "Pool reached maximum size and no more " "connections are allowed.") pass if conn and is_connection_dropped(conn): log.info("Resetting dropped connection: %s", self.host) conn.close() if getattr(conn, 'auto_open', 1) == 0: conn = None return conn or self._new_conn() def _put_conn(self, conn): try: self.pool.put(conn, block=False) return except AttributeError: pass except Full: log.warning( "Connection pool is full, discarding connection: %s", self.host) if conn: conn.close() def _validate_conn(self, conn): pass def _prepare_proxy(self, conn): pass def 
_get_timeout(self, timeout): if timeout is _Default: return self.timeout.clone() if isinstance(timeout, Timeout): return timeout.clone() else: return Timeout.from_float(timeout) def _raise_timeout(self, err, url, timeout_value): if isinstance(err, SocketTimeout): raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) if hasattr(err, 'errno') and err.errno in _blocking_errnos: raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) if 'timed out' in str(err) or 'did not complete (read)' in str(err): raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
MIT License
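_make_request applies the connect timeout before conn.request() and the read timeout before conn.getresponse(), both driven by urllib3's Timeout object. A small sketch using the public API; the host is illustrative and the actual request line is left commented so nothing goes over the network:

from urllib3.util.timeout import Timeout
from urllib3 import HTTPConnectionPool

# Separate connect/read budgets instead of a single number.
timeout = Timeout(connect=2.0, read=5.0)
pool = HTTPConnectionPool("example.com", timeout=timeout, maxsize=1)

print(timeout.connect_timeout, timeout.read_timeout)   # 2.0 5.0
# pool.request("GET", "/")  # would issue the request with these per-phase timeouts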
carla-simulator/scenario_runner
srunner/autoagents/agent_wrapper.py
AgentWrapper.__init__
python
def __init__(self, agent): self._agent = agent
Set the autonomous agent
https://github.com/carla-simulator/scenario_runner/blob/e5b06cc727831ffe61ff828e0566c202ef34ae04/srunner/autoagents/agent_wrapper.py#L29-L33
from __future__ import print_function

import carla

from srunner.autoagents.sensor_interface import CallBack
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider


class AgentWrapper(object):

    _agent = None
    _sensors_list = []
MIT License
roboy/sonosco
sonosco/training/callbacks/gradient_collector.py
LayerwiseGradientNorm._store_batch_layer_grads
python
def _store_batch_layer_grads(self, model: torch.nn.Module) -> None:
    for name, param in model.named_parameters():
        if not param.requires_grad or param.grad is None:
            continue

        if not name in self._batch_layer_grads:
            self._batch_layer_grads[name] = []

        grad_norm = torch.sqrt(torch.sum(param.grad**2)).item()
        self._batch_layer_grads[name].append(grad_norm)
Store gradient norm of each layer for current batch.
https://github.com/roboy/sonosco/blob/170bc58e52229e2da33cdaf228c5dc6c88a03fb8/sonosco/training/callbacks/gradient_collector.py#L46-L59
import logging
import numpy as np
import torch
import torch.nn
from typing import Dict

from ..abstract_callback import AbstractCallback, ModelTrainer

LOGGER = logging.getLogger(__name__)


class LayerwiseGradientNorm(AbstractCallback):

    def __init__(self):
        self.layer_grads = dict()
        self._batch_layer_grads = dict()

    def __call__(self, epoch: int, step: int, performance_measures: Dict,
                 context: ModelTrainer, validation: bool = False) -> None:
        self._store_batch_layer_grads(context.model)
        if step == (len(context.train_data_loader) - 1):
            self._store_layer_grads()
            self._batch_layer_grads = dict()
BSD 3-Clause New or Revised License
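A minimal standalone sketch of the same per-layer gradient-norm bookkeeping that _store_batch_layer_grads performs, outside the callback machinery; the model and loss below are made-up placeholders, not part of sonosco:

import torch

# Made-up model and loss, just to produce gradients.
model = torch.nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()

# Same idea as _store_batch_layer_grads: L2 norm of each parameter's gradient.
batch_layer_grads = {}
for name, param in model.named_parameters():
    if not param.requires_grad or param.grad is None:
        continue
    grad_norm = torch.sqrt(torch.sum(param.grad ** 2)).item()
    batch_layer_grads.setdefault(name, []).append(grad_norm)

print(batch_layer_grads)  # e.g. {'weight': [...], 'bias': [...]}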
kieranwynn/pyquaternion
pyquaternion/quaternion.py
Quaternion._validate_number_sequence
python
def _validate_number_sequence(self, seq, n): if seq is None: return np.zeros(n) if len(seq) == n: try: l = [float(e) for e in seq] except ValueError: raise ValueError("One or more elements in sequence <{!r}> cannot be interpreted as a real number".format(seq)) else: return np.asarray(l) elif len(seq) == 0: return np.zeros(n) else: raise ValueError("Unexpected number of elements in sequence. Got: {}, Expected: {}.".format(len(seq), n))
Validate a sequence to be of a certain length and ensure it's a numpy array of floats. Raises: ValueError: Invalid length or non-numeric value
https://github.com/kieranwynn/pyquaternion/blob/99025c17bab1c55265d61add13375433b35251af/pyquaternion/quaternion.py#L138-L156
from __future__ import absolute_import, division, print_function from math import sqrt, pi, sin, cos, asin, acos, atan2, exp, log from copy import deepcopy import numpy as np class Quaternion: def __init__(self, *args, **kwargs): s = len(args) if s == 0: if kwargs: if ("scalar" in kwargs) or ("vector" in kwargs): scalar = kwargs.get("scalar", 0.0) if scalar is None: scalar = 0.0 else: scalar = float(scalar) vector = kwargs.get("vector", []) vector = self._validate_number_sequence(vector, 3) self.q = np.hstack((scalar, vector)) elif ("real" in kwargs) or ("imaginary" in kwargs): real = kwargs.get("real", 0.0) if real is None: real = 0.0 else: real = float(real) imaginary = kwargs.get("imaginary", []) imaginary = self._validate_number_sequence(imaginary, 3) self.q = np.hstack((real, imaginary)) elif ("axis" in kwargs) or ("radians" in kwargs) or ("degrees" in kwargs) or ("angle" in kwargs): try: axis = self._validate_number_sequence(kwargs["axis"], 3) except KeyError: raise ValueError( "A valid rotation 'axis' parameter must be provided to describe a meaningful rotation." ) angle = kwargs.get('radians') or self.to_radians(kwargs.get('degrees')) or kwargs.get('angle') or 0.0 self.q = Quaternion._from_axis_angle(axis, angle).q elif "array" in kwargs: self.q = self._validate_number_sequence(kwargs["array"], 4) elif "matrix" in kwargs: optional_args = {key: kwargs[key] for key in kwargs if key in ['rtol', 'atol']} self.q = Quaternion._from_matrix(kwargs["matrix"], **optional_args).q else: keys = sorted(kwargs.keys()) elements = [kwargs[kw] for kw in keys] if len(elements) == 1: r = float(elements[0]) self.q = np.array([r, 0.0, 0.0, 0.0]) else: self.q = self._validate_number_sequence(elements, 4) else: self.q = np.array([1.0, 0.0, 0.0, 0.0]) elif s == 1: if isinstance(args[0], Quaternion): self.q = args[0].q return if args[0] is None: raise TypeError("Object cannot be initialised from {}".format(type(args[0]))) try: r = float(args[0]) self.q = np.array([r, 0.0, 0.0, 0.0]) return except TypeError: pass self.q = self._validate_number_sequence(args[0], 4) return else: self.q = self._validate_number_sequence(args, 4) def __hash__(self): return hash(tuple(self.q))
MIT License
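For illustration only, a stripped-down version of the same validation logic as a free function; the name validate_number_sequence and the simplified check order are mine, not pyquaternion's:

import numpy as np

def validate_number_sequence(seq, n):
    # Simplified re-statement of Quaternion._validate_number_sequence.
    if seq is None or len(seq) == 0:
        return np.zeros(n)
    if len(seq) != n:
        raise ValueError("Unexpected number of elements in sequence. "
                         "Got: {}, Expected: {}.".format(len(seq), n))
    try:
        return np.asarray([float(e) for e in seq])
    except ValueError:
        raise ValueError("One or more elements in sequence <{!r}> cannot be "
                         "interpreted as a real number".format(seq))

print(validate_number_sequence([1, 2, 3, 4], 4))  # [1. 2. 3. 4.]
print(validate_number_sequence(None, 4))          # [0. 0. 0. 0.]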
mila-iqia/babyai
babyai/levels/verifier.py
PutNextInstr.objs_next
python
def objs_next(self): for obj_a in self.desc_move.obj_set: pos_a = obj_a.cur_pos for pos_b in self.desc_fixed.obj_poss: if pos_next_to(pos_a, pos_b): return True return False
Check if the objects are next to each other. This is used for rejection sampling.
https://github.com/mila-iqia/babyai/blob/863f3529371ba45ef0148a48b48f5ae6e61e06cc/babyai/levels/verifier.py#L379-L391
import os import numpy as np from enum import Enum from gym_minigrid.minigrid import COLOR_NAMES, DIR_TO_VEC OBJ_TYPES = ['box', 'ball', 'key', 'door'] OBJ_TYPES_NOT_DOOR = list(filter(lambda t: t != 'door', OBJ_TYPES)) LOC_NAMES = ['left', 'right', 'front', 'behind'] use_done_actions = os.environ.get('BABYAI_DONE_ACTIONS', False) def dot_product(v1, v2): return sum([i * j for i, j in zip(v1, v2)]) def pos_next_to(pos_a, pos_b): xa, ya = pos_a xb, yb = pos_b d = abs(xa - xb) + abs(ya - yb) return d == 1 class ObjDesc: def __init__(self, type, color=None, loc=None): assert type in [None, *OBJ_TYPES], type assert color in [None, *COLOR_NAMES], color assert loc in [None, *LOC_NAMES], loc self.color = color self.type = type self.loc = loc self.obj_set = [] self.obj_poss = [] def __repr__(self): return "{} {} {}".format(self.color, self.type, self.loc) def surface(self, env): self.find_matching_objs(env) assert len(self.obj_set) > 0, "no object matching description" if self.type: s = str(self.type) else: s = 'object' if self.color: s = self.color + ' ' + s if self.loc: if self.loc == 'front': s = s + ' in front of you' elif self.loc == 'behind': s = s + ' behind you' else: s = s + ' on your ' + self.loc if len(self.obj_set) > 1: s = 'a ' + s else: s = 'the ' + s return s def find_matching_objs(self, env, use_location=True): if use_location: self.obj_set = [] self.obj_poss = [] agent_room = env.room_from_pos(*env.agent_pos) for i in range(env.grid.width): for j in range(env.grid.height): cell = env.grid.get(i, j) if cell is None: continue if not use_location: already_tracked = any([cell is obj for obj in self.obj_set]) if not already_tracked: continue if self.type is not None and cell.type != self.type: continue if self.color is not None and cell.color != self.color: continue if use_location and self.loc in ["left", "right", "front", "behind"]: if not agent_room.pos_inside(i, j): continue v = (i - env.agent_pos[0], j - env.agent_pos[1]) d1 = DIR_TO_VEC[env.agent_dir] d2 = (-d1[1], d1[0]) pos_matches = { "left": dot_product(v, d2) < 0, "right": dot_product(v, d2) > 0, "front": dot_product(v, d1) > 0, "behind": dot_product(v, d1) < 0 } if not (pos_matches[self.loc]): continue if use_location: self.obj_set.append(cell) self.obj_poss.append((i, j)) return self.obj_set, self.obj_poss class Instr: def __init__(self): self.env = None def surface(self, env): raise NotImplementedError def reset_verifier(self, env): self.env = env def verify(self, action): raise NotImplementedError def update_objs_poss(self): potential_objects = ('desc', 'desc_move', 'desc_fixed') for attr in potential_objects: if hasattr(self, attr): getattr(self, attr).find_matching_objs(self.env, use_location=False) class ActionInstr(Instr): def __init__(self): super().__init__() self.lastStepMatch = False def verify(self, action): if not use_done_actions: return self.verify_action(action) if action == self.env.actions.done: if self.lastStepMatch: return 'success' return 'failure' res = self.verify_action(action) self.lastStepMatch = (res == 'success') def verify_action(self): raise NotImplementedError class OpenInstr(ActionInstr): def __init__(self, obj_desc, strict=False): super().__init__() assert obj_desc.type == 'door' self.desc = obj_desc self.strict = strict def surface(self, env): return 'open ' + self.desc.surface(env) def reset_verifier(self, env): super().reset_verifier(env) self.desc.find_matching_objs(env) def verify_action(self, action): if action != self.env.actions.toggle: return 'continue' front_cell = 
self.env.grid.get(*self.env.front_pos) for door in self.desc.obj_set: if front_cell and front_cell is door and door.is_open: return 'success' if self.strict: if front_cell and front_cell.type == 'door': return 'failure' return 'continue' class GoToInstr(ActionInstr): def __init__(self, obj_desc): super().__init__() self.desc = obj_desc def surface(self, env): return 'go to ' + self.desc.surface(env) def reset_verifier(self, env): super().reset_verifier(env) self.desc.find_matching_objs(env) def verify_action(self, action): for pos in self.desc.obj_poss: if np.array_equal(pos, self.env.front_pos): return 'success' return 'continue' class PickupInstr(ActionInstr): def __init__(self, obj_desc, strict=False): super().__init__() assert obj_desc.type != 'door' self.desc = obj_desc self.strict = strict def surface(self, env): return 'pick up ' + self.desc.surface(env) def reset_verifier(self, env): super().reset_verifier(env) self.preCarrying = None self.desc.find_matching_objs(env) def verify_action(self, action): preCarrying = self.preCarrying self.preCarrying = self.env.carrying if action != self.env.actions.pickup: return 'continue' for obj in self.desc.obj_set: if preCarrying is None and self.env.carrying is obj: return 'success' if self.strict: if self.env.carrying: return 'failure' self.preCarrying = self.env.carrying return 'continue' class PutNextInstr(ActionInstr): def __init__(self, obj_move, obj_fixed, strict=False): super().__init__() assert obj_move.type != 'door' self.desc_move = obj_move self.desc_fixed = obj_fixed self.strict = strict def surface(self, env): return 'put ' + self.desc_move.surface(env) + ' next to ' + self.desc_fixed.surface(env) def reset_verifier(self, env): super().reset_verifier(env) self.preCarrying = None self.desc_move.find_matching_objs(env) self.desc_fixed.find_matching_objs(env)
BSD 3-Clause New or Revised License
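A tiny worked example of the adjacency test that objs_next relies on (pos_next_to, defined in the context above): two cells count as next to each other only when their Manhattan distance is exactly 1, so diagonal neighbours do not qualify.

def pos_next_to(pos_a, pos_b):
    # Manhattan distance of exactly 1 means the cells share an edge.
    xa, ya = pos_a
    xb, yb = pos_b
    return abs(xa - xb) + abs(ya - yb) == 1

print(pos_next_to((2, 3), (2, 4)))  # True  -> vertically adjacent
print(pos_next_to((2, 3), (3, 4)))  # False -> diagonal, distance 2
print(pos_next_to((2, 3), (2, 3)))  # False -> same cell, distance 0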
cogent3/cogent3
src/cogent3/maths/stats/ks.py
pkolmogorov2x
python
def pkolmogorov2x(statistic, n): k = int(n * statistic) + 1 m = 2 * k - 1 h = k - n * statistic H = ones(m ** 2, "d") for i in range(m): for j in range(m): if i - j + 1 < 0: H[i * m + j] = 0 for i in range(m): H[i * m] -= h ** (i + 1) H[(m - 1) * m + i] -= h ** (m - i) H[(m - 1) * m] += [0, (2 * h - 1) ** m][2 * h - 1 > 0] for i in range(m): for j in range(m): if i - j + 1 > 0: for g in range(1, i - j + 2): H[i * m + j] /= g Q = ravel(mpower(reshape(H, (m, m)), n)) s = Q[(k - 1) * m + k - 1] for i in range(1, n + 1): s *= i / n return s
Probability function for Kolmogorov's distribution.
https://github.com/cogent3/cogent3/blob/3d98bddc0aef2bf7fea21b9a89de76b01f3d2da8/src/cogent3/maths/stats/ks.py#L71-L95
from numpy import arange, array, asarray from numpy import dot as matrixmultiply from numpy import ( exp, fabs, floor, log, ones, pi, ravel, reshape, sqrt, sum, zeros, ) from cogent3.maths.stats.special import combinations __author__ = "Gavin Huttley" __copyright__ = "Copyright 2007-2021, The Cogent Project" __credits__ = ["Gavin Huttley"] __license__ = "BSD-3" __version__ = "2021.10.12a1" __maintainer__ = "Gavin Huttley" __email__ = "gavin.huttley@anu.edu.au" __status__ = "Production" PIO4 = pi / 4 PIO2 = pi / 2 INVSQRT2PI = 1 / sqrt(2 * pi) def mpower(A, exponent): new = A for i in range(1, exponent): new = matrixmultiply(new, A) return new def pkolmogorov1x(statistic, n): statistic = asarray(statistic) if statistic <= 0: return 0.0 if statistic >= 1: return 1.0 to = floor(n * (1 - statistic)) + 1 j = arange(0, to) coeffs = asarray([log(combinations(n, i)) for i in j]) p = sum( exp( coeffs + (n - j) * log(1 - statistic - j / n) + (j - 1) * (log(statistic + j / n)) ) ) return 1 - statistic * p
BSD 3-Clause New or Revised License
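A hedged usage sketch: pkolmogorov2x(d, n) evaluates the exact two-sided Kolmogorov-Smirnov CDF P(D_n <= d) via matrix powers. If SciPy >= 1.4 is available, scipy.stats.kstwo gives an independent value to compare against; the numbers below are arbitrary.

from cogent3.maths.stats.ks import pkolmogorov2x  # assumes cogent3 is installed
from scipy.stats import kstwo                     # assumes SciPy >= 1.4

d, n = 0.3, 20
print(pkolmogorov2x(d, n))  # matrix-power evaluation from this module
print(kstwo.cdf(d, n))      # SciPy's exact two-sided KS distribution, for comparison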
jdasoftwaregroup/kartothek
kartothek/io_components/cube/copy.py
get_datasets_to_copy
python
def get_datasets_to_copy( cube: Cube, src_store: Union[Callable[[], KeyValueStore], KeyValueStore], tgt_store: Union[Callable[[], KeyValueStore], KeyValueStore], overwrite: bool, datasets: Optional[Union[Iterable[str], Dict[str, DatasetMetadata]]] = None, ) -> Dict[str, DatasetMetadata]: if not isinstance(datasets, dict): new_datasets = discover_datasets_unchecked( uuid_prefix=cube.uuid_prefix, store=src_store, filter_ktk_cube_dataset_ids=datasets, ) else: new_datasets = datasets if datasets is None: if not new_datasets: raise RuntimeError("{} not found in source store".format(cube)) else: unknown_datasets = set(datasets) - set(new_datasets) if unknown_datasets: raise RuntimeError( "{cube}, datasets {datasets} do not exist in source store".format( cube=cube, datasets=unknown_datasets ) ) existing_datasets = discover_datasets_unchecked(cube.uuid_prefix, tgt_store) if not overwrite: for ktk_cube_dataset_id in sorted(new_datasets.keys()): if ktk_cube_dataset_id in existing_datasets: raise RuntimeError( 'Dataset "{uuid}" exists in target store but overwrite was set to False'.format( uuid=new_datasets[ktk_cube_dataset_id].uuid ) ) all_datasets = copy(existing_datasets) all_datasets.update(new_datasets) check_datasets(all_datasets, cube) return new_datasets
Determine all dataset names of a given cube that should be copied and apply additional consistency checks. Copying only a specific set of datasets is possible by providing a list of dataset names via the parameter `datasets`. Parameters ---------- cube: Cube specification. src_store: Source KV store. tgt_store: Target KV store. overwrite: Whether possibly existing datasets in the target store should be overwritten. datasets: Datasets to copy, must all be part of the cube. May be either the result of :func:`~kartothek.api.discover.discover_datasets`, an iterable of Ktk_cube dataset IDs or ``None`` (in which case the entire cube will be copied). Returns ------- all_datasets: Dict[str, DatasetMetadata] All datasets that should be copied.
https://github.com/jdasoftwaregroup/kartothek/blob/6bc7e868435e98cbda0b695900f29d1ff7d49110/kartothek/io_components/cube/copy.py#L19-L85
from __future__ import absolute_import from copy import copy from typing import Callable, Dict, Iterable, Optional, Union from simplekv import KeyValueStore from kartothek.api.discover import check_datasets, discover_datasets_unchecked from kartothek.core.cube.cube import Cube from kartothek.core.dataset import DatasetMetadata from kartothek.utils.ktk_adapters import get_dataset_keys __all__ = ( "get_copy_keys", "get_datasets_to_copy", )
MIT License
mozilla/remo
remo/voting/tasks.py
create_rotm_poll
python
def create_rotm_poll(): from remo.voting.models import Poll, RangePoll, RangePollChoice create_poll_flag = True poll_name = 'Rep of the month for {0}'.format(number2month(now().month)) start = (datetime.combine(rotm_nomination_end_date(), datetime.min.time()) + timedelta(days=1)).replace(tzinfo=pytz.UTC) end = start + timedelta(days=ROTM_VOTING_DAYS) rotm_poll = Poll.objects.filter(name=poll_name, start=start, end=end) if not now().date() > rotm_nomination_end_date() or rotm_poll.exists(): create_poll_flag = False nominees = User.objects.filter(userprofile__registration_complete=True, userprofile__is_rotm_nominee=True) if ((nominees and create_poll_flag) or waffle.switch_is_active('enable_rotm_tasks')): remobot = User.objects.get(username='remobot') description = 'Automated vote for the Rep of this month.' mentor_group = Group.objects.get(name='Mentor') with transaction.atomic(): poll = Poll.objects.create(name=poll_name, description=description, valid_groups=mentor_group, start=start, end=end, created_by=remobot) range_poll = RangePoll.objects.create(poll=poll, name='Rep of the month nominees') for nominee in nominees: RangePollChoice.objects.create(range_poll=range_poll, nominee=nominee)
Create a poll for the Rep of the month nominees. This task creates a range poll after the first days of the month, during which mentors nominated mentees through their user profiles. The poll lasts 14 days.
https://github.com/mozilla/remo/blob/751c266a09ea560220029d95eb54359564f93d5e/remo/voting/tasks.py#L109-L148
from datetime import date, datetime, timedelta from operator import or_ from django.conf import settings from django.contrib.auth.models import Group, User from django.contrib.contenttypes.models import ContentType from django.core.mail import send_mail from django.db import transaction from django.template.loader import render_to_string from django.utils.timezone import make_aware, now import pytz import waffle from remo.base.tasks import send_remo_mail from remo.base.utils import get_date, number2month from remo.celery import app from remo.dashboard.models import ActionItem EXTEND_VOTING_PERIOD = 48 * 3600 NOTIFICATION_INTERVAL = 24 * 3600 ROTM_VOTING_DAYS = 14 def rotm_nomination_end_date(): return date(now().year, now().month, 10) @app.task def send_voting_mail(voting_id, subject, email_template): from remo.voting.models import Poll poll = Poll.objects.get(pk=voting_id) data = {'SITE_URL': settings.SITE_URL, 'FROM_EMAIL': settings.FROM_EMAIL, 'poll': poll} if poll.automated_poll: message = render_to_string(email_template, data) send_mail(subject, message, settings.FROM_EMAIL, [settings.REPS_REVIEW_ALIAS]) else: user_list = User.objects.filter(groups=poll.valid_groups).exclude(username='remobot') for user in user_list: ctx_data = {'user': user, 'userprofile': user.userprofile} ctx_data.update(data) message = render_to_string(email_template, ctx_data) send_mail(subject, message, settings.FROM_EMAIL, [user.email]) @app.task def extend_voting_period(): from remo.voting.models import Poll tomorrow = get_date(days=1) review_count = User.objects.filter(groups__name='Review').count() query_start = make_aware(datetime.combine(tomorrow, datetime.min.time()), pytz.UTC) query_end = make_aware(datetime.combine(tomorrow, datetime.max.time()), pytz.UTC) polls = Poll.objects.filter(end__range=[query_start, query_end]) for poll in polls: if not poll.is_extended: budget_poll = poll.radio_polls.get(question='Budget Approval') majority = reduce(or_, map(lambda x: x.votes > review_count / 2, budget_poll.answers.all())) if not majority: poll.end += timedelta(seconds=EXTEND_VOTING_PERIOD) poll.save() subject = '[Urgent] Voting extended for {0}'.format(poll.name) recipients = (User.objects.filter(groups=poll.valid_groups) .exclude(pk__in=poll.users_voted.all()) .values_list('id', flat=True)) ctx_data = {'poll': poll} template = 'emails/voting_vote_reminder.jinja' send_remo_mail.delay(subject=subject, recipients_list=recipients, email_template=template, data=ctx_data) @app.task def resolve_action_items(): from remo.voting.models import Poll start = datetime.combine(get_date(days=-1), datetime.min.time()) end = datetime.combine(get_date(days=-1), datetime.max.time()) polls = Poll.objects.filter(end__range=[start, end]) action_model = ContentType.objects.get_for_model(Poll) items = ActionItem.objects.filter(content_type=action_model, object_id__in=polls) items.update(resolved=True) @app.task
BSD 3-Clause New or Revised License
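To make the scheduling concrete, this is the voting window that create_rotm_poll computes, extracted into a standalone sketch; the month below is made up, while in the task it comes from rotm_nomination_end_date().

from datetime import date, datetime, timedelta
import pytz

ROTM_VOTING_DAYS = 14
nomination_end = date(2024, 5, 10)  # stand-in for rotm_nomination_end_date()

# Voting opens the day after nominations close and stays open for 14 days.
start = (datetime.combine(nomination_end, datetime.min.time())
         + timedelta(days=1)).replace(tzinfo=pytz.UTC)
end = start + timedelta(days=ROTM_VOTING_DAYS)
print(start.date(), "->", end.date())  # 2024-05-11 -> 2024-05-25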
oneiroe/pysimpleautomata
PySimpleAutomata/NFA.py
rename_nfa_states
python
def rename_nfa_states(nfa: dict, suffix: str): conversion_dict = {} new_states = set() new_initials = set() new_accepting = set() for state in nfa['states']: conversion_dict[state] = '' + suffix + state new_states.add('' + suffix + state) if state in nfa['initial_states']: new_initials.add('' + suffix + state) if state in nfa['accepting_states']: new_accepting.add('' + suffix + state) nfa['states'] = new_states nfa['initial_states'] = new_initials nfa['accepting_states'] = new_accepting new_transitions = {} for transition in nfa['transitions']: new_arrival = set() for arrival in nfa['transitions'][transition]: new_arrival.add(conversion_dict[arrival]) new_transitions[ conversion_dict[transition[0]], transition[1]] = new_arrival nfa['transitions'] = new_transitions return nfa
Side effect on input! Renames all the states of the NFA by adding a **suffix**. It is a utility function used to avoid automata having states with names in common. Avoid suffixes that can lead to special names like "as", "and", ... :param dict nfa: input NFA. :param str suffix: string to be added at the beginning of each state name.
https://github.com/oneiroe/pysimpleautomata/blob/0f9f2705fd8ddd5d8118bc31552a640f5d00c359/PySimpleAutomata/NFA.py#L337-L373
from PySimpleAutomata import DFA def nfa_intersection(nfa_1: dict, nfa_2: dict) -> dict: intersection = { 'alphabet': nfa_1['alphabet'].intersection(nfa_2['alphabet']), 'states': set(), 'initial_states': set(), 'accepting_states': set(), 'transitions': dict() } for init_1 in nfa_1['initial_states']: for init_2 in nfa_2['initial_states']: intersection['initial_states'].add((init_1, init_2)) intersection['states'].update(intersection['initial_states']) boundary = set() boundary.update(intersection['initial_states']) while boundary: (state_nfa_1, state_nfa_2) = boundary.pop() if state_nfa_1 in nfa_1['accepting_states'] and state_nfa_2 in nfa_2['accepting_states']: intersection['accepting_states'].add((state_nfa_1, state_nfa_2)) for a in intersection['alphabet']: if (state_nfa_1, a) not in nfa_1['transitions'] or (state_nfa_2, a) not in nfa_2['transitions']: continue s1 = nfa_1['transitions'][state_nfa_1, a] s2 = nfa_2['transitions'][state_nfa_2, a] for destination_1 in s1: for destination_2 in s2: next_state = (destination_1, destination_2) if next_state not in intersection['states']: intersection['states'].add(next_state) boundary.add(next_state) intersection['transitions'].setdefault( ((state_nfa_1, state_nfa_2), a), set()).add(next_state) if destination_1 in nfa_1['accepting_states'] and destination_2 in nfa_2['accepting_states']: intersection['accepting_states'].add(next_state) return intersection def nfa_union(nfa_1: dict, nfa_2: dict) -> dict: union = { 'alphabet': nfa_1['alphabet'].union(nfa_2['alphabet']), 'states': nfa_1['states'].union(nfa_2['states']), 'initial_states': nfa_1['initial_states'].union(nfa_2['initial_states']), 'accepting_states': nfa_1['accepting_states'].union(nfa_2['accepting_states']), 'transitions': nfa_1['transitions'].copy()} for trans in nfa_2['transitions']: for elem in nfa_2['transitions'][trans]: union['transitions'].setdefault(trans, set()).add(elem) return union def nfa_determinization(nfa: dict) -> dict: def state_name(s): return str(set(sorted(s))) dfa = { 'alphabet': nfa['alphabet'].copy(), 'initial_state': None, 'states': set(), 'accepting_states': set(), 'transitions': dict() } if len(nfa['initial_states']) > 0: dfa['initial_state'] = state_name(nfa['initial_states']) dfa['states'].add(state_name(nfa['initial_states'])) sets_states = list() sets_queue = list() sets_queue.append(nfa['initial_states']) sets_states.append(nfa['initial_states']) if len(sets_states[0].intersection(nfa['accepting_states'])) > 0: dfa['accepting_states'].add(state_name(sets_states[0])) while sets_queue: current_set = sets_queue.pop(0) for a in dfa['alphabet']: next_set = set() for state in current_set: if (state, a) in nfa['transitions']: for next_state in nfa['transitions'][state, a]: next_set.add(next_state) if len(next_set) == 0: continue if next_set not in sets_states: sets_states.append(next_set) sets_queue.append(next_set) dfa['states'].add(state_name(next_set)) if next_set.intersection(nfa['accepting_states']): dfa['accepting_states'].add(state_name(next_set)) dfa['transitions'][state_name(current_set), a] = state_name(next_set) return dfa def nfa_complementation(nfa: dict) -> dict: determinized_nfa = nfa_determinization(nfa) return DFA.dfa_complementation(determinized_nfa) def nfa_nonemptiness_check(nfa: dict) -> bool: queue = list() visited = set() for state in nfa['initial_states']: visited.add(state) queue.append(state) while queue: state = queue.pop(0) visited.add(state) for a in nfa['alphabet']: if (state, a) in nfa['transitions']: for next_state in 
nfa['transitions'][state, a]: if next_state in nfa['accepting_states']: return True if next_state not in visited: queue.append(next_state) return False def nfa_nonuniversality_check(nfa: dict) -> bool: complemented_nfa = nfa_complementation(nfa) return DFA.dfa_nonemptiness_check(complemented_nfa) def nfa_interestingness_check(nfa: dict) -> bool: return nfa_nonemptiness_check(nfa) and nfa_nonuniversality_check(nfa) def nfa_word_acceptance(nfa: dict, word: list) -> bool: current_level = set() current_level = current_level.union(nfa['initial_states']) next_level = set() for action in word: for state in current_level: if (state, action) in nfa['transitions']: next_level.update(nfa['transitions'][state, action]) if len(next_level) < 1: return False current_level = next_level next_level = set() if current_level.intersection(nfa['accepting_states']): return True else: return False
MIT License
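A small usage example of rename_nfa_states on the dict-based NFA representation used throughout this module; the automaton below is made up.

from PySimpleAutomata.NFA import rename_nfa_states

nfa = {
    'alphabet': {'a'},
    'states': {'s0', 's1'},
    'initial_states': {'s0'},
    'accepting_states': {'s1'},
    'transitions': {('s0', 'a'): {'s1'}},
}

renamed = rename_nfa_states(nfa, 'p1_')   # mutates nfa in place and returns it
print(renamed['states'])          # {'p1_s0', 'p1_s1'}
print(renamed['initial_states'])  # {'p1_s0'}
print(renamed['transitions'])     # {('p1_s0', 'a'): {'p1_s1'}}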
wkentaro-archive/conque.vim
autoload/conque_term/conque.py
Conque.csi_cursor_down
python
def csi_cursor_down(self, csi): self.l = self.bound(self.l + csi['val'], self.top, self.bottom) self.color_changes = {}
Process the move cursor down escape sequence.
https://github.com/wkentaro-archive/conque.vim/blob/10c4ba47769d8ec79243aa5cc869a509856503bb/autoload/conque_term/conque.py#L791-L795
import vim import re import math class Conque: screen = None proc = None columns = 80 lines = 24 working_columns = 80 working_lines = 24 top = 1 bottom = 24 l = 1 c = 1 autowrap = True absolute_coords = True tabstops = [] enable_colors = True color_changes = {} color_history = {} highlight_groups = {} color_pruning = True unwrap_tables = True wrap_cursor = False cursor_set = False character_set = 'ascii' read_count = 0 input_buffer = [] def open(self): command = vim.eval('command') options = vim.eval('options') self.screen = ConqueScreen() self.columns = vim.current.window.width self.lines = vim.current.window.height self.working_columns = vim.current.window.width self.working_lines = vim.current.window.height self.bottom = vim.current.window.height if int(options['offset']) > 0: self.l = int(options['offset']) self.enable_colors = options['color'] and not CONQUE_FAST_MODE self.init_tabstops() self.proc = ConqueSubprocess() self.proc.open(command, {'TERM': options['TERM'], 'CONQUE': '1', 'LINES': str(self.lines), 'COLUMNS': str(self.columns)}) self.update_window_size(True) def write(self, input, set_cursor=True, read=True): self.proc.write(input) if read: self.read(1, set_cursor) def write_ord(self, input, set_cursor=True, read=True): if CONQUE_PYTHON_VERSION == 2: self.write(unichr(input), set_cursor, read) else: self.write(chr(input), set_cursor, read) def write_expr(self, expr, set_cursor=True, read=True): if CONQUE_PYTHON_VERSION == 2: try: val = vim.eval(expr) self.write(unicode(val, CONQUE_VIM_ENCODING, 'ignore'), set_cursor, read) except: pass else: try: self.write(vim.eval(expr), set_cursor, read) except: pass def write_latin1(self, input, set_cursor=True, read=True): if CONQUE_PYTHON_VERSION == 2: try: input_unicode = input.decode('latin-1', 'ignore') self.write(input_unicode.encode('utf-8', 'ignore'), set_cursor, read) except: return else: self.write(input, set_cursor, read) def write_buffered_ord(self, chr): self.input_buffer.append(chr) def read(self, timeout=1, set_cursor=True, return_output=False, update_buffer=True): output = '' try: output = self.proc.read(timeout) if output == '': return if not update_buffer: return output output = output.replace(chr(0), '') chunks = CONQUE_SEQ_REGEX.split(output) if len(chunks) == 1: self.plain_text(chunks[0]) else: for s in chunks: if s == '': continue if CONQUE_SEQ_REGEX_CTL.match(s[0]): nr = ord(s[0]) if nr in CONQUE_CTL: getattr(self, 'ctl_' + CONQUE_CTL[nr])() else: pass elif CONQUE_SEQ_REGEX_CSI.match(s): if s[-1] in CONQUE_ESCAPE: csi = self.parse_csi(s[2:]) getattr(self, 'csi_' + CONQUE_ESCAPE[s[-1]])(csi) else: pass elif CONQUE_SEQ_REGEX_TITLE.match(s): self.change_title(s[2], s[4:-1]) elif CONQUE_SEQ_REGEX_HASH.match(s): if s[-1] in CONQUE_ESCAPE_HASH: getattr(self, 'hash_' + CONQUE_ESCAPE_HASH[s[-1]])() else: pass elif CONQUE_SEQ_REGEX_CHAR.match(s): if s[-1] in CONQUE_ESCAPE_CHARSET: getattr(self, 'charset_' + CONQUE_ESCAPE_CHARSET[s[-1]])() else: pass elif CONQUE_SEQ_REGEX_ESC.match(s): if s[-1] in CONQUE_ESCAPE_PLAIN: getattr(self, 'esc_' + CONQUE_ESCAPE_PLAIN[s[-1]])() else: pass else: self.plain_text(s) if set_cursor: self.screen.set_cursor(self.l, self.c) self.cursor_set = False except: pass if return_output: if CONQUE_PYTHON_VERSION == 3: return output else: return output.encode(CONQUE_VIM_ENCODING, 'replace') def auto_read(self): if len(self.input_buffer): for chr in self.input_buffer: self.write_ord(chr, set_cursor=False, read=False) self.input_buffer = [] self.read(1) if self.read_count % 32 == 0: if not 
self.proc.is_alive(): vim.command('call conque_term#get_instance().close()') return if self.read_count > 512: self.read_count = 0 if self.enable_colors and self.color_pruning: self.prune_colors() self.read_count += 1 self.read(1) if self.c == 1: vim.command('call feedkeys("\<right>\<left>", "n")') else: vim.command('call feedkeys("\<left>\<right>", "n")') if self.cursor_set: return if not CONQUE_FAST_MODE: self.update_window_size() try: self.set_cursor(self.l, self.c) except: pass self.cursor_set = True def plain_text(self, input): if self.character_set == 'graphics': old_input = input input = u('') for i in range(0, len(old_input)): chrd = ord(old_input[i]) try: if chrd > 255: input = input + old_input[i] else: input = input + uchr(CONQUE_GRAPHICS_SET[chrd]) except: pass current_line = self.screen[self.l] if len(current_line) < self.c: current_line = current_line + ' ' * (self.c - len(current_line)) if self.c + len(input) - 1 > self.working_columns: if self.unwrap_tables and CONQUE_TABLE_OUTPUT.match(input): self.screen[self.l] = current_line[:self.c - 1] + input + current_line[self.c + len(input) - 1:] self.apply_color(self.c, self.c + len(input)) self.c += len(input) return diff = self.c + len(input) - self.working_columns - 1 if self.autowrap: self.screen[self.l] = current_line[:self.c - 1] + input[:-1 * diff] self.apply_color(self.c, self.working_columns) self.ctl_nl() self.ctl_cr() remaining = input[-1 * diff:] self.plain_text(remaining) else: self.screen[self.l] = current_line[:self.c - 1] + input[:-1 * diff - 1] + input[-1] self.apply_color(self.c, self.working_columns) self.c = self.working_columns else: self.screen[self.l] = current_line[:self.c - 1] + input + current_line[self.c + len(input) - 1:] self.apply_color(self.c, self.c + len(input)) self.c += len(input) def apply_color(self, start, end, line=0): if not self.enable_colors: return if line: buffer_line = line else: buffer_line = self.get_buffer_line(self.l) to_del = [] if buffer_line in self.color_history: for i in range(len(self.color_history[buffer_line])): syn = self.color_history[buffer_line][i] if syn['start'] >= start and syn['start'] < end: vim.command('syn clear ' + syn['name']) to_del.append(i) if syn['end'] > end: self.exec_highlight(buffer_line, end, syn['end'], syn['highlight']) elif syn['end'] > start and syn['end'] <= end: vim.command('syn clear ' + syn['name']) to_del.append(i) if syn['start'] < start: self.exec_highlight(buffer_line, syn['start'], start, syn['highlight']) if len(to_del) > 0: to_del.reverse() for di in to_del: del self.color_history[buffer_line][di] if len(self.color_changes) == 0: return highlight = '' for attr in self.color_changes.keys(): highlight = highlight + ' ' + attr + '=' + self.color_changes[attr] self.exec_highlight(buffer_line, start, end, highlight) def exec_highlight(self, buffer_line, start, end, highlight): syntax_name = 'ConqueHighLightAt_%d_%d_%d_%d' % (self.proc.pid, self.l, start, len(self.color_history) + 1) syntax_options = 'contains=ALLBUT,ConqueString,MySQLString,MySQLKeyword oneline' syntax_region = 'syntax match %s /\%%%dl\%%>%dc.\{%d}\%%<%dc/ %s' % (syntax_name, buffer_line, start - 1, end - start, end + 1, syntax_options) hgroup = 'ConqueHL_%d' % (abs(hash(highlight))) if hgroup not in self.highlight_groups: syntax_group = 'highlight %s %s' % (hgroup, highlight) self.highlight_groups[hgroup] = hgroup vim.command(syntax_group) syntax_highlight = 'highlight link %s %s' % (syntax_name, self.highlight_groups[hgroup]) vim.command(syntax_region) 
vim.command(syntax_highlight) if not buffer_line in self.color_history: self.color_history[buffer_line] = [] self.color_history[buffer_line].append({'name': syntax_name, 'start': start, 'end': end, 'highlight': highlight}) def prune_colors(self): buffer_line = self.get_buffer_line(self.l) ks = list(self.color_history.keys()) for line in ks: if line < buffer_line - CONQUE_MAX_SYNTAX_LINES: for syn in self.color_history[line]: vim.command('syn clear ' + syn['name']) del self.color_history[line] def ctl_nl(self): if self.lines != self.working_lines and self.l == self.bottom: del self.screen[self.top] self.screen.insert(self.bottom, '') elif self.l == self.bottom: self.screen.append('') else: self.l += 1 self.color_changes = {} def ctl_cr(self): self.c = 1 self.color_changes = {} def ctl_bs(self): if self.c > 1: self.c += -1 def ctl_soh(self): pass def ctl_stx(self): pass def ctl_bel(self): vim.command('call conque_term#bell()') def ctl_tab(self): ts = self.working_columns for i in range(self.c, len(self.tabstops)): if self.tabstops[i]: ts = i + 1 break self.c = ts def ctl_so(self): self.character_set = 'graphics' def ctl_si(self): self.character_set = 'ascii' def csi_font(self, csi): if not self.enable_colors: return if len(csi['vals']) == 0: csi['vals'] = [0] if len(csi['vals']) == 3 and csi['vals'][0] == 38 and csi['vals'][1] == 5: self.color_changes['ctermfg'] = str(csi['vals'][2]) self.color_changes['guifg'] = '#' + self.xterm_to_rgb(csi['vals'][2]) elif len(csi['vals']) == 3 and csi['vals'][0] == 48 and csi['vals'][1] == 5: self.color_changes['ctermbg'] = str(csi['vals'][2]) self.color_changes['guibg'] = '#' + self.xterm_to_rgb(csi['vals'][2]) else: for val in csi['vals']: if val in CONQUE_FONT: if CONQUE_FONT[val]['normal'] and len(self.color_changes) == 0: continue elif CONQUE_FONT[val]['normal']: self.color_changes = {} else: for attr in CONQUE_FONT[val]['attributes'].keys(): if attr in self.color_changes and (attr == 'cterm' or attr == 'gui'): self.color_changes[attr] += ',' + CONQUE_FONT[val]['attributes'][attr] else: self.color_changes[attr] = CONQUE_FONT[val]['attributes'][attr] def csi_clear_line(self, csi): if len(csi['vals']) == 0: csi['val'] = 0 if csi['val'] == 0: self.screen[self.l] = self.screen[self.l][0:self.c - 1] elif csi['val'] == 1: self.screen[self.l] = ' ' * (self.c) + self.screen[self.l][self.c:] elif csi['val'] == 2: self.screen[self.l] = '' if csi['val'] == 2 or (csi['val'] == 0 and self.c == 1): buffer_line = self.get_buffer_line(self.l) if buffer_line in self.color_history: for syn in self.color_history[buffer_line]: vim.command('syn clear ' + syn['name']) def csi_cursor_right(self, csi): if csi['val'] == 0: csi['val'] = 1 if self.wrap_cursor and self.c + csi['val'] > self.working_columns: self.l += int(math.floor((self.c + csi['val']) / self.working_columns)) self.c = (self.c + csi['val']) % self.working_columns return self.c = self.bound(self.c + csi['val'], 1, self.working_columns) def csi_cursor_left(self, csi): if csi['val'] == 0: csi['val'] = 1 if self.wrap_cursor and csi['val'] >= self.c: self.l += int(math.floor((self.c - csi['val']) / self.working_columns)) self.c = self.working_columns - (csi['val'] - self.c) % self.working_columns return self.c = self.bound(self.c - csi['val'], 1, self.working_columns) def csi_cursor_to_column(self, csi): self.c = self.bound(csi['val'], 1, self.working_columns) def csi_cursor_up(self, csi): self.l = self.bound(self.l - csi['val'], self.top, self.bottom) self.color_changes = {}
MIT License
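csi_cursor_down clamps the new row to the scroll region via self.bound(), which is not shown in this excerpt; it presumably behaves like an ordinary clamp. A hypothetical standalone version:

def bound(val, lower, upper):
    # Clamp val into [lower, upper]; assumed behaviour of Conque.bound().
    return max(lower, min(upper, val))

# Moving 5 rows down from line 22 in a 24-line scroll region stops at the bottom margin.
top, bottom, current_line = 1, 24, 22
print(bound(current_line + 5, top, bottom))  # 24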
douban/kenshin
rurouni/conf.py
get_parser
python
def get_parser(usage="%prog [options] <start|stop|status>"): parser = OptionParser(usage=usage) parser.add_option( "--debug", action="store_true", help="Run in the foreground, log to stdout") parser.add_option( "--nodaemon", action="store_true", help='Run in the foreground') parser.add_option( "--pidfile", default=None, help='Write pid to the given file') parser.add_option( "--umask", default=None, help="Use the given umask when creating files") parser.add_option( '--config', default=None, help="Use the given config file") parser.add_option( "--instance", default="a", help="Manage a specific rurouni instance") return parser
Create a parser for command line options.
https://github.com/douban/kenshin/blob/bb5dfa05f5d10b4bdd1e0403c9e7d3c7e4399fcb/rurouni/conf.py#L216-L237
import os import sys import errno from os.path import join, normpath, expanduser, dirname, exists, isdir from ConfigParser import ConfigParser from optparse import OptionParser from twisted.python import usage from rurouni.exceptions import RurouniException, ConfigException from rurouni import log defaults = dict( CACHE_QUERY_PORT = '7002', CACHE_QUERY_INTERFACE = '0.0.0.0', LINE_RECEIVER_PORT = '2003', LINE_RECEIVER_INTERFACE = '0.0.0.0', PICKLE_RECEIVER_PORT = '2004', PICKLE_RECEIVER_INTERFACE = '0.0.0.0', DEFAULT_WAIT_TIME = 10, RUROUNI_METRIC_INTERVAL = 60, RUROUNI_METRIC = 'rurouni', LOG_UPDATES = True, CONF_DIR = None, LOCAL_DATA_DIR = None, LOCAL_LINK_DIR = None, PID_DIR = None, MAX_CREATES_PER_MINUTE = float('inf'), NUM_ALL_INSTANCE = 1, ) class Settings(dict): __getattr__ = dict.__getitem__ def __init__(self): dict.__init__(self) self.update(defaults) def readFrom(self, path, section): parser = ConfigParser() if not parser.read(path): raise RurouniException("Failed to read config: %s" % path) if not parser.has_section(section): return for key, val in parser.items(section): key = key.upper() val_typ = type(defaults[key]) if key in defaults else str if val_typ is list: val = [v.strip() for v in val.split(',')] elif val_typ is bool: val = parser.getboolean(section, key) else: try: val = int(val) except: try: val = float(val) except: pass self[key] = val settings = Settings() class OrderedConfigParser(ConfigParser): def read(self, path): if not os.access(path, os.R_OK): raise RurouniException( "Missing config file or wrong perm on %s" % path) return ConfigParser.read(self, path) class RurouniOptions(usage.Options): optFlags = [ ["debug", "", "run in debug mode."], ] optParameters = [ ['config', 'c', None, 'use the given config file.'], ['instance', '', 'a', 'manage a specific rurouni instance.'], ['logdir', '', None, 'write logs to the given directory.'], ] def postOptions(self): global settings pidfile = self.parent['pidfile'] if pidfile.endswith('twistd.pid'): pidfile = None self['pidfile'] = pidfile if not self.parent.has_key('umask') or self.parent['umask'] is None: self.parent['umask'] = 022 program = self.parent.subCommand settings['program'] = program program_settings = read_config(program, self) settings.update(program_settings) variables = ['STORAGE_DIR', 'LOCAL_DATA_DIR', 'LOCAL_LINK_DIR', 'PID_DIR', 'LOG_DIR', 'pidfile', 'INDEX_FILE'] for var in variables: settings[var] = normpath(expanduser(settings[var])) storage_schemas = join(settings['CONF_DIR'], 'storage-schemas.conf') if not exists(storage_schemas): print 'Error missing config %s' % storage_schemas sys.exit(1) self.parent['pidfile'] = settings['pidfile'] if not 'action' in self: self['action'] = 'start' self.handleAction() if self['debug']: log.setDebugEnabled(True) else: if self.parent.get('syslog', None): log.logToSyslog(self.parent['prefix']) elif not self.parent['nodaemon']: if not isdir(settings.LOG_DIR): os.makedirs(settings.LOG_DIR) log.logToDir(settings.LOG_DIR) @staticmethod def _normpath(path): return normpath(expanduser(path)) def parseArgs(self, *action): if len(action) == 1: self["action"] = action[0] def handleAction(self): action = self['action'] pidfile = self.parent["pidfile"] program = settings['program'] instance = self['instance'] if action == 'stop': if not exists(pidfile): print 'pidfile %s does not exist' % pidfile raise SystemExit(0) with open(pidfile) as f: pid = int(f.read().strip()) print 'sending kill signal to pid %d' % pid try: os.kill(pid, 15) except OSError as e: if e.errno == 
errno.ESRCH: print 'no process with pid %d running' % pid else: raise raise SystemExit(0) elif action == 'start': if exists(pidfile): with open(pidfile) as f: pid = int(f.read().strip()) if _process_alive(pid): print ('%s (instance %s) is already running with pid %d' % (program, instance, pid)) raise SystemExit(1) else: print 'removing stale pidfile %s' % pidfile try: os.unlink(pidfile) except: print 'could not remove pidfile %s' % pidfile else: if not os.path.exists(settings['PID_DIR']): try: os.makedirs(settings['PID_DIR']) except OSError as e: if e.errno == errno.EEXIST and os.path.isdir(settings['PID_DIR']): pass elif action == 'status': if not exists(pidfile): print '%s (instance %s) is not running' % (program, instance) raise SystemExit(0) with open(pidfile) as f: pid = int(f.read().strip()) if _process_alive(pid): print ('%s (instance %s) is running with pid %d' % (program, instance, pid)) raise SystemExit(0) else: print "%s (instance %s) is not running" % (program, instance) raise SystemExit(1)
Apache License 2.0
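Usage sketch for get_parser (optparse); the option values below are arbitrary.

from rurouni.conf import get_parser

parser = get_parser()
options, args = parser.parse_args(
    ["--debug", "--instance", "b", "--config", "rurouni.conf", "start"])
print(options.debug)     # True
print(options.instance)  # 'b'
print(options.config)    # 'rurouni.conf'
print(args)              # ['start']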
wesselb/stheno
stheno/model/measure.py
Measure.shift
python
def shift(self, p_shifted, p, shift): return self._update( p_shifted, self.means[p].shift(shift), self.kernels[p].shift(shift), lambda j: self.kernels[p, j].shift(shift, 0), )
Shift a GP. Args: p_shifted (:class:`.gp.GP`): Shifted GP. p (:class:`.gp.GP`): GP to shift. shift (object): Amount to shift by. Returns: :class:`.gp.GP`: The shifted GP.
https://github.com/wesselb/stheno/blob/800600785bc3de5ebeb6bfbea469385c07a88d18/stheno/model/measure.py#L255-L271
from types import FunctionType from lab import B from matrix import Constant from mlkernels import ( num_elements, ZeroKernel, TensorProductKernel, ) from plum import Union from .fdd import FDD from .gp import GP, assert_same_measure from .observations import ( AbstractObservations, Observations, PseudoObservations, combine, ) from .. import _dispatch, PromisedMeasure from ..lazy import LazyVector, LazyMatrix from ..mo import MultiOutputKernel as MOK, MultiOutputMean as MOM from ..random import Normal __all__ = ["Measure"] class Measure: default = None def __init__(self): self.ps = [] self._pids = set() self.means = LazyVector() self.kernels = LazyMatrix() self._gps_by_name = {} self._names_by_gp = {} self._prev_default = None def __enter__(self): self._prev_default = self.default Measure.default = self return self def __exit__(self, exc_type, exc_val, exc_tb): Measure.default = self._prev_default def __hash__(self): return id(self) @_dispatch def __getitem__(self, name: str): return self._gps_by_name[name] @_dispatch def __getitem__(self, p: GP): return self._names_by_gp[id(p)] @_dispatch def name(self, p: GP, name: str): if id(p) in self._names_by_gp: del self._gps_by_name[self._names_by_gp[id(p)]] del self._names_by_gp[id(p)] if name in self._gps_by_name: raise RuntimeError( f'Name "{name}" for "{p}" already taken by "{self[name]}".' ) self._gps_by_name[name] = p self._names_by_gp[id(p)] = name def _add_p(self, p): self.ps.append(p) self._pids.add(id(p)) p._measures.append(self) def _update(self, p, mean, kernel, left_rule, right_rule=None): self.means[p] = mean self.kernels[p] = kernel self.kernels.add_left_rule(id(p), self._pids, left_rule) if right_rule: self.kernels.add_right_rule(id(p), self._pids, right_rule) else: self.kernels.add_right_rule( id(p), self._pids, lambda i: reversed(self.kernels[p, i]) ) self._add_p(p) return p @_dispatch def __call__(self, p: GP): p_copy = GP() return self._update( p_copy, self.means[p], self.kernels[p], lambda j: self.kernels[p, j], lambda i: self.kernels[i, p], ) @_dispatch def __call__(self, fdd: FDD): return self(fdd.p)(fdd.x, fdd.noise) def add_independent_gp(self, p, mean, kernel): self.means[p] = mean self.kernels[p] = kernel self.kernels.add_left_rule(id(p), self._pids, lambda j: ZeroKernel()) self.kernels.add_right_rule(id(p), self._pids, lambda i: ZeroKernel()) self._add_p(p) return p @_dispatch def sum(self, p_sum: GP, other, p: GP): return self.sum(p_sum, p, other) @_dispatch def sum(self, p_sum: GP, p: GP, other: Union[B.Numeric, FunctionType]): return self._update( p_sum, self.means[p] + other, self.kernels[p], lambda j: self.kernels[p, j], ) @_dispatch def sum(self, p_sum: GP, p1: GP, p2: GP): assert_same_measure(p1, p2) return self._update( p_sum, self.means[p1] + self.means[p2], ( self.kernels[p1] + self.kernels[p2] + self.kernels[p1, p2] + self.kernels[p2, p1] ), lambda j: self.kernels[p1, j] + self.kernels[p2, j], ) @_dispatch def mul(self, p_mul: GP, other, p: GP): return self.mul(p_mul, p, other) @_dispatch def mul(self, p_mul: GP, p: GP, other: B.Numeric): return self._update( p_mul, self.means[p] * other, self.kernels[p] * other ** 2, lambda j: self.kernels[p, j] * other, ) @_dispatch def mul(self, p_mul: GP, p: GP, f: FunctionType): def ones(x): return Constant(B.one(x), num_elements(x), 1) return self._update( p_mul, f * self.means[p], f * self.kernels[p], lambda j: TensorProductKernel(f, ones) * self.kernels[p, j], ) @_dispatch def mul(self, p_mul: GP, p1: GP, p2: GP): assert_same_measure(p1, p2) term1 = self.sum( GP(), 
self.mul(GP(), lambda x: self.means[p1](x), p2), self.mul(GP(), p1, lambda x: self.means[p2](x)), ) term2 = self.add_independent_gp( GP(), -self.means[p1] * self.means[p2], ( self.kernels[p1] * self.kernels[p2] + self.kernels[p1, p2] * self.kernels[p2, p1] ), ) return self.sum(p_mul, term1, term2)
MIT License
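Not stheno's API, just an illustration of what shifting means: the shifted process evaluates the original mean and kernel at inputs translated by the shift, and cross-kernels against other processes are shifted in one argument only (as the lambda j rule above shows).

import math

# Generic stand-in functions; stheno uses mlkernels mean/kernel objects instead.
def shift_mean(mean, c):
    return lambda x: mean(x - c)

def shift_kernel(kernel, c):
    return lambda x, y: kernel(x - c, y - c)

mean = lambda x: math.sin(x)
kernel = lambda x, y: math.exp(-0.5 * (x - y) ** 2)

print(shift_mean(mean, 2.0)(3.0))           # sin(1.0)
print(shift_kernel(kernel, 2.0)(3.0, 4.0))  # exp(-0.5); stationary kernels are unchanged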
databand-ai/dbnd
modules/dbnd/src/dbnd/_vendor/importlib_metadata/_compat.py
ensure_is_path
python
def ensure_is_path(ob): if (3,) < sys.version_info < (3, 5): ob = str(ob) return pathlib.Path(ob)
Construct a Path from ob even if it's already one. Specialized for Python 3.4.
https://github.com/databand-ai/dbnd/blob/ec0076f9a142b20e2f7afd886ed1a18683c553ec/modules/dbnd/src/dbnd/_vendor/importlib_metadata/_compat.py#L108-L114
from __future__ import absolute_import import io import abc import sys import email if sys.version_info > (3,): import builtins from configparser import ConfigParser from contextlib import suppress FileNotFoundError = builtins.FileNotFoundError IsADirectoryError = builtins.IsADirectoryError NotADirectoryError = builtins.NotADirectoryError PermissionError = builtins.PermissionError map = builtins.map else: from backports.configparser import ConfigParser from itertools import imap as map from contextlib2 import suppress FileNotFoundError = IOError, OSError IsADirectoryError = IOError, OSError NotADirectoryError = IOError, OSError PermissionError = IOError, OSError if sys.version_info > (3, 5): import pathlib else: import pathlib2 as pathlib try: ModuleNotFoundError = builtins.FileNotFoundError except (NameError, AttributeError): ModuleNotFoundError = ImportError if sys.version_info >= (3,): from importlib.abc import MetaPathFinder else: class MetaPathFinder(object): __metaclass__ = abc.ABCMeta __metaclass__ = type __all__ = [ 'install', 'NullFinder', 'MetaPathFinder', 'ModuleNotFoundError', 'pathlib', 'ConfigParser', 'map', 'suppress', 'FileNotFoundError', 'NotADirectoryError', 'email_message_from_string', ] def install(cls): sys.meta_path.append(cls()) return cls class NullFinder: @staticmethod def find_spec(*args, **kwargs): return None find_module = find_spec def py2_message_from_string(text): io_buffer = io.StringIO(text) return email.message_from_file(io_buffer) email_message_from_string = ( py2_message_from_string if sys.version_info < (3,) else email.message_from_string ) PYPY_OPEN_BUG = getattr(sys, 'pypy_version_info', (9, 9, 9))[:3] <= (7, 1, 1)
Apache License 2.0
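A quick usage example of ensure_is_path; the file names are arbitrary.

import pathlib
from dbnd._vendor.importlib_metadata._compat import ensure_is_path

p1 = ensure_is_path("setup.py")
p2 = ensure_is_path(pathlib.Path("src") / "pkg")
print(type(p1).__name__, p1)  # a pathlib.Path (PosixPath/WindowsPath): setup.py
print(type(p2).__name__, p2)  # same type: src/pkg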
thomas-young-2013/open-box
openbox/acquisition_function/multi_objective_acquisition.py
EHVI.psi
python
def psi(self, lower: np.ndarray, upper: np.ndarray, mu: np.ndarray, sigma: np.ndarray) -> np.ndarray: u = (upper - mu) / sigma return sigma * norm.pdf(u) + (mu - lower) * (1 - norm.cdf(u))
Compute the Psi function for minimization. For each cell i and outcome k: Psi(lower_{i,k}, upper_{i,k}, mu_k, sigma_k) = sigma_k * PDF((upper_{i,k} - mu_k) / sigma_k) + (mu_k - lower_{i,k}) * (1 - CDF((upper_{i,k} - mu_k) / sigma_k)) See Equation 19 in [Yang2019]_ for more details. Args: lower: A `num_cells x m`-dim array of lower cell bounds upper: A `num_cells x m`-dim array of upper cell bounds mu: A `batch_shape x 1 x m`-dim array of means sigma: A `batch_shape x 1 x m`-dim array of standard deviations (clamped). Returns: A `batch_shape x num_cells x m`-dim array of values.
https://github.com/thomas-young-2013/open-box/blob/74dc94e7ed99ee66552eb8be8d38881d903c7084/openbox/acquisition_function/multi_objective_acquisition.py#L52-L74
from typing import List, Tuple from itertools import product import random import numpy as np from scipy.stats import norm from sklearn.kernel_approximation import RBFSampler from openbox.acquisition_function.acquisition import AbstractAcquisitionFunction, Uncertainty from openbox.surrogate.base.base_model import AbstractModel from openbox.surrogate.base.gp import GaussianProcess from openbox.utils.platypus_utils import set_problem_types, get_variator from openbox.utils.constants import MAXINT from platypus import NSGAII, Problem, Real class EHVI(AbstractAcquisitionFunction): def __init__( self, model: List[AbstractModel], ref_point, **kwargs ): super().__init__(model=model, **kwargs) self.long_name = 'Expected Hypervolume Improvement' ref_point = np.asarray(ref_point) self.ref_point = ref_point self._cross_product_indices = np.array( list(product(*[[0, 1] for _ in range(ref_point.shape[0])])) )
MIT License
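A scalar, standalone version of the Psi term matching the expression in the docstring above; the numbers are made up.

from scipy.stats import norm

def psi_scalar(lower, upper, mu, sigma):
    # sigma * pdf(u) + (mu - lower) * (1 - cdf(u)), with u = (upper - mu) / sigma
    u = (upper - mu) / sigma
    return sigma * norm.pdf(u) + (mu - lower) * (1 - norm.cdf(u))

print(psi_scalar(lower=0.0, upper=1.0, mu=0.5, sigma=0.2))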
daeilkim/refinery
refinery/bnpy/bnpy-dev/bnpy/data/MinibatchIterator.py
MinibatchIterator.__init__
python
def __init__(self, Data, nBatch=10, nObsBatch=None, nLap=10, dataorderseed=42, startLap=0, **kwargs): self.Data = Data self.nBatch = nBatch self.nLap = nLap + startLap self.nObsTotal = Data.nObsTotal if nObsBatch is None: self.nObsBatch = Data.nObsTotal/nBatch else: self.nObsBatch = nObsBatch self.curLapPos = -1 self.lapID = startLap self.dataorderseed = int(int(dataorderseed) % MAXSEED) self.obsIDByBatch = self.configObsIDsForEachBatch()
Constructor for creating an iterator over the batches of data
https://github.com/daeilkim/refinery/blob/0d5de8fc3d680a2c79bd0e9384b506229787c74f/refinery/bnpy/bnpy-dev/bnpy/data/MinibatchIterator.py#L40-L59
import numpy as np MAXSEED = 1000000 class MinibatchIterator(object):
MIT License
andycasey/ads
ads/search.py
Article.build_citation_tree
python
def build_citation_tree(self, depth=1, **kwargs): raise NotImplementedError
Builds a citation tree for this paper. :param depth: [optional] The number of levels to fetch in the citation tree. :type depth: int :param kwargs: [optional] Keyword arguments to pass to ``ads.search``. :returns: A list of citations to the current article, with citations pre-loaded down to ``depth``.
https://github.com/andycasey/ads/blob/7939be2650bfee85be13cd46cfa2e037381da7fc/ads/search.py#L91-L110
import warnings import re import six import math from .config import SEARCH_URL from .exceptions import SolrResponseParseError, APIResponseError from .base import BaseQuery, APIResponse from .metrics import MetricsQuery from .export import ExportQuery from .utils import cached_property class Article(object): def __init__(self, **kwargs): self._raw = kwargs for key, value in six.iteritems(kwargs): setattr(self, key, value) def __str__(self): if six.PY3: return self.__unicode__() return self.__unicode__().encode("utf-8") def __unicode__(self): author = self.first_author or "Unknown author" if len(self.author or []) > 1: author += " et al." return u"<{author} {year}, {bibcode}>".format( author=author, year=self.year or "Unknown year", bibcode=self.bibcode or "Unknown bibcode" ) def __eq__(self, other): if self._raw.get("bibcode") is None or other._raw.get("bibcode") is None: raise TypeError("Cannot compare articles without bibcodes") return self._raw['bibcode'] == other._raw['bibcode'] def __ne__(self, other): return not self.__eq__(other) def keys(self): return self._raw.keys() def items(self): return self._raw.items() def iteritems(self): return six.iteritems(self._raw) def build_reference_tree(self, depth=1, **kwargs): raise NotImplementedError
MIT License
arkhn/pyfhirstore
benchmark/benchmark_fhirbase.py
download_resources
python
def download_resources(): if not path.exists("benchmark/examples.zip"): url = "http://www.hl7.org/fhir/examples-json.zip" r = requests.get(url, stream=True) total_size = int(r.headers.get("content-length", 0)) block_size = 1024 t = tqdm( total=total_size, unit="B", unit_scale=True, desc="Downloading example resources", ) with open("benchmark/examples.zip", "wb") as f: for data in r.iter_content(block_size): t.update(len(data)) f.write(data) t.close() else: print("Using cached resources")
Downloads examples from HL7 website.
https://github.com/arkhn/pyfhirstore/blob/dd43b6d7db600f95d81dc83ae0a6e6de78ff02c6/benchmark/benchmark_fhirbase.py#L154-L175
import requests import io import zipfile import json from os import path from timeit import default_timer as timer import statistics from uuid import uuid4 from tqdm import tqdm import fhirbase import psycopg2 example_blacklist = [ "package-min-ver.json", "profiles-resources.json", "questionnaireresponse-extensions-QuestionnaireResponse-item-subject.json", "binary-example.json", "binary-f006.json", "bundle-example.json", "bundle-references.json", "bundle-request-medsallergies.json", "bundle-request-simplesummary.json", "bundle-response-medsallergies.json", "bundle-response-simplesummary.json", "bundle-response.json", "bundle-search-warning.json", "catalogentry-example.json", "chargeitemdefinition-device-example.json", "chargeitemdefinition-ebm-example.json", "codesystem-extensions-CodeSystem-author.json", "codesystem-extensions-CodeSystem-effective.json", "chargeitemdefinition-ebm-example.json", "codesystem-extensions-CodeSystem-end.json", "codesystem-extensions-CodeSystem-keyword.json", "conceptmaps.json", "coord-0base-example.json", "coord-1base-example.json", "coverageeligibilityrequest-example-2.json", "coverageeligibilityrequest-example.json", "coverageeligibilityresponse-example-benefits-2.json", "dataelements.json", "device-extensions-Device-din.json", "devicedefinition-example.json", "diagnosticreport-example-f001-bloodexam.json", "diagnosticreport-example-f202-bloodculture.json", "document-example-dischargesummary.json", "effectevidencesynthesis-example.json", "endpoint-examples-general-template.json", "evidence-example.json", "evidencevariable-example.json", "extension-definitions.json", "external-resources.json", "group-example-herd1.json", "graphdefinition-questionnaire.json", "group-example-member.json", "group-example-patientlist.json", "group-example.json", "insuranceplan-example.json", "location-examples-general.json", "medicationknowledge-example.json", "medicinalproductcontraindication-example.json", "medicinalproductindication-example.json", "medicinalproductinteraction-example.json", "medicinalproductmanufactured-example.json", "medicinalproductundesirableeffect-example.json", "message-request-link.json", "message-response-link.json", "molecularsequence-example.json", "namingsystem-registry.json", "namingsystem-terminologies.json", "observation-genetic-Observation-amino-acid-change.json", "observation-genetic-Observation-dna-variant.json", "observation-genetic-Observation-gene-amino-acid-change.json", "observation-genetic-Observation-gene-dnavariant.json", "observation-genetic-Observation-gene-identifier.json", "organizationaffiliation-example.json", "orgrole-example-hie.json", "orgrole-example-services.json", "patient-examples-cypress-template.json", "patient-examples-general.json", "patient-extensions-Patient-age.json", "patient-extensions-Patient-birthOrderBoolean.json", "patient-extensions-Patient-mothersMaidenName.json", "practitioner-examples-general.json", "practitionerrole-examples-general.json", "profiles-others.json", "profiles-types.json", "questionnaire-profile-example-ussg-fht.json", "researchdefinition-example.json", "researchelementdefinition-example.json", "riskevidencesynthesis-example.json", "search-parameters.json", "searchparameter-example-extension.json", "searchparameter-example-reference.json", "searchparameter-example.json", "searchparameter-filter.json", "sequence-complex-variant.json", "sequence-example-fda-comparisons.json", "sequence-example-fda-vcfeval.json", "sequence-example-fda.json", "sequence-example-pgx-1.json", 
"sequence-example-pgx-2.json", "sequence-example-TPMT-one.json", "sequence-example-TPMT-two.json", "sequence-genetics-example-breastcancer.json", "sequence-graphic-example-1.json", "sequence-graphic-example-2.json", "sequence-graphic-example-3.json", "sequence-graphic-example-4.json", "sequence-graphic-example-5.json", "v2-tables.json", "v3-codesystems.json", "valueset-extensions-ValueSet-author.json", "valueset-extensions-ValueSet-effective.json", "valueset-extensions-ValueSet-end.json", "valueset-extensions-ValueSet-keyword.json", "valueset-extensions-ValueSet-workflow.json", "valuesets.json", "xds-example.json", "bundle-transaction.json", "codesystem-extensions-CodeSystem-workflow.json", "coverageeligibilityresponse-example-benefits.json", "coverageeligibilityresponse-example-error.json", "coverageeligibilityresponse-example.json", "diagnosticreport-example-ghp.json", "diagnosticreport-example-lipids.json", "diagnosticreport-example-lri.json", "diagnosticreport-example.json", "diagnosticreport-examples-general.json", "diagnosticreport-genetic-DiagnosticReport-assessed-condition.json", "diagnosticreport-genetics-comprehensive-bone-marrow-report.json", "diagnosticreport-genetics-example-2-familyhistory.json", "diagnosticreport-hla-genetics-results-example.json", "diagnosticreport-micro1.json", ] def count_examples(): with zipfile.ZipFile("benchmark/examples.zip") as archive: return len( [f for f in archive.infolist() if f.filename not in example_blacklist] ) def iter_examples(): with zipfile.ZipFile("benchmark/examples.zip") as archive: for zipinfo in archive.infolist(): with archive.open(zipinfo) as thefile: if zipinfo.filename not in example_blacklist: yield zipinfo.filename, json.load(thefile)
Apache License 2.0
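Illustrative only: a minimal sketch of how count_examples() and iter_examples() from the record above might drive a fhirbase insertion benchmark. The connection parameters and the fhirbase.FHIRBase(...).create(...) calls are assumptions based on the imports shown, not code taken from the repository.

connection = psycopg2.connect(dbname='fhirbase', user='postgres', host='localhost')
fb = fhirbase.FHIRBase(connection)  # assumed fhirbase-py API

timings = []
for filename, resource in tqdm(iter_examples(), total=count_examples()):
    start = timer()
    fb.create(resource)             # insert one example resource
    timings.append(timer() - start)

print("median insert time (s):", statistics.median(timings))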
crypto-toolbox/bitex
bitex/api/WSS/bitstamp.py
BitstampWSS.start
python
def start(self):
    super(BitstampWSS, self).start()

    self.pusher = pusherclient.Pusher(self.addr, **self.__pusher_options)
    self.pusher.connection.bind('pusher:connection_established',
                                self._register_bindings)
    self.pusher.connect()
Extension of the Pusher.connect() method which registers all callbacks with the relevant channels before initializing the connection. :return: None
https://github.com/crypto-toolbox/bitex/blob/56d46ea3db6de5219a72dad9b052fbabc921232f/bitex/api/WSS/bitstamp.py#L67-L78
import logging import pusherclient from bitex.api.WSS.base import WSSAPI log = logging.getLogger(__name__) class BitstampWSS(WSSAPI): def __init__(self, key=None, exclude=None, include_only=None, **kwargs): key = key if key else 'de504dc5763aeef9ff52' super(BitstampWSS, self).__init__(key, 'Bitstamp') self.pusher = None self.__pusher_options = kwargs self.channels = ['live_trades', 'live_trades_btceur', 'live_trades_eurusd', 'live_trades_xrpusd', 'live_trades_xrpeur', 'live_trades_xrpbtc', 'order_book_btceur', 'order_book_eurusd', 'order_book_xrpusd', 'order_book_xrpeur', 'order_book_xrpbtc', 'diff_order_book', 'order_book', 'diff_order_book_btceur', 'diff_order_book_eurusd', 'diff_order_book_xrpusd', 'diff_order_book_xrpeur', 'diff_order_book_xrpbtc', 'live_orders', 'live_orders_btceur', 'live_orders_eurusd', 'live_orders_xrpusd', 'live_orders_xrpeur', 'live_orders_xrpbtc'] if include_only: if all(x in self.channels for x in include_only): self.channels = include_only else: raise ValueError("'include_only: must be a list of strings of" "valid channel name! %s" % self.channels) if exclude: if all(x in self.channels for x in exclude): for x in exclude: self.channels.remove(x) else: raise ValueError("'exclude: must be a list of strings of" "valid channel name! %s" % self.channels)
MIT License
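Illustrative only: a minimal sketch of starting the feed, assuming the constructor shown in the context of the record above; how messages are consumed afterwards depends on the WSSAPI base class, which is not shown here.

wss = BitstampWSS(include_only=['live_trades', 'order_book'])
wss.start()  # connects to Pusher and registers the channel callbacks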
hobson/aima
aima/logic.py
pl_fc_entails
python
def pl_fc_entails(KB, q):
    count = dict([(c, len(conjuncts(c.args[0]))) for c in KB.clauses
                  if c.op == '>>'])
    inferred = DefaultDict(False)
    agenda = [s for s in KB.clauses if is_prop_symbol(s.op)]
    while agenda:
        p = agenda.pop()
        if p == q: return True
        if not inferred[p]:
            inferred[p] = True
            for c in KB.clauses_with_premise(p):
                count[c] -= 1
                if count[c] == 0:
                    agenda.append(c.args[1])
    return False
Use forward chaining to see if a PropDefiniteKB entails symbol q. [Fig. 7.15]
>>> pl_fc_entails(Fig[7,15], expr('Q'))
True
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/logic.py#L603-L622
import itertools, re import agents from utils import * class KB: def __init__(self, sentence=None): abstract def tell(self, sentence): abstract def ask(self, query): for result in self.ask_generator(query): return result return False def ask_generator(self, query): abstract def retract(self, sentence): abstract class PropKB(KB): def __init__(self, sentence=None): self.clauses = [] if sentence: self.tell(sentence) def tell(self, sentence): self.clauses.extend(conjuncts(to_cnf(sentence))) def ask_generator(self, query): if tt_entails(Expr('&', *self.clauses), query): yield {} def retract(self, sentence): for c in conjuncts(to_cnf(sentence)): if c in self.clauses: self.clauses.remove(c) def KB_AgentProgram(KB): steps = itertools.count() def program(percept): t = steps.next() KB.tell(make_percept_sentence(percept, t)) action = KB.ask(make_action_query(t)) KB.tell(make_action_sentence(action, t)) return action def make_percept_sentence(self, percept, t): return Expr("Percept")(percept, t) def make_action_query(self, t): return expr("ShouldDo(action, %d)" % t) def make_action_sentence(self, action, t): return Expr("Did")(action[expr('action')], t) return program class Expr: def __init__(self, op, *args): assert isinstance(op, str) or (isnumber(op) and not args) self.op = num_or_str(op) self.args = map(expr, args) def __call__(self, *args): assert is_symbol(self.op) and not self.args return Expr(self.op, *args) def __repr__(self): if not self.args: return str(self.op) elif is_symbol(self.op): return '%s(%s)' % (self.op, ', '.join(map(repr, self.args))) elif len(self.args) == 1: return self.op + repr(self.args[0]) else: return '(%s)' % (' '+self.op+' ').join(map(repr, self.args)) def __eq__(self, other): return (other is self) or (isinstance(other, Expr) and self.op == other.op and self.args == other.args) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.op) ^ hash(tuple(self.args)) def __lt__(self, other): return Expr('<', self, other) def __le__(self, other): return Expr('<=', self, other) def __ge__(self, other): return Expr('>=', self, other) def __gt__(self, other): return Expr('>', self, other) def __add__(self, other): return Expr('+', self, other) def __sub__(self, other): return Expr('-', self, other) def __and__(self, other): return Expr('&', self, other) def __div__(self, other): return Expr('/', self, other) def __truediv__(self, other):return Expr('/', self, other) def __invert__(self): return Expr('~', self) def __lshift__(self, other): return Expr('<<', self, other) def __rshift__(self, other): return Expr('>>', self, other) def __mul__(self, other): return Expr('*', self, other) def __neg__(self): return Expr('-', self) def __or__(self, other): return Expr('|', self, other) def __pow__(self, other): return Expr('**', self, other) def __xor__(self, other): return Expr('^', self, other) def __mod__(self, other): return Expr('<=>', self, other) def expr(s): if isinstance(s, Expr): return s if isnumber(s): return Expr(s) s = s.replace('==>', '>>').replace('<==', '<<') s = s.replace('<=>', '%').replace('=/=', '^') s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr("\1")', s) return eval(s, {'Expr':Expr}) def is_symbol(s): return isinstance(s, str) and s[:1].isalpha() def is_var_symbol(s): return is_symbol(s) and s[0].islower() def is_prop_symbol(s): return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE' def variables(s): result = set([]) def walk(s): if is_variable(s): result.add(s) else: for arg in s.args: walk(arg) walk(s) return result 
def is_definite_clause(s): if is_symbol(s.op): return True elif s.op == '>>': antecedent, consequent = s.args return (is_symbol(consequent.op) and every(lambda arg: is_symbol(arg.op), conjuncts(antecedent))) else: return False def parse_definite_clause(s): assert is_definite_clause(s) if is_symbol(s.op): return [], s else: antecedent, consequent = s.args return conjuncts(antecedent), consequent TRUE, FALSE, ZERO, ONE, TWO = map(Expr, ['TRUE', 'FALSE', 0, 1, 2]) A, B, C, D, E, F, G, P, Q, x, y, z = map(Expr, 'ABCDEFGPQxyz') def tt_entails(kb, alpha): assert not variables(alpha) return tt_check_all(kb, alpha, prop_symbols(kb & alpha), {}) def tt_check_all(kb, alpha, symbols, model): if not symbols: if pl_true(kb, model): result = pl_true(alpha, model) assert result in (True, False) return result else: return True else: P, rest = symbols[0], symbols[1:] return (tt_check_all(kb, alpha, rest, extend(model, P, True)) and tt_check_all(kb, alpha, rest, extend(model, P, False))) def prop_symbols(x): if not isinstance(x, Expr): return [] elif is_prop_symbol(x.op): return [x] else: return list(set(symbol for arg in x.args for symbol in prop_symbols(arg))) def tt_true(alpha): return tt_entails(TRUE, expr(alpha)) def pl_true(exp, model={}): op, args = exp.op, exp.args if exp == TRUE: return True elif exp == FALSE: return False elif is_prop_symbol(op): return model.get(exp) elif op == '~': p = pl_true(args[0], model) if p is None: return None else: return not p elif op == '|': result = False for arg in args: p = pl_true(arg, model) if p is True: return True if p is None: result = None return result elif op == '&': result = True for arg in args: p = pl_true(arg, model) if p is False: return False if p is None: result = None return result p, q = args if op == '>>': return pl_true(~p | q, model) elif op == '<<': return pl_true(p | ~q, model) pt = pl_true(p, model) if pt is None: return None qt = pl_true(q, model) if qt is None: return None if op == '<=>': return pt == qt elif op == '^': return pt != qt else: raise ValueError, "illegal operator in logic expression" + str(exp) def to_cnf(s): if isinstance(s, str): s = expr(s) s = eliminate_implications(s) s = move_not_inwards(s) return distribute_and_over_or(s) def eliminate_implications(s): if not s.args or is_symbol(s.op): return s args = map(eliminate_implications, s.args) a, b = args[0], args[-1] if s.op == '>>': return (b | ~a) elif s.op == '<<': return (a | ~b) elif s.op == '<=>': return (a | ~b) & (b | ~a) elif s.op == '^': assert len(args) == 2 return (a & ~b) | (~a & b) else: assert s.op in ('&', '|', '~') return Expr(s.op, *args) def move_not_inwards(s): if s.op == '~': NOT = lambda b: move_not_inwards(~b) a = s.args[0] if a.op == '~': return move_not_inwards(a.args[0]) if a.op =='&': return associate('|', map(NOT, a.args)) if a.op =='|': return associate('&', map(NOT, a.args)) return s elif is_symbol(s.op) or not s.args: return s else: return Expr(s.op, *map(move_not_inwards, s.args)) def distribute_and_over_or(s): if s.op == '|': s = associate('|', s.args) if s.op != '|': return distribute_and_over_or(s) if len(s.args) == 0: return FALSE if len(s.args) == 1: return distribute_and_over_or(s.args[0]) conj = find_if((lambda d: d.op == '&'), s.args) if not conj: return s others = [a for a in s.args if a is not conj] rest = associate('|', others) return associate('&', [distribute_and_over_or(c|rest) for c in conj.args]) elif s.op == '&': return associate('&', map(distribute_and_over_or, s.args)) else: return s def associate(op, args): args = 
dissociate(op, args) if len(args) == 0: return _op_identity[op] elif len(args) == 1: return args[0] else: return Expr(op, *args) _op_identity = {'&':TRUE, '|':FALSE, '+':ZERO, '*':ONE} def dissociate(op, args): result = [] def collect(subargs): for arg in subargs: if arg.op == op: collect(arg.args) else: result.append(arg) collect(args) return result def conjuncts(s): return dissociate('&', [s]) def disjuncts(s): return dissociate('|', [s]) def pl_resolution(KB, alpha): clauses = KB.clauses + conjuncts(to_cnf(~alpha)) new = set() while True: n = len(clauses) pairs = [(clauses[i], clauses[j]) for i in range(n) for j in range(i+1, n)] for (ci, cj) in pairs: resolvents = pl_resolve(ci, cj) if FALSE in resolvents: return True new = new.union(set(resolvents)) if new.issubset(set(clauses)): return False for c in new: if c not in clauses: clauses.append(c) def pl_resolve(ci, cj): clauses = [] for di in disjuncts(ci): for dj in disjuncts(cj): if di == ~dj or ~di == dj: dnew = unique(removeall(di, disjuncts(ci)) + removeall(dj, disjuncts(cj))) clauses.append(associate('|', dnew)) return clauses class PropDefiniteKB(PropKB): def tell(self, sentence): assert is_definite_clause(sentence), "Must be definite clause" self.clauses.append(sentence) def ask_generator(self, query): if pl_fc_entails(self.clauses, query): yield {} def retract(self, sentence): self.clauses.remove(sentence) def clauses_with_premise(self, p): return [c for c in self.clauses if c.op == '>>' and p in conjuncts(c.args[0])]
MIT License
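Illustrative only: a small worked example of forward chaining, assuming the PropDefiniteKB and expr helpers defined in this module are in scope (the repository targets Python 2, but the snippet is version-neutral).

kb = PropDefiniteKB()
for s in ['P', 'Q', '(P & Q) >> R']:
    kb.tell(expr(s))

pl_fc_entails(kb, expr('R'))  # True: R follows from P and Q by forward chaining
pl_fc_entails(kb, expr('S'))  # False: S is never added to the agenda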
chaitjo/graph-convnet-tsp
utils/beamsearch.py
Beamsearch.get_current_origin
python
def get_current_origin(self):
    return self.prev_Ks[-1]
Get the backpointers for the current timestep.
https://github.com/chaitjo/graph-convnet-tsp/blob/b8419762081e0c9a37d77fd1aca2ed1ac4e180d3/utils/beamsearch.py#L57-L60
import numpy as np import torch class Beamsearch(object): def __init__(self, beam_size, batch_size, num_nodes, dtypeFloat=torch.FloatTensor, dtypeLong=torch.LongTensor, probs_type='raw', random_start=False): self.batch_size = batch_size self.beam_size = beam_size self.num_nodes = num_nodes self.probs_type = probs_type self.dtypeFloat = dtypeFloat self.dtypeLong = dtypeLong self.start_nodes = torch.zeros(batch_size, beam_size).type(self.dtypeLong) if random_start == True: self.start_nodes = torch.randint(0, num_nodes, (batch_size, beam_size)).type(self.dtypeLong) self.mask = torch.ones(batch_size, beam_size, num_nodes).type(self.dtypeFloat) self.update_mask(self.start_nodes) self.scores = torch.zeros(batch_size, beam_size).type(self.dtypeFloat) self.all_scores = [] self.prev_Ks = [] self.next_nodes = [self.start_nodes] def get_current_state(self): current_state = (self.next_nodes[-1].unsqueeze(2) .expand(self.batch_size, self.beam_size, self.num_nodes)) return current_state
MIT License
carvesystems/gostringsr2
gostringsr2/gostringsr2.py
GoStringsR2.get_string_table_search
python
def get_string_table_search(self, rdata):
    self.log("Searching for string table")

    if rdata is not None:
        str_start, str_size = self._find_longest_string(rdata["data"])
        if str_size > 0:
            g_str = {"vaddr": rdata["vaddr"] + str_start, "tabsize": str_size}
            startaddr = g_str["vaddr"] - rdata["vaddr"]
            endaddr = startaddr + g_str["tabsize"]
            g_str["table"] = rdata["data"][startaddr:endaddr]
            return g_str

    return None
Returns a dictionary with the raw data from the string table, as found via searching in the provided rdata dictionary.
https://github.com/carvesystems/gostringsr2/blob/2490e95cc885726ced727031558d6de6cc72e1c8/gostringsr2/gostringsr2.py#L160-L177
import sys import json import binascii import re import base64 import r2pipe class GoStringsR2Error(RuntimeError): pass class GoStringsR2: SUPPORTED_ARCHS = ["arm", "x86"] SUPPORTED_BINTYPES = ["elf", "pe", "mach0"] def __init__(self, _file, _logging=False): self.file = _file self.logging = _logging self.loaded = False self.r2 = None def kill(self): if self.loaded: self.r2.quit() self.r2 = None self.loaded = False def runjson(self, cmd): return self.r2.cmdj(cmd) def run(self, cmd): return self.r2.cmd(cmd) def load(self): self.log("Loading file into r2: {}".format(self.file)) self.r2 = r2pipe.open(self.file) self.data = {} self.data["info"] = self.runjson("ij") if "bin" not in self.data["info"]: raise GoStringsR2Error("r2 could not parse the binary") self.arch = self.data["info"]["bin"]["arch"] self.bintype = self.data["info"]["bin"]["bintype"] self.bits = self.data["info"]["bin"]["bits"] self.binos = self.data["info"]["bin"]["os"] if self.bintype not in ["elf", "mach0", "pe"]: raise GoStringsR2Error( "bintype {} not supported by gostringsr2. Supported: {}".format( self.bintype, GoStringsR2.SUPPORTED_BINTYPES ) ) if self.arch not in ["arm", "x86"]: self.log("warning: arch {} may not fully work".format(self.arch)) self.data["symbols"] = self.runjson("isj") self.data["sections"] = self.runjson("iSj") self.loaded = True self.log(self.file_info()) def file_info(self): if self.loaded: return ( "file: {}\n" "size: {} KB\n" "executable: {}\n" "language: {}\n" "architecture: {}-bit {}\n" "os: {}\n" "stripped: {}\n".format( self.data["info"]["core"]["file"], self.data["info"]["core"]["size"] // 1024, self.data["info"]["bin"]["bintype"], self.data["info"]["bin"]["lang"], self.data["info"]["bin"]["bits"], self.data["info"]["bin"]["arch"], self.data["info"]["bin"]["os"], self.data["info"]["bin"]["stripped"], ) ) return "file: <none>" def get_string_table_symbols(self, rdata): g_str = self.find_symbol("go.string.*") g_func = self.find_symbol("go.func.*") if g_str is not None and g_func is not None: g_str["tabsize"] = g_func["vaddr"] - g_str["vaddr"] startaddr = g_str["vaddr"] - rdata["vaddr"] endaddr = startaddr + g_str["tabsize"] g_str["table"] = rdata["data"][startaddr:endaddr] return g_str return None def get_rodata_section(self): if self.bintype == "elf": sname = ".rodata" elif self.bintype == "mach0": sname = ".__TEXT.__rodata" elif self.bintype == "pe": sname = ".rdata" return self.get_section_data(sname) def get_code_section(self): if self.bintype in ["elf", "pe"]: return self.get_section_info(".text") elif self.bintype == "mach0": return self.get_section_info(".__TEXT.__text") return None
MIT License
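Illustrative only: a usage sketch for the record above against a made-up Go binary path, assuming load() and get_rodata_section() (both shown in the context) return the rdata dictionary this method expects.

g = GoStringsR2('./hello_go', _logging=True)  # hypothetical binary path
g.load()
rodata = g.get_rodata_section()
table = g.get_string_table_search(rodata)
if table is not None:
    print(hex(table['vaddr']), table['tabsize'])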
purestorage-openconnect/py-pure-client
pypureclient/flashblade/FB_2_1/models/object_store_access_key.py
ObjectStoreAccessKey.__init__
python
def __init__(
    self,
    name=None,
    created=None,
    enabled=None,
    secret_access_key=None,
    user=None,
):
    if name is not None:
        self.name = name
    if created is not None:
        self.created = created
    if enabled is not None:
        self.enabled = enabled
    if secret_access_key is not None:
        self.secret_access_key = secret_access_key
    if user is not None:
        self.user = user
Keyword args:
    name (str): Name of the object (e.g., a file system or snapshot).
    created (int): Creation timestamp of the object.
    enabled (bool): Is the access key enabled? If not specified, defaults to `false`.
    secret_access_key (str): The secret access key, only populated on creation if it is not imported from another FlashBlade.
    user (FixedReference): Reference of the associated user.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flashblade/FB_2_1/models/object_store_access_key.py#L51-L76
import pprint import re import six import typing from ....properties import Property if typing.TYPE_CHECKING: from pypureclient.flashblade.FB_2_1 import models class ObjectStoreAccessKey(object): swagger_types = { 'name': 'str', 'created': 'int', 'enabled': 'bool', 'secret_access_key': 'str', 'user': 'FixedReference' } attribute_map = { 'name': 'name', 'created': 'created', 'enabled': 'enabled', 'secret_access_key': 'secret_access_key', 'user': 'user' } required_args = { }
BSD 2-Clause Simplified License
quantopian/zipline
zipline/data/fx/hdf5.py
HDF5FXRateWriter.write
python
def write(self, dts, currencies, data):
    if len(currencies):
        chunks = (len(currencies), min(self._date_chunk_size, len(dts)))
    else:
        chunks = None

    self._write_metadata()
    self._write_index_group(dts, currencies)
    self._write_data_group(dts, currencies, data, chunks)
Write data to the file.

Parameters
----------
dts : pd.DatetimeIndex
    Index of row labels for rates to be written.
currencies : np.array[object]
    Array of column labels for rates to be written.
data : iterator[(str, str, np.array[float64])]
    Iterator of (rate, quote_currency, array) tuples. Each array should be of shape ``(len(dts), len(currencies))``, and should contain a table of rates where each column is a timeseries of rates mapping its column label's currency to ``quote_currency``.
https://github.com/quantopian/zipline/blob/014f1fc339dc8b7671d29be2d85ce57d3daec343/zipline/data/fx/hdf5.py#L250-L274
from interface import implements import h5py from logbook import Logger import numpy as np import pandas as pd from zipline.utils.memoize import lazyval from zipline.utils.numpy_utils import bytes_array_to_native_str_object_array from .base import FXRateReader, DEFAULT_FX_RATE from .utils import check_dts, is_sorted_ascending HDF5_FX_VERSION = 0 HDF5_FX_DEFAULT_CHUNK_SIZE = 75 INDEX = 'index' DATA = 'data' CURRENCIES = 'currencies' DTS = 'dts' RATES = 'rates' log = Logger(__name__) class HDF5FXRateReader(implements(FXRateReader)): def __init__(self, group, default_rate): self._group = group self._default_rate = default_rate if self.version != HDF5_FX_VERSION: raise ValueError( "FX Reader version ({}) != File Version ({})".format( HDF5_FX_VERSION, self.version, ) ) @classmethod def from_path(cls, path, default_rate): return cls(h5py.File(path), default_rate=default_rate) @lazyval def version(self): try: return self._group.attrs['version'] except KeyError: return 0 @lazyval def dts(self): raw_dts = self._group[INDEX][DTS][:].astype('M8[ns]') if not is_sorted_ascending(raw_dts): raise ValueError("dts are not sorted for {}!".format(self._group)) return pd.DatetimeIndex(raw_dts, tz='UTC') @lazyval def currencies(self): bytes_array = self._group[INDEX][CURRENCIES][:] objects = bytes_array_to_native_str_object_array(bytes_array) return pd.Index(objects) def get_rates(self, rate, quote, bases, dts): if rate == DEFAULT_FX_RATE: rate = self._default_rate check_dts(dts) col_ixs = self.dts.searchsorted(dts, side='right') - 1 row_ixs = self.currencies.get_indexer(bases) try: dataset = self._group[DATA][rate][quote][RATES] except KeyError: raise ValueError( "FX rates not available for rate={}, quote_currency={}." .format(rate, quote) ) slice_begin = max(col_ixs[0], 0) slice_end = max(col_ixs[-1], 0) + 1 buf = np.full( (len(self.currencies) + 1, slice_end - slice_begin + 1), np.nan, ) buf[:-1, :-1] = dataset[:, slice_begin:slice_end] out = buf[:, col_ixs - slice_begin][row_ixs] return out.transpose() class HDF5FXRateWriter(object): def __init__(self, group, date_chunk_size=HDF5_FX_DEFAULT_CHUNK_SIZE): self._group = group self._date_chunk_size = date_chunk_size
Apache License 2.0
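Illustrative only: a hedged sketch of writing a tiny FX rate table with the writer above. The 'mid' rate name, the currency codes, and passing the h5py.File root as the writer's group are assumptions for illustration, not confirmed by the source.

import h5py
import numpy as np
import pandas as pd

dts = pd.date_range('2020-01-01', periods=3, tz='UTC')
currencies = np.array(['EUR', 'GBP'], dtype=object)
rates = np.full((len(dts), len(currencies)), 1.1)  # one column of rates per currency

with h5py.File('fx_rates.h5', 'w') as f:
    writer = HDF5FXRateWriter(f)
    writer.write(dts, currencies, [('mid', 'USD', rates)])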
markvdw/gpflow-inter-domain
GPflow/param.py
DataHolder._get_type
python
def _get_type(self, array):
    if any([array.dtype == np.dtype(t) for t in [np.float32, np.float64]]):
        return np_float_type
    elif any([array.dtype == np.dtype(t) for t in [np.int16, np.int32, np.int64]]):
        return np.int32
    else:
        raise NotImplementedError("unknown dtype")
Work out a sensible type for the array. If the default float type is float32, downcast 64-bit floats to float32. For ints, assume int32.
https://github.com/markvdw/gpflow-inter-domain/blob/0cf621e1896a3e1996f863b586c6cd2f795dd9f0/GPflow/param.py#L421-L431
from __future__ import absolute_import from contextlib import contextmanager from functools import wraps import numpy as np import pandas as pd import tensorflow as tf from . import transforms, session from ._settings import settings from .scoping import NameScoped float_type = settings.dtypes.float_type np_float_type = np.float32 if float_type is tf.float32 else np.float64 recompile_keys = ['prior', 'transform', 'fixed'] class Parentable(object): def __init__(self): self._parent = None @property def highest_parent(self): if self._parent is None: return self else: return self._parent.highest_parent @property def name(self): if self._parent is None: return 'unnamed' if isinstance(self._parent, ParamList): return 'item%i' % self._parent._list.index(self) matches = [key for key, value in self._parent.__dict__.items() if value is self] if len(matches) == 0: raise ValueError("mis-specified parent. This Param's\ _parent does not contain a reference to it.") if len(matches) > 1: raise ValueError("This Param appears to be doubly\ referenced by a parent") return matches[0] @property def long_name(self): if self._parent is None: return self.name return self._parent.long_name + '.' + self.name def __getstate__(self): d = self.__dict__.copy() d.pop('_parent') return d def __setstate__(self, d): self.__dict__.update(d) self._parent = None class Param(Parentable): def __init__(self, array, transform=transforms.Identity()): Parentable.__init__(self) self._array = np.asarray(np.atleast_1d(array), dtype=np_float_type) self.transform = transform self._tf_array = None self._log_jacobian = None self.prior = None self.fixed = False @property def value(self): return self._array.copy() def get_parameter_dict(self, d): d[self.long_name] = self.value def set_parameter_dict(self, d): self._array[...] = d[self.long_name] def get_samples_df(self, samples): if self.fixed: return pd.Series([self.value for _ in range(samples.shape[0])], name=self.long_name) start, _ = self.highest_parent.get_param_index(self) end = start + self.size samples = samples[:, start:end] samples = samples.reshape((samples.shape[0],) + (self.transform.free_state_size(self.shape),)) samples = np.atleast_1d(np.concatenate( [self.transform.forward(s).reshape((1,) + self.shape) for s in samples], 0)) return pd.Series([v for v in samples], name=self.long_name) def make_tf_array(self, free_array): if self.fixed: self._tf_array = tf.placeholder(dtype=float_type, shape=self._array.shape, name=self.name) self._log_jacobian = 0.0 return 0 free_size = self.transform.free_state_size(self.shape) x_free = free_array[:free_size] mapped_array = self.transform.tf_forward(x_free) self._tf_array = tf.reshape(mapped_array, self.shape) self._log_jacobian = self.transform.tf_log_jacobian(x_free) return free_size def get_free_state(self): if self.fixed: return np.empty((0,), np_float_type) return self.transform.backward(self.value.flatten()) def get_feed_dict_keys(self): d = {} if self.fixed: d[self] = self._tf_array return d def update_feed_dict(self, key_dict, feed_dict): if self.fixed: feed_dict[key_dict[self]] = self.value def set_state(self, x): if self.fixed: return 0 free_size = self.transform.free_state_size(self.shape) new_array = self.transform.forward(x[:free_size]).reshape(self.shape) assert new_array.shape == self.shape self._array[...] 
= new_array return free_size def randomize(self, distributions={}, skipfixed=True): if not (skipfixed and self.fixed): if self in distributions.keys(): self._array = distributions[self].sample(self.shape) else: try: self._array = self.prior.sample(self.shape) except AttributeError: randn = np.random.randn( self.transform.free_state_size(self.shape)) self._array = self.transform.forward(randn).reshape(self.shape) def build_prior(self): if self.prior is None: return tf.constant(0.0, float_type) elif self._tf_array is None: raise ValueError("tensorflow array has not been initialized") else: return self.prior.logp(self._tf_array) + self._log_jacobian def __setattr__(self, key, value): object.__setattr__(self, key, value) if key in recompile_keys: self.highest_parent._needs_recompile = True def __str__(self, prepend=''): return prepend + '\033[1m' + self.name + '\033[0m' + ' transform:' + str(self.transform) + ' prior:' + str(self.prior) + (' [FIXED]' if self.fixed else '') + '\n' + str(self.value) @property def size(self): return self._array.size @property def shape(self): return self._array.shape def _html_table_rows(self, name_prefix=''): html = "<tr>" html += "<td>{0}</td>".format(name_prefix + self.name) html += "<td>{0}</td>".format(str(self._array).replace('\n', '</br>')) html += "<td>{0}</td>".format(str(self.prior)) html += "<td>{0}</td>".format('[FIXED]' if self.fixed else str(self.transform)) html += "</tr>" return html def __getstate__(self): d = Parentable.__getstate__(self) for key in ['_tf_array', '_log_jacobian']: d.pop(key, None) return d def __setstate__(self, d): Parentable.__setstate__(self, d) self._log_jacobian = None self.fixed = self.fixed class DataHolder(Parentable): def __init__(self, array, on_shape_change='raise'): Parentable.__init__(self) dt = self._get_type(array) self._array = np.asarray(array, dtype=dt) assert on_shape_change in ['raise', 'pass', 'recompile'] self.on_shape_change = on_shape_change
Apache License 2.0
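Illustrative only: a sketch of the dtype mapping _get_type applies when a DataHolder is constructed; the concrete float dtype depends on GPflow's settings.dtypes.float_type.

import numpy as np

d = DataHolder(np.arange(5))   # integer input: stored as int32
print(d._array.dtype)          # int32

d = DataHolder(np.zeros(3))    # float64 input: downcast to float32 only if float_type is tf.float32
print(d._array.dtype)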
mic-dkfz/trixi
trixi/util/metrics.py
get_roc_curve
python
def get_roc_curve(tensor, labels, reduce_to_n_samples=None, use_sub_process=False,
                  results_fn=lambda x, *y, **z: None):
    def __get_roc_curve(tensor, labels, reduce_to_n_samples=None,
                        results_fn=lambda x, *y, **z: None):
        if not isinstance(labels, list):
            labels = labels.flatten()
        if not isinstance(tensor, list):
            tensor = tensor.flatten()

        fpr, tpr, thresholds = metrics.roc_curve(labels, tensor)
        if reduce_to_n_samples is not None:
            fpr = [np.mean(x) for x in np.array_split(fpr, reduce_to_n_samples)]
            tpr = [np.mean(x) for x in np.array_split(tpr, reduce_to_n_samples)]
        results_fn(tpr, fpr)

        return tpr, fpr

    if use_sub_process:
        p = Process(target=__get_roc_curve,
                    kwargs=dict(tensor=tensor,
                                labels=labels,
                                reduce_to_n_samples=reduce_to_n_samples,
                                results_fn=results_fn))
        atexit.register(p.terminate)
        p.start()
    else:
        try:
            return __get_roc_curve(tensor=tensor,
                                   labels=labels,
                                   reduce_to_n_samples=reduce_to_n_samples,
                                   results_fn=results_fn)
        except Exception as e:
            warnings.warn("Sth went wrong with calculating the roc curve")
Displays a ROC curve given a tensor with scores and the corresponding labels.

Args:
    tensor: Tensor with scores (e.g. class probability)
    labels: Labels of the samples to which the scores match
    reduce_to_n_samples: Reduce/downsample to n samples for fewer data points
    use_sub_process: Use a sub-process to do the processing; if True, nothing is returned
    results_fn: Function which is called with the results/return values. Expected signature: f(tpr, fpr)
https://github.com/mic-dkfz/trixi/blob/193c6cfcbe6c28576d3ee745f8a23f88a8029029/trixi/util/metrics.py#L10-L57
import atexit import warnings from collections import OrderedDict from multiprocessing import Process import numpy as np from sklearn import metrics
MIT License
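Illustrative only: a minimal call with made-up scores and labels; with use_sub_process left at False, the function returns (tpr, fpr) directly.

import numpy as np

scores = np.array([0.1, 0.4, 0.35, 0.8])
labels = np.array([0, 0, 1, 1])

tpr, fpr = get_roc_curve(scores, labels)  # lists of true/false positive rates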
asharakeh/bayes-od-rc
src/core/evaluation_utils_2d.py
two_d_iou
python
def two_d_iou(box, boxes):
    iou = np.zeros(len(boxes), np.float64)

    x1_int = np.maximum(box[0], boxes[:, 0])
    y1_int = np.maximum(box[1], boxes[:, 1])
    x2_int = np.minimum(box[2], boxes[:, 2])
    y2_int = np.minimum(box[3], boxes[:, 3])

    w_int = np.maximum(x2_int - x1_int + 1., 0.)
    h_int = np.maximum(y2_int - y1_int + 1., 0.)

    non_empty = np.logical_and(w_int > 0, h_int > 0)

    if non_empty.any():
        intersection_area = np.multiply(w_int[non_empty], h_int[non_empty])

        box_area = (box[2] - box[0] + 1.) * (box[3] - box[1] + 1.)
        boxes_area = np.multiply(
            boxes[non_empty, 2] - boxes[non_empty, 0] + 1.,
            boxes[non_empty, 3] - boxes[non_empty, 1] + 1.)

        union_area = box_area + boxes_area - intersection_area

        iou[non_empty] = intersection_area / union_area

    return iou.round(3)
Compute 2D IOU between a 2D bounding box 'box' and a list of boxes.

:param box: a numpy array in the form of [x1, y1, x2, y2] where (x1,y1) are image coordinates of the top-left corner of the bounding box, and (x2,y2) are the image coordinates of the bottom-right corner of the bounding box.
:param boxes: a numpy array formed as a list of boxes in the form [[x1, y1, x2, y2], [x1, y1, x2, y2]].

:return iou: a numpy array containing 2D IOUs between box and every element in numpy array boxes.
https://github.com/asharakeh/bayes-od-rc/blob/3f478e5c9a593ee03d7b63d533d46d87d739fc26/src/core/evaluation_utils_2d.py#L12-L47
import numpy as np from collections import defaultdict from scipy.stats import multivariate_normal from scipy.spatial.distance import cdist _HEATMAP_THRESH = 0.0027 _2D_MAH_DIST_THRESH = 3.439 _SMALL_VAL = 1e-14
MIT License
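Illustrative only: a worked example of the IoU computation above, using the function's +1 pixel convention (a box [0, 0, 9, 9] covers 10 x 10 = 100 pixels).

import numpy as np

box = np.array([0, 0, 9, 9])
boxes = np.array([[0, 0, 9, 9],       # identical box        -> IoU 1.0
                  [5, 5, 14, 14],     # 5x5 = 25 px overlap  -> 25 / 175 ~= 0.143
                  [20, 20, 29, 29]])  # no overlap           -> IoU 0.0

print(two_d_iou(box, boxes))          # [1.    0.143 0.   ]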