Columns:
repository_name: string, length 7-107
function_path: string, length 4-190
function_identifier: string, length 1-236
language: string, 1 class
function: string, length 9-647k
docstring: string, length 5-488k
function_url: string, length 71-285
context: string, length 0-2.51M
license: string, 5 classes
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/encoding/encodings/muxings/text/text_api.py
TextApi.list
python
def list(self, encoding_id, query_params=None, **kwargs):
    return self.api_client.get(
        '/encoding/encodings/{encoding_id}/muxings/text',
        path_params={'encoding_id': encoding_id},
        query_params=query_params,
        pagination_response=True,
        type=TextMuxing,
        **kwargs
    )
List Text muxings

:param encoding_id: Id of the encoding.
:type encoding_id: string_types, required
:param query_params: Query parameters
:type query_params: TextMuxingListQueryParams
:return: List of Text muxings
:rtype: TextMuxing
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/encoding/encodings/muxings/text/text_api.py#L92-L111
from __future__ import absolute_import from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase from bitmovin_api_sdk.common.poscheck import poscheck_except from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope from bitmovin_api_sdk.models.response_error import ResponseError from bitmovin_api_sdk.models.text_muxing import TextMuxing from bitmovin_api_sdk.encoding.encodings.muxings.text.customdata.customdata_api import CustomdataApi from bitmovin_api_sdk.encoding.encodings.muxings.text.text_muxing_list_query_params import TextMuxingListQueryParams class TextApi(BaseApi): @poscheck_except(2) def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None): super(TextApi, self).__init__( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) self.customdata = CustomdataApi( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) def create(self, encoding_id, text_muxing, **kwargs): return self.api_client.post( '/encoding/encodings/{encoding_id}/muxings/text', text_muxing, path_params={'encoding_id': encoding_id}, type=TextMuxing, **kwargs ) def delete(self, encoding_id, muxing_id, **kwargs): return self.api_client.delete( '/encoding/encodings/{encoding_id}/muxings/text/{muxing_id}', path_params={'encoding_id': encoding_id, 'muxing_id': muxing_id}, type=BitmovinResponse, **kwargs ) def get(self, encoding_id, muxing_id, **kwargs): return self.api_client.get( '/encoding/encodings/{encoding_id}/muxings/text/{muxing_id}', path_params={'encoding_id': encoding_id, 'muxing_id': muxing_id}, type=TextMuxing, **kwargs )
MIT License
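A minimal usage sketch for the TextApi.list entry above, based on the constructor and method signatures shown in its context; the API key and encoding id are placeholders, and the pagination response exposing .items is the SDK's usual shape rather than something this record confirms:

from bitmovin_api_sdk.encoding.encodings.muxings.text.text_api import TextApi

# Placeholder credentials and ids; replace with real values.
text_api = TextApi(api_key='<API_KEY>')
encoding_id = '<ENCODING_ID>'

# list() issues GET /encoding/encodings/{encoding_id}/muxings/text and
# deserialises each entry into a TextMuxing model.
page = text_api.list(encoding_id)
for muxing in page.items:
    print(muxing.id)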
cybertronai/ncluster
ncluster/ncluster_globals.py
get_logdir
python
def get_logdir(run_name: str):
    if not run_name:
        return '/tmp'
    return run_logdir_dict.get(run_name, '')
Returns logdir for this run. It is the job of logdir creator to set logdir for this run
https://github.com/cybertronai/ncluster/blob/9c2a7fb9677dba9afe48c94f35bde7c41e4cc75f/ncluster/ncluster_globals.py#L126-L131
import os import sys from typing import Dict, Any, List from . import aws_backend as backend from . import util task_launched = False task_counter = 0 job_counter = 0 run_counter = 0 run_dict: Dict[str, Any] = {} task_run_dict: Dict["backend.Task", str] = {} run_task_dict: Dict[str, List["backend.Task"]] = {} run_logdir_dict: Dict[str, str] = {} tasks_seen: List["backend.Task"] = [] enforce_placement_group_val = False def enforce_placement_group(): global enforce_placement_group_val enforce_placement_group_val = True def unenforce_placement_group(): global enforce_placement_group_val enforce_placement_group_val = False def is_enforced_placement_group(): return enforce_placement_group_val def auto_assign_task_name_if_needed(name, instance_type='', image_name='', tasks=1): global task_counter if name: return name main_script = os.path.abspath(sys.argv[0]) script_id = util.alphanumeric_hash( f"{main_script}-{instance_type}-{image_name}-{tasks}") name = f"unnamedtask-{task_counter}-{script_id}" task_counter += 1 return name def auto_assign_job_name_if_needed(name): global job_counter if name: return name script_id = util.alphanumeric_hash(sys.argv[0]) name = f"unnamedjob-{job_counter}-{script_id}" job_counter += 1 return name def auto_assign_run_name_if_needed(name): global run_counter if name: return name script_id = util.alphanumeric_hash(sys.argv[0]) name = f"unnamedrun-{run_counter}-{script_id}" run_counter += 1 return name def register_task(task: Any, run_name: str): global task_run_dict, run_task_dict, tasks_seen assert task.name not in tasks_seen tasks_seen.append(task.name) task_run_dict[task] = run_name run_task_list = run_task_dict.get(run_name, []) run_task_list.append(task) def register_run(run: "backend.Run", run_name: str) -> None: print(f"Registering run {run_name}") assert run_name not in run_dict assert run_name run_dict[run_name] = run def is_chief(task: "backend.Task", run_name: str): global run_task_dict if run_name not in run_task_dict: return True task_list = run_task_dict[run_name] assert task in task_list, f"Task {task.name} doesn't belong to run {run_name}" return task_list[0] == task def get_chief(run_name: str): assert run_name in run_task_dict, f"Run {run_name} doesn't exist" tasks = run_task_dict[run_name] assert tasks, f"Run {run_name} had tasks {tasks}, expected non-empty list" return tasks[0]
MIT License
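A small sketch of the lookup behaviour described by the get_logdir docstring above, assuming the ncluster package imports cleanly (the module pulls in its AWS backend); the run name and path are made up:

from ncluster import ncluster_globals

print(ncluster_globals.get_logdir(''))        # empty run name falls back to '/tmp'
print(ncluster_globals.get_logdir('my-run'))  # '' until a logdir creator registers one

# The module-level dict is what the "logdir creator" is expected to fill in.
ncluster_globals.run_logdir_dict['my-run'] = '/ncluster/runs/my-run'
print(ncluster_globals.get_logdir('my-run'))  # '/ncluster/runs/my-run'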
hhstore/annotated-py-asyncio
asyncio-3.4.3/asyncio/tasks.py
async
python
def async(coro_or_future, *, loop=None):
    if isinstance(coro_or_future, futures.Future):
        if loop is not None and loop is not coro_or_future._loop:
            raise ValueError('loop argument must agree with Future')
        return coro_or_future
    elif coroutines.iscoroutine(coro_or_future):
        if loop is None:
            loop = events.get_event_loop()
        task = loop.create_task(coro_or_future)
        if task._source_traceback:
            del task._source_traceback[-1]
        return task
    else:
        raise TypeError('A Future or coroutine is required')
Wrap a coroutine in a future. If the argument is a Future, it is returned directly.
https://github.com/hhstore/annotated-py-asyncio/blob/ab4a5574396cc9579318bf68e51f3da9696feaa4/asyncio-3.4.3/asyncio/tasks.py#L589-L607
__all__ = ['Task', 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', 'wait', 'wait_for', 'as_completed', 'sleep', 'async', 'gather', 'shield', ] import concurrent.futures import functools import inspect import linecache import sys import traceback import weakref from . import coroutines from . import events from . import futures from .coroutines import coroutine _PY34 = (sys.version_info >= (3, 4)) class Task(futures.Future): _all_tasks = weakref.WeakSet() _current_tasks = {} _log_destroy_pending = True @classmethod def current_task(cls, loop=None): if loop is None: loop = events.get_event_loop() return cls._current_tasks.get(loop) @classmethod def all_tasks(cls, loop=None): if loop is None: loop = events.get_event_loop() return {t for t in cls._all_tasks if t._loop is loop} def __init__(self, coro, *, loop=None): assert coroutines.iscoroutine(coro), repr(coro) super().__init__(loop=loop) if self._source_traceback: del self._source_traceback[-1] self._coro = iter(coro) self._fut_waiter = None self._must_cancel = False self._loop.call_soon(self._step) self.__class__._all_tasks.add(self) if _PY34: def __del__(self): if self._state == futures._PENDING and self._log_destroy_pending: context = { 'task': self, 'message': 'Task was destroyed but it is pending!', } if self._source_traceback: context['source_traceback'] = self._source_traceback self._loop.call_exception_handler(context) futures.Future.__del__(self) def _repr_info(self): info = super()._repr_info() if self._must_cancel: info[0] = 'cancelling' coro = coroutines._format_coroutine(self._coro) info.insert(1, 'coro=<%s>' % coro) if self._fut_waiter is not None: info.insert(2, 'wait_for=%r' % self._fut_waiter) return info def get_stack(self, *, limit=None): frames = [] f = self._coro.gi_frame if f is not None: while f is not None: if limit is not None: if limit <= 0: break limit -= 1 frames.append(f) f = f.f_back frames.reverse() elif self._exception is not None: tb = self._exception.__traceback__ while tb is not None: if limit is not None: if limit <= 0: break limit -= 1 frames.append(tb.tb_frame) tb = tb.tb_next return frames def print_stack(self, *, limit=None, file=None): extracted_list = [] checked = set() for f in self.get_stack(limit=limit): lineno = f.f_lineno co = f.f_code filename = co.co_filename name = co.co_name if filename not in checked: checked.add(filename) linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) extracted_list.append((filename, lineno, name, line)) exc = self._exception if not extracted_list: print('No stack for %r' % self, file=file) elif exc is not None: print('Traceback for %r (most recent call last):' % self, file=file) else: print('Stack for %r (most recent call last):' % self, file=file) traceback.print_list(extracted_list, file=file) if exc is not None: for line in traceback.format_exception_only(exc.__class__, exc): print(line, file=file, end='') def cancel(self): if self.done(): return False if self._fut_waiter is not None: if self._fut_waiter.cancel(): return True self._must_cancel = True return True def _step(self, value=None, exc=None): assert not self.done(), '_step(): already done: {!r}, {!r}, {!r}'.format(self, value, exc) if self._must_cancel: if not isinstance(exc, futures.CancelledError): exc = futures.CancelledError() self._must_cancel = False coro = self._coro self._fut_waiter = None self.__class__._current_tasks[self._loop] = self try: if exc is not None: result = coro.throw(exc) elif value is not None: result = coro.send(value) else: result = 
next(coro) except StopIteration as exc: self.set_result(exc.value) except futures.CancelledError as exc: super().cancel() except Exception as exc: self.set_exception(exc) except BaseException as exc: self.set_exception(exc) raise else: if isinstance(result, futures.Future): if result._blocking: result._blocking = False result.add_done_callback(self._wakeup) self._fut_waiter = result if self._must_cancel: if self._fut_waiter.cancel(): self._must_cancel = False else: self._loop.call_soon( self._step, None, RuntimeError( 'yield was used instead of yield from ' 'in task {!r} with {!r}'.format(self, result))) elif result is None: self._loop.call_soon(self._step) elif inspect.isgenerator(result): self._loop.call_soon( self._step, None, RuntimeError( 'yield was used instead of yield from for ' 'generator in task {!r} with {}'.format( self, result))) else: self._loop.call_soon( self._step, None, RuntimeError( 'Task got bad yield: {!r}'.format(result))) finally: self.__class__._current_tasks.pop(self._loop) self = None def _wakeup(self, future): try: value = future.result() except Exception as exc: self._step(None, exc) else: self._step(value, None) self = None FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION ALL_COMPLETED = concurrent.futures.ALL_COMPLETED @coroutine def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED): if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs): raise TypeError("expect a list of futures, not %s" % type(fs).__name__) if not fs: raise ValueError('Set of coroutines/Futures is empty.') if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED): raise ValueError('Invalid return_when value: {}'.format(return_when)) if loop is None: loop = events.get_event_loop() fs = {async(f, loop=loop) for f in set(fs)} return (yield from _wait(fs, timeout, return_when, loop)) def _release_waiter(waiter, *args): if not waiter.done(): waiter.set_result(None) @coroutine def wait_for(fut, timeout, *, loop=None): if loop is None: loop = events.get_event_loop() if timeout is None: return (yield from fut) waiter = futures.Future(loop=loop) timeout_handle = loop.call_later(timeout, _release_waiter, waiter) cb = functools.partial(_release_waiter, waiter) fut = async(fut, loop=loop) fut.add_done_callback(cb) try: try: yield from waiter except futures.CancelledError: fut.remove_done_callback(cb) fut.cancel() raise if fut.done(): return fut.result() else: fut.remove_done_callback(cb) fut.cancel() raise futures.TimeoutError() finally: timeout_handle.cancel() @coroutine def _wait(fs, timeout, return_when, loop): assert fs, 'Set of Futures is empty.' 
waiter = futures.Future(loop=loop) timeout_handle = None if timeout is not None: timeout_handle = loop.call_later(timeout, _release_waiter, waiter) counter = len(fs) def _on_completion(f): nonlocal counter counter -= 1 if (counter <= 0 or return_when == FIRST_COMPLETED or return_when == FIRST_EXCEPTION and (not f.cancelled() and f.exception() is not None)): if timeout_handle is not None: timeout_handle.cancel() if not waiter.done(): waiter.set_result(None) for f in fs: f.add_done_callback(_on_completion) try: yield from waiter finally: if timeout_handle is not None: timeout_handle.cancel() done, pending = set(), set() for f in fs: f.remove_done_callback(_on_completion) if f.done(): done.add(f) else: pending.add(f) return done, pending def as_completed(fs, *, loop=None, timeout=None): if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs): raise TypeError("expect a list of futures, not %s" % type(fs).__name__) loop = loop if loop is not None else events.get_event_loop() todo = {async(f, loop=loop) for f in set(fs)} from .queues import Queue done = Queue(loop=loop) timeout_handle = None def _on_timeout(): for f in todo: f.remove_done_callback(_on_completion) done.put_nowait(None) todo.clear() def _on_completion(f): if not todo: return todo.remove(f) done.put_nowait(f) if not todo and timeout_handle is not None: timeout_handle.cancel() @coroutine def _wait_for_one(): f = yield from done.get() if f is None: raise futures.TimeoutError return f.result() for f in todo: f.add_done_callback(_on_completion) if todo and timeout is not None: timeout_handle = loop.call_later(timeout, _on_timeout) for _ in range(len(todo)): yield _wait_for_one() @coroutine def sleep(delay, result=None, *, loop=None): future = futures.Future(loop=loop) h = future._loop.call_later(delay, future._set_result_unless_cancelled, result) try: return (yield from future) finally: h.cancel()
MIT License
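The async() helper above is the asyncio 3.4.3 spelling of what later shipped as asyncio.ensure_future (the old name clashes with the async keyword). A sketch of the same Future-pass-through / coroutine-wrapping behaviour using the modern name:

import asyncio

async def work():
    await asyncio.sleep(0.1)
    return 42

async def main():
    # A coroutine gets wrapped in a Task scheduled on the running loop...
    task = asyncio.ensure_future(work())
    # ...while a Future (Task is a Future subclass) is returned unchanged.
    assert asyncio.ensure_future(task) is task
    return await task

print(asyncio.run(main()))  # 42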
davidfischer/pycoreutils
pycoreutils/utils.py
getuserhome
python
def getuserhome():
    if 'HOME' in os.environ:
        return os.environ['HOME']
    if 'HOMEPATH' in os.environ:
        return os.environ['HOMEPATH']
Returns the home-directory of the current user
https://github.com/davidfischer/pycoreutils/blob/a5f72d4765b2340eb2ac96099e8de87214a908fa/pycoreutils/utils.py#L24-L31
import os import signal import stat def getsignals(): signallist = [ 'ABRT', 'CONT', 'IO', 'PROF', 'SEGV', 'TSTP', 'USR2', '_DFL', 'ALRM', 'FPE', 'IOT', 'PWR', 'STOP', 'TTIN', 'VTALRM', '_IGN', 'BUS', 'HUP', 'KILL', 'QUIT', 'SYS', 'TTOU', 'WINCH', 'CHLD', 'ILL', 'PIPE', 'RTMAX', 'TERM', 'URG', 'XCPU', 'CLD', 'INT', 'POLL', 'RTMIN', 'TRAP', 'USR1', 'XFSZ', ] signals = {} for signame in signallist: if hasattr(signal, 'SIG' + signame): signals[signame] = getattr(signal, 'SIG' + signame) return signals
MIT License
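getuserhome() above silently returns None when neither HOME nor HOMEPATH is set. A hedged alternative (not the pycoreutils implementation) that keeps the same lookup order but falls back to os.path.expanduser:

import os

def get_user_home():
    # Same precedence as pycoreutils' getuserhome(), plus an explicit fallback.
    for var in ('HOME', 'HOMEPATH'):
        if var in os.environ:
            return os.environ[var]
    return os.path.expanduser('~')

print(get_user_home())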
explosion/spacy-transformers
spacy_transformers/architectures.py
transformer_tok2vec_v2
python
def transformer_tok2vec_v2(
    name: str,
    get_spans,
    tokenizer_config: dict,
    pooling: Model[Ragged, Floats2d],
    grad_factor: float = 1.0,
    transformer_config: dict = {},
) -> Model[List[Doc], List[Floats2d]]:
    return chain(
        TransformerModel(name, get_spans, tokenizer_config, transformer_config),
        split_trf_batch(),
        trfs2arrays(pooling, grad_factor),
    )
Use a transformer as a "Tok2Vec" layer directly. This does not allow multiple components to share the transformer weights, and does not allow the transformer to set annotations into the `Doc` object, but it's a simpler solution if you only need the transformer within one component.

get_spans (Callable[[List[Doc]], List[List[Span]]]): A function to extract spans from the batch of Doc objects. See the "TransformerModel" layer for details.
tokenizer_config (dict): Settings to pass to the transformers tokenizer.
pooling (Model[Ragged, Floats2d]): A reduction layer used to calculate the token vectors based on zero or more wordpiece vectors. If in doubt, mean pooling (see `thinc.layers.reduce_mean`) is usually a good choice.
grad_factor (float): Reweight gradients from the component before passing them to the transformer. You can set this to 0 to "freeze" the transformer weights with respect to the component, or to make it learn more slowly. Leaving it at 1.0 is usually fine.
transformer_config (dict): Settings to pass to the transformer's forward pass.
https://github.com/explosion/spacy-transformers/blob/66a3c1c65b59503c73bbebf39cc994d268990476/spacy_transformers/architectures.py#L79-L110
from typing import List, Callable from thinc.api import Model, chain from thinc.types import Ragged, Floats2d from spacy.tokens import Doc from .layers import TransformerModel, TransformerListener from .layers import trfs2arrays, split_trf_batch from .util import registry @registry.architectures.register("spacy-transformers.TransformerListener.v1") def transformer_listener_tok2vec_v1( pooling: Model[Ragged, Floats2d], grad_factor: float = 1.0, upstream: str = "*" ) -> Model[List[Doc], List[Floats2d]]: listener = TransformerListener(upstream_name=upstream) model = chain(listener, trfs2arrays(pooling, grad_factor)) model.set_ref("listener", listener) return model @registry.architectures.register("spacy-transformers.Tok2VecTransformer.v1") def transformer_tok2vec_v1( name: str, get_spans, tokenizer_config: dict, pooling: Model[Ragged, Floats2d], grad_factor: float = 1.0, ) -> Model[List[Doc], List[Floats2d]]: return chain( TransformerModel(name, get_spans, tokenizer_config), split_trf_batch(), trfs2arrays(pooling, grad_factor), ) @registry.architectures.register("spacy-transformers.Tok2VecTransformer.v2")
MIT License
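A minimal sketch of wiring transformer_tok2vec_v2 directly in Python, per the docstring above: the checkpoint name and tokenizer settings are placeholders, the span getter is the simplest possible one (a single span per Doc), and pooling uses thinc's reduce_mean as the docstring suggests. Building the model downloads the checkpoint.

from thinc.api import reduce_mean
from spacy_transformers.architectures import transformer_tok2vec_v2

def get_doc_spans(docs):
    # Simplest span getter: one span covering each whole Doc.
    return [[doc[:]] for doc in docs]

tok2vec = transformer_tok2vec_v2(
    name="distilbert-base-uncased",       # example checkpoint, not prescribed by the repo
    get_spans=get_doc_spans,
    tokenizer_config={"use_fast": True},  # assumed setting; passed straight to transformers
    pooling=reduce_mean(),
    grad_factor=1.0,
)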
thingsboard/python_tb_rest_client
tb_rest_client/models/models_pe/customer.py
Customer.parent_customer_id
python
def parent_customer_id(self): return self._parent_customer_id
Gets the parent_customer_id of this Customer. # noqa: E501 :return: The parent_customer_id of this Customer. # noqa: E501 :rtype: CustomerId
https://github.com/thingsboard/python_tb_rest_client/blob/87c6a3703974fc8a86e4c72c444168ee2b758ecb/tb_rest_client/models/models_pe/customer.py#L357-L364
import pprint import re import six class Customer(object): swagger_types = { 'additional_info': 'str', 'address': 'str', 'address2': 'str', 'city': 'str', 'country': 'str', 'created_time': 'int', 'customer_id': 'CustomerId', 'email': 'str', 'id': 'CustomerId', 'name': 'str', 'owner_id': 'EntityId', 'parent_customer_id': 'CustomerId', 'phone': 'str', 'state': 'str', 'tenant_id': 'TenantId', 'title': 'str', 'zip': 'str' } attribute_map = { 'additional_info': 'additionalInfo', 'address': 'address', 'address2': 'address2', 'city': 'city', 'country': 'country', 'created_time': 'createdTime', 'customer_id': 'customerId', 'email': 'email', 'id': 'id', 'name': 'name', 'owner_id': 'ownerId', 'parent_customer_id': 'parentCustomerId', 'phone': 'phone', 'state': 'state', 'tenant_id': 'tenantId', 'title': 'title', 'zip': 'zip' } def __init__(self, additional_info=None, address=None, address2=None, city=None, country=None, created_time=None, customer_id=None, email=None, id=None, name=None, owner_id=None, parent_customer_id=None, phone=None, state=None, tenant_id=None, title=None, zip=None): self._additional_info = None self._address = None self._address2 = None self._city = None self._country = None self._created_time = None self._customer_id = None self._email = None self._id = None self._name = None self._owner_id = None self._parent_customer_id = None self._phone = None self._state = None self._tenant_id = None self._title = None self._zip = None self.discriminator = None if additional_info is not None: self.additional_info = additional_info if address is not None: self.address = address if address2 is not None: self.address2 = address2 if city is not None: self.city = city if country is not None: self.country = country if created_time is not None: self.created_time = created_time if customer_id is not None: self.customer_id = customer_id if email is not None: self.email = email if id is not None: self.id = id if name is not None: self.name = name if owner_id is not None: self.owner_id = owner_id if parent_customer_id is not None: self.parent_customer_id = parent_customer_id if phone is not None: self.phone = phone if state is not None: self.state = state if tenant_id is not None: self.tenant_id = tenant_id if title is not None: self.title = title if zip is not None: self.zip = zip @property def additional_info(self): return self._additional_info @additional_info.setter def additional_info(self, additional_info): self._additional_info = additional_info @property def address(self): return self._address @address.setter def address(self, address): self._address = address @property def address2(self): return self._address2 @address2.setter def address2(self, address2): self._address2 = address2 @property def city(self): return self._city @city.setter def city(self, city): self._city = city @property def country(self): return self._country @country.setter def country(self, country): self._country = country @property def created_time(self): return self._created_time @created_time.setter def created_time(self, created_time): self._created_time = created_time @property def customer_id(self): return self._customer_id @customer_id.setter def customer_id(self, customer_id): self._customer_id = customer_id @property def email(self): return self._email @email.setter def email(self, email): self._email = email @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def 
owner_id(self): return self._owner_id @owner_id.setter def owner_id(self, owner_id): self._owner_id = owner_id @property
Apache License 2.0
gammapy/gamma-cat
gammacat/input.py
InputData.dataset_file_list
python
def dataset_file_list(self):
    path = gammacat_info.base_dir / 'input/data'
    paths = path.glob('*/*/tev*.yaml')
    return sorted(paths)
List of all dataset files in the input folder.
https://github.com/gammapy/gamma-cat/blob/e4d09c06e74683e41257837c0d036f7d9b32f752/gammacat/input.py#L387-L391
import logging from collections import OrderedDict from pathlib import Path import urllib.parse from astropy.table import Table from .info import gammacat_info from .utils import load_yaml, NA, validate_schema from .sed import SED from .lightcurve import LightCurve __all__ = [ 'BasicSourceInfo', 'BasicSourceList', 'DatasetSourceInfo', 'InputData', 'InputDataset', 'InputDatasetCollection', ] log = logging.getLogger(__name__) class BasicSourceInfo: schema = load_yaml(gammacat_info.base_dir / 'input/schemas/basic_source_info.schema.yaml') def __init__(self, data, path): self.data = data self.path = path @classmethod def read(cls, path): path = Path(path) data = load_yaml(path) return cls(data=data, path=path) def __repr__(self): return 'BasicSourceInfo(source_id={})'.format(repr(self.data['source_id'])) def to_dict(self, filled=False): data = OrderedDict() if filled: for name, spec in self.schema['properties'].items(): try: datatype = spec['type'] except KeyError: datatype = 'string' if name == 'pos': continue try: data[name] = NA.fill_value[datatype] except TypeError: data[name] = NA.fill_value[datatype[0]] data.update(self.data) if data['reference_ids'] is None or data['reference_ids'][0] is None: data['reference_ids'] = '' else: data['reference_ids'] = data['reference_ids'] data.pop('pos', None) return data def validate(self): validate_schema(path=self.path, data=self.data, schema=self.schema) class DatasetSourceInfo: schema = load_yaml(gammacat_info.base_dir / 'input/schemas/dataset_source_info.schema.yaml') def __init__(self, data, path): self.data = data self.path = path @classmethod def read(cls, path): path = Path(path) data = load_yaml(path) return cls(data=data, path=path) def __repr__(self): return 'DatasetSourceInfo(source_id={}, reference_id={})'.format( repr(self.data['source_id']), repr(self.data['reference_id']), ) def validate(self): validate_schema(path=self.path, data=self.data, schema=self.schema) class InputDataset: def __init__(self, reference_id, path, sources): log.debug(f'Creating InputDataset for path={path}') self.reference_id = reference_id self.path = path self.sources = sources _source_ids = [source.data['source_id'] for source in sources] self._sources_by_id = dict(zip(_source_ids, sources)) @classmethod def read(cls, path): path = Path(path) reference_id = urllib.parse.unquote(path.parts[-1]) sources = [] for source_path in sorted(path.glob('tev-*.yaml')): source_info = DatasetSourceInfo.read(source_path) sources.append(source_info) path = '/'.join(path.parts[-2:]) return cls(reference_id=reference_id, path=path, sources=sources) def to_dict(self): sources = [] for source in self.sources: data = OrderedDict() data['source_id'] = source.data['source_id'] data['reference_id'] = source.data['reference_id'] sources.append(data) url = self.path.replace('%26', '%2526') data = OrderedDict() data['reference_id'] = self.reference_id data['path'] = self.path data['url'] = url data['sources'] = sources return data def __repr__(self): return 'InputDataset(reference_id={})'.format(repr(self.reference_id)) def validate(self): [_.validate() for _ in self.sources] def get_source_by_id(self, source_id): try: return self._sources_by_id[source_id] except KeyError: data = dict(source_id=source_id, reference_id='') return DatasetSourceInfo(data=data, path=None) class BasicSourceList: column_spec = load_yaml(gammacat_info.base_dir / 'input/schemas/basic_source_list.schema.yaml') def __init__(self, data): self.data = data @property def source_ids(self): return 
[source.data['source_id'] for source in self.data] def get_source_by_id(self, source_id): idx = self.source_ids.index(source_id) return self.data[idx] @classmethod def read(cls): path = gammacat_info.base_dir / 'input/sources' paths = path.glob('*.yaml') data = [] for path in paths: info = BasicSourceInfo.read(path) data.append(info) return cls(data=data) def to_dict(self): return OrderedDict(data=self.data_per_row(filled=True)) def data_per_row(self, filled=False): return [ source.to_dict(filled=filled) for source in self.data ] def validate(self): log.info('Validating YAML files in `input/sources`') [_.validate() for _ in self.data] class InputInfo: schema = load_yaml(gammacat_info.base_dir / 'input/schemas/dataset_info.schema.yaml') def __init__(self, data, path): self.data = data self.path = path @classmethod def read(cls, path): path = Path(path) data = load_yaml(path) return cls(data=data, path=path) def validate(self): validate_schema(path=self.path, data=self.data, schema=self.schema) class InputInfoCollection: def __init__(self, data): self.data = data @classmethod def read(cls): path = gammacat_info.base_dir / 'input/data' paths = path.glob('*/*/info.yaml') data = [] for path in paths: info = InputInfo.read(path) data.append(info) return cls(data=data) def validate(self): log.info('Validating info.yaml files in `input/data`') [_.validate() for _ in self.data] class InputDatasetCollection: def __init__(self, data): self.data = data @classmethod def read(cls): path = gammacat_info.base_dir / 'input/data' paths = list(path.glob('*/*')) data = [InputDataset.read(path) for path in paths] return cls(data=data) @property def reference_ids(self): return [dataset.reference_id for dataset in self.data] def to_table(self): meta = OrderedDict() meta['name'] = 'todo' meta['version'] = 'todo' meta['url'] = 'todo' rows = [OrderedDict(spam=99)] return Table(rows=rows, meta=meta, masked=True) def to_dict(self): data = [] for dataset in self.data: data.append(dataset.to_dict()) return OrderedDict(data=data) def validate(self): log.info('Validating YAML files in `input/data`') for dataset in self.data: dataset.validate() def get_dataset_by_reference_id(self, reference_id): if reference_id is None: return InputDataset(reference_id=None, path=None, sources=[]) idx = self.reference_ids.index(reference_id) return self.data[idx] class Schemas: def __init__(self, data): self.data = data @classmethod def read(cls): path = gammacat_info.base_dir / 'input/schemas' paths = path.glob('*.yaml') data = [] for path in paths: info = load_yaml(path) data.append(info) return cls(data=data) def validate(self): log.info('Validating YAML files in `input/schemas') class InputData: def __init__(self, schemas=None, sources=None, datasets=None, ref_infos=None, gammacat_dataset_config=None): self.path = gammacat_info.base_dir / 'input' self.schemas = schemas self.sources = sources self.datasets = datasets self.ref_infos = ref_infos self.gammacat_dataset_config = gammacat_dataset_config @property def src_info_list(self): path = gammacat_info.base_dir / 'input/sources' return sorted(path.glob('tev*.yaml')) @property def lightcurve_file_list(self): path = gammacat_info.base_dir / 'input/data' return sorted(path.glob('*/*/tev*lc*.ecsv')) @property def sed_file_list(self): path = gammacat_info.base_dir / 'input/data' paths = path.glob('*/*/tev*sed*.ecsv') return sorted(paths) @property def info_yaml_list(self): path = gammacat_info.base_dir / 'input/data' paths = path.glob('*/*/info.yaml') return sorted(paths) @property
BSD 3-Clause New or Revised License
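The dataset_file_list property above is a glob-and-sort one-liner; a standalone sketch of the same pattern, with a placeholder base directory standing in for gammacat_info.base_dir:

from pathlib import Path

base_dir = Path('/path/to/gamma-cat')  # placeholder for gammacat_info.base_dir

# Every tev*.yaml two directory levels below input/data, in deterministic order.
dataset_files = sorted((base_dir / 'input/data').glob('*/*/tev*.yaml'))
for p in dataset_files[:5]:
    print(p)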
onelogin/python3-saml
src/onelogin/saml2/authn_request.py
OneLogin_Saml2_Authn_Request.get_xml
python
def get_xml(self): return self._authn_request
Returns the XML that will be sent as part of the request :return: XML request body :rtype: string
https://github.com/onelogin/python3-saml/blob/ab62b0d6f3e5ac2ae8e95ce3ed2f85389252a32d/src/onelogin/saml2/authn_request.py#L160-L166
from onelogin.saml2.constants import OneLogin_Saml2_Constants from onelogin.saml2.utils import OneLogin_Saml2_Utils from onelogin.saml2.xml_templates import OneLogin_Saml2_Templates class OneLogin_Saml2_Authn_Request(object): def __init__(self, settings, force_authn=False, is_passive=False, set_nameid_policy=True, name_id_value_req=None): self._settings = settings sp_data = self._settings.get_sp_data() idp_data = self._settings.get_idp_data() security = self._settings.get_security_data() self._id = self._generate_request_id() issue_instant = OneLogin_Saml2_Utils.parse_time_to_SAML(OneLogin_Saml2_Utils.now()) destination = idp_data['singleSignOnService']['url'] provider_name_str = '' organization_data = settings.get_organization() if isinstance(organization_data, dict) and organization_data: langs = organization_data if 'en-US' in langs: lang = 'en-US' else: lang = sorted(langs)[0] display_name = 'displayname' in organization_data[lang] and organization_data[lang]['displayname'] if display_name: provider_name_str = "\n" + ' ProviderName="%s"' % organization_data[lang]['displayname'] force_authn_str = '' if force_authn is True: force_authn_str = "\n" + ' ForceAuthn="true"' is_passive_str = '' if is_passive is True: is_passive_str = "\n" + ' IsPassive="true"' subject_str = '' if name_id_value_req: subject_str = """ <saml:Subject> <saml:NameID Format="%s">%s</saml:NameID> <saml:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer"></saml:SubjectConfirmation> </saml:Subject>""" % (sp_data['NameIDFormat'], name_id_value_req) nameid_policy_str = '' if set_nameid_policy: name_id_policy_format = sp_data['NameIDFormat'] if security['wantNameIdEncrypted']: name_id_policy_format = OneLogin_Saml2_Constants.NAMEID_ENCRYPTED nameid_policy_str = """ <samlp:NameIDPolicy Format="%s" AllowCreate="true" />""" % name_id_policy_format requested_authn_context_str = '' if security['requestedAuthnContext'] is not False: authn_comparison = security['requestedAuthnContextComparison'] if security['requestedAuthnContext'] is True: requested_authn_context_str = """ <samlp:RequestedAuthnContext Comparison="%s"> <saml:AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport</saml:AuthnContextClassRef> </samlp:RequestedAuthnContext>""" % authn_comparison else: requested_authn_context_str = ' <samlp:RequestedAuthnContext Comparison="%s">' % authn_comparison for authn_context in security['requestedAuthnContext']: requested_authn_context_str += '<saml:AuthnContextClassRef>%s</saml:AuthnContextClassRef>' % authn_context requested_authn_context_str += ' </samlp:RequestedAuthnContext>' attr_consuming_service_str = '' if 'attributeConsumingService' in sp_data and sp_data['attributeConsumingService']: attr_consuming_service_str = "\n AttributeConsumingServiceIndex=\"%s\"" % sp_data['attributeConsumingService'].get('index', '1') request = OneLogin_Saml2_Templates.AUTHN_REQUEST % { 'id': self._id, 'provider_name': provider_name_str, 'force_authn_str': force_authn_str, 'is_passive_str': is_passive_str, 'issue_instant': issue_instant, 'destination': destination, 'assertion_url': sp_data['assertionConsumerService']['url'], 'entity_id': sp_data['entityId'], 'subject_str': subject_str, 'nameid_policy_str': nameid_policy_str, 'requested_authn_context_str': requested_authn_context_str, 'attr_consuming_service_str': attr_consuming_service_str, 'acs_binding': sp_data['assertionConsumerService'].get('binding', 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST') } self._authn_request = request def 
_generate_request_id(self): return OneLogin_Saml2_Utils.generate_unique_id() def get_request(self, deflate=True): if deflate: request = OneLogin_Saml2_Utils.deflate_and_base64_encode(self._authn_request) else: request = OneLogin_Saml2_Utils.b64encode(self._authn_request) return request def get_id(self): return self._id
MIT License
google/meta_tagger
layers.py
linear_with_dropout
python
def linear_with_dropout(is_training, inputs, output_size, initializer=None,
                        keep_prob=1, add_bias=True):
    input_size = inputs.get_shape().as_list()[-1]
    if is_training and keep_prob < 1:
        inputs = tf.nn.dropout(inputs, keep_prob)
    shape = tf.shape(inputs)
    output_shape = []
    for i in xrange(len(inputs.get_shape().as_list()) - 1):
        output_shape.append(shape[i])
    output_shape.append(output_size)
    inputs = tf.reshape(inputs, [-1, input_size])
    if not initializer:
        initializer = tf.orthogonal_initializer()
    with tf.variable_scope('Linear'):
        matrix = tf.get_variable(
            'Weights', [input_size, output_size], initializer=initializer)
        if add_bias:
            bias = tf.get_variable(
                'Biases', [output_size], initializer=tf.zeros_initializer())
        else:
            bias = 0
        linear = tf.nn.xw_plus_b(inputs, matrix, bias)
    return tf.reshape(linear, output_shape)
Linear mapping with dropout.
https://github.com/google/meta_tagger/blob/4db1137308835fcd1e30d81ae48a578c2fe674ff/layers.py#L22-L56
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf
Apache License 2.0
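A hedged TF1-style usage sketch for linear_with_dropout above: the shapes are arbitrary, the repo is assumed to be importable as layers, and the xrange alias works around the Python-2-era body under Python 3.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

import layers                 # assumes the meta_tagger repo is on PYTHONPATH
layers.xrange = range         # the original targets Python 2; alias xrange for Python 3

inputs = tf.placeholder(tf.float32, [None, 10, 64])   # batch x time x features

with tf.variable_scope('tagger'):
    # Project 64-d features to 128-d; dropout is applied only because is_training=True.
    hidden = layers.linear_with_dropout(is_training=True, inputs=inputs,
                                        output_size=128, keep_prob=0.8)

print(hidden)   # Tensor of shape (?, 10, 128)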
tuxsoul/bitcoin-tools
transaction.py
dump_transaction
python
def dump_transaction(datadir, db_env, tx_id):
    db = DB(db_env)
    try:
        r = db.open("blkindex.dat", "main", DB_BTREE, DB_THREAD|DB_RDONLY)
    except DBError:
        r = True
    if r is not None:
        logging.error("Couldn't open blkindex.dat/main. Try quitting any running Bitcoin apps.")
        sys.exit(1)
    kds = BCDataStream()
    vds = BCDataStream()
    n_tx = 0
    n_blockindex = 0
    key_prefix = "\x02tx"+(tx_id[-4:].decode('hex_codec')[::-1])
    cursor = db.cursor()
    (key, value) = cursor.set_range(key_prefix)
    while key.startswith(key_prefix):
        kds.clear(); kds.write(key)
        vds.clear(); vds.write(value)
        type = kds.read_string()
        hash256 = (kds.read_bytes(32))
        hash_hex = long_hex(hash256[::-1])
        version = vds.read_uint32()
        tx_pos = _read_CDiskTxPos(vds)
        if (hash_hex.startswith(tx_id) or short_hex(hash256[::-1]).startswith(tx_id)):
            _dump_tx(datadir, hash256, tx_pos)
        (key, value) = cursor.next()
    db.close()
Dump a transaction, given hexadecimal tx_id-- either the full ID OR a short_hex version of the id.
https://github.com/tuxsoul/bitcoin-tools/blob/95d025c2914292558c6323fd88a19756f25558b1/transaction.py#L31-L69
from bsddb.db import * import logging import os.path import sys import time from BCDataStream import * from base58 import public_key_to_bc_address from util import short_hex from deserialize import * def _read_CDiskTxPos(stream): n_file = stream.read_uint32() n_block_pos = stream.read_uint32() n_tx_pos = stream.read_uint32() return (n_file, n_block_pos, n_tx_pos) def _dump_tx(datadir, tx_hash, tx_pos): blockfile = open(os.path.join(datadir, "blk%04d.dat"%(tx_pos[0],)), "rb") ds = BCDataStream() ds.map_file(blockfile, tx_pos[2]) d = parse_Transaction(ds) print deserialize_Transaction(d) ds.close_file() blockfile.close()
MIT License
openstack/zaqar
zaqar/storage/mongodb/topic_messages.py
FIFOMessageController._ensure_indexes
python
def _ensure_indexes(self, collection):
    collection.ensure_index(TTL_INDEX_FIELDS, name='ttl',
                            expireAfterSeconds=0, background=True)
    collection.ensure_index(ACTIVE_INDEX_FIELDS, name='active',
                            background=True)
    collection.ensure_index(COUNTING_INDEX_FIELDS, name='counting',
                            background=True)
    collection.ensure_index(MARKER_INDEX_FIELDS, name='queue_marker',
                            unique=True, background=True)
    collection.ensure_index(TRANSACTION_INDEX_FIELDS, name='transaction',
                            background=True)
Ensures that all indexes are created.
https://github.com/openstack/zaqar/blob/5ec4277546e94e4dd2d1e3c5cde805debb5be1c8/zaqar/storage/mongodb/topic_messages.py#L682-L712
import datetime import time from bson import objectid from oslo_log import log as logging from oslo_utils import timeutils import pymongo.errors import pymongo.read_preferences from zaqar.i18n import _ from zaqar import storage from zaqar.storage import errors from zaqar.storage.mongodb import utils from zaqar.storage import utils as s_utils LOG = logging.getLogger(__name__) MAX_RETRY_POST_DURATION = 45 COUNTER_STALL_WINDOW = 5 ID_INDEX_FIELDS = [('_id', 1)] TTL_INDEX_FIELDS = [ ('e', 1), ] PROJ_TOPIC = utils.PROJ_TOPIC_KEY ACTIVE_INDEX_FIELDS = [ (PROJ_TOPIC, 1), ('k', 1), ] COUNTING_INDEX_FIELDS = [ (PROJ_TOPIC, 1), ] MARKER_INDEX_FIELDS = [ ('k', 1), (PROJ_TOPIC, 1), ] TRANSACTION_INDEX_FIELDS = [ ('tx', 1), ] class MessageController(storage.Message): def __init__(self, *args, **kwargs): super(MessageController, self).__init__(*args, **kwargs) self._num_partitions = self.driver.mongodb_conf.partitions self._topic_ctrl = self.driver.topic_controller self._retry_range = range(self.driver.mongodb_conf.max_attempts) self._collections = [db.messages for db in self.driver.message_databases] for collection in self._collections: self._ensure_indexes(collection) def _ensure_indexes(self, collection): collection.ensure_index(TTL_INDEX_FIELDS, name='ttl', expireAfterSeconds=0, background=True) collection.ensure_index(ACTIVE_INDEX_FIELDS, name='active', background=True) collection.ensure_index(COUNTING_INDEX_FIELDS, name='counting', background=True) collection.ensure_index(MARKER_INDEX_FIELDS, name='queue_marker', background=True) collection.ensure_index(TRANSACTION_INDEX_FIELDS, name='transaction', background=True) def _collection(self, topic_name, project=None): return self._collections[utils.get_partition(self._num_partitions, topic_name, project)] def _backoff_sleep(self, attempt): conf = self.driver.mongodb_conf seconds = utils.calculate_backoff(attempt, conf.max_attempts, conf.max_retry_sleep, conf.max_retry_jitter) time.sleep(seconds) def _purge_topic(self, topic_name, project=None): scope = utils.scope_queue_name(topic_name, project) collection = self._collection(topic_name, project) collection.delete_many({PROJ_TOPIC: scope}) def _list(self, topic_name, project=None, marker=None, echo=False, client_uuid=None, projection=None, include_claimed=False, include_delayed=False, sort=1, limit=None): if sort not in (1, -1): raise ValueError(u'sort must be either 1 (ascending) ' u'or -1 (descending)') now = timeutils.utcnow_ts() query = { PROJ_TOPIC: utils.scope_queue_name(topic_name, project), 'tx': None, } if not echo: query['u'] = {'$ne': client_uuid} if marker is not None: query['k'] = {'$gt': marker} collection = self._collection(topic_name, project) if not include_delayed: query['$or'] = [{'d': {'$lte': now}}, {'d': {'$exists': False}}] cursor = collection.find(query, projection=projection, sort=[('k', sort)]) if limit is not None: cursor.limit(limit) return cursor.hint(ACTIVE_INDEX_FIELDS) def _count(self, topic_name, project=None, include_claimed=False): query = { PROJ_TOPIC: utils.scope_queue_name(topic_name, project), 'tx': None, } collection = self._collection(topic_name, project) return collection.count(filter=query, hint=COUNTING_INDEX_FIELDS) def _active(self, topic_name, marker=None, echo=False, client_uuid=None, projection=None, project=None, limit=None, include_delayed=False): return self._list(topic_name, project=project, marker=marker, echo=echo, client_uuid=client_uuid, projection=projection, include_claimed=False, include_delayed=include_delayed, limit=limit) def 
_inc_counter(self, topic_name, project=None, amount=1, window=None): if hasattr(self._topic_ctrl, '_inc_counter'): return self._topic_ctrl._inc_counter(topic_name, project, amount, window) now = timeutils.utcnow_ts() update = {'$inc': {'c.v': amount}, '$set': {'c.t': now}} query = _get_scoped_query(topic_name, project) if window is not None: threshold = now - window query['c.t'] = {'$lt': threshold} while True: try: collection = self._collection(topic_name, project).stats doc = collection.find_one_and_update( query, update, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0}) break except pymongo.errors.AutoReconnect: LOG.exception('Auto reconnect error.') if doc is None: if window is None: message = (u'Failed to increment the message ' u'counter for topic %(name)s and ' u'project %(project)s') message %= dict(name=topic_name, project=project) LOG.warning(message) raise errors.TopicDoesNotExist(topic_name, project) return None return doc['c']['v'] def _get_counter(self, topic_name, project=None): if hasattr(self._topic_ctrl, '_get_counter'): return self._topic_ctrl._get_counter(topic_name, project) update = {'$inc': {'c.v': 0, 'c.t': 0}} query = _get_scoped_query(topic_name, project) try: collection = self._collection(topic_name, project).stats doc = collection.find_one_and_update( query, update, upsert=True, return_document=pymongo.ReturnDocument.AFTER, projection={'c.v': 1, '_id': 0}) return doc['c']['v'] except pymongo.errors.AutoReconnect: LOG.exception('Auto reconnect error.') def list(self, topic_name, project=None, marker=None, limit=storage.DEFAULT_MESSAGES_PER_PAGE, echo=False, client_uuid=None, include_claimed=False, include_delayed=False): if marker is not None: try: marker = int(marker) except ValueError: yield iter([]) messages = self._list(topic_name, project=project, marker=marker, client_uuid=client_uuid, echo=echo, include_claimed=include_claimed, include_delayed=include_delayed, limit=limit) marker_id = {} now = timeutils.utcnow_ts() def denormalizer(msg): marker_id['next'] = msg['k'] return _basic_message(msg, now) yield utils.HookedCursor(messages, denormalizer) yield str(marker_id['next']) @utils.raises_conn_error @utils.retries_on_autoreconnect def first(self, topic_name, project=None, sort=1): cursor = self._list(topic_name, project=project, include_claimed=True, sort=sort, limit=1) try: message = next(cursor) except StopIteration: raise errors.TopicIsEmpty(topic_name, project) now = timeutils.utcnow_ts() return _basic_message(message, now) @utils.raises_conn_error @utils.retries_on_autoreconnect def get(self, topic_name, message_id, project=None): mid = utils.to_oid(message_id) if mid is None: raise errors.MessageDoesNotExist(message_id, topic_name, project) now = timeutils.utcnow_ts() query = { '_id': mid, PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } collection = self._collection(topic_name, project) message = list(collection.find(query).limit(1).hint(ID_INDEX_FIELDS)) if not message: raise errors.MessageDoesNotExist(message_id, topic_name, project) return _basic_message(message[0], now) @utils.raises_conn_error @utils.retries_on_autoreconnect def bulk_get(self, topic_name, message_ids, project=None): message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid] if not message_ids: return iter([]) now = timeutils.utcnow_ts() query = { '_id': {'$in': message_ids}, PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } collection = self._collection(topic_name, project) messages = 
collection.find(query).hint(ID_INDEX_FIELDS) def denormalizer(msg): return _basic_message(msg, now) return utils.HookedCursor(messages, denormalizer) @utils.raises_conn_error @utils.retries_on_autoreconnect def post(self, topic_name, messages, client_uuid, project=None): if not self._topic_ctrl.exists(topic_name, project): raise errors.TopicDoesNotExist(topic_name, project) self._get_counter(topic_name, project) now = timeutils.utcnow_ts() now_dt = datetime.datetime.utcfromtimestamp(now) collection = self._collection(topic_name, project) messages = list(messages) msgs_n = len(messages) next_marker = self._inc_counter(topic_name, project, amount=msgs_n) - msgs_n prepared_messages = [] for index, message in enumerate(messages): msg = { PROJ_TOPIC: utils.scope_queue_name(topic_name, project), 't': message['ttl'], 'e': now_dt + datetime.timedelta(seconds=message['ttl']), 'u': client_uuid, 'd': now + message.get('delay', 0), 'b': message['body'] if 'body' in message else {}, 'k': next_marker + index, 'tx': None } if self.driver.conf.enable_checksum: msg['cs'] = s_utils.get_checksum(message.get('body', None)) prepared_messages.append(msg) res = collection.insert_many(prepared_messages, bypass_document_validation=True) return [str(id_) for id_ in res.inserted_ids] @utils.raises_conn_error @utils.retries_on_autoreconnect def delete(self, topic_name, message_id, project=None, claim=None): mid = utils.to_oid(message_id) if mid is None: return collection = self._collection(topic_name, project) query = { '_id': mid, PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } cid = utils.to_oid(claim) if cid is None: raise errors.ClaimDoesNotExist(claim, topic_name, project) now = timeutils.utcnow_ts() cursor = collection.find(query).hint(ID_INDEX_FIELDS) try: message = next(cursor) except StopIteration: return if claim is None: if _is_claimed(message, now): raise errors.MessageIsClaimed(message_id) else: if message['c']['id'] != cid: kwargs = {} message = collection.find_one(query, **kwargs) if message['c']['id'] != cid: if _is_claimed(message, now): raise errors.MessageNotClaimedBy(message_id, claim) raise errors.MessageNotClaimed(message_id) collection.delete_one(query) @utils.raises_conn_error @utils.retries_on_autoreconnect def bulk_delete(self, topic_name, message_ids, project=None, claim_ids=None): message_ids = [mid for mid in map(utils.to_oid, message_ids) if mid] if claim_ids: claim_ids = [cid for cid in map(utils.to_oid, claim_ids) if cid] query = { '_id': {'$in': message_ids}, PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } collection = self._collection(topic_name, project) if claim_ids: message_claim_ids = [] messages = collection.find(query).hint(ID_INDEX_FIELDS) for message in messages: message_claim_ids.append(message['c']['id']) for cid in claim_ids: if cid not in message_claim_ids: raise errors.ClaimDoesNotExist(cid, topic_name, project) collection.delete_many(query) @utils.raises_conn_error @utils.retries_on_autoreconnect def pop(self, topic_name, limit, project=None): query = { PROJ_TOPIC: utils.scope_queue_name(topic_name, project), } now = timeutils.utcnow_ts() query['c.e'] = {'$lte': now} collection = self._collection(topic_name, project) projection = {'_id': 1, 't': 1, 'b': 1, 'c.id': 1} messages = (collection.find_one_and_delete(query, projection=projection) for _ in range(limit)) final_messages = [_basic_message(message, now) for message in messages if message] return final_messages class FIFOMessageController(MessageController):
Apache License 2.0
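_ensure_indexes above relies on Collection.ensure_index, which PyMongo deprecated in 3.0 and removed in 4.0. A sketch of the same index set with create_index, using a placeholder connection and a guessed 'p_t' key standing in for utils.PROJ_TOPIC_KEY:

import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017')   # placeholder deployment
collection = client['zaqar_sketch']['messages']

PROJ_TOPIC = 'p_t'   # assumption: stands in for utils.PROJ_TOPIC_KEY

# create_index() is the modern equivalent of the deprecated ensure_index().
collection.create_index([('e', 1)], name='ttl', expireAfterSeconds=0, background=True)
collection.create_index([(PROJ_TOPIC, 1), ('k', 1)], name='active', background=True)
collection.create_index([(PROJ_TOPIC, 1)], name='counting', background=True)
collection.create_index([('k', 1), (PROJ_TOPIC, 1)], name='queue_marker', unique=True, background=True)
collection.create_index([('tx', 1)], name='transaction', background=True)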
drorlab/atom3d
atom3d/util/file.py
find_files
python
def find_files(path, suffix, relative=None):
    if not relative:
        find_cmd = r"find {:} -regex '.*\.{:}' | sort".format(path, suffix)
    else:
        find_cmd = r"cd {:}; find . -regex '.*\.{:}' | cut -d '/' -f 2- | sort" \
            .format(path, suffix)
    out = subprocess.Popen(
        find_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=os.getcwd(),
        shell=True)
    (stdout, stderr) = out.communicate()
    name_list = stdout.decode().split()
    name_list.sort()
    return [Path(x) for x in name_list]
Find all files in path with given suffix.

:param path: Directory in which to find files.
:type path: Union[str, Path]
:param suffix: Suffix determining file type to search for.
:type suffix: str
:param relative: Flag to indicate whether to return absolute or relative path.
:return: list of paths to all files with suffix sorted by their names.
:rtype: list[Path]
https://github.com/drorlab/atom3d/blob/7eacb676f56b4130fd805f4b2901a600170b88f9/atom3d/util/file.py#L7-L31
import os from pathlib import Path import subprocess
MIT License
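find_files above shells out to the Unix find command; a pure-Python alternative (not the atom3d implementation) that approximates the same regex-suffix matching and relative handling with pathlib:

import re
from pathlib import Path

def find_files_py(path, suffix, relative=None):
    """Recursively collect files whose names end in .<suffix> (regex fragment), sorted."""
    root = Path(path)
    pattern = re.compile(r'.*\.' + suffix + r'$')
    matches = [p for p in root.rglob('*') if p.is_file() and pattern.match(p.name)]
    if relative:
        matches = [p.relative_to(root) for p in matches]
    return sorted(matches)

print(find_files_py('/path/to/data', 'pdb')[:5])   # placeholder directory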
make-all/tuya-local
custom_components/tuya_local/lock.py
async_setup_platform
python
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    _LOGGER.debug(f"Domain data: {hass.data[DOMAIN]}")
    data = hass.data[DOMAIN][discovery_info[CONF_DEVICE_ID]]
    device = data["device"]
    locks = []
    cfg = get_config(discovery_info[CONF_TYPE])
    if cfg is None:
        raise ValueError(f"No device config found for {discovery_info}")
    ecfg = cfg.primary_entity
    if ecfg.entity == "lock" and discovery_info.get(ecfg.config_id, False):
        data[ecfg.config_id] = TuyaLocalLock(device, ecfg)
        locks.append(data[ecfg.config_id])
        if ecfg.deprecated:
            _LOGGER.warning(ecfg.deprecation_message)
        _LOGGER.debug(f"Adding lock for {ecfg.name}")
    for ecfg in cfg.secondary_entities():
        if ecfg.entity == "lock" and discovery_info.get(ecfg.config_id, False):
            data[ecfg.config_id] = TuyaLocalLock(device, ecfg)
            locks.append(data[ecfg.config_id])
            if ecfg.deprecated:
                _LOGGER.warning(ecfg.deprecation_message)
            _LOGGER.debug(f"Adding lock for {ecfg.name}")
    if not locks:
        raise ValueError(f"{device.name} does not support use as a lock device.")
    async_add_entities(locks)
Set up the lock device according to its type.
https://github.com/make-all/tuya-local/blob/636d0cd4cb2432676d862d290d2f6deea7328437/custom_components/tuya_local/lock.py#L17-L45
import logging from . import DOMAIN from .const import ( CONF_DEVICE_ID, CONF_TYPE, ) from .generic.lock import TuyaLocalLock from .helpers.device_config import get_config _LOGGER = logging.getLogger(__name__)
MIT License
unofficial-memsource/memsource-cli-client
memsource_cli/models/lqa_severity_dto.py
LqaSeverityDto.__eq__
python
def __eq__(self, other):
    if not isinstance(other, LqaSeverityDto):
        return False
    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/lqa_severity_dto.py#L132-L137
import pprint import re import six class LqaSeverityDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'severity_id': 'int', 'name': 'str' } attribute_map = { 'severity_id': 'severityId', 'name': 'name' } def __init__(self, severity_id=None, name=None): self._severity_id = None self._name = None self.discriminator = None if severity_id is not None: self.severity_id = severity_id if name is not None: self.name = name @property def severity_id(self): return self._severity_id @severity_id.setter def severity_id(self, severity_id): self._severity_id = severity_id @property def name(self): return self._name @name.setter def name(self, name): self._name = name def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(LqaSeverityDto, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
Apache License 2.0
google-research/s4l
preprocess.py
get_hsvnoise_preprocess
python
def get_hsvnoise_preprocess(sv_pow=(-2.0, 2.0), sv_mul=(-0.5, 0.5),
                            sv_add=(-0.1, 0.1), h_add=(-0.1, 0.1)):
    rnd = lambda *a: tf.random.uniform((), *a)
    rnd2 = lambda *a: tf.random.uniform((2,), *a)

    def _hsvnoise(rgb):
        hsv = tf.image.rgb_to_hsv(rgb / 255.0)
        h, sv = hsv[..., :1], hsv[..., 1:]
        h = tf.floormod(1. + h + rnd(*h_add), 1.)
        pow_, mul, add = 2.0**rnd2(*sv_pow), 2.0**rnd2(*sv_mul), rnd2(*sv_add)
        sv = sv**pow_ * mul + add
        hsv = tf.clip_by_value(tf.concat([h, sv], axis=-1), 0, 1)
        return tf.image.hsv_to_rgb(hsv) * 255.0

    def _hsvnoise_pp(data):
        data["image"] = utils.tf_apply_to_image_or_images(_hsvnoise, data["image"])
        return data

    return _hsvnoise_pp
Returns a function that randomises HSV similarly to the Exemplar paper.

Requires the input to still be in [0-255] range. Transforms the input to HSV,
applies rnd(mul)*S**(2**rnd(pow)) + rnd(add) to the S and V channels
independently, and H + rnd(add) to the H channel, then converts back to RGB in
float [0-255].

Args:
  sv_pow: The min/max powers of two to which to take S/V.
  sv_mul: The min/max powers of two with which to scale S/V.
  sv_add: The min/max shift of S/V.
  h_add: The min/max shift of hue.

Returns:
  A function applying random HSV augmentation to its input.
https://github.com/google-research/s4l/blob/8f1cf0555dad64d987309e3bee682cf8390bf48a/preprocess.py#L252-L286
from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import numpy as np import tensorflow.compat.v1 as tf import inception_preprocessing as inception_pp import utils COLOR_PALETTE_PATH = ("/cns/vz-d/home/dune/representation/" "color_palette.npy") def crop(image, is_training, crop_size): h, w, c = crop_size[0], crop_size[1], image.shape[-1] if is_training: return tf.random_crop(image, [h, w, c]) else: dy = (tf.shape(image)[0] - h)//2 dx = (tf.shape(image)[1] - w)//2 return tf.image.crop_to_bounding_box(image, dy, dx, h, w) def get_inception_preprocess(is_training, im_size): def _inception_preprocess(data): data["image"] = inception_pp.preprocess_image( data["image"], im_size[0], im_size[1], is_training, add_image_summaries=False) return data return _inception_preprocess def get_resize_small(smaller_size): def _resize_small_pp(data): image = data["image"] h, w = tf.shape(image)[-3], tf.shape(image)[-2] ratio = tf.to_float(smaller_size) / tf.to_float(tf.minimum(h, w)) h = tf.to_int32(tf.round(tf.to_float(h) * ratio)) w = tf.to_int32(tf.round(tf.to_float(w) * ratio)) static_rank = len(image.get_shape().as_list()) if static_rank == 3: data["image"] = tf.image.resize_area(image[None], [h, w])[0] elif static_rank == 4: data["image"] = tf.image.resize_area(image, [h, w]) return data return _resize_small_pp def get_multi_crop(crop_size): def _crop(image, offset, size): return tf.image.crop_to_bounding_box(image, offset[0], offset[1], size[0], size[1]) def _multi_crop_pp(data): image = data["image"] h, w, c = crop_size[0], crop_size[1], image.shape[-1] tl = (0, 0) tr = (0, tf.shape(image)[1] - w) bl = (tf.shape(image)[0] - h, 0) br = (tf.shape(image)[0] - h, tf.shape(image)[1] - w) c = ((tf.shape(image)[0] - h) // 2, (tf.shape(image)[1] - w) // 2) data["image"] = tf.stack([ _crop(image, c, crop_size), _crop(image, tl, crop_size), _crop(image, tr, crop_size), _crop(image, bl, crop_size), _crop(image, br, crop_size) ]) return data return _multi_crop_pp def get_crop(is_training, crop_size): def _crop_pp(data): crop_fn = functools.partial( crop, is_training=is_training, crop_size=crop_size) data["image"] = utils.tf_apply_to_image_or_images(crop_fn, data["image"]) return data return _crop_pp def inception_crop(image, **kw): begin, size, _ = tf.image.sample_distorted_bounding_box( tf.shape(image), tf.zeros([0, 0, 4], tf.float32), use_image_if_no_bounding_boxes=True, **kw) crop = tf.slice(image, begin, size) crop.set_shape([None, None, image.shape[-1]]) return crop def get_inception_crop(is_training, **kw): def _inception_crop_pp(data): if is_training: data["image"] = inception_crop(data["image"], **kw) else: tf.logging.warn("inception_crop pre-processing keeps the full image in " "eval mode for now. 
Contact lbeyer@ with your use-case " "and propose a reasonable default behaviour.") return data return _inception_crop_pp def get_random_flip_lr(is_training): def _random_flip_lr_pp(data): if is_training: data["image"] = utils.tf_apply_to_image_or_images( tf.image.random_flip_left_right, data["image"]) return data return _random_flip_lr_pp def get_resize_preprocess(fn_args, is_training): try: fn_args.remove("randomize_method") randomize_resize_method = is_training except ValueError: randomize_resize_method = False im_size = utils.str2intlist(fn_args, 2) def _resize(image, method, align_corners): def _process(): resized = tf.cast( tf.image.resize_images( image, im_size, method, align_corners=align_corners), dtype=tf.float32) return resized return _process def _resize_pp(data): im = data["image"] if randomize_resize_method: r = tf.random_uniform([], 0, 3, dtype=tf.int32) im = tf.case({ tf.equal(r, tf.cast(0, r.dtype)): _resize(im, tf.image.ResizeMethod.BILINEAR, True), tf.equal(r, tf.cast(1, r.dtype)): _resize(im, tf.image.ResizeMethod.NEAREST_NEIGHBOR, True), tf.equal(r, tf.cast(2, r.dtype)): _resize(im, tf.image.ResizeMethod.BICUBIC, True), tf.equal(r, tf.cast(3, r.dtype)): _resize(im, tf.image.ResizeMethod.AREA, False), }) else: im = tf.image.resize_images(im, im_size) data["image"] = im return data return _resize_pp def get_rotate_preprocess(create_labels=True): def _four_rots(img): return tf.stack([ img, tf.transpose(tf.reverse_v2(img, [1]), [1, 0, 2]), tf.reverse_v2(img, [0, 1]), tf.reverse_v2(tf.transpose(img, [1, 0, 2]), [1]), ]) def _rotate_pp(data): if create_labels: data["label"] = utils.tf_apply_to_image_or_images( lambda _: tf.constant([0, 1, 2, 3]), data["image"], dtype=tf.int32) data["image"] = utils.tf_apply_to_image_or_images(_four_rots, data["image"]) return data return _rotate_pp def get_copy_label_preprocess(new_name): def _copy_label_pp(data): data[new_name] = data["label"] return data return _copy_label_pp def get_value_range_preprocess(vmin=-1, vmax=1, dtype=tf.float32): def _value_range_pp(data): img = tf.cast(data["image"], dtype) img = vmin + (img / tf.constant(255.0, dtype)) * (vmax - vmin) data["image"] = img return data return _value_range_pp
Apache License 2.0
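A minimal usage sketch for get_hsvnoise_preprocess above, assuming the repository's preprocess.py and its utils module are importable and that TensorFlow eager execution is active; the import path, random image, and parameter values are illustrative only, not taken from the source.

import tensorflow.compat.v1 as tf

from preprocess import get_hsvnoise_preprocess  # hypothetical import path into the repo

# Build the preprocess fn once, then apply it to a data dict holding a [0-255] image.
pp_fn = get_hsvnoise_preprocess(sv_pow=(-1.0, 1.0), h_add=(-0.05, 0.05))
data = {"image": tf.random.uniform((32, 32, 3), minval=0, maxval=255)}
out = pp_fn(data)
print(out["image"].shape)  # same shape as the input, values still roughly in [0, 255]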
threatconnect-inc/tcex
tcex/batch/indicator.py
Indicator.last_modified
python
def last_modified(self, last_modified: str):
    self._indicator_data['lastModified'] = self._utils.datetime.format_datetime(
        last_modified, date_format='%Y-%m-%dT%H:%M:%SZ'
    )
Set Indicator lastModified.
https://github.com/threatconnect-inc/tcex/blob/dae37b73d8b33cf26360f6d25c6b305a68f2f0e2/tcex/batch/indicator.py#L280-L284
import json import uuid from typing import Callable, Optional from ..utils import Utils from .attribute import Attribute from .security_label import SecurityLabel from .tag import Tag module = __import__(__name__) def custom_indicator_class_factory(indicator_type, base_class, class_dict, value_fields): value_count = len(value_fields) def init_1(self, tcex, value1, xid, **kwargs): summary = self.build_summary(value1) base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs) for k, v in class_dict.items(): setattr(self, k, v) def init_2( self, tcex, value1, value2, xid, **kwargs ): summary = self.build_summary(value1, value2) base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs) for k, v in class_dict.items(): setattr(self, k, v) def init_3( self, tcex, value1, value2, value3, xid, **kwargs ): summary = self.build_summary(value1, value2, value3) base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs) for k, v in class_dict.items(): setattr(self, k, v) class_name = indicator_type.replace(' ', '') init_method = locals()[f'init_{value_count}'] newclass = type(str(class_name), (base_class,), {'__init__': init_method}) return newclass class Indicator: __slots__ = [ '_attributes', '_file_actions', '_indicator_data', '_labels', '_occurrences', '_summary', '_tags', '_type', '_utils', ] def __init__(self, indicator_type: str, summary: str, **kwargs): self._utils = Utils() self._summary = summary self._type = indicator_type self._indicator_data = {'summary': summary, 'type': indicator_type} for arg, value in kwargs.items(): self.add_key_value(arg, value) if kwargs.get('xid') is None: self._indicator_data['xid'] = str(uuid.uuid4()) self._attributes = [] self._file_actions = [] self._labels = [] self._occurrences = [] self._tags = [] @property def _metadata_map(self) -> dict: return { 'date_added': 'dateAdded', 'dnsActive': 'flag1', 'dns_active': 'flag1', 'last_modified': 'lastModified', 'private_flag': 'privateFlag', 'size': 'intValue1', 'whoisActive': 'flag2', 'whois_active': 'flag2', } def add_key_value(self, key: str, value: str) -> None: key = self._metadata_map.get(key, key) if key in ['dateAdded', 'lastModified']: self._indicator_data[key] = self._utils.datetime.format_datetime( value, date_format='%Y-%m-%dT%H:%M:%SZ' ) elif key == 'confidence': self._indicator_data[key] = int(value) elif key == 'rating': self._indicator_data[key] = float(value) else: self._indicator_data[key] = value @property def active(self) -> bool: return self._indicator_data.get('active') @active.setter def active(self, active: bool): self._indicator_data['active'] = self._utils.to_bool(active) def association(self, group_xid: str) -> None: association = {'groupXid': group_xid} self._indicator_data.setdefault('associatedGroups', []).append(association) def attribute( self, attr_type: str, attr_value: str, displayed: Optional[bool] = False, source: Optional[str] = None, unique: Optional[bool] = True, formatter: Optional[Callable[[str], str]] = None, ) -> Attribute: attr = Attribute(attr_type, attr_value, displayed, source, formatter) if unique == 'Type': for attribute_data in self._attributes: if attribute_data.type == attr_type: attr = attribute_data break else: self._attributes.append(attr) elif unique is True: for attribute_data in self._attributes: if attribute_data.type == attr_type and attribute_data.value == attr.value: attr = attribute_data break else: self._attributes.append(attr) elif unique is False: self._attributes.append(attr) return attr @staticmethod def 
build_summary( val1: Optional[str] = None, val2: Optional[str] = None, val3: Optional[str] = None ) -> str: summary = [] if val1 is not None: summary.append(val1) if val2 is not None: summary.append(val2) if val3 is not None: summary.append(val3) if not summary: pass return ' : '.join(summary) @property def confidence(self) -> int: return self._indicator_data.get('confidence') @confidence.setter def confidence(self, confidence: int): self._indicator_data['confidence'] = int(confidence) @property def data(self) -> dict: if self._attributes: self._indicator_data['attribute'] = [] for attr in self._attributes: if attr.valid: self._indicator_data['attribute'].append(attr.data) if self._file_actions: self._indicator_data.setdefault('fileAction', {}) self._indicator_data['fileAction'].setdefault('children', []) for action in self._file_actions: self._indicator_data['fileAction']['children'].append(action.data) if self._occurrences: self._indicator_data.setdefault('fileOccurrence', []) for occurrence in self._occurrences: self._indicator_data['fileOccurrence'].append(occurrence.data) if self._labels: self._indicator_data['securityLabel'] = [] for label in self._labels: self._indicator_data['securityLabel'].append(label.data) if self._tags: self._indicator_data['tag'] = [] for tag in self._tags: if tag.valid: self._indicator_data['tag'].append(tag.data) return self._indicator_data @property def date_added(self) -> str: return self._indicator_data.get('dateAdded') @date_added.setter def date_added(self, date_added: str): self._indicator_data['dateAdded'] = self._utils.datetime.format_datetime( date_added, date_format='%Y-%m-%dT%H:%M:%SZ' ) @property def last_modified(self) -> str: return self._indicator_data.get('lastModified') @last_modified.setter
Apache License 2.0
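A hedged sketch of the setter above, constructing an Indicator directly just to show the behaviour (in normal use these objects come from the tcex Batch API); the indicator type, value, and timestamp are invented.

from tcex.batch.indicator import Indicator

ind = Indicator('Address', '4.3.2.1')
ind.last_modified = '2020-01-31 12:00:00'  # setter normalises to '%Y-%m-%dT%H:%M:%SZ'
print(ind.last_modified)                   # e.g. '2020-01-31T12:00:00Z'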
ucsbarchlab/pyrtl
pyrtl/corecircuits.py
bitfield_update
python
def bitfield_update(w, range_start, range_end, newvalue, truncating=False):
    from .corecircuits import concat_list
    w = as_wires(w)
    idxs = list(range(len(w)))
    idxs_middle = idxs[range_start:range_end]
    if len(idxs_middle) == 0:
        raise PyrtlError('Cannot update bitfield of size 0 (i.e. there are no bits to update)')
    idxs_lower = idxs[:idxs_middle[0]]
    idxs_upper = idxs[idxs_middle[-1] + 1:]

    newvalue = as_wires(newvalue, bitwidth=len(idxs_middle), truncating=truncating)
    if len(idxs_middle) != len(newvalue):
        raise PyrtlError('Cannot update bitfield of length %d with value of length %d '
                         'unless truncating=True is specified' % (len(idxs_middle), len(newvalue)))

    result_list = []
    if idxs_lower:
        result_list.append(w[idxs_lower[0]:idxs_lower[-1] + 1])
    result_list.append(newvalue)
    if idxs_upper:
        result_list.append(w[idxs_upper[0]:idxs_upper[-1] + 1])
    result = concat_list(result_list)

    if len(result) != len(w):
        raise PyrtlInternalError('len(result)=%d, len(original)=%d' % (len(result), len(w)))
    return result
Return WireVector w but with some of the bits overwritten by newvalue.

:param w: a WireVector to use as the starting point for the update
:param range_start: the start of the range of bits to be updated
:param range_end: the end of the range of bits to be updated
:param newvalue: the value to be written in to the start:end range
:param truncating: if true, silently clip newvalue to the proper bitwidth rather than
    throw an error if the value provided is too large

Given a WireVector w, this function returns a new WireVector that is identical to w
except in the range of bits specified. In that specified range, the value newvalue is
swapped in. For example: `bitfield_update(w, 20, 23, 0x7)` will return a WireVector of
the same length as w, and with the same values as w, but with bits 20, 21, and 22 all
set to 1.

Note that range_start and range_end will be inputs to a slice and so standard Python
slicing rules apply (e.g. negative values for end-relative indexing and support for
None). ::

    w = bitfield_update(w, 20, 23, 0x7)    # sets bits 20, 21, 22 to 1
    w = bitfield_update(w, 20, 23, 0x6)    # sets bit 20 to 0, bits 21 and 22 to 1
    w = bitfield_update(w, 20, None, 0x7)  # assuming w is 32 bits, sets bits 31..20 = 0x7
    w = bitfield_update(w, -1, None, 0x1)  # set the MSB (bit) to 1
    w = bitfield_update(w, None, -1, 0x9)  # set the bits before the MSB (bit) to 9
    w = bitfield_update(w, None, 1, 0x1)   # set the LSB (bit) to 1
    w = bitfield_update(w, 1, None, 0x9)   # set the bits after the LSB (bit) to 9
https://github.com/ucsbarchlab/pyrtl/blob/8b42f566a3c2c23de21f1b534900232219a3b313/pyrtl/corecircuits.py#L415-L469
from __future__ import division import six import math from .pyrtlexceptions import PyrtlError, PyrtlInternalError from .core import LogicNet, working_block from .wire import Const, WireVector from pyrtl.rtllib import barrel from pyrtl.rtllib import muxes from .conditional import otherwise def mux(index, *mux_ins, **kwargs): if kwargs: if len(kwargs) != 1 or 'default' not in kwargs: try: result = select(index, **kwargs) import warnings warnings.warn("Predicates are being deprecated in Mux. " "Use the select operator instead.", stacklevel=2) return result except Exception: bad_args = [k for k in kwargs.keys() if k != 'default'] raise PyrtlError('unknown keywords %s applied to mux' % str(bad_args)) default = kwargs['default'] else: default = None index = as_wires(index) short_by = 2**len(index) - len(mux_ins) if short_by > 0: if default is not None: mux_ins = list(mux_ins) extention = [default] * short_by mux_ins.extend(extention) if 2 ** len(index) != len(mux_ins): raise PyrtlError( 'Mux select line is %d bits, but selecting from %d inputs. ' % (len(index), len(mux_ins))) if len(index) == 1: return select(index, falsecase=mux_ins[0], truecase=mux_ins[1]) half = len(mux_ins) // 2 return select(index[-1], falsecase=mux(index[0:-1], *mux_ins[:half]), truecase=mux(index[0:-1], *mux_ins[half:])) def select(sel, truecase, falsecase): sel, f, t = (as_wires(w) for w in (sel, falsecase, truecase)) f, t = match_bitwidth(f, t) outwire = WireVector(bitwidth=len(f)) net = LogicNet(op='x', op_param=None, args=(sel, f, t), dests=(outwire,)) working_block().add_net(net) return outwire def concat(*args): if len(args) <= 0: raise PyrtlError('error, concat requires at least 1 argument') if len(args) == 1: return as_wires(args[0]) arg_wirevectors = tuple(as_wires(arg) for arg in args) final_width = sum(len(arg) for arg in arg_wirevectors) outwire = WireVector(bitwidth=final_width) net = LogicNet( op='c', op_param=None, args=arg_wirevectors, dests=(outwire,)) working_block().add_net(net) return outwire def concat_list(wire_list): return concat(*reversed(wire_list)) def signed_add(a, b): if isinstance(a, int): a = Const(a, signed=True) if isinstance(b, int): b = Const(b, signed=True) a, b = match_bitwidth(as_wires(a), as_wires(b), signed=True) result_len = len(a) + 1 ext_a = a.sign_extended(result_len) ext_b = b.sign_extended(result_len) return (ext_a + ext_b)[0:result_len] def mult_signed(a, b): return signed_mult(a, b) def signed_mult(a, b): if isinstance(a, int): a = Const(a, signed=True) if isinstance(b, int): b = Const(b, signed=True) a, b = as_wires(a), as_wires(b) final_len = len(a) + len(b) a, b = a.sign_extended(final_len), b.sign_extended(final_len) return (a * b)[0:final_len] def signed_lt(a, b): a, b = match_bitwidth(as_wires(a), as_wires(b), signed=True) r = a - b return r[-1] ^ (~a[-1]) ^ (~b[-1]) def signed_le(a, b): a, b = match_bitwidth(as_wires(a), as_wires(b), signed=True) r = a - b return (r[-1] ^ (~a[-1]) ^ (~b[-1])) | (a == b) def signed_gt(a, b): a, b = match_bitwidth(as_wires(a), as_wires(b), signed=True) r = b - a return r[-1] ^ (~a[-1]) ^ (~b[-1]) def signed_ge(a, b): a, b = match_bitwidth(as_wires(a), as_wires(b), signed=True) r = b - a return (r[-1] ^ (~a[-1]) ^ (~b[-1])) | (a == b) def _check_shift_inputs(a, shamt): if isinstance(shamt, int): raise PyrtlError('shift_amount is an integer, use slice instead') a, shamt = as_wires(a), as_wires(shamt) log_length = int(math.log(len(a), 2)) return a, shamt def shift_left_arithmetic(bits_to_shift, shift_amount): return 
shift_left_logical(bits_to_shift, shift_amount) def shift_right_arithmetic(bits_to_shift, shift_amount): if isinstance(shift_amount, int): return bits_to_shift[shift_amount:].sign_extended(len(bits_to_shift)) bit_in = bits_to_shift[-1] dir = Const(0) return barrel.barrel_shifter(bits_to_shift, bit_in, dir, shift_amount) def shift_left_logical(bits_to_shift, shift_amount): if isinstance(shift_amount, int): return concat(bits_to_shift[:-shift_amount], Const(0, shift_amount)) bit_in = Const(0) dir = Const(1) return barrel.barrel_shifter(bits_to_shift, bit_in, dir, shift_amount) def shift_right_logical(bits_to_shift, shift_amount): if isinstance(shift_amount, int): return bits_to_shift[shift_amount:].zero_extended(len(bits_to_shift)) bit_in = Const(0) dir = Const(0) return barrel.barrel_shifter(bits_to_shift, bit_in, dir, shift_amount) def match_bitwidth(*args, **opt): if len(opt) == 0: signed = False else: if len(opt) > 1 or 'signed' not in opt: raise PyrtlError('error, only supported kwarg to match_bitwidth is "signed"') signed = bool(opt['signed']) max_len = max(len(wv) for wv in args) if signed: return (wv.sign_extended(max_len) for wv in args) else: return (wv.zero_extended(max_len) for wv in args) def as_wires(val, bitwidth=None, truncating=True, block=None): from .memory import _MemIndexed block = working_block(block) if isinstance(val, (int, six.string_types)): return Const(val, bitwidth=bitwidth, block=block) elif isinstance(val, _MemIndexed): if val.wire is None: val.wire = as_wires(val.mem._readaccess(val.index), bitwidth, truncating, block) return val.wire elif not isinstance(val, WireVector): raise PyrtlError('error, expecting a wirevector, int, or verilog-style ' 'const string got %s instead' % repr(val)) elif bitwidth == '0': raise PyrtlError('error, bitwidth must be >= 1') elif val.bitwidth is None: raise PyrtlError('error, attempting to use wirevector with no defined bitwidth') elif bitwidth and bitwidth > val.bitwidth: return val.zero_extended(bitwidth) elif bitwidth and truncating and bitwidth < val.bitwidth: return val[:bitwidth] else: return val
BSD 3-Clause New or Revised License
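A small PyRTL sketch of bitfield_update inside a working block, assuming a recent PyRTL release; the wire names, widths, and the simulated input value are chosen only for illustration.

import pyrtl

pyrtl.reset_working_block()
w_in = pyrtl.Input(bitwidth=32, name='w_in')
w_out = pyrtl.Output(bitwidth=32, name='w_out')
# Overwrite bits 20..22 with 0b111, leaving every other bit of w_in untouched.
w_out <<= pyrtl.corecircuits.bitfield_update(w_in, 20, 23, 0x7)

sim = pyrtl.Simulation()
sim.step({'w_in': 0})
print(hex(sim.inspect('w_out')))  # expected 0x700000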
dropreg/r-drop
huggingface_transformer_src/src/transformers/models/bart/modeling_tf_bart.py
TFBartLearnedPositionalEmbedding.call
python
def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
    bsz, seq_len = input_shape[:2]

    positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
    return super().call(positions + self.offset)
Input is expected to be of size [bsz x seqlen].
https://github.com/dropreg/r-drop/blob/497eae6c5428a0cb3c2c581b66e1fd6a85226527/huggingface_transformer_src/src/transformers/models/bart/modeling_tf_bart.py#L123-L128
import random from typing import Dict, Optional, Tuple, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPast, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, ) from ...modeling_tf_utils import ( DUMMY_INPUTS, TFCausalLanguageModelingLoss, TFPreTrainedModel, TFSharedEmbeddings, TFWrappedEmbeddings, input_processing, keras_serializable, shape_list, ) from ...utils import logging from .configuration_bart import BartConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/bart-large" _CONFIG_FOR_DOC = "BartConfig" _TOKENIZER_FOR_DOC = "BartTokenizer" LARGE_NEGATIVE = -1e8 def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): shifted_input_ids = tf.roll(input_ids, 1, axis=-1) start_tokens = tf.fill((shape_list(shifted_input_ids)[0], 1), decoder_start_token_id) shifted_input_ids = tf.concat([start_tokens, shifted_input_ids[:, 1:]], -1) shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) if tf.executing_eagerly(): assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0)) with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): bsz, tgt_len = input_ids_shape mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0): src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFBartLearnedPositionalEmbedding(TFSharedEmbeddings): def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)
MIT License
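A hedged sketch of the positional embedding's offset and past-length behaviour, assuming a transformers version that ships this TF BART module; the layer is constructed standalone (outside a full model) purely for illustration, and the sizes are invented.

import tensorflow as tf
from transformers.models.bart.modeling_tf_bart import TFBartLearnedPositionalEmbedding

emb = TFBartLearnedPositionalEmbedding(num_embeddings=1024, embedding_dim=16, name="pos")
full = emb(tf.TensorShape([2, 5]))                            # positions 0..4, shifted by offset 2
step = emb(tf.TensorShape([2, 1]), past_key_values_length=5)  # incremental decoding: position 5
print(full.shape, step.shape)                                 # expected (5, 16) and (1, 16)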
ictu/quality-time
components/collector/src/source_collectors/sonarqube/violations.py
SonarQubeViolations._entity
python
async def _entity(self, issue) -> Entity:
    return Entity(
        key=issue["key"],
        url=await self.__issue_landing_url(issue["key"]),
        message=issue["message"],
        severity=issue.get("severity", "no severity").lower(),
        type=issue["type"].lower(),
        component=issue["component"],
        creation_date=issue["creationDate"],
        update_date=issue["updateDate"],
    )
Create an entity from an issue.
https://github.com/ictu/quality-time/blob/4bd5df14f584dcc174276da0d2ddb6fcfaf1d427/components/collector/src/source_collectors/sonarqube/violations.py#L62-L73
from collector_utilities.type import URL from model import Entities, Entity, SourceMeasurement, SourceResponses from .base import SonarQubeCollector class SonarQubeViolations(SonarQubeCollector): rules_configuration = "" types_parameter = "types" async def _landing_url(self, responses: SourceResponses) -> URL: url = await super()._landing_url(responses) component = self._parameter("component") branch = self._parameter("branch") landing_url = f"{url}/project/issues?id={component}&resolved=false&branch={branch}" return URL(landing_url + self.__rules_url_parameter()) async def _api_url(self) -> URL: url = await super()._api_url() component = self._parameter("component") branch = self._parameter("branch") api = ( f"{url}/api/issues/search?componentKeys={component}&resolved=false&ps=500&" f"severities={self._violation_severities()}&types={self._violation_types()}&branch={branch}" ) return URL(api + self.__rules_url_parameter()) def __rules_url_parameter(self) -> str: rules = ( self._data_model["sources"][self.source_type]["configuration"][self.rules_configuration]["value"] if self.rules_configuration else [] ) return f"&rules={','.join(rules)}" if rules else "" async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement: value = 0 entities = Entities() for response in responses: json = await response.json() value += int(json.get("total", 0)) entities.extend([await self._entity(issue) for issue in json.get("issues", [])]) return SourceMeasurement(value=str(value), entities=entities) async def __issue_landing_url(self, issue_key: str) -> URL: url = await super()._landing_url(SourceResponses()) component = self._parameter("component") branch = self._parameter("branch") return URL(f"{url}/project/issues?id={component}&issues={issue_key}&open={issue_key}&branch={branch}")
Apache License 2.0
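A self-contained sketch of the mapping performed by _entity above: the issue dict mimics the fields a SonarQube issues-search response provides, with invented values; the url field (built by the private __issue_landing_url helper) is omitted here.

issue = {
    "key": "AXp1",
    "message": "Remove this unused import.",
    "severity": "MINOR",
    "type": "CODE_SMELL",
    "component": "my-project:src/app.py",
    "creationDate": "2021-03-01T10:00:00+0000",
    "updateDate": "2021-03-02T10:00:00+0000",
}
entity_fields = {
    "key": issue["key"],
    "message": issue["message"],
    "severity": issue.get("severity", "no severity").lower(),  # -> "minor"
    "type": issue["type"].lower(),                             # -> "code_smell"
    "component": issue["component"],
    "creation_date": issue["creationDate"],
    "update_date": issue["updateDate"],
}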
giswqs/geemap
geemap/chart.py
feature_histogram
python
def feature_histogram(
    features, property, maxBuckets=None, minBucketWidth=None, **kwargs
):
    import math

    def nextPowerOf2(n):
        return pow(2, math.ceil(math.log2(n)))

    def grow_bin(bin_size, ref):
        while bin_size < ref:
            bin_size *= 2
        return bin_size

    try:
        raw_data = pd.to_numeric(
            pd.Series(features.aggregate_array(property).getInfo())
        )
        y_data = raw_data.tolist()

        if "ylim" in kwargs:
            min_value = kwargs["ylim"][0]
            max_value = kwargs["ylim"][1]
        else:
            min_value = raw_data.min()
            max_value = raw_data.max()

        data_range = max_value - min_value

        if not maxBuckets:
            initial_bin_size = nextPowerOf2(data_range / pow(2, 8))
            if minBucketWidth:
                if minBucketWidth < initial_bin_size:
                    bin_size = grow_bin(minBucketWidth, initial_bin_size)
                else:
                    bin_size = minBucketWidth
            else:
                bin_size = initial_bin_size
        else:
            initial_bin_size = math.ceil(data_range / nextPowerOf2(maxBuckets))
            if minBucketWidth:
                if minBucketWidth < initial_bin_size:
                    bin_size = grow_bin(minBucketWidth, initial_bin_size)
                else:
                    bin_size = minBucketWidth
            else:
                bin_size = initial_bin_size

        start_bins = (math.floor(min_value / bin_size) * bin_size) - (bin_size / 2)
        end_bins = (math.ceil(max_value / bin_size) * bin_size) + (bin_size / 2)

        if start_bins < min_value:
            y_data.append(start_bins)
        else:
            y_data[y_data.index(min_value)] = start_bins
        if end_bins > max_value:
            y_data.append(end_bins)
        else:
            y_data[y_data.index(max_value)] = end_bins

        num_bins = math.floor((end_bins - start_bins) / bin_size)

        if "title" not in kwargs:
            title = ""
        else:
            title = kwargs["title"]

        fig = plt.figure(title=title)

        if "width" in kwargs:
            fig.layout.width = kwargs["width"]
        if "height" in kwargs:
            fig.layout.height = kwargs["height"]

        if "xlabel" not in kwargs:
            xlabel = ""
        else:
            xlabel = kwargs["xlabel"]

        if "ylabel" not in kwargs:
            ylabel = ""
        else:
            ylabel = kwargs["ylabel"]

        histogram = plt.hist(
            sample=y_data,
            bins=num_bins,
            axes_options={"count": {"label": ylabel}, "sample": {"label": xlabel}},
        )

        if "colors" in kwargs:
            histogram.colors = kwargs["colors"]
        if "stroke" in kwargs:
            histogram.stroke = kwargs["stroke"]
        else:
            histogram.stroke = "#ffffff00"
        if "stroke_width" in kwargs:
            histogram.stroke_width = kwargs["stroke_width"]
        else:
            histogram.stroke_width = 0

        if ("xlabel" in kwargs) and ("ylabel" in kwargs):
            histogram.tooltip = Tooltip(
                fields=["midpoint", "count"],
                labels=[kwargs["xlabel"], kwargs["ylabel"]],
            )
        else:
            histogram.tooltip = Tooltip(fields=["midpoint", "count"])

        plt.show()

    except Exception as e:
        raise Exception(e)
Generates a Chart from a set of features. Computes and plots a histogram of the given property.

- X-axis = Histogram buckets (of property value).
- Y-axis = Frequency

Reference:
https://developers.google.com/earth-engine/guides/charts_feature#uichartfeaturehistogram

Args:
    features (ee.FeatureCollection): The features to include in the chart.
    property (str): The name of the property to generate the histogram for.
    maxBuckets (int, optional): The maximum number of buckets (bins) to use when building
        a histogram; will be rounded up to a power of 2.
    minBucketWidth (float, optional): The minimum histogram bucket width, or null to allow
        any power of 2.

Raises:
    Exception: If the chart fails to create.
https://github.com/giswqs/geemap/blob/ee39ca827a724691ebd76f57f1dc6fa73c1bb240/geemap/chart.py#L277-L408
import pandas as pd from bqplot import Tooltip from bqplot import pyplot as plt from .common import ee_to_pandas def feature_byFeature(features, xProperty, yProperties, **kwargs): try: df = ee_to_pandas(features) if "ylim" in kwargs: min_value = kwargs["ylim"][0] max_value = kwargs["ylim"][1] else: min_value = df[yProperties].to_numpy().min() max_value = df[yProperties].to_numpy().max() max_value = max_value + 0.2 * (max_value - min_value) if "title" not in kwargs: title = "" else: title = kwargs["title"] if "legend_location" not in kwargs: legend_location = "top-left" else: legend_location = kwargs["legend_location"] x_data = list(df[xProperty]) y_data = df[yProperties].values.T.tolist() plt.bar(x_data, y_data) fig = plt.figure( title=title, legend_location=legend_location, ) if "width" in kwargs: fig.layout.width = kwargs["width"] if "height" in kwargs: fig.layout.height = kwargs["height"] if "labels" in kwargs: labels = kwargs["labels"] else: labels = yProperties if "display_legend" not in kwargs: display_legend = True else: display_legend = kwargs["display_legend"] bar_chart = plt.bar( x_data, y_data, labels=labels, display_legend=display_legend ) bar_chart.type = "grouped" if "colors" in kwargs: bar_chart.colors = kwargs["colors"] if "xlabel" in kwargs: plt.xlabel(kwargs["xlabel"]) if "ylabel" in kwargs: plt.ylabel(kwargs["ylabel"]) plt.ylim(min_value, max_value) if "xlabel" in kwargs and ("ylabel" in kwargs): bar_chart.tooltip = Tooltip( fields=["x", "y"], labels=[kwargs["xlabel"], kwargs["ylabel"]] ) else: bar_chart.tooltip = Tooltip(fields=["x", "y"]) plt.show() except Exception as e: raise Exception(e) def feature_byProperty(features, xProperties, seriesProperty, **kwargs): try: df = ee_to_pandas(features) if isinstance(xProperties, list): x_data = xProperties y_data = df[xProperties].values elif isinstance(xProperties, dict): x_data = list(xProperties.values()) y_data = df[list(xProperties.keys())].values else: raise Exception("xProperties must be a list or dictionary.") labels = list(df[seriesProperty]) if "ylim" in kwargs: min_value = kwargs["ylim"][0] max_value = kwargs["ylim"][1] else: min_value = y_data.min() max_value = y_data.max() max_value = max_value + 0.2 * (max_value - min_value) if "title" not in kwargs: title = "" else: title = kwargs["title"] if "legend_location" not in kwargs: legend_location = "top-left" else: legend_location = kwargs["legend_location"] if "display_legend" not in kwargs: display_legend = True else: display_legend = kwargs["display_legend"] fig = plt.figure( title=title, legend_location=legend_location, ) if "width" in kwargs: fig.layout.width = kwargs["width"] if "height" in kwargs: fig.layout.height = kwargs["height"] bar_chart = plt.bar( x=x_data, y=y_data, labels=labels, display_legend=display_legend ) bar_chart.type = "grouped" if "colors" in kwargs: bar_chart.colors = kwargs["colors"] if "xlabel" in kwargs: plt.xlabel(kwargs["xlabel"]) if "ylabel" in kwargs: plt.ylabel(kwargs["ylabel"]) plt.ylim(min_value, max_value) if "xlabel" in kwargs and ("ylabel" in kwargs): bar_chart.tooltip = Tooltip( fields=["x", "y"], labels=[kwargs["xlabel"], kwargs["ylabel"]] ) else: bar_chart.tooltip = Tooltip(fields=["x", "y"]) plt.show() except Exception as e: raise Exception(e) def feature_groups(features, xProperty, yProperty, seriesProperty, **kwargs): try: df = ee_to_pandas(features) df[yProperty] = pd.to_numeric(df[yProperty]) unique_series_values = df[seriesProperty].unique().tolist() new_column_names = [] for value in unique_series_values: sample_filter 
= (df[seriesProperty] == value).map({True: 1, False: 0}) column_name = str(yProperty) + "_" + str(value) df[column_name] = df[yProperty] * sample_filter new_column_names.append(column_name) if "labels" in kwargs: labels = kwargs["labels"] else: labels = [str(x) for x in unique_series_values] if "ylim" in kwargs: min_value = kwargs["ylim"][0] max_value = kwargs["ylim"][1] else: min_value = df[yProperty].to_numpy().min() max_value = df[yProperty].to_numpy().max() max_value = max_value + 0.2 * (max_value - min_value) if "title" not in kwargs: title = "" else: title = kwargs["title"] if "legend_location" not in kwargs: legend_location = "top-left" else: legend_location = kwargs["legend_location"] x_data = list(df[xProperty]) y_data = [df[x] for x in new_column_names] plt.bar(x_data, y_data) fig = plt.figure( title=title, legend_location=legend_location, ) if "width" in kwargs: fig.layout.width = kwargs["width"] if "height" in kwargs: fig.layout.height = kwargs["height"] if "display_legend" not in kwargs: display_legend = True else: display_legend = kwargs["display_legend"] bar_chart = plt.bar( x_data, y_data, labels=labels, display_legend=display_legend ) if "colors" in kwargs: bar_chart.colors = kwargs["colors"] if "xlabel" in kwargs: plt.xlabel(kwargs["xlabel"]) if "ylabel" in kwargs: plt.ylabel(kwargs["ylabel"]) plt.ylim(min_value, max_value) if "xlabel" in kwargs and ("ylabel" in kwargs): bar_chart.tooltip = Tooltip( fields=["x", "y"], labels=[kwargs["xlabel"], kwargs["ylabel"]] ) else: bar_chart.tooltip = Tooltip(fields=["x", "y"]) plt.show() except Exception as e: raise Exception(e)
MIT License
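A hedged sketch of calling feature_histogram from a notebook, assuming an authenticated Earth Engine session; the asset id and property name are placeholders to be replaced with a real FeatureCollection and one of its numeric properties.

import ee
from geemap import chart

ee.Initialize()
features = ee.FeatureCollection('users/your_account/some_table')  # placeholder asset id
chart.feature_histogram(
    features,
    'some_numeric_property',   # placeholder property name
    maxBuckets=30,
    title='Histogram', xlabel='Value', ylabel='Count',
)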
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_flex_volume_source.py
V1FlexVolumeSource.secret_ref
python
def secret_ref(self):
    return self._secret_ref
Gets the secret_ref of this V1FlexVolumeSource.  # noqa: E501

:return: The secret_ref of this V1FlexVolumeSource.  # noqa: E501
:rtype: V1LocalObjectReference
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_flex_volume_source.py#L169-L176
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1FlexVolumeSource(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'driver': 'str', 'fs_type': 'str', 'options': 'dict(str, str)', 'read_only': 'bool', 'secret_ref': 'V1LocalObjectReference' } attribute_map = { 'driver': 'driver', 'fs_type': 'fsType', 'options': 'options', 'read_only': 'readOnly', 'secret_ref': 'secretRef' } def __init__(self, driver=None, fs_type=None, options=None, read_only=None, secret_ref=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._driver = None self._fs_type = None self._options = None self._read_only = None self._secret_ref = None self.discriminator = None self.driver = driver if fs_type is not None: self.fs_type = fs_type if options is not None: self.options = options if read_only is not None: self.read_only = read_only if secret_ref is not None: self.secret_ref = secret_ref @property def driver(self): return self._driver @driver.setter def driver(self, driver): if self.local_vars_configuration.client_side_validation and driver is None: raise ValueError("Invalid value for `driver`, must not be `None`") self._driver = driver @property def fs_type(self): return self._fs_type @fs_type.setter def fs_type(self, fs_type): self._fs_type = fs_type @property def options(self): return self._options @options.setter def options(self, options): self._options = options @property def read_only(self): return self._read_only @read_only.setter def read_only(self, read_only): self._read_only = read_only @property
Apache License 2.0
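A short sketch of the generated model and its secret_ref accessor, assuming kubernetes_asyncio is installed; the driver and secret names are illustrative.

from kubernetes_asyncio.client import V1FlexVolumeSource, V1LocalObjectReference

flex = V1FlexVolumeSource(
    driver="example.com/flex-driver",
    fs_type="ext4",
    secret_ref=V1LocalObjectReference(name="flex-secret"),
)
print(flex.secret_ref.name)  # "flex-secret", read through the generated getter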
wangronin/bayesian-optimization
bayes_optim/surrogate/random_forest.py
RandomForest.__init__
python
def __init__(
    self,
    n_estimators: int = 100,
    max_features: float = 5 / 6,
    min_samples_leaf: int = 2,
    levels: dict = None,
    **kwargs,
):
    super().__init__(
        n_estimators=n_estimators,
        max_features=max_features,
        min_samples_leaf=min_samples_leaf,
        **kwargs,
    )
    assert isinstance(levels, dict)
    self.levels = levels
    self.is_fitted = False

    if self.levels:
        self._levels = OrderedDict(sorted(levels.items()))
        self._cat_idx = list(self._levels.keys())
        self._categories = list(self._levels.values())
        self._enc = OneHotEncoder(categories=self._categories, sparse=False)
parameter
---------
levels : dict, for categorical inputs
    keys: indices of categorical variables
    values: list of levels of categorical variables
https://github.com/wangronin/bayesian-optimization/blob/ffbcf4c8813dfa603b9065355e20eda0ccb99e30/bayes_optim/surrogate/random_forest.py#L71-L103
from __future__ import annotations from collections import OrderedDict from typing import List, Union import numpy as np from joblib import Parallel, delayed from numpy import array from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble._base import _partition_estimators from sklearn.preprocessing import OneHotEncoder from sklearn.utils.validation import check_is_fitted from ..solution import Solution __authors__ = ["Hao Wang"] class SurrogateAggregation(object): def __init__(self, surrogates, aggregation="WS", **kwargs): self.surrogates = surrogates self.N = len(self.surrogates) self.aggregation = aggregation self.weights = np.asarray(kwargs["weights"], dtype="float").ravel() assert self.aggregation in ["WS", "Tchebycheff"] def fit(self, X, y): pass def predict(self, X, eval_MSE=False): if eval_MSE: y_hat_, MSE_ = list(zip(*[s.predict(X, eval_MSE=True) for s in self.surrogates])) y_hat_ = np.atleast_2d([_.ravel() for _ in y_hat_]) MSE_ = np.atleast_2d([_.ravel() for _ in MSE_]) else: y_hat_ = np.atleast_2d([_.predict(X, eval_MSE=False).ravel() for _ in self.surrogates]) if self.aggregation == "WS": y_hat = self.weights.dot(y_hat_) if eval_MSE: MSE = (self.weights ** 2.0).dot(MSE_) elif self.aggregation == "Tchebycheff": pass return (y_hat, MSE) if eval_MSE else y_hat def gradient(self, X): pass def _save_prediction(predict, X, index, out): out[..., index] = predict(X, check_input=False) class RandomForest(RandomForestRegressor):
BSD 3-Clause New or Revised License
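A hedged sketch of constructing the surrogate with a categorical column, assuming the import path shown in the record and assuming the class's fit/predict handle the encoded column set up in __init__; the toy data are invented.

import numpy as np
from bayes_optim.surrogate.random_forest import RandomForest

# Column 2 is categorical with levels 'A'/'B'; columns 0-1 are numeric.
X = np.array([[0.1, 1.0, 'A'], [0.3, 2.0, 'B'], [0.7, 1.5, 'A'], [0.9, 0.5, 'B']], dtype=object)
y = np.array([1.0, 2.0, 1.5, 3.0])

rf = RandomForest(n_estimators=50, levels={2: ['A', 'B']})  # levels: column index -> level list
rf.fit(X, y)      # assumed to one-hot encode column 2 via the encoder built in __init__
y_hat = rf.predict(X)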
chanzuckerberg/corpora-data-portal
backend/corpora/common/entities/entity.py
Entity.get
python
def get(cls, session, key: typing.Union[str, typing.Tuple[str, str]]) -> typing.Union["Entity", None]:
    result = session.query(cls.table).get(key)
    if result:
        return cls(result)
    else:
        logger.info(f"Unable to find a row with primary key {key}, in {cls.__name__} table.")
        return None
Retrieves an entity from the database given its primary key, if found.

:param key: Simple or composite primary key
:return: Entity or None
https://github.com/chanzuckerberg/corpora-data-portal/blob/546870b58caa422bd381a47e3fbc75908182114c/backend/corpora/common/entities/entity.py#L32-L43
import logging
import typing

from sqlalchemy import inspect

logger = logging.getLogger(__name__)

from ..corpora_orm import Base


class Entity:
    table: Base = None
    list_attributes: typing.Tuple = None

    def __init__(self, db_object: Base):
        self.db_object = db_object
        self.session = inspect(db_object).session

    @classmethod
MIT License
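A hedged sketch of using Entity.get through a concrete subclass; DbDataset, the session argument, and the key are hypothetical stand-ins for whatever the application supplies.

from backend.corpora.common.entities.entity import Entity
from backend.corpora.common.corpora_orm import DbDataset  # hypothetical ORM model name

class Dataset(Entity):
    table = DbDataset

def lookup(session, key):
    # Returns None (and logs) when no row matches the primary key.
    dataset = Dataset.get(session, key)
    return None if dataset is None else dataset.db_object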
ethereum/trinity
trinity/components/builtin/ethstats/ethstats_service.py
EthstatsService.get_node_info
python
def get_node_info(self) -> EthstatsData:
    return {
        'name': self.node_id,
        'contact': self.node_contact,
        'node': construct_trinity_client_identifier(),
        'net': self.boot_info.trinity_config.network_id,
        'port': self.boot_info.trinity_config.port,
        'os': platform.system(),
        'os_v': platform.release(),
        'client': '0.1.1',
        'canUpdateHistory': False,
    }
Getter for data that should be sent once, on start-up.
https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/trinity/components/builtin/ethstats/ethstats_service.py#L100-L112
import asyncio import logging import platform from typing import ContextManager import websockets from async_service import ManagerAPI, Service from lahja import EndpointAPI from eth.abc import ChainAPI from trinity.constants import ( TO_NETWORKING_BROADCAST_CONFIG, ) from trinity._utils.connect import get_eth1_chain_with_remote_db from trinity._utils.version import ( construct_trinity_client_identifier, ) from trinity.boot_info import BootInfo from trinity.components.builtin.ethstats.ethstats_client import ( EthstatsClient, EthstatsMessage, EthstatsData, timestamp_ms, ) from trinity.protocol.common.events import ( PeerCountRequest, ) class EthstatsService(Service): logger = logging.getLogger('trinity.components.ethstats.Service') def __init__( self, boot_info: BootInfo, event_bus: EndpointAPI, server_url: str, server_secret: str, node_id: str, node_contact: str, stats_interval: int, ) -> None: self.boot_info = boot_info self.event_bus = event_bus self.server_url = server_url self.server_secret = server_secret self.node_id = node_id self.node_contact = node_contact self.stats_interval = stats_interval async def run(self) -> None: with self.get_chain() as chain: self.chain = chain while self.manager.is_running: self.logger.info('Connecting to %s...', self.server_url) async with websockets.connect(self.server_url) as websocket: client: EthstatsClient = EthstatsClient( websocket, self.node_id, ) client_manager = self.manager.run_child_service(client) self.manager.run_task(self.server_handler, client, client_manager) self.manager.run_task(self.statistics_handler, client, client_manager) await client_manager.wait_finished() self.logger.info('Connection to %s closed', self.server_url) self.logger.info('Reconnecting in 5s...') await asyncio.sleep(5) async def server_handler(self, client: EthstatsClient, manager: ManagerAPI) -> None: while manager.is_running: message: EthstatsMessage = await client.recv() if message.command == 'node-pong': await client.send_latency((timestamp_ms() - message.data['clientTime']) // 2) else: self.logger.debug('Unhandled message received: %s: %r', message.command, message) async def statistics_handler(self, client: EthstatsClient, manager: ManagerAPI) -> None: await client.send_hello(self.server_secret, self.get_node_info()) while manager.is_running: await client.send_node_ping() await client.send_stats(await self.get_node_stats()) await client.send_block(self.get_node_block()) await asyncio.sleep(self.stats_interval)
MIT License
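A self-contained sketch of the payload shape get_node_info produces for the ethstats hello message; the name, contact, client identifier, network id, and port are invented stand-ins for what boot_info and the helper functions supply at runtime.

import platform

node_info = {
    'name': 'my-trinity-node',
    'contact': 'ops@example.com',
    'node': 'Trinity/0.1.0/linux/cpython3.8',  # stands in for construct_trinity_client_identifier()
    'net': 1,                                  # trinity_config.network_id
    'port': 30303,                             # trinity_config.port
    'os': platform.system(),
    'os_v': platform.release(),
    'client': '0.1.1',
    'canUpdateHistory': False,
}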
simakvokka/robotframework-imagelibrary
src/ImageLibrary/keywords.py
Keywords.start_gui_process
python
def start_gui_process(self, command, *args, **kwargs):
    return self.gui_process.start_gui_process(command, *args, **kwargs)
Starts the given process using the standard Robot Process library, but also takes the program window as the active window. Zones and screenshots will be taken relative to the program coordinates.
https://github.com/simakvokka/robotframework-imagelibrary/blob/487099843de02b40a44aa0376cbb6747bf8794d3/src/ImageLibrary/keywords.py#L185-L188
import yaml, re, os import tkinter as tk from robot.api import logger as LOGGER from ImageLibrary.libcore.librarycomponent import LibraryComponent from ImageLibrary.libcore.robotlibcore import keyword from ImageLibrary.buttons.button_constructor import ButtonConstructor from ImageLibrary.error_handler import ErrorHandler from ImageLibrary.window import Window from ImageLibrary.buttons.global_button import GlobalButtonRegistry from ImageLibrary.image_processor import ImageProcessor from ImageLibrary.main_window import MainWindow from ImageLibrary.open_cv import OpenCV from ImageLibrary import errors, utils from ImageLibrary.window_function import window_function from ImageLibrary.animations import Animations from ImageLibrary.GUIProcess import GUIProcess def _get_images_from(node): images = set() if isinstance(node, (str, bytes)): if re.search('.*\.png$', node) is not None: images.add(node) elif isinstance(node, dict): for key, value in sorted(node.items()): images.update(_get_images_from(value)) elif isinstance(node, list): for value in node: images.update(_get_images_from(value)) return images def _check_config(config, reference_folders): images = _get_images_from(config) not_found_folders = set() not_found = images.copy() for folder in reference_folders: if not os.path.isdir(folder): not_found_folders.add(folder) continue for image in images: if image in not_found and os.path.isfile(os.path.join(folder, image)): not_found.remove(image) out = "" if bool(not_found_folders): out += "Not found reference folders: " + ", ".join(not_found_folders) if bool(not_found): out += " and " if bool(not_found_folders) else "Not found " out += "images: " + ", ".join(not_found) out += " at folders:" + ", ".join(set(reference_folders).difference(not_found_folders)) if bool(out): raise errors.InitError(out) return True class Keywords(LibraryComponent, Animations, GUIProcess): def __init__(self, screenshot_folder, state, debug=False): super().__init__(state) if screenshot_folder is None: self.screenshot_folder = os.path.join(os.getcwd()) else: self.screenshot_folder = screenshot_folder self.mw = None self.debug = debug self.gui_process = GUIProcess() @keyword def init(self, settings, references, area=None): self.settings = {} self.button_registry = GlobalButtonRegistry('hack') assert settings is not None, "YAML config file must not be empty and must contain 'main' window section with at least one of buttons|templates|zones etc" if hasattr(settings, '__iter__'): for setting in settings: with open(setting, 'r') as f: config = yaml.safe_load(f) if "global_buttons_defs" in config: self.button_registry.update_info(config["global_buttons_defs"]) del config["global_buttons_defs"] self.settings.update(config) else: with open(settings) as f: config = yaml.safe_load(f) if "global_buttons_defs" in config: self.button_registry.update_info(config["global_buttons_defs"]) del config["global_buttons_defs"] self.settings.update(config) self.reference_folders = references _check_config(self.settings, self.reference_folders) if area is None: screen_width = tk.Tk().winfo_screenwidth() screen_height = tk.Tk().winfo_screenheight() self.area = (0, 0, screen_width, screen_height) else: self.area = area if "main" not in self.settings: raise errors.ConfigError('config must contain "main" section') self.error_handler = ErrorHandler(self.screenshot_folder, self.area) try: self.image_processor = ImageProcessor(self.area, OpenCV(), references, self.error_handler) except TypeError: LOGGER.info( "Something went wrong while the ImageLibrary 
library init process: it doesn't get the required params.\n" "Program may not be loaded.") self.button_registry.report_merge_errors() self.button_constructor = ButtonConstructor() self.windows = {} for name, config in sorted(self.settings.items()): if name == 'main': self.mw = MainWindow(config, "main", self.button_constructor, self.debug) elif isinstance(config, dict): self.windows[name] = Window(config, name, self.button_constructor, self.debug) elif isinstance(config, list): self.windows[name] = [] for index, screen in enumerate(config): if not isinstance(screen, dict): raise errors.ConfigError( "screen {} of window {} not properly configured: dict expected".format(index + 1, name)) self.windows[name].append(Window(screen, name, self.button_constructor, self.debug)) def _get_window(self, window, index=-1): if window is not None: return utils.get_element_by_name_and_index(self.windows, window, index) else: return self.mw @keyword
Apache License 2.0
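A heavily hedged sketch of the keyword above; in practice it is called from a Robot Framework test (e.g. "Start GUI Process    calc.exe"), and the Python form below assumes an ImageLibrary keyword object that has already been created and initialised with its YAML config.

# `image_library` is assumed to be an initialised ImageLibrary keywords instance.
handle = image_library.start_gui_process('calc.exe')  # launches the app and makes its window active
# ... zones and screenshots are then taken relative to that window ...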
comparativegenomicstoolkit/comparative-annotation-toolkit
cat/__init__.py
ToilTask.prepare_toil_options
python
def prepare_toil_options(self, work_dir):
    toil_args = self.get_toil_defaults()
    toil_args.__dict__.update(vars(self))
    toil_args.stats = True
    toil_args.defaultPreemptable = True
    if self.zone is not None:
        job_dir = os.path.join(work_dir, 'jobStore')
        if os.path.exists(job_dir):
            for i in os.listdir(job_dir):
                if os.path.isfile(os.path.join(job_dir, i)) and self.provisioner in i:
                    job_store = i
                    toil_args.restart = True
                    break
        if toil_args.restart is not True:
            job_store = self.provisioner + ':' + self.zone + ':' + ''.join(
                random.choice(string.ascii_lowercase) for _ in range(7))
            try:
                os.makedirs(job_dir)
            except OSError:
                pass
            tools.fileOps.touch(os.path.join(job_dir, job_store))
    else:
        job_store = os.path.join(work_dir, 'jobStore')
        tools.fileOps.ensure_file_dir(job_store)
        if os.path.exists(job_store):
            try:
                root_job = next(open(os.path.join(job_store, 'rootJobStoreID'))).rstrip()
                if not os.path.exists(os.path.join(job_store, 'tmp', root_job)):
                    shutil.rmtree(job_store)
                else:
                    toil_args.restart = True
            except OSError:
                toil_args.restart = True
    if tools.misc.running_in_container():
        toil_args.disableCaching = True
    if toil_args.batchSystem == 'parasol' and toil_args.disableCaching is False:
        raise RuntimeError('Running parasol without disabled caching is a very bad idea.')
    if toil_args.batchSystem == 'parasol' and toil_args.workDir is None:
        raise RuntimeError('Running parasol without setting a shared work directory will not work. Please specify '
                           '--workDir.')
    if toil_args.workDir is not None:
        tools.fileOps.ensure_dir(toil_args.workDir)
    toil_args.jobStore = job_store
    self.job_store = job_store
    return toil_args
Prepares a Namespace object for Toil which has all defaults, overridden as specified.

Will see if the jobStore path exists, and if it does, assume that we need to add --restart.

:param work_dir: Parent directory where toil work will be done. jobStore will be placed inside.
    Will be used to fill in the workDir class variable.
:return: Namespace
https://github.com/comparativegenomicstoolkit/comparative-annotation-toolkit/blob/fc1623da5df1309d2e2f0b9bb0363aaab84708f4/cat/__init__.py#L488-L545
import string import random import datetime import collections import logging import os import shutil import json import subprocess from collections import OrderedDict from frozendict import frozendict from configobj import ConfigObj import luigi import luigi.contrib.sqla from luigi.util import requires from toil.job import Job import pandas as pd from bx.intervals.cluster import ClusterTree from toil.lib.memoize import memoize import tools.bio import tools.fileOps import tools.intervals import tools.hal import tools.misc import tools.nameConversions import tools.procOps import tools.mathOps import tools.psl import tools.sqlInterface import tools.sqlite import tools.hintsDatabaseInterface import tools.transcripts import tools.gff3 from tools.luigiAddons import multiple_requires, IndexTarget from .align_transcripts import align_transcripts from .augustus import augustus from .augustus_cgp import augustus_cgp from .augustus_pb import augustus_pb from .chaining import chaining from .classify import classify from .consensus import generate_consensus, load_alt_names, load_hgm_vectors from .filter_transmap import filter_transmap from .hgm import hgm, parse_hgm_gtf from .transmap_classify import transmap_classify from .plots import generate_plots from .hints_db import hints_db from .parent_gene_assignment import assign_parents from .exceptions import * logger = logging.getLogger('cat') logger.setLevel('INFO') class PipelineTask(luigi.Task): hal = luigi.Parameter() ref_genome = luigi.Parameter() config = luigi.Parameter() out_dir = luigi.Parameter(default='./cat_output') work_dir = luigi.Parameter(default='./cat_work') target_genomes = luigi.TupleParameter(default=None) annotate_ancestors = luigi.BoolParameter(default=False) binary_mode = luigi.ChoiceParameter(choices=["docker", "local", "singularity"], default='docker', significant=False) augustus = luigi.BoolParameter(default=False) augustus_species = luigi.Parameter(default='human', significant=False) tm_cfg = luigi.Parameter(default='augustus_cfgs/extrinsic.ETM1.cfg', significant=False) tmr_cfg = luigi.Parameter(default='augustus_cfgs/extrinsic.ETM2.cfg', significant=False) augustus_utr_off = luigi.BoolParameter(default=False, significant=False) augustus_cgp = luigi.BoolParameter(default=False) cgp_param = luigi.Parameter(default=None, significant=False) augustus_cgp_cfg_template = luigi.Parameter(default='augustus_cfgs/cgp_extrinsic_template.cfg', significant=False) maf_chunksize = luigi.IntParameter(default=2500000, significant=False) maf_overlap = luigi.IntParameter(default=500000, significant=False) cgp_train_num_exons = luigi.IntParameter(default=5000, significant=False) augustus_pb = luigi.BoolParameter(default=False) pb_genome_chunksize = luigi.IntParameter(default=5000000, significant=False) pb_genome_overlap = luigi.IntParameter(default=500000, significant=False) pb_cfg = luigi.Parameter(default='augustus_cfgs/extrinsic.M.RM.PB.E.W.cfg', significant=False) hgm_cpu = luigi.IntParameter(default=4, significant=False) assembly_hub = luigi.BoolParameter(default=False) hub_email = luigi.Parameter(default='NoEmail', significant=False) global_near_best = luigi.FloatParameter(default=0.15, significant=False) filter_overlapping_genes = luigi.BoolParameter(default=False, significant=True) overlapping_ignore_bases = luigi.IntParameter(default=0, significant=True) intron_rnaseq_support = luigi.IntParameter(default=0, significant=False) exon_rnaseq_support = luigi.IntParameter(default=0, significant=False) intron_annot_support = 
luigi.IntParameter(default=0, significant=False) exon_annot_support = luigi.IntParameter(default=0, significant=False) original_intron_support = luigi.IntParameter(default=0, significant=False) denovo_num_introns = luigi.IntParameter(default=0, significant=False) denovo_splice_support = luigi.IntParameter(default=0, significant=False) denovo_exon_support = luigi.IntParameter(default=0, significant=False) denovo_ignore_novel_genes = luigi.BoolParameter(default=False, significant=False) denovo_only_novel_genes = luigi.BoolParameter(default=False, significant=False) denovo_novel_end_distance = luigi.IntParameter(default=0, significant=False) denovo_allow_novel_ends = luigi.BoolParameter(default=False, significant=False) denovo_allow_unsupported = luigi.BoolParameter(default=False, significant=False) denovo_allow_bad_annot_or_tm = luigi.BoolParameter(default=False, significant=False) require_pacbio_support = luigi.BoolParameter(default=False, significant=False) in_species_rna_support_only = luigi.BoolParameter(default=False, significant=True) rebuild_consensus = luigi.BoolParameter(default=False, significant=True) batchSystem = luigi.Parameter(default='single_machine', significant=False) maxCores = luigi.IntParameter(default=8, significant=False) parasolCommand = luigi.Parameter(default=None, significant=False) defaultMemory = luigi.Parameter(default='8G', significant=False) disableCaching = luigi.BoolParameter(default=False, significant=False) workDir = luigi.Parameter(default=None, significant=False) defaultDisk = luigi.Parameter(default='8G', significant=False) cleanWorkDir = luigi.Parameter(default='onSuccess', significant=False) provisioner = luigi.Parameter(default=None, significant=False) nodeTypes = luigi.Parameter(default=None, significant=False) maxNodes = luigi.Parameter(default=None, significant=False) minNode = luigi.Parameter(default=None, significant=False) metrics = luigi.Parameter(default=None, significant=False) zone = luigi.Parameter(default=None, significant=False) logLevel = luigi.ChoiceParameter(default="INFO", choices=["INFO", "DEBUG", "ERROR", "WARNING"], significant=False) def __repr__(self): if hasattr(self, 'genome'): return 'Task: {} for {}'.format(self.__class__.__name__, self.genome) elif hasattr(self, 'mode'): return 'Task: {} for {}'.format(self.__class__.__name__, self.mode) else: return 'Task: {}'.format(self.__class__.__name__) def get_pipeline_args(self): args = tools.misc.PipelineNamespace() args.set('binary_mode', self.binary_mode, False) args.set('hal', os.path.abspath(self.hal), True) args.set('ref_genome', self.ref_genome, True) args.set('out_dir', os.path.abspath(self.out_dir), True) args.set('work_dir', os.path.abspath(self.work_dir), True) args.set('augustus', self.augustus, True) args.set('augustus_cgp', self.augustus_cgp, True) args.set('augustus_pb', self.augustus_pb, True) args.set('augustus_species', self.augustus_species, True) args.set('tm_cfg', os.path.abspath(self.tm_cfg), True) args.set('tmr_cfg', os.path.abspath(self.tmr_cfg), True) args.set('augustus_cgp', self.augustus_cgp, True) args.set('maf_chunksize', self.maf_chunksize, True) args.set('maf_overlap', self.maf_overlap, True) args.set('pb_genome_chunksize', self.pb_genome_chunksize, True) args.set('pb_genome_overlap', self.pb_genome_overlap, True) args.set('pb_cfg', os.path.abspath(self.pb_cfg), True) args.set('augustus_cgp_cfg_template', os.path.abspath(self.augustus_cgp_cfg_template), True) args.set('augustus_utr_off', self.augustus_utr_off, True) if self.cgp_param is not None: 
args.set('cgp_param', os.path.abspath(self.cgp_param), True) else: args.set('cgp_param', None, True) args.set('cgp_train_num_exons', self.cgp_train_num_exons, True) args.set('hgm_cpu', self.hgm_cpu, False) args.set('global_near_best', self.global_near_best, True) args.set('filter_overlapping_genes', self.filter_overlapping_genes, True) args.set('overlapping_ignore_bases', self.overlapping_ignore_bases, True) args.set('intron_rnaseq_support', self.intron_rnaseq_support, False) args.set('exon_rnaseq_support', self.exon_rnaseq_support, False) args.set('intron_annot_support', self.intron_annot_support, False) args.set('exon_annot_support', self.exon_annot_support, False) args.set('original_intron_support', self.original_intron_support, False) args.set('denovo_num_introns', self.denovo_num_introns, False) args.set('denovo_splice_support', self.denovo_splice_support, False) args.set('denovo_exon_support', self.denovo_exon_support, False) args.set('denovo_ignore_novel_genes', self.denovo_ignore_novel_genes, False) args.set('denovo_only_novel_genes', self.denovo_only_novel_genes, False) args.set('denovo_allow_novel_ends', self.denovo_allow_novel_ends, False) args.set('denovo_novel_end_distance', self.denovo_novel_end_distance, False) args.set('denovo_allow_unsupported', self.denovo_allow_unsupported, False) args.set('denovo_allow_bad_annot_or_tm', self.denovo_allow_bad_annot_or_tm, False) args.set('require_pacbio_support', self.require_pacbio_support, False) args.set('in_species_rna_support_only', self.in_species_rna_support_only, False) args.set('rebuild_consensus', self.rebuild_consensus, False) args.set('stats_db', os.path.join(args.out_dir, 'databases', 'timing_stats.db'), False) args.set('assembly_hub', self.assembly_hub, False) args.set('hub_email', self.hub_email, False) args.set('annotate_ancestors', self.annotate_ancestors, True) if not tools.misc.is_exec('halStats'): raise ToolMissingException('halStats from the HAL tools package not in global path') args.set('hal_genomes', tools.hal.extract_genomes(args.hal, self.annotate_ancestors), True) target_genomes = tools.hal.extract_genomes(args.hal, self.annotate_ancestors, self.target_genomes) target_genomes = tuple(x for x in target_genomes if x != self.ref_genome) args.set('target_genomes', target_genomes, True) args.set('cfg', self.parse_cfg(), True) args.set('dbs', PipelineTask.get_databases(args), True) args.set('annotation', args.cfg['ANNOTATION'][args.ref_genome], True) args.set('hints_db', os.path.join(args.work_dir, 'hints_database', 'hints.db'), True) args.set('rnaseq_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) | set(args.cfg['BAM'].keys())), True) args.set('intron_only_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) - set(args.cfg['BAM'].keys())), True) args.set('isoseq_genomes', frozenset(list(args.cfg['ISO_SEQ_BAM'].keys())), True) args.set('annotation_genomes', frozenset(list(args.cfg['ANNOTATION'].keys())), True) args.set('external_ref_genomes', args.annotation_genomes - {args.ref_genome}, True) args.set('modes', self.get_modes(args), True) args.set('augustus_tmr', True if 'augTMR' in args.modes else False, True) if self.__class__.__name__ in ['RunCat', 'Augustus', 'AugustusCgp', 'AugustusPb']: self.validate_cfg(args) return args def parse_cfg(self): if not os.path.exists(self.config): raise MissingFileException('Config file {} not found.'.format(self.config)) configspec = ['[ANNOTATION]', '__many__ = string', '[INTRONBAM]', '__many__ = list', '[BAM]', '__many__ = list', '[ISO_SEQ_BAM]', '__many__ = list', 
'[PROTEIN_FASTA]', '__many__ = list'] parser = ConfigObj(self.config, configspec=configspec) for key in parser: if key not in ['ANNOTATION', 'INTRONBAM', 'BAM', 'ISO_SEQ_BAM', 'PROTEIN_FASTA']: raise InvalidInputException('Invalid field {} in config file'.format(key)) cfg = collections.defaultdict(dict) for dtype in ['ANNOTATION', 'PROTEIN_FASTA']: if dtype not in parser: cfg[dtype] = {} else: for genome, annot in parser[dtype].items(): annot = os.path.abspath(annot) if not os.path.exists(annot): raise MissingFileException('Missing {} file {}.'.format(dtype.lower(), annot)) cfg[dtype][genome] = annot for dtype in ['BAM', 'INTRONBAM', 'ISO_SEQ_BAM']: if dtype not in parser: cfg[dtype] = {} continue for genome in parser[dtype]: path = parser[dtype][genome] if isinstance(path, str): if not tools.misc.is_bam(path): cfg[dtype][genome] = [os.path.abspath(x.rstrip()) for x in open(path)] else: cfg[dtype][genome] = [os.path.abspath(path)] else: cfg[dtype][genome] = [] for p in path: if tools.misc.is_bam(p): cfg[dtype][genome].append(os.path.abspath(p)) else: cfg[dtype][genome].extend([os.path.abspath(x.rstrip()) for x in open(p)]) return frozendict((key, frozendict((ikey, tuple(ival) if isinstance(ival, list) else ival) for ikey, ival in val.items())) for key, val in cfg.items()) def validate_cfg(self, args): if len(args.cfg['BAM']) + len(args.cfg['INTRONBAM']) + len(args.cfg['ISO_SEQ_BAM']) + len(args.cfg['ANNOTATION']) == 0: logger.warning('No extrinsic data or annotations found in config. Will load genomes only.') elif len(args.cfg['BAM']) + len(args.cfg['INTRONBAM']) + len(args.cfg['ISO_SEQ_BAM']) == 0: logger.warning('No extrinsic data found in config. Will load genomes and annotation only.') for dtype in ['BAM', 'INTRONBAM', 'ISO_SEQ_BAM']: for genome in args.cfg[dtype]: for bam in args.cfg[dtype][genome]: if not os.path.exists(bam): raise MissingFileException('Missing BAM {}.'.format(bam)) if not tools.misc.is_bam(bam): raise InvalidInputException('BAM {} is not a valid BAM.'.format(bam)) if not os.path.exists(bam + '.bai'): raise MissingFileException('Missing BAM index {}.'.format(bam + '.bai')) for dtype in ['ANNOTATION', 'PROTEIN_FASTA']: for genome, annot in args.cfg[dtype].items(): if not os.path.exists(annot): raise MissingFileException('Missing {} file {}.'.format(dtype.lower(), annot)) if all(g in args.hal_genomes for g in args.target_genomes) is False: bad_genomes = set(args.hal_genomes) - set(args.target_genomes) err_msg = 'Genomes {} present in configuration and not present in HAL.'.format(','.join(bad_genomes)) raise UserException(err_msg) if args.ref_genome not in args.cfg['ANNOTATION']: raise UserException('Reference genome {} did not have a provided annotation.'.format(self.ref_genome)) if args.augustus_cgp and len(args.rnaseq_genomes) == 0: raise InvalidInputException('AugustusCGP is being ran without any RNA-seq hints!') if args.augustus_pb and len(args.isoseq_genomes) == 0: raise InvalidInputException('AugustusPB is being ran without any IsoSeq hints!') def get_modes(self, args): modes = ['transMap'] if args.augustus_cgp is True: modes.append('augCGP') if args.augustus is True: modes.append('augTM') if len(set(args.rnaseq_genomes) & set(args.target_genomes)) > 0: modes.append('augTMR') if args.augustus_pb is True: modes.append('augPB') if len(args.annotation_genomes) > 1: modes.append('exRef') return tuple(modes) def get_module_args(self, module, **args): pipeline_args = self.get_pipeline_args() return module.get_args(pipeline_args, **args) @memoize def load_docker(self): 
os.environ['CAT_BINARY_MODE'] = self.binary_mode if self.binary_mode == 'docker': if not tools.misc.is_exec('docker'): raise ToolMissingException('docker binary not found. ' 'Either install it or use a different option for --binary-mode.') subprocess.check_call(['docker', 'pull', 'quay.io/ucsc_cgl/cat:latest']) elif self.binary_mode == 'singularity': if not tools.misc.is_exec('singularity'): raise ToolMissingException('singularity binary not found. ' 'Either install it or use a different option for --binary-mode.') os.environ['SINGULARITY_PULLFOLDER'] = os.path.abspath(self.work_dir) os.environ['SINGULARITY_CACHEDIR'] = os.path.abspath(self.work_dir) if os.environ.get('SINGULARITY_IMAGE'): return tools.fileOps.ensure_dir(self.work_dir) if not os.path.isfile(os.path.join(self.work_dir, 'cat.img')): subprocess.check_call(['singularity', 'pull', '--name', 'cat.img', 'docker://quay.io/ucsc_cgl/cat:latest']) assert os.path.exists(os.path.join(self.work_dir, 'cat.img')) @staticmethod def get_databases(pipeline_args): dbs = {genome: PipelineTask.get_database(pipeline_args, genome) for genome in pipeline_args.hal_genomes} return frozendict(dbs) @staticmethod def get_database(pipeline_args, genome): base_out_dir = os.path.join(pipeline_args.out_dir, 'databases') return os.path.join(base_out_dir, '{}.db'.format(genome)) @staticmethod def get_plot_dir(pipeline_args, genome): base_out_dir = os.path.join(pipeline_args.out_dir, 'plots') return os.path.join(base_out_dir, genome) @staticmethod def get_metrics_dir(pipeline_args, genome): base_out_dir = os.path.join(pipeline_args.work_dir, 'plot_data') return os.path.join(base_out_dir, genome) @staticmethod def write_metrics(metrics_dict, out_target): tools.fileOps.ensure_file_dir(out_target.path) with out_target.open('w') as outf: json.dump(metrics_dict, outf, indent=4) @PipelineTask.event_handler(luigi.Event.PROCESSING_TIME) def processing_time(task, processing_time): pipeline_args = task.get_pipeline_args() stats_db = pipeline_args.stats_db finish_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') with tools.sqlite.ExclusiveSqlConnection(stats_db) as engine: c = engine.cursor() c.execute('create table if not exists stats ' '(TaskId string unique, FinishTime string, ProcessingTime real)') c.execute('insert or replace into stats values (?, ?, ?)', [task.task_id, finish_time, processing_time]) engine.commit() class PipelineWrapperTask(PipelineTask, luigi.WrapperTask): pass class AbstractAtomicFileTask(PipelineTask): def run_cmd(self, cmd): with self.output().open('w') as outf: tools.procOps.run_proc(cmd, stdout=outf) class ToilTask(PipelineTask): resources = {'toil': 1} def __repr__(self): base_repr = super(ToilTask, self).__repr__() return 'Toil' + base_repr + ' using batchSystem {}'.format(self.batchSystem)
Apache License 2.0
ostwalprasad/lgnpy
lgnpy/Graph.py
Graph.get_evidences
python
def get_evidences(self):
    return self.evidences
Get evidences if they are set

Returns
-------
dict:
    Evidences with keys as nodes.
https://github.com/ostwalprasad/lgnpy/blob/da9c0f660ed7ace8598c9a802cca7f9871538675/lgnpy/Graph.py#L293-L300
import pandas as pd import numpy as np import networkx as nx import numbers import math from unittest.mock import patch from .logging_config import Logger log = Logger() class Graph: def __init__(self): self.g = nx.DiGraph() def set_data(self, dataframe): if not isinstance(dataframe, pd.DataFrame): raise TypeError("Argument invalid. Please provide Pandas DataFrame") if len(dataframe.columns) <= 1: raise ValueError(f"Dataframe contains only {dataframe.columns}") if not set(list((self.g.nodes))).issubset(list(dataframe.columns)): raise ValueError( f"DataFrame does not contain {np.setdiff1d(list(self.g.nodes), list(dataframe.columns))}" ) dataframe = dataframe[list(self.g.nodes)] self.data = dataframe.reindex(sorted(dataframe.columns), axis=1) self.nodes = list((self.data.columns)) self.mean = np.array(self.get_mean()) self.cov = np.array(self.get_covariance()) self.precision_matrix = np.linalg.inv(self.cov) self.hvector = self.precision_matrix.dot(self.mean) self.evidences = dict.fromkeys(self.nodes) def set_parameters(self, mean, cov): raise ValueError("Not implemented yet.") def get_covariance(self): return self.data.cov() def get_precision_matrix(self): return self.precision_matrix def get_mean(self): return self.data.mean() def set_edge(self, u, v): if u == v: raise ValueError("Self Loops not allowed.") self.g.add_edge(u, v) def set_edges_from(self, edges): for edge in edges: if edge[0] == edge[1]: raise ValueError("Self loops not allowed") self.g.add_edge(edge[0], edge[1]) def get_parents(self, node): return list(self.g.pred[node]) def get_children(self, node): return list(self.g.succ[node]) def get_siblings(self, node): successors = list(self.g.succ[node]) siblings = [] for s in successors: siblings.extend(list(self.g.pred[s])) return list(set(siblings)) def get_neighbors(self,node): return list(nx.all_neighbors(self.g,node)) def get_nodes(self): return list(self.g.nodes) def get_edges(self): return list(self.g.edges) def has_parents(self,node): parents = self.get_parents(node) return True if len(parents)!=0 else False def has_children(self,node): parents = self.get_children(node) return True if len(parents)!=0 else False def remove_nodes(self, nodes): self.g.remove_nodes_from(nodes) def set_evidences(self, evidence_dict): if not isinstance(evidence_dict, dict): raise ValueError("Please provide dictionary") for key, val in evidence_dict.items(): if key not in self.nodes: raise ValueError(f"'{key}'' node is not available in network") if not isinstance(val, numbers.Number): raise ValueError( f"Node '{key}'s given evidence is not a number. It's ({val})'" ) self.evidences[key] = val
MIT License
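A minimal usage sketch for the Graph API excerpted above (not from the repository): the node names and random data are made up, and the import path is inferred from the file path lgnpy/Graph.py.

import numpy as np
import pandas as pd
from lgnpy.Graph import Graph   # inferred from the function_path above

g = Graph()
g.set_edges_from([("A", "B"), ("B", "C")])          # directed edges A -> B -> C

data = pd.DataFrame(np.random.randn(100, 3), columns=["A", "B", "C"])
g.set_data(data)                                     # computes mean/covariance, resets evidences

g.set_evidences({"A": 1.5})
print(g.get_evidences())                             # {'A': 1.5, 'B': None, 'C': None}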
trevor/calendarserver
txweb2/stream.py
FileStream.__init__
python
def __init__(self, f, start=0, length=None, useMMap=bool(mmap)):
    self.f = f
    self.start = start
    if length is None:
        self.length = os.fstat(f.fileno()).st_size
    else:
        self.length = length
    self.useMMap = useMMap
Create the stream from file f. If you specify start and length, use only that portion of the file.
https://github.com/trevor/calendarserver/blob/c9970b06a70445ca75b62e3d170c26bc897a035e/txweb2/stream.py#L204-L215
from __future__ import generators import copy, os, types, sys from zope.interface import Interface, Attribute, implements from twisted.internet.defer import Deferred from twisted.internet import interfaces as ti_interfaces, defer, reactor, protocol, error as ti_error from twisted.python import components from twisted.python.failure import Failure from hashlib import md5 from twext.python.log import Logger log = Logger() if sys.version_info[0:3] != (2,4,2): try: import mmap except ImportError: mmap = None else: mmap = None class IStream(Interface): def read(): def close(): class IByteStream(IStream): length = Attribute("""How much data is in this stream. Can be None if unknown.""") def read(): def split(point): def close(): class ISendfileableStream(Interface): def read(sendfile=False): class SimpleStream(object): implements(IByteStream) length = None start = None def read(self): return None def close(self): self.length = 0 def split(self, point): if self.length is not None: if point > self.length: raise ValueError("split point (%d) > length (%d)" % (point, self.length)) b = copy.copy(self) self.length = point if b.length is not None: b.length -= point b.start += point return (self, b) MMAP_LIMIT = 4*1024*1024 MMAP_THRESHOLD = 8*1024 SENDFILE_LIMIT = 16777216 SENDFILE_THRESHOLD = 256 def mmapwrapper(*args, **kwargs): offset = kwargs.get('offset', None) if offset in [None, 0]: if 'offset' in kwargs: del kwargs['offset'] else: raise mmap.error("mmap: Python sucks and does not support offset.") return mmap.mmap(*args, **kwargs) class FileStream(SimpleStream): implements(ISendfileableStream) CHUNK_SIZE = 2 ** 2 ** 2 ** 2 - 32 f = None
Apache License 2.0
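A hypothetical sketch of constructing the stream above; "payload.bin" is a placeholder path, not a file from the repository.

f = open("payload.bin", "rb")
partial = FileStream(f, start=100, length=1000)   # stream only bytes 100..1099
full = FileStream(f)                              # length defaults to os.fstat(f.fileno()).st_size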
telstra/messagingapi-sdk-python
Telstra_Messaging/models/outbound_poll_response.py
OutboundPollResponse.received_timestamp
python
def received_timestamp(self):
    return self._received_timestamp
Gets the received_timestamp of this OutboundPollResponse.  # noqa: E501

The date and time when the message was received by the recipient.  # noqa: E501

:return: The received_timestamp of this OutboundPollResponse.  # noqa: E501
:rtype: str
https://github.com/telstra/messagingapi-sdk-python/blob/1f9413a7f43321c84056ce54bc1ac3d626b6bbe2/Telstra_Messaging/models/outbound_poll_response.py#L117-L125
import pprint import re import six from Telstra_Messaging.configuration import Configuration class OutboundPollResponse(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'to': 'str', 'sent_timestamp': 'str', 'received_timestamp': 'str', 'delivery_status': 'Status' } attribute_map = { 'to': 'to', 'sent_timestamp': 'sentTimestamp', 'received_timestamp': 'receivedTimestamp', 'delivery_status': 'deliveryStatus' } def __init__(self, to=None, sent_timestamp=None, received_timestamp=None, delivery_status=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._to = None self._sent_timestamp = None self._received_timestamp = None self._delivery_status = None self.discriminator = None if to is not None: self.to = to if sent_timestamp is not None: self.sent_timestamp = sent_timestamp if received_timestamp is not None: self.received_timestamp = received_timestamp if delivery_status is not None: self.delivery_status = delivery_status @property def to(self): return self._to @to.setter def to(self, to): self._to = to @property def sent_timestamp(self): return self._sent_timestamp @sent_timestamp.setter def sent_timestamp(self, sent_timestamp): self._sent_timestamp = sent_timestamp @property
Apache License 2.0
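A hypothetical sketch: the generated model is a plain data holder, so the property simply mirrors the constructor argument (the phone number and timestamps below are made up).

resp = OutboundPollResponse(
    to="+61400000000",
    sent_timestamp="2018-05-01T10:00:00+10:00",
    received_timestamp="2018-05-01T10:01:00+10:00",
)
print(resp.received_timestamp)   # "2018-05-01T10:01:00+10:00"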
sighingnow/parsec.py
src/parsec/__init__.py
mark
python
def mark(p):
    return p.mark()
Mark the line and column information of the result of the parser `p`.
https://github.com/sighingnow/parsec.py/blob/a58b6e89fa4f557edef97e6dfffd8f1256312754/src/parsec/__init__.py#L349-L351
__author__ = 'He Tao, sighingnow@gmail.com' import re from functools import wraps from collections import namedtuple class ParseError(RuntimeError): def __init__(self, expected, text, index): super(ParseError, self).__init__() self.expected = expected self.text = text self.index = index @staticmethod def loc_info(text, index): if index > len(text): raise ValueError('Invalid index.') line, last_ln = text.count('\n', 0, index), text.rfind('\n', 0, index) col = index - (last_ln + 1) return (line, col) def loc(self): try: return '{}:{}'.format(*ParseError.loc_info(self.text, self.index)) except ValueError: return '<out of bounds index {!r}>'.format(self.index) def __str__(self): return 'expected {} at {}'.format(self.expected, self.loc()) class Value(namedtuple('Value', 'status index value expected')): @staticmethod def success(index, actual): return Value(True, index, actual, None) @staticmethod def failure(index, expected): return Value(False, index, None, expected) def aggregate(self, other=None): if not self.status: return self if not other: return self if not other.status: return other return Value(True, other.index, self.value + other.value, None) @staticmethod def combinate(values): prev_v = None for v in values: if prev_v: if not v: return prev_v if not v.status: return v out_values = tuple([v.value for v in values]) return Value(True, values[-1].index, out_values, None) def __str__(self): return 'Value: state: {}, @index: {}, values: {}, expected: {}'.format( self.status, self.index, self.value, self.expected) class Parser(object): def __init__(self, fn): self.fn = fn def __call__(self, text, index): return self.fn(text, index) def parse(self, text): return self.parse_partial(text)[0] def parse_partial(self, text): if not isinstance(text, str): raise TypeError( 'Can only parsing string but got {!r}'.format(text)) res = self(text, 0) if res.status: return (res.value, text[res.index:]) else: raise ParseError(res.expected, text, res.index) def parse_strict(self, text): return (self < eof()).parse_partial(text)[0] def bind(self, fn): @Parser def bind_parser(text, index): res = self(text, index) return res if not res.status else fn(res.value)(text, res.index) return bind_parser def compose(self, other): @Parser def compose_parser(text, index): res = self(text, index) return res if not res.status else other(text, res.index) return compose_parser def joint(self, *parsers): return joint(self, *parsers) def choice(self, other): @Parser def choice_parser(text, index): res = self(text, index) return res if res.status or res.index != index else other(text, index) return choice_parser def try_choice(self, other): @Parser def try_choice_parser(text, index): res = self(text, index) return res if res.status else other(text, index) return try_choice_parser def skip(self, other): @Parser def ends_with_parser(text, index): res = self(text, index) if not res.status: return res end = other(text, res.index) if end.status: return Value.success(end.index, res.value) else: return Value.failure(end.index, 'ends with {}'.format(end.expected)) return ends_with_parser def ends_with(self, other): @Parser def ends_with_parser(text, index): res = self(text, index) if not res.status: return res end = other(text, res.index) if end.status: return res else: return Value.failure(end.index, 'ends with {}'.format(end.expected)) return ends_with_parser def parsecmap(self, fn): return self.bind(lambda res: Parser(lambda _, index: Value.success(index, fn(res)))) def parsecapp(self, other): return self.bind(lambda res: 
other.parsecmap(lambda x: res(x))) def result(self, res): return self >> Parser(lambda _, index: Value.success(index, res)) def mark(self): def pos(text, index): return ParseError.loc_info(text, index) @Parser def mark_parser(text, index): res = self(text, index) if res.status: return Value.success(res.index, (pos(text, index), res.value, pos(text, res.index))) else: return res return mark_parser def desc(self, description): return self | Parser(lambda _, index: Value.failure(index, description)) def __or__(self, other): return self.choice(other) def __xor__(self, other): return self.try_choice(other) def __add__(self, other): return self.joint(other) def __rshift__(self, other): return self.compose(other) def __irshift__(self, other): return self.bind(other) def __lshift__(self, other): return self.skip(other) def __lt__(self, other): return self.ends_with(other) def parse(p, text, index=0): return p.parse(text[index:]) def bind(p, fn): return p.bind(fn) def compose(pa, pb): return pa.compose(pb) def joint(*parsers): @Parser def joint_parser(text, index): values = [] prev_v = None for p in parsers: if prev_v: index = prev_v.index prev_v = v = p(text, index) if not v.status: return v values.append(v) return Value.combinate(values) return joint_parser def choice(pa, pb): return pa.choice(pb) def try_choice(pa, pb): return pa.try_choice(pb) def skip(pa, pb): return pa.skip(pb) def ends_with(pa, pb): return pa.ends_with(pb) def parsecmap(p, fn): return p.parsecmap(fn) def parsecapp(p, other): return p.parsecapp(other) def result(p, res): return p.result(res)
MIT License
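A sketch of mark() in use, assuming the library's string primitive (defined elsewhere in this module, not shown in the excerpt) behaves as the usual exact-match parser.

from parsec import string, mark

word = string("hello")
located = mark(word)            # same as word.mark()
print(located.parse("hello"))   # ((0, 0), 'hello', (0, 5)) -- (line, column) before and after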
piccolo-orm/piccolo
piccolo/apps/schema/commands/generate.py
get_fk_triggers
python
async def get_fk_triggers(
    table_class: t.Type[Table], tablename: str, schema_name: str = "public"
) -> TableTriggers:
    triggers = await table_class.raw(
        (
            "SELECT tc.constraint_name, "
            " tc.constraint_type, "
            " tc.table_name, "
            " kcu.column_name, "
            " rc.update_rule AS on_update, "
            " rc.delete_rule AS on_delete, "
            " ccu.table_name AS references_table, "
            " ccu.column_name AS references_column "
            "FROM information_schema.table_constraints tc "
            "LEFT JOIN information_schema.key_column_usage kcu "
            " ON tc.constraint_catalog = kcu.constraint_catalog "
            " AND tc.constraint_schema = kcu.constraint_schema "
            " AND tc.constraint_name = kcu.constraint_name "
            "LEFT JOIN information_schema.referential_constraints rc "
            " ON tc.constraint_catalog = rc.constraint_catalog "
            " AND tc.constraint_schema = rc.constraint_schema "
            " AND tc.constraint_name = rc.constraint_name "
            "LEFT JOIN information_schema.constraint_column_usage ccu "
            " ON rc.unique_constraint_catalog = ccu.constraint_catalog "
            " AND rc.unique_constraint_schema = ccu.constraint_schema "
            " AND rc.unique_constraint_name = ccu.constraint_name "
            "WHERE lower(tc.constraint_type) in ('foreign key')"
            "AND tc.table_schema = {} "
            "AND tc.table_name = {}; "
        ),
        schema_name,
        tablename,
    )

    return TableTriggers(
        tablename=tablename,
        triggers=[Trigger(**i) for i in triggers],
    )
Get all of the foreign key constraints for a table.

:param table_class: Any Table subclass - just used to execute raw queries on
    the database.
https://github.com/piccolo-orm/piccolo/blob/27539219431874bae99b7206df48133fbe1a27eb/piccolo/apps/schema/commands/generate.py#L451-L494
from __future__ import annotations import asyncio import dataclasses import json import re import typing as t import uuid from datetime import date, datetime import black from typing_extensions import Literal from piccolo.apps.migrations.auto.serialisation import serialise_params from piccolo.columns import defaults from piccolo.columns.base import Column, OnDelete, OnUpdate from piccolo.columns.column_types import ( JSON, JSONB, UUID, BigInt, Boolean, Bytea, Date, DoublePrecision, ForeignKey, Integer, Interval, Numeric, Real, Serial, SmallInt, Text, Timestamp, Timestamptz, Varchar, ) from piccolo.columns.defaults.interval import IntervalCustom from piccolo.columns.indexes import IndexMethod from piccolo.engine.finder import engine_finder from piccolo.engine.postgres import PostgresEngine from piccolo.table import Table, create_table_class, sort_table_classes from piccolo.utils.naming import _snake_to_camel if t.TYPE_CHECKING: from piccolo.engine.base import Engine class ForeignKeyPlaceholder(Table): pass @dataclasses.dataclass class ConstraintTable: name: str = "" schema: str = "" @dataclasses.dataclass class RowMeta: column_default: str column_name: str is_nullable: Literal["YES", "NO"] table_name: str character_maximum_length: t.Optional[int] data_type: str numeric_precision: t.Optional[t.Union[int, str]] numeric_scale: t.Optional[t.Union[int, str]] numeric_precision_radix: t.Optional[Literal[2, 10]] @classmethod def get_column_name_str(cls) -> str: return ", ".join(i.name for i in dataclasses.fields(cls)) @dataclasses.dataclass class Constraint: constraint_type: Literal["PRIMARY KEY", "UNIQUE", "FOREIGN KEY", "CHECK"] constraint_name: str constraint_schema: t.Optional[str] = None column_name: t.Optional[str] = None @dataclasses.dataclass class TableConstraints: tablename: str constraints: t.List[Constraint] def __post_init__(self): foreign_key_constraints: t.List[Constraint] = [] unique_constraints: t.List[Constraint] = [] primary_key_constraints: t.List[Constraint] = [] for constraint in self.constraints: if constraint.constraint_type == "FOREIGN KEY": foreign_key_constraints.append(constraint) elif constraint.constraint_type == "PRIMARY KEY": primary_key_constraints.append(constraint) elif constraint.constraint_type == "UNIQUE": unique_constraints.append(constraint) self.foreign_key_constraints = foreign_key_constraints self.unique_constraints = unique_constraints self.primary_key_constraints = primary_key_constraints def is_primary_key(self, column_name: str) -> bool: return any( i.column_name == column_name for i in self.primary_key_constraints ) def is_unique(self, column_name: str) -> bool: return any( i.column_name == column_name for i in self.unique_constraints ) def is_foreign_key(self, column_name: str) -> bool: return any( i.column_name == column_name for i in self.foreign_key_constraints ) def get_foreign_key_constraint_name(self, column_name) -> ConstraintTable: for i in self.foreign_key_constraints: if i.column_name == column_name: return ConstraintTable( name=i.constraint_name, schema=i.constraint_schema ) raise ValueError("No matching constraint found") @dataclasses.dataclass class Trigger: constraint_name: str constraint_type: str table_name: str column_name: str on_update: str on_delete: Literal[ "NO ACTION", "RESTRICT", "CASCADE", "SET NULL", "SET_DEFAULT" ] references_table: str references_column: str @dataclasses.dataclass class TableTriggers: tablename: str triggers: t.List[Trigger] def get_column_triggers(self, column_name: str) -> t.List[Trigger]: return [i for 
i in self.triggers if i.column_name == column_name] def get_column_ref_trigger( self, column_name: str, references_table: str ) -> Trigger: for trigger in self.triggers: if ( trigger.column_name == column_name and trigger.references_table == references_table ): return trigger raise ValueError("No matching trigger found") @dataclasses.dataclass class Index: indexname: str indexdef: str def __post_init__(self): pat = re.compile( r"""^CREATE[ ](?:(?P<unique>UNIQUE)[ ])?INDEX[ ]\w+?[ ] ON[ ].+?[ ]USING[ ](?P<method>\w+?)[ ] \(\"?(?P<column_name>\w+?\"?)\)""", re.VERBOSE, ) groups = re.match(pat, self.indexdef).groupdict() self.column_name = groups["column_name"].lstrip('"').rstrip('"') self.unique = True if "unique" in groups else False self.method = INDEX_METHOD_MAP[groups["method"]] @dataclasses.dataclass class TableIndexes: tablename: str indexes: t.List[Index] def get_column_index(self, column_name: str) -> t.Optional[Index]: for i in self.indexes: if i.column_name == column_name: return i return None @dataclasses.dataclass class OutputSchema: imports: t.List[str] = dataclasses.field(default_factory=list) warnings: t.List[str] = dataclasses.field(default_factory=list) tables: t.List[t.Type[Table]] = dataclasses.field(default_factory=list) def get_table_with_name(self, tablename: str) -> t.Optional[t.Type[Table]]: tablename = _snake_to_camel(tablename) try: return next( table for table in self.tables if table.__name__ == tablename ) except StopIteration: return None def __radd__(self, value: OutputSchema) -> OutputSchema: if isinstance(value, int): return self value.imports.extend(self.imports) value.warnings.extend(self.warnings) value.tables.extend(self.tables) return value def __add__(self, value: OutputSchema) -> OutputSchema: self.imports.extend(value.imports) self.warnings.extend(value.warnings) self.tables.extend(value.tables) return self COLUMN_TYPE_MAP: t.Dict[str, t.Type[Column]] = { "bigint": BigInt, "boolean": Boolean, "bytea": Bytea, "character varying": Varchar, "date": Date, "integer": Integer, "interval": Interval, "json": JSON, "jsonb": JSONB, "numeric": Numeric, "real": Real, "double precision": DoublePrecision, "smallint": SmallInt, "text": Text, "timestamp with time zone": Timestamptz, "timestamp without time zone": Timestamp, "uuid": UUID, } COLUMN_DEFAULT_PARSER = { BigInt: re.compile(r"^'?(?P<value>-?[0-9]\d*)'?(?:::bigint)?$"), Boolean: re.compile(r"^(?P<value>true|false)$"), Bytea: re.compile(r"'(?P<value>.*)'::bytea$"), DoublePrecision: re.compile(r"(?P<value>[+-]?(?:[0-9]*[.])?[0-9]+)"), Varchar: re.compile(r"^'(?P<value>.*)'::character varying$"), Date: re.compile(r"^(?P<value>(?:\d{4}-\d{2}-\d{2})|CURRENT_DATE)$"), Integer: re.compile(r"^(?P<value>-?\d+)$"), Interval: re.compile( r"""^ (?:')? (?: (?:(?P<years>\d+)[ ]y(?:ear(?:s)?)?\b) | (?:(?P<months>\d+)[ ]m(?:onth(?:s)?)?\b) | (?:(?P<weeks>\d+)[ ]w(?:eek(?:s)?)?\b) | (?:(?P<days>\d+)[ ]d(?:ay(?:s)?)?\b) | (?: (?: (?:(?P<hours>\d+)[ ]h(?:our(?:s)?)?\b) | (?:(?P<minutes>\d+)[ ]m(?:inute(?:s)?)?\b) | (?:(?P<seconds>\d+)[ ]s(?:econd(?:s)?)?\b) ) | (?: (?P<digits>-?\d{2}:\d{2}:\d{2}))?\b) ) +(?P<direction>ago)? (?:'::interval)? 
$""", re.X, ), JSON: re.compile(r"^'(?P<value>.*)'::json$"), JSONB: re.compile(r"^'(?P<value>.*)'::jsonb$"), Numeric: re.compile(r"(?P<value>\d+)"), Real: re.compile(r"^(?P<value>-?[0-9]\d*(?:\.\d+)?)$"), SmallInt: re.compile(r"^'?(?P<value>-?[0-9]\d*)'?(?:::integer)?$"), Text: re.compile(r"^'(?P<value>.*)'::text$"), Timestamp: re.compile( r"""^ (?P<value> (?:\d{4}-\d{2}-\d{2}[ ]\d{2}:\d{2}:\d{2}) | CURRENT_TIMESTAMP ) $""", re.VERBOSE, ), Timestamptz: re.compile( r"""^ (?P<value> (?:\d{4}-\d{2}-\d{2}[ ]\d{2}:\d{2}:\d{2}(?:\.\d+)?-\d{2}) | CURRENT_TIMESTAMP ) $""", re.VERBOSE, ), UUID: None, Serial: None, ForeignKey: None, } def get_column_default( column_type: t.Type[Column], column_default: str ) -> t.Any: pat = COLUMN_DEFAULT_PARSER[column_type] if pat is not None: match = re.match(pat, column_default) if match is not None: value = match.groupdict() if column_type is Boolean: return value["value"] == "true" elif column_type is Interval: kwargs = {} for period in [ "years", "months", "weeks", "days", "hours", "minutes", "seconds", ]: period_match = value.get(period, 0) if period_match: kwargs[period] = int(period_match) digits = value["digits"] if digits: kwargs.update( dict( zip( ["hours", "minutes", "seconds"], [int(v) for v in digits.split(":")], ) ) ) return IntervalCustom(**kwargs) elif column_type is JSON or column_type is JSONB: return json.loads(value["value"]) elif column_type is UUID: return uuid.uuid4 elif column_type is Date: return ( date.today if value["value"] == "CURRENT_DATE" else defaults.date.DateCustom( *[int(v) for v in value["value"].split("-")] ) ) elif column_type is Bytea: return value["value"].encode("utf8") elif column_type is Timestamp: return ( datetime.now if value["value"] == "CURRENT_TIMESTAMP" else datetime.fromtimestamp(float(value["value"])) ) elif column_type is Timestamptz: return ( datetime.now if value["value"] == "CURRENT_TIMESTAMP" else datetime.fromtimestamp(float(value["value"])) ) else: return column_type.value_type(value["value"]) INDEX_METHOD_MAP: t.Dict[str, IndexMethod] = { "btree": IndexMethod.btree, "hash": IndexMethod.hash, "gist": IndexMethod.gist, "gin": IndexMethod.gin, } async def get_indexes( table_class: t.Type[Table], tablename: str, schema_name: str = "public" ) -> TableIndexes: indexes = await table_class.raw( ( "SELECT indexname, indexdef " "FROM pg_indexes " "WHERE schemaname = {} " "AND tablename = {}; " ), schema_name, tablename, ) return TableIndexes( tablename=tablename, indexes=[Index(**i) for i in indexes], )
MIT License
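A hypothetical usage sketch. It assumes a Postgres engine is already configured for the project; the table name "band" is a placeholder, and ForeignKeyPlaceholder (defined in the context above) is used only to run the raw query.

import asyncio

async def main():
    triggers = await get_fk_triggers(
        table_class=ForeignKeyPlaceholder,   # any Table subclass works here
        tablename="band",
    )
    for trigger in triggers.triggers:
        print(trigger.column_name, "->", trigger.references_table, trigger.on_delete)

asyncio.run(main())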
unofficial-memsource/memsource-cli-client
memsource_cli/models/page_dto_net_rate_scheme_reference.py
PageDtoNetRateSchemeReference.number_of_elements
python
def number_of_elements(self, number_of_elements):
    self._number_of_elements = number_of_elements
Sets the number_of_elements of this PageDtoNetRateSchemeReference. :param number_of_elements: The number_of_elements of this PageDtoNetRateSchemeReference. # noqa: E501 :type: int
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/page_dto_net_rate_scheme_reference.py#L172-L180
import pprint import re import six from memsource_cli.models.net_rate_scheme_reference import NetRateSchemeReference class PageDtoNetRateSchemeReference(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'total_elements': 'int', 'total_pages': 'int', 'page_size': 'int', 'page_number': 'int', 'number_of_elements': 'int', 'content': 'list[NetRateSchemeReference]' } attribute_map = { 'total_elements': 'totalElements', 'total_pages': 'totalPages', 'page_size': 'pageSize', 'page_number': 'pageNumber', 'number_of_elements': 'numberOfElements', 'content': 'content' } def __init__(self, total_elements=None, total_pages=None, page_size=None, page_number=None, number_of_elements=None, content=None): self._total_elements = None self._total_pages = None self._page_size = None self._page_number = None self._number_of_elements = None self._content = None self.discriminator = None if total_elements is not None: self.total_elements = total_elements if total_pages is not None: self.total_pages = total_pages if page_size is not None: self.page_size = page_size if page_number is not None: self.page_number = page_number if number_of_elements is not None: self.number_of_elements = number_of_elements if content is not None: self.content = content @property def total_elements(self): return self._total_elements @total_elements.setter def total_elements(self, total_elements): self._total_elements = total_elements @property def total_pages(self): return self._total_pages @total_pages.setter def total_pages(self, total_pages): self._total_pages = total_pages @property def page_size(self): return self._page_size @page_size.setter def page_size(self, page_size): self._page_size = page_size @property def page_number(self): return self._page_number @page_number.setter def page_number(self, page_number): self._page_number = page_number @property def number_of_elements(self): return self._number_of_elements @number_of_elements.setter
Apache License 2.0
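A hypothetical sketch: the generated setter just stores the value on the private attribute, so it can be exercised directly (the numbers are made up).

page = PageDtoNetRateSchemeReference(page_number=1, page_size=50)
page.number_of_elements = 50          # calls the setter shown above
print(page.number_of_elements)        # 50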
jason0x43/jc-toggl
toggl.py
Workspace.retrieve
python
def retrieve(cls, id):
    resp = api_get('/workspaces/{0}'.format(id))
    return Workspace(resp.json()['data'])
Retrieve a specific workspace
https://github.com/jason0x43/jc-toggl/blob/ef273ef45833092c89c91d21694dcec63e6880c7/toggl.py#L208-L211
from dateutil.parser import parse from tzlocal import get_localzone import requests import logging import json TOGGL_API = 'https://www.toggl.com/api/v8' REPORTS_API = 'https://www.toggl.com/reports/api/v2' LOCALTZ = get_localzone() LOG = logging.getLogger(__name__) api_key = None workspace_id = 425197 def api_get(path, params=None): url = TOGGL_API + path return requests.get(url, auth=(api_key, 'api_token'), params=params, headers={'content-type': 'application/json'}) def report_get(path, params=None): url = REPORTS_API + path if not params: params = {} params['user_agent'] = 'jc-toggl' params['workspace_id'] = workspace_id return requests.get(url, auth=(api_key, 'api_token'), params=params, headers={'content-type': 'application/json'}) def api_post(path, data=None): url = TOGGL_API + path return requests.post(url, auth=(api_key, 'api_token'), data=data, headers={'content-type': 'application/json'}) def api_put(path, data=None): url = TOGGL_API + path return requests.put(url, auth=(api_key, 'api_token'), data=data, headers={'content-type': 'application/json'}) def api_delete(path): url = TOGGL_API + path return requests.put(url, auth=(api_key, 'api_token'), headers={'content-type': 'application/json'}) class JsonObject(object): def __init__(self, data): self._data = data self._cache = {} @property def data(self): return self._data def _get_value(self, field_name): return self._data.get(field_name) def _get_timestamp(self, field_name): val = self._data.get(field_name) if val: return parse(val).astimezone(LOCALTZ) else: return val class TimeEntry(JsonObject): @classmethod def all(cls): resp = api_get('/time_entries') LOG.debug('response: %s', resp) return [TimeEntry(e) for e in resp.json()] @classmethod def retrieve(cls, id): resp = api_get('/time_entries/{0}'.format(id)) return TimeEntry(resp.json()['data']) @classmethod def start(cls, description, project_id=None): data = {'time_entry': {'description': description}} if project_id: data['time_entry']['pid'] = project_id data = json.dumps(data) LOG.debug('starting entry with {0}'.format(data)) resp = api_post('/time_entries/start', data=data) if resp.status_code != 200: raise Exception('Unable to start timer: {0}'.format(resp)) return TimeEntry(resp.json()['data']) @classmethod def stop(cls, id=None): if not id: entries = cls.all() for entry in entries: if entry.is_running: id = entry.id LOG.debug('running entry is {0}'.format(entry)) break if not id: return None resp = api_put('/time_entries/{0}/stop'.format(id)) return resp.json()['data'] @property def id(self): return self._get_value('id') @property def workspace(self): return self._workspace @property def account(self): return self._workspace.account @property def description(self): return self._get_value('description') @property def start_time(self): return self._get_timestamp('start') @property def stop_time(self): st = self._get_timestamp('stop') if st: return st import datetime delta = datetime.timedelta(seconds=self.duration) return self.start_time + delta @property def duration(self): return self._get_value('duration') @property def tags(self): return self._get_value('tags') @property def pid(self): return self._get_value('pid') @property def is_running(self): return self.duration < 0 def restart(self): return TimeEntry.start(self.description, pid=self.pid) def __str__(self): return ('{{TimeEntry: description={0}, running={1}, start={2}, ' 'stop={3}}}'.format(self.description, self.is_running, self.start_time, self.stop_time)) def __repr__(self): return self.__str__() class 
Project(JsonObject): @classmethod def retrieve(cls, id): resp = api_get('/projects/{0}'.format(id)) return Project(resp.json()['data']) @property def name(self): return self._get_value('name') @property def id(self): return self._get_value('id') @property def wid(self): return self._get_value('wid') def __str__(self): return '{{Project: name={0}}}'.format(self.name) def __repr__(self): return self.__str__() class Workspace(JsonObject): @classmethod def all(cls): resp = api_get('/workspaces') return [Workspace(w) for w in resp.json()] @classmethod
MIT License
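A hypothetical sketch: the module authenticates through the module-level api_key, so it must be set before calling the class methods. The token and workspace id below are made up, and a live Toggl account is needed for the request to succeed.

import toggl

toggl.api_key = "your-toggl-api-token"
ws = toggl.Workspace.retrieve(425197)
print(ws.data)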
facebookresearch/pytorchvideo
tutorials/video_detection_example/visualization.py
ImgVisualizer._align_y_top
python
def _align_y_top(
    self, box_coordinate: torch.Tensor, num_text: int, textbox_width: float
) -> int:
    dist_to_top = box_coordinate[1]
    num_text_top = dist_to_top // textbox_width
    if isinstance(num_text_top, torch.Tensor):
        num_text_top = int(num_text_top.item())
    return min(num_text, num_text_top)
Calculate the number of text labels to plot on top of the box without going
out of frames.

Args:
    box_coordinate (array-like): shape (4,). The (x_left, y_top, x_right,
        y_bottom) coordinates of the box.
    num_text (int): the number of text labels to plot.
    textbox_width (float): the width of the box wrapped around text label.
https://github.com/facebookresearch/pytorchvideo/blob/832a6bc683257f07e74c95a1f9441ebaa64d95d8/tutorials/video_detection_example/visualization.py#L321-L339
from __future__ import annotations import itertools import logging from types import SimpleNamespace from typing import Dict, List, Optional, Tuple, Union import matplotlib.pyplot as plt import numpy as np import torch from detectron2.utils.visualizer import Visualizer logger = logging.getLogger(__name__) def _create_text_labels( classes: List[int], scores: List[float], class_names: List[str], ground_truth: bool = False, ) -> List[str]: try: labels = [class_names.get(c, "n/a") for c in classes] except IndexError: logger.error("Class indices get out of range: {}".format(classes)) return None if ground_truth: labels = ["[{}] {}".format("GT", label) for label in labels] elif scores is not None: assert len(classes) == len(scores) labels = ["[{:.2f}] {}".format(s, label) for s, label in zip(scores, labels)] return labels class ImgVisualizer(Visualizer): def __init__( self, img_rgb: torch.Tensor, meta: Optional[SimpleNamespace] = None, **kwargs ) -> None: super(ImgVisualizer, self).__init__(img_rgb, meta, **kwargs) def draw_text( self, text: str, position: List[int], *, font_size: Optional[int] = None, color: str = "w", horizontal_alignment: str = "center", vertical_alignment: str = "bottom", box_facecolor: str = "black", alpha: float = 0.5, ) -> None: if not font_size: font_size = self._default_font_size x, y = position self.output.ax.text( x, y, text, size=font_size * self.output.scale, family="monospace", bbox={ "facecolor": box_facecolor, "alpha": alpha, "pad": 0.7, "edgecolor": "none", }, verticalalignment=vertical_alignment, horizontalalignment=horizontal_alignment, color=color, zorder=10, ) def draw_multiple_text( self, text_ls: List[str], box_coordinate: torch.Tensor, *, top_corner: bool = True, font_size: Optional[int] = None, color: str = "w", box_facecolors: str = "black", alpha: float = 0.5, ) -> None: if not isinstance(box_facecolors, list): box_facecolors = [box_facecolors] * len(text_ls) assert len(box_facecolors) == len( text_ls ), "Number of colors provided is not equal to the number of text labels." if not font_size: font_size = self._default_font_size text_box_width = font_size + font_size // 2 if top_corner: num_text_split = self._align_y_top( box_coordinate, len(text_ls), text_box_width ) y_corner = 1 else: num_text_split = len(text_ls) - self._align_y_bottom( box_coordinate, len(text_ls), text_box_width ) y_corner = 3 text_color_sorted = sorted( zip(text_ls, box_facecolors), key=lambda x: x[0], reverse=True ) if len(text_color_sorted) != 0: text_ls, box_facecolors = zip(*text_color_sorted) else: text_ls, box_facecolors = [], [] text_ls, box_facecolors = list(text_ls), list(box_facecolors) self.draw_multiple_text_upward( text_ls[:num_text_split][::-1], box_coordinate, y_corner=y_corner, font_size=font_size, color=color, box_facecolors=box_facecolors[:num_text_split][::-1], alpha=alpha, ) self.draw_multiple_text_downward( text_ls[num_text_split:], box_coordinate, y_corner=y_corner, font_size=font_size, color=color, box_facecolors=box_facecolors[num_text_split:], alpha=alpha, ) def draw_multiple_text_upward( self, text_ls: List[str], box_coordinate: torch.Tensor, *, y_corner: int = 1, font_size: Optional[int] = None, color: str = "w", box_facecolors: str = "black", alpha: float = 0.5, ) -> None: if not isinstance(box_facecolors, list): box_facecolors = [box_facecolors] * len(text_ls) assert len(box_facecolors) == len( text_ls ), "Number of colors provided is not equal to the number of text labels." 
assert y_corner in [1, 3], "Y_corner must be either 1 or 3" if not font_size: font_size = self._default_font_size x, horizontal_alignment = self._align_x_coordinate(box_coordinate) y = box_coordinate[y_corner].item() for i, text in enumerate(text_ls): self.draw_text( text, (x, y), font_size=font_size, color=color, horizontal_alignment=horizontal_alignment, vertical_alignment="bottom", box_facecolor=box_facecolors[i], alpha=alpha, ) y -= font_size + font_size // 2 def draw_multiple_text_downward( self, text_ls: List[str], box_coordinate: torch.Tensor, *, y_corner: int = 1, font_size: Optional[int] = None, color: str = "w", box_facecolors: str = "black", alpha: float = 0.5, ) -> None: if not isinstance(box_facecolors, list): box_facecolors = [box_facecolors] * len(text_ls) assert len(box_facecolors) == len( text_ls ), "Number of colors provided is not equal to the number of text labels." assert y_corner in [1, 3], "Y_corner must be either 1 or 3" if not font_size: font_size = self._default_font_size x, horizontal_alignment = self._align_x_coordinate(box_coordinate) y = box_coordinate[y_corner].item() for i, text in enumerate(text_ls): self.draw_text( text, (x, y), font_size=font_size, color=color, horizontal_alignment=horizontal_alignment, vertical_alignment="top", box_facecolor=box_facecolors[i], alpha=alpha, ) y += font_size + font_size // 2 def _align_x_coordinate(self, box_coordinate: torch.Tensor) -> Tuple[float, str]: if box_coordinate[0] > (self.output.width * 5) // 6: return box_coordinate[2], "right" return box_coordinate[0], "left"
Apache License 2.0
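A worked illustration of the arithmetic above (values are made up, and the private method is not called directly because it needs an ImgVisualizer instance): a box whose top edge sits 96 pixels from the top of the frame fits floor(96 / 30) = 3 text boxes of height 30 above it, so at most 3 of the requested labels go on top.

import torch

box = torch.tensor([50.0, 96.0, 200.0, 300.0])    # (x_left, y_top, x_right, y_bottom)
dist_to_top = box[1]
num_fit = int((dist_to_top // 30.0).item())       # -> 3
print(min(5, num_fit))                            # requesting 5 labels -> only 3 plotted on top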
camelot-dev/camelot
camelot/utils.py
bbox_intersection_area
python
def bbox_intersection_area(ba, bb) -> float:
    x_left = max(ba.x0, bb.x0)
    y_top = min(ba.y1, bb.y1)
    x_right = min(ba.x1, bb.x1)
    y_bottom = max(ba.y0, bb.y0)
    if x_right < x_left or y_bottom > y_top:
        return 0.0
    intersection_area = (x_right - x_left) * (y_top - y_bottom)
    return intersection_area
Returns area of the intersection of the bounding boxes of two PDFMiner objects.

Parameters
----------
ba : PDFMiner text object
bb : PDFMiner text object

Returns
-------
intersection_area : float
    Area of the intersection of the bounding boxes of both objects
https://github.com/camelot-dev/camelot/blob/644bbe7c6d57b95aefa2f049a9aacdbc061cc04f/camelot/utils.py#L384-L407
import os import re import random import shutil import string import tempfile import warnings from itertools import groupby from operator import itemgetter import numpy as np from pdfminer.pdfparser import PDFParser from pdfminer.pdfdocument import PDFDocument from pdfminer.pdfpage import PDFPage from pdfminer.pdfpage import PDFTextExtractionNotAllowed from pdfminer.pdfinterp import PDFResourceManager from pdfminer.pdfinterp import PDFPageInterpreter from pdfminer.converter import PDFPageAggregator from pdfminer.layout import ( LAParams, LTAnno, LTChar, LTTextLineHorizontal, LTTextLineVertical, LTImage, ) from urllib.request import Request, urlopen from urllib.parse import urlparse as parse_url from urllib.parse import uses_relative, uses_netloc, uses_params _VALID_URLS = set(uses_relative + uses_netloc + uses_params) _VALID_URLS.discard("") def is_url(url): try: return parse_url(url).scheme in _VALID_URLS except Exception: return False def random_string(length): ret = "" while length: ret += random.choice( string.digits + string.ascii_lowercase + string.ascii_uppercase ) length -= 1 return ret def download_url(url): filename = f"{random_string(6)}.pdf" with tempfile.NamedTemporaryFile("wb", delete=False) as f: headers = {"User-Agent": "Mozilla/5.0"} request = Request(url, None, headers) obj = urlopen(request) content_type = obj.info().get_content_type() if content_type != "application/pdf": raise NotImplementedError("File format not supported") f.write(obj.read()) filepath = os.path.join(os.path.dirname(f.name), filename) shutil.move(f.name, filepath) return filepath stream_kwargs = ["columns", "edge_tol", "row_tol", "column_tol"] lattice_kwargs = [ "process_background", "line_scale", "copy_text", "shift_text", "line_tol", "joint_tol", "threshold_blocksize", "threshold_constant", "iterations", "resolution", ] def validate_input(kwargs, flavor="lattice"): def check_intersection(parser_kwargs, input_kwargs): isec = set(parser_kwargs).intersection(set(input_kwargs.keys())) if isec: raise ValueError( f"{','.join(sorted(isec))} cannot be used with flavor='{flavor}'" ) if flavor == "lattice": check_intersection(stream_kwargs, kwargs) else: check_intersection(lattice_kwargs, kwargs) def remove_extra(kwargs, flavor="lattice"): if flavor == "lattice": for key in kwargs.keys(): if key in stream_kwargs: kwargs.pop(key) else: for key in kwargs.keys(): if key in lattice_kwargs: kwargs.pop(key) return kwargs class TemporaryDirectory(object): def __enter__(self): self.name = tempfile.mkdtemp() return self.name def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self.name) def translate(x1, x2): x2 += x1 return x2 def scale(x, s): x *= s return x def scale_pdf(k, factors): x1, y1, x2, y2 = k scaling_factor_x, scaling_factor_y, pdf_y = factors x1 = scale(x1, scaling_factor_x) y1 = scale(abs(translate(-pdf_y, y1)), scaling_factor_y) x2 = scale(x2, scaling_factor_x) y2 = scale(abs(translate(-pdf_y, y2)), scaling_factor_y) knew = (int(x1), int(y1), int(x2), int(y2)) return knew def scale_image(tables, v_segments, h_segments, factors): scaling_factor_x, scaling_factor_y, img_y = factors tables_new = {} for k in tables.keys(): x1, y1, x2, y2 = k x1 = scale(x1, scaling_factor_x) y1 = scale(abs(translate(-img_y, y1)), scaling_factor_y) x2 = scale(x2, scaling_factor_x) y2 = scale(abs(translate(-img_y, y2)), scaling_factor_y) j_x, j_y = zip(*tables[k]) j_x = [scale(j, scaling_factor_x) for j in j_x] j_y = [scale(abs(translate(-img_y, j)), scaling_factor_y) for j in j_y] joints = zip(j_x, j_y) 
tables_new[(x1, y1, x2, y2)] = joints v_segments_new = [] for v in v_segments: x1, x2 = scale(v[0], scaling_factor_x), scale(v[2], scaling_factor_x) y1, y2 = ( scale(abs(translate(-img_y, v[1])), scaling_factor_y), scale(abs(translate(-img_y, v[3])), scaling_factor_y), ) v_segments_new.append((x1, y1, x2, y2)) h_segments_new = [] for h in h_segments: x1, x2 = scale(h[0], scaling_factor_x), scale(h[2], scaling_factor_x) y1, y2 = ( scale(abs(translate(-img_y, h[1])), scaling_factor_y), scale(abs(translate(-img_y, h[3])), scaling_factor_y), ) h_segments_new.append((x1, y1, x2, y2)) return tables_new, v_segments_new, h_segments_new def get_rotation(chars, horizontal_text, vertical_text): rotation = "" hlen = len([t for t in horizontal_text if t.get_text().strip()]) vlen = len([t for t in vertical_text if t.get_text().strip()]) if hlen < vlen: clockwise = sum(t.matrix[1] < 0 and t.matrix[2] > 0 for t in chars) anticlockwise = sum(t.matrix[1] > 0 and t.matrix[2] < 0 for t in chars) rotation = "anticlockwise" if clockwise < anticlockwise else "clockwise" return rotation def segments_in_bbox(bbox, v_segments, h_segments): lb = (bbox[0], bbox[1]) rt = (bbox[2], bbox[3]) v_s = [ v for v in v_segments if v[1] > lb[1] - 2 and v[3] < rt[1] + 2 and lb[0] - 2 <= v[0] <= rt[0] + 2 ] h_s = [ h for h in h_segments if h[0] > lb[0] - 2 and h[2] < rt[0] + 2 and lb[1] - 2 <= h[1] <= rt[1] + 2 ] return v_s, h_s def text_in_bbox(bbox, text): lb = (bbox[0], bbox[1]) rt = (bbox[2], bbox[3]) t_bbox = [ t for t in text if lb[0] - 2 <= (t.x0 + t.x1) / 2.0 <= rt[0] + 2 and lb[1] - 2 <= (t.y0 + t.y1) / 2.0 <= rt[1] + 2 ] rest = {t for t in t_bbox} for ba in t_bbox: for bb in rest.copy(): if ba == bb: continue if bbox_intersect(ba, bb): if (bbox_intersection_area(ba, bb) / bbox_area(ba)) > 0.8: if bbox_longer(bb, ba): rest.discard(ba) unique_boxes = list(rest) return unique_boxes
MIT License
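A worked example with stand-in objects: the function only reads x0/y0/x1/y1 attributes, so SimpleNamespace boxes are enough to illustrate it (the coordinates are made up).

from types import SimpleNamespace

ba = SimpleNamespace(x0=0.0, y0=0.0, x1=10.0, y1=10.0)
bb = SimpleNamespace(x0=5.0, y0=5.0, x1=15.0, y1=15.0)
print(bbox_intersection_area(ba, bb))   # (10 - 5) * (10 - 5) = 25.0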
erigones/esdc-ce
api/validators.py
ip_validator
python
def ip_validator(value):
    try:
        ip = ipaddress.ip_address(six.text_type(value))
        if ip.is_reserved:
            raise ValueError
    except ValueError:
        raise ValidationError(_('Enter a valid IPv4 address.'))
Validate IPv4 address
https://github.com/erigones/esdc-ce/blob/f83a62d0d430e3c8f9aac23d958583b0efce4312/api/validators.py#L119-L126
import re import struct import base64 import ipaddress from django.core import validators from django.core.exceptions import ValidationError from django.utils import six from django.utils.translation import ugettext_lazy as _ from gui.models import UserSSHKey SSH_KEY_TYPES = ( 'ssh-rsa', 'ssh-dss', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', 'ecdsa-sha2-nistp521', 'ssh-ed25519' ) RE_PEM_KEY_BEGIN = re.compile(r'^-----BEGIN( | \w+ )PRIVATE KEY-----', re.MULTILINE) RE_PEM_KEY_END = re.compile(r'^-----END( | \w+ )PRIVATE KEY-----', re.MULTILINE) RE_PEM_CRT_BEGIN = re.compile(r'^-----BEGIN CERTIFICATE-----', re.MULTILINE) RE_PEM_CRT_END = re.compile(r'^-----END CERTIFICATE-----', re.MULTILINE) RE_PLACEHOLDER = re.compile(r'{(.*?)}+', re.MULTILINE) def validate_owner(obj, new_owner, model_name): if obj and new_owner and obj.pk and obj.owner != new_owner: if obj.tasks: raise ValidationError(_('%(model)s has pending tasks.') % {'model': model_name}) def validate_alias(obj, value, field_comparison='alias__iexact'): qs = obj.__class__.objects if obj.pk: if obj.alias == value: return value else: qs = qs.exclude(pk=obj.pk) if qs.filter(**{field_comparison: value}).exists(): raise ValidationError(_('This alias is already in use. Please supply a different alias.')) return value def validate_mdata(reserved_keys): def mdata_validator(value): if value: invalid_keys = reserved_keys.intersection(value.keys()) if invalid_keys: raise ValidationError(_('Invalid key name(s) (%(invalid_keys)s).') % {'invalid_keys': invalid_keys}) return mdata_validator def validate_ssh_key(value): key = value.split(' ') if not (1 < len(key) and key[0] in SSH_KEY_TYPES): raise ValidationError(_('Unknown SSH public key type.')) if '\n' in value: raise ValidationError(_('Invalid SSH public key format (newlines detected).')) try: data = base64.decodestring(key[1]) int_len = 4 str_len = struct.unpack('>I', data[:int_len])[0] if data[int_len:int_len + str_len] != key[0]: raise ValueError fingerprint = UserSSHKey.get_fingerprint(value) except Exception: raise ValidationError(_('Invalid SSH public key format.')) return fingerprint def mod2_validator(num): if num % 2: raise ValidationError(_('Must be power of 2.')) def mac_address_validator(value): mac = six.text_type(value).lower() for prefix in ('33:33', '00:00', '00:01', '00:02', '00:52:00', '00:52:01', '00:52:13'): if mac.startswith(prefix): raise ValidationError(_('Enter a valid MAC address.')) if mac == 'ff:ff:ff:ff:ff:ff': raise ValidationError(_('Enter a valid MAC address.')) def cron_validator(value): if value.strip() != value: raise ValidationError(_('Leading nor trailing spaces are allowed.')) columns = value.split() if columns != value.split(' '): raise ValidationError(_('Use only a single space as a column separator.')) if len(columns) != 5: raise ValidationError(_('Entry has to consist of exactly 5 columns.')) cron_re = re.compile(r'^(\*|\d+(-\d+)?(,\d+(-\d+)?)*)(/\d+)?$') for i, c in enumerate(columns): if not cron_re.match(c): i += 1 raise ValidationError(_('Incorrect value in %d. column.') % i)
Apache License 2.0
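A standalone illustration of the same check (importing the real validator requires a configured Django project, so this sketch reuses only the stdlib logic it wraps).

import ipaddress

def looks_valid(value):
    try:
        ip = ipaddress.ip_address(u"%s" % value)
        return not ip.is_reserved
    except ValueError:
        return False

print(looks_valid("192.0.2.10"))    # True
print(looks_valid("240.0.0.1"))     # False -- reserved range is rejected
print(looks_valid("not-an-ip"))     # False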
genericmappingtools/pygmt
pygmt/tests/test_sphdistance.py
fixture_table
python
def fixture_table():
    coords_list = [[85.5, 22.3], [82.3, 22.6], [85.8, 22.4], [86.5, 23.3]]
    return np.array(coords_list)
Load the table data.
https://github.com/genericmappingtools/pygmt/blob/0d07cccef60da5d4874bb2c55c9824ffd7faf646/pygmt/tests/test_sphdistance.py#L15-L20
import os import numpy as np import numpy.testing as npt import pytest from pygmt import sphdistance from pygmt.exceptions import GMTInvalidInput from pygmt.helpers import GMTTempFile @pytest.fixture(scope="module", name="array")
BSD 3-Clause New or Revised License
mlindauer/autofolio
autofolio/pre_solving/aspeed_schedule.py
Aspeed.fit
python
def fit(self, scenario: ASlibScenario, config: Configuration):
    if config["presolving"]:
        self.logger.info("Compute Presolving Schedule with Aspeed")

        X = scenario.performance_data.values

        if X.shape[0] > self.data_threshold:
            random_indx = np.random.choice(
                range(X.shape[0]),
                size=min(X.shape[0], max(int(X.shape[0] * self.data_fraction), self.data_threshold)),
                replace=True)
            X = X[random_indx, :]
        self.logger.debug("#Instances for pre-solving schedule: %d" % (X.shape[0]))

        times = ["time(i%d, %d, %d)." % (i, j, max(1, math.ceil(X[i, j])))
                 for i in range(X.shape[0]) for j in range(X.shape[1])]

        kappa = "kappa(%d)." % (config["pre:cutoff"])

        data_in = " ".join(times) + " " + kappa

        self._call_clingo(data_in=data_in, algorithms=scenario.performance_data.columns)
Fit the Aspeed object to ASlib scenario data

Arguments
---------
scenario: data.aslib_scenario.ASlibScenario
    ASlib Scenario with all data in pandas
config: ConfigSpace.Configuration
    configuration
https://github.com/mlindauer/autofolio/blob/f296f528b1b684d36837075b0e8160e3fa4124f7/autofolio/pre_solving/aspeed_schedule.py#L85-L121
import os import sys import logging import math import numpy as np import pandas as pd import subprocess from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformFloatHyperparameter, UniformIntegerHyperparameter from ConfigSpace.conditions import EqualsCondition, InCondition from ConfigSpace.configuration_space import ConfigurationSpace from ConfigSpace import Configuration from aslib_scenario.aslib_scenario import ASlibScenario __author__ = "Marius Lindauer" __license__ = "BSD" class Aspeed(object): @staticmethod def add_params(cs: ConfigurationSpace, cutoff: int): pre_solving = CategoricalHyperparameter( "presolving", choices=[True, False], default_value=False) cs.add_hyperparameter(pre_solving) pre_cutoff = UniformIntegerHyperparameter( "pre:cutoff", lower=1, upper=cutoff, default_value=math.ceil(cutoff * 0.1), log=True) cs.add_hyperparameter(pre_cutoff) cond = InCondition(child=pre_cutoff, parent=pre_solving, values=[True]) cs.add_condition(cond) def __init__(self, clingo: str=None, runsolver: str=None, enc_fn: str=None): self.logger = logging.getLogger("Aspeed") if not runsolver: self.runsolver = os.path.join( os.path.dirname(sys.argv[0]), "..", "aspeed", "runsolver") else: self.runsolver = runsolver if not clingo: self.clingo = os.path.join( os.path.dirname(sys.argv[0]), "..", "aspeed", "clingo") else: self.clingo = clingo if not enc_fn: self.enc_fn = os.path.join( os.path.dirname(sys.argv[0]), "..", "aspeed", "enc1.lp") else: self.enc_fn = enc_fn self.mem_limit = 2000 self.cutoff = 60 self.data_threshold = 300 self.data_fraction = 0.3 self.schedule = []
BSD 2-Clause Simplified License
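A hypothetical sketch of wiring Aspeed's hyperparameters into a configuration space; the cutoff value is made up and the ConfigSpace package must be installed.

from ConfigSpace.configuration_space import ConfigurationSpace

cs = ConfigurationSpace()
Aspeed.add_params(cs, cutoff=300)       # adds "presolving" and the conditional "pre:cutoff"
config = cs.sample_configuration()
if config["presolving"]:
    print("pre-solving schedule capped at", config["pre:cutoff"], "seconds")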
blurstudio/cross3d
cross3d/studiomax/mixer/track.py
StudiomaxTrack.isSoloed
python
def isSoloed(self):
    return self.track.solo
Whether this track is Soloed
https://github.com/blurstudio/cross3d/blob/277968d1227de740fc87ef61005c75034420eadf/cross3d/studiomax/mixer/track.py#L61-L63
import Py3dsMax from Py3dsMax import mxs from cross3d import Clip, ClipPortion, TrackPortion from cross3d.abstract.mixer.track import AbstractTrack class StudiomaxTrack(AbstractTrack): @property def numClips(self): if self.isLayerTrack: return int(self.track.numClips) elif self.isTransitionTrack: return int(self.track.numTransClips) @property def isLayerTrack(self): return self.track.trackType == mxs.pyhelper.namify('layertrack') @property def isTransitionTrack(self): return self.track.trackType == mxs.pyhelper.namify('transtrack') @property def isMuted(self): return self.track.mute @property
MIT License
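Since StudiomaxTrack.isSoloed only works inside 3ds Max (it reads the Py3dsMax track's solo flag), here is a hedged, self-contained sketch of how it is consumed, with a stand-in for the native track object.

class FakeMaxTrack:                      # stands in for the Py3dsMax track
    solo = True

class FakeStudiomaxTrack:
    def __init__(self, track):
        self.track = track
    def isSoloed(self):                  # same body as the record above
        return self.track.solo

track = FakeStudiomaxTrack(FakeMaxTrack())
print("soloed:", track.isSoloed())       # -> soloed: True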
sns-sdks/python-twitter
pytwitter/api.py
Api.get_oauth2_authorize_url
python
def get_oauth2_authorize_url( self, redirect_uri: str = None, scope: Optional[List[str]] = None, **kwargs ) -> Tuple[str, str, str]: session = self._get_oauth2_session( redirect_uri=redirect_uri, scope=scope, **kwargs, ) code_verifier = base64.urlsafe_b64encode(os.urandom(40)).decode("utf-8") code_verifier = re.sub("[^a-zA-Z0-9]+", "", code_verifier) authorization_url, state = session.create_authorization_url( url=self.BASE_OAUTH2_AUTHORIZE_URL, code_verifier=code_verifier ) return authorization_url, code_verifier, state
:param redirect_uri: The URL that twitter redirect back to after the user logged in. :param scope: A list of permission string to request from the user to using your app. :param kwargs: Additional parameters for oauth. :return: Authorization url, code_verifier, state
https://github.com/sns-sdks/python-twitter/blob/aeae4b7e5e3251033b2030f7c9c2d69c3e923860/pytwitter/api.py#L291-L311
import base64 import logging import os import re import time from typing import List, Optional, Tuple, Union import requests from requests.models import Response from authlib.integrations.requests_client import ( OAuth2Session, OAuth2Auth, OAuth1Auth, OAuth1Session, ) import pytwitter.models as md from pytwitter.error import PyTwitterError from pytwitter.rate_limit import RateLimit from pytwitter.utils.validators import enf_comma_separated logger = logging.getLogger(__name__) class Api: BASE_URL_V2 = "https://api.twitter.com/2" BASE_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token" BASE_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize" BASE_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token" DEFAULT_CALLBACK_URI = "https://localhost/" BASE_OAUTH2_AUTHORIZE_URL = "https://twitter.com/i/oauth2/authorize" BASE_OAUTH2_ACCESS_TOKEN_URL = "https://api.twitter.com/2/oauth2/token" DEFAULT_SCOPES = ["users.read", "tweet.read"] def __init__( self, bearer_token=None, consumer_key=None, consumer_secret=None, access_token=None, access_secret=None, client_id=None, application_only_auth=False, oauth_flow=False, sleep_on_rate_limit=False, timeout=None, proxies=None, ): self.session = requests.Session() self._auth = None self._oauth_session = None self.consumer_key = consumer_key self.consumer_secret = consumer_secret self.client_id = client_id self.timeout = timeout self.proxies = proxies self.rate_limit = RateLimit() self.sleep_on_rate_limit = sleep_on_rate_limit self.auth_user_id = None if bearer_token: self._auth = OAuth2Auth( token={"access_token": bearer_token, "token_type": "Bearer"} ) elif consumer_key and consumer_secret and application_only_auth: resp = self.generate_bearer_token( consumer_key=consumer_key, consumer_secret=consumer_secret ) self._auth = OAuth2Auth( token={"access_token": resp["access_token"], "token_type": "Bearer"} ) elif all([consumer_key, consumer_secret, access_token, access_secret]): self._auth = OAuth1Auth( client_id=consumer_key, client_secret=consumer_secret, token=access_token, token_secret=access_secret, ) self.rate_limit = RateLimit("user") self.auth_user_id = self.get_uid_from_access_token_key( access_token=access_token ) elif consumer_key and consumer_secret and oauth_flow: pass elif client_id and oauth_flow: pass else: raise PyTwitterError("Need oauth") @staticmethod def get_uid_from_access_token_key(access_token: str): uid, _ = access_token.split("-") return uid def _request( self, url, verb="GET", params=None, data=None, json=None, enforce_auth=True ): auth = None if enforce_auth: if not self._auth: raise PyTwitterError("The twitter.Api instance must be authenticated.") auth = self._auth if url and self.sleep_on_rate_limit: limit = self.rate_limit.get_limit(url=url, method=verb) if limit.remaining == 0: s_time = max((limit.reset - time.time()), 0) + 10.0 logger.debug( f"Rate limited requesting [{url}], sleeping for [{s_time}]" ) time.sleep(s_time) resp = self.session.request( url=url, method=verb, params=params, data=data, auth=auth, json=json, timeout=self.timeout, proxies=self.proxies, ) if url and self.rate_limit: self.rate_limit.set_limit(url=url, headers=resp.headers, method=verb) return resp def get_authorize_url(self, callback_uri=None, **kwargs): if callback_uri is None: callback_uri = self.DEFAULT_CALLBACK_URI self._oauth_session = OAuth1Session( client_id=self.consumer_key, client_secret=self.consumer_secret, callback_uri=callback_uri, ) self._oauth_session.fetch_request_token( self.BASE_REQUEST_TOKEN_URL, 
proxies=self.proxies ) return self._oauth_session.create_authorization_url( self.BASE_AUTHORIZE_URL, **kwargs ) def generate_access_token(self, response: str): if not self._oauth_session: raise PyTwitterError("Need get_authorize_url first") self._oauth_session.parse_authorization_response(response) data = self._oauth_session.fetch_access_token( self.BASE_ACCESS_TOKEN_URL, proxies=self.proxies ) self._auth = OAuth1Auth( client_id=self.consumer_key, client_secret=self.consumer_secret, token=data["oauth_token"], token_secret=data["oauth_token_secret"], ) if "user_id" in data: self.auth_user_id = data["user_id"] else: self.auth_user_id = self.get_uid_from_access_token_key( access_token=data["oauth_token"] ) return data def invalidate_access_token(self) -> dict: if not self._auth: raise PyTwitterError("Must have authorized credentials") if not isinstance(self._auth, OAuth1Auth): raise PyTwitterError("Can only revoke oauth1 token") resp = requests.post( url="https://api.twitter.com/1.1/oauth/invalidate_token", ) data = self._parse_response(resp=resp) return data def generate_bearer_token(self, consumer_key: str, consumer_secret: str) -> dict: bearer_token = base64.b64encode(f"{consumer_key}:{consumer_secret}".encode()) headers = { "Authorization": f"Basic {bearer_token.decode()}", "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8", } resp = requests.post( url="https://api.twitter.com/oauth2/token", data={"grant_type": "client_credentials"}, headers=headers, ) data = self._parse_response(resp=resp) return data def invalidate_bearer_token( self, consumer_key: str, consumer_secret: str, access_token: str ) -> dict: bearer_token = base64.b64encode(f"{consumer_key}:{consumer_secret}".encode()) headers = { "Authorization": f"Basic {bearer_token.decode()}", "Content-Type": "application/x-www-form-urlencoded", } resp = requests.post( url="https://api.twitter.com/oauth2/invalidate_token", data={"access_token": access_token}, headers=headers, ) data = self._parse_response(resp=resp) return data def _get_oauth2_session( self, redirect_uri: Optional[str] = None, scope: Optional[List[str]] = None, **kwargs, ) -> OAuth2Session: if not self.client_id: raise PyTwitterError({"message": "OAuth need your app credentials"}) if redirect_uri is None: redirect_uri = self.DEFAULT_CALLBACK_URI if scope is None: scope = self.DEFAULT_SCOPES session = OAuth2Session( client_id=self.client_id, scope=scope, redirect_uri=redirect_uri, code_challenge_method="S256", **kwargs, ) return session
MIT License
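A hedged usage sketch for Api.get_oauth2_authorize_url (the OAuth 2.0 PKCE flow). The client id, redirect URI and scopes below are placeholders; the call only builds the authorization URL locally and does not perform the later token exchange.

from pytwitter import Api

api = Api(client_id="YOUR-CLIENT-ID", oauth_flow=True)
url, code_verifier, state = api.get_oauth2_authorize_url(
    redirect_uri="https://localhost/",
    scope=["users.read", "tweet.read"],
)
print("Send the user to:", url)
# Keep code_verifier and state; they are needed when the callback response
# is exchanged for an access token in the next step of the flow.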
square/connect-python-sdk
squareconnect/models/employee_wage.py
EmployeeWage.hourly_rate
python
def hourly_rate(self): return self._hourly_rate
Gets the hourly_rate of this EmployeeWage. Can be a custom-set hourly wage or the calculated effective hourly wage based on annual wage and hours worked per week. :return: The hourly_rate of this EmployeeWage. :rtype: Money
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/employee_wage.py#L133-L141
from pprint import pformat from six import iteritems import re class EmployeeWage(object): def __init__(self, id=None, employee_id=None, title=None, hourly_rate=None): self.swagger_types = { 'id': 'str', 'employee_id': 'str', 'title': 'str', 'hourly_rate': 'Money' } self.attribute_map = { 'id': 'id', 'employee_id': 'employee_id', 'title': 'title', 'hourly_rate': 'hourly_rate' } self._id = id self._employee_id = employee_id self._title = title self._hourly_rate = hourly_rate @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def employee_id(self): return self._employee_id @employee_id.setter def employee_id(self, employee_id): if employee_id is None: raise ValueError("Invalid value for `employee_id`, must not be `None`") if len(employee_id) < 1: raise ValueError("Invalid value for `employee_id`, length must be greater than or equal to `1`") self._employee_id = employee_id @property def title(self): return self._title @title.setter def title(self, title): self._title = title @property
Apache License 2.0
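A hedged sketch of reading hourly_rate back from a freshly constructed EmployeeWage. It assumes squareconnect's Money model accepts amount/currency keyword arguments; the ids and wage values are made up.

from squareconnect.models.employee_wage import EmployeeWage
from squareconnect.models.money import Money

wage = EmployeeWage(
    id="wage-1",
    employee_id="emp-42",
    title="Barista",
    hourly_rate=Money(amount=1500, currency="USD"),   # $15.00, in cents
)
print(wage.hourly_rate.amount, wage.hourly_rate.currency)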
gordonjo/versa
src/omniglot.py
OmniglotData._yield_random_task_batch
python
def _yield_random_task_batch(self, tasks_per_batch, images, character_indices, shot, way, eval_samples): train_images_to_return, test_images_to_return = [], [] train_labels_to_return, test_labels_to_return = [], [] for task in range(tasks_per_batch): im_train, im_test, lbl_train, lbl_test = self._generate_random_task(images, character_indices, shot, way, eval_samples) train_images_to_return.append(im_train) test_images_to_return.append(im_test) train_labels_to_return.append(lbl_train) test_labels_to_return.append(lbl_test) return np.array(train_images_to_return), np.array(test_images_to_return), np.array(train_labels_to_return), np.array(test_labels_to_return)
Generate a batch of tasks from image set. :param tasks_per_batch: Number of tasks per batch. :param images: Images set to generate batch from. :param character_indices: Index of each character. :param shot: Number of training images per class. :param way: Number of classes per task. :param eval_samples: number of evaluation samples to use. :return: A batch of tasks.
https://github.com/gordonjo/versa/blob/e398654176e7d942afc3c0b6159d2a6f0d68c1dd/src/omniglot.py#L104-L125
import numpy as np def shuffle_batch(images, labels): permutation = np.random.permutation(images.shape[0]) return images[permutation], labels[permutation] def extract_data(data, augment_data): images, char_nums = [], [] if augment_data: for character in data: data = augment_character_set(data, character) for character_index, character in enumerate(data): for m, instance in enumerate(character): images.append(instance[0]) char_nums.append(character_index) images = np.expand_dims(np.array(images), 4) char_number = np.array(char_nums) return images, char_number def augment_character_set(data, character_set): rotation_90, rotation_180, rotation_270 = [], [], [] for instance in character_set: image, char_num, char_language_num = instance rotation_90.append((np.rot90(image, k=1), char_num, char_language_num)) rotation_180.append((np.rot90(image, k=2), char_num, char_language_num)) rotation_270.append((np.rot90(image, k=3), char_num, char_language_num)) return np.vstack((data, np.array([rotation_90, rotation_180, rotation_270]))) class OmniglotData(object): def __init__(self, path, train_size, validation_size, augment_data, seed): np.random.seed(seed) data = np.load(path) np.random.shuffle(data) self.instances_per_char = 20 self.image_height = 28 self.image_width = 28 self.image_channels = 1 self.total_chars = data.shape[0] self.train_images, self.train_char_nums = extract_data(data[:train_size], augment_data=augment_data) if validation_size is not 0: self.validation_images, self.validation_char_nums = extract_data(data[train_size:train_size + validation_size], augment_data=augment_data) self.test_images, self.test_char_nums = extract_data(data[train_size + validation_size:], augment_data=augment_data) def get_image_height(self): return self.image_height def get_image_width(self): return self.image_width def get_image_channels(self): return self.image_channels def get_batch(self, source, tasks_per_batch, shot, way, eval_samples): if source == 'train': return self._yield_random_task_batch(tasks_per_batch, self.train_images, self.train_char_nums, shot, way, eval_samples) elif source == 'validation': return self._yield_random_task_batch(tasks_per_batch, self.validation_images, self.validation_char_nums, shot, way, eval_samples) elif source == 'test': return self._yield_random_task_batch(tasks_per_batch, self.test_images, self.test_char_nums, shot, way, eval_samples)
MIT License
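A hedged usage sketch for the batch generator above, going through the public get_batch path. It assumes src/ is on sys.path so the module imports as omniglot, and "omniglot.npy" is a placeholder for the pre-processed Omniglot array the class expects; the sizes are purely illustrative.

from omniglot import OmniglotData

data = OmniglotData(path="omniglot.npy", train_size=1100,
                    validation_size=100, augment_data=True, seed=111)
train_x, test_x, train_y, test_y = data.get_batch(
    source="train", tasks_per_batch=8, shot=5, way=5, eval_samples=15)
# Each of the four returned arrays has tasks_per_batch as its leading
# dimension: one (train set, test set) pair of a 5-way, 5-shot task per entry.
print(train_x.shape, train_y.shape)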
sawcordwell/pymdptoolbox
src/mdptoolbox/mdp.py
MDP.run
python
def run(self): raise NotImplementedError("You should create a run() method.")
Raises error because child classes should implement this function.
https://github.com/sawcordwell/pymdptoolbox/blob/7c96789cc80e280437005c12065cf70266c11636/src/mdptoolbox/mdp.py#L345-L348
import math as _math import time as _time import numpy as _np import scipy.sparse as _sp import mdptoolbox.util as _util _MSG_STOP_MAX_ITER = "Iterating stopped due to maximum number of iterations " "condition." _MSG_STOP_EPSILON_OPTIMAL_POLICY = "Iterating stopped, epsilon-optimal " "policy found." _MSG_STOP_EPSILON_OPTIMAL_VALUE = "Iterating stopped, epsilon-optimal value " "function found." _MSG_STOP_UNCHANGING_POLICY = "Iterating stopped, unchanging policy found." def _computeDimensions(transition): A = len(transition) try: if transition.ndim == 3: S = transition.shape[1] else: S = transition[0].shape[0] except AttributeError: S = transition[0].shape[0] return S, A def _printVerbosity(iteration, variation): if isinstance(variation, float): print("{:>10}{:>12f}".format(iteration, variation)) elif isinstance(variation, int): print("{:>10}{:>12d}".format(iteration, variation)) else: print("{:>10}{:>12}".format(iteration, variation)) class MDP(object): def __init__(self, transitions, reward, discount, epsilon, max_iter, skip_check=False): if discount is not None: self.discount = float(discount) assert 0.0 < self.discount <= 1.0, ( "Discount rate must be in ]0; 1]" ) if self.discount == 1: print("WARNING: check conditions of convergence. With no " "discount, convergence can not be assumed.") if max_iter is not None: self.max_iter = int(max_iter) assert self.max_iter > 0, ( "The maximum number of iterations must be greater than 0." ) if epsilon is not None: self.epsilon = float(epsilon) assert self.epsilon > 0, "Epsilon must be greater than 0." if not skip_check: _util.check(transitions, reward) self.S, self.A = _computeDimensions(transitions) self.P = self._computeTransition(transitions) self.R = self._computeReward(reward, transitions) self.verbose = False self.time = None self.iter = 0 self.V = None self.policy = None def __repr__(self): P_repr = "P: \n" R_repr = "R: \n" for aa in range(self.A): P_repr += repr(self.P[aa]) + "\n" R_repr += repr(self.R[aa]) + "\n" return(P_repr + "\n" + R_repr) def _bellmanOperator(self, V=None): if V is None: V = self.V else: try: assert V.shape in ((self.S,), (1, self.S)), "V is not the " "right shape (Bellman operator)." 
except AttributeError: raise TypeError("V must be a numpy array or matrix.") Q = _np.empty((self.A, self.S)) for aa in range(self.A): Q[aa] = self.R[aa] + self.discount * self.P[aa].dot(V) return (Q.argmax(axis=0), Q.max(axis=0)) def _computeTransition(self, transition): return tuple(transition[a] for a in range(self.A)) def _computeReward(self, reward, transition): try: if reward.ndim == 1: return self._computeVectorReward(reward) elif reward.ndim == 2: return self._computeArrayReward(reward) else: r = tuple(map(self._computeMatrixReward, reward, transition)) return r except (AttributeError, ValueError): if len(reward) == self.A: r = tuple(map(self._computeMatrixReward, reward, transition)) return r else: return self._computeVectorReward(reward) def _computeVectorReward(self, reward): if _sp.issparse(reward): raise NotImplementedError else: r = _np.array(reward).reshape(self.S) return tuple(r for a in range(self.A)) def _computeArrayReward(self, reward): if _sp.issparse(reward): raise NotImplementedError else: def func(x): return _np.array(x).reshape(self.S) return tuple(func(reward[:, a]) for a in range(self.A)) def _computeMatrixReward(self, reward, transition): if _sp.issparse(reward): return reward.multiply(transition).sum(1).A.reshape(self.S) elif _sp.issparse(transition): return transition.multiply(reward).sum(1).A.reshape(self.S) else: return _np.multiply(transition, reward).sum(1).reshape(self.S) def _startRun(self): if self.verbose: _printVerbosity('Iteration', 'Variation') self.time = _time.time() def _endRun(self): self.V = tuple(self.V.tolist()) try: self.policy = tuple(self.policy.tolist()) except AttributeError: self.policy = tuple(self.policy) self.time = _time.time() - self.time
BSD 3-Clause New or Revised License
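MDP.run is deliberately abstract; concrete solvers override it. A short sketch of the intended pattern using the library's bundled forest-management example (the discount value follows the project's README).

import mdptoolbox.example
import mdptoolbox.mdp

P, R = mdptoolbox.example.forest()              # toy transition/reward arrays
vi = mdptoolbox.mdp.ValueIteration(P, R, 0.96)
vi.run()                                        # the subclass implements run()
print(vi.policy)                                # optimal action per state, e.g. (0, 0, 0)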
paddlepaddle/paddle
python/paddle/fluid/default_scope_funcs.py
scoped_function
python
def scoped_function(func): enter_local_scope() try: func() except: raise finally: leave_local_scope()
invoke `func` in new scope. :param func: a callable function that will be run in new scope. :type func: callable
https://github.com/paddlepaddle/paddle/blob/056b87414880e0520bb4560fc40d5b62db9c5175/python/paddle/fluid/default_scope_funcs.py#L90-L103
from __future__ import print_function import paddle.fluid.core import threading __tl_scope__ = threading.local() __all__ = [ 'get_cur_scope', 'enter_local_scope', 'leave_local_scope', 'var', 'find_var', 'scoped_function', ] def get_cur_scope(): cur_scope_stack = getattr(__tl_scope__, 'cur_scope', None) if cur_scope_stack is None: __tl_scope__.cur_scope = list() if len(__tl_scope__.cur_scope) == 0: __tl_scope__.cur_scope.append(paddle.fluid.core.Scope()) return __tl_scope__.cur_scope[-1] def enter_local_scope(): cur_scope = get_cur_scope() new_scope = cur_scope.new_scope() __tl_scope__.cur_scope.append(new_scope) def leave_local_scope(): __tl_scope__.cur_scope.pop() get_cur_scope().drop_kids() def var(name): return get_cur_scope().var(name) def find_var(name): return get_cur_scope().find_var(name)
Apache License 2.0
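A hedged usage sketch for scoped_function; it needs a Fluid-era PaddlePaddle build in which paddle.fluid.default_scope_funcs still exists. The variable name is arbitrary.

from paddle.fluid.default_scope_funcs import scoped_function, var, find_var

def make_temp_var():
    var("tmp_tensor")                 # created in the freshly entered scope
    print(find_var("tmp_tensor"))     # visible while inside the scope

scoped_function(make_temp_var)
print(find_var("tmp_tensor"))         # expected None: the local scope was dropped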
mattvonrocketstein/smash
smashlib/ipy3x/parallel/tests/clienttest.py
segfault
python
def segfault(): import ctypes ctypes.memset(-1, 0, 1)
this will segfault
https://github.com/mattvonrocketstein/smash/blob/98acdc27ab72ca80d9a7f63a54c0d52f126a8009/smashlib/ipy3x/parallel/tests/clienttest.py#L35-L38
from __future__ import print_function import sys import tempfile import time from nose import SkipTest import zmq from zmq.tests import BaseZMQTestCase from IPython.external.decorator import decorator from IPython.parallel import error from IPython.parallel import Client from IPython.parallel.tests import launchers, add_engines
MIT License
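Since segfault() kills the interpreter that runs it, a self-contained way to observe it is to call it from a throwaway child process; the exit code shown is what Linux typically reports for SIGSEGV.

import ctypes
import multiprocessing

def segfault():
    ctypes.memset(-1, 0, 1)              # write to an invalid address

if __name__ == "__main__":
    p = multiprocessing.Process(target=segfault)
    p.start()
    p.join()
    print("child exit code:", p.exitcode)   # typically -11 (SIGSEGV) on Linux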
jsmaika/intficpy
intficpy/actor.py
SaleItem.buyUnit
python
def buyUnit(self, game): for i in range(0, self.price): if self.currency.ix in game.me.contains: game.me.removeThing(game.me.contains[self.currency.ix][0]) elif self.currency.ix in game.me.sub_contains: game.me.removeThing(game.me.sub_contains[self.currency.ix][0]) if self.price > 1: game.addTextToEvent( "turn", "(Lost: " + str(self.price) + " " + self.currency.plural + ")", ) else: game.addTextToEvent( "turn", "(Lost: " + str(self.price) + " " + self.currency.verbose_name + ")", ) if self.number is True: obj = self.thing.copyThing() elif self.number > 1: obj = self.thing.copyThing() else: obj = self.thing if obj.location: obj.location.removeThing(obj) game.me.addThing(obj) if not self.number is True: self.number = self.number - 1 game.addTextToEvent("turn", "(Received: " + obj.verbose_name + ") ")
Buy one unit of the item for sale. Removes the needed amount of currency from the player's inventory, adds the purchased item, and prints the interaction messages to the turn. :param game: the current game :type game: IFPGame
https://github.com/jsmaika/intficpy/blob/4bf2c270abb822cc2ff2ee65d4478f20f1c5accb/intficpy/actor.py#L738-L773
from .ifp_object import IFPObject from .thing_base import Thing from . import vocab from .tokenizer import cleanInput, tokenize, removeArticles class Actor(Thing): POSITION_STATE_DESC_KEY = "position_state_desc" def __init__(self, game, name): super().__init__(game, name) self.can_be_led = False self.for_sale = {} self.will_buy = {} self.ask_topics = {} self.tell_topics = {} self.give_topics = {} self.show_topics = {} self.special_topics = {} self.special_topics_alternate_keys = {} self.sticky_topic = None self.hi_topic = None self.return_hi_topic = None self.said_hi = False self.hermit_topic = None self.manual_suggest = False self.default_topic = "No response." self.knows_about = [self.ix] self.position = "standing" self.commodity = False self.wearing = {} self.invItem = False self.cannotTakeMsg = "You cannot take a person. " @property def verb_to_be(self): if self.is_current_player: return "are" return "is" @property def verbose_name(self): if self.is_current_player: return "you" return super().verbose_name @property def is_current_player(self): if not self.game: return False return self.game.me.ix == self.ix @property def contains_desc(self): return "" @property def position_state_desc(self): if self.position == "standing": return "" return f"{self.capNameArticle()} is {self.position}. " def makeProper(self, proper_name): token_name = proper_name token_name = cleanInput(token_name, False) token_name = tokenize(token_name) token_name = removeArticles(token_name) self.name = token_name[-1] self.setAdjectives(self.adjectives + token_name) for tok in token_name: self.addSynonym(tok) self.full_name = proper_name self.has_proper_name = True def makeStanding(self): self.position = "standing" def makeSitting(self): self.position = "sitting" def makeLying(self): self.position = "lying" def setHiTopics(self, hi_topic, return_hi_topic): self.said_hi = False if hi_topic: hi_topic.owner = self if return_hi_topic: return_hi_topic.owner = self self.hi_topic = hi_topic self.return_hi_topic = return_hi_topic def setHermitTopic(self, hermit_topic): self.hermit_topic = hermit_topic if hermit_topic: hermit_topic.owner = self def removeHermitTopic(self): self.hermit_topic = None def addTopic(self, ask_tell_give_show, topic, thing): topic.owner = self if "ask" in ask_tell_give_show or ask_tell_give_show == "all": self.ask_topics[thing.ix] = topic if "tell" in ask_tell_give_show or ask_tell_give_show == "all": self.tell_topics[thing.ix] = topic if "give" in ask_tell_give_show or ask_tell_give_show == "all": self.give_topics[thing.ix] = topic if "show" in ask_tell_give_show or ask_tell_give_show == "all": self.show_topics[thing.ix] = topic def addSpecialTopic(self, topic): topic.owner = self self.special_topics[topic.suggestion] = topic for x in topic.alternate_phrasings: self.special_topics_alternate_keys[x] = topic def removeSpecialTopic(self, topic): if topic.suggestion in self.special_topics: del self.special_topics[topic.suggestion] for x in topic.alternate_phrasings: if x in self.special_topics_alternate_keys: del self.special_topics_alternate_keys[x] def removeAllSpecialTopics(self): topics = [] for suggestion in self.special_topics: topics.append(self.special_topics[suggestion]) for topic in topics: self.removeSpecialTopic(topic) def removeAllTopics(self): self.ask_topics = {} self.tell_topics = {} self.give_topics = {} self.show_topics = {} def printSuggestions(self, game): if self.special_topics != {}: for suggestion in self.special_topics: game.addTextToEvent("turn", "(You could " + suggestion + 
")") game.parser.command.specialTopics[suggestion] = self.special_topics[ suggestion ] for phrasing in self.special_topics_alternate_keys: game.parser.command.specialTopics[ phrasing ] = self.special_topics_alternate_keys[phrasing] def defaultTopic(self, game): game.addTextToEvent("turn", self.default_topic) self.printSuggestions(game) def addSelling(self, item, currency, price, stock): if item.ix not in self.for_sale: self.for_sale[item.ix] = SaleItem(self.game, item, currency, price, stock) def addWillBuy(self, item, currency, price, max_wanted): if item.ix not in self.will_buy: self.will_buy[item.ix] = SaleItem( self.game, item, currency, price, max_wanted ) def playerTalksTo(self, event="turn", **kwargs): if self.hermit_topic: self.hermit_topic.func(self.game, False) elif self.sticky_topic: self.sticky_topic.func(self.game) elif self.hi_topic and not self.said_hi: self.hi_topic.func(self.game) self.said_hi = True elif self.return_hi_topic: self.return_hi_topic.func(self.game) else: self.defaultTopic(self.game) return True def playerAboutToAskAbout(self, item, event="turn", **kwargs): if self.hermit_topic: self.hermit_topic.func(self.game, False) return False return True def playerAsksAbout(self, item, event="turn", **kwargs): if self.hi_topic and not self.said_hi: self.hi_topic.func(self.game) self.said_hi = True elif self.return_hi_topic: self.return_hi_topic.func(self.game) if item.ix in self.ask_topics: self.ask_topics[item.ix].func(self.game) ret = True else: self.defaultTopic(self.game) ret = False if self.sticky_topic: self.sticky_topic.func(self.game) return ret def playerAboutToTellAbout(self, item, event="turn", **kwargs): if self.hermit_topic: self.hermit_topic.func(self.game, False) return False return True def playerTellsAbout(self, item, event="turn", **kwargs): if self.hi_topic and not self.said_hi: self.hi_topic.func(self.game) self.said_hi = True elif self.return_hi_topic: self.return_hi_topic.func(self.game) if item.ix in self.tell_topics: self.tell_topics[item.ix].func(self.game) ret = True else: self.defaultTopic(self.game) ret = False if self.sticky_topic: self.sticky_topic.func(self.game) return ret def playerAboutToShow(self, item, event="turn", **kwargs): if self.hermit_topic: self.hermit_topic.func(self.game, False) return False return True def playerShows(self, item, event="turn", **kwargs): if self.hi_topic and not self.said_hi: self.hi_topic.func(self.game) self.said_hi = True elif self.return_hi_topic: self.return_hi_topic.func(self.game) if item.ix in self.show_topics: self.show_topics[item.ix].func(self.game) ret = True else: self.defaultTopic(self.game) ret = False if self.sticky_topic: self.sticky_topic.func(self.game) return ret def playerAboutToGiveItem(self, item, event="turn", **kwargs): if self.hermit_topic: self.hermit_topic.func(self.game, False) return False return True def playerAboutToGiveAway(self, event="turn", **kwargs): if self.is_current_player: self.game.addTextToEvent(event, "You cannot give yourself away. 
") return False return True def playerGivesItem(self, item, event="turn", **kwargs): if not item.playerAboutToGiveAway(event="turn"): return False if self.hi_topic and not self.said_hi: self.hi_topic.func(self.game) self.said_hi = True elif self.return_hi_topic: self.return_hi_topic.func(self.game) if item.ix in self.give_topics: self.give_topics[item.ix].func(self.game) if item.give: item.moveTo(self) ret = True else: self.defaultTopic(self.game) ret = False if self.sticky_topic: self.sticky_topic.func(self.game) return ret class Player(Actor): def __init__(self, game): super().__init__(game, "me") def setPlayer(self): self.addSynonym("me") self.addSynonym("myself") self.addSynonym("yourself") self.addSynonym("you") @property def default_desc(self): return "" @property def default_xdesc(self): return "You notice nothing remarkable about yourself. " class Topic(IFPObject): def __init__(self, game, topic_text): super().__init__(game) self.text = topic_text self.owner = None self.new_suggestions = [] self.expire_suggestions = [] def func(self, game, suggest=True): self.update_suggestions() game.addTextToEvent("turn", self.text) self.on_trigger(game) if self.owner and suggest: if not self.owner.manual_suggest: self.owner.printSuggestions(game) def update_suggestions(self): if not self.owner: return for item in self.new_suggestions: if item.suggestion not in self.owner.special_topics: self.owner.addSpecialTopic(item) for item in self.expire_suggestions: if item.suggestion in self.owner.special_topics: self.owner.removeSpecialTopic(item) def on_trigger(self, game): pass class SpecialTopic(Topic): def __init__(self, game, suggestion, topic_text): super().__init__(game, topic_text) self.suggestion = suggestion self.alternate_phrasings = [] def addAlternatePhrasing(self, phrasing): self.alternate_phrasings.append(phrasing) class SaleItem(IFPObject): def __init__(self, game, item, currency, price, number): super().__init__(game) self.thing = item self.currency = currency self.price = price self.number = number self.wants = number self.out_stock_msg = "That item is out of stock. " self.wants_no_more_msg = None self.purchase_msg = "You purchase " + item.lowNameArticle(False) + ". " self.sell_msg = "You sell " + item.lowNameArticle(True) + ". "
MIT License
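A hedged sketch of the selling setup that leads to SaleItem.buyUnit being called. game, coin and potion are assumed to be an existing IFPGame and Thing instances created elsewhere; only the Actor/addSelling calls shown in the record are used.

from intficpy.actor import Actor

def set_up_shop(game, coin, potion):
    merchant = Actor(game, "merchant")
    # Offer 3 potions at 2 coins each; this wraps `potion` in a SaleItem.
    merchant.addSelling(potion, currency=coin, price=2, stock=3)
    # Buying one in play ends up invoking
    #     merchant.for_sale[potion.ix].buyUnit(game)
    # which removes 2 coins from game.me and adds a potion to the inventory.
    return merchant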
openwfm/wrfxpy
src/utils.py
cache_file
python
def cache_file(path, cache_dir):
    if not osp.isdir(cache_dir):
        logging.error('%s is not directory' % str(cache_dir))
        raise Exception('%s is not directory' % str(cache_dir))
    if not osp.isfile(path):
        logging.error('%s is not file' % str(path))
        raise Exception('%s is not file' % str(path))
    dst = osp.join(cache_dir, osp.basename(path))
    if osp.islink(path):
        # already a symlink: only act if it does not point into the cache directory
        if osp.dirname(osp.realpath(path)) == osp.realpath(cache_dir):
            logging.debug('%s is link to %s already' % (path, cache_dir))
            if osp.basename(osp.realpath(path)) != osp.basename(path):
                logging.error('link %s -> %s does not match' % (path, osp.realpath(path)))
                raise Exception('link %s -> %s does not match' % (path, osp.realpath(path)))
        else:
            src = osp.realpath(path)
            logging.info('Copying %s to %s' % (src, dst))
            shutil.copyfile(src, dst)
    else:
        logging.info('Moving %s to %s' % (path, dst))
        shutil.move(path, dst)
    symlink_unless_exists(dst, path)
Move the file at `path` to the cache directory and replace it with a symlink,
unless it is already a symlink into the cache directory.
If the file is a symlink to somewhere else, copy its target into the cache directory.
:param path: file name
:param cache_dir: cache directory, must be an absolute path
https://github.com/openwfm/wrfxpy/blob/7f7feba97baa6cd85134185520559028d2b5464e/src/utils.py#L266-L295
from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals from datetime import datetime, timedelta import pytz import os import os.path as osp import glob import numpy as np import math import pprint import logging import dill import json import pickle import inspect import shutil import psutil import requests import socket import collections import six from six.moves import map from six.moves import zip class Dict(dict): def __init__(self, d): self.update(d) def __getattr__(self, item): return self[item] def __setattr__(self, item, value): self[item] = value def __getitem__(self, item): if item in self: return super().__getitem__(item) else: for key in self: if isinstance(key,(range,tuple)) and item in key: return super().__getitem__(key) raise KeyError(item) def keys(self): if any([isinstance(key,(range,tuple)) for key in self]): keys = [] for key in self: if isinstance(key,(range,tuple)): for k in key: keys.append(k) else: keys.append(key) return keys else: return super().keys() def save(obj, file): with open(file,'wb') as output: dill.dump(obj, output ) def load(file): with open(file,'rb') as input: returnitem = dill.load(input) return returnitem def traceargs(): frame = inspect.currentframe() args, _, _, values = inspect.getargvalues(frame) for i in args: print((" %s:\n%s" % (i, pprint.pformat(values[i])))) def dump(obj,title): frame = inspect.currentframe() outframe = inspect.getouterframes(frame, 2) logging.info(outframe[1][1] + ':' + outframe[1][3] + ":" + title + ':\n' + pprint.pformat(obj,width=-1)) def check_obj(obj, title): if pprint.isreadable(obj): logging.info(title + " is readable") else: logging.info(title + " is NOT readable") try: json.dumps(obj) logging.info(title + " is JSON serializable") except TypeError as err: logging.error(title + " is not JSON serializable") logging.error(err) try: s=pickle.dumps(obj,pickle.HIGHEST_PROTOCOL) pickle.loads(s) logging.info(title + " can be pickled") except: logging.error(title + " could not be picked and unpickled") def kill_process(pid): if pid is not None: logging.info('Killing process %s and children' % pid) try: parent = psutil.Process(pid) try: for child in parent.children(recursive=True): child.kill() parent.kill() except: logging.error('Could not get them all, check for runaways') except: logging.error('Process %s does not exist' % pid) def process_create_time(pid): if pid is None: return -1 else: try: return psutil.Process(pid).create_time() except: return None def file_exists(path): return (os.path.exists(path) and os.access(path,os.R_OK)) def ensure_dir(path): path_dir = osp.dirname(path) if not osp.exists(path_dir): os.makedirs(path_dir) return path def delete(dir): if osp.exists(dir): try: shutil.rmtree(dir) except Exception as e: logging.warning(str(e)) def make_clean_dir(dir): logging.info('Deleting existing directory %s to make a clean one' % dir) delete(dir) if not osp.exists(dir): os.makedirs(dir) def make_dir(dir): if not osp.exists(dir): os.makedirs(dir) return dir def symlink_unless_exists(link_tgt, link_loc): logging.info('Linking %s -> %s' % (link_loc, link_tgt)) if osp.isfile(link_tgt) or osp.isdir(link_tgt): if not osp.lexists(link_loc): os.symlink(link_tgt, link_loc) else: logging.warning('Link %s already exists' % link_loc) else: logging.error('Link target %s does not exist' % link_tgt) def remove(tgt): if osp.isfile(tgt): logging.info('remove: file %s exists, removing' % tgt) os.remove(tgt) def force_copy(src,tgt): remove(tgt) 
shutil.copy(src,ensure_dir(tgt)) def append2file(addl,base): logging.info("appending file %s to %s" % (addl,base)) with open(base,'a') as base_file: with open(addl,'r') as addl_file: base_file.write(addl_file.read()) def link2copy(src): try: link_target = os.readlink(src) except OSError as e: return logging.info("replacing soft link %s -> %s by a copy" % (src,link_target)) os.remove(src) shutil.copy(link_target,src) def move(src,tgt): logging.info('moving %s to %s' % (src, tgt)) remove(tgt) shutil.move(src,tgt)
MIT License
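A hedged usage sketch for cache_file, assuming wrfxpy's src/ directory is on sys.path so the module imports as utils; the file and directories below are throwaway temporaries.

import os.path as osp
import tempfile
from utils import cache_file

work, cache = tempfile.mkdtemp(), tempfile.mkdtemp()
path = osp.join(work, "geo_em.d01.nc")
open(path, "w").close()                       # pretend this is a large product

cache_file(path, cache)
print(osp.islink(path))                                                       # True
print(osp.realpath(path) == osp.realpath(osp.join(cache, "geo_em.d01.nc")))   # True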
cgatoxford/cgatpipelines
obsolete/PipelineChipseq.py
intersectBedFiles
python
def intersectBedFiles(infiles, outfile):
    if len(infiles) == 1:
        shutil.copyfile(infiles[0], outfile)
    elif len(infiles) == 2:
        if IOTools.isEmpty(infiles[0]) or IOTools.isEmpty(infiles[1]):
            P.touch(outfile)
        else:
            statement = ''' intersectBed -u -a %s -b %s
            | cut -f 1,2,3,4,5
            | awk 'BEGIN { OFS="\\t"; } {$4=++a; print;}'
            | bgzip > %%(outfile)s ''' % (infiles[0], infiles[1])
            P.run()
    else:
        tmpfile = P.getTempFilename(".")
        fn = infiles[0]
        if IOTools.isEmpty(infiles[0]):
            P.touch(outfile)
            return
        statement = '''mergeBed -i %(fn)s > %(tmpfile)s'''
        P.run()
        for fn in infiles[1:]:
            if IOTools.isEmpty(fn):
                P.touch(outfile)
                os.unlink(tmpfile)
                return
            statement = '''mergeBed -i %(fn)s
            | intersectBed -u -a %(tmpfile)s -b stdin > %(tmpfile)s.tmp;
            mv %(tmpfile)s.tmp %(tmpfile)s'''
            P.run()
        statement = '''cat %(tmpfile)s
        | cut -f 1,2,3,4,5
        | awk 'BEGIN { OFS="\\t"; } {$4=++a; print;}'
        | bgzip > %(outfile)s '''
        P.run()
        os.unlink(tmpfile)
merge :term:`bed` formatted *infiles* by intersection and write to *outfile*. Only intervals that overlap in all files are retained. Interval coordinates are given by the first file in *infiles*. Bed files are normalized (overlapping intervals within a file are merged) before intersection. Intervals are renumbered starting from 1.
https://github.com/cgatoxford/cgatpipelines/blob/a34d460b5fc64984f6da0acb18aee43c5e02d5fc/obsolete/PipelineChipseq.py#L630-L689
import shutil import random import re import glob import os import collections import sqlite3 import pysam import numpy import CGAT.Experiment as E import CGATPipelines.Pipeline as P import CGAT.IndexedGenome as IndexedGenome import CGAT.IOTools as IOTools import CGAT.Bed as Bed import CGAT.BamTools as BamTools import CGAT.WrapperMACS as WrapperMACS import CGAT.WrapperZinba as WrapperZinba def getPeakShiftFromMacs(infile): shift = None with IOTools.openFile(infile, "r") as ins: rx = re.compile("#2 predicted fragment length is (\d+) bps") r2 = re.compile("#2 Use (\d+) as shiftsize, \d+ as fragment length") for line in ins: x = rx.search(line) if x: shift = int(x.groups()[0]) break x = r2.search(line) if x: shift = int(x.groups()[0]) E.warn("shift size was set automatically - see MACS logfiles") break return shift def getPeakShiftFromZinba(infile): shift = None with IOTools.openFile(infile, "r") as ins: lines = ins.readlines() for i, line in enumerate(lines): if line.startswith("$offset"): shift = int(lines[i + 1].split()[1]) break return shift def getPeakShift(track): if os.path.exists("%s.macs" % track): return getPeakShiftFromMacs("%s.macs" % track) elif os.path.exists("%s.zinba" % track): return getPeakShiftFromZinba("%s.zinba" % track) def getMappedReads(infile): for lines in IOTools.openFile(infile, "r"): data = lines[:-1].split("\t") if data[1].startswith("without duplicates"): return int(data[0]) return def getMinimumMappedReads(infiles): v = [] for infile in infiles: x = getMappedReads(infile) if x: v.append(x) if len(v) == 0: raise ValueError( "could not find mapped reads in files %s" % (str(infiles))) return min(v) def getExonLocations(filename): fh = IOTools.openFile(filename, "r") ensembl_ids = [] for line in fh: ensembl_ids.append(line.strip()) fh.close() dbhandle = sqlite3.connect(PARAMS["annotations_database"]) cc = dbhandle.cursor() gene_ids = [] n_ids = 0 for ID in ensembl_ids: gene_ids.append('gene_id="%s"' % ID) n_ids += 1 statement = "select contig,start,end from geneset_cds_gtf where " + " OR ".join(gene_ids) cc.execute(statement) region_list = [] n_regions = 0 for result in cc: b = Bed.Bed() b.contig, b.start, b.end = result region_list.append(b) n_regions += 1 cc.close() E.info("Retrieved exon locations for %i genes. 
Got %i regions" % (n_ids, n_regions)) return(region_list) def getBedLocations(filename): fh = open(filename, "r") region_list = [] n_regions = 0 for line in fh: if line.strip() != "" and line[0] != "#": fields = line.split("\t") contig, start, end = fields[0], int(fields[1]), int(fields[2]) region_list.append((contig, start, end)) n_regions += 1 fh.close() return (region_list) def buildQuicksectMask(bed_file): mask = IndexedGenome.Quicksect() n_regions = 0 for bed in Bed.iterator(IOTools.openFile(bed_file)): mask.add(bed.contig, (bed.start - 1), (bed.end + 1), 1) n_regions += 1 E.info("Built Quicksect mask for %i regions" % n_regions) return(mask) def buildBAMforPeakCalling(infiles, outfile, dedup, mask): samfiles = [] num_reads = 0 nfiles = 0 statement = [] tmpfile = P.getTempFilename(".") if len(infiles) > 1 and isinstance(infiles, str) == 0: statement.append('''samtools merge @OUT@ %s''' % (infiles.join(" "))) statement.append('''samtools sort @IN@ @OUT@''') if dedup: statement.append('''MarkDuplicates INPUT=@IN@ ASSUME_SORTED=true REMOVE_DUPLICATES=true QUIET=true OUTPUT=@OUT@ METRICS_FILE=%(outfile)s.picardmetrics VALIDATION_STRINGENCY=SILENT > %(outfile)s.picardlog ''') if mask: statement.append( '''intersectBed -abam @IN@ -b %(mask)s -wa -v > @OUT@''') statement.append('''mv @IN@ %(outfile)s''') statement.append('''samtools index %(outfile)s''') statement = P.joinStatements(statement, infiles) P.run() def buildSimpleNormalizedBAM(infiles, outfile, nreads): infile, countfile = infiles pysam_in = pysam.Samfile(infile, "rb") fh = IOTools.openFile(countfile, "r") readcount = int(fh.read()) fh.close() threshold = float(nreads) / float(readcount) pysam_out = pysam.Samfile(outfile, "wb", template=pysam_in) ninput, noutput = 0, 0 for read in pysam_in.fetch(): ninput += 1 if random.random() <= threshold: pysam_out.write(read) noutput += 1 pysam_in.close() pysam_out.close() pysam.index(outfile) E.info("buildNormalizedBam: %i input, %i output (%5.2f%%), should be %i" % (ninput, noutput, 100.0 * noutput / ninput, nreads)) def buildNormalizedBAM(infiles, outfile, normalize=True): min_reads = getMinimumMappedReads(glob.glob("*.readstats")) samfiles = [] num_reads = 0 for infile, statsfile in infiles: samfiles.append(pysam.Samfile(infile, "rb")) num_reads += getMappedReads(statsfile) threshold = float(min_reads) / num_reads E.info("%s: min reads: %i, total reads=%i, threshold=%f" % (infiles, min_reads, num_reads, threshold)) pysam_out = pysam.Samfile(outfile, "wb", template=samfiles[0]) ninput, noutput, nduplicates = 0, 0, 0 last_contig, last_pos = None, None for pysam_in in samfiles: for read in pysam_in.fetch(): ninput += 1 if read.rname == last_contig and read.pos == last_pos: nduplicates += 1 continue if normalize and random.random() <= threshold: pysam_out.write(read) noutput += 1 last_contig, last_pos = read.rname, read.pos pysam_in.close() pysam_out.close() logs = IOTools.openFile(outfile + ".log", "w") logs.write("# min_reads=%i, threshold= %5.2f\n" % (min_reads, threshold)) logs.write("set\tcounts\tpercent\n") logs.write("ninput\t%i\t%5.2f%%\n" % (ninput, 100.0)) nwithout_dups = ninput - nduplicates logs.write("duplicates\t%i\t%5.2f%%\n" % (nduplicates, 100.0 * nduplicates / ninput)) logs.write("without duplicates\t%i\t%5.2f%%\n" % (nwithout_dups, 100.0 * nwithout_dups / ninput)) logs.write("target\t%i\t%5.2f%%\n" % (min_reads, 100.0 * min_reads / nwithout_dups)) logs.write("noutput\t%i\t%5.2f%%\n" % (noutput, 100.0 * noutput / nwithout_dups)) logs.close() if len(samfiles) > 1: 
tmpfilename = P.getTempFilename(".") pysam.sort(outfile, tmpfilename) shutil.move(tmpfilename + ".bam", outfile) os.unlink(tmpfilename) pysam.index(outfile) E.info("buildNormalizedBam: %i input, %i output (%5.2f%%), should be %i" % (ninput, noutput, 100.0 * noutput / ninput, min_reads)) def buildBAMStats(infile, outfile): outs = IOTools.openFile(outfile, "w") outs.write("reads\tcategory\n") for line in pysam.flagstat(infile): data = line[:-1].split(" ") outs.write("%s\t%s\n" % (data[0], " ".join(data[1:]))) pysam_in = pysam.Samfile(infile, "rb") outs_dupl = IOTools.openFile(outfile + ".duplicates", "w") outs_dupl.write("contig\tpos\tcounts\n") outs_hist = IOTools.openFile(outfile + ".histogram", "w") outs_hist.write("duplicates\tcounts\tcumul\tfreq\tcumul_freq\n") last_contig, last_pos = None, None ninput, nduplicates = 0, 0 duplicates = collections.defaultdict(int) counts = collections.defaultdict(int) count = 0 nh, nm = [], [] for read in pysam_in.fetch(): ninput += 1 if read.rname == last_contig and read.pos == last_pos: count += 1 nduplicates += 1 continue if count > 1: outs_dupl.write("%s\t%i\t%i\n" % (last_contig, last_pos, count)) counts[count] += 1 count = 1 last_contig, last_pos = read.rname, read.pos outs.write("%i\tduplicates (%5.2f%%)\n" % (nduplicates, 100.0 * nduplicates / ninput)) outs.write("%i\twithout duplicates (%5.2f%%)\n" % (ninput - nduplicates, 100.0 * (ninput - nduplicates) / ninput)) pysam_in.close() outs.close() outs_dupl.close() keys = list(counts.keys()) c = 0 total = sum(counts.values()) for k in sorted(keys): c += counts[k] outs_hist.write("%i\t%i\t%i\t%f\t%f\n" % (k, counts[k], c, 100.0 * counts[k] / total, 100.0 * c / total)) outs_hist.close() def exportIntervalsAsBed(infile, outfile): dbhandle = sqlite3.connect(PARAMS["database_name"]) if outfile.endswith(".gz"): compress = True track = P.snip(outfile, ".bed.gz") else: compress = False track = P.snip(outfile, ".bed") tablename = "%s_intervals" % P.tablequote(track) cc = dbhandle.cursor() statement = "SELECT contig, start, end, interval_id, peakval FROM %s ORDER by contig, start" % tablename cc.execute(statement) outs = IOTools.openFile("%s.bed" % track, "w") for result in cc: contig, start, end, interval_id, peakval = result peakval = int(min(peakval, 1000)) outs.write("%s\t%i\t%i\t%s\t%i\n" % (contig, start, end, str(interval_id), peakval)) cc.close() outs.close() if compress: E.info("compressing and indexing %s" % outfile) use_cluster = True statement = 'bgzip -f %(track)s.bed; tabix -f -p bed %(outfile)s' P.run() def exportMacsIntervalsAsBed(infile, outfile, foldchange): dbhandle = sqlite3.connect(PARAMS["database_name"]) track = P.toTable(os.path.basename(infile)) assert track.endswith("_macs") track = track[:-len("_macs")] cc = dbhandle.cursor() statement = "SELECT contig, start, end, interval_id, fold FROM %(track)s_macs_intervals where fold >= %(foldchange)s ORDER by contig, start" % locals( ) cc.execute(statement) outs = open(outfile, "w") for result in cc: contig, start, end, interval_id, fold = result outs.write("%s\t%i\t%i\t%s\t%d\n" % (contig, start, end, str(interval_id), fold)) cc.close() outs.close() def exportPeaksAsBed(infile, outfile): dbhandle = sqlite3.connect(PARAMS["database_name"]) if infile.endswith("_macs.load"): track = infile[:-len("_macs.load")] else: track = infile[:-len("_intervals.load")] if track.startswith("control"): return peakwidth = PARAMS["peakwidth"] cc = dbhandle.cursor() statement = '''SELECT contig, peakcenter - %(peakwidth)i, peakcenter + %(peakwidth)i, 
interval_id, peakval FROM %(track)s_intervals ORDER by contig, start''' % locals() cc.execute(statement) outs = IOTools.openFile(outfile, "w") for result in cc: contig, start, end, interval_id, peakval = result peakval = int(min(peakval, 1000)) outs.write("%s\t%i\t%i\t%s\t%i\n" % (contig, start, end, str(interval_id), peakval)) cc.close() outs.close() def mergeBedFiles(infiles, outfile): if len(infiles) < 2: raise ValueError( "expected at least two files to merge into %s" % outfile) infile = " ".join(infiles) statement = ''' zcat %(infile)s | mergeBed -i stdin | cut -f 1-3 | awk '{printf("%%s\\t%%i\\n",$0, ++a); }' | bgzip > %(outfile)s ''' P.run() def mergeIntervalsWithScores(infile, outfile, dist, method): intervals = open(infile, "r") merged = open(outfile, "w") topline = intervals.readline() last_contig, last_start, last_end, last_id, last_score = topline[ :-1].split("\t")[:5] last_start = int(last_start) last_end = int(last_end) last_score = int(last_score) for line in intervals: data = line[:-1].split("\t") contig, start, end, interval_id, score = data[:5] start = int(start) end = int(end) score = int(score) if (contig == last_contig) and ((last_end + dist) >= start): if method == "mean": newscore = (score + last_score) / 2 elif method == "length_weighted_mean": length1 = end - start length2 = last_end - last_start newscore = ( (score * length1) + (last_score * length2)) / (length1 + length2) elif method == "max": newscore = max(last_score, score) last_end = end last_score = newscore else: merged.write( "%(last_contig)s\t%(last_start)i\t%(last_end)i\t%(last_id)s\t%(last_score)s\n" % locals()) data = line[:-1].split("\t") last_contig, last_start, last_end, last_id, last_score = data[:5] last_start = int(last_start) last_end = int(last_end) last_score = int(last_score) intervals.close() merged.close()
MIT License
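For the common two-file case, intersectBedFiles just composes a bedtools pipeline and hands it to P.run(). The sketch below builds the same shell statement directly for illustration; the .bed.gz file names are placeholders, and running it would require bedtools and bgzip on the PATH.

a_bed, b_bed, out_bed = "a.bed.gz", "b.bed.gz", "out.bed.gz"   # placeholders

statement = (
    "intersectBed -u -a %s -b %s "
    "| cut -f 1,2,3,4,5 "
    "| awk 'BEGIN { OFS=\"\\t\"; } {$4=++a; print;}' "
    "| bgzip > %s" % (a_bed, b_bed, out_bed)
)
print(statement)
# e.g. subprocess.run(statement, shell=True, check=True) would execute it.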
deepgram/kur
kur/engine/jinja_engine.py
JinjaEngine._evaluate
python
def _evaluate(self, expression): result = self.env.from_string(expression).render(**self._scope) try: result = ast.literal_eval(result) except (ValueError, SyntaxError): pass return result
Evaluates an expression in the current scope. # Arguments expression: str. The string to evaluate. # Return value The evaluated expression (some Python object/class).
https://github.com/deepgram/kur/blob/fd0c120e50815c1e5be64e5dde964dcd47234556/kur/engine/jinja_engine.py#L178-L203
import os import ast import json import logging import yaml import jinja2 from .engine import Engine from ..utils import CudaContext, CudaError logger = logging.getLogger(__name__) def combine(value, new=None): new = new or {} value = dict(value) value.update(new) return value def as_dict(value, key): return {key : value} def ternary(value, result_true, result_false): return result_true if value else result_false def gpu_count(): if gpu_count._value is None: try: with CudaContext() as context: gpu_count._value = len(context) except CudaError: gpu_count._value = 0 return gpu_count._value gpu_count._value = None def resolve_path(engine, filename): filename = os.path.expanduser(os.path.expandvars(filename)) kurfile = engine._scope.get('filename') if kurfile: filename = os.path.join( os.path.dirname(kurfile), filename ) return os.path.abspath(filename) def create_load_json(engine): def load_json(filename, use_cache=True): path = resolve_path(engine, filename) logger.debug('Loading JSON file: %s (%s)', filename, path) if use_cache and path in load_json.cache: logger.trace('Using cached data.') else: with open(path) as fh: load_json.cache[path] = json.loads(fh.read()) return load_json.cache[path] load_json.cache = {} return load_json def create_load_yaml(engine): def load_yaml(filename, use_cache=True): path = resolve_path(engine, filename) logger.debug('Loading YAML file: %s (%s)', filename, path) if use_cache and path in load_yaml.cache: logger.trace('Using cached data.') else: with open(path) as fh: load_yaml.cache[path] = yaml.load(fh.read()) return load_yaml.cache[path] load_yaml.cache = {} return load_yaml class JinjaEngine(Engine): def register_custom_filters(self, env): env.filters['basename'] = os.path.basename env.filters['dirname'] = os.path.dirname env.filters['splitext'] = os.path.splitext env.filters['combine'] = combine env.filters['as_dict'] = as_dict env.filters['ternary'] = ternary env.globals['gpu_count'] = gpu_count env.globals['load_json'] = create_load_json(self) env.globals['load_yaml'] = create_load_yaml(self) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.env = jinja2.Environment() self.register_custom_filters(self.env)
Apache License 2.0
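A self-contained sketch of what _evaluate does under the hood: render the expression with Jinja2 against a scope, then try to turn the rendered string back into a Python object; the scope values are made up.

import ast
import jinja2

env = jinja2.Environment()
scope = {"batch_size": 32, "layers": [64, 64]}

result = env.from_string("{{ layers + [batch_size] }}").render(**scope)
try:
    result = ast.literal_eval(result)      # "[64, 64, 32]" -> [64, 64, 32]
except (ValueError, SyntaxError):
    pass                                   # non-literal results stay strings
print(result, type(result))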
ericssonresearch/calvin-base
calvin/actor/actor.py
Actor.check_authorization_decision
python
def check_authorization_decision(self): if self.authorization_checks: if any(isinstance(elem, list) for elem in self.authorization_checks): for plugin_list in self.authorization_checks: if not check_authorization_plugin_list(plugin_list): return False return True else: return check_authorization_plugin_list(self.authorization_checks) return True
Check if authorization decision is still valid
https://github.com/ericssonresearch/calvin-base/blob/bc4645c2061c30ca305a660e48dc86e3317f5b6f/calvin/actor/actor.py#L797-L808
import wrapt import functools import time import copy from calvin.utilities import calvinuuid from calvin.actor import actorport from calvin.utilities.calvinlogger import get_logger from calvin.utilities.utils import enum from calvin.runtime.north.calvin_token import Token, ExceptionToken from calvin.runtime.north.replicationmanager import ReplicationId import calvin.requests.calvinresponse as response from calvin.runtime.south.async import async from calvin.runtime.north.plugins.authorization_checks import check_authorization_plugin_list from calvin.utilities.calvin_callback import CalvinCB from calvin.csparser.port_property_syntax import get_port_property_capabilities, get_port_property_runtime from calvin.runtime.north.calvinsys import get_calvinsys from calvin.runtime.north.calvinlib import get_calvinlib _log = get_logger(__name__) def manage(include=None, exclude=None): if include and type(include) is not list or exclude and type(exclude) is not list: raise Exception("@manage decorator: Must use list as argument") include_set = set(include) if include else set() exclude_set = set(exclude) if exclude else set() @wrapt.decorator def wrapper(wrapped, instance, args, kwargs): exclude_set.update(instance.__dict__) x = wrapped(*args, **kwargs) if include is None: include_set.update(instance.__dict__) include_set.remove('_managed') include_set.difference_update(exclude_set) instance._managed.update(include_set) return x return wrapper def condition(action_input=[], action_output=[]): tokens_produced = len(action_output) tokens_consumed = len(action_input) def wrap(action_method): @functools.wraps(action_method) def condition_wrapper(self): input_ok = all(self.inports[portname].tokens_available(1) for portname in action_input) output_ok = all(self.outports[portname].tokens_available(1) for portname in action_output) if not input_ok or not output_ok: return (False, output_ok, ()) exhausted_ports = set() exception = False args = [] for portname in action_input: port = self.inports[portname] token, exhaust = port.read() is_exception_token = isinstance(token, ExceptionToken) exception = exception or is_exception_token args.append(token if is_exception_token else token.value ) if exhaust: exhausted_ports.add(port) if exception: production = self.exception_handler(action_method, args) or () else: production = action_method(self, *args) or () valid_production = (tokens_produced == len(production)) if not valid_production: action = "%s.%s" % (self._type, action_method.__name__) raise Exception("%s invalid production %s, expected %s" % (action, str(production), str(tuple(action_output)))) for portname, retval in zip(action_output, production): port = self.outports[portname] port.write_token(retval if isinstance(retval, Token) else Token(retval)) return (True, True, exhausted_ports) return condition_wrapper return wrap def stateguard(action_guard): def wrap(action_method): @functools.wraps(action_method) def guard_wrapper(self, *args): if not action_guard(self): return (False, True, ()) return action_method(self, *args) return guard_wrapper return wrap def verify_status(valid_status_list, raise_=False): @wrapt.decorator def wrapper(wrapped, instance, args, kwargs): if not instance.fsm.disable_state_checks and instance.fsm.state() not in valid_status_list: msg = "Invalid status %s for operation %s" % (instance.fsm, wrapped.__name__) if raise_: raise Exception(msg) else: _log.info(msg) x = wrapped(*args, **kwargs) return x return wrapper def _implements_state(obj): return hasattr(obj, 'state') and 
callable(getattr(obj, 'state')) and hasattr(obj, 'set_state') and callable(getattr(obj, 'set_state')) class calvinsys(object): @staticmethod def open(actor, name, **kwargs): return get_calvinsys().open(name, actor, **kwargs) @staticmethod def can_write(ref): return get_calvinsys().can_write(ref) @staticmethod def write(ref, data): return get_calvinsys().write(ref, data) @staticmethod def can_read(ref): return get_calvinsys().can_read(ref) @staticmethod def read(ref): return get_calvinsys().read(ref) @staticmethod def close(ref): return get_calvinsys().close(ref) class calvinlib(object): @staticmethod def use(name, **kwargs): return get_calvinlib().use(name, **kwargs) class Actor(object): action_priority = tuple() _security_state_keys = ('_subject_attributes') _private_state_keys = ('_id', '_name', '_has_started', '_deployment_requirements', '_signature', '_migration_info', "_port_property_capabilities", "_replication_id") class FSM(object): def __init__(self, states, initial, transitions, hooks=None, allow_invalid_transitions=True, disable_transition_checks=False, disable_state_checks=False): self.states = states self._state = initial self.transitions = transitions self.hooks = hooks or {} self.allow_invalid_transitions = allow_invalid_transitions self.disable_transition_checks = disable_transition_checks self.disable_state_checks = disable_state_checks def state(self): return self._state def transition_to(self, new_state): if new_state in self.transitions[self._state] or self.disable_transition_checks: hook = self.hooks.get((self._state, new_state), None) if hook: hook() self._state = new_state else: msg = "Invalid transition %s -> %s" % (self, self.printable(new_state)) if self.allow_invalid_transitions: _log.warning("ALLOWING " + msg) self._state = new_state else: raise Exception(msg) def printable(self, state): return self.states.reverse_mapping[state] def __str__(self): return self.printable(self._state) STATUS = enum('LOADED', 'READY', 'PENDING', 'ENABLED', 'DENIED', 'MIGRATABLE') VALID_TRANSITIONS = { STATUS.LOADED : [STATUS.READY], STATUS.READY : [STATUS.PENDING, STATUS.ENABLED, STATUS.DENIED], STATUS.PENDING : [STATUS.READY, STATUS.PENDING, STATUS.ENABLED], STATUS.ENABLED : [STATUS.READY, STATUS.PENDING, STATUS.DENIED], STATUS.DENIED : [STATUS.ENABLED, STATUS.MIGRATABLE, STATUS.PENDING], STATUS.MIGRATABLE: [STATUS.READY, STATUS.DENIED] } test_args = () test_kwargs = {} @property def id(self): return self._id @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def migration_info(self): return self._migration_info def __init__(self, actor_type, name='', allow_invalid_transitions=True, disable_transition_checks=False, disable_state_checks=False, actor_id=None, security=None): super(Actor, self).__init__() self._type = actor_type self._name = name self._id = actor_id or calvinuuid.uuid("ACTOR") _log.debug("New actor id: %s, supplied actor id %s" % (self._id, actor_id)) self._deployment_requirements = [] self._port_property_capabilities = None self._signature = None self._component_members = set([self._id]) self._managed = set() self._has_started = False self._migration_info = None self._migrating_to = None self._migration_connected = True self._last_time_warning = 0.0 self.sec = security self._subject_attributes = self.sec.get_subject_attributes() if self.sec is not None else None self.authorization_checks = None self._replication_id = ReplicationId() self._exhaust_cb = None self._pressure_event = 0 self.inports = {p: 
actorport.InPort(p, self, pp) for p, pp in self.inport_properties.items()} self.outports = {p: actorport.OutPort(p, self, pp) for p, pp in self.outport_properties.items()} hooks = { (Actor.STATUS.PENDING, Actor.STATUS.ENABLED): self._will_start, (Actor.STATUS.ENABLED, Actor.STATUS.PENDING): self.will_stop, } self.fsm = Actor.FSM(Actor.STATUS, Actor.STATUS.LOADED, Actor.VALID_TRANSITIONS, hooks, allow_invalid_transitions=allow_invalid_transitions, disable_transition_checks=disable_transition_checks, disable_state_checks=disable_state_checks) def set_authorization_checks(self, authorization_checks): self.authorization_checks = authorization_checks @verify_status([STATUS.LOADED]) def setup_complete(self): self.fsm.transition_to(Actor.STATUS.READY) def init(self): raise Exception("Implementing 'init()' is mandatory.") def _will_start(self): if not self._has_started: self.will_start() self._has_started = True def will_start(self): pass def will_stop(self): pass def will_migrate(self): pass def did_migrate(self): pass def _will_end(self): if hasattr(self, "will_end") and callable(self.will_end): self.will_end() get_calvinsys().close_all(self) def did_replicate(self, index): pass def __str__(self): ip = "" for p in self.inports.values(): ip = ip + str(p) op = "" for p in self.outports.values(): op = op + str(p) s = "Actor: '%s' class '%s'\nstatus: %s\ninports: %s\noutports:%s" % ( self._name, self._type, self.fsm, ip, op) return s @verify_status([STATUS.READY, STATUS.PENDING, STATUS.ENABLED]) def did_connect(self, port): if self.fsm.state() == Actor.STATUS.ENABLED: return _log.debug("actor.did_connect BEGIN %s %s " % (self._name, self._id)) if self.fsm.state() == Actor.STATUS.READY: self.fsm.transition_to(Actor.STATUS.PENDING) if self.inports: for p in self.inports.values(): if not p.is_connected(): return if self.outports: for p in self.outports.values(): if not p.is_connected(): return self.fsm.transition_to(Actor.STATUS.ENABLED) _log.debug("actor.did_connect ENABLED %s %s " % (self._name, self._id)) @verify_status([STATUS.ENABLED, STATUS.PENDING, STATUS.DENIED, STATUS.MIGRATABLE]) def did_disconnect(self, port): _log.debug("Actor %s did_disconnect %s" % (self._id, Actor.STATUS.reverse_mapping[self.fsm.state()])) if self.fsm.state() == Actor.STATUS.MIGRATABLE: return if self.fsm.state() != Actor.STATUS.PENDING: self.fsm.transition_to(Actor.STATUS.PENDING) if self.inports: for p in self.inports.values(): if p.is_connected(): return if self.outports: for p in self.outports.values(): if p.is_connected(): return self.fsm.transition_to(Actor.STATUS.READY) def exhaust(self, callback): self._exhaust_cb = callback def get_pressure(self): _log.debug("get_pressure %s" % self._replication_id.measure_pressure()) if not self._replication_id.measure_pressure(): return None t = time.time() pressure = {} for port in self.inports.values(): for e in port.endpoints: PRESSURE_LENGTH = len(e.pressure) pressure[port.id + "," + e.peer_id] = {'last': e.pressure_last, 'count': e.pressure_count, 'pressure': [e.pressure[i % PRESSURE_LENGTH] for i in range( max(0, e.pressure_count - PRESSURE_LENGTH), e.pressure_count)]} pressure_event = False for p in pressure.values(): if len(p['pressure']) < 2: continue if ((p['pressure'][-1][1] - p['pressure'][-2][1]) < 10 and p['pressure'][-1][1] > self._pressure_event): self._pressure_event = max(p['pressure'][-1][1], self._pressure_event) pressure_event = True break if (p['pressure'][-1][1] < (t - 30) and p['last'] > p['pressure'][-1][0] + 3 and p['pressure'][-1][1] > 
self._pressure_event): self._pressure_event = max(p['pressure'][-1][1], self._pressure_event) pressure_event = True break pressure['time'] = t _log.debug("get_pressure pressure_event:%s, pressure: %s" % (pressure_event, pressure)) return pressure if pressure_event else None def _authorized(self): authorized = self.check_authorization_decision() if not authorized: _log.info("Access denied for actor %s(%s)" % ( self._type, self._id)) self.fsm.transition_to(Actor.STATUS.DENIED) self.sec.authorization_runtime_search(self._id, self._signature, callback=CalvinCB(self.set_migration_info)) return authorized def _warn_slow_actor(self, time_spent, start_time): time_since_warning = start_time - self._last_time_warning if time_since_warning < 120.0: return self._last_time_warning = start_time _log.warning("%s (%s) actor blocked for %f sec" % (self._name, self._type, time_spent)) def _handle_exhaustion(self, exhausted_ports, output_ok): _log.debug("actor_fire %s test exhaust %s, %s, %s" % (self._id, self._exhaust_cb is not None, exhausted_ports, output_ok)) for port in exhausted_ports: try: port.finished_exhaustion() except: _log.exception("FINSIHED EXHAUSTION FAILED") if (output_ok and self._exhaust_cb is not None and not any([p.any_outstanding_exhaustion_tokens() for p in self.inports.values()])): _log.debug("actor %s exhausted" % self._id) async.DelayedCall(0, self._exhaust_cb, status=response.CalvinResponse(True)) self._exhaust_cb = None @verify_status([STATUS.ENABLED]) def fire(self): for action_method in self.__class__.action_priority: did_fire, output_ok, exhausted = action_method(self) if did_fire: break return did_fire, output_ok, exhausted def enabled(self): r = self.fsm.state() == Actor.STATUS.ENABLED or self._exhaust_cb is not None if not r: _log.debug("Actor %s %s not enabled" % (self._name, self._id)) return r def denied(self): return self.fsm.state() == Actor.STATUS.DENIED def migratable(self): return self.fsm.state() == Actor.STATUS.MIGRATABLE @verify_status([STATUS.DENIED]) def enable_or_migrate(self): if self.check_authorization_decision(): self.fsm.transition_to(Actor.STATUS.ENABLED) else: self.sec.authorization_runtime_search(self._id, self._signature, callback=CalvinCB(self.set_migration_info)) @verify_status([STATUS.ENABLED]) def enable(self): self.fsm.transition_to(Actor.STATUS.ENABLED) @verify_status([STATUS.READY, STATUS.PENDING, STATUS.LOADED]) def disable(self): self.fsm.transition_to(Actor.STATUS.PENDING) @verify_status([STATUS.ENABLED, STATUS.READY, STATUS.PENDING, STATUS.MIGRATABLE]) def connections(self, node_id): c = {'actor_id': self._id, 'actor_name': self._name} inports = {} for port in self.inports.values(): peers = [ (node_id, p[1]) if p[0] == 'local' else p for p in port.get_peers()] inports[port.id] = peers c['inports'] = inports outports = {} for port in self.outports.values(): peers = [ (node_id, p[1]) if p[0] == 'local' else p for p in port.get_peers()] outports[port.id] = peers c['outports'] = outports return c def state(self): return {} def set_state(self, state): pass def _private_state(self): state = {} state['inports'] = { port: self.inports[port]._state() for port in self.inports} state['outports'] = { port: self.outports[port]._state() for port in self.outports} state['_component_members'] = list(self._component_members) state['_requires'] = self.requires if hasattr(self, 'requires') else [] for key in self._private_state_keys: obj = self.__dict__[key] if _implements_state(obj): state[key] = obj.state() else: state[key] = obj state["_calvinsys"] = 
get_calvinsys().serialize(actor=self) return state def _set_private_state(self, state): if "_calvinsys" in state: get_calvinsys().deserialize(actor=self, csobjects=state["_calvinsys"]) for port in state['inports']: self.inports.setdefault(port, actorport.InPort(port, self))._set_state(state['inports'][port]) for port in state['outports']: self.outports.setdefault(port, actorport.OutPort(port, self))._set_state(state['outports'][port]) self._component_members= set(state['_component_members']) for key in self._private_state_keys: if key not in self.__dict__: self.__dict__[key] = state.get(key, None) else: obj = self.__dict__[key] if _implements_state(obj): obj.set_state(state.get(key)) else: self.__dict__[key] = state.get(key, None) def _replication_state(self): return None def _set_replication_state(self, state): pass def _security_state(self): return {'_subject_attributes':self._subject_attributes} def _set_security_state(self, state): pass def _managed_state(self): state = {key: self.__dict__[key] for key in self._managed} return state def _set_managed_state(self, state): self._managed.update(set(state.keys())) for key, val in state.iteritems(): self.__dict__[key] = val def serialize(self): state = {} state['private'] = self._private_state() rstate = self._replication_state() if rstate is not None: state['replication'] = rstate state['managed'] = self._managed_state() state['security']= self._security_state() state['custom'] = self.state() return state def deserialize(self, state): self._set_private_state(state['private']) self._set_replication_state(state.get('replication', None)) self._set_security_state(state['security']) self._set_managed_state(state['managed']) self.set_state(state['custom']) def exception_handler(self, action, args): _log.error("ExceptionToken encountered\n name: %s\n type: %s\n action: %s\n args: %s\n" % (self._name, self._type, action.__name__, args)) raise Exception("ExceptionToken NOT HANDLED") def events(self): return [] def component_add(self, actor_ids): if not isinstance(actor_ids, (set, list, tuple)): actor_ids = [actor_ids] self._component_members.update(actor_ids) def component_remove(self, actor_ids): if not isinstance(actor_ids, (set, list, tuple)): actor_ids = [actor_ids] self._component_members -= set(actor_ids) def part_of_component(self): return len(self._component_members - set([self._id]))>0 def component_members(self): return self._component_members def requirements_add(self, deploy_reqs, extend=False): if extend: self._deployment_requirements.extend(deploy_reqs) else: self._deployment_requirements = deploy_reqs def requirements_get(self): if self._port_property_capabilities is None: self._port_property_capabilities = self._derive_port_property_capabilities() capability_port = [{ 'op': 'port_property_match', 'kwargs': {'port_property': self._port_property_capabilities}, 'type': '+' }] if hasattr(self, 'requires') and self.requires: capability_require = [{ 'op': 'actor_reqs_match', 'kwargs': {'requires': self.requires}, 'type': '+' }] else: capability_require = [] return (self._deployment_requirements + capability_require + capability_port + self._replication_id._placement_req) def _derive_port_property_capabilities(self): port_property_capabilities = set([]) for port in self.inports.values(): port_property_capabilities.update(get_port_property_capabilities(port.properties)) for port in self.outports.values(): port_property_capabilities.update(get_port_property_capabilities(port.properties)) _log.debug("derive_port_property_capabilities:" + 
str(port_property_capabilities)) return get_port_property_runtime(port_property_capabilities) def signature_set(self, signature): if self._signature is None: self._signature = signature
Apache License 2.0
gardener/cc-utils
github/util.py
outdated_draft_releases
python
def outdated_draft_releases( draft_releases: [github3.repos.release.Release], greatest_release_version: str, ): greatest_release_version_info = version.parse_to_semver(greatest_release_version) def _has_semver_draft_prerelease_label(release_name): version_info = version.parse_to_semver(release_name) if version_info.prerelease != 'draft': return False return True autogenerated_draft_releases = [ release for release in draft_releases if release.name and version.is_semver_parseable(release.name) and _has_semver_draft_prerelease_label(release.name) ] draft_release_version_infos = [ version.parse_to_semver(release.name) for release in autogenerated_draft_releases ] def _yield_outdated_version_infos_from_partition(partition): if len(partition) == 1: version_info = partition.pop() if version_info < greatest_release_version_info and version_info.patch == 0: yield version_info else: yield from [ version_info for version_info in partition[1:] ] outdated_version_infos = list() for partition in version.partition_by_major_and_minor(draft_release_version_infos): outdated_version_infos.extend(_yield_outdated_version_infos_from_partition(partition)) outdated_draft_releases = [ release for release in autogenerated_draft_releases if version.parse_to_semver(release.name) in outdated_version_infos ] return outdated_draft_releases
Find outdated draft releases from a list of draft releases and return them. This is achieved by partitioning the release versions according to their combined major and minor version. Partitions are then checked: - if there is only a single release in a partition, it is either a hotfix release (keep the corresponding release) or it is not (delete it if it is not the greatest release according to semver) - if there are multiple release versions in a partition, keep only the release corresponding to the greatest version (according to semver)
https://github.com/gardener/cc-utils/blob/70b9c15b002218b5d06633f70f0c4f1489c74dbc/github/util.py#L868-L923
import datetime import deprecated import enum import io import re import sys import typing from typing import Iterable, Tuple from pydash import _ import requests import github3 from github3.github import GitHub from github3.repos.release import Release from github3.exceptions import NotFoundError, ForbiddenError from github3.orgs import Team import gci.componentmodel import gci.componentmodel as cm import ccc.github import ci.util import product.v2 import version from model.github import GithubConfig class RepoPermission(enum.Enum): PULL = "pull" PUSH = "push" ADMIN = "admin" class GitHubRepoBranch: def __init__( self, github_config: GithubConfig, repo_owner: str, repo_name: str, branch: str, ): self._github_config = ci.util.not_none(github_config) self._repo_owner = ci.util.not_empty(repo_owner) self._repo_name = ci.util.not_empty(repo_name) self._branch = ci.util.not_empty(branch) def github_repo_path(self): return f'{self._repo_owner}/{self._repo_name}' def github_config(self): return self._github_config def repo_owner(self): return self._repo_owner def repo_name(self): return self._repo_name def branch(self): return self._branch class RepositoryHelperBase: GITHUB_TIMESTAMP_UTC_FORMAT = '%Y-%m-%dT%H:%M:%SZ' def __init__( self, owner: str, name: str, default_branch: str='master', github_cfg: GithubConfig=None, github_api: GitHub=None, ): if not (bool(github_cfg) ^ bool(github_api)): raise ValueError('exactly one of github_api and github_cfg must be given') if github_cfg: self.github = ccc.github.github_api(github_cfg) else: self.github = github_api self.repository = self._create_repository( owner=owner, name=name ) self.owner = owner self.repository_name = name self.default_branch = default_branch def _create_repository(self, owner: str, name: str): try: repository = self.github.repository( owner=owner, repository=name ) return repository except NotFoundError as nfe: raise RuntimeError( 'failed to retrieve repository {o}/{r}'.format( o=owner, r=name, ), nfe ) class UpgradePullRequest: def __init__(self, pull_request, from_ref: typing.Union[cm.Resource, cm.ComponentReference], to_ref: typing.Union[cm.Resource, cm.ComponentReference], ): self.pull_request = ci.util.not_none(pull_request) if from_ref.name != to_ref.name: raise ValueError(f'reference name mismatch {from_ref.name=} {to_ref.name=}') if (isinstance(from_ref, cm.Resource) and isinstance(to_ref, cm.Resource) and from_ref.type != to_ref.type ) or type(from_ref) != type(to_ref): raise ValueError(f'reference types do not match: {from_ref=} {to_ref=}') self.ref_name = from_ref.name self.from_ref = from_ref self.to_ref = to_ref if isinstance(from_ref, cm.Resource): self.reference_type_name = from_ref.type.value elif isinstance(from_ref, cm.ComponentReference): self.reference_type_name = product.v2.COMPONENT_TYPE_NAME else: raise NotImplementedError(from_ref.type) def is_obsolete( self, reference_component: gci.componentmodel.Component, ): if not isinstance(reference_component, gci.componentmodel.Component): raise TypeError(reference_component) if self.reference_type_name == product.v2.COMPONENT_TYPE_NAME: reference_refs = sorted( [ rc for rc in reference_component.componentReferences if rc.componentName == self.ref_name ], key=lambda r: version.parse_to_semver(r.version) ) if not reference_refs: return False greatest_reference_version = version.parse_to_semver(reference_refs[-1].version) else: raise NotImplementedError return greatest_reference_version >= version.parse_to_semver(self.to_ref.version) def target_matches( self, 
reference: typing.Tuple[cm.ComponentReference, cm.Resource], reference_version: str = None, ): if not isinstance(reference, cm.ComponentReference) and not isinstance(reference, cm.Resource): raise TypeError(reference) if isinstance(reference, cm.ComponentReference): if product.v2.COMPONENT_TYPE_NAME != self.reference_type_name: return False if reference.componentName != self.ref_name: return False else: if reference.name != self.ref_name: return False if reference.type.value != self.reference_type_name: return False reference_version = reference_version or reference.version if reference_version != self.to_ref.version: return False return True def purge(self): self.pull_request.close() head_ref = 'heads/' + self.pull_request.head.ref self.pull_request.repository.ref(head_ref).delete() class PullRequestUtil(RepositoryHelperBase): PR_TITLE_PATTERN = re.compile(r'^\[ci:(\S*):(\S*):(\S*)->(\S*)\]$') @staticmethod def calculate_pr_title( reference: gci.componentmodel.ComponentReference, from_version: str, to_version: str, ) -> str: if not isinstance(reference, gci.componentmodel.ComponentReference): raise TypeError(reference) type_name = product.v2.COMPONENT_TYPE_NAME reference_name = reference.componentName return f'[ci:{type_name}:{reference_name}:{from_version}->{to_version}]' def _has_upgrade_pr_title(self, pull_request) -> bool: return bool(self.PR_TITLE_PATTERN.fullmatch(pull_request.title)) def _pr_to_upgrade_pull_request(self, pull_request): ci.util.not_none(pull_request) match = self.PR_TITLE_PATTERN.fullmatch(pull_request.title) if match is None: raise ValueError("PR-title '{t}' did not match title-schema".format( t=pull_request.title) ) reference_type_name = match.group(1) if not reference_type_name: reference_type_name = 'component' if not reference_type_name == 'component': raise NotImplementedError(reference_type_name) ref_name = match.group(2) from_version = match.group(3) to_version = match.group(4) from_ref = cm.ComponentReference( name=ref_name, componentName=ref_name, version=from_version, ) to_ref = cm.ComponentReference( name=ref_name, componentName=ref_name, version=to_version, ) return UpgradePullRequest( pull_request=pull_request, from_ref=from_ref, to_ref=to_ref, ) def enumerate_upgrade_pull_requests(self, state_filter: str='open'): def pr_to_upgrade_pr(pull_request): return self._pr_to_upgrade_pull_request(pull_request=pull_request) def strip_title(pull_request): pull_request.title = pull_request.title.strip() return pull_request parsed_prs = ci.util.FluentIterable(self.repository.pull_requests(state=state_filter)) .map(strip_title) .filter(self._has_upgrade_pr_title) .map(pr_to_upgrade_pr) .filter(lambda e: e) .as_list() return parsed_prs def retrieve_pr_template_text(self): pattern = re.compile(r"(pull_request_template)(\..{1,3})?$") directories = ['.github', '.', 'docs'] for directory in directories: try: for filename, content in self.repository.directory_contents(directory): if pattern.match(filename): content.refresh() return content.decoded.decode('utf-8') except github3.exceptions.NotFoundError: pass return None class GitHubRepositoryHelper(RepositoryHelperBase): def create_or_update_file( self, file_path: str, file_contents: str, commit_message: str, branch: str=None, ) -> str: if branch is None: branch = self.default_branch try: contents = self.retrieve_file_contents(file_path=file_path, branch=branch) except NotFoundError: contents = None if contents: decoded_contents = contents.decoded.decode('utf-8') if decoded_contents == file_contents: return ci.util.info( 
'Repository file contents are identical to passed file contents.' ) else: response = contents.update( message=commit_message, content=file_contents.encode('utf-8'), branch=branch, ) else: response = self.repository.create_file( path=file_path, message=commit_message, content=file_contents.encode('utf-8'), branch=branch, ) return response['commit'].sha @staticmethod def from_githubrepobranch( githubrepobranch: GitHubRepoBranch, ): return GitHubRepositoryHelper( github_cfg=githubrepobranch.github_config(), owner=githubrepobranch.repo_owner(), name=githubrepobranch.repo_name(), default_branch=githubrepobranch.branch(), ) def retrieve_file_contents(self, file_path: str, branch: str=None): if branch is None: branch = self.default_branch return self.repository.file_contents( path=file_path, ref=branch, ) def retrieve_text_file_contents( self, file_path: str, branch: str=None, encoding: str='utf-8', ): if branch is None: branch = self.default_branch contents = self.retrieve_file_contents(file_path, branch) return contents.decoded.decode(encoding) def create_tag( self, tag_name: str, tag_message: str, repository_reference: str, author_name: str, author_email: str, repository_reference_type: str='commit' ): author = { 'name': author_name, 'email': author_email, 'date': datetime.datetime.now(datetime.timezone.utc) .strftime(self.GITHUB_TIMESTAMP_UTC_FORMAT) } self.repository.create_tag( tag=tag_name, message=tag_message, sha=repository_reference, obj_type=repository_reference_type, tagger=author ) MAXIMUM_GITHUB_RELEASE_BODY_LENGTH = 25000 def _replacement_release_notes( self, asset_url: str, component_name: str, component_version: str, ): return ( f'The release-notes for component **{component_name}** in version ' f'**{component_version}** exceeded the maximum length of ' f'{self.MAXIMUM_GITHUB_RELEASE_BODY_LENGTH} characters allowed by GitHub for ' 'release-bodies.\n' f'They have been uploaded as release-asset and can be found at {asset_url}.' 
) RELEASE_NOTES_ASSET_NAME = 'release_notes.md' def create_release( self, tag_name: str, body: str, draft: bool=False, prerelease: bool=False, name: str=None, component_name: str=None, component_version: str=None, ): if len(body) < self.MAXIMUM_GITHUB_RELEASE_BODY_LENGTH: return self.repository.create_release( tag_name=tag_name, body=body, draft=draft, prerelease=prerelease, name=name ) else: release = self.repository.create_release( tag_name=tag_name, body='', draft=draft, prerelease=prerelease, name=name ) release_notes_asset = release.upload_asset( content_type='text/markdown', name=self.RELEASE_NOTES_ASSET_NAME, asset=body.encode('utf-8'), label='Release Notes', ) release.edit( body=self._replacement_release_notes( asset_url=release_notes_asset.browser_download_url, component_name=component_name, component_version=component_version or tag_name, ) ) return release def delete_releases( self, release_names: [str], ): for release in self.repository.releases(): if release.name in release_names: release.delete() def create_draft_release( self, name: str, body: str, component_name: str=None, component_version: str=None, ): return self.create_release( tag_name='', name=name, body=body, draft=True, component_name=component_name, component_version=component_version, ) def promote_draft_release( self, draft_release, release_tag, release_version, component_name: str=None, ): draft_release.edit( tag_name=release_tag, body=None, draft=False, prerelease=False, name=release_version, ) release_notes_asset = next( (a for a in draft_release.assets() if a.name == self.RELEASE_NOTES_ASSET_NAME), None, ) if release_notes_asset: draft_release.edit( body=self._replacement_release_notes( asset_url=release_notes_asset.browser_download_url, component_name=component_name, component_version=release_version, ) ) def update_release_notes( self, tag_name: str, component_name: str, body: str, ) -> bool: ci.util.not_empty(tag_name) release = self.repository.release_from_tag(tag_name) if not release: raise RuntimeError( f"No release with tag '{tag_name}' found " f"in repository {self.repository}" ) if len(body) < self.MAXIMUM_GITHUB_RELEASE_BODY_LENGTH: release.edit(body=body) else: release_notes_asset = next( (a for a in release.assets() if a.name == self.RELEASE_NOTES_ASSET_NAME), None, ) if release_notes_asset: release_notes_asset.delete() release_notes_asset = release.upload_asset( content_type='text/markdown', name=self.RELEASE_NOTES_ASSET_NAME, asset=body.encode('utf-8'), label='Release Notes', ) release.edit( body=self._replacement_release_notes( asset_url=release_notes_asset.browser_download_url, component_version=tag_name, component_name=component_name) ) return release def draft_release_with_name( self, name: str ) -> Release: releases = list(self.repository.releases()) release = _.find(releases, lambda rls: rls.draft and rls.name == name) return release def tag_exists( self, tag_name: str, ): ci.util.not_empty(tag_name) try: self.repository.ref('tags/' + tag_name) return True except NotFoundError: return False def retrieve_asset_contents(self, release_tag: str, asset_label: str): ci.util.not_none(release_tag) ci.util.not_none(asset_label) release = self.repository.release_from_tag(release_tag) for asset in release.assets(): if asset.label == asset_label or asset.name == asset_label: break else: response = requests.Response() response.status_code = 404 response.json = lambda: {'message':'no asset with label {} found'.format(asset_label)} raise NotFoundError(resp=response) buffer = io.BytesIO() 
asset.download(buffer) return buffer.getvalue().decode() def release_versions(self): for tag_name in self.release_tags(): try: version.parse_to_semver(tag_name) yield tag_name except ValueError: pass def release_tags(self): return _ .chain(self.repository.releases()) .filter(lambda release: not release.draft and not release.prerelease) .map('tag_name') .filter(lambda tag: tag is not None) .value() def search_issues_in_repo(self, query: str): query = "repo:{org}/{repo} {query}".format( org=self.owner, repo=self.repository_name, query=query ) search_result = self.github.search_issues(query) return search_result def is_pr_created_by_org_member(self, pull_request_number): pull_request = self.repository.pull_request(pull_request_number) user_login = pull_request.user.login return self.is_org_member(self.owner, user_login) def add_labels_to_pull_request(self, pull_request_number, *labels): pull_request = self.repository.pull_request(pull_request_number) pull_request.issue().add_labels(*labels) def add_comment_to_pr(self, pull_request_number, comment): pull_request = self.repository.pull_request(pull_request_number) pull_request.create_comment(comment) def is_org_member(self, organization_name, user_login): organization = self.github.organization(organization_name) return organization.is_member(user_login) def delete_outdated_draft_releases(self) -> Iterable[Tuple[github3.repos.release.Release, bool]]: releases = [release for release in self.repository.releases()] non_draft_releases = [release for release in releases if not release.draft] draft_releases = [release for release in releases if release.draft] greatest_release_version = find_greatest_github_release_version(non_draft_releases) if greatest_release_version is not None: draft_releases_to_delete = outdated_draft_releases( draft_releases=draft_releases, greatest_release_version=greatest_release_version, ) else: draft_releases_to_delete = [] for release in draft_releases_to_delete: yield release, release.delete() @deprecated.deprecated def github_cfg_for_hostname(cfg_factory, host_name, require_labels=('ci',)): return ccc.github.github_cfg_for_hostname( host_name=host_name, cfg_factory=cfg_factory, require_labels=require_labels, ) @deprecated.deprecated def _create_github_api_object(github_cfg): return ccc.github.github_api(github_cfg=github_cfg) def branches( github_cfg, repo_owner: str, repo_name: str, ): github_api = ccc.github.github_api(github_cfg=github_cfg) repo = github_api.repository(repo_owner, repo_name) return list(map(lambda r: r.name, repo.branches())) def retrieve_email_addresses( github_cfg: GithubConfig, github_users: [str], out_file: str=None ): github = ccc.github.github_api(github_cfg=github_cfg) def retrieve_email(username: str): user = github.user(username) return user.email fh = open(out_file, 'w') if out_file else sys.stdout email_addresses_count = 0 for email_address in filter(None, map(retrieve_email, github_users)): fh.write(email_address + '\n') email_addresses_count += 1 ci.util.verbose('retrieved {sc} email address(es) from {uc} user(s)'.format( sc=email_addresses_count, uc=len(github_users) ) ) def _create_team( github: GitHub, organization_name: str, team_name: str ): organization = github.organization(organization_name) team = _retrieve_team_by_name_or_none(organization, team_name) if team: ci.util.verbose("Team {name} already exists".format(name=team_name)) return try: organization.create_team(name=team_name) ci.util.info("Team {name} created".format(name=team_name)) except ForbiddenError as err: 
ci.util.fail("{err} Cannot create team {name} in org {org} due to missing privileges".format( err=err, name=team_name, org=organization_name )) def _add_user_to_team( github: GitHub, organization_name: str, team_name: str, user_name: str ): organization = github.organization(organization_name) team = _retrieve_team_by_name_or_none(organization, team_name) if not team: ci.util.fail(f"Team '{team_name}' does not exist") if team.is_member(user_name): ci.util.verbose(f"'{user_name}' is already assigned to team '{team_name}'") return if team.add_member(username=user_name): ci.util.info(f"Added '{user_name}' to team '{team_name}'") else: ci.util.fail( f"Could not add '{user_name}' to team '{team_name}'. Check for missing privileges" ) def _add_all_repos_to_team( github: GitHub, organization_name: str, team_name: str, permission: RepoPermission=RepoPermission.ADMIN ): organization = github.organization(organization_name) team = _retrieve_team_by_name_or_none(organization, team_name) if not team: ci.util.fail("Team {name} does not exist".format(name=team_name)) for repo in organization.repositories(): if team.has_repository(repo.full_name): ci.util.verbose("Team {teamnname} already assigned to repo {reponame}".format( teamnname=team_name, reponame=repo.full_name )) continue team.add_repository(repository=repo.full_name, permission=permission.value) ci.util.info("Added team {teamname} to repository {reponame}".format( teamname=team_name, reponame=repo.full_name )) def _retrieve_team_by_name_or_none( organization: github3.orgs.Organization, team_name: str ) -> Team: team_list = list(filter(lambda t: t.name == team_name, organization.teams())) return team_list[0] if team_list else None def find_greatest_github_release_version( releases: typing.List[github3.repos.release.Release], warn_for_unparseable_releases: bool = True, ignore_prerelease_versions: bool = False, ): release_versions = [ release.name if release.name else release.tag_name for release in releases ] def filter_non_semver_parseable_releases(release_name): try: version.parse_to_semver(release_name) return True except ValueError: if warn_for_unparseable_releases: ci.util.warning(f'ignoring release {release_name=} (not semver)') return False release_versions = [ name for name in filter(filter_non_semver_parseable_releases, release_versions) ] release_version_infos = [ version.parse_to_semver(release_version) for release_version in release_versions ] latest_version = version.find_latest_version( versions=release_version_infos, ignore_prerelease_versions=ignore_prerelease_versions, ) if latest_version: return str(latest_version) else: return None
Apache License 2.0
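The pruning rule described in the outdated_draft_releases docstring above can be illustrated with a small self-contained sketch. prune_draft_versions and the plain (major, minor, patch) tuples are invented stand-ins, not part of cc-utils, which works on semver objects and github3 draft releases:

from collections import defaultdict

def prune_draft_versions(draft_versions, greatest_release):
    # Partition draft versions by their (major, minor) pair.
    partitions = defaultdict(list)
    for v in draft_versions:
        partitions[(v[0], v[1])].append(v)
    outdated = []
    for partition in partitions.values():
        partition.sort()
        if len(partition) == 1:
            v = partition[0]
            # A lone non-hotfix draft (patch == 0) that is already behind the
            # greatest published release is outdated.
            if v < greatest_release and v[2] == 0:
                outdated.append(v)
        else:
            # Several drafts for the same major.minor: keep only the greatest.
            outdated.extend(partition[:-1])
    return outdated

print(prune_draft_versions([(1, 1, 0), (1, 2, 0), (1, 2, 1)], greatest_release=(1, 2, 1)))
# -> [(1, 1, 0), (1, 2, 0)]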
bsc-wdc/dislib
examples/linear_regression_plot.py
main
python
def main(): x = np.array([1000, 4000, 5000, 4500, 3000, 4000, 9000, 11000, 15000, 12000, 7000, 3000]) y = np.array([9914, 40487, 54324, 50044, 34719, 42551, 94871, 118914, 158484, 131348, 78504, 36284]) x_ds = ds.array(x[:, np.newaxis], (4, 1)) y_ds = ds.array(y[:, np.newaxis], (4, 1)) reg = LinearRegression() reg.fit(x_ds, y_ds) coef = reg.coef_.collect() intercept = reg.intercept_.collect() print(coef, intercept) scatter(x, y, marker='x') x_mesh = np.linspace(min(x), max(x), 1000) plot(x_mesh, [coef*x + intercept for x in x_mesh]) show()
Linear regression example with plot
https://github.com/bsc-wdc/dislib/blob/56209f1e1f978ad70ec1b5099b75e17ac7276c6f/examples/linear_regression_plot.py#L8-L30
import numpy as np from pylab import scatter, plot, show import dislib as ds from dislib.regression import LinearRegression
Apache License 2.0
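As a quick sanity check on the example above, the same slope and intercept can be recovered with NumPy's least-squares solver; this cross-check is not part of dislib:

import numpy as np

x = np.array([1000, 4000, 5000, 4500, 3000, 4000, 9000, 11000, 15000, 12000, 7000, 3000], dtype=float)
y = np.array([9914, 40487, 54324, 50044, 34719, 42551, 94871, 118914, 158484, 131348, 78504, 36284], dtype=float)
A = np.column_stack([x, np.ones_like(x)])        # design matrix: slope column + intercept column
coef, intercept = np.linalg.lstsq(A, y, rcond=None)[0]
print(coef, intercept)                           # should roughly match reg.coef_ / reg.intercept_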
ramonhagenaars/jsons
jsons/_datetime_impl.py
_new_datetime
python
def _new_datetime(date_inst: date, time_inst: time, tzinfo: timezone) -> datetime: return datetime.combine(date_inst, time_inst).replace(tzinfo=tzinfo)
Return a datetime instance from a date, time and timezone. This function was required because the ``tzinfo`` argument of ``datetime.combine`` is missing under the Linux Python distribution. :param date_inst: the date. :param time_inst: the time. :param tzinfo: the timezone. :return: a combined datetime instance.
https://github.com/ramonhagenaars/jsons/blob/12594ebb13247a26a8511644e5ca324817075385/jsons/_datetime_impl.py#L128-L140
from datetime import datetime, timezone, timedelta, time, date from typing import Union RFC3339_DATE_PATTERN = '%Y-%m-%d' RFC3339_TIME_PATTERN = '%H:%M:%S' RFC3339_DATETIME_PATTERN = '{}T{}'.format( RFC3339_DATE_PATTERN, RFC3339_TIME_PATTERN) def to_str( dt: Union[datetime, date], strip_microseconds: bool, fork_inst: type, pattern: str = RFC3339_DATETIME_PATTERN) -> str: offset = get_offset_str(dt, fork_inst) if not strip_microseconds and getattr(dt, 'microsecond', None): pattern += '.%f' return dt.strftime("{}{}".format(pattern, offset)) def get_offset_str( obj: Union[datetime, date, timedelta], fork_inst: type) -> str: result = '' if isinstance(obj, datetime): result = _datetime_offset_str(obj, fork_inst) elif isinstance(obj, timedelta): result = _timedelta_offset_str(obj) return result def get_datetime_inst(obj: str, pattern: str) -> datetime: if obj[-1] == 'Z': result = _datetime_utc_inst(obj, pattern) elif 'T' in pattern: result = _datetime_offset_inst(obj, pattern) else: result = datetime.strptime(obj, pattern) return result def _datetime_offset_str(obj: datetime, fork_inst: type) -> str: tzone = obj.tzinfo if not tzone: fork_inst._warn('The use of datetimes without timezone is dangerous ' 'and can lead to undesired results.', 'datetime-without-tz') tzone = datetime.now(timezone.utc).astimezone().tzinfo if tzone is timezone.utc or tzone.utc is timezone.utc: return '+00:00' offset = 'Z' if tzone.tzname(None) not in ('UTC', 'UTC+00:00'): tdelta = tzone.utcoffset(None) or getattr(tzone, 'adjusted_offset', tzone.utcoffset(obj)) offset = _timedelta_offset_str(tdelta) return offset def _timedelta_offset_str(tdelta: timedelta) -> str: offset_s = tdelta.total_seconds() offset_h = int(offset_s / 3600) offset_m = int((offset_s / 60) % 60) offset_t = time(abs(offset_h), abs(offset_m)) operator = '+' if offset_s > 0 else '-' offset = offset_t.strftime('{}%H:%M'.format(operator)) return offset def _datetime_utc_inst(obj: str, pattern: str) -> datetime: dattim_str = obj[0:-1] dattim_obj = datetime.strptime(dattim_str, pattern) return _new_datetime(dattim_obj.date(), dattim_obj.time(), timezone.utc) def _datetime_offset_inst(obj: str, pattern: str) -> datetime: dat_str, tim_str = obj.split('T') splitter, factor = ('+', 1) if '+' in tim_str else ('-', -1) naive_tim_str, offset = tim_str.split(splitter) naive_dattim_str = '{}T{}'.format(dat_str, naive_tim_str) dattim_obj = datetime.strptime(naive_dattim_str, pattern) hrs_str, mins_str = offset.split(':') hrs = int(hrs_str) * factor mins = int(mins_str) * factor tz = timezone(offset=timedelta(hours=hrs, minutes=mins)) return _new_datetime(dattim_obj.date(), dattim_obj.time(), tz)
MIT License
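What _new_datetime does can be reproduced with the standard library alone; the literal date and time values below are made up for illustration:

from datetime import date, time, datetime, timezone

d, t = date(2020, 1, 2), time(3, 4, 5)
dt = datetime.combine(d, t).replace(tzinfo=timezone.utc)   # same combine + replace as _new_datetime
print(dt.isoformat())                                      # 2020-01-02T03:04:05+00:00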
membase/membase-cli
restclient.py
RestClient.sendCmd
python
def sendCmd(self, method, uri, user='', password='', opts = {}): data = '' headers = {} encoded_params = '' if user and password: self.user = user self.password = password auth = ('Basic ' + string.strip(base64.encodestring(user + ':' + password))) headers['Authorization'] = auth self.bootStrap(headers) if method == 'POST': encoded_params = urllib.urlencode(self.params) headers['Content-type'] = 'application/x-www-form-urlencoded' elif method == 'DELETE': encoded_params = urllib.urlencode(self.params) headers['Content-type'] = 'application/x-www-form-urlencoded' else: if self.params: uri = uri, '?', urllib.urlencode(self.params) if self.debug: print "METHOD: %s" % method print "PARAMS: ", self.params print "ENCODED_PARAMS: %s" % encoded_params print "REST CMD: %s %s" % (method,uri) self.makeRequest(method, uri, encoded_params, headers) response = self.conn.getresponse() if self.debug: print "response.status: %s" % response.status return response
sendCmd() This method handles accessing the REST API and returns either data (for a GET) or a success or error message (for a POST).
https://github.com/membase/membase-cli/blob/13195507facba8cb8f85dafb07df1eeff3ea7dcd/restclient.py#L87-L130
import sys import socket import httplib import urllib import base64 import simplejson as json import string class RestClient: def __init__(self, server, port, opts= {}): self.server = server self.port = port self.debug = opts.get('debug', False) self.uri = '/pools' self.method = 'GET' self.params = {} self.user = '' self.password = '' self.clientConnect(server, int(port)) def clientConnect(self, server, port): error_connect = "Unable to connect to %s" % self.server try: self.conn = httplib.HTTPConnection(server, port) except httplib.NotConnected: print error_connect sys.exit(2) except httplib.HTTPException: print error_connect sys.exit(2) except socket.error: print error_connect sys.exit(2) except socket.gaierror: print error_connect sys.exit(2) def setParam(self, param, value): self.params[param] = value def handleResponse(self, method, response, opts={ 'success_msg':'', 'error_msg':'' }): if response.status in [200, 201, 202, 204, 302]: if method == 'GET': return response.read() return "SUCCESS: %s" % opts['success_msg'] if response.status == 401: print 'ERROR: unable to access the REST API - please check your username (-u) and password (-p)' sys.exit(2) print 'ERROR: %s (%d) %s' % (opts['error_msg'], response.status, response.reason) output_json = json.loads(response.read()) print output_json if "errors" in output_json: for error_code,error_message in output_json["errors"].iteritems(): print "ERROR: %s" % error_message sys.exit(2) def bootStrap(self, headers): self.conn.request('GET', '/pools', '', headers) response = self.conn.getresponse() opts = {'error_msg':'bootstrap failed'} return self.handleResponse('GET', response, opts)
Apache License 2.0
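A hypothetical usage sketch of RestClient.sendCmd; the host, port, URI and credentials are invented, and a membase/couchbase server is assumed to be listening at that address:

client = RestClient('127.0.0.1', 8091)
response = client.sendCmd('GET', '/pools/default',
                          user='Administrator', password='password')
print(client.handleResponse('GET', response))   # prints the GET payload on success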
packtpublishing/functional-python-programming-second-edition
Chapter04/ch04_ex4.py
mean
python
def mean(samples: Sequence) -> float: return s1(samples)/s0(samples)
Arithmetic mean. >>> d = [4, 36, 45, 50, 75] >>> mean(d) 42.0
https://github.com/packtpublishing/functional-python-programming-second-edition/blob/0ec14a74e3bf92fcbcea471ee43e8d22a61b7b35/Chapter04/ch04_ex4.py#L31-L39
from math import sqrt from collections import Sequence def s0(samples: Sequence) -> float: return sum(1 for x in samples) def s1(samples: Sequence) -> float: return sum(samples) def s2(samples: Sequence) -> float: return sum(x**2 for x in samples)
MIT License
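A short follow-up sketch in the same style: the s0/s1/s2 running sums defined in the context above also yield the population standard deviation (the stdev name is illustrative and assumes those helpers and imports are in scope):

def stdev(samples: Sequence) -> float:
    n, s, s_sq = s0(samples), s1(samples), s2(samples)
    return sqrt(n * s_sq - s ** 2) / n

d = [4, 36, 45, 50, 75]
print(mean(d))    # 42.0
print(stdev(d))   # ~22.99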
masterscrat/chatistics
parsers/utils.py
detect_language
python
def detect_language(df, min_token_count=5): for name, group in df.groupby(df.conversationWithName): text = ' '.join(group['text'].dropna().values[:100]) if len(text.split()) >= min_token_count: try: lang = langdetect.detect(text) except LangDetectException: lang = 'unknown' else: lang = 'unknown' df.loc[group.index, 'language'] = lang return df
Detects language of input text
https://github.com/masterscrat/chatistics/blob/c091db38099f9edf9b39c2ed5fe99ace6a864d87/parsers/utils.py#L22-L34
import os import datetime import logging import langdetect from langdetect.lang_detect_exception import LangDetectException log = logging.getLogger(__name__) def export_dataframe(df, filename): filepath = os.path.join('data', filename) log.info(f'Saving to pickle file {filepath}...') df.to_pickle(filepath) def timestamp_to_ordinal(value): return datetime.datetime.fromtimestamp(float(value)).toordinal()
MIT License
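A hypothetical usage sketch of detect_language with a toy DataFrame; the column values are invented, and pandas plus langdetect must be installed:

import pandas as pd

df = pd.DataFrame({
    'conversationWithName': ['alice', 'alice', 'bob'],
    'text': ['Hello, how are you doing today my friend?',
             'See you tomorrow at the usual place', 'hi'],
})
df = detect_language(df, min_token_count=5)
print(df[['conversationWithName', 'language']].drop_duplicates())
# alice's conversation has enough tokens to be detected (likely 'en'); bob's stays 'unknown'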
okfn/bibserver
bibserver/web.py
set_current_user
python
def set_current_user(): return dict(current_user=current_user)
Set some template context globals.
https://github.com/okfn/bibserver/blob/96ab295e9f11a2d29ffabd60b0058ca7a1ec0f7c/bibserver/web.py#L34-L36
import os import urllib2 import unicodedata import httplib import json import subprocess from copy import deepcopy from datetime import datetime from flask import Flask, jsonify, json, request, redirect, abort, make_response from flask import render_template, flash from flask.views import View, MethodView from flask_login import login_user, current_user import bibserver.dao import bibserver.util as util import bibserver.importer import bibserver.ingest from bibserver.config import config from bibserver.core import app, login_manager from bibserver.view.account import blueprint as account from bibserver import auth app.register_blueprint(account, url_prefix='/account') @login_manager.user_loader def load_account_for_login_manager(userid): out = bibserver.dao.Account.get(userid) return out @app.context_processor
MIT License
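A minimal standalone illustration of the context-processor pattern used by set_current_user above; the app and injected value here are invented, not bibserver's:

from flask import Flask, render_template_string

demo = Flask(__name__)

@demo.context_processor
def inject_current_user():
    # Every template rendered by this app can now reference {{ current_user }}.
    return dict(current_user='anonymous')

@demo.route('/')
def index():
    return render_template_string('Hello {{ current_user }}')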
roclark/sportsipy
sportsipy/nfl/teams.py
Team.losses
python
def losses(self): return self._losses
Returns an ``int`` of the number of games the team lost during the season.
https://github.com/roclark/sportsipy/blob/c19f545d3376d62ded6304b137dc69238ac620a9/sportsipy/nfl/teams.py#L275-L280
import pandas as pd import re from .constants import (CONF_CHAMPIONSHIP, DIVISION, LOST_CONF_CHAMPS, LOST_DIVISIONAL, LOST_SUPER_BOWL, LOST_WILD_CARD, PARSING_SCHEME, SUPER_BOWL, WILD_CARD, WON_SUPER_BOWL) from ..constants import LOSS, WIN from ..decorators import float_property_decorator, int_property_decorator from .. import utils from .nfl_utils import _retrieve_all_teams from .roster import Roster from .schedule import Schedule class Team: def __init__(self, team_name=None, team_data=None, rank=None, year=None, season_page=None): self._year = year self._rank = rank self._abbreviation = None self._name = None self._wins = None self._losses = None self._win_percentage = None self._games_played = None self._points_for = None self._points_against = None self._points_difference = None self._margin_of_victory = None self._strength_of_schedule = None self._simple_rating_system = None self._offensive_simple_rating_system = None self._defensive_simple_rating_system = None self._yards = None self._plays = None self._yards_per_play = None self._turnovers = None self._fumbles = None self._first_downs = None self._pass_completions = None self._pass_attempts = None self._pass_yards = None self._pass_touchdowns = None self._interceptions = None self._pass_net_yards_per_attempt = None self._pass_first_downs = None self._rush_attempts = None self._rush_yards = None self._rush_touchdowns = None self._rush_yards_per_attempt = None self._rush_first_downs = None self._penalties = None self._yards_from_penalties = None self._first_downs_from_penalties = None self._percent_drives_with_points = None self._percent_drives_with_turnovers = None self._points_contributed_by_offense = None if team_name: team_data = self._retrieve_team_data(year, team_name, season_page) self._parse_team_data(team_data) def __str__(self): return f'{self.name} ({self.abbreviation}) - {self._year}' def __repr__(self): return self.__str__() def _retrieve_team_data(self, year, team_name, season_page): team_data_dict, year = _retrieve_all_teams(year, season_page) self._year = year team_data = team_data_dict[team_name]['data'] self._rank = team_data_dict[team_name]['rank'] return team_data def _parse_team_data(self, team_data): for field in self.__dict__: if field == '_rank' or field == '_year': continue value = utils._parse_field(PARSING_SCHEME, team_data, str(field)[1:]) setattr(self, field, value) @property def dataframe(self): fields_to_include = { 'abbreviation': self.abbreviation, 'defensive_simple_rating_system': self.defensive_simple_rating_system, 'first_downs': self.first_downs, 'first_downs_from_penalties': self.first_downs_from_penalties, 'fumbles': self.fumbles, 'games_played': self.games_played, 'interceptions': self.interceptions, 'losses': self.losses, 'margin_of_victory': self.margin_of_victory, 'name': self.name, 'offensive_simple_rating_system': self.offensive_simple_rating_system, 'pass_attempts': self.pass_attempts, 'pass_completions': self.pass_completions, 'pass_first_downs': self.pass_first_downs, 'pass_net_yards_per_attempt': self.pass_net_yards_per_attempt, 'pass_touchdowns': self.pass_touchdowns, 'pass_yards': self.pass_yards, 'penalties': self.penalties, 'percent_drives_with_points': self.percent_drives_with_points, 'percent_drives_with_turnovers': self.percent_drives_with_turnovers, 'plays': self.plays, 'points_against': self.points_against, 'points_contributed_by_offense': self.points_contributed_by_offense, 'points_difference': self.points_difference, 'points_for': self.points_for, 'post_season_result': 
self.post_season_result, 'rank': self.rank, 'rush_attempts': self.rush_attempts, 'rush_first_downs': self.rush_first_downs, 'rush_touchdowns': self.rush_touchdowns, 'rush_yards': self.rush_yards, 'rush_yards_per_attempt': self.rush_yards_per_attempt, 'simple_rating_system': self.simple_rating_system, 'strength_of_schedule': self.strength_of_schedule, 'turnovers': self.turnovers, 'win_percentage': self.win_percentage, 'wins': self.wins, 'yards': self.yards, 'yards_from_penalties': self.yards_from_penalties, 'yards_per_play': self.yards_per_play } return pd.DataFrame([fields_to_include], index=[self._abbreviation]) @int_property_decorator def rank(self): return self._rank @property def abbreviation(self): return self._abbreviation @property def schedule(self): return Schedule(self._abbreviation, self._year) @property def roster(self): return Roster(self._abbreviation, self._year) @property def name(self): return self._name @int_property_decorator def wins(self): return self._wins @int_property_decorator
MIT License
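A hypothetical usage sketch of the Team properties above; the abbreviation and season are made up, and constructing a Team triggers a network request to pro-football-reference:

from sportsipy.nfl.teams import Team

team = Team('KAN', year='2019')
print(team.name, team.wins, team.losses, team.win_percentage)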
gabrielstanovsky/props
props/bottle.py
Request.COOKIES
python
def COOKIES(self): raw_dict = SimpleCookie(self.headers.get('Cookie','')) cookies = {} for cookie in raw_dict.itervalues(): cookies[cookie.key] = cookie.value return cookies
Cookies parsed into a dictionary. Secure cookies are NOT decoded automatically. See :meth:`get_cookie` for details.
https://github.com/gabrielstanovsky/props/blob/c6392016214ee582de4eaf364e518078f9bd182b/props/bottle.py#L882-L890
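An illustrative sketch of the SimpleCookie parsing that the COOKIES property performs on the raw Cookie header; the cookie names and values are invented, and the Cookie module is the Python 2 one used by this bundled bottle 0.9 code:

from Cookie import SimpleCookie

raw = SimpleCookie('session=abc123; theme=dark')
cookies = {c.key: c.value for c in raw.values()}
print(cookies['theme'])   # dark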
from __future__ import with_statement __author__ = 'Marcel Hellkamp' __version__ = '0.9.dev' __license__ = 'MIT' import base64 import cgi import email.utils import functools import hmac import httplib import itertools import mimetypes import os import re import subprocess import sys import tempfile import thread import threading import time import warnings from Cookie import SimpleCookie from tempfile import TemporaryFile from traceback import format_exc from urllib import quote as urlquote from urlparse import urlunsplit, urljoin try: from collections import MutableMapping as DictMixin except ImportError: from UserDict import DictMixin try: from urlparse import parse_qs except ImportError: from cgi import parse_qs try: import cPickle as pickle except ImportError: import pickle try: from json import dumps as json_dumps except ImportError: try: from simplejson import dumps as json_dumps except ImportError: try: from django.utils.simplejson import dumps as json_dumps except ImportError: json_dumps = None if sys.version_info >= (3,0,0): from io import BytesIO from io import TextIOWrapper class NCTextIOWrapper(TextIOWrapper): def close(self): pass def touni(x, enc='utf8'): return str(x, encoding=enc) if isinstance(x, bytes) else str(x) else: from StringIO import StringIO as BytesIO bytes = str NCTextIOWrapper = None def touni(x, enc='utf8'): return x if isinstance(x, unicode) else unicode(str(x), encoding=enc) def tob(data, enc='utf8'): return data.encode(enc) if isinstance(data, unicode) else bytes(data) if sys.version_info >= (3,0,0): tonat = touni else: tonat = tob tonat.__doc__ = """ Convert anything to native strings """ def depr(message, critical=False): if critical: raise DeprecationWarning(message) warnings.warn(message, DeprecationWarning, stacklevel=3) def makelist(data): if isinstance(data, (tuple, list, set, dict)): return list(data) elif data: return [data] else: return [] class DictProperty(object): def __init__(self, attr, key=None, read_only=False): self.attr, self.key, self.read_only = attr, key, read_only def __call__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter, self.key = func, self.key or func.__name__ return self def __get__(self, obj, cls): if not obj: return self key, storage = self.key, getattr(obj, self.attr) if key not in storage: storage[key] = self.getter(obj) return storage[key] def __set__(self, obj, value): if self.read_only: raise ApplicationError("Read-Only property.") getattr(obj, self.attr)[self.key] = value def __delete__(self, obj): if self.read_only: raise ApplicationError("Read-Only property.") del getattr(obj, self.attr)[self.key] def cached_property(func): return DictProperty('__dict__')(func) class lazy_attribute(object): def __init__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter = func def __get__(self, obj, cls): value = self.getter(cls) setattr(cls, self.__name__, value) return value class BottleException(Exception): pass class HTTPResponse(BottleException): def __init__(self, output='', status=200, header=None): super(BottleException, self).__init__("HTTP Response %d" % status) self.status = int(status) self.output = output self.headers = HeaderDict(header) if header else None def apply(self, response): if self.headers: for key, value in self.headers.iterallitems(): response.headers[key] = value response.status = self.status class HTTPError(HTTPResponse): def __init__(self, code=500, output='Unknown Error', exception=None, traceback=None, header=None): super(HTTPError, self).__init__(output, 
code, header) self.exception = exception self.traceback = traceback def __repr__(self): return template(ERROR_PAGE_TEMPLATE, e=self) class RouteError(BottleException): class RouteSyntaxError(RouteError): class RouteBuildError(RouteError): class Router(object): default = '[^/]+' @lazy_attribute def syntax(cls): return re.compile(r'(?<!\\):([a-zA-Z_][a-zA-Z_0-9]*)?(?:#(.*?)#)?') def __init__(self): self.routes = {} self.rules = [] self.named = {} self.static = {} self.dynamic = [] def add(self, rule, method, target, name=None): if rule in self.routes: self.routes[rule][method.upper()] = target else: self.routes[rule] = {method.upper(): target} self.rules.append(rule) if self.static or self.dynamic: self.static, self.dynamic = {}, {} if name: self.named[name] = (rule, None) def delete(self, rule, method=None): if rule not in self.routes and rule in self.named: rule = self.named[rule][0] if rule in self.routes: if method: del self.routes[rule][method] else: self.routes[rule].clear() if not self.routes[rule]: del self.routes[rule] self.rules.remove(rule) def build(self, _name, *anon, **args): if _name not in self.named: raise RouteBuildError("No route with that name.", _name) rule, pairs = self.named[_name] if not pairs: token = self.syntax.split(rule) parts = [p.replace('\\:',':') for p in token[::3]] names = token[1::3] if len(parts) > len(names): names.append(None) pairs = zip(parts, names) self.named[_name] = (rule, pairs) try: anon = list(anon) url = [s if k is None else s+str(args.pop(k)) if k else s+str(anon.pop()) for s, k in pairs] except IndexError: msg = "Not enough arguments to fill out anonymous wildcards." raise RouteBuildError(msg) except KeyError, e: raise RouteBuildError(*e.args) if args: url += ['?', urlencode(args.iteritems())] return ''.join(url) def match(self, environ): targets, urlargs = self._match_path(environ) if not targets: raise HTTPError(404, "Not found: " + environ['PATH_INFO']) environ['router.url_args'] = urlargs method = environ['REQUEST_METHOD'].upper() if method in targets: return targets[method], urlargs if method == 'HEAD' and 'GET' in targets: return targets['GET'], urlargs if 'ANY' in targets: return targets['ANY'], urlargs allowed = [verb for verb in targets if verb != 'ANY'] if 'GET' in allowed and 'HEAD' not in allowed: allowed.append('HEAD') raise HTTPError(405, "Method not allowed.", header=[('Allow',",".join(allowed))]) def _match_path(self, environ): path = environ['PATH_INFO'] or '/' match = self.static.get(path) if match: return match, {} for combined, rules in self.dynamic: match = combined.match(path) if not match: continue gpat, match = rules[match.lastindex - 1] return match, gpat.match(path).groupdict() if gpat else {} if self.static or self.dynamic or not self.routes: return None, {} if not environ.get('wsgi.run_once'): self._compile() return self._match_path(environ) epath = path.replace(':','\\:') match = self.routes.get(epath) if match: return match, {} for rule in self.rules: if rule.count(':') < rule.count('\\:'): continue match = self._compile_pattern(rule).match(path) if match: return self.routes[rule], match.groupdict() return None, {} def _compile(self): self.static = {} self.dynamic = [] def fpat_sub(m): return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:' for rule in self.rules: target = self.routes[rule] if not self.syntax.search(rule): self.static[rule.replace('\\:',':')] = target continue gpat = self._compile_pattern(rule) fpat = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, gpat.pattern) gpat = gpat if 
gpat.groupindex else None try: combined = '%s|(%s)' % (self.dynamic[-1][0].pattern, fpat) self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1]) self.dynamic[-1][1].append((gpat, target)) except (AssertionError, IndexError), e: self.dynamic.append((re.compile('(^%s$)'%fpat), [(gpat, target)])) except re.error, e: raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e)) def _compile_pattern(self, rule): out = '' for i, part in enumerate(self.syntax.split(rule)): if i%3 == 0: out += re.escape(part.replace('\\:',':')) elif i%3 == 1: out += '(?P<%s>' % part if part else '(?:' else: out += '%s)' % (part or '[^/]+') return re.compile('^%s$'%out) class Bottle(object): def __init__(self, catchall=True, autojson=True, config=None): self.routes = [] self.callbacks = {} self.router = Router() self.mounts = {} self.error_handler = {} self.catchall = catchall self.config = config or {} self.serve = True self.castfilter = [] if autojson and json_dumps: self.add_filter(dict, dict2json) self.hooks = {'before_request': [], 'after_request': []} def optimize(self, *a, **ka): depr("Bottle.optimize() is obsolete.") def mount(self, app, script_path): if not isinstance(app, Bottle): raise TypeError('Only Bottle instances are supported for now.') script_path = '/'.join(filter(None, script_path.split('/'))) path_depth = script_path.count('/') + 1 if not script_path: raise TypeError('Empty script_path. Perhaps you want a merge()?') for other in self.mounts: if other.startswith(script_path): raise TypeError('Conflict with existing mount: %s' % other) @self.route('/%s/:#.*#' % script_path, method="ANY") def mountpoint(): request.path_shift(path_depth) return app.handle(request.environ) self.mounts[script_path] = app def add_filter(self, ftype, func): if not isinstance(ftype, type): raise TypeError("Expected type object, got %s" % type(ftype)) self.castfilter = [(t, f) for (t, f) in self.castfilter if t != ftype] self.castfilter.append((ftype, func)) self.castfilter.sort() def match_url(self, path, method='GET'): return self.match({'PATH_INFO': path, 'REQUEST_METHOD': method}) def match(self, environ): target, args = self.router.match(environ) try: return self.callbacks[target], args except KeyError: callback, decorators = self.routes[target] wrapped = callback for wrapper in decorators[::-1]: wrapped = wrapper(wrapped) functools.update_wrapper(wrapped, callback) self.callbacks[target] = wrapped return wrapped, args def get_url(self, routename, **kargs): scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/' location = self.router.build(routename, **kargs).lstrip('/') return urljoin(urljoin('/', scriptname), location) def route(self, path=None, method='GET', no_hooks=False, decorate=None, template=None, template_opts={}, callback=None, name=None, static=False): if callable(path): path, callback = None, path decorators = makelist(decorate) if template: decorators.insert(0, view(template, **template_opts)) if not no_hooks: decorators.append(self._add_hook_wrapper) def wrapper(func): for rule in makelist(path) or yieldroutes(func): for verb in makelist(method): if static: rule = rule.replace(':','\\:') depr("Use backslash to escape ':' in routes.") self.router.add(rule, verb, len(self.routes), name=name) self.routes.append((func, decorators)) return func return wrapper(callback) if callback else wrapper def _add_hook_wrapper(self, func): @functools.wraps(func) def wrapper(*a, **ka): for hook in self.hooks['before_request']: hook() response.output = func(*a, **ka) for hook in 
self.hooks['after_request']: hook() return response.output return wrapper def get(self, path=None, method='GET', **kargs): return self.route(path, method, **kargs) def post(self, path=None, method='POST', **kargs): return self.route(path, method, **kargs) def put(self, path=None, method='PUT', **kargs): return self.route(path, method, **kargs) def delete(self, path=None, method='DELETE', **kargs): return self.route(path, method, **kargs) def error(self, code=500): def wrapper(handler): self.error_handler[int(code)] = handler return handler return wrapper def hook(self, name): def wrapper(func): self.add_hook(name, func) return func return wrapper def add_hook(self, name, func): if name not in self.hooks: raise ValueError("Unknown hook name %s" % name) if name in ('after_request'): self.hooks[name].insert(0, func) else: self.hooks[name].append(func) def remove_hook(self, name, func): if name not in self.hooks: raise ValueError("Unknown hook name %s" % name) self.hooks[name].remove(func) def handle(self, environ): if not self.serve: return HTTPError(503, "Server stopped") try: handler, args = self.match(environ) return handler(**args) except HTTPResponse, e: return e except Exception, e: if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError)) or not self.catchall: raise return HTTPError(500, 'Unhandled exception', e, format_exc(10)) def _cast(self, out, request, response, peek=None): for testtype, filterfunc in self.castfilter: if isinstance(out, testtype): return self._cast(filterfunc(out), request, response) if not out: response.headers['Content-Length'] = 0 return [] if isinstance(out, (tuple, list)) and isinstance(out[0], (bytes, unicode)): out = out[0][0:0].join(out) if isinstance(out, unicode): out = out.encode(response.charset) if isinstance(out, bytes): response.headers['Content-Length'] = str(len(out)) return [out] if isinstance(out, HTTPError): out.apply(response) return self._cast(self.error_handler.get(out.status, repr)(out), request, response) if isinstance(out, HTTPResponse): out.apply(response) return self._cast(out.output, request, response) if hasattr(out, 'read'): if 'wsgi.file_wrapper' in request.environ: return request.environ['wsgi.file_wrapper'](out) elif hasattr(out, 'close') or not hasattr(out, '__iter__'): return WSGIFileWrapper(out) try: out = iter(out) first = out.next() while not first: first = out.next() except StopIteration: return self._cast('', request, response) except HTTPResponse, e: first = e except Exception, e: first = HTTPError(500, 'Unhandled exception', e, format_exc(10)) if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError)) or not self.catchall: raise if isinstance(first, HTTPResponse): return self._cast(first, request, response) if isinstance(first, bytes): return itertools.chain([first], out) if isinstance(first, unicode): return itertools.imap(lambda x: x.encode(response.charset), itertools.chain([first], out)) return self._cast(HTTPError(500, 'Unsupported response type: %s' % type(first)), request, response) def wsgi(self, environ, start_response): try: environ['bottle.app'] = self request.bind(environ) response.bind() out = self.handle(environ) out = self._cast(out, request, response) if response.status in (100, 101, 204, 304) or request.method == 'HEAD': if hasattr(out, 'close'): out.close() out = [] status = '%d %s' % (response.status, HTTP_CODES[response.status]) start_response(status, response.headerlist) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception, e: if not self.catchall: raise err 
= '<h1>Critical error while processing request: %s</h1>' % environ.get('PATH_INFO', '/') if DEBUG: err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e) err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10) environ['wsgi.errors'].write(err) start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')]) return [tob(err)] def __call__(self, environ, start_response): return self.wsgi(environ, start_response) class Request(threading.local, DictMixin): def __init__(self, environ=None): self.bind(environ or {},) def bind(self, environ): self.environ = environ self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/') self.method = environ.get('REQUEST_METHOD', 'GET').upper() @property def _environ(self): depr("Request._environ renamed to Request.environ") return self.environ def copy(self): return Request(self.environ.copy()) def path_shift(self, shift=1): script_name = self.environ.get('SCRIPT_NAME','/') self['SCRIPT_NAME'], self.path = path_shift(script_name, self.path, shift) self['PATH_INFO'] = self.path def __getitem__(self, key): return self.environ[key] def __delitem__(self, key): self[key] = ""; del(self.environ[key]) def __iter__(self): return iter(self.environ) def __len__(self): return len(self.environ) def keys(self): return self.environ.keys() def __setitem__(self, key, value): self.environ[key] = value todelete = [] if key in ('PATH_INFO','REQUEST_METHOD'): self.bind(self.environ) elif key == 'wsgi.input': todelete = ('body','forms','files','params') elif key == 'QUERY_STRING': todelete = ('get','params') elif key.startswith('HTTP_'): todelete = ('headers', 'cookies') for key in todelete: if 'bottle.' + key in self.environ: del self.environ['bottle.' + key] @property def query_string(self): return self.environ.get('QUERY_STRING', '') @property def fullpath(self): return self.environ.get('SCRIPT_NAME', '').rstrip('/') + self.path @property def url(self): scheme = self.environ.get('wsgi.url_scheme', 'http') host = self.environ.get('HTTP_X_FORWARDED_HOST') host = host or self.environ.get('HTTP_HOST', None) if not host: host = self.environ.get('SERVER_NAME') port = self.environ.get('SERVER_PORT', '80') if (scheme, port) not in (('https','443'), ('http','80')): host += ':' + port parts = (scheme, host, urlquote(self.fullpath), self.query_string, '') return urlunsplit(parts) @property def content_length(self): return int(self.environ.get('CONTENT_LENGTH', '') or -1) @property def header(self): depr("The Request.header property was renamed to Request.headers") return self.headers @DictProperty('environ', 'bottle.headers', read_only=True) def headers(self): return WSGIHeaderDict(self.environ) @DictProperty('environ', 'bottle.get', read_only=True) def GET(self): data = parse_qs(self.query_string, keep_blank_values=True) get = self.environ['bottle.get'] = MultiDict() for key, values in data.iteritems(): for value in values: get[key] = value return get @DictProperty('environ', 'bottle.post', read_only=True) def POST(self): post = MultiDict() safe_env = {'QUERY_STRING':''} for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): if key in self.environ: safe_env[key] = self.environ[key] if NCTextIOWrapper: fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n') else: fb = self.body data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True) for item in data.list or []: post[item.name] = item if item.filename else item.value return post @DictProperty('environ', 'bottle.forms', read_only=True) def forms(self): forms = MultiDict() for name, 
item in self.POST.iterallitems(): if not hasattr(item, 'filename'): forms[name] = item return forms @DictProperty('environ', 'bottle.files', read_only=True) def files(self): files = MultiDict() for name, item in self.POST.iterallitems(): if hasattr(item, 'filename'): files[name] = item return files @DictProperty('environ', 'bottle.params', read_only=True) def params(self): params = MultiDict(self.GET) for key, value in self.forms.iterallitems(): params[key] = value return params @DictProperty('environ', 'bottle.body', read_only=True) def _body(self): maxread = max(0, self.content_length) stream = self.environ['wsgi.input'] body = BytesIO() if maxread < MEMFILE_MAX else TemporaryFile(mode='w+b') while maxread > 0: part = stream.read(min(maxread, MEMFILE_MAX)) if not part: break body.write(part) maxread -= len(part) self.environ['wsgi.input'] = body body.seek(0) return body @property def body(self): self._body.seek(0) return self._body @property def auth(self): return parse_auth(self.headers.get('Authorization','')) @DictProperty('environ', 'bottle.cookies', read_only=True)
MIT License
pennlabs/penn-courses
backend/review/management/commands/mergeinstructors.py
resolve_duplicates
python
def resolve_duplicates( duplicate_instructor_groups: List[List[Instructor]], dry_run: bool, stat, force=False ): for instructor_set in tqdm(duplicate_instructor_groups): potential_primary = [inst for inst in instructor_set if inst.user is not None] if len(potential_primary) == 0: stat(INSTRUCTORS_KEPT, 1) primary_instructor = instructor_set[0] elif len(potential_primary) == 1: stat(INSTRUCTORS_KEPT, 1) primary_instructor = potential_primary[0] else: if len(set([inst.user.pk for inst in potential_primary])) == 1 or force: stat(INSTRUCTORS_KEPT, 1) primary_instructor = potential_primary[0] else: stat(INSTRUCTORS_KEPT, len(instructor_set)) stat(INSTRUCTORS_UNMERGED, element=[i.pk for i in instructor_set]) continue duplicate_instructors = [ inst for inst in instructor_set if inst.pk != primary_instructor.pk ] for duplicate_instructor in duplicate_instructors: sections = Section.objects.filter(instructors=duplicate_instructor) for section in sections: stat(SECTIONS_MODIFIED, 1) if not dry_run: section.instructors.remove(duplicate_instructor) section.instructors.add(primary_instructor) reviews = Review.objects.filter(instructor=duplicate_instructor) for review in reviews: stat(REVIEWS_MODIFIED, 1) if not dry_run: review.instructor = primary_instructor review.save() stat(INSTRUCTORS_REMOVED, 1) if not dry_run: duplicate_instructor.delete()
Given a list of lists of duplicate instructor groups, resolve the foreign-key and many-to-many relationships among the duplicates so that they all point to the same instance. :param duplicate_instructor_groups: List of lists of duplicate instructors, e.g. [[a, a, a], [b, b]] :param dry_run: If True, just calculate stats without actually modifying the database. :param stat: Function used to collect statistics. :param force: Manually override conflicting user information.
https://github.com/pennlabs/penn-courses/blob/6fd16c151e34a9660e883a41458a72cef6c1f8cd/backend/review/management/commands/mergeinstructors.py#L38-L102
import logging from typing import Callable, Dict, List, Optional from django.core.management import BaseCommand from django.db.models.functions import Lower from tqdm import tqdm from courses.models import Instructor, Section from review.models import Review INSTRUCTORS_KEPT = "instructors kept" INSTRUCTORS_REMOVED = "instructors removed" SECTIONS_MODIFIED = "sections modified" REVIEWS_MODIFIED = "reviews modified" INSTRUCTORS_UNMERGED = "instructors unmerged" def batch_duplicates(qs, get_prop) -> List[List[Instructor]]: rows_by_prop = dict() for row in tqdm(qs): prop = get_prop(row) if prop is None: continue rows_by_prop.setdefault(prop, []).append(row) return [rows for name, rows in rows_by_prop.items() if len(rows) > 1]
MIT License
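A minimal sketch of the stat collector that resolve_duplicates expects, assuming the same call shapes used in the function above (stat(KEY, 1) for counters, stat(KEY, element=[...]) for lists); this collector is hypothetical and only stands in for the management command's own bookkeeping.

from collections import defaultdict

counts = defaultdict(int)
lists = defaultdict(list)

def stat(key, amount=0, element=None):
    # Counter-style calls: stat("sections modified", 1)
    # List-style calls: stat("instructors unmerged", element=[pk1, pk2, ...])
    if element is not None:
        lists[key].append(element)
    else:
        counts[key] += amount

stat("instructors kept", 1)
stat("instructors unmerged", element=[3, 7, 9])
print(dict(counts), dict(lists))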
bread-and-pepper/django-brookie
brookie/views.py
generate_pdf
python
def generate_pdf(filename, context_dict, template, save=False): template = get_template(template) context = Context(context_dict) html = template.render(context) html = html.replace('-pageskip-', '<pdf:nextpage />') result = StringIO.StringIO() pdf = pisa.pisaDocument(StringIO.StringIO( html.encode("UTF-8")), result, link_callback=fetch_resources) if not pdf.err: if not save: response = HttpResponse(result.getvalue(), mimetype='application/pdf') response['Content-Disposition'] = 'attachment; filename=%s.pdf' % filename return response else: f = open(br_settings.BROOKIE_SAVE_PATH + '%s.pdf' % filename, 'w') f.write(result.getvalue()) f.close() return True return http.HttpResponse('There was an error creating your PDF: %s' % cgi.escape(html))
Generates an invoice PDF in the desired language.
https://github.com/bread-and-pepper/django-brookie/blob/3ef1094b46af2d419227031700952a97222a5f78/brookie/views.py#L33-L54
from django.http import HttpResponse, HttpResponseRedirect, Http404 from django.contrib.auth.decorators import user_passes_test from django.template.loader import get_template from django.template import Context from django.shortcuts import get_object_or_404 from django.conf import settings from django.core.urlresolvers import reverse import ho.pisa as pisa import cStringIO as StringIO import cgi, os, datetime from brookie.models import Invoice, Quote, Item from brookie import brookie_settings as br_settings def user_is_staff(user): return user.is_authenticated() and user.is_staff def fetch_resources(uri, rel): if 'django.contrib.staticfiles' in settings.INSTALLED_APPS: path = os.path.join(settings.STATIC_ROOT, uri.replace(settings.STATIC_URL, "")) else: path = os.path.join(settings.MEDIA_ROOT, uri.replace(settings.MEDIA_URL, "")) return path
BSD 3-Clause New or Revised License
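A minimal sketch of the HTML-to-PDF step this view helper wraps, assuming the legacy Python 2 stack it targets (ho.pisa / xhtml2pdf and cStringIO) is installed; the HTML string and output filename are illustrative, and Django template rendering is left out.

import cStringIO as StringIO
import ho.pisa as pisa

html = u"<h1>Invoice 2021-001</h1><p>Total: 100.00 EUR</p>"
result = StringIO.StringIO()
# pisaDocument consumes UTF-8 encoded HTML and writes the rendered PDF into `result`.
pdf = pisa.pisaDocument(StringIO.StringIO(html.encode("UTF-8")), result)
if not pdf.err:
    with open("invoice.pdf", "wb") as f:
        f.write(result.getvalue())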
josephbestjames/airtable.py
airtable/__init__.py
Airtable.iterate
python
def iterate( self, table_name, batch_size=0, filter_by_formula=None, view=None, max_records=0, fields=None): offset = None while True: response = self.get( table_name, limit=batch_size, offset=offset, max_records=max_records, fields=fields, filter_by_formula=filter_by_formula, view=view) for record in response.pop('records'): yield record if 'offset' in response: offset = response['offset'] else: break
Iterate over all records of a table. Args: table_name: the name of the table to list. batch_size: the number of records to fetch per request. The default (0) uses the API's own default, which is (as of 2016-09) 100. Note that the API does not allow more than that (but allows less). filter_by_formula: a formula used to filter records. The formula will be evaluated for each record, and if the result is not 0, false, "", NaN, [], or #Error! the record will be included in the response. If combined with view, only records in that view which satisfy the formula will be returned. view: the name or ID of a view in the table. If set, only the records in that view will be returned, sorted according to the order of the view. max_records: the maximum total number of records to return; 0 means no limit. fields: an optional list of field names to include in the returned records. Yields: A dict for each record containing at least three fields: "id", "createdTime" and "fields".
https://github.com/josephbestjames/airtable.py/blob/ad41a838a8a37e4711ee1997cbf07b994f8be252/airtable/__init__.py#L138-L171
from collections import OrderedDict import json import posixpath from typing import Any, Generic, Mapping, TypeVar import warnings import requests import six API_URL = 'https://api.airtable.com/v%s/' API_VERSION = '0' class IsNotInteger(Exception): pass class IsNotString(Exception): pass def check_integer(integer): if not integer: return False if not isinstance(integer, six.integer_types): raise IsNotInteger('Expected an integer', integer) return True def check_string(string): if not string: return False if not isinstance(string, six.string_types): raise IsNotString('Expected a string', string) return True def create_payload(data): return {'fields': data} _T = TypeVar('_T', bound=Mapping[str, Any]) class Record(Generic[_T]): def __init__(self): raise NotImplementedError( 'This class is only used as a type of records returned by this module, however ' 'it should only be used as a type (see typing stubs) and not as an actual class.' ) def __getitem__(self, key): pass def get(self, key, default=None): pass class AirtableError(Exception): def __init__(self, error_type, message): super(AirtableError, self).__init__() self.type = error_type self.message = message def __repr__(self): return '%s(%r, %r)' % (self.__class__.__name__, self.type, self.message) def __str__(self): return self.message or self.__class__.__name__ class Airtable(object): def __init__(self, base_id, api_key, dict_class=OrderedDict): self.airtable_url = API_URL % API_VERSION self.base_url = posixpath.join(self.airtable_url, base_id) self.headers = {'Authorization': 'Bearer %s' % api_key} self._dict_class = dict_class def __request(self, method, url, params=None, payload=None): if method in ['POST', 'PUT', 'PATCH']: self.headers.update({'Content-type': 'application/json'}) response = requests.request( method, posixpath.join(self.base_url, url), params=params, data=payload, headers=self.headers) if response.status_code == requests.codes.ok: return response.json(object_pairs_hook=self._dict_class) error_json = response.json().get('error', {}) raise AirtableError( error_type=error_json.get('type', str(response.status_code)), message=error_json.get('message', json.dumps(response.json()))) def get( self, table_name, record_id=None, limit=0, offset=None, filter_by_formula=None, view=None, max_records=0, fields=None): params = {} if check_string(record_id): url = posixpath.join(table_name, record_id) else: url = table_name if limit and check_integer(limit): params.update({'pageSize': limit}) if offset and check_string(offset): params.update({'offset': offset}) if filter_by_formula is not None: params.update({'filterByFormula': filter_by_formula}) if view is not None: params.update({'view': view}) if max_records and check_integer(max_records): params.update({'maxRecords': max_records}) if fields and isinstance(fields, (list, tuple)): for field in fields: check_string(field) if len(fields) == 1: fields = fields + fields params.update({'fields': fields}) return self.__request('GET', url, params)
MIT License
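A minimal usage sketch for iterate, assuming the airtable package above is importable; the base id, API key, table name, and field names are placeholders.

from airtable import Airtable

at = Airtable('appXXXXXXXXXXXXXX', 'keyXXXXXXXXXXXXXX')
# Pages through the table transparently; each yielded record is a dict with
# "id", "createdTime" and "fields".
for record in at.iterate('Contacts', batch_size=50, fields=['Name', 'Email']):
    print(record['id'], record['fields'].get('Name'))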
karlgong/easyium-python
easyium/web_driver.py
WebDriver.get_web_driver
python
def get_web_driver(self): return self
Get self. :return: self
https://github.com/karlgong/easyium-python/blob/3683c9895e0e1164848df082d456b6801f29e68a/easyium/web_driver.py#L46-L52
from appium.webdriver.clipboard_content_type import ClipboardContentType from appium.webdriver.common.multi_action import MultiAction from appium.webdriver.common.touch_action import TouchAction from appium.webdriver.webdriver import WebDriver as _Appium from selenium.common.exceptions import WebDriverException from selenium.webdriver import ActionChains, Ie as _Ie, Firefox as _Firefox, Chrome as _Chrome, Opera as _Opera, Safari as _Safari, Edge as _Edge, PhantomJS as _PhantomJS, Remote as _Remote from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from .alert import Alert from .context import Context from .decorator import SupportedBy from .enumeration import WebDriverPlatform, WebDriverContext from .utils import StringTypes from .waiter import WebDriverWaitFor class WebDriverInfo: def __init__(self, platform, context): self.platform = platform self.context = context class WebDriver(Context): def __init__(self, selenium_web_driver, web_driver_info): Context.__init__(self) self.__selenium_web_driver = selenium_web_driver self.__web_driver_info = web_driver_info self.set_wait_interval(1000) self.set_wait_timeout(30000) def _selenium_context(self): return self.__selenium_web_driver def _selenium_web_driver(self): return self.__selenium_web_driver
Apache License 2.0
rajammanabrolu/worldgeneration
evennia-engine/evennia/evennia/comms/channelhandler.py
ChannelCommand.get_extra_info
python
def get_extra_info(self, caller, **kwargs): return _(" (channel)")
Let users know that this command is for communicating on a channel. Args: caller (TypedObject): A Character or Account who has entered an ambiguous command. Returns: A string with identifying information to disambiguate the object, conventionally with a preceding space.
https://github.com/rajammanabrolu/worldgeneration/blob/5e97df013399e1a401d0a7ec184c4b9eb3100edd/evennia-engine/evennia/evennia/comms/channelhandler.py#L147-L157
from django.conf import settings from evennia.commands import cmdset, command from evennia.utils.logger import tail_log_file from evennia.utils.utils import class_from_module from django.utils.translation import ugettext as _ _CHANNEL_COMMAND_CLASS = None _CHANNELDB = None class ChannelCommand(command.Command): is_channel = True key = "general" help_category = "Channel Names" obj = None arg_regex = r"\s.*?|/history.*?" def parse(self): channelname, msg = self.args.split(":", 1) self.history_start = None if msg.startswith("/history"): arg = msg[8:] try: self.history_start = int(arg) if arg else 0 except ValueError: pass self.args = (channelname.strip(), msg.strip()) def func(self): global _CHANNELDB if not _CHANNELDB: from evennia.comms.models import ChannelDB as _CHANNELDB channelkey, msg = self.args caller = self.caller if not msg: self.msg(_("Say what?")) return channel = _CHANNELDB.objects.get_channel(channelkey) if not channel: self.msg(_("Channel '%s' not found.") % channelkey) return if not channel.has_connection(caller): string = _("You are not connected to channel '%s'.") self.msg(string % channelkey) return if not channel.access(caller, "send"): string = _("You are not permitted to send to channel '%s'.") self.msg(string % channelkey) return if msg == "on": caller = caller if not hasattr(caller, "account") else caller.account unmuted = channel.unmute(caller) if unmuted: self.msg("You start listening to %s." % channel) return self.msg("You were already listening to %s." % channel) return if msg == "off": caller = caller if not hasattr(caller, "account") else caller.account muted = channel.mute(caller) if muted: self.msg("You stop listening to %s." % channel) return self.msg("You were already not listening to %s." % channel) return if self.history_start is not None: log_file = channel.attributes.get("log_file", default="channel_%s.log" % channel.key) def send_msg(lines): return self.msg( "".join(line.split("[-]", 1)[1] if "[-]" in line else line for line in lines) ) tail_log_file(log_file, self.history_start, 20, callback=send_msg) else: caller = caller if not hasattr(caller, "account") else caller.account if caller in channel.mutelist: self.msg("You currently have %s muted." % channel) return channel.msg(msg, senders=self.caller, online=True)
MIT License
osmr/imgclsmob
pytorch/pytorchcv/models/seresnet.py
seresnet16
python
def seresnet16(**kwargs): return get_seresnet(blocks=16, model_name="seresnet16", **kwargs)
SE-ResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters.
https://github.com/osmr/imgclsmob/blob/ea5f784eea865ce830f3f97c5c1d1f6491d9cbb2/pytorch/pytorchcv/models/seresnet.py#L286-L298
__all__ = ['SEResNet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26', 'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b', 'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b', 'SEResUnit', 'get_seresnet'] import os import torch.nn as nn import torch.nn.init as init from .common import conv1x1_block, SEBlock from .resnet import ResBlock, ResBottleneck, ResInitBlock class SEResUnit(nn.Module): def __init__(self, in_channels, out_channels, stride, bottleneck, conv1_stride): super(SEResUnit, self).__init__() self.resize_identity = (in_channels != out_channels) or (stride != 1) if bottleneck: self.body = ResBottleneck( in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_stride=conv1_stride) else: self.body = ResBlock( in_channels=in_channels, out_channels=out_channels, stride=stride) self.se = SEBlock(channels=out_channels) if self.resize_identity: self.identity_conv = conv1x1_block( in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None) self.activ = nn.ReLU(inplace=True) def forward(self, x): if self.resize_identity: identity = self.identity_conv(x) else: identity = x x = self.body(x) x = self.se(x) x = x + identity x = self.activ(x) return x class SEResNet(nn.Module): def __init__(self, channels, init_block_channels, bottleneck, conv1_stride, in_channels=3, in_size=(224, 224), num_classes=1000): super(SEResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", ResInitBlock( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), SEResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=conv1_stride)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=7, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_seresnet(blocks, bottleneck=None, conv1_stride=True, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): if bottleneck is None: bottleneck = (blocks >= 50) if blocks == 10: layers = [1, 1, 1, 1] elif blocks == 12: layers = [2, 1, 1, 1] elif blocks == 14 and not bottleneck: layers = [2, 2, 1, 1] elif (blocks == 14) and bottleneck: layers = [1, 1, 1, 1] elif blocks == 16: layers = [2, 2, 2, 1] elif blocks == 18: layers = [2, 2, 2, 2] elif (blocks == 26) and not bottleneck: layers = [3, 3, 3, 3] elif (blocks == 26) and bottleneck: layers = [2, 2, 2, 2] elif blocks == 34: layers = [3, 4, 6, 3] elif (blocks == 38) and bottleneck: layers = [3, 3, 3, 3] elif blocks == 50: layers = [3, 4, 6, 3] elif blocks == 101: layers = [3, 4, 23, 3] elif blocks == 152: layers = [3, 8, 36, 3] elif blocks == 200: layers = [3, 24, 36, 3] else: raise ValueError("Unsupported SE-ResNet with number 
of blocks: {}".format(blocks)) if bottleneck: assert (sum(layers) * 3 + 2 == blocks) else: assert (sum(layers) * 2 + 2 == blocks) init_block_channels = 64 channels_per_layers = [64, 128, 256, 512] if bottleneck: bottleneck_factor = 4 channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers] channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] net = SEResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def seresnet10(**kwargs): return get_seresnet(blocks=10, model_name="seresnet10", **kwargs) def seresnet12(**kwargs): return get_seresnet(blocks=12, model_name="seresnet12", **kwargs) def seresnet14(**kwargs): return get_seresnet(blocks=14, model_name="seresnet14", **kwargs)
MIT License
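A minimal forward-pass sketch, assuming PyTorch is installed and that the installed pytorchcv package exposes this module under pytorchcv.models.seresnet; pretrained weights are not requested since seresnet16 is experimental.

import torch
from pytorchcv.models.seresnet import seresnet16

net = seresnet16(num_classes=1000)
net.eval()
# One RGB image at the model's default 224x224 input size.
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    y = net(x)
print(y.shape)  # expected: torch.Size([1, 1000])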
speedml/speedml
speedml/feature.py
Feature.round
python
def round(self, new, a, precision): Base.train[new] = round(Base.train[a], precision) Base.test[new] = round(Base.test[a], precision)
Create ``new`` numeric feature by rounding ``a`` feature value to ``precision`` decimal places.
https://github.com/speedml/speedml/blob/c78effcdf745b723a7b558e1ee4639f7ba173d22/speedml/feature.py#L157-L162
from __future__ import (absolute_import, division, print_function, unicode_literals) from builtins import * from .base import Base from .util import DataFrameImputer import numpy as np from sklearn.preprocessing import LabelEncoder import re class Feature(Base): def drop(self, features): start = Base.train.shape[1] Base.train = Base.train.drop(features, axis=1) Base.test = Base.test.drop(features, axis=1) end = Base.train.shape[1] message = 'Dropped {} features with {} features available.' return message.format(start - end, end) def impute(self): start = Base.train.isnull().sum().sum() Base.test[Base.target] = -1 combine = Base.train.append(Base.test) combine = DataFrameImputer().fit_transform(combine) Base.train = combine[0:Base.train.shape[0]] Base.test = combine[Base.train.shape[0]::] Base.test = Base.test.drop([Base.target], axis=1) end = Base.train.isnull().sum().sum() message = 'Imputed {} empty values to {}.' return message.format(start, end) def mapping(self, a, data): Base.train[a] = Base.train[a].apply(lambda x: data[x]) Base.test[a] = Base.test[a].apply(lambda x: data[x]) def fillna(self, a, new): start = Base.train[a].isnull().sum() + Base.test[a].isnull().sum() Base.train[a] = Base.train[a].fillna(new) Base.test[a] = Base.test[a].fillna(new) message = 'Filled {} null values across test and train datasets.' return message.format(start) def replace(self, a, match, new): if type(match) is str: start = Base.train[Base.train[a] == match][a].shape[0] + Base.test[Base.test[a] == match][a].shape[0] message = 'Replaced {} matching values across train and test datasets.' message = message.format(start) else: message = 'Replaced matching list of strings across train and test datasets.' Base.train[a] = Base.train[a].replace(match, new) Base.test[a] = Base.test[a].replace(match, new) return message def outliers(self, a, lower = None, upper = None): if upper: upper_value = np.percentile(Base.train[a].values, upper) change = Base.train.loc[Base.train[a] > upper_value, a].shape[0] Base.train.loc[Base.train[a] > upper_value, a] = upper_value message = 'Fixed {} or {:.2f}% upper outliers. 
'.format(change, change/Base.train.shape[0]*100) if lower: lower_value = np.percentile(Base.train[a].values, lower) change = Base.train.loc[Base.train[a] < lower_value, a].shape[0] Base.train.loc[Base.train[a] < lower_value, a] = lower_value message = message + 'Fixed {} or {:.2f}% lower outliers.'.format(change, change/Base.train.shape[0]*100) return message def _density_by_feature(self, a): vals = Base.train[a].value_counts() dvals = vals.to_dict() Base.train[a + '_density'] = Base.train[a].apply(lambda x: dvals.get(x, vals.min())) Base.test[a + '_density'] = Base.test[a].apply(lambda x: dvals.get(x, vals.min())) def density(self, a): if isinstance(a, str): self._density_by_feature(a) if isinstance(a, list): for feature in a: self._density_by_feature(feature) def add(self, a, num): Base.train[a] = Base.train[a] + num Base.test[a] = Base.test[a] + num def sum(self, new, a, b): Base.train[new] = Base.train[a] + Base.train[b] Base.test[new] = Base.test[a] + Base.test[b] def diff(self, new, a, b): Base.train[new] = Base.train[a] - Base.train[b] Base.test[new] = Base.test[a] - Base.test[b] def product(self, new, a, b): Base.train[new] = Base.train[a] * Base.train[b] Base.test[new] = Base.test[a] * Base.test[b] def divide(self, new, a, b): Base.train[new] = Base.train[a] / Base.train[b] Base.test[new] = Base.test[a] / Base.test[b] Base.train[new] = Base.train[new].replace([np.inf, -np.inf], 0) Base.test[new] = Base.test[new].replace([np.inf, -np.inf], 0)
MIT License
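A hypothetical usage sketch following the Speedml workflow this Feature API belongs to; the CSV paths and column names ('Fare', 'Survived', 'PassengerId') are illustrative Titanic-style placeholders.

from speedml import Speedml

sml = Speedml('train.csv', 'test.csv', target='Survived', uid='PassengerId')
# Adds a 'Fare_2dp' column to both the train and test datasets, holding 'Fare'
# rounded to 2 decimal places.
sml.feature.round('Fare_2dp', 'Fare', 2)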
digitalglobe/geoio
geoio/base.py
GeoImage.iter_components
python
def iter_components(self, **kwargs): for c in xrange(len(self.files.dfile_tiles)): yield self.get_data(component=c, **kwargs)
This is a convenience method that iterates (via yield) through the components in the image object. Any kwargs valid for get_data can be passed through. Parameters ---------- None Yields ------ ndarray Three dimensional numpy array of pixel values from the requested region of the image.
https://github.com/digitalglobe/geoio/blob/94a9d4b45a9482bc5341aaac3c050f70ddff5550/geoio/base.py#L532-L551
from __future__ import division from osgeo import gdal, gdalconst, osr, ogr import numpy as np import os import warnings import collections import textwrap import tempfile import logging import math import platform from collections import Sequence import tinytools as tt import constants as const gdal.UseExceptions() ogr.UseExceptions() logger = logging.getLogger(__name__) class OverlapError(ValueError): pass class GeoImage(object): def __init__(self, file_in, derived_dir=None): if not os.path.isfile(file_in): raise ValueError("The file that was passed in does not exist.") ifile = os.path.abspath(file_in) fname = os.path.basename(ifile) fdir = os.path.dirname(ifile) flist = os.listdir(fdir) self.files = tt.bunch.OrderedBunch({}) if derived_dir: if not os.path.isdir(derived_dir): raise ValueError("The requested derived data directory does " "not exist.") if not os.access(os.path.join(derived_dir, ''), os.W_OK): raise ValueError("Write access is required for the requested " "location passed into derived_dir.") self.files.derived_dir = os.path.join(derived_dir, '') else: if os.access(fdir, os.W_OK): self.files.derived_dir = fdir else: self.files.derived_dir = fdir warnings.warn("The input file location is not writable. " "Derived file creation (i.e. spectral files) " "will not be available. Either write permissions " "need to be provided or the object can be " "reinstantiated with a writable location passed " "to the input variable dervied_store_dir.") (tmpfile,tmptiles)=self._get_file_and_tiles(ifile) self.files.dfile = tmpfile self.files.dfile_tiles = tmptiles self._fobj = self._get_gdal_obj(self.files.dfile, self.files.dfile_tiles) self._set_metadata() def _get_file_and_tiles(self, ifile): if tt.files.filter(ifile, '*.TIL', case_sensitive=False): file_loc = ifile tiles_loc = tt.pvl.read_from_pvl(ifile,'filename') dname = os.path.dirname(ifile) tiles_loc = [os.path.join(dname,x) for x in tiles_loc] elif tt.files.filter(ifile, '*.VRT', case_sensitive=False): file_loc = ifile tmp = gdal.Open(file_loc) tmp_files = tmp.GetFileList() tiles_loc = [x for x in tmp_files if not tt.files.filter(x, '*.VRT', case_sensitive=False)] tmp = None elif os.path.isfile(os.path.splitext(ifile)[0]): tmp = os.path.splitext(ifile)[0] file_loc = tmp tiles_loc = [tmp] else: file_loc = ifile tiles_loc = [ifile] return (file_loc,tiles_loc) def _get_gdal_obj(self, dfile, dfile_tiles): if tt.files.filter(dfile, '*.TIL', case_sensitive=False): file_temp = tempfile.NamedTemporaryFile(suffix=".VRT") if len(dfile_tiles) == 1: cmd = [] cmd.append("gdal_translate") cmd.append("-of") cmd.append("VRT") cmd.append(dfile_tiles[0]) cmd.append(file_temp.name) else: cmd = [] cmd.append("gdalbuildvrt") cmd.append(file_temp.name) for i in dfile_tiles: cmd.append(i) dump = tt.cmd_line.exec_cmd(cmd,ret_output=True) logger.debug(dump) del dump if not os.path.isfile(file_temp.name): raise StandardError("Creation of file " + file_temp.name + " " "failed. This could possibly be a " "write access problem?") vvv = gdal.Open(file_temp.name) drv = gdal.GetDriverByName('VRT') obj = drv.CreateCopy('',vvv) file_temp.close() if os.path.isfile(file_temp.name): raise StandardError("Removal of file " + file_temp.name + " " "failed. 
There is something wrong with " "the .TIL handling.") else: obj = gdal.Open(self.files.dfile, gdalconst.GA_ReadOnly) return obj def _set_metadata(self): meta_geoimg_dict = read_geo_file_info(self._fobj) if not os.path.isfile(meta_geoimg_dict['file_name']): meta_geoimg_dict['file_name'] = self.files.dfile meta_geoimg_dict['class_name'] = self.__class__.__name__ self.meta = tt.bunch.OrderedBunch(meta_geoimg_dict) self.shape = self.meta.shape self.resolution = self.meta.resolution def __repr__(self): sss = '' su = self.meta prefixes = collections.OrderedDict() prefixes['Class Name'] = (['class_name'],'') prefixes['Driver Name'] = (['driver_name'],'') if 'product_level' in su: prefixes['Product Level'] = (['product_level'],'') prefixes['Data Type'] = (['gdal_dtype_name'],'') prefixes['File Name'] = (['file_name'],'') prefixes['File List'] = (['file_list'], '') prefixes['Dimensions'] = (['shape'], ' (nlayers, nrows, ncols)') prefixes['Resolution'] = (['resolution'],' (x,y)') prefixes['Extent'] = (['extent'],' (ul_x, ul_y, lr_x, lr_y)') prefixes['Projection String'] = (['pprint_proj_string'],'') prefixes['Geo Transform'] = (['geo_transform'],'') if 'authority' in su: prefixes['Authority'] = (['authority'], '') if 'band_centers' in su: prefixes['Band Centers (nm)'] = (['band_centers'],'') prelen = max([len(x) for x in prefixes]) for x in prefixes: prefix = x+' '*(prelen-len(x))+' : ' width_set = 80 wrapper = textwrap.TextWrapper(initial_indent=prefix, width=width_set, replace_whitespace=False, subsequent_indent=' '*len(prefix)) message = ', '.join([str(su[y]) for y in prefixes[x][0]]) message = message+prefixes[x][1] if message.find('\n') != -1: sss = sss + prefix + message.replace('\n','\n'+' '*prelen)+'\n' elif message: sss = sss + wrapper.fill(message) + '\n' else: sss = sss + prefix + '\n' return sss def print_img_summary(self): print(self.__repr__()) def __iter__(self): for x in self.iter_window(): yield x def iter_base(self, xoff, yoff, win_xsize, win_ysize, **kwargs): logger.debug('*** begin iter_base ***') windows = np.broadcast(xoff,yoff,win_xsize,win_ysize) for w in windows: logger.debug('window parameters: xoff %s, yoff %s, ' 'win_xsize %s, win_ysize %s', w[0], w[1], w[2], w[3]) yield self.get_data(window=w,**kwargs) def iter_window(self, win_size=None, stride=None, **kwargs): logger.debug('*** begin iter_window ***') if win_size: if any(x <= 0 for x in win_size): raise ValueError('No value in win_size can be equal ' 'to or less than zero.') if stride: if any(x <= 0 for x in stride): raise ValueError('No value in stride can be equal ' 'to or less than zero.') if not win_size and not stride: b = self._fobj.GetRasterBand(1) win_size = b.GetBlockSize() logger.debug('win_size is: %s, stride is: %s', win_size, stride) if win_size and not stride: xs = self.meta.shape[1] ys = self.meta.shape[2] xsize, ysize = win_size x_extra_pixels = xs % win_size[0] xoff = int(x_extra_pixels / 2.0) y_extra_pixels = ys % win_size[1] yoff = int(y_extra_pixels / 2.0) xoff_start = xoff xsize, ysize = win_size while True: logger.debug(' xoff is %s,\tyoff is %s', xoff, yoff) yield self.get_data(window=[xoff, yoff, xsize, ysize],**kwargs) xoff += xsize if xoff > self.meta.shape[1]: xoff = xoff_start yoff += ysize if yoff > self.meta.shape[2]: break elif not win_size and stride: raise ValueError('Setting stride and not setting win_size is not ' 'allowed because there is no resonable value to ' 'set win_size to. 
In this case stride can be ' 'even or odd which could result in alternative ' 'size return blocks around the center pixel ' '(or fractional pixel).') elif win_size and stride: xs = self.meta.shape[1] ys = self.meta.shape[2] xsize, ysize = win_size xstride, ystride = stride x_extra_pixels = (xs - xsize) % xstride xoff = int(x_extra_pixels/2.0) y_extra_pixels = (ys - ysize) % ystride yoff = int(y_extra_pixels/2.0) xoff_start = xoff while True: logger.debug(' xoff is %s,\tyoff is %s', xoff, yoff) yield self.get_data(window=[xoff, yoff, xsize, ysize], **kwargs) xoff += xstride if xoff > self.meta.shape[1]: xoff = xoff_start yoff += ystride if yoff > self.meta.shape[2]: break def iter_window_random(self, win_size=None, no_chips=1000, **kwargs): if win_size: if any(x <= 0 for x in win_size): raise ValueError('No value in win_size can be equal ' 'to or less than zero.') counter = no_chips xs = self.meta.shape[1] ys = self.meta.shape[2] xsize, ysize = win_size while True: xoff = np.random.randint(xs-xsize+1) yoff = np.random.randint(ys-ysize+1) yield self.get_data(window=[xoff, yoff, xsize, ysize], **kwargs) counter -= 1 if counter == 0: break
MIT License
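A minimal usage sketch, assuming geoio is installed and 'image.TIL' names a tiled DigitalGlobe-style product on disk; no get_data kwargs are forwarded here, though any that get_data accepts could be passed through iter_components.

import geoio

img = geoio.GeoImage('image.TIL')
# Each yielded component is a (bands, rows, cols) numpy array for one tile.
for comp in img.iter_components():
    print(comp.shape)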
dit/dit
dit/multivariate/deweese.py
BaseDeWeeseOptimizer.__init__
python
def __init__(self, dist, rvs=None, crvs=None, deterministic=False, rv_mode=None): super().__init__(dist, rvs=rvs, crvs=crvs, rv_mode=rv_mode) self._construct_auxvars([({rv}, size) for rv, size in zip(self._rvs, self._shape)]) if deterministic: self.constraints = [{'type': 'eq', 'fun': self._constraint_deterministic(), }, ] self._default_hops *= 2
Initialize the optimizer. Parameters ---------- dist : Distribution The distribution to optimize. rvs : iter of iters The random variables of interest. crvs : iter The random variables to condition on. deterministic : bool Whether the functions to optimize over should be deterministic or not. Defaults to False. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {{'indices', 'names'}}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', then the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted, which defaults to 'indices'.
https://github.com/dit/dit/blob/2853cb13110c5a5b2fa7ad792e238e2177013da2/dit/multivariate/deweese.py#L119-L150
from itertools import product from ..algorithms import BaseAuxVarOptimizer from ..distconst import RVFunctions, insert_rvf from ..helpers import normalize_rvs from ..utils import extended_partition, partitions, unitful __all__ = ( 'deweese_coinformation', 'deweese_total_correlation', 'deweese_dual_total_correlation', 'deweese_caekl_mutual_information', ) def deweese_constructor(mmi): @unitful def deweese(dist, rvs=None, crvs=None, return_opt=False, rv_mode=None): rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode) dist = dist.coalesce(rvs + [crvs]) new_rvs = [[i + len(rvs) + 1] for i, _ in enumerate(rvs)] new_crvs = [dist.outcome_length() - 1] rvf = RVFunctions(dist) def all_funcs(): partss = [partitions({(o[i],) for o in dist.outcomes}) for i, _ in enumerate(rvs)] for parts in product(*partss): d = dist.copy() for i, part in enumerate(parts): new_part = extended_partition(d.outcomes, [i], part, d._outcome_ctor) d = insert_rvf(d, rvf.from_partition(new_part)) yield d possibilities = ((mmi(d, rvs=new_rvs, crvs=new_crvs), d) for d in all_funcs()) opt_val, opt_d = max(possibilities, key=lambda t: t[0]) if return_opt: return opt_val, opt_d else: return opt_val deweese.__doc__ = deweese.__doc__.format(name=mmi.__name__) return deweese class BaseDeWeeseOptimizer(BaseAuxVarOptimizer): construct_initial = BaseAuxVarOptimizer.construct_copy_initial _sign = -1 _shotgun = 5
BSD 3-Clause New or Revised License
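A minimal sketch of the public entry points built on this optimizer, assuming dit is installed; the XOR distribution is a stock dit example and the printed value is not asserted here.

import dit
from dit.multivariate.deweese import deweese_total_correlation

d = dit.example_dists.Xor()
# DeWeese-style total correlation between the first two variables, conditioned
# on the third.
print(deweese_total_correlation(d, rvs=[[0], [1]], crvs=[2]))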
iwangjian/bytecup2018
make_datafiles.py
write_to_tar
python
def write_to_tar(url_file, out_file, makevocab=False): print("Making bin file for URLs listed in {}...".format(url_file)) story_fnames = read_text_file(url_file) num_stories = len(story_fnames) if makevocab: vocab_counter = collections.Counter() with tarfile.open(out_file, 'w') as writer: for idx, s in enumerate(story_fnames): if idx % 1000 == 0: print("Writing story {} of {}; {:.2f} percent done".format( idx, num_stories, float(idx)*100.0/float(num_stories))) if os.path.isfile(os.path.join(train_tokenized_stories_dir, s)): story_file = os.path.join(train_tokenized_stories_dir, s) elif os.path.isfile(os.path.join(valid_tokenized_stories_dir, s)): story_file = os.path.join(valid_tokenized_stories_dir, s) elif os.path.isfile(os.path.join(test_tokenized_stories_dir, s)): story_file = os.path.join(test_tokenized_stories_dir, s) else: print("Error: Couldn't find tokenized story file %s in either tokenized story directories %s and %s. \ Was there an error during tokenization?" % ( s, train_tokenized_stories_dir, valid_tokenized_stories_dir)) print("Checking that the tokenized stories directories %s and %s contain correct number of files..." % (train_tokenized_stories_dir, valid_tokenized_stories_dir)) raise Exception("Tokenized stories directories %s and %s contain correct number of files but story file \ %s found in neither." % ( train_tokenized_stories_dir, valid_tokenized_stories_dir, s)) article_sents, abstract_sents = get_art_abs(story_file) js_example = {} js_example['id'] = s.replace('.story', '') js_example['article'] = article_sents js_example['abstract'] = abstract_sents js_serialized = json.dumps(js_example, indent=4).encode() save_file = io.BytesIO(js_serialized) tar_info = tarfile.TarInfo('{}/{}.json'.format( os.path.basename(out_file).replace('.tar', ''), idx)) tar_info.size = len(js_serialized) writer.addfile(tar_info, save_file) if makevocab: art_tokens = ' '.join(article_sents).split() abs_tokens = ' '.join(abstract_sents).split() tokens = art_tokens + abs_tokens tokens = [t.strip() for t in tokens] tokens = [t for t in tokens if t != ""] vocab_counter.update(tokens) print("Finished writing file {}\n".format(out_file)) if makevocab: print("Writing vocab file...") with open(os.path.join(finished_files_dir, "vocab_cnt.pkl"), 'wb') as vocab_file: pkl.dump(vocab_counter, vocab_file) print("Finished writing vocab file")
Reads the tokenized .story files corresponding to the URLs listed in url_file and writes them to out_file.
https://github.com/iwangjian/bytecup2018/blob/c59c6a495f81c493eaaf7fda710c8acd7ef148b9/make_datafiles.py#L143-L209
import os import hashlib import subprocess import collections import json import tarfile import io import pickle as pkl import nltk.data tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') dm_single_close_quote = '\u2019' dm_double_close_quote = '\u201d' END_TOKENS = ['.', '!', '?', '...', "'", "`", '"', dm_single_close_quote, dm_double_close_quote, ")"] all_train_urls = "./bytecup/data/lists/all_train.txt" all_val_urls = "./bytecup/data/lists/all_valid.txt" all_test_urls = "./bytecup/data/lists/all_test.txt" train_stories_dir = './bytecup/data/train' valid_stories_dir = './bytecup/data/valid' test_stories_dir = './bytecup/data/test' train_tokenized_stories_dir = "./bytecup/tokenized/train_tokenized" valid_tokenized_stories_dir = "./bytecup/tokenized/valid_tokenized" test_tokenized_stories_dir = "./bytecup/tokenized/test_tokenized" finished_files_dir = "./bytecup/finished_files" VOCAB_SIZE = 200000 def tokenize_stories(stories_dir, tokenized_stories_dir): print("Preparing to tokenize {} to {}...".format(stories_dir, tokenized_stories_dir)) stories = os.listdir(stories_dir) print("Making list of files to tokenize...") with open("mapping.txt", "w") as f: for s in stories: f.write( "{} \t {}\n".format( os.path.join(stories_dir, s), os.path.join(tokenized_stories_dir, s) ) ) command = ['java', 'edu.stanford.nlp.process.PTBTokenizer', '-ioFileList', '-preserveLines', 'mapping.txt'] print("Tokenizing {} files in {} and saving in {}...".format( len(stories), stories_dir, tokenized_stories_dir)) subprocess.call(command) print("Stanford CoreNLP Tokenizer has finished.") os.remove("mapping.txt") num_orig = len(os.listdir(stories_dir)) num_tokenized = len(os.listdir(tokenized_stories_dir)) if num_orig != num_tokenized: raise Exception( "The tokenized stories directory {} contains {} files, but it " "should contain the same number as {} (which has {} files). Was" " there an error during tokenization?".format( tokenized_stories_dir, num_tokenized, stories_dir, num_orig) ) print("Successfully finished tokenizing {} to {}.\n".format( stories_dir, tokenized_stories_dir)) def read_story_file(text_file): with open(text_file, "r") as f: lines = f.read().split('\n\n') return lines def read_text_file(text_file): lines = [] with open(text_file, "r") as f: for line in f: lines.append(line.strip()) return lines def hashhex(s): h = hashlib.sha1() h.update(s.encode()) return h.hexdigest() def get_url_hashes(url_list): return [hashhex(url) for url in url_list] def fix_missing_period(line): if "@highlight" in line: return line if line == "": return line if line[-1] in END_TOKENS: return line return line + " ." def get_art_abs(story_file): lines = read_story_file(story_file) lines = [' '.join(line.lower().strip().split()) for line in lines] lines = [fix_missing_period(line) for line in lines] article_lines = [] highlights = [] next_is_highlight = False for idx, line in enumerate(lines): if line == "": continue elif line.startswith("@highlight"): next_is_highlight = True elif next_is_highlight: highlights.append(line) else: article_lines.append(line) return article_lines, highlights
MIT License
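A small reader sketch for the .tar files this function writes, where each member is '<split>/<idx>.json' holding 'id', 'article' and 'abstract'; the 'train.tar' path is illustrative.

import json
import tarfile

with tarfile.open('train.tar') as reader:
    for member in reader.getmembers()[:3]:
        data = json.loads(reader.extractfile(member).read().decode())
        print(data['id'], len(data['article']), len(data['abstract']))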
neesarg123/pocketbiz
main.py
show_after_discount
python
def show_after_discount(tot_wout_tax, discount, root_win): discount_added.append(discount) receipt_file = open('receipt.txt', 'w+') discount_amount = Decimal(Decimal(tot_wout_tax) * Decimal(discount) * Decimal(0.01)).quantize(Decimal('.01')) return_val = Decimal(Decimal(tot_wout_tax) - discount_amount + Decimal(get_tax_amount())).quantize(Decimal('.01')) return_label = tk.Label(root_win, text="DUE:\n" + str(abs(return_val)), bg=TK_INPUT_BG, fg=FG_LABELS_COLOR, font=FONT) return_label.place(relx=0.3, rely=0.5, relheight=0.15, relwidth=0.4) receipt_line = "\nDISCOUNT " + str(discount) + "%" + "\t-$" + str(discount_amount) receipt_line += "\nBALANCE:\t$" + str(return_val) receipt_file.write(receipt_line) receipt_file.close()
Displays the amount owed after applying the discount and appends it to the receipt.
https://github.com/neesarg123/pocketbiz/blob/40e7468e12ea9224d36c66bc49bea445d5002335/main.py#L585-L602
import tkinter as tk import item_database import transactions_database import all_transactions_database from tkintertable import TableCanvas, TableModel import datetime import pandas as pd from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.figure import Figure from decimal import * import win32print import win32api import win32con import os.path import itertools import pygsheets BACKGROUND_FRAME_COLOR = '#42423f' BUTTON_AND_LABEL_COLOR = '#adada6' BACK_BUTTON_COLOR = '#d93027' ENTRY_COLOR = '#d9d1d0' TK_INPUT_WIN_H = 250 TK_INPUT_WIN_W = 250 TK_INPUT_BG = '#575353' FG_LABELS_COLOR = '#ffffff' ONLINE_IND_COLOR = '#5dc77a' OFFLINE_IND_COLOR = '#ed4c40' YES_BTN = '#82ba6e' NO_BTN = '#b52438' FONT = ('Courier', 15, 'bold') global PRINTER_NAME, G_INV_SH_NAME, G_TRAN_SH_NAME, GC, SH, WKS, SH_T, WKS_T, COLUMNS_GOOGLE_INVENTORY, ALL_TRANSACTIONS global INVENTORY_DF, ON_OFF_CYC input_file = open('inputs.txt', 'r') for idx, line in enumerate(input_file.readlines()): value = line.split("=")[1] if idx == 0: PRINTER_NAME = value[2:-2] elif idx == 1: G_INV_SH_NAME = value[2:-2] elif idx == 2: G_TRAN_SH_NAME = value[2:-1] else: COLUMNS_GOOGLE_INVENTORY = value.split('[')[1].split(']')[0].split(', ') if os.path.isfile('creds.json'): try: GC = pygsheets.authorize(service_file='creds.json') except Exception as e: print("Something went Wrong while getting authorizing credentials file:", str(e)) if len(G_INV_SH_NAME) > 0: try: print("Trying to open the google inventory file...") SH = GC.open(G_INV_SH_NAME) WKS = SH[0] print("Successfully opened the google inventory file!") except Exception as e: print("Something went wrong while opening the google inventory file:", str(e)) if len(G_TRAN_SH_NAME) > 0: try: print("Trying to open the google transactions file...") SH_T = GC.open(G_TRAN_SH_NAME) WKS_T = SH_T[0] print("Successfully opened the google transactions file!") except Exception as e: print("Something went wrong while opening the google transactions file:", str(e)) else: print("You don't yet have a google sheets API set up. 
Follow this link to set one up:\n" "https://developers.google.com/sheets/api/quickstart/python") if not os.path.isfile('Transactions.xlsx'): header_df = pd.DataFrame({'Name': [], 'S.Price': [], 'Date': [], 'P.Type': [], 'Total': []}) header_df.to_excel('Transactions.xlsx', index=False) ALL_TRANSACTIONS = pd.read_excel('Transactions.xlsx', ignore_index=True) all_transactions_database.deleteData() for idx, name in enumerate(list(ALL_TRANSACTIONS['Name'])): all_transactions_database.addData(name, str(ALL_TRANSACTIONS['S.Price'][idx]), str(ALL_TRANSACTIONS['Date'][idx]), str(ALL_TRANSACTIONS['P.Type'][idx]), str(ALL_TRANSACTIONS['Total'][idx])) if not os.path.isfile('Inventory.xlsx'): header_df = pd.DataFrame({'Name': [], 'Barcode': [], 'S.Price': [], 'P.Price': [], 'Quantity': [], 'Online_Price': [], 'Tax': []}) header_df.to_excel('Inventory.xlsx', index=False) INVENTORY_DF = pd.read_excel('Inventory.xlsx', ignore_index=True) item_database.deleteData() for idx, name in enumerate(list(INVENTORY_DF['Name'])): item_database.addData(name, str(INVENTORY_DF['Barcode'][idx]), str(INVENTORY_DF['P.Price'][idx]), str(INVENTORY_DF['S.Price'][idx]), str(INVENTORY_DF['Quantity'][idx]), str(INVENTORY_DF['Online_Price'][idx]), str(INVENTORY_DF['Tax'][idx])) ON_OFF_CYC = itertools.cycle('of') open('receipt.txt', 'w').write('') discount_added = [] def update_all_transaction_df(): all_transactions_data = {"Name": [names[0] for names in all_transactions_database.getNames()], "S.Price": [s_prices[0] for s_prices in all_transactions_database.getSPrices()], "Date": [dates[0] for dates in all_transactions_database.getDates()], "P.Type": [p_types[0] for p_types in all_transactions_database.getPTypes()], "Total": [tots[0] for tots in all_transactions_database.getTotals()]} new_all_transactions_df = pd.DataFrame(data=all_transactions_data) new_all_transactions_df['Total'] = pd.to_numeric(new_all_transactions_df['Total'], errors='coerce') taxes = [] for i in range(len(new_all_transactions_df)): if str(item_database.getTaxableFromNameAndSP(new_all_transactions_df['Name'][i], new_all_transactions_df['S.Price'][i])) == 'T': t = Decimal(Decimal(new_all_transactions_df['S.Price'][i]) * Decimal(0.0825)).quantize(Decimal('.01')) taxes.append(t) elif str(new_all_transactions_df['Name'][i]) == 'Misc.': real_p = Decimal(Decimal(new_all_transactions_df['S.Price'][i]) / Decimal(1.0825)).quantize(Decimal('.01')) t = Decimal(Decimal(new_all_transactions_df['S.Price'][i]) - real_p).quantize(Decimal('.01')) taxes.append(t) else: taxes.append(Decimal(0.0)) new_all_transactions_df['Tax'] = taxes new_all_transactions_df['Tax'] = pd.to_numeric(new_all_transactions_df['Tax']) return new_all_transactions_df def update_inventory_df(): inventory_data = {'Name': [names[0] for names in item_database.getNames()], 'Barcode': [barcodes[0] for barcodes in item_database.getBarcodes()], 'S.Price': [s_prices[0] for s_prices in item_database.getSPrices()], 'P.Price': [p_prices[0] for p_prices in item_database.getPPrices()], 'Quantity': [quantities[0] for quantities in item_database.getQtns()], 'Online_Price': [ops[0] for ops in item_database.getOPs()], 'Tax': [taxes[0] for taxes in item_database.getTaxes()]} new_inventory_df = pd.DataFrame(data=inventory_data) return new_inventory_df def update_excel_files(): update_inventory_df().to_excel('Inventory.xlsx', index=False) update_all_transaction_df().to_excel('Transactions.xlsx', index=False) def erase_previous_entry(entries): for entry in entries: entry.delete(0, 'end') def click_on_barcode_entry(): 
win32api.SetCursorPos((320, 140)) win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 320, 140, 0, 0) win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 320, 140, 0, 0) def set_focus_on_entry(entry): entry.focus() return "break" def display_item_database_table(item_list_frame): items = item_database.getData() TableModel() data = {} for i in range(len(items)): data['row' + str(i + 1)] = {'Name': items[i][0], 'Barcode': items[i][1], 'P.Price': items[i][2], 'S.Price': items[i][3], 'Quantity': items[i][4], 'Online $': items[i][5], 'Tax': items[i][6]} table = TableCanvas(item_list_frame, data=data) table.show() def display_all_transactions_database_table(item_list_frame): items = all_transactions_database.getData() TableModel() data = {} for i in range(len(items)): data['row' + str(i + 1)] = {'Name': items[i][0], 'S.Price': items[i][1], 'Date': items[i][2], 'Payment Type': items[i][3], 'Total': items[i][4]} table = TableCanvas(item_list_frame, data=data) table.show() def get_tax_amount(): transaction_items = transactions_database.getData() total_sale = Decimal(0.0) for i in range(len(transaction_items)): if str(item_database.getTaxableFromNameAndSP(str(transaction_items[i][0]), transaction_items[i][1])) == 'T': total_sale += Decimal(transaction_items[i][1]).quantize(Decimal('.01')) elif str(item_database.getTaxableFromNameAndOP(str(transaction_items[i][0]), transaction_items[i][1])) == 'T': total_sale += Decimal(transaction_items[i][1]).quantize(Decimal('.01')) return Decimal(Decimal(0.0825) * total_sale).quantize(Decimal('.01')) def get_transactions_total_wout_tax(): transaction_items = transactions_database.getData() total_sale = Decimal(0.0) if len(transaction_items) > 0: for i in range(len(transaction_items)): total_sale += Decimal(transaction_items[i][1]).quantize(Decimal('.01')) return total_sale def get_transactions_total(): return Decimal(get_transactions_total_wout_tax() + get_tax_amount()).quantize(Decimal('.01')) def show_transaction_table(frame, frame1): model = TableModel() transaction_items = transactions_database.getData() data = {} data1 = {} total_sale = get_transactions_total() for i in range(len(transaction_items)): data['row' + str(i + 1)] = {'Name': transaction_items[i][0], 'S.Price': transaction_items[i][1], 'Date': transaction_items[i][2]} data1['row1'] = {'Total ($)': str(total_sale)} table1 = TableCanvas(frame1, data=data1, model=model) table = TableCanvas(frame, data=data, model=model) click_on_barcode_entry() table.show() table1.show() def delete_pressed(frame, frame1): transactions_database.deleteData() show_transaction_table(frame, frame1) def print_receipt(): filename = "receipt.txt" if len(PRINTER_NAME) > 0: try: win32print.SetDefaultPrinter(PRINTER_NAME) win32api.ShellExecute( 0, "printto", filename, '"%s"' % win32print.GetDefaultPrinter(), ".", 0 ) except Exception as e: print("Something went wrong when trying to print:", str(e)) else: print("You don't have a usb-printer set up yet. If you have one, make sure to add its name in the input.txt " "file. 
Or check whether you have the driver downloaded in your computer.") def print_option(barcodes): root_tk_in = tk.Tk() root_tk_in.configure(bg=TK_INPUT_BG) root_tk_in.title("Print Option") canvas_in = tk.Canvas(root_tk_in, height=TK_INPUT_WIN_H, width=500, bg=TK_INPUT_BG) canvas_in.pack() option_label = tk.Label(root_tk_in, text='Do you want to print a receipt?', bg=TK_INPUT_BG, fg=FG_LABELS_COLOR, font=FONT) option_label.place(relx=0.1, rely=0.4, relheight=0.1, relwidth=0.8) yes_btn = tk.Button(root_tk_in, text='Yes', bg=YES_BTN, fg=FG_LABELS_COLOR, font=FONT, command=lambda: [print_receipt(), root_tk_in.destroy(), done_btn_pressed(barcodes), update_inventory_df(), update_all_transaction_df(), update_excel_files()]) yes_btn.place(relx=0.2, rely=0.55, relheight=0.1, relwidth=0.2) no_btn = tk.Button(root_tk_in, text='No', bg=NO_BTN, fg=FG_LABELS_COLOR, font=FONT, command=lambda: [root_tk_in.destroy(), done_btn_pressed(barcodes), update_inventory_df(), update_all_transaction_df(), update_excel_files()]) no_btn.place(relx=0.6, rely=0.55, relheight=0.1, relwidth=0.2) no_btn.focus_force() root_tk_in.mainloop() def done_btn_pressed(barcodes): filename = "receipt.txt" transaction_items = transactions_database.getData() for idx, t in enumerate(list(transaction_items)): if idx != len(list(transaction_items)) - 1: all_transactions_database.addData(t[0], t[1], t[2], t[3], '') else: if len(discount_added) == 0: all_transactions_database.addData(t[0], t[1], t[2], t[3], str(get_transactions_total())) else: discounted_total = Decimal(get_transactions_total() - ( Decimal(discount_added[0]) * Decimal(0.01) * get_transactions_total())).quantize( Decimal('.01')) all_transactions_database.addData(t[0], t[1], t[2], t[3], str(discounted_total)) decrease_qtn(barcodes) barcodes.clear() transactions_database.deleteData() if len(G_INV_SH_NAME) > 0 and len(G_TRAN_SH_NAME) > 0: inv_df = pd.read_excel('Inventory.xlsx') new_invn = pd.DataFrame() for col in COLUMNS_GOOGLE_INVENTORY: new_invn[str(col)] = inv_df[str(col)] WKS.set_dataframe(new_invn, (1, 1)) trans_df = pd.read_excel("Transactions.xlsx") WKS_T.set_dataframe(trans_df, (1, 1)) open(filename, 'w').write('') def update_receipt_text(): receipt_file = open("receipt.txt", "r+") present_lines = receipt_file.readlines() receipt_file.close() transaction_items = transactions_database.getData() dup_list_of_item_names = [t[0] for t in transaction_items] list_of_sp = [t[1] for t in transaction_items] list_of_qtns = [] list_of_item_names = list(set(dup_list_of_item_names)) for i in list_of_item_names: list_of_qtns.append(dup_list_of_item_names.count(i)) header = " LIQUOR PALACE\n\t6965 Harwin Dr\n\t 346 980 8859\n\n" subtotal = Decimal(0.0) for idx, item in enumerate(list_of_item_names): header += str(item) + " " + str(list_of_qtns[idx]) + " $" + str(list_of_sp[idx]) + "\n" subtotal += Decimal(Decimal(list_of_sp[idx]) * Decimal(list_of_qtns[idx])).quantize(Decimal('.01')) tax = Decimal(Decimal(0.0825) * subtotal).quantize(Decimal('.01')) total = Decimal(subtotal + tax).quantize(Decimal('.01')) header += "\nSUBTOTAL" + "\t$" + str(subtotal) + "\n" + "TAX" + "\t\t$" + str(tax) + "\n" + "TOTAL" + "\t\t$" + str(total) + "\n" for line in present_lines: header += line open('receipt.txt', 'w').write(header) def show_monthly_analysis(frame): fig = Figure(figsize=(5, 5), dpi=80) subplot = fig.add_subplot(111) updated_at_df = pd.read_excel('Transactions.xlsx') updated_at_df['Revenue'] = updated_at_df['S.Price'] updated_at_df['Month'] = updated_at_df['Date'].str[5:7] 
updated_at_df['Month'] = updated_at_df['Month'].astype('int32') monthly_revs = updated_at_df.groupby('Month').sum() months = [x for x in updated_at_df['Month']] non_rep_months = [] for month in months: if month not in non_rep_months: non_rep_months.append(month) subplot.set_xticks(non_rep_months) subplot.set_xlabel('Month Number') subplot.set_ylabel('Sale ($)') subplot.set_title('Total Sale Per Month for the Year') subplot.bar(non_rep_months, monthly_revs['Revenue'], color="#eb6e34") canvas = FigureCanvasTkAgg(fig, frame) canvas.draw() canvas.get_tk_widget().place(relx=0.05, rely=0.3, relheight=0.5, relwidth=0.4) def show_sale_frequency_analysis(frame): updated_at_df = pd.read_excel('Transactions.xlsx') fig1 = Figure(figsize=(5, 5), dpi=80) subplot1 = fig1.add_subplot(111) item_names = updated_at_df['Name'] item_names_and_freq = [] for name in item_names: count = updated_at_df['Name'] .where(updated_at_df['Name'] == name).count() if [name, count] not in item_names_and_freq: item_names_and_freq.append([name, count]) item_names_and_freq.sort(key=lambda x: x[1], reverse=True) x_var = [n[0] for n in item_names_and_freq] y_var = [f[1] for f in item_names_and_freq] if len(x_var) > 10: x_var = x_var[:10] y_var = y_var[:10] subplot1.bar(x_var, y_var, color="#eb6e34") subplot1.set_title("Top Selling Items (up to 10)") subplot1.set_xlabel("Item Name") subplot1.set_xticklabels([n[0:7] for n in item_names]) subplot1.set_ylabel("Sale Frequency") canvas1 = FigureCanvasTkAgg(fig1, frame) canvas1.draw() canvas1.get_tk_widget().place(relx=0.55, rely=0.3, relheight=0.5, relwidth=0.4) def add_item_to_item_database(name, barcode, pp, sp, qtn, op, tax, text): if (barcode,) in item_database.getBarcodes(): text.set("Item is already in your inventory :)") else: if tax == '0': tax = 'T' else: tax = 'N' item_database.addData(name, barcode, pp, sp, qtn, op, tax) text.set("Item was added :)") def add_item_to_transactions_databases(name, sp, date, op, payment_type='CREDIT'): if name is not None: next(ON_OFF_CYC) if next(ON_OFF_CYC) == 'o': transactions_database.addData(name, op, date, payment_type) else: transactions_database.addData(name, sp, date, payment_type) def update_name(new_name, barcode, text): if (barcode,) in item_database.getBarcodes(): item_database.updateName(new_name, barcode) text.set("Item was updated :)") else: text.set("Item is not in your inventory :(") def update_pp(new_pp, barcode, text): if (barcode,) in item_database.getBarcodes(): item_database.updatePurchasePrice(new_pp, barcode) text.set("Item was updated :)") else: text.set("Item is not in your inventory :(") def update_sp(new_sp, barcode, text): if (barcode,) in item_database.getBarcodes(): item_database.updateSalePrice(new_sp, barcode) text.set("Item was updated :)") else: text.set("Item is not in your inventory :(") def update_qtn(new_qtn, barcode, text): if (barcode,) in item_database.getBarcodes(): item_database.updateQuantity(new_qtn, barcode) text.set("Item was updated :)") else: text.set("Item is not in your inventory :(") def update_op(new_op, barcode, text): if (barcode,) in item_database.getBarcodes(): item_database.updateOP(new_op, barcode) text.set("Item was updated :)") else: text.set("Item is not in your inventory :(") def decrease_qtn(barcodes): for b in barcodes: item_database.decreaseQuantityByOne(b) def get_return_value(total, cash_given): return Decimal(Decimal(total) - Decimal(cash_given)).quantize(Decimal('.01')) def get_return_value_with_discount(tot_wout_tax, discount): discount_amount = 
Decimal(Decimal(tot_wout_tax) * Decimal(discount) * Decimal(0.01)).quantize(Decimal('.01')) return_val = Decimal(Decimal(tot_wout_tax) - discount_amount + Decimal(get_tax_amount())).quantize(Decimal('.01')) return return_val
MIT License
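The point-of-sale context above repeatedly computes tax with decimal.Decimal and rounds to cents via .quantize(Decimal('.01')). A minimal standalone sketch of that pattern follows; the 8.25% rate matches the code, but the sample prices are made up, and the sketch builds Decimal from string literals for exactness whereas the code above constructs them from floats.

from decimal import Decimal

TAX_RATE = Decimal("0.0825")  # 8.25% sales tax, as in the context above

def tax_for(price: Decimal) -> Decimal:
    # Round the computed tax to whole cents, mirroring .quantize(Decimal('.01'))
    return (price * TAX_RATE).quantize(Decimal("0.01"))

def pre_tax_price(tax_inclusive: Decimal) -> Decimal:
    # Back out the pre-tax amount from a tax-inclusive "Misc." price
    return (tax_inclusive / (Decimal("1") + TAX_RATE)).quantize(Decimal("0.01"))

print(tax_for(Decimal("19.99")))            # 1.65
print(pre_tax_price(Decimal("21.64")))      # 19.99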
statueofmike/rtsp
rtsp/ffmpegstream.py
Client.__exit__
python
def __exit__(self, type=None, value=None, traceback=None):
    self.close()
Together with __enter__, allows support for `with-` clauses.
https://github.com/statueofmike/rtsp/blob/e2916ea22d404de2bd786e004ba9dc592f95da89/rtsp/ffmpegstream.py#L34-L36
import cv2 from io import BytesIO from PIL import Image from threading import Thread class Client: _stream = None def __init__(self, rtsp_server_uri, verbose = False): self.rtsp_server_uri = rtsp_server_uri self._verbose = verbose if isinstance(rtsp_server_uri,str) and 'picam' in rtsp_server_uri: self.__class__ = PicamVideoFeed _pc = PicamVideoFeed() self.__dict__.update(_pc.__dict__) self._bg_run = False self.open() def __enter__(self,*args,**kwargs): return self
MIT License
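A minimal sketch of the with-statement usage that __enter__/__exit__ enable for this Client. It assumes the rtsp package is installed and exposes Client at the top level; the URI is a placeholder, and the client.read() call is an assumption based on the package's typical usage rather than something shown in the excerpt above.

import rtsp

with rtsp.Client(rtsp_server_uri="rtsp://192.168.1.10:554/stream") as client:
    frame = client.read()      # assumed API: returns the most recent frame (PIL image)
    if frame is not None:
        frame.save("frame.jpg")
# on leaving the block, __exit__ calls close() automatically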
vincent-lg/tsunami
src/secondaires/peche/__init__.py
Module.ajouter_banc
python
def ajouter_banc(self, banc):
    self.bancs[banc.cle] = banc
Adds the school of fish to the dictionary.
https://github.com/vincent-lg/tsunami/blob/36b3b974f6eefbf15cd5d5f099fc14630e66570b/src/secondaires/peche/__init__.py#L111-L113
import random from abstraits.module import * from corps.aleatoire import * from corps.fonctions import valider_cle from .banc import Banc from . import commandes from .editeurs.schooledit import EdtSchooledit from . import types TERRAINS_PECHE = ("rive", ) class Module(BaseModule): def __init__(self, importeur): BaseModule.__init__(self, importeur, "peche", "secondaire") self.commandes = [] self.bancs = {} def config(self): pecher = self.importeur.perso.ajouter_etat("pecher") pecher.msg_refus = "Vous êtes en train de pêcher" pecher.msg_visible = "pêche ici" pecher.act_autorisees = ["regarder", "parler", "geste", "asseoir", "lever"] BaseModule.config(self) def init(self): importeur.perso.ajouter_talent("peche_terre", "pêche à quai", "survie", 0.45) importeur.perso.ajouter_talent("peche_mer", "pêche embarquée", "survie", 0.42) bancs = self.importeur.supenr.charger_groupe(Banc) for banc in bancs: self.bancs[banc.cle] = banc importeur.diffact.ajouter_action("bancs", 60, self.tick_bancs) BaseModule.init(self) def ajouter_commandes(self): self.commandes = [ commandes.appater.CmdAppater(), commandes.banc.CmdBanc(), commandes.pecher.CmdPecher(), ] for cmd in self.commandes: self.importeur.interpreteur.ajouter_commande(cmd) self.importeur.interpreteur.ajouter_editeur(EdtSchooledit) def creer_banc(self, cle): valider_cle(cle) if cle in self.bancs: raise KeyError("le banc de poisson '{}' existe déjà".format(cle)) banc = Banc(cle) self.ajouter_banc(banc) return banc
BSD 3-Clause New or Revised License
dmitriy-serdyuk/twinnet-asr
libs/Theano/theano/tensor/type.py
TensorType.values_eq_approx
python
def values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=False, rtol=None, atol=None): if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray): if a.shape != b.shape: return False if a.dtype != b.dtype: return False if 'int' in str(a.dtype): return numpy.all(a == b) else: if a.ndim == 0 and numpy.isinf(a): a = a.reshape(1) b = b.reshape(1) cmp = theano.tensor.basic._allclose(a, b, rtol=rtol, atol=atol) if cmp: return True a_missing = numpy.isnan(a) a_inf = numpy.isinf(a) if not (a_missing.any() or (allow_remove_inf and a_inf.any())): _logger.info( 'numpy allclose failed for abs_err %f and rel_err %f', numpy.max(abs(a - b)), numpy.max(abs(a - b) / (abs(a) + abs(b)))) return False rtol = 1.0000000000000001e-05 atol = 1e-8 cmp_elemwise = (numpy.absolute(a - b) <= (atol + rtol * numpy.absolute(b))) both_missing = a_missing * numpy.isnan(b) both_inf = a_inf * numpy.isinf(b) cmp_elemwise = numpy.where( both_inf & cmp_elemwise, a == b, cmp_elemwise) both_inf = numpy.where(both_inf, (a == b), both_inf) if allow_remove_inf: both_inf += a_inf if allow_remove_nan: both_missing += a_missing return (cmp_elemwise + both_missing + both_inf).all() return False
Parameters
----------
allow_remove_inf
    If True, when there is an inf in a, we allow any value in b in that
    position. Even -inf.
allow_remove_nan
    If True, when there is a nan in a, we allow any value in b in that
    position. Even +-inf.
rtol
    Relative tolerance, passed to _allclose.
atol
    Absolute tolerance, passed to _allclose.
https://github.com/dmitriy-serdyuk/twinnet-asr/blob/799220d682306467a2b401e42e788f8c33382b00/libs/Theano/theano/tensor/type.py#L320-L404
import logging import warnings import numpy import theano from theano import config from theano.gof import hashtype, Type, Variable from theano import scalar as scal _logger = logging.getLogger("theano.tensor.type") class TensorType(Type): filter_checks_isfinite = False def __init__(self, dtype, broadcastable, name=None, sparse_grad=False): self.dtype = str(dtype) if self.dtype == 'floatX': self.dtype = config.floatX self.broadcastable = tuple(bool(b) for b in broadcastable) self.dtype_specs() self.name = name self.numpy_dtype = numpy.dtype(self.dtype) self.sparse_grad = sparse_grad if sparse_grad: warnings.warn( "DEPRECATION WARNING: You use an old interface to" " AdvancedSubtensor1 sparse_grad. Now use" " theano.sparse_grad(a_tensor[an_int_vector]).") def clone(self, dtype=None, broadcastable=None): if dtype is None: dtype = self.dtype if broadcastable is None: broadcastable = self.broadcastable return self.__class__(dtype, broadcastable, name=self.name, sparse_grad=self.sparse_grad) def filter(self, data, strict=False, allow_downcast=None): if isinstance(data, Variable): raise TypeError( 'Expected an array-like object, but found a Variable: ' 'maybe you are trying to call a function on a (possibly ' 'shared) variable instead of a numeric array?') if ((type(data) is numpy.ndarray) and (data.dtype == self.numpy_dtype)): if data.dtype.num != self.numpy_dtype.num: data = theano._asarray(data, dtype=self.dtype) elif ((type(data) is numpy.memmap) and (data.dtype == self.numpy_dtype)): pass elif strict: if not (type(data) is numpy.ndarray): raise TypeError("%s expected a ndarray object." % self, data, type(data)) if data.dtype != self.numpy_dtype: raise TypeError(("%s expected a ndarray object with " "dtype = %s (got %s).") % (self, self.numpy_dtype, data.dtype)) assert False, "This point should never be reached." else: if allow_downcast: data = theano._asarray(data, dtype=self.dtype) else: if isinstance(data, numpy.ndarray): up_dtype = scal.upcast(self.dtype, data.dtype) if up_dtype == self.dtype: data = theano._asarray(data, dtype=self.dtype) if up_dtype != self.dtype: err_msg = ( '%s cannot store a value of dtype %s without ' 'risking loss of precision. If you do not mind ' 'this loss, you can: ' '1) explicitly cast your data to %s, or ' '2) set "allow_input_downcast=True" when calling ' '"function".' % (self, data.dtype, self.dtype)) raise TypeError(err_msg, data) elif (allow_downcast is None and type(data) is float and self.dtype == theano.config.floatX): data = theano._asarray(data, self.dtype) else: converted_data = theano._asarray(data, self.dtype) if TensorType.values_eq(numpy.asarray(data), converted_data, force_same_dtype=False): data = converted_data else: str_data = str(data) if len(str_data) > 80: str_data = str_data[:75] + '(...)' err_msg = ( '%s cannot store accurately value %s, ' 'it would be represented as %s. ' 'If you do not mind this precision loss, you can: ' '1) explicitly convert your data to a numpy array ' 'of dtype %s, or ' '2) set "allow_input_downcast=True" when calling ' '"function".' % (self, data, converted_data, self.dtype)) raise TypeError(err_msg, data) if self.ndim != data.ndim: raise TypeError("Wrong number of dimensions: expected %s," " got %s with shape %s." % (self.ndim, data.ndim, data.shape)) if not data.flags.aligned: try: msg = "object buffer" + str(data.data) except AttributeError: msg = "" raise TypeError("The numpy.ndarray object is not aligned." 
" Theano C code does not support that.", msg, "object shape", data.shape, "object strides", data.strides, "object dtype", data.dtype) i = 0 for b in self.broadcastable: if b and data.shape[i] != 1: raise TypeError("Non-unit value on shape on a broadcastable" " dimension.", data.shape, self.broadcastable) i += 1 if (self.filter_checks_isfinite and not numpy.all(numpy.isfinite(data))): raise ValueError("non-finite elements not allowed") return data def filter_variable(self, other, allow_convert=True): if hasattr(other, '_as_TensorVariable'): other = other._as_TensorVariable() if not isinstance(other, Variable): other = self.Constant(type=self, data=other) if other.type == self: return other if allow_convert: other2 = self.convert_variable(other) if other2 is not None and other2.type == self: return other2 raise TypeError( 'Cannot convert Type %(othertype)s ' '(of Variable %(other)s) into Type %(self)s. ' 'You can try to manually convert %(other)s into a %(self)s.' % dict(othertype=other.type, other=other, self=self)) def value_validity_msg(self, a): try: self.filter(a, strict=True) except Exception as e: return str(e) return "value is valid" def dtype_specs(self): try: return { 'float16': (float, 'npy_float16', 'NPY_FLOAT16'), 'float32': (float, 'npy_float32', 'NPY_FLOAT32'), 'float64': (float, 'npy_float64', 'NPY_FLOAT64'), 'uint8': (int, 'npy_uint8', 'NPY_UINT8'), 'int8': (int, 'npy_int8', 'NPY_INT8'), 'uint16': (int, 'npy_uint16', 'NPY_UINT16'), 'int16': (int, 'npy_int16', 'NPY_INT16'), 'uint32': (int, 'npy_uint32', 'NPY_UINT32'), 'int32': (int, 'npy_int32', 'NPY_INT32'), 'uint64': (int, 'npy_uint64', 'NPY_UINT64'), 'int64': (int, 'npy_int64', 'NPY_INT64'), 'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'), 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64') }[self.dtype] except KeyError: raise TypeError("Unsupported dtype for %s: %s" % (self.__class__.__name__, self.dtype)) def to_scalar_type(self): return scal.get_scalar_type(dtype=self.dtype) def __eq__(self, other): return type(self) == type(other) and other.dtype == self.dtype and other.broadcastable == self.broadcastable def convert_variable(self, var): if (type(self) == type(var.type) and self.dtype == var.type.dtype and self.ndim == var.type.ndim and all(sb == ob or ob for sb, ob in zip(self.broadcastable, var.type.broadcastable))): return theano.tensor.patternbroadcast(var, self.broadcastable) @staticmethod def may_share_memory(a, b): if isinstance(a, numpy.ndarray) and isinstance(b, numpy.ndarray): return numpy.may_share_memory(a, b) else: return False @staticmethod def values_eq(a, b, force_same_dtype=True): if a.shape != b.shape: return False if force_same_dtype and a.dtype != b.dtype: return False a_eq_b = (a == b) r = numpy.all(a_eq_b) if r: return True a_missing = numpy.isnan(a) if a_missing.any(): b_missing = numpy.isnan(b) return numpy.all(a_eq_b + (a_missing == b_missing)) else: return False @staticmethod
MIT License
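A plain-NumPy sketch of the comparison rule that values_eq_approx implements: an element-wise tolerance check in which matching NaNs and matching infinities count as equal. This illustrates the semantics only; it is not Theano's own code and it omits the allow_remove_inf / allow_remove_nan options.

import numpy as np

def approx_equal(a, b, rtol=1e-5, atol=1e-8):
    a, b = np.asarray(a), np.asarray(b)
    if a.shape != b.shape or a.dtype != b.dtype:
        return False
    with np.errstate(invalid="ignore"):
        close = np.absolute(a - b) <= (atol + rtol * np.absolute(b))
    both_nan = np.isnan(a) & np.isnan(b)          # NaN in the same position on both sides
    both_inf = np.isinf(a) & np.isinf(b) & (a == b)  # same-signed infinities
    return bool(np.all(close | both_nan | both_inf))

x = np.array([1.0, np.nan, np.inf])
y = np.array([1.0 + 1e-9, np.nan, np.inf])
print(approx_equal(x, y))  # True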
mardix/assembly
assembly/assembly.py
_get_action_endpoint
python
def _get_action_endpoint(action):
    _endpoint = None
    if inspect.ismethod(action) and hasattr(action, "_rule_cache"):
        rc = action._rule_cache
        if rc:
            k = list(rc.keys())[0]
            rules = rc[k]
            len_rules = len(rules)
            if len_rules == 1:
                rc_kw = rules[0][1]
                _endpoint = rc_kw.get("endpoint", None)
                if not _endpoint:
                    _endpoint = _make_routename_from_endpoint(action)
            elif len_rules > 1:
                _prefix = _make_routename_from_endpoint(action)
                for r in Assembly._app.url_map.iter_rules():
                    if ('GET' in r.methods or 'POST' in r.methods) and _prefix in r.endpoint:
                        _endpoint = r.endpoint
                        break
    return _endpoint
Return the endpoint based on the view's action.

:param action:
:return:
https://github.com/mardix/assembly/blob/4c993d19bc9d33c1641323e03231e9ecad711b38/assembly/assembly.py#L745-L769
import re import os import sys import six import jinja2 import inspect import logging import werkzeug import functools import arrow as date import pkg_resources import logging.config from . import utils, _db, about from flask_assets import Environment import werkzeug.exceptions as HTTPError from werkzeug.wrappers import BaseResponse from werkzeug.exceptions import HTTPException from werkzeug.middleware.proxy_fix import ProxyFix from werkzeug.routing import (BaseConverter, parse_rule) from flask import (Flask, g, render_template, flash, session, make_response, Response, request, abort, url_for as f_url_for, redirect as f_redirect) __all__ = [ "Assembly", "db", "env", "ext", "date", "views", "config", "models", "url_for", "decorate", "redirect", "HTTPError", "get_cookie", "set_cookie", "app_context", "delete_cookie", ] env = os.environ config = utils.DotDict() views = type('', (), {}) models = type('', (), {}) ext = type('', (), {}) db = _db.ActiveAlchemyProxy() def app_context(kls): if not hasattr(kls, "__call__"): raise AssemblyError("app_context: '%s' is not callable" % kls) Assembly._init_apps.add(kls) return kls def url_for(endpoint, **kw): _endpoint = None if isinstance(endpoint, six.string_types): return f_url_for(endpoint, **kw) else: if isinstance(endpoint, Assembly): fn = sys._getframe().f_back.f_code.co_name endpoint = getattr(endpoint, fn) if inspect.ismethod(endpoint) or inspect.isfunction(endpoint): _endpoint = _get_action_endpoint(endpoint) if not _endpoint: _endpoint = _make_routename_from_endpoint(endpoint) if _endpoint: return f_url_for(_endpoint, **kw) else: raise AssemblyError('Assembly `url_for` received an invalid endpoint') def redirect(endpoint, **kw): if isinstance(endpoint, six.string_types): if "/" in endpoint: return f_redirect(endpoint) else: for r in Assembly._app.url_map.iter_rules(): if 'GET' in r.methods and endpoint in r.endpoint: endpoint = r.endpoint return f_redirect(url_for(endpoint, **kw)) def decorate(fn): def decorator(kls): if inspect.isclass(kls): apply_function_to_members(kls, fn) return kls return decorator __ASM_SET_COOKIES__ = "__ASM_SET_COOKIES__" def set_cookie(*a, **kw): cookies = [] if __ASM_SET_COOKIES__ in g: cookies = getattr(g, __ASM_SET_COOKIES__) cookies.append((a, kw)) setattr(g, __ASM_SET_COOKIES__, cookies) def delete_cookie(key, path="/", domain=None): set_cookie(key, expires=0, max_age=0, path=path, domain=domain) def get_cookie(key): return request.cookies.get(key) class Assembly(object): decorators = [] base_route = None route_prefix = None trailing_slash = True assets = None _ext = set() __special_methods = ["get", "put", "patch", "post", "delete", "index"] _app = None _init_apps = set() _template_fsl = {} _static_paths = set() @classmethod def init(cls, import_name, views_list, app_name="default", app_env="Development" ): app = import_name if isinstance(import_name, Flask) else Flask(import_name) if env.get("ASSEMBLY_APP"): app_name = env.get("ASSEMBLY_APP") if env.get("ASSEMBLY_ENV"): app_env = env.get("ASSEMBLY_ENV") app_env = app_env.lower().capitalize() app.config.from_object("lib.config.%s" % app_env) if not config: config.update(app.config.items()) if app.config.get("USE_PROXY_FIX", True): app.wsgi_app = ProxyFix(app.wsgi_app) app.url_map.converters['regex'] = _RegexConverter cls._app = app cls.assets = Environment(app) cls._setup_db__(app) werkzeug.import_string("lib.models", True) try: if app_name not in views_list: raise AssemblyError("Missing project: %s" % app_name) for view in views_list[app_name]: 
cls._load_view_from_string__(view) except ImportError as ie1: logging.critical(ie1) cls._register_models__() if cls._template_fsl: loader = [app.jinja_loader, _JinjaPrefixLoader(cls._template_fsl)] app.jinja_loader = jinja2.ChoiceLoader(loader) cls.assets.load_path = [app.static_folder] + list(cls._static_paths) for p in cls.assets.load_path: f = "%s/assets.yml" % p if os.path.isfile(f): cls.assets.from_yaml(f) [init_app(app) for init_app in cls._init_apps] for subcls in cls.__subclasses__(): base_route = subcls.base_route if not base_route: base_route = utils.dasherize(utils.underscore(subcls.__name__)) if subcls.__name__.lower() == "index": base_route = "/" subcls._register__(app, base_route=base_route) return app @classmethod def _load_view_from_string__(cls, view): werkzeug.import_string(view) _register_application_template(view, view) @classmethod def render(cls, data={}, __template__=None, **kwargs): vars = dict( __NAME__=about.__title__, __VERSION__=about.__version__, __YEAR__=date.utcnow().year ) for k, v in vars.items(): setattr(g, k, v) if not __template__: stack = inspect.stack()[1] action_name = stack[3] __template__ = _make_template_path(cls, action_name) data = data or {} data.update(kwargs) return render_template(__template__, **data) @classmethod def _setup_db__(cls, app): uri = config.get("DB_URL") if uri: db.connect__(uri, app) @classmethod def _register_models__(cls): if db._IS_OK_: _register_models(**{m.__name__: m for m in db.Model.__subclasses__() if not hasattr(models, m.__name__)}) @classmethod def _register__(cls, app, base_route=None, subdomain=None, route_prefix=None, trailing_slash=True): if cls is Assembly: raise TypeError("cls must be a subclass of Assembly, not Assembly itself") _register_view(cls) if base_route: cls.orig_base_route = cls.base_route cls.base_route = base_route if route_prefix: cls.orig_route_prefix = cls.route_prefix cls.route_prefix = route_prefix if not subdomain: if hasattr(app, "subdomain") and app.subdomain is not None: subdomain = app.subdomain elif hasattr(cls, "subdomain"): subdomain = cls.subdomain if trailing_slash is not None: cls.orig_trailing_slash = cls.trailing_slash cls.trailing_slash = trailing_slash for name, value in _get_interesting_members(Assembly, cls): proxy = cls._make_proxy_method__(name) route_name = _make_routename_from_cls(cls, name) try: if hasattr(value, "_rule_cache") and name in value._rule_cache: for idx, cached_rule in enumerate(value._rule_cache[name]): rule, options = cached_rule rule = cls._build_rule__(rule) sub, ep, options = cls._parse_options__(options) if not subdomain and sub: subdomain = sub if ep: endpoint = ep elif len(value._rule_cache[name]) == 1: endpoint = route_name else: endpoint = "%s_%d" % (route_name, idx,) app.add_url_rule(rule, endpoint, proxy, subdomain=subdomain, **options) elif name in cls.__special_methods: if name in ["get", "index"]: methods = ["GET"] if name == "index": if hasattr(value, "_methods_cache"): methods = value._methods_cache else: methods = [name.upper()] rule = cls._build_rule__("/", value) if not cls.trailing_slash: rule = rule.rstrip("/") app.add_url_rule(rule, route_name, proxy, methods=methods, subdomain=subdomain) else: methods = value._methods_cache if hasattr(value, "_methods_cache") else ["GET"] name = utils.dasherize(name) route_str = '/%s/' % name if not cls.trailing_slash: route_str = route_str.rstrip('/') rule = cls._build_rule__(route_str, value) app.add_url_rule(rule, route_name, proxy, subdomain=subdomain, methods=methods) except 
DecoratorCompatibilityError: raise DecoratorCompatibilityError( "Incompatible decorator detected on %s in class %s" % (name, cls.__name__)) for name, fn in _get_interesting_members_http_error(Assembly, cls): match = _match_http_error(name) if match: try: mname = match.groups()[0] exc = int(mname) if mname.isdigit() else HTTPException app.register_error_handler(exc, lambda e: cls._error_handler__(fn, e)) except KeyError as kE: raise AssemblyError(str(kE) + " - module: '%s'" % _get_full_method_name(fn)) if hasattr(cls, "orig_base_route"): cls.base_route = cls.orig_base_route del cls.orig_base_route if hasattr(cls, "orig_route_prefix"): cls.route_prefix = cls.orig_route_prefix del cls.orig_route_prefix if hasattr(cls, "orig_trailing_slash"): cls.trailing_slash = cls.orig_trailing_slash del cls.orig_trailing_slash @classmethod def _parse_options__(cls, options): options = options.copy() subdomain = options.pop('subdomain', None) endpoint = options.pop('endpoint', None) return subdomain, endpoint, options, @classmethod def _make_proxy_method__(cls, name): i = cls() view = getattr(i, name) for decorator in cls.decorators: view = decorator(view) @functools.wraps(view) def proxy(**forgettable_view_args): del forgettable_view_args if hasattr(i, "_before_request"): response = i._before_request(name, **request.view_args) if response is not None: return response before_view_name = "_before_" + name if hasattr(i, before_view_name): before_view = getattr(i, before_view_name) response = before_view(**request.view_args) if response is not None: return response response = view(**request.view_args) if isinstance(response, dict) or response is None: response = response or {} if hasattr(i, "_renderer"): response = i._renderer(response) else: template = _make_template_path(cls, view.__name__) response.setdefault("__template__", template) response = i.render(**response) if not isinstance(response, Response): response = make_response(response) for ext in cls._ext: response = ext(response) after_view_name = "_after_" + name if hasattr(i, after_view_name): after_view = getattr(i, after_view_name) response = after_view(response) if hasattr(i, "_after_request"): response = i._after_request(name, response) if __ASM_SET_COOKIES__ in g: cookies = g.pop(__ASM_SET_COOKIES__) for cookie in cookies: response.set_cookie(*cookie[0], **cookie[1]) return response return proxy @classmethod def _build_rule__(cls, rule, method=None): rule_parts = [] if cls.route_prefix: rule_parts.append(cls.route_prefix) base_route = cls._get_base_route__() if base_route: rule_parts.append(base_route) rule_parts.append(rule) ignored_rule_args = ['self'] if hasattr(cls, 'base_args'): ignored_rule_args += cls.base_args if method: args = _get_true_argspec(method)[0] for arg in args: if arg not in ignored_rule_args: rule_parts.append("<%s>" % arg) result = "/%s" % "/".join(rule_parts) return re.sub(r'(/)\1+', r'\1', result) @classmethod def _get_base_route__(cls): base_route = cls.__name__.lower() if cls.base_route is not None: base_route = cls.base_route base_rule = parse_rule(base_route) cls.base_args = [r[2] for r in base_rule] return base_route.strip("/") @classmethod def _error_handler__(cls, fn, e): resp = fn(cls, e) if isinstance(resp, Response) or isinstance(resp, BaseResponse): return resp if isinstance(resp, dict) or isinstance(resp, tuple) or resp is None: data, status, headers = utils.prepare_view_response(resp) if "__template__" not in data: data["__template__"] = _make_template_path(cls, fn.__name__.lstrip("_")) return 
cls.render(**resp), e.code, headers return resp def apply_function_to_members(cls, fn): for name, method in _get_interesting_members(Assembly, cls): setattr(cls, name, fn(method)) def _sanitize_module_name(module_name): return module_name.replace(".views", "") def _get_full_method_name(mtd): return "%s.%s" % (mtd.__module__, mtd.__name__) def _register_models(**kwargs): [setattr(models, k, v) for k, v in kwargs.items()] def _register_view(cls): mod = views module_name = _sanitize_module_name(cls.__module__) if "." in module_name: for k in module_name.split("."): if not hasattr(mod, k): setattr(mod, k, type('', (), {})) mod = getattr(mod, k) setattr(mod, cls.__name__, cls)
MIT License
qinenergy/adanet
convlarge/cifar10.py
load_cifar10
python
def load_cifar10(): dest_directory = FLAGS.data_dir if not os.path.exists(dest_directory): os.makedirs(dest_directory) filename = DATA_URL.split('/')[-1] filepath = os.path.join(dest_directory, filename) if not os.path.exists(filepath): def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dest_directory) print("Loading training data...") train_images = np.zeros((NUM_EXAMPLES_TRAIN, 3 * 32 * 32), dtype=np.float32) train_labels = [] for i, data_fn in enumerate( sorted(glob.glob(FLAGS.data_dir + '/cifar-10-batches-py/data_batch*'))): batch = unpickle(data_fn) train_images[i * 10000:(i + 1) * 10000] = batch['data'] train_labels.extend(batch['labels']) train_images = (train_images - 127.5) / 255. train_labels = np.asarray(train_labels, dtype=np.int64) rand_ix = np.random.permutation(NUM_EXAMPLES_TRAIN) train_images = train_images[rand_ix] train_labels = train_labels[rand_ix] print("Loading test data...") test = unpickle(FLAGS.data_dir + '/cifar-10-batches-py/test_batch') test_images = test['data'].astype(np.float32) test_images = (test_images - 127.5) / 255. test_labels = np.asarray(test['labels'], dtype=np.int64) """ print("Apply ZCA whitening") components, mean, train_images = ZCA(train_images) np.save('{}/components'.format(FLAGS.data_dir), components) np.save('{}/mean'.format(FLAGS.data_dir), mean) test_images = np.dot(test_images - mean, components.T) """ train_images = train_images.reshape( (NUM_EXAMPLES_TRAIN, 3, 32, 32)).transpose((0, 2, 3, 1)).reshape((NUM_EXAMPLES_TRAIN, -1)) test_images = test_images.reshape( (NUM_EXAMPLES_TEST, 3, 32, 32)).transpose((0, 2, 3, 1)).reshape((NUM_EXAMPLES_TEST, -1)) return (train_images, train_labels), (test_images, test_labels)
Download and extract the tarball from Alex's website.
https://github.com/qinenergy/adanet/blob/569037ac1784513b4980d0e95cc291ca2899646d/convlarge/cifar10.py#L55-L109
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import tarfile import numpy as np from scipy import linalg import glob import pickle from six.moves import xrange from six.moves import urllib import tensorflow as tf from dataset_utils_cifar import * DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('data_dir', './dataset/cifar10', 'where to store the dataset') tf.app.flags.DEFINE_integer('num_labeled_examples', 4000, "The number of labeled examples") tf.app.flags.DEFINE_integer('num_valid_examples', 1000, "The number of validation examples") tf.app.flags.DEFINE_integer('dataset_seed', 1, "dataset seed") IMAGE_SIZE = 32 NUM_CLASSES = 10 NUM_EXAMPLES_TRAIN = 50000 NUM_EXAMPLES_TEST = 10000
MIT License
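The only non-obvious step in load_cifar10 is the layout change at the end: each CIFAR-10 row holds 3072 values stored channel-major (3, 32, 32), and the code converts it to height-major (32, 32, 3) before flattening again. A small illustration with random stand-in data:

import numpy as np

n = 4                                          # small stand-in for NUM_EXAMPLES_TRAIN
rows = np.random.rand(n, 3 * 32 * 32).astype(np.float32)

hwc = rows.reshape((n, 3, 32, 32)).transpose((0, 2, 3, 1))   # NCHW -> NHWC
flat = hwc.reshape((n, -1))                                   # back to (n, 3072)
print(hwc.shape, flat.shape)                                  # (4, 32, 32, 3) (4, 3072)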
rangilyu/nanodet
nanodet/trainer/task.py
TrainingTask.scalar_summary
python
def scalar_summary(self, tag, phase, value, step):
    if self.local_rank < 1:
        self.logger.experiment.add_scalars(tag, {phase: value}, step)
Write Tensorboard scalar summary log.

Args:
    tag: Name for the tag
    phase: 'Train' or 'Val'
    value: Value to record
    step: Step value to record
https://github.com/rangilyu/nanodet/blob/d7caaca17b08731b9ed42441e09916902f735c69/nanodet/trainer/task.py#L332-L343
import copy import json import logging import os import warnings from typing import Any, List import torch import torch.distributed as dist from pytorch_lightning import LightningModule from nanodet.data.batch_process import stack_batch_img from nanodet.util import gather_results, mkdir from ..model.arch import build_model class TrainingTask(LightningModule): def __init__(self, cfg, evaluator=None): super(TrainingTask, self).__init__() self.cfg = cfg self.model = build_model(cfg.model) self.evaluator = evaluator self.save_flag = -10 self.log_style = "NanoDet" def _preprocess_batch_input(self, batch): batch_imgs = batch["img"] if isinstance(batch_imgs, list): batch_imgs = [img.to(self.device) for img in batch_imgs] batch_img_tensor = stack_batch_img(batch_imgs, divisible=32) batch["img"] = batch_img_tensor return batch def forward(self, x): x = self.model(x) return x @torch.no_grad() def predict(self, batch, batch_idx=None, dataloader_idx=None): batch = self._preprocess_batch_input(batch) preds = self.forward(batch["img"]) results = self.model.head.post_process(preds, batch) return results def on_train_start(self) -> None: if self.current_epoch > 0: self.lr_scheduler.last_epoch = self.current_epoch - 1 def training_step(self, batch, batch_idx): batch = self._preprocess_batch_input(batch) preds, loss, loss_states = self.model.forward_train(batch) if self.log_style == "Lightning": self.log( "lr", self.optimizers().param_groups[0]["lr"], on_step=True, on_epoch=False, prog_bar=True, ) for k, v in loss_states.items(): self.log( "Train/" + k, v, on_step=True, on_epoch=True, prog_bar=True, sync_dist=True, ) elif ( self.log_style == "NanoDet" and self.global_step % self.cfg.log.interval == 0 ): lr = self.optimizers().param_groups[0]["lr"] log_msg = "Train|Epoch{}/{}|Iter{}({})| lr:{:.2e}| ".format( self.current_epoch + 1, self.cfg.schedule.total_epochs, self.global_step, batch_idx, lr, ) self.scalar_summary("Train_loss/lr", "Train", lr, self.global_step) for loss_name in loss_states: log_msg += "{}:{:.4f}| ".format( loss_name, loss_states[loss_name].mean().item() ) self.scalar_summary( "Train_loss/" + loss_name, "Train", loss_states[loss_name].mean().item(), self.global_step, ) self.info(log_msg) return loss def training_epoch_end(self, outputs: List[Any]) -> None: self.trainer.save_checkpoint(os.path.join(self.cfg.save_dir, "model_last.ckpt")) self.lr_scheduler.step() def validation_step(self, batch, batch_idx): batch = self._preprocess_batch_input(batch) preds, loss, loss_states = self.model.forward_train(batch) if self.log_style == "Lightning": self.log( "Val/loss", loss, on_step=True, on_epoch=False, prog_bar=True, logger=False, ) for k, v in loss_states.items(): self.log( "Val/" + k, v, on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, ) elif self.log_style == "NanoDet" and batch_idx % self.cfg.log.interval == 0: lr = self.optimizers().param_groups[0]["lr"] log_msg = "Val|Epoch{}/{}|Iter{}({})| lr:{:.2e}| ".format( self.current_epoch + 1, self.cfg.schedule.total_epochs, self.global_step, batch_idx, lr, ) for loss_name in loss_states: log_msg += "{}:{:.4f}| ".format( loss_name, loss_states[loss_name].mean().item() ) self.info(log_msg) dets = self.model.head.post_process(preds, batch) return dets def validation_epoch_end(self, validation_step_outputs): results = {} for res in validation_step_outputs: results.update(res) all_results = ( gather_results(results) if dist.is_available() and dist.is_initialized() else results ) if all_results: eval_results = self.evaluator.evaluate( 
all_results, self.cfg.save_dir, rank=self.local_rank ) metric = eval_results[self.cfg.evaluator.save_key] if metric > self.save_flag: self.save_flag = metric best_save_path = os.path.join(self.cfg.save_dir, "model_best") mkdir(self.local_rank, best_save_path) self.trainer.save_checkpoint( os.path.join(best_save_path, "model_best.ckpt") ) txt_path = os.path.join(best_save_path, "eval_results.txt") if self.local_rank < 1: with open(txt_path, "a") as f: f.write("Epoch:{}\n".format(self.current_epoch + 1)) for k, v in eval_results.items(): f.write("{}: {}\n".format(k, v)) else: warnings.warn( "Warning! Save_key is not in eval results! Only save model last!" ) if self.log_style == "Lightning": for k, v in eval_results.items(): self.log( "Val_metrics/" + k, v, on_step=False, on_epoch=True, prog_bar=False, sync_dist=True, ) elif self.log_style == "NanoDet": for k, v in eval_results.items(): self.scalar_summary( "Val_metrics/" + k, "Val", v, self.current_epoch + 1 ) else: self.info("Skip val on rank {}".format(self.local_rank)) def test_step(self, batch, batch_idx): dets = self.predict(batch, batch_idx) return dets def test_epoch_end(self, test_step_outputs): results = {} for res in test_step_outputs: results.update(res) all_results = ( gather_results(results) if dist.is_available() and dist.is_initialized() else results ) if all_results: res_json = self.evaluator.results2json(all_results) json_path = os.path.join(self.cfg.save_dir, "results.json") json.dump(res_json, open(json_path, "w")) if self.cfg.test_mode == "val": eval_results = self.evaluator.evaluate( all_results, self.cfg.save_dir, rank=self.local_rank ) txt_path = os.path.join(self.cfg.save_dir, "eval_results.txt") with open(txt_path, "a") as f: for k, v in eval_results.items(): f.write("{}: {}\n".format(k, v)) else: self.info("Skip test on rank {}".format(self.local_rank)) def configure_optimizers(self): optimizer_cfg = copy.deepcopy(self.cfg.schedule.optimizer) name = optimizer_cfg.pop("name") build_optimizer = getattr(torch.optim, name) optimizer = build_optimizer(params=self.parameters(), **optimizer_cfg) schedule_cfg = copy.deepcopy(self.cfg.schedule.lr_schedule) name = schedule_cfg.pop("name") build_scheduler = getattr(torch.optim.lr_scheduler, name) self.lr_scheduler = build_scheduler(optimizer=optimizer, **schedule_cfg) return optimizer def optimizer_step( self, epoch=None, batch_idx=None, optimizer=None, optimizer_idx=None, optimizer_closure=None, on_tpu=None, using_native_amp=None, using_lbfgs=None, ): if self.trainer.global_step <= self.cfg.schedule.warmup.steps: if self.cfg.schedule.warmup.name == "constant": warmup_lr = ( self.cfg.schedule.optimizer.lr * self.cfg.schedule.warmup.ratio ) elif self.cfg.schedule.warmup.name == "linear": k = (1 - self.trainer.global_step / self.cfg.schedule.warmup.steps) * ( 1 - self.cfg.schedule.warmup.ratio ) warmup_lr = self.cfg.schedule.optimizer.lr * (1 - k) elif self.cfg.schedule.warmup.name == "exp": k = self.cfg.schedule.warmup.ratio ** ( 1 - self.trainer.global_step / self.cfg.schedule.warmup.steps ) warmup_lr = self.cfg.schedule.optimizer.lr * k else: raise Exception("Unsupported warm up type!") for pg in optimizer.param_groups: pg["lr"] = warmup_lr optimizer.step(closure=optimizer_closure) optimizer.zero_grad() def get_progress_bar_dict(self): items = super().get_progress_bar_dict() items.pop("v_num", None) items.pop("loss", None) return items
Apache License 2.0
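scalar_summary above delegates to TensorBoard's add_scalars, which groups several curves (e.g. "Train" and "Val") under one parent tag. A small sketch of that underlying call, assuming torch with TensorBoard support is installed; the log directory, tag name, and values are made up.

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir="runs/demo")
for step in range(5):
    # one curve named "Train" under the parent tag, analogous to scalar_summary(tag, "Train", value, step)
    writer.add_scalars("Train_loss/loss_qfl", {"Train": 1.0 / (step + 1)}, step)
writer.close()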
gregorch/ipet
ipet/validation/Validation.py
Validation.getPbValue
python
def getPbValue(self, pb : float, objsense : int) -> float:
    if pd.isnull(pb):
        pb = infty() if objsense == ObjectiveSenseCode.MINIMIZE else -infty()
    return pb
returns a floating point value computed from a given primal bound
https://github.com/gregorch/ipet/blob/e4135ff936d3aa447a960d854f9c51554e5ba7dc/ipet/validation/Validation.py#L179-L184
import pandas as pd from ipet.Key import ProblemStatusCodes, SolverStatusCodes, ObjectiveSenseCode from ipet import Key from ipet.misc import getInfinity as infty from ipet.misc import isInfinite as isInf import numpy as np import logging import sqlite3 logger = logging.getLogger(__name__) DEFAULT_RELTOL = 1e-4 DEFAULT_FEASTOL = 1e-6 class SolufileMarkers: OPT = "=opt=" INF = "=inf=" BEST = "=best=" UNKN = "=unkn=" BESTDUAL = "=bestdual=" FEAS = "=feas=" class DataBaseMarkers: OPT = "opt" INF = "inf" BEST = "best" class Validation: __primalidx__ = 0 __dualidx__ = 1 __feas__ = 1e99 __infeas__ = 1e100 def __init__(self, solufilename : str = None, tol : float = DEFAULT_RELTOL, feastol : float = DEFAULT_FEASTOL): if solufilename: if solufilename.endswith(".solu"): self.referencedict = self.readSoluFile(solufilename) self.objsensedict = {} else: self.referencedict, self.objsensedict = self.connectToDataBase(solufilename) logger.debug("Data base connection finished, {} items".format(len(self.referencedict.items()))) else: self.referencedict, self.objsensedict = {}, {} self.tol = tol self.inconsistentset = set() self.feastol = feastol def set_tol(self, tol : float): self.tol = tol def set_feastol(self, feastol : float): self.feastol = feastol def connectToDataBase(self, databasefilename): soludict = {} objsensedict = {} with sqlite3.connect(databasefilename) as conn: c = conn.cursor() c.execute('SELECT DISTINCT name, objsense,primbound,dualbound,status FROM instances') for name, objsense, primbound, dualbound, status in c: if name in soludict: logger.warning("Warning: Duplicate name {} with different data in data base".format(name)) infotuple = [None, None] if status == DataBaseMarkers.OPT: infotuple[self.__primalidx__] = infotuple[self.__dualidx__] = primbound elif status == DataBaseMarkers.BEST: if primbound is not None: infotuple[self.__primalidx__] = primbound if dualbound is not None: infotuple[self.__dualidx__] = dualbound elif status == DataBaseMarkers.INF: infotuple[self.__primalidx__] = self.__infeas__ objsensedict[name] = ObjectiveSenseCode.MAXIMIZE if objsense == "max" else ObjectiveSenseCode.MINIMIZE soludict[name] = tuple(infotuple) return soludict, objsensedict def readSoluFile(self, solufilename : str) -> dict: soludict = dict() with open(solufilename, "r") as solufile: for line in solufile: if line.strip() == "": continue spline = line.split() marker = spline[0] problemname = spline[1] infotuple = list(soludict.get(problemname, (None, None))) if marker == SolufileMarkers.OPT: infotuple[self.__primalidx__] = infotuple[self.__dualidx__] = float(spline[2]) elif marker == SolufileMarkers.BEST: infotuple[self.__primalidx__] = float(spline[2]) elif marker == SolufileMarkers.BESTDUAL: infotuple[self.__dualidx__] = float(spline[2]) elif marker == SolufileMarkers.FEAS: infotuple[self.__primalidx__] = self.__feas__ elif marker == SolufileMarkers.INF: infotuple[self.__primalidx__] = self.__infeas__ soludict[problemname] = tuple(infotuple) return soludict
MIT License
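getPbValue treats a missing primal bound as the worst possible bound for the given objective sense. A self-contained sketch of that substitution; the sense codes and the infinity value are stand-ins, not ipet's real constants.

import pandas as pd

MINIMIZE, MAXIMIZE = 1, -1   # stand-ins for ObjectiveSenseCode values (assumed)
INFTY = 1e20                 # stand-in for ipet.misc.getInfinity()

def pb_value(pb, objsense):
    # A missing bound becomes +inf when minimizing and -inf when maximizing,
    # mirroring getPbValue above.
    if pd.isnull(pb):
        return INFTY if objsense == MINIMIZE else -INFTY
    return pb

print(pb_value(float("nan"), MINIMIZE))   # 1e+20
print(pb_value(42.0, MAXIMIZE))           # 42.0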
pajbot/pajbot
pajbot/apiwrappers/authentication/access_token.py
AccessToken.should_refresh
python
def should_refresh(self):
    if not self.can_refresh():
        return False

    if self.expires_at is not None:
        expires_after = self.expires_at - self.created_at
    else:
        expires_after = datetime.timedelta(hours=1)

    token_age = pajbot.utils.now() - self.created_at
    max_token_age = expires_after * self.SHOULD_REFRESH_THRESHOLD
    return token_age >= max_token_age
Returns True if less than 10% of the token's lifetime remains, False otherwise
https://github.com/pajbot/pajbot/blob/42e19a692eb663556bc78d0d86eef1a667728f46/pajbot/apiwrappers/authentication/access_token.py#L39-L60
import datetime from abc import ABC, abstractmethod import pajbot class AccessToken(ABC): SHOULD_REFRESH_THRESHOLD = 0.9 def __init__(self, access_token, created_at, expires_in, token_type, refresh_token, scope): self.access_token = access_token self.created_at = created_at self.expires_in = expires_in if self.expires_in is not None: self.expires_at = self.created_at + self.expires_in else: self.expires_at = None self.token_type = token_type self.refresh_token = refresh_token self.scope = scope @abstractmethod def can_refresh(self): pass
MIT License
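A standalone illustration of the refresh arithmetic in should_refresh: with a 1-hour lifetime and a 0.9 threshold, the token should refresh once it is at least 54 minutes old. The timestamps are made up; only the stdlib is used.

import datetime

SHOULD_REFRESH_THRESHOLD = 0.9

created_at = datetime.datetime(2024, 1, 1, 12, 0, 0)
expires_after = datetime.timedelta(hours=1)
now = datetime.datetime(2024, 1, 1, 12, 55, 0)

token_age = now - created_at
max_token_age = expires_after * SHOULD_REFRESH_THRESHOLD   # 54 minutes
print(token_age >= max_token_age)                          # True (55 min >= 54 min)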
cn-uofbasel/picn
PiCN/Demos/DetectionMap/YOLO/ObjectDetection.py
detect_objects
python
def detect_objects(image, id: int=0): model_path = os.path.join(ROOT_DIR, "Demos/DetectionMap/YOLO/Model") if not os.path.exists(model_path): print("Model for object detection not found. \nDownloading from GitHub...") path = os.path.join(ROOT_DIR, "Demos/DetectionMap/YOLO") url = "https://github.com/cn-uofbasel/PiCN/releases/download/0.0.1/YOLO.zip" response = requests.get(url) print("Download done!") with zipfile.ZipFile(io.BytesIO(response.content)) as zf: for info in zf.infolist(): zf.extract(info, path) labels_file = open(os.path.join(model_path, "labels")) labels = labels_file.read().strip().split("\n") labels_file.close() labels_include_file = open(os.path.join(model_path, "labels_included")) labels_include = labels_include_file.read().strip().split("\n") labels_include_file.close() np.random.seed(42) colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8") config_path = os.path.join(model_path, "yolov3.cfg") weights_path = os.path.join(model_path, "yolov3.weights") net = cv2.dnn.readNetFromDarknet(config_path, weights_path) ln = net.getLayerNames() ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()] original_height, original_width = image.shape[:2] blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False) net.setInput(blob) layer_outputs = net.forward(ln) detections = [] boxes = [] confidences = [] class_ids = [] for output in layer_outputs: for detection in output: scores = detection[5:] class_id = np.argmax(scores) confidence = scores[class_id] name = labels[int(class_id)] if confidence > 0.5 and name in labels_include: box = detection[0:4] * np.array([original_width, original_height, original_width, original_height]) (x, y, width, height) = box.astype("int") detections.append({"id": int(class_id), "coords": (x,y)}) x = int(x - (width / 2)) y = int(y - (height / 2)) boxes.append([x, y, int(width), int(height)]) confidences.append(float(confidence)) class_ids.append(class_id) idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3) if len(idxs) > 0: for i in idxs.flatten(): (x, y) = (boxes[i][0], boxes[i][1]) (w, h) = (boxes[i][2], boxes[i][3]) color = [int(c) for c in colors[class_ids[i]]] cv2.rectangle(image, (x, y), (x + w, y + h), color, 2) if not os.path.exists(os.path.join(ROOT_DIR, "Demos/DetectionMap/Assets/Classified")): os.makedirs(os.path.join(ROOT_DIR, "Demos/DetectionMap/Assets/Classified")) plt.imsave(os.path.join(ROOT_DIR, f"Demos/DetectionMap/Assets/Classified/classified{id}.jpg"), image) return [detections[i] for i in idxs.flatten()], colors
Detect objects and classify them. The model being used is YOLOv3, while only
labels in "/Demos/DetectionMap/Model/labels_included" are considered.

:param image: The input image
:return: List of dicts containing the name and the x, y-coordinates of the
    classified objects
https://github.com/cn-uofbasel/picn/blob/64ed40242657238e9f1d522d5873173f0b93a30e/PiCN/Demos/DetectionMap/YOLO/ObjectDetection.py#L12-L99
import io import os import zipfile import matplotlib.pyplot as plt import requests from PiCN.definitions import ROOT_DIR from PiCN.Demos.DetectionMap.DetectionMapObject import * from PiCN.Demos.DetectionMap.Monodepth.MonodepthModel import *
BSD 3-Clause New or Revised License
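YOLO reports each box as a normalised centre plus width/height; detect_objects scales these to pixels and converts them to a top-left corner before calling cv2.dnn.NMSBoxes. A tiny NumPy illustration of that conversion with a made-up detection and image size:

import numpy as np

orig_w, orig_h = 640, 480
detection = np.array([0.5, 0.5, 0.25, 0.5])           # (cx, cy, w, h), normalised

cx, cy, w, h = detection * np.array([orig_w, orig_h, orig_w, orig_h])
x = int(cx - w / 2)                                   # top-left corner x
y = int(cy - h / 2)                                   # top-left corner y
print([x, y, int(w), int(h)])                         # [240, 120, 160, 240]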
bbn-q/qgl
QGL/BasicSequences/RB.py
SingleQubitIRB_AC
python
def SingleQubitIRB_AC(qubit, seqFile, showPlot=False): deprication(unmaintained_str) pulseLib = [AC(qubit, cliffNum) for cliffNum in range(24)] pulseLib.append(pulseLib[0]) measBlock = MEAS(qubit) with open(seqFile, 'r') as FID: fileReader = reader(FID) seqs = [] for pulseSeqStr in fileReader: seq = [] for pulseStr in pulseSeqStr: seq.append(pulseLib[int(pulseStr)]) seq.append(measBlock) seqs.append(seq) numRandomizations = 36 for ct in range(numRandomizations): chunk = seqs[ct::numRandomizations] chunk1 = chunk[::2] chunk2 = chunk[1::2] chunk1 += [[Id(qubit), measBlock], [X(qubit), measBlock]] metafile = compile_to_hardware(chunk1, 'RB/RB', suffix='_{0}'.format(2 * ct + 1)) chunk2 += [[Id(qubit), measBlock], [X(qubit), measBlock]] metafile = compile_to_hardware(chunk2, 'RB/RB', suffix='_{0}'.format(2 * ct + 2)) if showPlot: plot_pulse_files(metafile) return metafile
Single qubit interleaved randomized benchmarking using atomic Clifford pulses.

Parameters
----------
qubit : Channels.LogicalChannel
    Logical channel to implement sequence
seqFile : string
    String defining the path to the file with sequence strings
showPlot : boolean, optional
    Whether to plot

Returns
-------
metafile : string
    Path to a json metafile with details about the sequences and paths
    to compiled machine files

Examples
--------
>>> seqs = create_RB_seqs(1, [2,4,8], repeats=2, interleaveGate=1);
>>> mf = SingleQubitIRB_AC(q1, '/path/to/seq/strings/file');
Compiled 10 sequences.
>>> mf
'/path/to/exp/exp-meta.json'
https://github.com/bbn-q/qgl/blob/8df9ce5d68dca61c8c057d34a1c08b98eb910a43/QGL/BasicSequences/RB.py#L662-L729
from ..PulsePrimitives import * from ..Cliffords import * from ..Compiler import compile_to_hardware from ..PulseSequencePlotter import plot_pulse_files from ..tools.clifford_tools import clifford_mat, inverse_clifford from .helpers import create_cal_seqs, cal_descriptor from ..config import logger import os from csv import reader import numpy as np from functools import reduce import warnings from typing import List, Mapping, Iterable def create_RB_seqs(numQubits: int, lengths: Iterable[int], repeats: int = 32, interleaveGate: int = None, recovery: bool = True) -> List[List[int]]: if numQubits == 1: cliffGroupSize = 24 elif numQubits == 2: cliffGroupSize = 11520 else: raise Exception("Can only handle one or two qubits.") seqs = [] for length in lengths: seqs += np.random.randint(0, cliffGroupSize, size=(repeats, length - 1)).tolist() if interleaveGate: newSeqs = [] for seq in seqs: newSeqs.append(np.vstack((np.array( seq, dtype=np.int), interleaveGate * np.ones( len(seq), dtype=np.int))).flatten(order='F').tolist()) seqs = newSeqs if recovery: for seq in seqs: if len(seq) == 1: mat = clifford_mat(seq[0], numQubits) else: mat = reduce(lambda x, y: np.dot(y, x), [clifford_mat(c, numQubits) for c in seq]) seq.append(inverse_clifford(mat)) return seqs def SingleQubitRB(qubit: Channels.LogicalChannel, seqs: List[List[int]], cliff_type: str = 'std', purity: bool = False, showPlot: bool = False, add_cals: bool = True) -> str: if cliff_type.upper() not in clifford_map.keys(): raise ValueError(f"Unknown clifford type: must be one of {clifford.map.keys()}.") clifford = clifford_map[cliff_type.upper()] seqsBis = [] op = [Id(qubit, length=0), Y90m(qubit), X90(qubit)] for ct in range(3 if purity else 1): for seq in seqs: seqsBis.append([clifford(qubit,c) for c in seq]) seqsBis[-1].append(op[ct]) seqsBis[-1].append(MEAS(qubit)) axis_descriptor = [{ 'name': 'length', 'unit': None, 'points': list(map(len, seqs)), 'partition': 1 }] if add_cals: seqsBis += create_cal_seqs((qubit, ), 2) axis_descriptor.append(cal_descriptor((qubit,), 2)) metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs}) if showPlot: plot_pulse_files(metafile) return metafile def SingleQubitLeakageRB(qubit: Channels.LogicalChannel, seqs: List[List[int]], pi2args: Mapping[int, str], cliff_type: str = 'std', showPlot: bool = False) -> str: if cliff_type.upper() not in clifford_map.keys(): raise ValueError(f"Unknown clifford type: must be one of {clifford.map.keys()}.") clifford = clifford_map[cliff_type.upper()] seqsBis = [] for seq in seqs: combined_seq = [clifford(qubit, c) for c in seq] seqsBis.append(combined_seq + [Id(qubit), Id(qubit), MEAS(qubit)]) seqsBis.append(combined_seq + [X90(qubit), X90(qubit), MEAS(qubit)]) seqsBis.append([Id(qubit), Id(qubit), Id(qubit), Id(qubit), MEAS(qubit)]) seqsBis.append([X90(qubit), X90(qubit), Id(qubit), Id(qubit), MEAS(qubit)]) seqsBis.append([X90(qubit), X90(qubit), X90(qubit, **pi2args), X90(qubit, **pi2args), MEAS(qubit)]) axis_descriptor = [ { 'name': 'length', 'unit': None, 'points': [len(s) for s in seqs for i in range(2)], 'partition': 1 }, { 'name': 'calibration', 'unit': 'state', 'partition': 2, 'points': ['0', '1', '2'] }] metafile = compile_to_hardware(seqsBis, 'RB/LRB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs}) if showPlot: plot_pulse_files(metafile) return metafile def TwoQubitRB(q1: Channels.LogicalChannel, q2: Channels.LogicalChannel, seqs: List[List[int]], meas_qubits: 
Iterable[Channels.LogicalChannel] = None, cliff_type: str = 'std', showPlot: bool = False, suffix: str = "", add_cals: bool = True) -> str: seqsBis = [] for seq in seqs: seqsBis.append(reduce(operator.add, [TwoQubitClifford(q2, q1, c, kind=cliff_type) for c in seq])) for seq in seqsBis: if not meas_qubits: meas_qubits = (q1,q2) seq.append(reduce(operator.mul, [MEAS(q) for q in meas_qubits])) axis_descriptor = [{ 'name': 'length', 'unit': None, 'points': list(map(len, seqs)), 'partition': 1 }] if add_cals: seqsBis += create_cal_seqs((q1, q2), 2, measChans = meas_qubits) axis_descriptor.append(cal_descriptor((q1, q2), 2)) metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, suffix = suffix, extra_meta = {'sequences':seqs}) if showPlot: plot_pulse_files(metafile) return metafile def TwoQubitLeakageRB(q1: Channels.LogicalChannel, q2: Channels.LogicalChannel, meas_qubit: Iterable[Channels.LogicalChannel], seqs: List[List[int]], pi2args: Mapping[int, str], cliff_type: str = 'std', showPlot: bool = False) -> str: seqsBis = [] for seq in seqs: combined_seq = reduce(operator.add, [TwoQubitClifford(q2, q1, c, kind=cliff_type) for c in seq]) seqsBis.append(combined_seq + [Id(meas_qubit), Id(meas_qubit), MEAS(meas_qubit)]) seqsBis.append(combined_seq + [X90(meas_qubit), X90(meas_qubit), MEAS(meas_qubit)]) seqsBis.append([Id(meas_qubit), Id(meas_qubit), Id(meas_qubit), Id(meas_qubit), MEAS(meas_qubit)]) seqsBis.append([X90(meas_qubit), X90(meas_qubit), Id(meas_qubit), Id(meas_qubit), MEAS(meas_qubit)]) seqsBis.append([X90(meas_qubit), X90(meas_qubit), X90(meas_qubit, **pi2args), X90(meas_qubit, **pi2args), MEAS(meas_qubit)]) axis_descriptor = [ { 'name': 'length', 'unit': None, 'points': [len(s) for s in seqs for i in range(2)], 'partition': 1 }, { 'name': 'calibration', 'unit': 'state', 'partition': 2, 'points': ['0', '1', '2'] }] metafile = compile_to_hardware(seqsBis, 'RB/LRB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs}) if showPlot: plot_pulse_files(metafile) return metafile def SimultaneousRB(qubits: Iterable[Channels.LogicalChannel], seqs: List[List[int]], showPlot: bool = False, cliff_type: str = 'std', add_cals: bool = True) -> str: if cliff_type.upper() not in clifford_map.keys(): raise ValueError(f"Unknown clifford type: must be one of {clifford.map.keys()}.") clifford = clifford_map[cliff_type.upper()] seqsBis = [] for seq in zip(*seqs): seqsBis.append([reduce(operator.__mul__, [clifford(q, c) for q, c in zip(qubits, pulseNums)]) for pulseNums in zip(*seq)]) for seq in seqsBis: seq.append(reduce(operator.mul, [MEAS(q) for q in qubits])) axis_descriptor = [{ 'name': 'length', 'unit': None, 'points': list(map(len, seqs)), 'partition': 1 }] if add_cals: seqsBis += create_cal_seqs((qubits), 2) axis_descriptor.append(cal_descriptor((qubits), 2)) metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs}) if showPlot: plot_pulse_files(metafile) return metafile class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKCYAN = '\033[96m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' unmaintained_str = f"{bcolors.WARNING}This function is unmaintained \ and likely not to work! Please read \ the source and make sure you are \ doing what you want to do.{bcolors.ENDC}" depricated_str = f"{bcolors.WARNING}This function is depricated and will be \ removed in future releases! 
Please Use \ `SingleQubitRB` with the `cliff_type` \ keyword argument instead.{bcolors.ENDC}" def deprecation(message): warnings.warn(message, DeprecationWarning, stacklevel=1) def SingleQubitRB_AC(qubit, seqs, purity=False, showPlot=False, add_cals=True): deprication(depricated_str) logger.warning("This function is deprecated and may be removed in a future release of QGL! " + "Use `SingleQubitRB` with the `cliff_type` keyword argument instead.") seqsBis = [] op = [Id(qubit, length=0), Y90m(qubit), X90(qubit)] for ct in range(3 if purity else 1): for seq in seqs: seqsBis.append([AC(qubit, c) for c in seq]) seqsBis[-1].append(op[ct]) seqsBis[-1].append(MEAS(qubit)) axis_descriptor = [{ 'name': 'length', 'unit': None, 'points': list(map(len, seqs)), 'partition': 1 }] if add_cals: seqsBis += create_cal_seqs((qubit, ), 2) axis_descriptor.append(cal_descriptor((qubit,), 2)) metafile = compile_to_hardware(seqsBis, 'RB/RB', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs}) if showPlot: plot_pulse_files(metafile) return metafile def SingleQubitRB_DiAC(qubit, seqs, compiled=True, purity=False, showPlot=False, add_cals=True): deprication(depricated_str) logger.warning("This function is deprecated and may be removed in a future release of QGL! " + "Use `SingleQubitRB` with the `cliff_type` keyword argument instead.") seqsBis = [] op = [Id(qubit, length=0), Y90m(qubit), X90(qubit)] for ct in range(3 if purity else 1): for seq in seqs: seqsBis.append([DiAC(qubit, c, compiled) for c in seq]) seqsBis[-1].append(op[ct]) seqsBis[-1].append(MEAS(qubit)) axis_descriptor = [{ 'name': 'length', 'unit': None, 'points': list(map(len, seqs)), 'partition': 1 }] if add_cals: seqsBis += [[Id(qubit), MEAS(qubit)], [Id(qubit), MEAS(qubit)], [X90(qubit), X90(qubit), MEAS(qubit)], [X90(qubit), X90(qubit), MEAS(qubit)]] axis_descriptor.append(cal_descriptor((qubit,), 2)) metafile = compile_to_hardware(seqsBis, 'RB_DiAC/RB_DiAC', axis_descriptor = axis_descriptor, extra_meta = {'sequences':seqs}) if showPlot: plot_pulse_files(metafile) return metafile
Apache License 2.0
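A minimal usage sketch for the QGL randomized-benchmarking helpers in the record above, assuming QGL is installed with a configured channel library; the qubit label "q1", the top-level re-exports of QubitFactory / create_RB_seqs / SingleQubitRB / plot_pulse_files, and the chosen lengths are assumptions, not part of the record.

from QGL import *   # assumed to re-export QubitFactory and the BasicSequences helpers

q1 = QubitFactory("q1")                        # qubit defined in the local channel library (assumption)
lengths = [2, 4, 8, 16, 32, 64]
seqs = create_RB_seqs(1, lengths, repeats=32)  # random Clifford strings with a recovery gate appended
metafile = SingleQubitRB(q1, seqs, cliff_type="std", add_cals=True)
plot_pulse_files(metafile)                     # same effect as passing showPlot=True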
purestorage-openconnect/py-pure-client
pypureclient/flashblade/FB_2_1/models/admin_cache.py
AdminCache.__init__
python
def __init__( self, name=None, id=None, role=None, time=None, ): if name is not None: self.name = name if id is not None: self.id = id if role is not None: self.role = role if time is not None: self.time = time
Keyword args: name (str): Name of the object (e.g., a file system or snapshot). id (str): A non-modifiable, globally unique ID chosen by the system. role (FixedReference): A reference to the `role` of this user. time (int): Time the role was cached in milliseconds since UNIX epoch.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flashblade/FB_2_1/models/admin_cache.py#L49-L70
import pprint import re import six import typing from ....properties import Property if typing.TYPE_CHECKING: from pypureclient.flashblade.FB_2_1 import models class AdminCache(object): swagger_types = { 'name': 'str', 'id': 'str', 'role': 'FixedReference', 'time': 'int' } attribute_map = { 'name': 'name', 'id': 'id', 'role': 'role', 'time': 'time' } required_args = { }
BSD 2-Clause Simplified License
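A small construction sketch for the AdminCache model above; the import path mirrors the record's function_path, and every field value is made up.

from pypureclient.flashblade.FB_2_1.models.admin_cache import AdminCache

entry = AdminCache(
    name="pureuser",                              # hypothetical admin name
    id="00000000-0000-0000-0000-000000000000",    # hypothetical globally unique ID
    time=1609459200000,                           # milliseconds since UNIX epoch, per the docstring
)
print(entry.name, entry.time)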
earthobservations/wetterdienst
wetterdienst/core/scalar/values.py
ScalarValuesCore._coerce_integers
python
def _coerce_integers(series: pd.Series) -> pd.Series: return ( pd.to_numeric(series, errors="coerce") .astype(pd.Float64Dtype()) .astype(pd.Int64Dtype()) )
Method to parse integers for type coercion. Uses pandas.Int64Dtype() to allow missing values. :param series: :return:
https://github.com/earthobservations/wetterdienst/blob/8e66d14c1338fb86d8f96827c72bfd261a667ff0/wetterdienst/core/scalar/values.py#L614-L626
import logging import operator from abc import abstractmethod from enum import Enum from typing import Dict, Generator, List, Tuple, Union import numpy as np import pandas as pd from pint import Quantity from pytz import timezone from tqdm import tqdm from wetterdienst.core.scalar.result import StationsResult, ValuesResult from wetterdienst.metadata.columns import Columns from wetterdienst.metadata.resolution import Resolution from wetterdienst.metadata.timezone import Timezone from wetterdienst.metadata.unit import REGISTRY, OriginUnit, SIUnit from wetterdienst.util.enumeration import parse_enumeration_from_template from wetterdienst.util.logging import TqdmToLogger log = logging.getLogger(__name__) class ScalarValuesCore: @property def _meta_fields(self) -> List[str]: if not self.stations.stations.tidy: fields = [ Columns.STATION_ID.value, Columns.DATE.value, ] else: fields = [ Columns.STATION_ID.value, Columns.DATASET.value, Columns.PARAMETER.value, Columns.DATE.value, Columns.VALUE.value, Columns.QUALITY.value, ] return fields _date_fields = [Columns.DATE.value, Columns.FROM_DATE.value, Columns.TO_DATE.value] @property def data_tz(self) -> timezone: return timezone(self._data_tz.value) @property @abstractmethod def _data_tz(self) -> Timezone: pass @property @abstractmethod def _irregular_parameters(self) -> Tuple[str]: pass @property @abstractmethod def _integer_parameters(self) -> Tuple[str]: pass @property @abstractmethod def _string_parameters(self) -> Tuple[str]: pass @property def _complete_dates(self) -> pd.DatetimeIndex: start_date, end_date = self.stations.start_date, self.stations.end_date if self.stations.stations.resolution == Resolution.MONTHLY: end_date += pd.Timedelta(days=31) elif self.stations.stations.resolution == Resolution.ANNUAL: end_date += pd.Timedelta(year=366) date_range = pd.date_range( start_date, end_date, freq=self.stations.frequency.value, tz=self.data_tz, ) return date_range @property def _base_df(self) -> pd.DataFrame: return pd.DataFrame({Columns.DATE.value: self._complete_dates}) def convert_values_to_si(self, df: pd.DataFrame, dataset) -> pd.DataFrame: def _convert_values_to_si(series): op, factor = conversion_factors.get(series.name, (None, None)) if not op or not factor: return series return op(series, factor) conversion_factors = self._create_conversion_factors(dataset) df = df.apply(_convert_values_to_si, axis=0) return df def _create_conversion_factors( self, dataset ) -> Dict[str, Tuple[Union[operator.add, operator.mul], float]]: dataset = dataset.name dataset_accessor = self.stations.stations._dataset_accessor if self.stations.stations._unique_dataset: units = self.stations.stations._unit_tree[dataset_accessor] else: units = self.stations.stations._unit_tree[dataset_accessor][dataset] conversion_factors = {} for parameter in units: origin_unit, si_unit = parameter.value parameter = parameter.name if self.stations.stations._unique_dataset: parameter_value = self.stations.stations._dataset_tree[ dataset_accessor ][parameter].value else: parameter_value = self.stations.stations._dataset_tree[ dataset_accessor ][dataset][parameter].value if si_unit == SIUnit.KILOGRAM_PER_SQUARE_METER.value: if origin_unit == OriginUnit.MILLIMETER.value: conversion_factors[parameter_value] = (operator.mul, 1) else: raise ValueError( "manually set conversion factor for precipitation unit" ) elif si_unit == SIUnit.DEGREE_KELVIN.value: degree_offset = Quantity(0, origin_unit).to(si_unit).magnitude conversion_factors[parameter_value] = (operator.add, degree_offset) elif 
si_unit == SIUnit.PERCENT.value: factor = REGISTRY(str(origin_unit)).to(str(si_unit)).magnitude conversion_factors[parameter_value] = (operator.mul, factor) else: conversion_factors[parameter_value] = ( operator.mul, Quantity(1, origin_unit).to(si_unit).magnitude, ) return conversion_factors def __init__(self, stations: StationsResult) -> None: self.stations = stations @classmethod def from_stations(cls, stations: StationsResult): return cls(stations) def __eq__(self, other): return ( self.stations.station_id == other.stations.station_id and self.stations.parameter == other.stations.parameter and self.stations.start_date == other.stations.start_date and self.stations.end_date == other.stations.end_date ) pass def __str__(self): station_ids_joined = "& ".join( [str(station_id) for station_id in self.stations.station_id] ) parameters_joined = "& ".join( [ parameter.value for parameter, parameter_set in self.stations.stations.parameter ] ) return ", ".join( [ f"station_ids {station_ids_joined}", f"parameters {parameters_joined}", str(self.stations.start_date), str(self.stations.end_date), ] ) pass def _create_empty_station_parameter_df( self, station_id: str, parameter: Enum, dataset: Enum ) -> pd.DataFrame: dataset_tree = self.stations.stations._dataset_tree resolution = self.stations.stations.resolution if parameter == dataset: if self.stations.stations._unique_dataset: parameter = [*dataset_tree[resolution.name]] else: parameter = [*dataset_tree[resolution.name][dataset.name]] if self.stations.stations.tidy: if not self.stations.stations.start_date: return pd.DataFrame(None, columns=self._meta_fields) data = [] for par in pd.Series(parameter): if par.name.startswith("QUALITY"): continue par_df = self._base_df par_df[Columns.PARAMETER.value] = par.value data.append(par_df) df = pd.concat(data) df[Columns.STATION_ID.value] = station_id df[Columns.DATASET.value] = dataset.name df[Columns.VALUE.value] = pd.NA df[Columns.QUALITY.value] = pd.NA return df else: parameter = pd.Series(parameter).map(lambda x: x.value).tolist() columns = [*self._meta_fields, *parameter] if self.stations.stations.start_date: return pd.DataFrame(None, columns=columns) df = self._base_df df = df.reindex(columns=columns) df[Columns.STATION_ID.value] = station_id return df def _build_complete_df( self, df: pd.DataFrame, station_id: str, parameter: Enum, dataset: Enum ) -> pd.DataFrame: if not self.stations.start_date: return df if parameter != dataset or not self.stations.stations.tidy: df = pd.merge( left=self._base_df, right=df, left_on=Columns.DATE.value, right_on=Columns.DATE.value, how="left", ) df[Columns.STATION_ID.value] = station_id if self.stations.tidy: df[Columns.PARAMETER.value] = parameter.value df[Columns.PARAMETER.value] = pd.Categorical( df[Columns.PARAMETER.value] ) if dataset: df[Columns.DATASET.value] = dataset.name.lower() df[Columns.DATASET.value] = pd.Categorical( df[Columns.DATASET.value] ) return df else: data = [] for parameter, group in df.groupby(Columns.PARAMETER.value, sort=False): if self.stations.stations._unique_dataset: parameter_ = parse_enumeration_from_template( parameter, self.stations.stations._parameter_base[ self.stations.resolution.name ], ) else: parameter_ = parse_enumeration_from_template( parameter, self.stations.stations._dataset_tree[ self.stations.resolution.name ][dataset.name], ) df = pd.merge( left=self._base_df, right=group, left_on=Columns.DATE.value, right_on=Columns.DATE.value, how="left", ) df[Columns.STATION_ID.value] = station_id df[Columns.PARAMETER.value] = 
parameter_.value df[Columns.DATASET.value] = dataset.name.lower() df[Columns.DATASET.value] = pd.Categorical(df[Columns.DATASET.value]) data.append(df) return pd.concat(data) def _organize_df_columns(self, df: pd.DataFrame) -> pd.DataFrame: columns = self._meta_fields columns.extend(df.columns.difference(columns, sort=False)) df = df.reindex(columns=columns) return df def query(self) -> Generator[ValuesResult, None, None]: for station_id in self.stations.station_id: station_data = [] for parameter, dataset in self.stations.parameter: parameter_df = self._collect_station_parameter( station_id, parameter, dataset ) if parameter_df.empty: continue self._coerce_date_fields(parameter_df) parameter_df = self._coerce_parameter_types(parameter_df) if self.stations.stations.si_units: parameter_df = self.convert_values_to_si(parameter_df, dataset) if self.stations.stations.tidy: parameter_df = self.tidy_up_df(parameter_df, dataset) if parameter != dataset: parameter_df = parameter_df[ parameter_df[Columns.PARAMETER.value] == parameter.value.lower() ] parameter_df = self._build_complete_df( parameter_df, station_id, parameter, dataset ) parameter_df = self._organize_df_columns(parameter_df) station_data.append(parameter_df) try: station_df = pd.concat(station_data, ignore_index=True) except ValueError: station_df = self._create_empty_station_parameter_df( station_id, parameter ) station_df = self._coerce_meta_fields(station_df) if not station_df.empty and self.stations.start_date: station_df = station_df[ (station_df[Columns.DATE.value] >= self.stations.start_date) & (station_df[Columns.DATE.value] <= self.stations.end_date) ] station_df = self._coerce_parameter_types(station_df) if self.stations.humanize: station_df = self._humanize(station_df) if station_df.empty: continue yield ValuesResult(stations=self.stations, df=station_df) @abstractmethod def _collect_station_parameter( self, station_id: str, parameter: Enum, dataset: Enum ) -> pd.DataFrame: pass def tidy_up_df(self, df: pd.DataFrame, dataset: Enum) -> pd.DataFrame: df = self._tidy_up_df(df, dataset) df[Columns.DATASET.value] = pd.Series( dataset.name.lower(), index=df.index, dtype=str ) df[Columns.VALUE.value] = pd.to_numeric(df[Columns.VALUE.value]).astype(float) if Columns.QUALITY.value not in df: df[Columns.QUALITY.value] = np.nan df[Columns.QUALITY.value] = pd.to_numeric(df[Columns.QUALITY.value]).astype( float ) df.loc[df[Columns.VALUE.value].isna(), Columns.QUALITY.value] = np.NaN return df @abstractmethod def _tidy_up_df(self, df: pd.DataFrame, dataset) -> pd.DataFrame: pass def _coerce_date_fields(self, df: pd.DataFrame) -> pd.DataFrame: for column in ( Columns.DATE.value, Columns.FROM_DATE.value, Columns.TO_DATE.value, ): try: df[column] = self._coerce_dates(df[column]) except KeyError: pass return df def _coerce_meta_fields(self, df: pd.DataFrame) -> pd.DataFrame: df[Columns.STATION_ID.value] = self._parse_station_id( df[Columns.STATION_ID.value] ).astype("category") if self.stations.stations.tidy: for column in (Columns.DATASET.value, Columns.PARAMETER.value): df[column] = self._coerce_strings(df[column]).astype("category") df[Columns.VALUE.value] = pd.to_numeric(df[Columns.VALUE.value]).astype( float ) df[Columns.QUALITY.value] = pd.to_numeric(df[Columns.QUALITY.value]).astype( float ) return df def _parse_station_id(self, series: pd.Series) -> pd.Series: return self.stations.stations._parse_station_id(series) def _coerce_dates(self, series: pd.Series) -> pd.Series: return pd.to_datetime(series, 
infer_datetime_format=True).dt.tz_localize( self.data_tz ) @staticmethod
MIT License
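A standalone sketch of the coercion chain used by _coerce_integers above; it needs only pandas, and the sample values are arbitrary.

import pandas as pd

series = pd.Series(["1", "2", None, "not-a-number"])
coerced = (
    pd.to_numeric(series, errors="coerce")
    .astype(pd.Float64Dtype())
    .astype(pd.Int64Dtype())
)
print(coerced)   # nullable Int64 series; None and "not-a-number" become <NA>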
ing-bank/doing-cli
src/doing/init/commands.py
init
python
def init(reference_issue): cmd_init(reference_issue)
Create a .doing-cli-config file. REFERENCE_ISSUE (optional): Find a representative work item and pass its URL to automatically fill the config.
https://github.com/ing-bank/doing-cli/blob/712304f7a217b571206d2834c391c51916fe55b0/src/doing/init/commands.py#L8-L14
import click from doing.init._init import cmd_init @click.command() @click.argument("reference_issue", required=False, default="")
MIT License
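A sketch of invoking the click command above from a test, using click's CliRunner; the Azure DevOps work-item URL is hypothetical, and the command itself needs a configured Azure DevOps context to succeed.

from click.testing import CliRunner
from doing.init.commands import init

runner = CliRunner()
# REFERENCE_ISSUE is optional; a representative work-item URL lets cmd_init pre-fill the config.
result = runner.invoke(init, ["https://dev.azure.com/org/project/_workitems/edit/123"])
print(result.exit_code)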
menpo/menpo
menpo/shape/mesh/textured.py
TexturedTriMesh.from_vector
python
def from_vector(self, flattened): return TexturedTriMesh( flattened.reshape([-1, self.n_dims]), self.tcoords.points, self.texture, trilist=self.trilist, )
Builds a new :class:`TexturedTriMesh` given the `flattened` 1D vector. Note that the trilist, texture, and tcoords will be drawn from self. Parameters ---------- flattened : ``(N,)`` `ndarray` Vector representing a set of points. Returns -------- trimesh : :map:`TexturedTriMesh` A new textured trimesh created from the vector, with the trilist, tcoords and texture taken from ``self``.
https://github.com/menpo/menpo/blob/b8bd3cefd8dbb3ca8572892126c8726e2b14969f/menpo/shape/mesh/textured.py#L178-L198
import numpy as np from menpo.shape import PointCloud from menpo.transform import tcoords_to_image_coords from ..adjacency import mask_adjacency_array, reindex_adjacency_array from .base import TriMesh, grid_tcoords class TexturedTriMesh(TriMesh): def __init__(self, points, tcoords, texture, trilist=None, copy=True): super(TexturedTriMesh, self).__init__(points, trilist=trilist, copy=copy) self.tcoords = PointCloud(tcoords, copy=copy) if not copy: self.texture = texture else: self.texture = texture.copy() @property def n_channels(self): return self.texture.n_channels @classmethod def init_2d_grid(cls, shape, spacing=None, tcoords=None, texture=None): pc = TriMesh.init_2d_grid(shape, spacing=spacing) points = pc.points trilist = pc.trilist if tcoords is not None: tcoords = tcoords.copy() else: tcoords = grid_tcoords(shape) if texture is not None: texture = texture.copy() else: from menpo.image import Image texture = Image.init_blank(shape) return TexturedTriMesh(points, tcoords, texture, trilist=trilist, copy=False) @classmethod def init_from_depth_image(cls, depth_image, tcoords=None, texture=None): from menpo.image import MaskedImage new_tmesh = cls.init_2d_grid( depth_image.shape, tcoords=tcoords, texture=texture ) if isinstance(depth_image, MaskedImage): new_tmesh = new_tmesh.from_mask(depth_image.mask.as_vector()) return cls( np.hstack([new_tmesh.points, depth_image.as_vector(keep_channels=True).T]), new_tmesh.tcoords.points, new_tmesh.texture, trilist=new_tmesh.trilist, copy=False, ) def tcoords_pixel_scaled(self): return tcoords_to_image_coords(self.texture.shape).apply(self.tcoords)
BSD 3-Clause New or Revised License
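A round-trip sketch for TexturedTriMesh.from_vector above, using the init_2d_grid constructor shown in the context; the import location and the as_vector() / n_points calls are assumed from menpo's standard PointCloud interface.

from menpo.shape import TexturedTriMesh

tmesh = TexturedTriMesh.init_2d_grid((4, 4))   # small grid with default tcoords and a blank texture
vector = tmesh.as_vector()                     # flattened point vector
rebuilt = tmesh.from_vector(vector)            # trilist, tcoords and texture are drawn from tmesh
assert rebuilt.n_points == tmesh.n_points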
pachyderm/python-pachyderm
src/python_pachyderm/experimental/mixin/identity.py
IdentityMixin.create_oidc_client
python
def create_oidc_client( self, client: identity_proto.OIDCClient ) -> identity_proto.OIDCClient: return self._req(Service.IDENTITY, "CreateOIDCClient", client=client).client
Create an OIDC client in the identity server. Parameters ---------- client : identity_proto.OIDCClient A protobuf object representing an OIDC client. Returns ------- identity_proto.OIDCClient A protobuf object that returns a client with info on the client ID, name, secret, and lists of redirect URIs and trusted peers.
https://github.com/pachyderm/python-pachyderm/blob/9dbffba91ac753e7c63c58d71768f53f83789cb9/src/python_pachyderm/experimental/mixin/identity.py#L93-L109
from typing import List from python_pachyderm.service import Service, identity_proto class IdentityMixin: def set_identity_server_config( self, config: identity_proto.IdentityServerConfig ) -> None: self._req(Service.IDENTITY, "SetIdentityServerConfig", config=config) def get_identity_server_config(self) -> identity_proto.IdentityServerConfig: return self._req(Service.IDENTITY, "GetIdentityServerConfig").config def create_idp_connector(self, connector: identity_proto.IDPConnector) -> None: self._req(Service.IDENTITY, "CreateIDPConnector", connector=connector) def list_idp_connectors(self) -> List[identity_proto.IDPConnector]: return self._req(Service.IDENTITY, "ListIDPConnectors").connectors def update_idp_connector(self, connector: identity_proto.IDPConnector) -> None: self._req(Service.IDENTITY, "UpdateIDPConnector", connector=connector) def get_idp_connector(self, id: str) -> identity_proto.IDPConnector: return self._req(Service.IDENTITY, "GetIDPConnector", id=id).connector def delete_idp_connector(self, id: str) -> None: self._req(Service.IDENTITY, "DeleteIDPConnector", id=id)
Apache License 2.0
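A hedged usage sketch for create_oidc_client above; it assumes a reachable pachd with the identity service enabled, assumes the experimental package exposes a Client that mixes in IdentityMixin, and infers the OIDCClient field names from the docstring.

from python_pachyderm.experimental import Client   # assumed entry point for the experimental API
from python_pachyderm.service import identity_proto

client = Client()
oidc = client.create_oidc_client(
    identity_proto.OIDCClient(
        id="example-app",                                  # hypothetical client ID
        name="Example App",
        redirect_uris=["http://localhost:8000/callback"],  # hypothetical redirect URI
    )
)
print(oidc.secret)   # the returned message carries the server-side secret, per the docstring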
netflix/consoleme
consoleme/lib/google.py
get_group_memberships
python
async def get_group_memberships(user_email, dry_run=None, service=None): function = f"{__name__}.{sys._getframe().f_code.co_name}" stats.count(function) log_data = { "function": function, "user": user_email, "message": "Getting list of groups for user", } log.debug(log_data) if not service: service = await get_service("admin", "directory_v1", user_email) groups = [] if not dry_run: try: page_token = None while True: results = await list_user_groups_call(service, user_email, page_token) for g in results.get("groups"): groups.append(g.get("email")) page_token = results.get("nextPageToken") if not page_token: break except HttpError as he: errors = json.loads(he.content.decode()) log.debug(errors) raise he return groups return []
Get group memberships for a user
https://github.com/netflix/consoleme/blob/76313ea75fd721453a6381d7882bc9494c860500/consoleme/lib/google.py#L315-L344
import asyncio import html import sys from typing import Any, Dict, List, Optional, Union import googleapiclient.discovery import ujson as json from asgiref.sync import sync_to_async from google.oauth2 import service_account from googleapiclient.discovery import Resource from googleapiclient.errors import HttpError from retrying import retry from validate_email import validate_email from consoleme.config import config from consoleme.exceptions.exceptions import ( BackgroundCheckNotPassedException, BulkAddPrevented, DifferentUserGroupDomainException, MissingConfigurationValue, NoCredentialSubjectException, NoGroupsException, NotAMemberException, UnableToModifyRestrictedGroupMembers, UnauthorizedToAccess, UserAlreadyAMemberOfGroupException, ) from consoleme.lib.auth import can_modify_members from consoleme.lib.dynamo import UserDynamoHandler from consoleme.lib.groups import does_group_require_bg_check from consoleme.lib.plugins import get_plugin_by_name stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))() log = config.get_logger() auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))() async def add_user_to_group_task( member: str, group: str, requesting_user: str, requesting_users_groups: List[str], semaphore=None, service: None = None, ) -> Dict[str, Union[str, bool]]: if not semaphore: semaphore = asyncio.BoundedSemaphore(10) async with semaphore: stats.count( "add_user_to_group_task.attempt", tags={"member": member, "group": group, "requesting_user": requesting_user}, ) member = member.strip() result = { "Action": "Add user", "Member": member, "Group": group, "Error": False, } log_data = { "function": f"{__name__, sys._getframe().f_code.co_name}", "action": "Add user", "member": member, "group": group, } try: group_info = await auth.get_group_info(group, members=False) can_add_remove_members = can_modify_members( requesting_user, requesting_users_groups, group_info ) if not can_add_remove_members: result[ "Result" ] = "You are unable to add members to this group. Maybe it is restricted." result["Error"] = True error = f"There was at least one problem. {result['Result']}" log_data["error"] = error log.warning(log_data, exc_info=True) return result if not validate_email(member): result["Result"] = "Invalid e-mail address entered" result["Error"] = True log_data["message"] = "Error" log_data["error"] = result["Result"] log.warning(log_data, exc_info=True) return result if ( not group_info.allow_third_party_users and not await auth.does_user_exist(member) ): result[ "Result" ] = "User does not exist in our environment and this group doesn't allow third party users." result["Error"] = True log_data["message"] = "Error" log_data["error"] = result["Result"] log.warning(log_data, exc_info=True) return result await add_user_to_group(member, group, requesting_user, service=service) result["Result"] = "Successfully added user to group" return result except Exception as e: result["Result"] = html.escape(str(e)) result["Error"] = True error = f"There was at least one problem. 
{e}" log_data["message"] = "Error" log_data["error"] = error log.error(log_data, exc_info=True) return result async def remove_user_from_group_task( member: str, group: str, requesting_user: str, requesting_users_groups: List[str], semaphore=None, service: None = None, ) -> Dict[str, Union[str, bool]]: if not semaphore: semaphore = asyncio.BoundedSemaphore(10) async with semaphore: stats.count( "remove_user_from_group_task.attempt", tags={"member": member, "group": group, "requesting_user": requesting_user}, ) member = member.strip() result = { "Action": "Remove user", "Member": member, "Requesting User": requesting_user, "Group": group, "Error": False, } log_data = { "function": f"{__name__, sys._getframe().f_code.co_name}", "action": "Remove user", "member": member, "group": group, } try: group_info = await auth.get_group_info(group, members=False) can_add_remove_members = can_modify_members( requesting_user, requesting_users_groups, group_info ) if not can_add_remove_members: result[ "Result" ] = "You are unable to remove members from this group. Maybe it is restricted." result["Error"] = True error = f"There was at least one problem. {result['Result']}" log_data["error"] = error log.warning(log_data, exc_info=True) return result if not validate_email(member): result[ "Result" ] = "Invalid e-mail address entered, or user doesn't exist" result["Error"] = True log_data["message"] = "Error" log_data["error"] = result["Result"] log.warning(log_data, exc_info=True) return result await remove_user_from_group( member, group, requesting_user, service=service ) result["Result"] = "Successfully removed user from group" return result except Exception as e: result["Result"] = str(e) result["Error"] = True error = f"There was at least one problem. {e}" log_data["message"] = "Error" log_data["error"] = error log.error(log_data, exc_info=True) return result async def get_service(service_name: str, service_path: str, group: str) -> Resource: function = f"{__name__}.{sys._getframe().f_code.co_name}" stats.count(function) log_data = { "function": function, "service_name": service_name, "service_path": service_path, "group": group, "message": f"Building service connection for {service_name} / {service_path}", } log.debug(log_data) if config.get("google.service_key_file"): admin_credentials = service_account.Credentials.from_service_account_file( config.get("google.service_key_file"), scopes=config.get( "google.admin_scopes", ["https://www.googleapis.com/auth/admin.directory.group"], ), ) elif config.get("google.service_key_dict"): admin_credentials = service_account.Credentials.from_service_account_info( config.get("google.service_key_dict"), scopes=config.get( "google.admin_scopes", ["https://www.googleapis.com/auth/admin.directory.group"], ), ) else: raise MissingConfigurationValue( "Missing configuration for Google. You must configure either `google.service_key_file` " "or `google.service_key_dict`." ) credential_subjects = config.get("google.credential_subject") credential_subject = None for k, v in credential_subjects.items(): if k == group.split("@")[1]: credential_subject = v break if not credential_subject: raise NoCredentialSubjectException( "Error: Unable to find Google credential subject for domain {}. 
" "{}".format(group.split("@")[1], config.get("ses.support_reference", "")) ) admin_delegated_credentials = admin_credentials.with_subject(credential_subject) service = await sync_to_async(googleapiclient.discovery.build)( service_name, service_path, credentials=admin_delegated_credentials ) return service @sync_to_async def list_group_members_call(service, email): return service.members().list(groupKey=email).execute() @retry(wait_exponential_multiplier=1000, wait_exponential_max=10000) async def list_group_members( email: str, dry_run: None = None, service: Optional[Resource] = None ) -> List[str]: function = f"{__name__}.{sys._getframe().f_code.co_name}" stats.count(function) log_data = { "function": function, "user": email, "message": "Getting list of members for group", } log.debug(log_data) if not service: service = await get_service("admin", "directory_v1", email) if not dry_run: try: results = await list_group_members_call(service, email) except HttpError as he: errors = json.loads(he.content.decode()) log.debug(errors) raise he return list(map(lambda x: x.get("email", ""), results.get("members", []))) return [] @sync_to_async def list_user_groups_call(service, user_email, page_token=None): if page_token: results = ( service.groups().list(userKey=user_email, pageToken=page_token).execute() ) else: results = service.groups().list(userKey=user_email).execute() return results
Apache License 2.0
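A sketch of driving the coroutine above without real Google credentials, assuming ConsoleMe's default configuration can be loaded: passing any non-None service skips the get_service() credential lookup, and dry_run short-circuits before the Directory API call.

import asyncio
from consoleme.lib.google import get_group_memberships

groups = asyncio.run(
    get_group_memberships("user@example.com", dry_run=True, service=object())
)
print(groups)   # [] in dry-run mode; a real call pages through service.groups().list()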
googlecloudplatform/perfkitbenchmarker
perfkitbenchmarker/linux_benchmarks/lmbench_benchmark.py
_ConfigureRun
python
def _ConfigureRun(vm): logging.info('Set Lmbench run parameters') vm.RemoteCommand('cd {0} && mkdir bin && cd bin && ' 'mkdir x86_64-linux-gnu'.format(lmbench.LMBENCH_DIR)) vm.RobustRemoteCommand( 'cd {0} && cd scripts && echo "1" >>input.txt && ' 'echo "1" >>input.txt && ./config-run <input.txt'.format( lmbench.LMBENCH_DIR)) sed_cmd = ( 'sed -i -e "s/OUTPUT=\\/dev\\/tty/OUTPUT=\\/dev\\/null/" ' '{0}/bin/x86_64-linux-gnu/CONFIG.*'.format(lmbench.LMBENCH_DIR)) vm.RemoteCommand(sed_cmd) if FLAGS.lmbench_mem_size: sed_cmd = ( 'sed -i -e "s/MB=/MB={0}/" {1}/bin/x86_64-linux-gnu/CONFIG.*'.format( FLAGS.lmbench_mem_size, lmbench.LMBENCH_DIR)) vm.RemoteCommand(sed_cmd) if FLAGS.lmbench_hardware == _LMBENCH_HARDWARE_DEFAULT: sed_cmd = ( 'sed -i -e "s/BENCHMARK_HARDWARE=YES/BENCHMARK_HARDWARE={0}/" ' '{1}/bin/x86_64-linux-gnu/CONFIG.*'.format( FLAGS.lmbench_hardware, lmbench.LMBENCH_DIR)) vm.RemoteCommand(sed_cmd)
Configure Lmbench tests.
https://github.com/googlecloudplatform/perfkitbenchmarker/blob/c14a122016d414351d41167029c79c9a19709384/perfkitbenchmarker/linux_benchmarks/lmbench_benchmark.py#L64-L89
import itertools import logging from absl import flags from perfkitbenchmarker import configs from perfkitbenchmarker import regex_util from perfkitbenchmarker import sample from perfkitbenchmarker.linux_packages import lmbench FLAGS = flags.FLAGS BENCHMARK_NAME = 'lmbench' BENCHMARK_CONFIG = """ lmbench: description: Runs Lmbench Microbenchmark. vm_groups: default: vm_spec: *default_dual_core vm_count: null """ _LMBENCH_HARDWARE_DEFAULT = 'NO' flags.DEFINE_integer( 'lmbench_mem_size', None, 'The range of memory on which several benchmarks operate. If not provided, ' 'the memory size should be 8MB as default' ) flags.DEFINE_enum( 'lmbench_hardware', _LMBENCH_HARDWARE_DEFAULT, ['YES', 'NO'], 'The decision to run BENCHMARK_HARDWARE tests: YES or NO. The default is NO' ) def GetConfig(user_config): return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) def _PrepareLmbench(vm): logging.info('Installing Lmbench on %s', vm) vm.Install('lmbench')
Apache License 2.0
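A pure-Python sketch of the CONFIG substitutions that _ConfigureRun above performs remotely via sed when --lmbench_mem_size is set and --lmbench_hardware keeps its default of NO; the sample CONFIG text is illustrative.

config_text = "OUTPUT=/dev/tty\nMB=\nBENCHMARK_HARDWARE=YES\n"
mem_size = 1024     # stands in for FLAGS.lmbench_mem_size
hardware = "NO"     # stands in for FLAGS.lmbench_hardware (the default)

config_text = config_text.replace("OUTPUT=/dev/tty", "OUTPUT=/dev/null")
config_text = config_text.replace("MB=", f"MB={mem_size}", 1)
if hardware == "NO":
    config_text = config_text.replace("BENCHMARK_HARDWARE=YES", f"BENCHMARK_HARDWARE={hardware}")
print(config_text)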
ebay/wextracto
wex/etree.py
css
python
def css(expression): return parse | map_if_list(CSSSelector(expression))
Returns a :func:`composable <wex.composed.composable>` callable that will select elements defined by a `CSS selector <http://en.wikipedia.org/wiki/Cascading_Style_Sheets#Selector>`_ expression. :param expression: The CSS selector expression. The callable returned accepts a :class:`wex.response.Response`, a list of elements or an individual element as an argument.
https://github.com/ebay/wextracto/blob/9c789b1c98d95a1e87dbedfd1541a8688d128f5c/wex/etree.py#L174-L185
from __future__ import absolute_import, unicode_literals, print_function import wex.py2compat ; assert wex.py2compat import logging from itertools import islice, chain from copy import deepcopy from operator import methodcaller, itemgetter from six import string_types, PY2 from six.moves import map, reduce from six.moves.urllib_parse import urljoin, quote, unquote from lxml.etree import (XPath, _ElementTree, _Element, Element, FunctionNamespace) from lxml.cssselect import CSSSelector from lxml.html import XHTML_NAMESPACE, HTMLParser from .composed import composable, Composable from .cache import cached from .iterable import _do_not_iter_append, filter_if_iter from .htmlstream import HTMLStream from .ncr import replace_invalid_ncr from .url import URL, public_suffix if PY2: def quote_base_url(base_url): if isinstance(base_url, unicode): return quote(base_url.encode('utf-8')) return quote(base_url) def unquote_base_url(quoted): assert isinstance(quoted, unicode) quoted = quoted.encode('ascii') unquoted = unquote(quoted) return unquoted.decode('utf-8') else: quote_base_url = quote unquote_base_url = unquote NEWLINE = u'\n' EMPTY = u'' SPACE = u' ' _do_not_iter_append(_Element) UNPARSEABLE = Element('unparseable') base_href = XPath('//base[@href]/@href | //x:base[@href]/@href', namespaces={'x': XHTML_NAMESPACE}) default_namespaces = {'re': 'http://exslt.org/regular-expressions'} function_namespace = FunctionNamespace(None) _html_text_nodes = XPath( 'descendant-or-self::node()' + '[not(local-name()) or not(text())]' + '[not(ancestor::script or ancestor::style or ancestor::noscript)]' ) def _wex_html_text(context, arg=None): if arg is None: arg = [context.context_node] html_text = [] for node in chain.from_iterable(map(_html_text_nodes, arg)): tag = getattr(node, 'tag', None) if tag is None: html_text.append(node) elif tag == 'br': html_text.append(NEWLINE) else: html_text.append(EMPTY) return EMPTY.join(html_text) function_namespace['wex-html-text'] = _wex_html_text @composable @cached def parse(src): if not hasattr(src, 'read'): return src etree = _ElementTree() try: stream = HTMLStream(src) quoted_base_url = quote_base_url(src.url) if src.url else src.url while True: try: fp = replace_invalid_ncr(stream) parser = HTMLParser(encoding='utf-8') etree.parse(fp, parser=parser, base_url=quoted_base_url) break except UnicodeDecodeError as exc: stream.next_encoding() except IOError as exc: logger = logging.getLogger(__name__) logger.warning("IOError parsing %s (%s)", src.url, exc) root = etree.getroot() if root is None: etree._setroot(UNPARSEABLE) return etree @cached def get_base_url_from_root(root): if root.base_url: base_url = unquote_base_url(root.base_url) else: base_url = root.base_url return reduce(urljoin, base_href(root)[:1], base_url) def get_base_url(elem_or_tree): if hasattr(elem_or_tree, 'getroottree'): tree = elem_or_tree.getroottree() else: tree = elem_or_tree return get_base_url_from_root(tree.getroot()) class map_if_list(Composable): def __init__(self, func): self.func = func def __repr__(self): return '%s(%r)' % (self.__class__, self.func) def __compose__(self): return (self,) def __call__(self, *args, **kwargs): if args and isinstance(args[0], list): return [res for res in map(self.func, *args, **kwargs)] return self.func(*args, **kwargs)
BSD 3-Clause New or Revised License
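A sketch of applying the css helper above directly to an lxml element (the parse step in the context passes through anything without a .read attribute); it requires wex and lxml, and the HTML snippet is made up.

from lxml.html import fromstring
from wex.etree import css

select_links = css("a.external")
doc = fromstring('<div><a class="external" href="/x">x</a><a href="/y">y</a></div>')
links = select_links(doc)                 # a bare element is handed straight to the CSS selector
print([el.get("href") for el in links])   # ['/x']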
fatiando/fatiando
fatiando/vis/myv.py
figure
python
def figure(size=None, zdown=True, color=(1, 1, 1)): _lazy_import_mlab() if size is None: fig = mlab.figure(bgcolor=color) else: fig = mlab.figure(bgcolor=color, size=size) if zdown: fig.scene.camera.view_up = numpy.array([0., 0., -1.]) fig.scene.camera.elevation(60.) fig.scene.camera.azimuth(180.) return fig
Create a default figure in Mayavi with white background Parameters: * size : tuple = (dx, dy) The size of the figure. If ``None`` will use the default size. * zdown : True or False If True, will turn the figure upside-down to make the z-axis point down * color : tuple = (r, g, b) RGB of the color of the background Return: * fig : Mayavi figure object The figure
https://github.com/fatiando/fatiando/blob/ac2afbcb2d99b18f145cc1ed40075beb5f92dd5a/fatiando/vis/myv.py#L600-L628
from __future__ import absolute_import, division from future.builtins import range import warnings import numpy from fatiando import utils from fatiando.constants import MEAN_EARTH_RADIUS warnings.warn("This module will be removed in v0.7.") mlab = None tvtk = None BuiltinSurface = None def _lazy_import_BuiltinSurface(): global BuiltinSurface if BuiltinSurface is None: from mayavi.sources.builtin_surface import BuiltinSurface def _lazy_import_mlab(): global mlab if mlab is None: try: from mayavi import mlab except ImportError: from enthought.mayavi import mlab def _lazy_import_tvtk(): global tvtk if tvtk is None: try: from tvtk.api import tvtk except ImportError: from enthought.tvtk.api import tvtk def title(text, color=(0, 0, 0), size=0.3, height=1): _lazy_import_mlab() mlab.title(text, color=color, size=size, height=height) def savefig(fname, magnification=None): _lazy_import_mlab() if magnification is None: mlab.savefig(fname) else: mlab.savefig(fname, magnification=magnification) def show(): _lazy_import_mlab() mlab.show() def points(points, color=(0, 0, 0), size=200., opacity=1, spherical=False): _lazy_import_mlab() if spherical: lon, lat, height = numpy.transpose(points) x, y, z = utils.sph2cart(lon, lat, height) else: x, y, z = numpy.transpose(points) glyph = mlab.points3d(x, y, z, color=color, opacity=opacity) glyph.glyph.glyph.scaling = False glyph.glyph.glyph_source.glyph_source.radius = size return glyph def polyprisms(prisms, prop=None, style='surface', opacity=1, edges=True, vmin=None, vmax=None, cmap='blue-red', color=None, linewidth=1, edgecolor=(0, 0, 0), scale=(1, 1, 1)): if style not in ['surface', 'wireframe']: raise ValueError("Invalid style '%s'" % (style)) if opacity > 1. or opacity < 0: raise ValueError("Invalid opacity %g. Must be in range [1,0]" % (opacity)) _lazy_import_mlab() _lazy_import_tvtk() if prop is None: label = 'scalar' else: label = prop points = [] polygons = [] scalars = [] offset = 0 for prism in prisms: if prism is None or (prop is not None and prop not in prism.props): continue x, y = prism.x, prism.y nverts = prism.nverts if prop is None: scalar = 0. 
else: p = prism.props[prop] if isinstance(p, int) or isinstance(p, float): scalar = p else: scalar = numpy.linalg.norm(p) points.extend( reversed(numpy.transpose([x, y, prism.z1 * numpy.ones_like(x)]))) polygons.append(list(range(offset, offset + nverts))) scalars.extend(scalar * numpy.ones(nverts)) offset += nverts points.extend( reversed(numpy.transpose([x, y, prism.z2 * numpy.ones_like(x)]))) polygons.append(list(range(offset, offset + nverts))) scalars.extend(scalar * numpy.ones(nverts)) offset += nverts for i in range(nverts): x1, y1 = x[i], y[i] x2, y2 = x[(i + 1) % nverts], y[(i + 1) % nverts] points.extend([[x1, y1, prism.z1], [x2, y2, prism.z1], [x2, y2, prism.z2], [x1, y1, prism.z2]]) polygons.append(list(range(offset, offset + 4))) scalars.extend(scalar * numpy.ones(4)) offset += 4 mesh = tvtk.PolyData(points=points, polys=polygons) mesh.point_data.scalars = numpy.array(scalars) mesh.point_data.scalars.name = label if vmin is None: vmin = min(scalars) if vmax is None: vmax = max(scalars) if style == 'wireframe': surf = mlab.pipeline.surface(mlab.pipeline.add_dataset(mesh), vmax=vmax, vmin=vmin, colormap=cmap) surf.actor.property.representation = 'wireframe' surf.actor.property.line_width = linewidth if style == 'surface': dataset = mlab.pipeline.triangle_filter( mlab.pipeline.add_dataset(mesh)) surf = mlab.pipeline.surface(dataset, vmax=vmax, vmin=vmin, colormap=cmap) surf.actor.property.representation = 'surface' surf.actor.property.edge_visibility = 0 if edges: edge = mlab.pipeline.surface(mlab.pipeline.add_dataset(mesh)) edge.actor.property.representation = 'wireframe' edge.actor.mapper.scalar_visibility = 0 edge.actor.property.line_width = linewidth edge.actor.property.opacity = opacity edge.actor.property.color = edgecolor edge.actor.actor.scale = scale surf.actor.property.opacity = opacity if color is not None: surf.actor.mapper.scalar_visibility = 0 surf.actor.property.color = color surf.actor.actor.scale = scale return surf def tesseroids(tesseroids, prop=None, style='surface', opacity=1, edges=True, vmin=None, vmax=None, cmap='blue-red', color=None, linewidth=1, edgecolor=(0, 0, 0), scale=(1, 1, 1)): if style not in ['surface', 'wireframe']: raise ValueError("Invalid style '%s'" % (style)) if opacity > 1. or opacity < 0: raise ValueError("Invalid opacity %g. Must be in range [1,0]" % (opacity)) _lazy_import_mlab() _lazy_import_tvtk() if prop is None: label = 'scalar' else: label = prop points = [] cells = [] offsets = [] offset = 0 mesh_size = 0 celldata = [] start = 0 for tess in tesseroids: if tess is None or (prop is not None and prop not in tess.props): continue w, e, s, n, top, bottom = tess.get_bounds() w *= scale[0] e *= scale[0] s *= scale[1] n *= scale[1] top *= scale[2] bottom *= scale[2] if prop is None: scalar = 0. 
else: p = tess.props[prop] if isinstance(p, int) or isinstance(p, float): scalar = p else: scalar = numpy.linalg.norm(p) points.extend([ utils.sph2cart(w, s, bottom), utils.sph2cart(e, s, bottom), utils.sph2cart(e, n, bottom), utils.sph2cart(w, n, bottom), utils.sph2cart(w, s, top), utils.sph2cart(e, s, top), utils.sph2cart(e, n, top), utils.sph2cart(w, n, top), utils.sph2cart(0.5 * (w + e), s, bottom), utils.sph2cart(e, 0.5 * (s + n), bottom), utils.sph2cart(0.5 * (w + e), n, bottom), utils.sph2cart(w, 0.5 * (s + n), bottom), utils.sph2cart(0.5 * (w + e), s, top), utils.sph2cart(e, 0.5 * (s + n), top), utils.sph2cart(0.5 * (w + e), n, top), utils.sph2cart(w, 0.5 * (s + n), top), utils.sph2cart(w, s, 0.5 * (top + bottom)), utils.sph2cart(e, s, 0.5 * (top + bottom)), utils.sph2cart(e, n, 0.5 * (top + bottom)), utils.sph2cart(w, n, 0.5 * (top + bottom))]) cells.append(20) cells.extend(list(range(start, start + 20))) start += 20 offsets.append(offset) offset += 21 celldata.append(scalar) mesh_size += 1 cell_array = tvtk.CellArray() cell_array.set_cells(mesh_size, numpy.array(cells)) cell_types = numpy.array([25] * mesh_size, 'i') vtkmesh = tvtk.UnstructuredGrid(points=numpy.array(points, 'f')) vtkmesh.set_cells(cell_types, numpy.array(offsets, 'i'), cell_array) vtkmesh.cell_data.scalars = numpy.array(celldata) vtkmesh.cell_data.scalars.name = label dataset = mlab.pipeline.threshold(mlab.pipeline.add_dataset(vtkmesh)) if vmin is None: vmin = min(vtkmesh.cell_data.scalars) if vmax is None: vmax = max(vtkmesh.cell_data.scalars) if style == 'wireframe': surf = mlab.pipeline.surface(mlab.pipeline.extract_edges(dataset), vmax=vmax, vmin=vmin, colormap=cmap) surf.actor.property.representation = 'wireframe' surf.actor.property.line_width = linewidth if style == 'surface': surf = mlab.pipeline.surface(dataset, vmax=vmax, vmin=vmin, colormap=cmap) surf.actor.property.representation = 'surface' if edges: edge = mlab.pipeline.surface(mlab.pipeline.extract_edges(dataset), vmax=vmax, vmin=vmin) edge.actor.property.representation = 'wireframe' edge.actor.mapper.scalar_visibility = 0 edge.actor.property.line_width = linewidth edge.actor.property.opacity = opacity edge.actor.property.color = edgecolor surf.actor.property.opacity = opacity surf.actor.property.backface_culling = False if color is not None: surf.actor.mapper.scalar_visibility = 0 surf.actor.property.color = color return surf def prisms(prisms, prop=None, style='surface', opacity=1, edges=True, vmin=None, vmax=None, cmap='blue-red', color=None, linewidth=1, edgecolor=(0, 0, 0), scale=(1, 1, 1)): if style not in ['surface', 'wireframe']: raise ValueError("Invalid style '%s'" % (style)) if opacity > 1. or opacity < 0: raise ValueError("Invalid opacity %g. Must be in range [1,0]" % (opacity)) _lazy_import_mlab() _lazy_import_tvtk() if prop is None: label = 'scalar' else: label = prop points = [] cells = [] offsets = [] offset = 0 mesh_size = 0 celldata = [] start = 0 for prism in prisms: if prism is None or (prop is not None and prop not in prism.props): continue x1, x2, y1, y2, z1, z2 = prism.get_bounds() if prop is None: scalar = 0. 
else: p = prism.props[prop] if isinstance(p, int) or isinstance(p, float): scalar = p else: scalar = numpy.linalg.norm(p) points.extend([[x1, y1, z1], [x2, y1, z1], [x2, y2, z1], [x1, y2, z1], [x1, y1, z2], [x2, y1, z2], [x2, y2, z2], [x1, y2, z2]]) cells.append(8) cells.extend([i for i in range(start, start + 8)]) start += 8 offsets.append(offset) offset += 9 celldata.append(scalar) mesh_size += 1 cell_array = tvtk.CellArray() cell_array.set_cells(mesh_size, numpy.array(cells)) cell_types = numpy.array([12] * mesh_size, 'i') vtkmesh = tvtk.UnstructuredGrid(points=numpy.array(points, 'f')) vtkmesh.set_cells(cell_types, numpy.array(offsets, 'i'), cell_array) vtkmesh.cell_data.scalars = numpy.array(celldata) vtkmesh.cell_data.scalars.name = label dataset = mlab.pipeline.threshold(mlab.pipeline.add_dataset(vtkmesh)) if vmin is None: vmin = min(vtkmesh.cell_data.scalars) if vmax is None: vmax = max(vtkmesh.cell_data.scalars) surf = mlab.pipeline.surface(dataset, vmax=vmax, vmin=vmin, colormap=cmap) if style == 'wireframe': surf.actor.property.representation = 'wireframe' surf.actor.property.line_width = linewidth if style == 'surface': surf.actor.property.representation = 'surface' if edges: surf.actor.property.edge_visibility = 1 surf.actor.property.line_width = linewidth surf.actor.property.edge_color = edgecolor surf.actor.property.opacity = opacity if color is not None: surf.actor.mapper.scalar_visibility = 0 surf.actor.property.color = color surf.actor.actor.scale = scale return surf
BSD 3-Clause New or Revised License
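A usage sketch for the Mayavi helpers above (requires Mayavi; the module itself warns it will be removed in fatiando v0.7). The point coordinates and figure size are arbitrary.

from fatiando.vis import myv

fig = myv.figure(size=(800, 600), zdown=True)   # white background, z axis pointing down
myv.points([[0, 0, 0], [500, 500, 200]], color=(1, 0, 0), size=50.0)
myv.title("example scene")
myv.show()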
nuagenetworks/vspk-python
vspk/v5_0/nuenterprise.py
NUEnterprise.bgp_enabled
python
def bgp_enabled(self): return self._bgp_enabled
Get bgp_enabled value. Notes: Read-only flag to display if BGP is enabled for this enterprise. This attribute is named `BGPEnabled` in VSD API.
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v5_0/nuenterprise.py#L680-L690
from .fetchers import NUL2DomainsFetcher from .fetchers import NUL2DomainTemplatesFetcher from .fetchers import NUL4ServicesFetcher from .fetchers import NUL4ServiceGroupsFetcher from .fetchers import NUL7applicationsignaturesFetcher from .fetchers import NUSaaSApplicationGroupsFetcher from .fetchers import NUSaaSApplicationTypesFetcher from .fetchers import NUCaptivePortalProfilesFetcher from .fetchers import NURateLimitersFetcher from .fetchers import NUGatewaysFetcher from .fetchers import NUGatewaysLocationsFetcher from .fetchers import NUGatewayTemplatesFetcher from .fetchers import NUPATNATPoolsFetcher from .fetchers import NULDAPConfigurationsFetcher from .fetchers import NUWebCategoriesFetcher from .fetchers import NUWebDomainNamesFetcher from .fetchers import NURedundancyGroupsFetcher from .fetchers import NUDeploymentFailuresFetcher from .fetchers import NUPerformanceMonitorsFetcher from .fetchers import NUMetadatasFetcher from .fetchers import NUNetconfProfilesFetcher from .fetchers import NUNetworkMacroGroupsFetcher from .fetchers import NUNetworkPerformanceMeasurementsFetcher from .fetchers import NUKeyServerMonitorsFetcher from .fetchers import NUZFBRequestsFetcher from .fetchers import NUBGPProfilesFetcher from .fetchers import NUEgressQOSPoliciesFetcher from .fetchers import NUSharedNetworkResourcesFetcher from .fetchers import NUFirewallAclsFetcher from .fetchers import NUFirewallRulesFetcher from .fetchers import NUIKECertificatesFetcher from .fetchers import NUIKEEncryptionprofilesFetcher from .fetchers import NUIKEGatewaysFetcher from .fetchers import NUIKEGatewayProfilesFetcher from .fetchers import NUIKEPSKsFetcher from .fetchers import NUAlarmsFetcher from .fetchers import NUAllAlarmsFetcher from .fetchers import NUGlobalMetadatasFetcher from .fetchers import NUVMsFetcher from .fetchers import NUVNFsFetcher from .fetchers import NUVNFMetadatasFetcher from .fetchers import NUVNFThresholdPoliciesFetcher from .fetchers import NUIngressQOSPoliciesFetcher from .fetchers import NUEnterpriseNetworksFetcher from .fetchers import NUEnterpriseSecuritiesFetcher from .fetchers import NUJobsFetcher from .fetchers import NUPolicyGroupCategoriesFetcher from .fetchers import NUPolicyObjectGroupsFetcher from .fetchers import NUDomainsFetcher from .fetchers import NUDomainTemplatesFetcher from .fetchers import NUContainersFetcher from .fetchers import NUCOSRemarkingPolicyTablesFetcher from .fetchers import NURoutingPoliciesFetcher from .fetchers import NUApplicationsFetcher from .fetchers import NUApplicationperformancemanagementsFetcher from .fetchers import NUGroupsFetcher from .fetchers import NUGroupKeyEncryptionProfilesFetcher from .fetchers import NUTrunksFetcher from .fetchers import NUDSCPForwardingClassTablesFetcher from .fetchers import NUDSCPRemarkingPolicyTablesFetcher from .fetchers import NUUsersFetcher from .fetchers import NUNSGatewaysFetcher from .fetchers import NUNSGatewaysCountsFetcher from .fetchers import NUNSGatewaySummariesFetcher from .fetchers import NUNSGatewayTemplatesFetcher from .fetchers import NUNSGGroupsFetcher from .fetchers import NUNSRedundantGatewayGroupsFetcher from .fetchers import NUPublicNetworkMacrosFetcher from .fetchers import NUMultiCastListsFetcher from .fetchers import NUAvatarsFetcher from .fetchers import NUEventLogsFetcher from .fetchers import NUOverlayManagementProfilesFetcher from .fetchers import NUSyslogDestinationsFetcher from bambou import NURESTObject class NUEnterprise(NURESTObject): __rest_name__ = "enterprise" 
__resource_name__ = "enterprises" CONST_ENCRYPTION_MANAGEMENT_MODE_MANAGED = "MANAGED" CONST_FLOW_COLLECTION_ENABLED_ENABLED = "ENABLED" CONST_AVATAR_TYPE_COMPUTEDURL = "COMPUTEDURL" CONST_ALLOWED_FORWARDING_MODE_LOCAL_AND_REMOTE = "LOCAL_AND_REMOTE" CONST_ALLOWED_FORWARDING_MODE_DISABLED = "DISABLED" CONST_ALLOWED_FORWARDING_CLASSES_NONE = "NONE" CONST_AVATAR_TYPE_BASE64 = "BASE64" CONST_ENCRYPTION_MANAGEMENT_MODE_DISABLED = "DISABLED" CONST_FLOW_COLLECTION_ENABLED_DISABLED = "DISABLED" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" CONST_ALLOWED_FORWARDING_CLASSES_D = "D" CONST_ALLOWED_FORWARDING_CLASSES_E = "E" CONST_ALLOWED_FORWARDING_CLASSES_F = "F" CONST_ALLOWED_FORWARDING_CLASSES_G = "G" CONST_AVATAR_TYPE_URL = "URL" CONST_ALLOWED_FORWARDING_CLASSES_A = "A" CONST_ALLOWED_FORWARDING_CLASSES_B = "B" CONST_ALLOWED_FORWARDING_CLASSES_C = "C" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_ALLOWED_FORWARDING_CLASSES_H = "H" CONST_ALLOWED_FORWARDING_MODE_LOCAL_ONLY = "LOCAL_ONLY" def __init__(self, **kwargs): super(NUEnterprise, self).__init__() self._ldap_authorization_enabled = None self._ldap_enabled = None self._bgp_enabled = None self._dhcp_lease_interval = None self._vnf_management_enabled = None self._name = None self._last_updated_by = None self._web_filter_enabled = None self._receive_multi_cast_list_id = None self._send_multi_cast_list_id = None self._description = None self._shared_enterprise = None self._dictionary_version = None self._virtual_firewall_rules_enabled = None self._allow_advanced_qos_configuration = None self._allow_gateway_management = None self._allow_trusted_forwarding_class = None self._allowed_forwarding_classes = None self._allowed_forwarding_mode = None self._floating_ips_quota = None self._floating_ips_used = None self._flow_collection_enabled = None self._enable_application_performance_management = None self._encryption_management_mode = None self._enterprise_profile_id = None self._entity_scope = None self._local_as = None self._use_global_mac = None self._associated_enterprise_security_id = None self._associated_group_key_encryption_profile_id = None self._associated_key_server_monitor_id = None self._customer_id = None self._avatar_data = None self._avatar_type = None self._external_id = None self.expose_attribute(local_name="ldap_authorization_enabled", remote_name="LDAPAuthorizationEnabled", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="ldap_enabled", remote_name="LDAPEnabled", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="bgp_enabled", remote_name="BGPEnabled", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="dhcp_lease_interval", remote_name="DHCPLeaseInterval", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="vnf_management_enabled", remote_name="VNFManagementEnabled", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="web_filter_enabled", remote_name="webFilterEnabled", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="receive_multi_cast_list_id", remote_name="receiveMultiCastListID", attribute_type=str, is_required=False, is_unique=False) 
self.expose_attribute(local_name="send_multi_cast_list_id", remote_name="sendMultiCastListID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="shared_enterprise", remote_name="sharedEnterprise", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="dictionary_version", remote_name="dictionaryVersion", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="virtual_firewall_rules_enabled", remote_name="virtualFirewallRulesEnabled", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="allow_advanced_qos_configuration", remote_name="allowAdvancedQOSConfiguration", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="allow_gateway_management", remote_name="allowGatewayManagement", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="allow_trusted_forwarding_class", remote_name="allowTrustedForwardingClass", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="allowed_forwarding_classes", remote_name="allowedForwardingClasses", attribute_type=list, is_required=False, is_unique=False, choices=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'NONE']) self.expose_attribute(local_name="allowed_forwarding_mode", remote_name="allowedForwardingMode", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'LOCAL_AND_REMOTE', u'LOCAL_ONLY']) self.expose_attribute(local_name="floating_ips_quota", remote_name="floatingIPsQuota", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="floating_ips_used", remote_name="floatingIPsUsed", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="flow_collection_enabled", remote_name="flowCollectionEnabled", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED']) self.expose_attribute(local_name="enable_application_performance_management", remote_name="enableApplicationPerformanceManagement", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="encryption_management_mode", remote_name="encryptionManagementMode", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'MANAGED']) self.expose_attribute(local_name="enterprise_profile_id", remote_name="enterpriseProfileID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="local_as", remote_name="localAS", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="use_global_mac", remote_name="useGlobalMAC", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_enterprise_security_id", remote_name="associatedEnterpriseSecurityID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_group_key_encryption_profile_id", remote_name="associatedGroupKeyEncryptionProfileID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_key_server_monitor_id", 
remote_name="associatedKeyServerMonitorID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="customer_id", remote_name="customerID", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="avatar_data", remote_name="avatarData", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="avatar_type", remote_name="avatarType", attribute_type=str, is_required=False, is_unique=False, choices=[u'BASE64', u'COMPUTEDURL', u'URL']) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.l2_domains = NUL2DomainsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.l2_domain_templates = NUL2DomainTemplatesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.l4_services = NUL4ServicesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.l4_service_groups = NUL4ServiceGroupsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.l7applicationsignatures = NUL7applicationsignaturesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.saa_s_application_groups = NUSaaSApplicationGroupsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.saa_s_application_types = NUSaaSApplicationTypesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.captive_portal_profiles = NUCaptivePortalProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.rate_limiters = NURateLimitersFetcher.fetcher_with_object(parent_object=self, relationship="child") self.gateways = NUGatewaysFetcher.fetcher_with_object(parent_object=self, relationship="child") self.gateways_locations = NUGatewaysLocationsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.gateway_templates = NUGatewayTemplatesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.patnat_pools = NUPATNATPoolsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ldap_configurations = NULDAPConfigurationsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.web_categories = NUWebCategoriesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.web_domain_names = NUWebDomainNamesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.redundancy_groups = NURedundancyGroupsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.deployment_failures = NUDeploymentFailuresFetcher.fetcher_with_object(parent_object=self, relationship="child") self.performance_monitors = NUPerformanceMonitorsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.netconf_profiles = NUNetconfProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.network_macro_groups = NUNetworkMacroGroupsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.network_performance_measurements = NUNetworkPerformanceMeasurementsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.key_server_monitors = NUKeyServerMonitorsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.zfb_requests = NUZFBRequestsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.bgp_profiles = 
NUBGPProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.egress_qos_policies = NUEgressQOSPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.shared_network_resources = NUSharedNetworkResourcesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.firewall_acls = NUFirewallAclsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.firewall_rules = NUFirewallRulesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ike_certificates = NUIKECertificatesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ike_encryptionprofiles = NUIKEEncryptionprofilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ike_gateways = NUIKEGatewaysFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ike_gateway_profiles = NUIKEGatewayProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ikepsks = NUIKEPSKsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.alarms = NUAlarmsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.all_alarms = NUAllAlarmsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.vms = NUVMsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.vnfs = NUVNFsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.vnf_metadatas = NUVNFMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.vnf_threshold_policies = NUVNFThresholdPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ingress_qos_policies = NUIngressQOSPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.enterprise_networks = NUEnterpriseNetworksFetcher.fetcher_with_object(parent_object=self, relationship="child") self.enterprise_securities = NUEnterpriseSecuritiesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.policy_group_categories = NUPolicyGroupCategoriesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.policy_object_groups = NUPolicyObjectGroupsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.domains = NUDomainsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.domain_templates = NUDomainTemplatesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.containers = NUContainersFetcher.fetcher_with_object(parent_object=self, relationship="child") self.cos_remarking_policy_tables = NUCOSRemarkingPolicyTablesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.routing_policies = NURoutingPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.applications = NUApplicationsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.applicationperformancemanagements = NUApplicationperformancemanagementsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.groups = NUGroupsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.group_key_encryption_profiles = NUGroupKeyEncryptionProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.trunks = 
NUTrunksFetcher.fetcher_with_object(parent_object=self, relationship="child") self.dscp_forwarding_class_tables = NUDSCPForwardingClassTablesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.dscp_remarking_policy_tables = NUDSCPRemarkingPolicyTablesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.users = NUUsersFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ns_gateways = NUNSGatewaysFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ns_gateways_counts = NUNSGatewaysCountsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ns_gateway_summaries = NUNSGatewaySummariesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ns_gateway_templates = NUNSGatewayTemplatesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.nsg_groups = NUNSGGroupsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.ns_redundant_gateway_groups = NUNSRedundantGatewayGroupsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.public_network_macros = NUPublicNetworkMacrosFetcher.fetcher_with_object(parent_object=self, relationship="child") self.multi_cast_lists = NUMultiCastListsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.avatars = NUAvatarsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.overlay_management_profiles = NUOverlayManagementProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.syslog_destinations = NUSyslogDestinationsFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) @property def ldap_authorization_enabled(self): return self._ldap_authorization_enabled @ldap_authorization_enabled.setter def ldap_authorization_enabled(self, value): self._ldap_authorization_enabled = value @property def ldap_enabled(self): return self._ldap_enabled @ldap_enabled.setter def ldap_enabled(self, value): self._ldap_enabled = value @property
BSD 3-Clause New or Revised License
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/encoding/configurations/video/vp9/vp9_api.py
Vp9Api.create
python
def create(self, vp9_video_configuration, **kwargs):
    return self.api_client.post(
        '/encoding/configurations/video/vp9',
        vp9_video_configuration,
        type=Vp9VideoConfiguration,
        **kwargs
    )
Create VP9 Codec Configuration

:param vp9_video_configuration: The VP9 Codec Configuration to be created
:type vp9_video_configuration: Vp9VideoConfiguration, required
:return: VP9 video configuration
:rtype: Vp9VideoConfiguration
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/encoding/configurations/video/vp9/vp9_api.py#L34-L49
from __future__ import absolute_import

from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.models.vp9_video_configuration import Vp9VideoConfiguration
from bitmovin_api_sdk.encoding.configurations.video.vp9.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.configurations.video.vp9.vp9_video_configuration_list_query_params import Vp9VideoConfigurationListQueryParams


class Vp9Api(BaseApi):
    @poscheck_except(2)
    def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
        super(Vp9Api, self).__init__(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )

        self.customdata = CustomdataApi(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )
MIT License
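A hedged usage sketch for the Vp9Api.create record above (not part of the source): the API key is a placeholder, and the Vp9VideoConfiguration keyword argument `name` is an assumption about the model, since only the positional use of the configuration object is shown in the record.

from bitmovin_api_sdk.encoding.configurations.video.vp9.vp9_api import Vp9Api
from bitmovin_api_sdk.models.vp9_video_configuration import Vp9VideoConfiguration

vp9_api = Vp9Api(api_key="<BITMOVIN_API_KEY>")            # placeholder credential
config = Vp9VideoConfiguration(name="vp9-example")        # assumed keyword argument
created = vp9_api.create(config)                          # POSTs to /encoding/configurations/video/vp9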
kste/cryptosmt
ciphers/chaskeymachalf.py
ChasKeyMacHalf.createSTP
python
def createSTP(self, stp_filename, parameters):
    wordsize = parameters["wordsize"]
    rounds = parameters["rounds"]
    weight = parameters["sweight"]
    self.num_messages = parameters["nummessages"]

    with open(stp_filename, 'w') as stp_file:
        stp_file.write("% Input File for STP\n% ChasKeyMac w={} rounds={}"
                       "\n\n\n".format(wordsize, rounds))

        v0 = ["v0{}".format(i) for i in range((rounds + 1) * self.num_messages)]
        v1 = ["v1{}".format(i) for i in range((rounds + 1) * self.num_messages)]
        v2 = ["v2{}".format(i) for i in range((rounds + 1) * self.num_messages)]
        v3 = ["v3{}".format(i) for i in range((rounds + 1) * self.num_messages)]

        w0 = ["w0{}".format(i) for i in range(rounds * self.num_messages)]
        w1 = ["w1{}".format(i) for i in range(rounds * self.num_messages)]

        stpcommands.setupVariables(stp_file, v0, wordsize)
        stpcommands.setupVariables(stp_file, v1, wordsize)
        stpcommands.setupVariables(stp_file, v2, wordsize)
        stpcommands.setupVariables(stp_file, v3, wordsize)
        stpcommands.setupVariables(stp_file, w0, wordsize)
        stpcommands.setupVariables(stp_file, w1, wordsize)

        stpcommands.setupWeightComputation(stp_file, weight, w0 + w1, wordsize, 1)

        for i in range(rounds):
            self.setupChasKeyRound(stp_file, i, v0[i], v1[i], v2[i], v3[i],
                                   v0[i + 1], v1[i + 1], v2[i + 1], v3[i + 1],
                                   w0[i], w1[i], wordsize)

        stpcommands.assertNonZero(stp_file, v0 + v1 + v2 + v3, wordsize)

        for key, value in parameters["fixedVariables"].items():
            stpcommands.assertVariableValue(stp_file, key, value)

        for char in parameters["blockedCharacteristics"]:
            stpcommands.blockCharacteristic(stp_file, char, wordsize)

        stpcommands.setupQuery(stp_file)

    return
Creates an STP file to find a characteristic for ChasKey with the given parameters.
https://github.com/kste/cryptosmt/blob/90c1b401c62422b5ebe4fac17a57b8777172de3d/ciphers/chaskeymachalf.py#L32-L92
from parser import stpcommands
from ciphers.cipher import AbstractCipher
from parser.stpcommands import getStringLeftRotate as rotl
from parser.stpcommands import getStringRightRotate as rotr


class ChasKeyMacHalf(AbstractCipher):
    name = "chaskeyhalf"
    num_messages = 1

    def getFormatString(self):
        return ['v0', 'v1', 'v2', 'v3', 'w0', 'w1', 'weight']
MIT License
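A hedged sketch of driving createSTP from the record above: the parameter keys are taken directly from the function body, while the concrete values and the output filename are illustrative only.

from ciphers.chaskeymachalf import ChasKeyMacHalf

cipher = ChasKeyMacHalf()
parameters = {
    "wordsize": 32,                  # illustrative value
    "rounds": 4,
    "sweight": 5,
    "nummessages": 1,
    "fixedVariables": {},            # e.g. values to pin specific state variables
    "blockedCharacteristics": [],    # characteristics to exclude from the search
}
cipher.createSTP("chaskeyhalf.stp", parameters)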
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_0/models/volume_get_response.py
VolumeGetResponse.__init__
python
def __init__(
    self,
    more_items_remaining=None,
    total_item_count=None,
    items=None,
    total=None,
):
    if more_items_remaining is not None:
        self.more_items_remaining = more_items_remaining
    if total_item_count is not None:
        self.total_item_count = total_item_count
    if items is not None:
        self.items = items
    if total is not None:
        self.total = total
Keyword args:
    more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
    total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
    items (list[Volume]): Returns a list of all items after filtering. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
    total (list[Volume]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_0/models/volume_get_response.py#L49-L70
import pprint
import re

import six
import typing

from ....properties import Property

if typing.TYPE_CHECKING:
    from pypureclient.flasharray.FA_2_0 import models


class VolumeGetResponse(object):
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'items': 'list[Volume]',
        'total': 'list[Volume]'
    }

    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'items': 'items',
        'total': 'total'
    }

    required_args = {
    }
BSD 2-Clause Simplified License
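A hedged construction sketch for the VolumeGetResponse record above: in normal use the FlashArray client builds this model from a REST response, so the empty lists here merely stand in for Volume model instances.

from pypureclient.flasharray.FA_2_0.models.volume_get_response import VolumeGetResponse

response = VolumeGetResponse(
    more_items_remaining=False,
    total_item_count=0,
    items=[],   # would normally contain Volume model instances
    total=[],
)
print(response.total_item_count)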
spaam/svtplay-dl
versioneer.py
get_root
python
def get_root():
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = (
            "Versioneer was unable to run the project root directory. "
            "Versioneer requires setup.py to be executed from "
            "its immediate directory (like 'python setup.py COMMAND'), "
            "or in a way that lets it use sys.argv[0] to find the root "
            "(like 'python path/to/setup.py COMMAND')."
        )
        raise VersioneerBadRootError(err)
    try:
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print(f"Warning: build in {os.path.dirname(me)} is using versioneer.py from {versioneer_py}")
    except NameError:
        pass
    return root
Get the project root directory.

We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py.
https://github.com/spaam/svtplay-dl/blob/adcaa73210dda94f40e24d83f2020360c4b83919/versioneer.py#L293-L330
try:
    import configparser
except ImportError:
    import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys


class VersioneerConfig:
MIT License
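A hedged sketch of calling get_root() from a build script, per the record above; it assumes the interpreter's working directory (or the directory of sys.argv[0]) is a project that ships setup.py and versioneer.py, otherwise VersioneerBadRootError is raised.

import versioneer

# Run this from the project root (the directory holding setup.py / versioneer.py).
project_root = versioneer.get_root()
print("project root:", project_root)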
jplusplus/statscraper
statscraper/scrapers/work_injury_scraper.py
WorkInjuries.initiate_browser
python
def initiate_browser(self):
    tempdir = os.getenv(TEMPDIR_ENVVAR, DEFAULT_TEMPDIR)
    tempsubdir = uuid4().hex
    self.tempdir = os.path.join(tempdir, tempsubdir)
    try:
        os.makedirs(self.tempdir)
    except OSError:
        if not os.path.isdir(self.tempdir):
            raise

    profile = webdriver.FirefoxProfile()
    profile.set_preference('browser.download.folderList', 2)
    profile.set_preference('browser.download.manager.showWhenStarting', False)
    profile.set_preference('browser.download.manager.closeWhenDone', True)
    profile.set_preference('browser.download.dir', self.tempdir)
    profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
                           "application/octet-stream;application/vnd.ms-excel")
    profile.set_preference("browser.helperApps.alwaysAsk.force", False)
    profile.set_preference("browser.download.manager.useWindow", False)

    self.browser = webdriver.Firefox(profile)
    self.browser.get('http://webbstat.av.se')
    detailed_cls = "Document_TX_GOTOTAB_Avancerad"
    WebDriverWait(self.browser, PAGELOAD_TIMEOUT) \
        .until(EC.presence_of_element_located((By.CLASS_NAME, detailed_cls)))
    self.browser.implicitly_wait(3)
    self.browser \
        .find_element_by_class_name(detailed_cls) \
        .find_element_by_tag_name("td") \
        .click()
    WebDriverWait(self.browser, PAGELOAD_TIMEOUT) \
        .until(EC.presence_of_element_located((By.CLASS_NAME, detailed_cls)))
    self.browser.implicitly_wait(3)
The button for expanded detailed options. This also happens to be a good indicator as to whether all content is loaded.
https://github.com/jplusplus/statscraper/blob/acb85da07c4349c06a947e07ee316fb618e92bfa/statscraper/scrapers/work_injury_scraper.py#L35-L85
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
from statscraper import BaseScraper, Collection, Dataset, Result, Dimension
import os
from glob import iglob
from time import sleep
from uuid import uuid4
from xlrd import open_workbook
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

DEFAULT_TEMPDIR = "./tmp"
TEMPDIR_ENVVAR = "STATSCRAPER_TEMPDIR"
PAGELOAD_TIMEOUT = 90


class WorkInjuries(BaseScraper):
    tempdir = "./tmp"

    @BaseScraper.on("init")
MIT License
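A hedged sketch of triggering initiate_browser from the record above; it assumes the BaseScraper "init" hook fires when the scraper is constructed, and that Firefox with a compatible driver is installed locally.

import os
from statscraper.scrapers.work_injury_scraper import WorkInjuries

os.environ["STATSCRAPER_TEMPDIR"] = "/tmp/statscraper"  # optional override of the ./tmp default
scraper = WorkInjuries()   # the "init" hook opens webbstat.av.se and expands the detailed view
print(scraper.tempdir)     # per-run download directory created by initiate_browser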
google/uncertainty-baselines
baselines/cifar/hyperbatchensemble.py
log_uniform_sample
python
def log_uniform_sample(sample_size, lambda_parameters):
    log_lower, log_upper = lambda_parameters
    ens_size = log_lower.shape[0]
    lambdas_dim = log_lower.shape[1]

    log_lower_ = tf.expand_dims(log_lower, 1)
    log_upper_ = tf.expand_dims(log_upper, 1)
    u = tf.random.uniform(shape=(ens_size, sample_size, lambdas_dim))

    return tf.exp((log_upper_ - log_lower_) * u + log_lower_)
Sample a batch of lambdas distributed according to log-unif(lower, upper).
https://github.com/google/uncertainty-baselines/blob/d37c17c4b08a88d6546bbf299b59127a03398404/baselines/cifar/hyperbatchensemble.py#L88-L99
import os import pickle import time from absl import app from absl import flags from absl import logging import robustness_metrics as rm import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds import tensorflow_probability as tfp import uncertainty_baselines as ub import utils from uncertainty_baselines.models import hyperbatchensemble_e_factory as e_factory from uncertainty_baselines.models import HyperBatchEnsembleLambdaConfig as LambdaConfig from uncertainty_baselines.models import wide_resnet_hyperbatchensemble from tensorboard.plugins.hparams import api as hp flags.DEFINE_boolean('restore_checkpoint', False, 'Start training from latest checkpoint.') flags.DEFINE_bool('e_model_use_bias', False, 'Whether to use bias in e models.') flags.DEFINE_float('min_l2_range', 1e-1, 'Min value of l2 range.') flags.DEFINE_float('max_l2_range', 1e2, 'Max value of l2 range.') flags.DEFINE_float( 'e_body_hidden_units', 0, 'Number of hidden units used in e_models. ' 'If zero a linear model is used.') flags.DEFINE_float( 'l2_batchnorm', 15, 'L2 reg. parameter for batchnorm layers (not tuned, constant).') flags.DEFINE_float('ens_init_delta_bounds', 0.2, 'If ensemble is initialized with lambdas, this values' 'determines the spread of the log-uniform distribution' 'around it (used by ens_init: random, default).') flags.DEFINE_float('init_emodels_stddev', 1e-4, 'Init e_models weights.') flags.DEFINE_integer('ensemble_size', 4, 'Size of the ensemble.') flags.DEFINE_float('lr_tuning', 1e-3, 'Learning rate for hparam tuning.') flags.DEFINE_float('tau', 1e-3, 'Regularization of the entropy of the lambda distribution.') flags.DEFINE_bool('use_gibbs_ce', True, 'Use Gibbs cross entropy for training.') flags.DEFINE_bool( 'sample_and_tune', True, 'Whether to do tuning step with sampling from lambda distribution or not.') flags.DEFINE_float('random_sign_init', -0.75, 'Use random sign init for fast weights.') flags.DEFINE_float('fast_weight_lr_multiplier', 0.5, 'fast weights lr multiplier to scale (alpha, gamma).') flags.DEFINE_integer('tuning_warmup_epochs', 0, 'Number of epochs before starting tuning of lambdas') flags.DEFINE_integer('tuning_every_x_step', 3, 'Do tunning step after x training steps.') flags.DEFINE_bool('regularize_fast_weights', False, 'Whether to egularize fast weights in BatchEnsemble layers.') flags.DEFINE_bool('fast_weights_eq_contraint', True, 'If true, set u,v:=r,s') flags.DEFINE_integer( 'num_eval_samples', 0, 'Number of samples taken for each batch-ens. member.' 'If >=0, we take num_eval_samples + the mean of lambdas.' '(by default, notice that when = 0, predictions are with the mean only)' 'If < 0, we take -num_eval_samples, without including the mean of lambdas.') flags.FLAGS.set_default('lr_decay_epochs', ['100', '200', '225']) flags.FLAGS.set_default('train_epochs', 250) flags.FLAGS.set_default('train_proportion', 0.95) FLAGS = flags.FLAGS @tf.function
Apache License 2.0
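A hedged shape check for log_uniform_sample from the record above; it assumes the function is in scope (it lives at module level in hyperbatchensemble.py), and the bounds are illustrative.

import tensorflow.compat.v2 as tf

ens_size, lambdas_dim, sample_size = 4, 3, 16
log_lower = tf.math.log(tf.fill((ens_size, lambdas_dim), 0.1))
log_upper = tf.math.log(tf.fill((ens_size, lambdas_dim), 100.0))

# log_uniform_sample is assumed importable from the training script above.
lambdas = log_uniform_sample(sample_size, (log_lower, log_upper))
print(lambdas.shape)  # (4, 16, 3): one batch of samples per ensemble member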
ratsgo/embedding
models/bilm/model.py
_pretrained_initializer
python
def _pretrained_initializer(varname, weight_file, embedding_weight_file=None):
    weight_name_map = {}
    for i in range(2):
        for j in range(8):
            root = 'RNN_{}/RNN/MultiRNNCell/Cell{}'.format(i, j)
            weight_name_map[root + '/rnn/lstm_cell/kernel'] = root + '/LSTMCell/W_0'
            weight_name_map[root + '/rnn/lstm_cell/bias'] = root + '/LSTMCell/B'
            weight_name_map[root + '/rnn/lstm_cell/projection/kernel'] = root + '/LSTMCell/W_P_0'

    varname_in_file = varname[5:]
    if varname_in_file.startswith('RNN'):
        varname_in_file = weight_name_map[varname_in_file]

    if varname_in_file == 'embedding':
        with h5py.File(embedding_weight_file, 'r') as fin:
            embed_weights = fin[varname_in_file][...]
            weights = np.zeros(
                (embed_weights.shape[0] + 1, embed_weights.shape[1]),
                dtype=DTYPE
            )
            weights[1:, :] = embed_weights
    else:
        with h5py.File(weight_file, 'r') as fin:
            if varname_in_file == 'char_embed':
                char_embed_weights = fin[varname_in_file][...]
                weights = np.zeros(
                    (char_embed_weights.shape[0] + 1, char_embed_weights.shape[1]),
                    dtype=DTYPE
                )
                weights[1:, :] = char_embed_weights
            else:
                weights = fin[varname_in_file][...]

    def ret(shape, **kwargs):
        if list(shape) != list(weights.shape):
            raise ValueError(
                "Invalid shape initializing {0}, got {1}, expected {2}".format(
                    varname_in_file, shape, weights.shape)
            )
        return weights

    return ret
We'll stub out all the initializers in the pretrained LM with a function that loads the weights from the file
https://github.com/ratsgo/embedding/blob/2e0b22f97b0f1f7ad23036afd4b168e718e41c8b/models/bilm/model.py#L185-L241
import numpy as np import tensorflow as tf import h5py import json from .data import UnicodeCharsVocabulary, Batcher, InvalidNumberOfCharacters DTYPE = 'float32' DTYPE_INT = 'int64' class BidirectionalLanguageModel(object): def __init__( self, options_file: str, weight_file: str, use_character_inputs=True, embedding_weight_file=None, max_batch_size=128, ): with open(options_file, 'r') as fin: options = json.load(fin) if not use_character_inputs: if embedding_weight_file is None: raise ValueError( "embedding_weight_file is required input with " "not use_character_inputs" ) self._options = options self._weight_file = weight_file self._embedding_weight_file = embedding_weight_file self._use_character_inputs = use_character_inputs self._max_batch_size = max_batch_size self._ops = {} self._graphs = {} def __call__(self, ids_placeholder): if ids_placeholder in self._ops: ret = self._ops[ids_placeholder] else: if len(self._ops) == 0: lm_graph = BidirectionalLanguageModelGraph( self._options, self._weight_file, ids_placeholder, embedding_weight_file=self._embedding_weight_file, use_character_inputs=self._use_character_inputs, max_batch_size=self._max_batch_size) else: with tf.variable_scope('', reuse=True): lm_graph = BidirectionalLanguageModelGraph( self._options, self._weight_file, ids_placeholder, embedding_weight_file=self._embedding_weight_file, use_character_inputs=self._use_character_inputs, max_batch_size=self._max_batch_size) ops = self._build_ops(lm_graph) self._ops[ids_placeholder] = ops self._graphs[ids_placeholder] = lm_graph ret = ops return ret def _build_ops(self, lm_graph): with tf.control_dependencies([lm_graph.update_state_op]): token_embeddings = lm_graph.embedding layers = [ tf.concat([token_embeddings, token_embeddings], axis=2) ] n_lm_layers = len(lm_graph.lstm_outputs['forward']) for i in range(n_lm_layers): layers.append( tf.concat( [lm_graph.lstm_outputs['forward'][i], lm_graph.lstm_outputs['backward'][i]], axis=-1 ) ) sequence_length_wo_bos_eos = lm_graph.sequence_lengths - 2 layers_without_bos_eos = [] for layer in layers: layer_wo_bos_eos = layer[:, 1:, :] layer_wo_bos_eos = tf.reverse_sequence( layer_wo_bos_eos, lm_graph.sequence_lengths - 1, seq_axis=1, batch_axis=0, ) layer_wo_bos_eos = layer_wo_bos_eos[:, 1:, :] layer_wo_bos_eos = tf.reverse_sequence( layer_wo_bos_eos, sequence_length_wo_bos_eos, seq_axis=1, batch_axis=0, ) layers_without_bos_eos.append(layer_wo_bos_eos) lm_embeddings = tf.concat( [tf.expand_dims(t, axis=1) for t in layers_without_bos_eos], axis=1 ) mask_wo_bos_eos = tf.cast(lm_graph.mask[:, 1:], 'int32') mask_wo_bos_eos = tf.reverse_sequence( mask_wo_bos_eos, lm_graph.sequence_lengths - 1, seq_axis=1, batch_axis=0, ) mask_wo_bos_eos = mask_wo_bos_eos[:, 1:] mask_wo_bos_eos = tf.reverse_sequence( mask_wo_bos_eos, sequence_length_wo_bos_eos, seq_axis=1, batch_axis=0, ) mask_wo_bos_eos = tf.cast(mask_wo_bos_eos, 'bool') return { 'lm_embeddings': lm_embeddings, 'lengths': sequence_length_wo_bos_eos, 'token_embeddings': lm_graph.embedding, 'mask': mask_wo_bos_eos, }
MIT License
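A hedged sketch of the closure returned by _pretrained_initializer in the record above. The "bilm/" variable-name prefix (only characters after index 5 are used), the HDF5 file path, and the stored char_embed shape are all assumptions for illustration.

from models.bilm.model import _pretrained_initializer

init_fn = _pretrained_initializer(
    "bilm/char_embed",            # assumed graph-name prefix; stripped by varname[5:]
    weight_file="weights.hdf5",   # placeholder path to the pretrained LM weights
)
weights = init_fn(shape=[262, 16])  # shape must equal the stored array plus one padding row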
berkeleyphotonicsgenerator/bpg
BPG/compiler/dataprep_gdspy.py
Dataprep.gdspy_manh
python
def gdspy_manh(self,
               polygon_gdspy: Union[gdspy.Polygon, gdspy.PolygonSet, None],
               manh_grid_size: float,
               do_manh: bool,
               ) -> Union[gdspy.Polygon, gdspy.PolygonSet]:
    start = time.time()
    if do_manh:
        manh_type = 'inc'
    else:
        manh_type = 'non'

    if polygon_gdspy is None:
        polygon_out = None
    elif isinstance(polygon_gdspy, gdspy.Polygon):
        coord_list = self.manh_skill(polygon_gdspy.points, manh_grid_size, manh_type)
        polygon_out = self.dataprep_cleanup_gdspy(gdspy.Polygon(coord_list),
                                                  do_cleanup=self.do_cleanup)
    elif isinstance(polygon_gdspy, gdspy.PolygonSet):
        polygon_list = []
        for poly in polygon_gdspy.polygons:
            coord_list = self.manh_skill(poly, manh_grid_size, manh_type)
            polygon_list.append(coord_list)
        polygon_out = self.dataprep_cleanup_gdspy(gdspy.PolygonSet(polygon_list),
                                                  do_cleanup=self.do_cleanup)
    else:
        raise ValueError('polygon_gdspy should be either a Polygon or PolygonSet')

    end = time.time()
    dataprep_logger.debug(f'gdspy_man took {end - start}s')

    return polygon_out
Performs Manhattanization on a gdspy representation of a polygon, and returns a gdspy
representation of the Manhattanized polygon

Parameters
----------
polygon_gdspy : Union[gdspy.Polygon, gdspy.PolygonSet, None]
    The gdspy representation of the polygons to be Manhattanized
manh_grid_size : float
    grid size for Manhattanization, edge length after Manhattanization should be larger than it
do_manh : bool
    True to perform Manhattanization

Returns
-------
polygon_out : Union[gdspy.Polygon, gdspy.PolygonSet]
    The Manhattanized polygon, in gdspy representation
https://github.com/berkeleyphotonicsgenerator/bpg/blob/27221c9bbfd5e25547ad048fcbfacf940db9ac8c/BPG/compiler/dataprep_gdspy.py#L748-L797
import warnings import gdspy import time import numpy as np import sys import logging import re from BPG.objects import PhotonicRect, PhotonicPolygon, PhotonicRound from BPG.compiler.point_operations import coords_cleanup from BPG.content_list import ContentList from math import ceil from typing import TYPE_CHECKING, Tuple, List, Union, Dict, Optional, Pattern, Iterable if TYPE_CHECKING: from BPG.photonic_core import PhotonicTechInfo from bag.layout.routing import RoutingGrid from BPG.bpg_custom_types import lpp_type MAX_SIZE = sys.maxsize dataprep_logger = logging.getLogger('dataprep') warnings.filterwarnings( action='ignore', message='.*polygon with more than 199 points was created.*', ) IMPLEMENTED_DATAPREP_OPERATIONS = ['rad', 'add', 'manh', 'ouo', 'sub', 'ext', 'and', 'xor'] class Dataprep: def __init__(self, photonic_tech_info: "PhotonicTechInfo", grid: "RoutingGrid", content_list_flat: "ContentList", is_lsf: bool = False, impl_cell=None, ) -> None: self.photonic_tech_info: PhotonicTechInfo = photonic_tech_info self.grid = grid self.content_list_flat: "ContentList" = content_list_flat self.is_lsf = is_lsf start = time.time() self.content_list_flat_sorted_by_layer = content_list_flat.sort_content_list_by_layers() end = time.time() logging.info(f'Sorting flat content list by layer took {end - start:.4g}s') self.global_grid_size = self.photonic_tech_info.global_grid_size self.global_rough_grid_size = self.photonic_tech_info.global_rough_grid_size self.global_operation_precision = self.global_grid_size / 10 self.global_clean_up_grid_size = self.global_grid_size / 10 self.offset_tolerance = 4.35250 self.do_cleanup = True self.GLOBAL_DO_MANH_AT_BEGINNING = False self.GLOBAL_DO_MANH_DURING_OP = True self.GLOBAL_DO_FINAL_MANH = False self.flat_gdspy_polygonsets_by_layer: Dict[Tuple[str, str], Union[gdspy.PolygonSet, gdspy.Polygon]] = {} self.post_dataprep_polygon_pointlist_by_layer: Dict[Tuple[str, str], List] = {} self.content_list_flat_post_dataprep: "ContentList" = None self.dataprep_ignore_list: List[Tuple[str, str]] = [] self.dataprep_bypass_list: List[Tuple[str, str]] = [] dataprep_ignore_list_temp = self.photonic_tech_info.dataprep_routine_data.get( 'dataprep_ignore_list', []) dataprep_bypass_list_temp = self.photonic_tech_info.dataprep_routine_data.get( 'dataprep_bypass_list', []) if dataprep_ignore_list_temp is None: self.dataprep_ignore_list = [] else: for lpp_entry in dataprep_ignore_list_temp: self.dataprep_ignore_list.extend( self.regex_search_lpps( regex=self._check_input_lpp_entry_and_convert_to_regex(lpp_entry), keys=self.content_list_flat_sorted_by_layer.keys() ) ) if dataprep_bypass_list_temp is None: self.dataprep_bypass_list = [] else: for lpp_entry in dataprep_bypass_list_temp: self.dataprep_bypass_list.extend( self.regex_search_lpps( regex=self._check_input_lpp_entry_and_convert_to_regex(lpp_entry), keys=self.content_list_flat_sorted_by_layer.keys() ) ) self.ouuo_regex_list: List[Tuple[Pattern, Pattern]] = [] self.dataprep_groups: List[Dict] = [] if self.is_lsf: dataprep_groups_temp = self.photonic_tech_info.lsf_export_parameters.get('dataprep_groups', []) ouuo_list_temp = self.photonic_tech_info.lsf_export_parameters.get('over_under_under_over', []) else: dataprep_groups_temp = self.photonic_tech_info.dataprep_routine_data.get('dataprep_groups', []) ouuo_list_temp = self.photonic_tech_info.dataprep_routine_data.get('over_under_under_over', []) if dataprep_groups_temp is None: self.dataprep_groups = [] else: for dataprep_group in dataprep_groups_temp: 
self.dataprep_groups.append(self._check_dataprep_ops(dataprep_group)) if ouuo_list_temp is None: self.ouuo_regex_list = [] else: for lpp_entry in ouuo_list_temp: self.ouuo_regex_list.append(self._check_input_lpp_entry_and_convert_to_regex(lpp_entry)) self.polygon_cache: Dict[Tuple, Union[gdspy.Polygon, gdspy.PolygonSet]] = {} if not isinstance(impl_cell, str): raise ValueError(f'impl_cell must be a string') self.impl_cell = impl_cell @staticmethod def _check_input_lpp_entry_and_convert_to_regex(lpp_entry, ) -> Tuple[Pattern, Pattern]: if not isinstance(lpp_entry, dict): raise ValueError(f'lpp list entries must be dictionaries.\n' f'Entry {lpp_entry} violates this.') lpp_layer = lpp_entry.get('lpp', None) if lpp_layer is None: raise ValueError(f'List entries must be dictionaries with an lpp key:' f' - {{lpp: [layer, purpose]}}\n' f'Entry {lpp_entry} violates this.') if len(lpp_layer) != 2: raise ValueError(f'lpp entry must specify a layer and a purpose, in that order.\n' f'Specified lpp {lpp_layer} does not meet this criteria.') if not (isinstance(lpp_layer[0], str) and isinstance(lpp_layer[1], str)): raise ValueError(f'Lpp layers and purposes must be specified as a list of two strings.\n' f'Entry {lpp_layer} does not meet this criteria.') layer_regex = re.compile(lpp_layer[0]) purpose_regex = re.compile(lpp_layer[1]) return layer_regex, purpose_regex def _check_dataprep_ops(self, dataprep_group, ) -> Dict[str, List]: if 'lpp_in' not in dataprep_group: raise ValueError(f'Dataprep group entry must be a dictionary containing a key named \'lpp_in\'.\n' f'Dataprep group {dataprep_group} does not meet this criteria.') if 'lpp_ops' not in dataprep_group: raise ValueError(f'Dataprep group entry must be a dictionary containing a key named \'lpp_ops\'.\n' f'Dataprep group {dataprep_group} does not meet this criteria.') if not (isinstance(dataprep_group['lpp_in'], list)): raise ValueError(f'lpp_in must be a list of dictionaries.\n' f'Dataprep group {dataprep_group} does not meet this criteria.') if not (isinstance(dataprep_group['lpp_ops'], list)): raise ValueError(f'lpp_ops must be a list of dictionaries.\n' f'Dataprep group {dataprep_group} does not meet this criteria.') lpp_in_clean = [] for lpp_in_entry in dataprep_group['lpp_in']: lpp_in_clean.append(self._check_input_lpp_entry_and_convert_to_regex(lpp_in_entry)) lpp_op_clean = [] for lpp_op_entry in dataprep_group['lpp_ops']: if not isinstance(lpp_op_entry, dict): raise ValueError(f'lpp_ops entries must be dictionaries.\n' f'Dataprep group {dataprep_group} does not meet this criteria.') if 'operation' not in lpp_op_entry: raise ValueError(f'lpp_ops entry must specify a value for the key \'operation\'\n' f'Dataprep group {dataprep_group} does not meet this criteria.') if lpp_op_entry['operation'] not in IMPLEMENTED_DATAPREP_OPERATIONS: raise ValueError(f'The following dataprep operations are implemented at this ' f'time: {IMPLEMENTED_DATAPREP_OPERATIONS}\n' f'Dataprep group {dataprep_group} uses an unsupported dataprep ' f'operation {lpp_op_entry["operation"]}.') operation = lpp_op_entry['operation'] amount = lpp_op_entry.get('amount', None) if (amount is None) and (operation != 'manh'): raise ValueError(f'Amount must be specified for operation \'{operation}\' ' f'in dataprep group {dataprep_group}') if (amount is not None) and not (isinstance(amount, int) or isinstance(amount, float)): raise ValueError(f'amount must be a float or int.\n' f'Operation \'{operation}\' in dataprep group {dataprep_group} ' f'does not meet this criteria.') out_layer 
= lpp_op_entry.get('lpp', None) if (out_layer is None) and (operation != 'manh'): raise ValueError(f'output lpp must be specified for operation \'{operation}\' ' f'in dataprep group {dataprep_group}') if out_layer is not None: if len(out_layer) != 2: raise ValueError(f'lpp entry must specify a layer and a purpose, in that order.\n' f'Specified entry {out_layer} does not meet this criteria.') if not (isinstance(out_layer[0], str) and isinstance(out_layer[1], str)): raise ValueError(f'Lpp layers and purposes must be specified as a list of two strings.\n' f'{out_layer} in dataprep group {dataprep_group} does not meet this criteria.') out_layer = (out_layer[0], out_layer[1]) lpp_op_clean.append( dict( operation=operation, amount=amount, lpp=out_layer, ) ) return dict( lpp_in=lpp_in_clean, lpp_ops=lpp_op_clean, ) def dataprep_cleanup_gdspy(self, polygon: Union[gdspy.Polygon, gdspy.PolygonSet, None], do_cleanup: bool = True, ) -> Union[gdspy.Polygon, gdspy.PolygonSet, None]: if do_cleanup: if polygon is None: clean_polygon = None elif isinstance(polygon, (gdspy.Polygon, gdspy.PolygonSet)): clean_polygon = gdspy.offset( polygons=polygon, distance=0, tolerance=self.offset_tolerance, max_points=MAX_SIZE, join_first=True, precision=self.global_clean_up_grid_size, ) clean_coords = [] if isinstance(clean_polygon, gdspy.Polygon): clean_coords = self.global_grid_size * np.round(clean_polygon.points / self.global_grid_size, 0) clean_polygon = gdspy.Polygon(points=clean_coords) elif isinstance(clean_polygon, gdspy.PolygonSet): for poly in clean_polygon.polygons: clean_coords.append(self.global_grid_size * np.round(poly / self.global_grid_size, 0)) clean_polygon = gdspy.PolygonSet(polygons=clean_coords) else: raise ValueError('input polygon must be a gdspy.Polygon, gdspy.PolygonSet or NonType') else: clean_polygon = polygon return clean_polygon def dataprep_coord_to_gdspy( self, pos_neg_list_list: Tuple[List[List[Tuple[float, float]]], List[List[Tuple[float, float]]]], manh_grid_size: float, do_manh: bool, ) -> Union[gdspy.Polygon, gdspy.PolygonSet]: pos_coord_list_list = pos_neg_list_list[0] neg_coord_list_list = pos_neg_list_list[1] polygon_out = self.dataprep_cleanup_gdspy(gdspy.PolygonSet(pos_coord_list_list), do_cleanup=self.do_cleanup) if len(neg_coord_list_list): polygon_neg = self.dataprep_cleanup_gdspy(gdspy.PolygonSet(neg_coord_list_list), do_cleanup=self.do_cleanup) polygon_out = self.dataprep_cleanup_gdspy( gdspy.fast_boolean(polygon_out, polygon_neg, 'not', precision=self.global_operation_precision, max_points=MAX_SIZE), do_cleanup=self.do_cleanup ) polygon_out = self.gdspy_manh(polygon_out, manh_grid_size=manh_grid_size, do_manh=do_manh) polygon_out = self.dataprep_cleanup_gdspy( polygon_out, do_cleanup=self.do_cleanup ) return polygon_out def polyop_gdspy_to_point_list(self, polygon_gdspy_in: Union[gdspy.Polygon, gdspy.PolygonSet], fracture: bool = True, do_manh: bool = True, manh_grid_size: Optional[float] = None, ) -> List[List[Tuple[float, float]]]: if manh_grid_size is None: manh_grid_size = self.global_grid_size if do_manh: start = time.time() polygon_gdspy_in = self.gdspy_manh(polygon_gdspy_in, manh_grid_size=manh_grid_size, do_manh=do_manh) end = time.time() dataprep_logger.debug(f'polyop_gdspy_to_point_list: gdspy_manh took: {end-start}s') if fracture: start = time.time() polygon_gdspy = polygon_gdspy_in.fracture(max_points=4094, precision=self.global_grid_size) end = time.time() dataprep_logger.debug(f'polyop_gdspy_to_point_list: fracturing took: {end-start}s') else: polygon_gdspy = 
polygon_gdspy_in output_list_of_coord_lists = [] if isinstance(polygon_gdspy, gdspy.Polygon): output_list_of_coord_lists = [np.round(polygon_gdspy.points, 3)] non_manh_edge = self.not_manh(polygon_gdspy.points) if non_manh_edge: logging.debug(f'Warning: a non-Manhattanized polygon is created in polyop_gdspy_to_point_list, ' f'number of non-manh edges is {non_manh_edge}') elif isinstance(polygon_gdspy, gdspy.PolygonSet): for poly in polygon_gdspy.polygons: output_list_of_coord_lists.append(np.round(poly, 3)) non_manh_edge = self.not_manh(poly) if non_manh_edge: logging.debug(f'Warning: a non-Manhattanized polygon is created in polyop_gdspy_to_point_list, ' f'number of non-manh edges is {non_manh_edge}') else: raise ValueError('polygon_gdspy must be a gdspy.Polygon or gdspy.PolygonSet') return output_list_of_coord_lists @staticmethod def merge_adjacent_duplicate(coord_set: np.ndarray, eps_grid: float = 1e-6, ) -> np.ndarray: coord_set_shift = np.roll(coord_set, 1, axis=0) coord_cmp_eq = np.abs(coord_set_shift - coord_set) < eps_grid select = np.sum(coord_cmp_eq, axis=1) <= 1 coord_set_merged = coord_set[select] return coord_set_merged @staticmethod def not_manh(coord_list: np.ndarray, eps_grid: float = 1e-6, ) -> int: coord_set_shift = np.roll(coord_list, 1, axis=0) coord_cmp = np.abs(coord_set_shift - coord_list) > eps_grid edge_not_manh = np.sum(coord_cmp, axis=1) > 1 non_manh_edge = int(np.sum(edge_not_manh, axis=0)) return non_manh_edge @staticmethod def manh_edge_tran(p1: np.ndarray, dx: float, dy: float, nstep: int, inc_x_first: bool, manh_grid_size: float, eps_grid: float = 1e-4, ) -> np.ndarray: if (abs(dx) < eps_grid) or (abs(dy) < eps_grid): edge_coord_set = np.array([p1.tolist()]) else: x_set = np.empty((2 * nstep,), dtype=p1.dtype) y_set = np.empty((2 * nstep,), dtype=p1.dtype) if inc_x_first: x_set_pre = np.round( np.linspace(p1[0], p1[0] + nstep * dx, nstep + 1) / manh_grid_size) * manh_grid_size y_set_pre = np.round( np.linspace(p1[1], p1[1] + (nstep - 1) * dy, nstep) / manh_grid_size) * manh_grid_size x_set[0::2] = x_set_pre[:-1] x_set[1::2] = x_set_pre[1:] y_set[0::2] = y_set_pre y_set[1::2] = y_set_pre else: x_set_pre = np.round( np.linspace(p1[0], p1[0] + (nstep - 1) * dx, nstep) / manh_grid_size) * manh_grid_size y_set_pre = np.round( np.linspace(p1[1], p1[1] + nstep * dy, nstep + 1) / manh_grid_size) * manh_grid_size x_set[0::2] = x_set_pre x_set[1::2] = x_set_pre y_set[0::2] = y_set_pre[:-1] y_set[1::2] = y_set_pre[1:] edge_coord_set = np.stack((x_set, y_set), axis=-1) return edge_coord_set def manh_skill(self, poly_coords: Union[List[Tuple[float, float]], np.ndarray], manh_grid_size: float, manh_type: str, ) -> np.ndarray: def apprx_equal(float1: float, float2: float, eps_grid: float = 1e-9, ) -> bool: return abs(float1 - float2) < eps_grid def apprx_equal_coord(coord1: Tuple[float, float], coord2: Tuple[float, float], eps_grid: float = 1e-9, ) -> bool: return apprx_equal(coord1[0], coord2[0], eps_grid) and (apprx_equal(coord1[1], coord2[0], eps_grid)) if isinstance(poly_coords, np.ndarray): poly_coords_ori = poly_coords else: poly_coords_ori = np.array(poly_coords) dataprep_logger.debug(f'in manh_skill, manh_grid_size: {manh_grid_size}') dataprep_logger.debug(f'in manh_skill, poly_coords before mapping to manh grid: {poly_coords_ori}') if poly_coords_ori.size == 0: return poly_coords_ori poly_coords_manhgrid = manh_grid_size * np.round(poly_coords_ori / manh_grid_size) dataprep_logger.debug(f'in manh_skill, poly_coords after mapping to manh grid: 
{poly_coords_manhgrid}') poly_coords_manhgrid = self.merge_adjacent_duplicate(poly_coords_manhgrid) if not apprx_equal_coord(poly_coords_manhgrid[0], poly_coords_manhgrid[-1]): poly_coords_manhgrid = np.append(poly_coords_manhgrid, [poly_coords_manhgrid[0]], axis=0) if manh_type == 'non': return poly_coords elif (manh_type == 'inc') or (manh_type == 'dec'): n_coords = poly_coords_manhgrid.size / poly_coords_manhgrid[0].size coord_in = np.sum(poly_coords_manhgrid, axis=0) / n_coords poly_coords_manhgrid_leftshift = np.roll(poly_coords_manhgrid, -1, axis=0) edge_vec_set = poly_coords_manhgrid_leftshift - poly_coords_manhgrid p2c_vec_set = coord_in - poly_coords_manhgrid deltax_set = edge_vec_set[:, 0] deltay_set = edge_vec_set[:, 1] nstep_set = np.round(np.minimum(np.abs(deltax_set), np.abs(deltay_set)) / manh_grid_size).astype(int) nstep_fordivide_set = nstep_set + (nstep_set == 0) dx_set = deltax_set / nstep_fordivide_set dy_set = deltay_set / nstep_fordivide_set p2c_x_set = p2c_vec_set[:, 0] p2c_y_set = p2c_vec_set[:, 1] product1_set = deltax_set * p2c_y_set - deltay_set * p2c_x_set product2_set = deltax_set * 0.0 - deltax_set * deltay_set inc_x_first_set = (product1_set * product2_set < 0) == (manh_type == 'inc') poly_coords_orth = [] for i in range(0, len(poly_coords_manhgrid)): coord_curr = poly_coords_manhgrid[i] edge_coords_set = self.manh_edge_tran(coord_curr, dx_set[i], dy_set[i], nstep_set[i], inc_x_first_set[i], manh_grid_size) poly_coords_orth.append(edge_coords_set) poly_coords_orth = np.concatenate(poly_coords_orth, axis=0) poly_coords_orth_manhgrid = poly_coords_orth nonmanh_edge_pre = self.not_manh(poly_coords_orth_manhgrid) if nonmanh_edge_pre: for i in range(0, len(poly_coords_orth_manhgrid) - 1): p1 = poly_coords_orth_manhgrid[i] p2 = poly_coords_orth_manhgrid[i + 1] if p1[0] != p2[0] and p1[1] != p2[1]: print('non_manh_edge:', p1, p2) raise ValueError(f'Manhattanization failed before the clean-up, ' f'number of non-manh edges is {nonmanh_edge_pre}') poly_coords_cleanup = coords_cleanup(poly_coords_orth_manhgrid) if poly_coords_cleanup.size != 0: poly_coords_cleanup = np.append(poly_coords_cleanup, [poly_coords_cleanup[0]], axis=0) nonmanh_edge_post = self.not_manh(poly_coords_cleanup) if nonmanh_edge_post: for i in range(0, len(poly_coords_cleanup)): p1 = poly_coords_cleanup[i] if i == len(poly_coords_cleanup) - 1: p2 = poly_coords_cleanup[0] else: p2 = poly_coords_orth_manhgrid[i + 1] if p1[0] != p2[0] and p1[1] != p2[1]: print('non_manh_edge:', p1, p2) raise ValueError(f'Manhattanization failed after the clean-up, ' f'number of non-manh edges is {nonmanh_edge_post}') return poly_coords_cleanup else: raise ValueError(f'manh_type = {manh_type} should be either "non", "inc" or "dec"')
BSD 3-Clause New or Revised License
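A hedged sketch of Manhattanizing a single 45-degree polygon with the gdspy_manh method above; it assumes `dataprep` is an already-constructed Dataprep instance (its constructor needs a PhotonicTechInfo, a RoutingGrid, a flattened ContentList and a cell name, all omitted here).

import gdspy

triangle = gdspy.Polygon([(0.0, 0.0), (2.0, 0.0), (0.0, 2.0)])
# `dataprep` is assumed to exist; see the constructor requirements noted above.
manh = dataprep.gdspy_manh(triangle, manh_grid_size=0.1, do_manh=True)
# manh is a gdspy.Polygon/PolygonSet whose edges are axis-aligned and snapped to the
# 0.1 grid; with do_manh=False the cleaned-up original shape is returned instead.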
pyansys/pymapdl
ansys/mapdl/core/parameters.py
Parameters._parm
python
def _parm(self):
    return interp_star_status(self._mapdl.starstatus())
Current MAPDL parameters
https://github.com/pyansys/pymapdl/blob/e5cc21471c3a8fcef1f7b88359e38aa89cd63f73/ansys/mapdl/core/parameters.py#L246-L248
import os import tempfile import weakref import numpy as np from ansys.mapdl.core.mapdl import _MapdlCore from ansys.mapdl.core.misc import supress_logging from ansys.mapdl.reader._reader import write_array ROUTINE_MAP = { 0: "Begin level", 17: "PREP7", 21: "SOLUTION", 31: "POST1", 36: "POST26", 52: "AUX2", 53: "AUX3", 62: "AUX12", 65: "AUX15", } UNITS_MAP = { -1: "NONE", 0: "USER", 1: "SI", 2: "CGS", 3: "BFT", 4: "BIN", 5: "MKS", 6: "MPA", 7: "uMKS", } class Parameters: def __init__(self, mapdl): if not isinstance(mapdl, _MapdlCore): raise TypeError("Must be implemented from MAPDL class") self._mapdl_weakref = weakref.ref(mapdl) @property def _mapdl(self): return self._mapdl_weakref() def _set_log_level(self, level): self._mapdl.set_log_level(level) @property def _log(self): return self._mapdl._log @property def routine(self) -> str: value = self._mapdl.get_value("ACTIVE", item1="ROUT") return ROUTINE_MAP[int(value)] @property def units(self) -> str: value = self._mapdl.get_value("ACTIVE", item1="UNITS") return UNITS_MAP[int(value)] @property def revision(self) -> float: return float(self._mapdl.get_value("ACTIVE", item1="REV")) @property def platform(self) -> str: return self._mapdl.get_value("ACTIVE", item1="PLATFORM") @property def csys(self) -> int: return int(self._mapdl.get_value("ACTIVE", item1="CSYS")) @property def dsys(self) -> int: return int(self._mapdl.get_value("ACTIVE", item1="DSYS")) @property def rsys(self) -> int: return int(self._mapdl.get_value("ACTIVE", item1="RSYS")) @property def esys(self) -> int: return int(self._mapdl.get_value("ACTIVE", item1="ESYS")) @property def section(self) -> int: return int(self._mapdl.get_value("ACTIVE", item1="SECT")) @property def material(self) -> int: return int(self._mapdl.get_value("ACTIVE", item1="MAT")) @property def real(self) -> int: return int(self._mapdl.get_value("ACTIVE", item1="REAL")) @property def type(self) -> int: return int(self._mapdl.get_value("ACTIVE", item1="type")) @property @supress_logging
MIT License
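A hedged sketch of reaching the Parameters properties above through a live session; launch_mapdl assumes a local MAPDL installation, and the printed values are examples only.

from ansys.mapdl.core import launch_mapdl

mapdl = launch_mapdl()
print(mapdl.parameters.routine)   # e.g. "Begin level"
print(mapdl.parameters.units)     # e.g. "NONE" until a unit system is set
mapdl.exit()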
dfoderick/fullcycle
fullcyclepy/helpers/queuehelper.py
BroadcastSender.broadcast
python
def broadcast(self, msg):
    if self.channel != None:
        self.channel.basic_publish(exchange=self._exchangename, routing_key='', body=msg)
broadcast a message to anyone that is listening
https://github.com/dfoderick/fullcycle/blob/b53a35b1b051db27d947f2768c96712ad01f2328/fullcyclepy/helpers/queuehelper.py#L204-L207
from enum import Enum import pika class QueueName(Enum): Q_DUMMY = 'dummy' Q_COMPONENT = 'component' Q_LOG = 'log' Q_PROVISION = 'provision' Q_SWITCH = 'switch' Q_RESTART = 'restart' Q_ALERT = 'alert' Q_DISCOVER = 'discover' Q_DISCOVERED = 'discovered' Q_MONITOR = 'monitor' Q_MONITORMINER = 'monitorminer' Q_SHUTDOWN = 'shutdown' Q_OFFLINE = 'offline' Q_ONLINE = 'online' Q_STATISTICSUPDATED = 'statisticsupdated' Q_POOLCONFIGURATIONCHANGED = 'poolconfigurationchanged' Q_CLOUDPULL = 'cloudpull' Q_EMAIL = 'email' Q_SENSOR = 'sensor' Q_UPDATEWEB = 'updateweb' Q_SAVE = 'save' @classmethod def value(cls, queue_name): return queue_name._name_.lower()[2:] @classmethod def has_value(cls, queue_name): return any(queue_name == item.value for item in cls) def __str__(self): return "%s" % (self._name_.lower()) class QueueType: broadcast = 'broadcast' publish = 'publish' class QueueEntry(object): def __init__(self, queuename, message, eventtype=QueueType.publish): self.queuename = queuename self.eventtype = eventtype self.message = message class QueueEntries(object): def __init__(self): self.entries = [] def add(self, queuename, message): self.entries.append(QueueEntry(queuename, message, QueueType.publish)) def addbroadcast(self, queuename, message): self.entries.append(QueueEntry(queuename, message, QueueType.broadcast)) def addalert(self, message): self.addbroadcast(QueueName.Q_ALERT, message) def hasentries(self): if self.entries is None: return 0 return len(self.entries) class Queue: queue_name = None _connection = None channel = None state = None _userlogin = None def __init__(self, queueName, servicelogin): self.queue_name = queueName self._servicelogin = servicelogin self._userlogin = self._servicelogin.user if self._userlogin is None: self._userlogin = 'fullcycle' self.initialize(queueName) def connection(self): return self._connection def getparameters(self): credentials = pika.PlainCredentials(self._userlogin, self._servicelogin.password) parameters = pika.ConnectionParameters(self._servicelogin.host, self._servicelogin.port, '/', credentials) return parameters def initialize(self, name): self.setupchannel() self.declare(name) def setupchannel(self): self._connection = pika.BlockingConnection(self.getparameters()) self.channel = self._connection.channel() def declare(self, name): self.state = self.channel.queue_declare(queue=name) def publish(self, msg, exchange=''): if self.channel != None: self.channel.basic_publish(exchange=exchange, routing_key=self.queue_name, body=msg) def publish_channel(self, queue_name, msg, exchange=''): localchannel = self._connection.channel() localchannel.basic_publish(exchange=exchange, routing_key=queue_name, body=msg) localchannel.close() def broadcast_channel(self, exchange_name, msg): localchannel = self._connection.channel() localchannel.basic_publish(exchange=exchange_name, routing_key='', body=msg) localchannel.close() def subscribe(self, callback, no_acknowledge=True, prefetch_count=1): self.channel.basic_qos(prefetch_count=prefetch_count) self.channel.basic_consume(callback, queue=self.queue_name, no_ack=no_acknowledge) def listen(self): self.channel.start_consuming() def sleep(self, duration): self._connection.sleep(duration) def getmessage(self, no_acknowledge=True): queue_empty = self.state.method.message_count == 0 if not queue_empty: return self.channel.basic_get(self.queue_name, no_ack=no_acknowledge) return (None, None, None) def acknowledge(self, delivery_tag): self.channel.basic_ack(delivery_tag) def reject(self, delivery_tag): 
self.channel.basic_nack(delivery_tag) def close(self): if self.channel: self.channel.close() self.channel = None self._connection = None self.state = None class BroadcastBase(Queue): _exchangename = None _exchangetype = None def initialize(self, name): self.setupchannel() self.setupbroadcast(name) self.declare(name) def setupexchange(self, name, exchange_type): self._exchangename = name self._exchangetype = exchange_type self.channel.exchange_declare(exchange=name, exchange_type=exchange_type) return self def setupbroadcast(self, name): return self.setupexchange(name, 'fanout') class BroadcastSender(BroadcastBase): def initialize(self, name): self.setupchannel() self.setupbroadcast(name)
MIT License
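A hedged sketch of publishing a fan-out message with BroadcastSender from the record above; it assumes a RabbitMQ broker on localhost, and the ServiceLogin stand-in only provides the attributes the Queue base class reads (user, password, host, port). The exchange name "alert" and the credentials are illustrative.

from collections import namedtuple
from helpers.queuehelper import BroadcastSender

ServiceLogin = namedtuple("ServiceLogin", "user password host port")
login = ServiceLogin("fullcycle", "secret", "localhost", 5672)

sender = BroadcastSender("alert", login)      # declares a fanout exchange named "alert"
sender.broadcast("miner rig01 went offline")  # delivered to every queue bound to the exchange
sender.close()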