Dataset columns (name, type, value lengths / distinct values):

repository_name        stringlengths   7 - 107
function_path          stringlengths   4 - 190
function_identifier    stringlengths   1 - 236
language               stringclasses   1 value
function               stringlengths   9 - 647k
docstring              stringlengths   5 - 488k
function_url           stringlengths   71 - 285
context                stringlengths   0 - 2.51M
license                stringclasses   5 values
yahooarchive/crow
crow.py
apply_crow_aggregation
python
def apply_crow_aggregation(X):
    S = compute_crow_spatial_weight(X)
    C = compute_crow_channel_weight(X)
    X = X * S
    X = X.sum(axis=(1, 2))
    return X * C
Given a tensor of activations, compute the aggregate CroW feature, weighted spatially and channel-wise. :param ndarray X: 3d tensor of activations with dimensions (channels, height, width) :returns ndarray: CroW aggregated global image feature
https://github.com/yahooarchive/crow/blob/75c227715c47c81c960a36232b4ec4ce8c22d364/crow.py#L51-L65
import numpy as np
import scipy
from sklearn.preprocessing import normalize as sknormalize
from sklearn.decomposition import PCA


def compute_crow_spatial_weight(X, a=2, b=2):
    S = X.sum(axis=0)
    z = (S**a).sum()**(1./a)
    return (S / z)**(1./b) if b != 1 else (S / z)


def compute_crow_channel_weight(X):
    K, w, h = X.shape
    area = float(w * h)
    nonzeros = np.zeros(K, dtype=np.float32)
    for i, x in enumerate(X):
        nonzeros[i] = np.count_nonzero(x) / area
    nzsum = nonzeros.sum()
    for i, d in enumerate(nonzeros):
        nonzeros[i] = np.log(nzsum / d) if d > 0. else 0.
    return nonzeros
Apache License 2.0
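For orientation, a minimal usage sketch of the CroW aggregation above, assuming only NumPy and the two helper functions from this row's context; the activation shape (512, 7, 7) is an arbitrary illustrative choice, not taken from the source.

import numpy as np

# Hypothetical CNN activations: 512 channels over a 7x7 spatial grid.
X = np.random.rand(512, 7, 7).astype(np.float32)

feat = apply_crow_aggregation(X)   # spatially weighted sum, then channel weights
print(feat.shape)                  # (512,) -- one aggregated value per channel
# The full CroW pipeline then L2-normalizes (and optionally PCA-whitens) feat,
# which is why sknormalize and PCA are imported in the context above.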
uber/tchannel-python
tchannel/tornado/message_factory.py
MessageFactory.build_raw_response_message
python
def build_raw_response_message(self, response, args, is_completed=False):
    response.flags = FlagsType.none if is_completed else FlagsType.fragment
    if response.state == StreamState.init:
        message = CallResponseMessage(
            flags=response.flags,
            code=response.code,
            tracing=response.tracing,
            headers=response.headers,
            checksum=response.checksum,
            args=args
        )
        response.state = (StreamState.completed if is_completed
                          else StreamState.streaming)
    elif response.state == StreamState.streaming:
        message = CallResponseContinueMessage(
            flags=response.flags,
            checksum=response.checksum,
            args=args
        )
        response.state = (StreamState.completed if is_completed
                          else StreamState.streaming)
    message.id = response.id
    return message
Build a protocol-level message based on the response and args. The response object carries meta information about the outgoing response; args is the current chunk of data from the arg streams; is_completed determines the message flags. :param response: Response :param args: array of arg streams :param is_completed: whether this is the final fragment (controls the flags) :return: CallResponseMessage/CallResponseContinueMessage
https://github.com/uber/tchannel-python/blob/613faa9cf4cf200e27f7ba24383ad67962e44245/tchannel/tornado/message_factory.py#L116-L152
from __future__ import absolute_import import logging from ..errors import InvalidChecksumError from ..errors import TChannelError from ..errors import FatalProtocolError from ..messages import RW from ..messages import Types from ..messages import common from ..messages.call_continue import CallContinueMessage from ..messages.call_request import CallRequestMessage from ..messages.call_request_continue import CallRequestContinueMessage from ..messages.call_response import CallResponseMessage from ..messages.call_response_continue import CallResponseContinueMessage from ..messages.common import CHECKSUM_MSG_TYPES from ..messages.common import FlagsType from ..messages.common import StreamState from ..messages.common import generate_checksum from ..messages.common import verify_checksum from ..messages.error import ErrorMessage from .request import Request from .response import Response from .stream import InMemStream from six.moves import range log = logging.getLogger('tchannel') def build_raw_error_message(protocol_exception): message = ErrorMessage( id=protocol_exception.id, code=protocol_exception.code, tracing=protocol_exception.tracing, description=protocol_exception.description, ) return message class MessageFactory(object): def __init__(self, remote_host=None, remote_host_port=None): self.message_buffer = {} self.remote_host = remote_host self.remote_host_port = remote_host_port self.in_checksum = {} self.out_checksum = {} def build_raw_request_message(self, request, args, is_completed=False): request.flags = FlagsType.none if is_completed else FlagsType.fragment if request.state == StreamState.init: message = CallRequestMessage( flags=request.flags, ttl=request.ttl * 1000, tracing=request.tracing, service=request.service, headers=request.headers, checksum=request.checksum, args=args ) request.state = (StreamState.completed if is_completed else StreamState.streaming) elif request.state == StreamState.streaming: message = CallRequestContinueMessage( flags=request.flags, checksum=request.checksum, args=args ) request.state = (StreamState.completed if is_completed else StreamState.streaming) message.id = request.id return message
MIT License
faucetsdn/ryu
ryu/services/protocols/bgp/api/rtconf.py
get_neighbor_in_filter
python
def get_neighbor_in_filter(neigh_ip_address):
    core = CORE_MANAGER.get_core_service()
    peer = core.peer_manager.get_by_addr(neigh_ip_address)
    return peer.in_filters
Returns the in_filter of the neighbor with the given IP address, if it exists.
https://github.com/faucetsdn/ryu/blob/537f35f4b2bc634ef05e3f28373eb5e24609f989/ryu/services/protocols/bgp/api/rtconf.py#L130-L134
import logging from ryu.services.protocols.bgp.api.base import register from ryu.services.protocols.bgp.api.base import RegisterWithArgChecks from ryu.services.protocols.bgp.api.base import FLOWSPEC_FAMILY from ryu.services.protocols.bgp.api.base import FLOWSPEC_RULES from ryu.services.protocols.bgp.api.base import FLOWSPEC_ACTIONS from ryu.services.protocols.bgp.core_manager import CORE_MANAGER from ryu.services.protocols.bgp.rtconf.base import ConfWithId from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError from ryu.services.protocols.bgp.rtconf import neighbors from ryu.services.protocols.bgp.rtconf.neighbors import NeighborConf from ryu.services.protocols.bgp.rtconf.vrfs import ROUTE_DISTINGUISHER from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4 from ryu.services.protocols.bgp.rtconf.vrfs import VrfConf from ryu.services.protocols.bgp import constants as const LOG = logging.getLogger('bgpspeaker.api.rtconf') def _get_neighbor_conf(neigh_ip_address): neigh_conf = CORE_MANAGER.neighbors_conf.get_neighbor_conf(neigh_ip_address) if not neigh_conf: raise RuntimeConfigError(desc='No Neighbor configuration with IP' ' address %s' % neigh_ip_address) assert isinstance(neigh_conf, NeighborConf) return neigh_conf @register(name='neighbor.create') def create_neighbor(**kwargs): neigh_conf = NeighborConf(**kwargs) CORE_MANAGER.neighbors_conf.add_neighbor_conf(neigh_conf) return True @RegisterWithArgChecks(name='neighbor.update_enabled', req_args=[neighbors.IP_ADDRESS, neighbors.ENABLED]) def update_neighbor_enabled(neigh_ip_address, enabled): neigh_conf = _get_neighbor_conf(neigh_ip_address) neigh_conf.enabled = enabled return True @RegisterWithArgChecks(name='neighbor.update', req_args=[neighbors.IP_ADDRESS, neighbors.CHANGES]) def update_neighbor(neigh_ip_address, changes): rets = [] for k, v in changes.items(): if k == neighbors.MULTI_EXIT_DISC: rets.append(_update_med(neigh_ip_address, v)) if k == neighbors.ENABLED: rets.append(update_neighbor_enabled(neigh_ip_address, v)) if k == neighbors.CONNECT_MODE: rets.append(_update_connect_mode(neigh_ip_address, v)) return all(rets) def _update_med(neigh_ip_address, value): neigh_conf = _get_neighbor_conf(neigh_ip_address) neigh_conf.multi_exit_disc = value LOG.info('MED value for neigh: %s updated to %s', neigh_conf, value) return True def _update_connect_mode(neigh_ip_address, value): neigh_conf = _get_neighbor_conf(neigh_ip_address) neigh_conf.connect_mode = value return True @RegisterWithArgChecks(name='neighbor.delete', req_args=[neighbors.IP_ADDRESS]) def delete_neighbor(neigh_ip_address): neigh_conf = _get_neighbor_conf(neigh_ip_address) if neigh_conf: neigh_conf.enabled = False CORE_MANAGER.neighbors_conf.remove_neighbor_conf(neigh_ip_address) return True return False @RegisterWithArgChecks(name='neighbor.get', req_args=[neighbors.IP_ADDRESS]) def get_neighbor_conf(neigh_ip_address): neigh_conf = _get_neighbor_conf(neigh_ip_address) return neigh_conf.settings @register(name='neighbors.get') def get_neighbors_conf(): return CORE_MANAGER.neighbors_conf.settings @RegisterWithArgChecks(name='neighbor.in_filter.get', req_args=[neighbors.IP_ADDRESS])
Apache License 2.0
2ndwatch/cloudendure-python
cloudendure/cloudendure_api/models/cloud_endure_cloud_credentials_list.py
CloudEndureCloudCredentialsList.items
python
def items(self):
    return self._items
Gets the items of this CloudEndureCloudCredentialsList. # noqa: E501 :return: The items of this CloudEndureCloudCredentialsList. # noqa: E501 :rtype: list[CloudEndureCloudCredentials]
https://github.com/2ndwatch/cloudendure-python/blob/f81d1be1422b7c19adedb06c584803eaaa811919/cloudendure/cloudendure_api/models/cloud_endure_cloud_credentials_list.py#L48-L55
import pprint
import re

import six

from cloudendure.cloudendure_api.models.cloud_endure_cloud_credentials import (
    CloudEndureCloudCredentials,
)


class CloudEndureCloudCredentialsList:
    """
    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    swagger_types = {"items": "list[CloudEndureCloudCredentials]"}
    attribute_map = {"items": "items"}

    def __init__(self, items=None):
        self._items = None
        self.discriminator = None
        if items is not None:
            self.items = items

    @property
MIT License
rebiocoder/bioforum
venv/Lib/site-packages/django/utils/cache.py
_generate_cache_key
python
def _generate_cache_key(request, method, headerlist, key_prefix):
    ctx = hashlib.md5()
    for header in headerlist:
        value = request.META.get(header)
        if value is not None:
            ctx.update(force_bytes(value))
    url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
    cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
        key_prefix, method, url.hexdigest(), ctx.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
Return a cache key from the headers given in the header list.
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/django/utils/cache.py#L324-L334
import hashlib import logging import re import time import warnings from django.conf import settings from django.core.cache import caches from django.http import HttpResponse, HttpResponseNotModified from django.utils.deprecation import RemovedInDjango21Warning from django.utils.encoding import force_bytes, force_text, iri_to_uri from django.utils.http import ( http_date, parse_etags, parse_http_date_safe, quote_etag, ) from django.utils.timezone import get_current_timezone_name from django.utils.translation import get_language cc_delim_re = re.compile(r'\s*,\s*') logger = logging.getLogger('django.request') def patch_cache_control(response, **kwargs): def dictitem(s): t = s.split('=', 1) if len(t) > 1: return (t[0].lower(), t[1]) else: return (t[0].lower(), True) def dictvalue(t): if t[1] is True: return t[0] else: return '%s=%s' % (t[0], t[1]) if response.get('Cache-Control'): cc = cc_delim_re.split(response['Cache-Control']) cc = dict(dictitem(el) for el in cc) else: cc = {} if 'max-age' in cc and 'max_age' in kwargs: kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age']) if 'private' in cc and 'public' in kwargs: del cc['private'] elif 'public' in cc and 'private' in kwargs: del cc['public'] for (k, v) in kwargs.items(): cc[k.replace('_', '-')] = v cc = ', '.join(dictvalue(el) for el in cc.items()) response['Cache-Control'] = cc def get_max_age(response): if not response.has_header('Cache-Control'): return cc = dict(_to_tuple(el) for el in cc_delim_re.split(response['Cache-Control'])) if 'max-age' in cc: try: return int(cc['max-age']) except (ValueError, TypeError): pass def set_response_etag(response): if not response.streaming: response['ETag'] = quote_etag(hashlib.md5(response.content).hexdigest()) return response def _precondition_failed(request): logger.warning( 'Precondition Failed: %s', request.path, extra={ 'status_code': 412, 'request': request, }, ) return HttpResponse(status=412) def _not_modified(request, response=None): new_response = HttpResponseNotModified() if response: for header in ('Cache-Control', 'Content-Location', 'Date', 'ETag', 'Expires', 'Last-Modified', 'Vary'): if header in response: new_response[header] = response[header] new_response.cookies = response.cookies return new_response def get_conditional_response(request, etag=None, last_modified=None, response=None): if response and not (200 <= response.status_code < 300): return response if_match_etags = parse_etags(request.META.get('HTTP_IF_MATCH', '')) if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE') if if_unmodified_since: if_unmodified_since = parse_http_date_safe(if_unmodified_since) if_none_match_etags = parse_etags(request.META.get('HTTP_IF_NONE_MATCH', '')) if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE') if if_modified_since: if_modified_since = parse_http_date_safe(if_modified_since) if if_match_etags and not _if_match_passes(etag, if_match_etags): return _precondition_failed(request) if (not if_match_etags and if_unmodified_since and not _if_unmodified_since_passes(last_modified, if_unmodified_since)): return _precondition_failed(request) if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags): if request.method in ('GET', 'HEAD'): return _not_modified(request, response) else: return _precondition_failed(request) if (not if_none_match_etags and if_modified_since and not _if_modified_since_passes(last_modified, if_modified_since)): if request.method in ('GET', 'HEAD'): return _not_modified(request, response) return response def 
_if_match_passes(target_etag, etags): if not target_etag: return False elif etags == ['*']: return True elif target_etag.startswith('W/'): return False else: return target_etag in etags def _if_unmodified_since_passes(last_modified, if_unmodified_since): return last_modified and last_modified <= if_unmodified_since def _if_none_match_passes(target_etag, etags): if not target_etag: return True elif etags == ['*']: return False else: target_etag = target_etag.strip('W/') etags = (etag.strip('W/') for etag in etags) return target_etag not in etags def _if_modified_since_passes(last_modified, if_modified_since): return not last_modified or last_modified > if_modified_since def patch_response_headers(response, cache_timeout=None): if cache_timeout is None: cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS if cache_timeout < 0: cache_timeout = 0 if settings.USE_ETAGS and not response.has_header('ETag'): warnings.warn( "The USE_ETAGS setting is deprecated in favor of " "ConditionalGetMiddleware which sets the ETag regardless of the " "setting. patch_response_headers() won't do ETag processing in " "Django 2.1.", RemovedInDjango21Warning ) if hasattr(response, 'render') and callable(response.render): response.add_post_render_callback(set_response_etag) else: response = set_response_etag(response) if not response.has_header('Expires'): response['Expires'] = http_date(time.time() + cache_timeout) patch_cache_control(response, max_age=cache_timeout) def add_never_cache_headers(response): patch_response_headers(response, cache_timeout=-1) patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True) def patch_vary_headers(response, newheaders): if response.has_header('Vary'): vary_headers = cc_delim_re.split(response['Vary']) else: vary_headers = [] existing_headers = {header.lower() for header in vary_headers} additional_headers = [newheader for newheader in newheaders if newheader.lower() not in existing_headers] response['Vary'] = ', '.join(vary_headers + additional_headers) def has_vary_header(response, header_query): if not response.has_header('Vary'): return False vary_headers = cc_delim_re.split(response['Vary']) existing_headers = {header.lower() for header in vary_headers} return header_query.lower() in existing_headers def _i18n_cache_key_suffix(request, cache_key): if settings.USE_I18N or settings.USE_L10N: cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language()) if settings.USE_TZ: tz_name = force_text(get_current_timezone_name(), errors='ignore') cache_key += '.%s' % tz_name.encode('ascii', 'ignore').decode('ascii').replace(' ', '_') return cache_key
MIT License
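Illustrative sketch only: deriving a per-view cache key with the private helper above outside a full project. It assumes a standalone Django install configured on the fly; the prefix, path, and header are made up, and the settings values are the minimum needed for this code path.

from django.conf import settings
settings.configure(ALLOWED_HOSTS=["testserver"], USE_I18N=False, USE_TZ=False)
import django
django.setup()

from django.test import RequestFactory
from django.utils.cache import _generate_cache_key

request = RequestFactory().get("/articles/", HTTP_ACCEPT_LANGUAGE="en")
# Only the headers named in headerlist feed the first hash; the absolute URL
# feeds the second.
key = _generate_cache_key(request, "GET", ["HTTP_ACCEPT_LANGUAGE"], "myprefix")
print(key)  # views.decorators.cache.cache_page.myprefix.GET.<url-md5>.<headers-md5>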
codeperfectplus/machine-learning-web-applications
Quiz Application/quiz/models.py
Progress.list_all_cat_scores
python
def list_all_cat_scores(self):
    score_before = self.score
    output = {}

    for cat in Category.objects.all():
        to_find = re.escape(cat.category) + r",(\d+),(\d+),"
        match = re.search(to_find, self.score, re.IGNORECASE)

        if match:
            score = int(match.group(1))
            possible = int(match.group(2))
            try:
                percent = int(round((float(score) / float(possible)) * 100))
            except:
                percent = 0
            output[cat.category] = [score, possible, percent]
        else:
            self.score += cat.category + ",0,0,"
            output[cat.category] = [0, 0]

    if len(self.score) > len(score_before):
        self.save()

    return output
Returns a dict in which the key is the category name and the item is a list of three integers. The first is the number of questions correct, the second is the possible best score, the third is the percentage correct. The dict will have one key for every category that you have defined
https://github.com/codeperfectplus/machine-learning-web-applications/blob/6385ca0582aa19261c883e53005854ebeb057143/Quiz Application/quiz/models.py#L208-L248
from __future__ import unicode_literals import re import json from django.db import models from django.core.exceptions import ValidationError, ImproperlyConfigured from django.core.validators import ( MaxValueValidator, validate_comma_separated_integer_list, ) from django.utils.translation import ugettext_lazy as _ from django.utils.timezone import now from six import python_2_unicode_compatible from django.conf import settings from model_utils.managers import InheritanceManager class CategoryManager(models.Manager): def new_category(self, category): new_category = self.create(category=re.sub('\s+', '-', category) .lower()) new_category.save() return new_category @python_2_unicode_compatible class Category(models.Model): category = models.CharField( verbose_name=_("Category"), max_length=250, blank=True, unique=True, null=True) objects = CategoryManager() class Meta: verbose_name = _("Category") verbose_name_plural = _("Categories") def __str__(self): return self.category @python_2_unicode_compatible class SubCategory(models.Model): sub_category = models.CharField( verbose_name=_("Sub-Category"), max_length=250, blank=True, null=True) category = models.ForeignKey( Category, null=True, blank=True, verbose_name=_("Category"), on_delete=models.CASCADE) objects = CategoryManager() class Meta: verbose_name = _("Sub-Category") verbose_name_plural = _("Sub-Categories") def __str__(self): return self.sub_category + " (" + self.category.category + ")" @python_2_unicode_compatible class Quiz(models.Model): title = models.CharField( verbose_name=_("Title"), max_length=60, blank=False) description = models.TextField( verbose_name=_("Description"), blank=True, help_text=_("a description of the quiz")) url = models.SlugField( max_length=60, blank=False, help_text=_("a user friendly url"), verbose_name=_("user friendly url")) category = models.ForeignKey( Category, null=True, blank=True, verbose_name=_("Category"), on_delete=models.CASCADE) random_order = models.BooleanField( blank=False, default=False, verbose_name=_("Random Order"), help_text=_("Display the questions in " "a random order or as they " "are set?")) max_questions = models.PositiveIntegerField( blank=True, null=True, verbose_name=_("Max Questions"), help_text=_("Number of questions to be answered on each attempt.")) answers_at_end = models.BooleanField( blank=False, default=False, help_text=_("Correct answer is NOT shown after question." " Answers displayed at the end."), verbose_name=_("Answers at end")) exam_paper = models.BooleanField( blank=False, default=False, help_text=_("If yes, the result of each" " attempt by a user will be" " stored. Necessary for marking."), verbose_name=_("Exam Paper")) single_attempt = models.BooleanField( blank=False, default=False, help_text=_("If yes, only one attempt by" " a user will be permitted." 
" Non users cannot sit this exam."), verbose_name=_("Single Attempt")) pass_mark = models.SmallIntegerField( blank=True, default=0, verbose_name=_("Pass Mark"), help_text=_("Percentage required to pass exam."), validators=[MaxValueValidator(100)]) success_text = models.TextField( blank=True, help_text=_("Displayed if user passes."), verbose_name=_("Success Text")) fail_text = models.TextField( verbose_name=_("Fail Text"), blank=True, help_text=_("Displayed if user fails.")) draft = models.BooleanField( blank=True, default=False, verbose_name=_("Draft"), help_text=_("If yes, the quiz is not displayed" " in the quiz list and can only be" " taken by users who can edit" " quizzes.")) def save(self, force_insert=False, force_update=False, *args, **kwargs): self.url = re.sub('\s+', '-', self.url).lower() self.url = ''.join(letter for letter in self.url if letter.isalnum() or letter == '-') if self.single_attempt is True: self.exam_paper = True if self.pass_mark > 100: raise ValidationError('%s is above 100' % self.pass_mark) super(Quiz, self).save(force_insert, force_update, *args, **kwargs) class Meta: verbose_name = _("Quiz") verbose_name_plural = _("Quizzes") def __str__(self): return self.title def get_questions(self): return self.question_set.all().select_subclasses() @property def get_max_score(self): return self.get_questions().count() def anon_score_id(self): return str(self.id) + "_score" def anon_q_list(self): return str(self.id) + "_q_list" def anon_q_data(self): return str(self.id) + "_data" class ProgressManager(models.Manager): def new_progress(self, user): new_progress = self.create(user=user, score="") new_progress.save() return new_progress class Progress(models.Model): user = models.OneToOneField(settings.AUTH_USER_MODEL, verbose_name=_("User"), on_delete=models.CASCADE) score = models.CharField(max_length=1024, verbose_name=_("Score"), validators=[validate_comma_separated_integer_list]) objects = ProgressManager() class Meta: verbose_name = _("User Progress") verbose_name_plural = _("User progress records") @property
MIT License
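The score field above packs per-category results into a flat comma-separated string of the form "<category>,<score>,<possible>,". A small self-contained sketch of the parsing step used by list_all_cat_scores, with a made-up category name and score string:

import re

# Hypothetical stored value for two categories.
score_string = "python,3,5,django,0,0,"

to_find = re.escape("python") + r",(\d+),(\d+),"
match = re.search(to_find, score_string, re.IGNORECASE)
score, possible = int(match.group(1)), int(match.group(2))
percent = int(round((float(score) / float(possible)) * 100))
print(score, possible, percent)  # 3 5 60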
lake-lerna/hydra
src/main/python/hydra/lib/common.py
execute_local_cmd
python
def execute_local_cmd(cmd, timeout=10):
    l.info("Executing local command [%s]", cmd)
    pg_cmd = PySysCommand(cmd)
    pg_cmd.run(timeout=timeout)
    output = pg_cmd.stdout + pg_cmd.stderr
    l.info("Result: %s", output)
Execute a local command.
@args:
    cmd: Command to execute
    timeout: Command timeout
https://github.com/lake-lerna/hydra/blob/ec0793f8c1f49ceb93bf1f1a9789085b68d55f08/src/main/python/hydra/lib/common.py#L33-L45
__author__ = 'AbdullahS'

from pprint import pprint, pformat
import logging

from hydra.lib import util
from hydra.lib.utility.py_sys_cmd import PySysCommand

l = util.createlogger('h_common', logging.INFO)


def execute_remote_cmd(ip, user, cmd, timeout=10, suppress_output=False):
    cmd = "ssh -o StrictHostKeyChecking=no %s@%s \"%s\"" % (user, ip, cmd)
    l.info("Executing remote command [%s] on ip[%s], user[%s]", cmd, ip, user)
    pg_cmd = PySysCommand(cmd)
    pg_cmd.run(timeout=timeout)
    output = pg_cmd.stdout + pg_cmd.stderr
    if not suppress_output:
        l.info("Result: %s", output)
    return output
Apache License 2.0
pandera-dev/pandera
pandera/schema_components.py
MultiIndex.names
python
def names(self):
    return [index.name for index in self.indexes]
Get index names in the MultiIndex schema component.
https://github.com/pandera-dev/pandera/blob/96415a02b7737fd23d6606eaac304f20c65d577c/pandera/schema_components.py#L541-L543
import warnings from copy import copy, deepcopy from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import pandas as pd from . import errors from . import strategies as st from .deprecations import deprecate_pandas_dtype from .error_handlers import SchemaErrorHandler from .schemas import ( CheckList, DataFrameSchema, PandasDtypeInputTypes, SeriesSchemaBase, ) def _is_valid_multiindex_tuple_str(x: Tuple[Any, ...]) -> bool: return isinstance(x, tuple) and all(isinstance(i, str) for i in x) class Column(SeriesSchemaBase): @deprecate_pandas_dtype def __init__( self, dtype: PandasDtypeInputTypes = None, checks: CheckList = None, nullable: bool = False, unique: bool = False, allow_duplicates: Optional[bool] = None, coerce: bool = False, required: bool = True, name: Union[str, Tuple[str, ...], None] = None, regex: bool = False, pandas_dtype: PandasDtypeInputTypes = None, ) -> None: super().__init__( dtype, checks, nullable, unique, allow_duplicates, coerce, name, pandas_dtype, ) if ( name is not None and not isinstance(name, str) and not _is_valid_multiindex_tuple_str(name) and regex ): raise ValueError( "You cannot specify a non-string name when setting regex=True" ) self.required = required self._name = name self._regex = regex @property def regex(self) -> bool: return self._regex @property def _allow_groupby(self) -> bool: return True @property def properties(self) -> Dict[str, Any]: return { "dtype": self.dtype, "checks": self._checks, "nullable": self._nullable, "unique": self._unique, "coerce": self._coerce, "required": self.required, "name": self._name, "regex": self._regex, } def set_name(self, name: str): self._name = name return self def coerce_dtype(self, obj: Union[pd.DataFrame, pd.Series, pd.Index]): if isinstance(obj, (pd.Series, pd.Index)): return super(Column, self).coerce_dtype(obj) return obj.apply( lambda x: super(Column, self).coerce_dtype(x), axis="columns" ) def validate( self, check_obj: pd.DataFrame, head: Optional[int] = None, tail: Optional[int] = None, sample: Optional[int] = None, random_state: Optional[int] = None, lazy: bool = False, inplace: bool = False, ) -> pd.DataFrame: if not inplace: check_obj = check_obj.copy() if self._name is None: raise errors.SchemaError( self, check_obj, "column name is set to None. 
Pass the ``name` argument when " "initializing a Column object, or use the ``set_name`` " "method.", ) def validate_column(check_obj, column_name): super(Column, copy(self).set_name(column_name)).validate( check_obj, head, tail, sample, random_state, lazy, inplace=inplace, ) column_keys_to_check = ( self.get_regex_columns(check_obj.columns) if self._regex else [self._name] ) for column_name in column_keys_to_check: if self.coerce: check_obj.loc[:, column_name] = self.coerce_dtype( check_obj.loc[:, column_name] ) if isinstance(check_obj[column_name], pd.DataFrame): for i in range(check_obj[column_name].shape[1]): validate_column( check_obj[column_name].iloc[:, [i]], column_name ) else: validate_column(check_obj, column_name) return check_obj def get_regex_columns( self, columns: Union[pd.Index, pd.MultiIndex] ) -> Union[pd.Index, pd.MultiIndex]: if isinstance(self.name, tuple): if len(self.name) != columns.nlevels: raise IndexError( f"Column regex name='{self.name}' is a tuple, expected a " f"MultiIndex columns with {len(self.name)} number of " f"levels, found {columns.nlevels} level(s)" ) matches = np.ones(len(columns)).astype(bool) for i, name in enumerate(self.name): matched = pd.Index( columns.get_level_values(i).astype(str).str.match(name) ).fillna(False) matches = matches & np.array(matched.tolist()) column_keys_to_check = columns[matches] else: if isinstance(columns, pd.MultiIndex): raise IndexError( f"Column regex name {self.name} is a string, expected a " "dataframe where the index is a pd.Index object, not a " "pd.MultiIndex object" ) column_keys_to_check = columns[ pd.Index(columns.astype(str).str.match(self.name)) .fillna(False) .tolist() ] if column_keys_to_check.shape[0] == 0: raise errors.SchemaError( self, columns, f"Column regex name='{self.name}' did not match any columns " "in the dataframe. 
Update the regex pattern so that it " f"matches at least one column:\n{columns.tolist()}", ) return column_keys_to_check.drop_duplicates() @st.strategy_import_error def strategy(self, *, size=None): return super().strategy(size=size).map(lambda x: x.to_frame()) @st.strategy_import_error def strategy_component(self): return st.column_strategy( self.dtype, checks=self.checks, unique=self.unique, name=self.name, ) def example(self, size=None) -> pd.DataFrame: import hypothesis with warnings.catch_warnings(): warnings.simplefilter( "ignore", category=hypothesis.errors.NonInteractiveExampleWarning, ) return ( super() .strategy(size=size) .example() .rename(self.name) .to_frame() ) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented def _compare_dict(obj): return { k: v if k != "_checks" else set(v) for k, v in obj.__dict__.items() } return _compare_dict(self) == _compare_dict(other) class Index(SeriesSchemaBase): @property def names(self): return [self.name] @property def _allow_groupby(self) -> bool: return False def validate( self, check_obj: Union[pd.DataFrame, pd.Series], head: Optional[int] = None, tail: Optional[int] = None, sample: Optional[int] = None, random_state: Optional[int] = None, lazy: bool = False, inplace: bool = False, ) -> Union[pd.DataFrame, pd.Series]: if isinstance(check_obj.index, pd.MultiIndex): raise errors.SchemaError( self, check_obj, "Attempting to validate mismatch index" ) if self.coerce: check_obj.index = self.coerce_dtype(check_obj.index) obj_to_validate = self.dtype.coerce( pd.Series(check_obj.index, name=check_obj.index.name) ) else: obj_to_validate = pd.Series( check_obj.index, name=check_obj.index.name ) assert isinstance( super().validate( obj_to_validate, head, tail, sample, random_state, lazy, inplace, ), pd.Series, ) return check_obj @st.strategy_import_error def strategy(self, *, size: int = None): return st.index_strategy( self.dtype, checks=self.checks, nullable=self.nullable, unique=self.unique, name=self.name, size=size, ) @st.strategy_import_error def strategy_component(self): return st.column_strategy( self.dtype, checks=self.checks, unique=self.unique, name=self.name, ) def example(self, size: int = None) -> pd.Index: import hypothesis with warnings.catch_warnings(): warnings.simplefilter( "ignore", category=hypothesis.errors.NonInteractiveExampleWarning, ) return self.strategy(size=size).example() def __eq__(self, other): return self.__dict__ == other.__dict__ class MultiIndex(DataFrameSchema): def __init__( self, indexes: List[Index], coerce: bool = False, strict: bool = False, name: str = None, ordered: bool = True, unique: Optional[Union[str, List[str]]] = None, ) -> None: if any(not isinstance(i, Index) for i in indexes): raise errors.SchemaInitError( f"expected a list of Index objects, found {indexes} " f"of type {[type(x) for x in indexes]}" ) self.indexes = indexes columns = {} for i, index in enumerate(indexes): if not ordered and index.name is None: raise errors.SchemaInitError( "You must specify index names if MultiIndex schema " "component is not ordered." ) columns[i if index.name is None else index.name] = Column( dtype=index._dtype, checks=index.checks, nullable=index._nullable, unique=index._unique, ) super().__init__( columns=columns, coerce=coerce, strict=strict, name=name, ordered=ordered, unique=unique, ) @property
MIT License
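A brief usage sketch for the names property above, assuming pandera is installed; the index names "region" and "year" are arbitrary examples, not from the source.

import pandera as pa

schema = pa.MultiIndex([
    pa.Index(str, name="region"),
    pa.Index(int, name="year"),
])
print(schema.names)  # ['region', 'year']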
offlineimap/imapfw
imapfw/edmp.py
_raiseError
python
def _raiseError(cls_Exception: ExceptionClass, reason: str):
    try:
        raise cls_Exception(reason)
    except NameError as e:
        runtime.ui.exception(e)
        raise RuntimeError("exception from receiver cannot be raised %s: %s"%
            (cls_Exception.__name__, reason))
Default callback for errors.
https://github.com/offlineimap/imapfw/blob/740a4fed1a1de28e4134a115a1dd9c6e90e29ec1/imapfw/edmp.py#L228-L236
import time
from typing import TypeVar

from imapfw import runtime
from imapfw.constants import EMT, SLEEP
from imapfw.annotation import ExceptionClass
from imapfw.concurrency import Queue

_SILENT_TIMES = 100


class TopicError(Exception):
    pass
MIT License
mtgjson/mtgjson
mtgjson5/compiled_classes/mtgjson_card_types.py
MtgjsonCardTypesObject.__init__
python
def __init__(self, magic_rules: str) -> None:
    planar_regex = re.compile(r".*The planar types are (.*)\.")

    self.artifact = ScryfallProvider().get_catalog_entry("artifact-types")
    self.conspiracy = []
    self.creature = ScryfallProvider().get_catalog_entry("creature-types")
    self.enchantment = ScryfallProvider().get_catalog_entry("enchantment-types")
    self.instant = ScryfallProvider().get_catalog_entry("spell-types")
    self.land = ScryfallProvider().get_catalog_entry("land-types")
    self.phenomenon = []
    self.plane = regex_str_to_list(planar_regex.search(magic_rules))
    self.planeswalker = ScryfallProvider().get_catalog_entry(
        "planeswalker-types"
    )
    self.scheme = []
    self.sorcery = self.instant
    self.tribal = []
    self.vanguard = []
Internal initializer :param magic_rules: Rules for MTG from Wizards
https://github.com/mtgjson/mtgjson/blob/205b92451c225d2b60a6b8398f242b88e628f237/mtgjson5/compiled_classes/mtgjson_card_types.py#L37-L58
import re
import string
from typing import Any, Dict, List, Match, Optional

from ..providers.scryfall import ScryfallProvider
from ..providers.wizards import WizardsProvider
from ..utils import parse_magic_rules_subset, to_camel_case


class MtgjsonCardTypesObject:
    class MtgjsonCardTypesInnerObject:
        artifact: List[str]
        conspiracy: List[str]
        creature: List[str]
        enchantment: List[str]
        instant: List[str]
        land: List[str]
        phenomenon: List[str]
        plane: List[str]
        planeswalker: List[str]
        scheme: List[str]
        sorcery: List[str]
        tribal: List[str]
        vanguard: List[str]
MIT License
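The planar types are scraped out of the comprehensive-rules text with planar_regex above. A self-contained sketch of that match on a made-up one-line excerpt in the style of the rules (the real list is longer):

import re

planar_regex = re.compile(r".*The planar types are (.*)\.")

rules_line = "205.3n The planar types are Alara, Arkhos, and Azgol."
match = planar_regex.search(rules_line)
print(match.group(1))  # "Alara, Arkhos, and Azgol" -- then split into a list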
getavalon/core
avalon/vendor/jsonschema/_utils.py
indent
python
def indent(string, times=1):
    return "\n".join(" " * (4 * times) + line for line in string.splitlines())
A dumb version of :func:`textwrap.indent` from Python 3.3.
https://github.com/getavalon/core/blob/31e8cb4760e00e3db64443f6f932b7fd8e96d41d/avalon/vendor/jsonschema/_utils.py#L61-L67
import itertools
import json
import pkgutil
import re

from .compat import str_types, MutableMapping, urlsplit


class URIDict(MutableMapping):

    def normalize(self, uri):
        return urlsplit(uri).geturl()

    def __init__(self, *args, **kwargs):
        self.store = dict()
        self.store.update(*args, **kwargs)

    def __getitem__(self, uri):
        return self.store[self.normalize(uri)]

    def __setitem__(self, uri, value):
        self.store[self.normalize(uri)] = value

    def __delitem__(self, uri):
        del self.store[self.normalize(uri)]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def __repr__(self):
        return repr(self.store)


class Unset(object):

    def __repr__(self):
        return "<unset>"


def load_schema(name):
    data = pkgutil.get_data(__package__, "schemas/{0}.json".format(name))
    return json.loads(data.decode("utf-8"))
MIT License
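A quick check of the indent helper's behavior, with the two-line function repeated so the sketch runs standalone:

def indent(string, times=1):
    return "\n".join(" " * (4 * times) + line for line in string.splitlines())

print(indent("error:\n  details"))
# prints:
#     error:
#       details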
mplanchard/safetywrap
bench/sample.py
Monadic.run
python
def run(self) -> None:
    for unconnected in self._stores.values():
        connected = unconnected.connect()
        if connected.is_err():
            continue
        store = connected.unwrap()
        inserted = (
            store.get("you")
            .ok_or("no such val")
            .map(lambda val: str(val + "et"))
            .and_then(
                lambda val: store.insert("you", val).or_else(
                    lambda _: store.insert("you", val, overwrite=True)
                )
            )
        )
        if inserted.is_ok():
            assert inserted.unwrap() == "meet"
            break
    else:
        raise RuntimeError("Could not get value anywhere")
Run the program.
https://github.com/mplanchard/safetywrap/blob/a517b6ef9a4fdfab7aefd3f5a276de127cea0579/bench/sample.py#L114-L135
import sys import typing as t from timeit import timeit from safetywrap import Some, Nothing, Ok, Err, Option, Result T = t.TypeVar("T") class ClassicalDataStore: def __init__(self, values: dict = None) -> None: self._values = values or {} def connect(self, fail: bool = False) -> "ClassicalDataStore": if fail: raise RuntimeError("Failed to connect") return self def get(self, key: str) -> t.Any: return self._values.get(key) def insert(self, key: str, val: T, overwrite: bool = False) -> T: if key in self._values and not overwrite: raise KeyError("Key already exists") self._values[key] = val return val class MonadicDataStore: def __init__(self, values: dict = None) -> None: self._values = values or {} def connect(self, fail: bool = False) -> Result["MonadicDataStore", str]: if fail: return Err("failed to connect") return Ok(self) def get(self, key: str) -> Option[t.Any]: if key in self._values: return Some(self._values[key]) return Nothing() def insert( self, key: str, val: T, overwrite: bool = False ) -> Result[T, str]: if key in self._values and not overwrite: return Err("Key already exists") self._values[key] = val return Ok(val) class Classical: def __init__(self) -> None: self._stores = { 0: ClassicalDataStore(), 1: ClassicalDataStore(), 2: ClassicalDataStore(), 3: ClassicalDataStore({"you": "me"}), } def run(self) -> None: for store in self._stores.values(): try: store = store.connect() except RuntimeError: continue val = store.get("you") if val is not None: new_val = val + "et" try: inserted = store.insert("you", new_val) except KeyError: inserted = store.insert("you", new_val, overwrite=True) assert inserted == "meet" break else: raise RuntimeError("Could not get value anywhere.") class Monadic: def __init__(self) -> None: self._stores = { 0: MonadicDataStore(), 1: MonadicDataStore(), 2: MonadicDataStore(), 3: MonadicDataStore({"you": "me"}), }
Apache License 2.0
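A minimal sketch of the same Result/Option chaining against the MonadicDataStore defined in this row's context, assuming the safetywrap package is installed and those classes are in scope:

store = MonadicDataStore({"you": "me"}).connect().unwrap()

greeting = (
    store.get("you")                     # Some("me")
    .ok_or("no such val")                # Ok("me")
    .map(lambda val: val + "et")         # Ok("meet")
    .and_then(lambda val: store.insert("you", val, overwrite=True))
)
assert greeting.unwrap() == "meet"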
officium/rl-experiments
src/common/wrappers.py
FireResetEnv.__init__
python
def __init__(self, env):
    super(FireResetEnv, self).__init__(env)
    assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
    assert len(env.unwrapped.get_action_meanings()) >= 3
Take action on reset for environments that are fixed until firing.
https://github.com/officium/rl-experiments/blob/57cad375c7cdf42b4ccb4f6b7e669a64b5c37b88/src/common/wrappers.py#L82-L86
from collections import deque from multiprocessing import Process, Pipe import cv2 import gym import numpy as np from gym import spaces __all__ = ( 'TimeLimit', 'NoopResetEnv', 'FireResetEnv', 'EpisodicLifeEnv', 'MaxAndSkipEnv', 'ClipRewardEnv', 'WarpFrame', 'FrameStack', 'LazyFrames', 'RewardScaler', 'SubprocVecEnv', 'VecFrameStack', 'Monitor', ) cv2.ocl.setUseOpenCL(False) class TimeLimit(gym.Wrapper): def __init__(self, env, max_episode_steps=None): super(TimeLimit, self).__init__(env) self._max_episode_steps = max_episode_steps self._elapsed_steps = 0 def step(self, ac): observation, reward, done, info = self.env.step(ac) self._elapsed_steps += 1 if self._elapsed_steps >= self._max_episode_steps: done = True info['TimeLimit.truncated'] = True return observation, reward, done, info def reset(self, **kwargs): self._elapsed_steps = 0 return self.env.reset(**kwargs) class NoopResetEnv(gym.Wrapper): def __init__(self, env, noop_max=30): super(NoopResetEnv, self).__init__(env) self.noop_max = noop_max self.override_num_noops = None self.noop_action = 0 assert env.unwrapped.get_action_meanings()[0] == 'NOOP' def reset(self, **kwargs): self.env.reset(**kwargs) if self.override_num_noops is not None: noops = self.override_num_noops else: noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) assert noops > 0 obs = None for _ in range(noops): obs, _, done, _ = self.env.step(self.noop_action) if done: obs = self.env.reset(**kwargs) return obs def step(self, ac): return self.env.step(ac) class FireResetEnv(gym.Wrapper):
MIT License
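A usage sketch, assuming gym with the Atari extras is installed and that the full wrapper (its reset override is not shown in this row) presses FIRE after env.reset():

import gym

# Breakout needs FIRE to launch the ball, so the wrapper's asserts pass.
env = FireResetEnv(gym.make("BreakoutNoFrameskip-v4"))
obs = env.reset()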
cortex-lab/phy
phy/cluster/views/histogram.py
HistogramView.set_n_bins
python
def set_n_bins(self, n_bins):
    self.n_bins = n_bins
    logger.debug("Change number of bins to %d for %s.",
                 n_bins, self.__class__.__name__)
    self.plot()
Set the number of bins in the histogram.
https://github.com/cortex-lab/phy/blob/9a330b9437a3d0b40a37a201d147224e6e7fb462/phy/cluster/views/histogram.py#L235-L239
import logging import numpy as np from phylib.io.array import _clip from phy.plot.visuals import HistogramVisual, TextVisual from phy.utils.color import selected_cluster_color from .base import ManualClusteringView, ScalingMixin logger = logging.getLogger(__name__) def _compute_histogram( data, x_max=None, x_min=None, n_bins=None, normalize=True, ignore_zeros=False): assert x_min <= x_max assert n_bins >= 0 n_bins = _clip(n_bins, 2, 1000000) bins = np.linspace(float(x_min), float(x_max), int(n_bins)) if ignore_zeros: data = data[data != 0] histogram, _ = np.histogram(data, bins=bins) if not normalize: return histogram hist_sum = histogram.sum() * (bins[1] - bins[0]) return histogram / (hist_sum or 1.) def _first_not_null(*l): for x in l: if x is not None: return x class HistogramView(ScalingMixin, ManualClusteringView): max_n_clusters = 20 _default_position = 'right' cluster_ids = () n_bins = 100 x_delta = .01 x_min = None x_max = None bin_unit = 's' alias_char = 'h' default_shortcuts = { 'change_window_size': 'ctrl+wheel', } default_snippets = { 'set_n_bins': '%sn' % alias_char, 'set_bin_size (%s)' % bin_unit: '%sb' % alias_char, 'set_x_min (%s)' % bin_unit: '%smin' % alias_char, 'set_x_max (%s)' % bin_unit: '%smax' % alias_char, } _state_attrs = ('n_bins', 'x_min', 'x_max') _local_state_attrs = () def __init__(self, cluster_stat=None): super(HistogramView, self).__init__() self.state_attrs += self._state_attrs self.local_state_attrs += self._local_state_attrs self.canvas.set_layout(layout='stacked', n_plots=1) self.canvas.enable_axes() self.cluster_stat = cluster_stat self.visual = HistogramVisual() self.canvas.add_visual(self.visual) self.text_visual = TextVisual(color=(1., 1., 1., 1.)) self.canvas.add_visual(self.text_visual) def _plot_cluster(self, bunch): assert bunch n_bins = self.n_bins assert n_bins >= 0 self.visual.add_batch_data( hist=bunch.histogram, ylim=bunch.ylim, color=bunch.color, box_index=bunch.index) text = bunch.get('text', None) if not text: return text = text.splitlines() n = len(text) self.text_visual.add_batch_data( text=text, pos=[(-1, .8)] * n, anchor=[(1, -1 - 2 * i) for i in range(n)], box_index=bunch.index, ) def get_clusters_data(self, load_all=None): bunchs = [] for i, cluster_id in enumerate(self.cluster_ids): bunch = self.cluster_stat(cluster_id) if not bunch.data.size: continue bmin, bmax = bunch.data.min(), bunch.data.max() self.x_min = _first_not_null(self.x_min, bunch.get('x_min', None), bmin) self.x_max = _first_not_null(self.x_max, bunch.get('x_max', None), bmax) self.x_min, self.x_max = sorted((self.x_min, self.x_max)) assert self.x_min is not None assert self.x_max is not None assert self.x_min <= self.x_max bunch.histogram = _compute_histogram( bunch.data, x_min=self.x_min, x_max=self.x_max, n_bins=self.n_bins) bunch.ylim = bunch.histogram.max() bunch.color = selected_cluster_color(i) bunch.index = i bunch.cluster_id = cluster_id bunchs.append(bunch) return bunchs def _get_data_bounds(self, bunchs): ylim = max(bunch.ylim for bunch in bunchs) if bunchs else 1 return (self.x_min, 0, self.x_max, ylim * len(self.cluster_ids)) def plot(self, **kwargs): bunchs = self.get_clusters_data() self.data_bounds = self._get_data_bounds(bunchs) self.canvas.stacked.n_boxes = len(self.cluster_ids) self.visual.reset_batch() self.text_visual.reset_batch() for bunch in bunchs: self._plot_cluster(bunch) self.canvas.update_visual(self.visual) self.canvas.update_visual(self.text_visual) self._update_axes() self.canvas.update() self.update_status() def attach(self, gui): 
super(HistogramView, self).attach(gui) self.actions.add( self.set_n_bins, alias=self.alias_char + 'n', prompt=True, prompt_default=lambda: self.n_bins) self.actions.add( self.set_bin_size, alias=self.alias_char + 'b', prompt=True, prompt_default=lambda: self.bin_size) self.actions.add( self.set_x_min, alias=self.alias_char + 'min', prompt=True, prompt_default=lambda: self.x_min) self.actions.add( self.set_x_max, alias=self.alias_char + 'max', prompt=True, prompt_default=lambda: self.x_max) self.actions.separator() @property def status(self): f = 1 if self.bin_unit == 's' else 1000 return '[{:.1f}{u}, {:.1f}{u:s}]'.format( (self.x_min or 0) * f, (self.x_max or 0) * f, u=self.bin_unit) def _get_scaling_value(self): return self.x_max def _set_scaling_value(self, value): if self.bin_unit == 'ms': value *= 1000 self.set_x_max(value)
BSD 3-Clause New or Revised License
django-fluent/django-fluent-contents
fluent_contents/admin/placeholderfield.py
PlaceholderFieldAdmin.get_all_allowed_plugins
python
def get_all_allowed_plugins(self):
    if not hasattr(self.model, "_meta_placeholder_fields"):
        return []

    plugins = []
    for name, field in self.model._meta_placeholder_fields.items():
        assert isinstance(field, PlaceholderField)

        if field.plugins is None:
            return extensions.plugin_pool.get_plugins()
        else:
            plugins += field.plugins

    return list(set(plugins))
Return which plugins are allowed by the placeholder fields.
https://github.com/django-fluent/django-fluent-contents/blob/b82a527a4a1007a62857ba9f6980d6c4482e8f2d/fluent_contents/admin/placeholderfield.py#L81-L99
from functools import partial from django.forms import Media from fluent_contents import extensions from fluent_contents.admin.placeholdereditor import ( PlaceholderEditorAdmin, PlaceholderEditorInline, ) from fluent_contents.models import PlaceholderData from fluent_contents.models.fields import PlaceholderField class PlaceholderFieldInline(PlaceholderEditorInline): template = "admin/fluent_contents/placeholderfield/inline_init.html" @property def media(self): media = super(PlaceholderFieldInline, self).media return Media( js=[f for f in media._js if not f.endswith("cp_tabs.js")], css=media._css ) class PlaceholderFieldAdmin(PlaceholderEditorAdmin): placeholder_inline = PlaceholderFieldInline def get_form(self, request, obj=None, **kwargs): kwargs["formfield_callback"] = partial( self.formfield_for_dbfield, request=request, obj=obj ) return super(PlaceholderFieldAdmin, self).get_form(request, obj=obj, **kwargs) def formfield_for_dbfield(self, db_field, **kwargs): obj = kwargs.pop("obj", None) if isinstance(db_field, PlaceholderField): kwargs["parent_object"] = obj return super(PlaceholderFieldAdmin, self).formfield_for_dbfield( db_field, **kwargs ) def get_placeholder_data(self, request, obj=None): if not hasattr(self.model, "_meta_placeholder_fields"): return [] data = [] for name, field in self.model._meta_placeholder_fields.items(): assert isinstance(field, PlaceholderField) data.append( PlaceholderData( slot=field.slot, title=field.verbose_name.capitalize(), fallback_language=None, ) ) return data
Apache License 2.0
lisa-lab/pylearn2
pylearn2/datasets/dense_design_matrix.py
DenseDesignMatrix.get_design_matrix
python
def get_design_matrix(self, topo=None):
    if topo is not None:
        if self.view_converter is None:
            raise Exception("Tried to convert from topological_view to "
                            "design matrix using a dataset that has no "
                            "view converter")
        return self.view_converter.topo_view_to_design_mat(topo)

    return self.X
Return topo (a batch of examples in topology preserving format) in design
matrix format.

Parameters
----------
topo : ndarray, optional
    An array containing a topological representation of training
    examples. If unspecified, the entire dataset (`self.X`) is used
    instead.

Returns
-------
WRITEME
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/datasets/dense_design_matrix.py#L787-L810
__authors__ = "Ian Goodfellow and Mehdi Mirza" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["Ian Goodfellow"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" __email__ = "pylearn-dev@googlegroups" import functools import logging import warnings import numpy as np from theano.compat.six.moves import xrange from pylearn2.datasets import cache from pylearn2.utils.iteration import ( FiniteDatasetIterator, resolve_iterator_class ) import copy tables = None from pylearn2.datasets.dataset import Dataset from pylearn2.datasets import control from pylearn2.space import CompositeSpace, Conv2DSpace, VectorSpace, IndexSpace from pylearn2.utils import safe_zip from pylearn2.utils.exc import reraise_as from pylearn2.utils.rng import make_np_rng from pylearn2.utils import contains_nan from theano import config logger = logging.getLogger(__name__) def ensure_tables(): global tables if tables is None: import tables class DenseDesignMatrix(Dataset): _default_seed = (17, 2, 946) def __init__(self, X=None, topo_view=None, y=None, view_converter=None, axes=('b', 0, 1, 'c'), rng=_default_seed, preprocessor=None, fit_preprocessor=False, X_labels=None, y_labels=None): self.X = X self.y = y self.view_converter = view_converter self.X_labels = X_labels self.y_labels = y_labels self._check_labels() if topo_view is not None: assert view_converter is None self.set_topological_view(topo_view, axes) else: assert X is not None, ("DenseDesignMatrix needs to be provided " "with either topo_view, or X") if view_converter is not None: if not hasattr(view_converter, 'topo_space'): raise NotImplementedError("Not able to get a topo_space " "from this converter: %s" % view_converter) self.X_topo_space = view_converter.topo_space else: self.X_topo_space = None X_source = 'features' if X_labels is None: X_space = VectorSpace(dim=X.shape[1]) else: if X.ndim == 1: dim = 1 else: dim = X.shape[-1] X_space = IndexSpace(dim=dim, max_labels=X_labels) if y is None: space = X_space source = X_source else: if y.ndim == 1: dim = 1 else: dim = y.shape[-1] if y_labels is not None: y_space = IndexSpace(dim=dim, max_labels=y_labels) else: y_space = VectorSpace(dim=dim) y_source = 'targets' space = CompositeSpace((X_space, y_space)) source = (X_source, y_source) self.data_specs = (space, source) self.X_space = X_space self.compress = False self.design_loc = None self.rng = make_np_rng(rng, which_method="random_integers") self._iter_mode = resolve_iterator_class('sequential') self._iter_topo = False self._iter_targets = False self._iter_data_specs = (self.X_space, 'features') if preprocessor: preprocessor.apply(self, can_fit=fit_preprocessor) self.preprocessor = preprocessor def _check_labels(self): if self.X_labels is not None: assert self.X is not None assert self.view_converter is None assert self.X.ndim <= 2 assert np.all(self.X < self.X_labels) if self.y_labels is not None: assert self.y is not None assert self.y.ndim <= 2 assert np.all(self.y < self.y_labels) @functools.wraps(Dataset.iterator) def iterator(self, mode=None, batch_size=None, num_batches=None, rng=None, data_specs=None, return_tuple=False): [mode, batch_size, num_batches, rng, data_specs] = self._init_iterator( mode, batch_size, num_batches, rng, data_specs) space, source = data_specs if isinstance(space, CompositeSpace): sub_spaces = space.components sub_sources = source else: sub_spaces = (space,) sub_sources = (source,) convert = [] for sp, src in safe_zip(sub_spaces, sub_sources): if src == 'features' and getattr(self, 
'view_converter', None) is not None: conv_fn = ( lambda batch, self=self, space=sp: self.view_converter.get_formatted_batch(batch, space)) else: conv_fn = None convert.append(conv_fn) return FiniteDatasetIterator(self, mode(self.get_num_examples(), batch_size, num_batches, rng), data_specs=data_specs, return_tuple=return_tuple, convert=convert) def get_data(self): if self.y is None: return self.X else: return (self.X, self.y) def use_design_loc(self, path): if not path.endswith('.npy'): raise ValueError("path should end with '.npy'") self.design_loc = path def get_topo_batch_axis(self): axis = self.view_converter.axes.index('b') return axis def enable_compression(self): self.compress = True def __getstate__(self): rval = copy.copy(self.__dict__) if self.compress: rval['compress_min'] = rval['X'].min(axis=0) rval['X'] = rval['X'] - rval['compress_min'] rval['compress_max'] = rval['X'].max(axis=0) rval['compress_max'][rval['compress_max'] == 0] = 1 rval['X'] *= 255. / rval['compress_max'] rval['X'] = np.cast['uint8'](rval['X']) if self.design_loc is not None: np.save(self.design_loc, rval['X']) del rval['X'] return rval def __setstate__(self, d): if d['design_loc'] is not None: if control.get_load_data(): fname = cache.datasetCache.cache_file(d['design_loc']) d['X'] = np.load(fname) else: d['X'] = None if d['compress']: X = d['X'] mx = d['compress_max'] mn = d['compress_min'] del d['compress_max'] del d['compress_min'] d['X'] = 0 self.__dict__.update(d) if X is not None: self.X = np.cast['float32'](X) * mx / 255. + mn else: self.X = None else: self.__dict__.update(d) if not all(m in d for m in ('data_specs', 'X_space', '_iter_data_specs', 'X_topo_space')): X_space = VectorSpace(dim=self.X.shape[1]) X_source = 'features' if self.y is None: space = X_space source = X_source else: y_space = VectorSpace(dim=self.y.shape[-1]) y_source = 'targets' space = CompositeSpace((X_space, y_space)) source = (X_source, y_source) self.data_specs = (space, source) self.X_space = X_space self._iter_data_specs = (X_space, X_source) view_converter = d.get('view_converter', None) if view_converter is not None: if not hasattr(view_converter, 'topo_space'): raise NotImplementedError("Not able to get a topo_space " "from this converter: %s" % view_converter) self.X_topo_space = view_converter.topo_space def _apply_holdout(self, _mode="sequential", train_size=0, train_prop=0): """ This function splits the dataset according to the number of train_size if defined by the user with respect to the mode provided by the user. Otherwise it will use the train_prop to divide the dataset into a training and holdout validation set. This function returns the training and validation dataset. Parameters ----------- _mode : WRITEME train_size : int Number of examples that will be assigned to the training dataset. train_prop : float Proportion of training dataset split. 
Returns ------- WRITEME """ if train_size != 0: size = train_size elif train_prop != 0: size = np.round(self.get_num_examples() * train_prop) else: raise ValueError("Initialize either split ratio and split size to " "non-zero value.") if size < self.get_num_examples() - size: dataset_iter = self.iterator( mode=_mode, batch_size=(self.get_num_examples() - size)) valid = dataset_iter.next() train = dataset_iter.next()[:(self.get_num_examples() - valid.shape[0])] else: dataset_iter = self.iterator(mode=_mode, batch_size=size) train = dataset_iter.next() valid = dataset_iter.next()[:(self.get_num_examples() - train.shape[0])] return (train, valid) def split_dataset_nfolds(self, nfolds=0): folds_iter = self.iterator(mode="sequential", num_batches=nfolds) folds = list(folds_iter) return folds def split_dataset_holdout(self, train_size=0, train_prop=0): return self._apply_holdout("sequential", train_size, train_prop) def bootstrap_nfolds(self, nfolds, rng=None): folds_iter = self.iterator(mode="random_slice", num_batches=nfolds, rng=rng) folds = list(folds_iter) return folds def bootstrap_holdout(self, train_size=0, train_prop=0, rng=None): return self._apply_holdout("random_slice", train_size, train_prop) def get_stream_position(self): return copy.copy(self.rng) def set_stream_position(self, pos): self.rng = copy.copy(pos) def restart_stream(self): self.reset_RNG() def reset_RNG(self): if 'default_rng' not in dir(self): self.default_rng = make_np_rng(None, [17, 2, 946], which_method="random_integers") self.rng = copy.copy(self.default_rng) def apply_preprocessor(self, preprocessor, can_fit=False): preprocessor.apply(self, can_fit) def get_topological_view(self, mat=None): if self.view_converter is None: raise Exception("Tried to call get_topological_view on a dataset " "that has no view converter") if mat is None: mat = self.X return self.view_converter.design_mat_to_topo_view(mat) def get_formatted_view(self, mat, dspace): if self.view_converter is None: raise Exception("Tried to call get_formatted_view on a dataset " "that has no view converter") self.X_space.np_validate(mat) return self.view_converter.get_formatted_batch(mat, dspace) def get_weights_view(self, mat): if self.view_converter is None: raise Exception("Tried to call get_weights_view on a dataset " "that has no view converter") return self.view_converter.design_mat_to_weights_view(mat) def set_topological_view(self, V, axes=('b', 0, 1, 'c')): if len(V.shape) != len(axes): raise ValueError("The topological view must have exactly 4 " "dimensions, corresponding to %s" % str(axes)) assert not contains_nan(V) rows = V.shape[axes.index(0)] cols = V.shape[axes.index(1)] channels = V.shape[axes.index('c')] self.view_converter = DefaultViewConverter([rows, cols, channels], axes=axes) self.X = self.view_converter.topo_view_to_design_mat(V) self.X_topo_space = self.view_converter.topo_space assert not contains_nan(self.X) X_space = VectorSpace(dim=self.X.shape[1]) X_source = 'features' if self.y is None: space = X_space source = X_source else: if self.y.ndim == 1: dim = 1 else: dim = self.y.shape[-1] if getattr(self, 'y_labels', None) is not None: y_space = IndexSpace(dim=dim, max_labels=self.y_labels) elif getattr(self, 'max_labels', None) is not None: y_space = IndexSpace(dim=dim, max_labels=self.max_labels) else: y_space = VectorSpace(dim=dim) y_source = 'targets' space = CompositeSpace((X_space, y_space)) source = (X_source, y_source) self.data_specs = (space, source) self.X_space = X_space self._iter_data_specs = (X_space, X_source)
BSD 3-Clause New or Revised License
alibaba-miil/imagenet21k
src_files/models/ofa/utils.py
MyNetwork.init_model
python
def init_model(self, model_init): for m in self.modules(): if isinstance(m, nn.Conv2d): if model_init == 'he_fout': n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif model_init == 'he_fin': n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels m.weight.data.normal_(0, math.sqrt(2. / n)) else: raise NotImplementedError if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): stdv = 1. / math.sqrt(m.weight.size(1)) m.weight.data.uniform_(-stdv, stdv) if m.bias is not None: m.bias.data.zero_()
Initialize the network's Conv2d, BatchNorm2d, BatchNorm1d, and Linear modules: Conv2d weights use He initialization ('he_fout' or 'he_fin'), BatchNorm weights/biases are reset to 1/0, and Linear layers use a uniform fan-in initialization with zero bias.
https://github.com/alibaba-miil/imagenet21k/blob/9dd5725438785413832303b5626eaa4bb5dc0535/src_files/models/ofa/utils.py#L121-L142
import math import os import sys from collections import OrderedDict try: from urllib import urlretrieve except ImportError: from urllib.request import urlretrieve import torch import torch.nn as nn def accuracy(output, target, topk=(1,)): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res class AverageMeter(object): def __init__(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count class MyModule(nn.Module): def forward(self, x): raise NotImplementedError @property def module_str(self): raise NotImplementedError @property def config(self): raise NotImplementedError @staticmethod def build_from_config(config): raise NotImplementedError class MyNetwork(MyModule): def forward(self, x): raise NotImplementedError @property def module_str(self): raise NotImplementedError @property def config(self): raise NotImplementedError @staticmethod def build_from_config(config): raise NotImplementedError def zero_last_gamma(self): raise NotImplementedError """ implemented methods """ def set_bn_param(self, momentum, eps): for m in self.modules(): if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): m.momentum = momentum m.eps = eps return def get_bn_param(self): for m in self.modules(): if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): return { 'momentum': m.momentum, 'eps': m.eps, } return None
MIT License
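A hedged usage sketch for MyNetwork.init_model above: MyNetwork is abstract, so a minimal concrete subclass is defined here purely for illustration; the TinyNet name and layer sizes are assumptions, not part of the source.

import torch.nn as nn

class TinyNet(MyNetwork):                       # MyNetwork as defined in the context above
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.BatchNorm2d(8))
        self.classifier = nn.Linear(8, 10)

    def forward(self, x):
        x = self.features(x).mean(dim=(2, 3))   # global average pool
        return self.classifier(x)

net = TinyNet()
net.init_model('he_fout')                       # 'he_fin' also works; any other string raises NotImplementedError
net.set_bn_param(momentum=0.1, eps=1e-5)        # inherited helper shown in the context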
sjenni/learningtospotartifacts
datasets/download_and_convert_stl10.py
read_all_images
python
def read_all_images(path_to_data): with open(path_to_data, 'rb') as f: everything = np.fromfile(f, dtype=np.uint8) images = np.reshape(everything, (-1, 3, 96, 96)) images = np.transpose(images, (0, 3, 2, 1)) return images
:param path_to_data: the file containing the binary images from the STL-10 dataset :return: an array containing all the images
https://github.com/sjenni/learningtospotartifacts/blob/10b58c35d44771878ae3fc61099494afe170939a/datasets/download_and_convert_stl10.py#L40-L66
import os import sys import tarfile import urllib import numpy as np import tensorflow as tf import dataset_utils from globals import STL10_DATADIR, STL10_TF_DATADIR HEIGHT = 96 WIDTH = 96 DEPTH = 3 SIZE = HEIGHT * WIDTH * DEPTH DATA_URL = 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz' DATA_PATH = os.path.join(STL10_DATADIR, 'stl10_binary/train_X.bin') LABEL_PATH = os.path.join(STL10_DATADIR, 'stl10_binary/train_y.bin') def read_labels(path_to_labels): with open(path_to_labels, 'rb') as f: labels = np.fromfile(f, dtype=np.uint8) return labels
MIT License
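A hedged usage sketch for read_all_images / read_labels above, assuming the STL-10 binary files have already been downloaded and extracted to the paths defined in the module; the expected shapes refer to the standard 5000-image training split.

images = read_all_images(DATA_PATH)     # DATA_PATH points at stl10_binary/train_X.bin
print(images.shape, images.dtype)       # expected: (5000, 96, 96, 3) uint8
labels = read_labels(LABEL_PATH)
print(labels.shape, labels.min(), labels.max())   # expected: (5000,) 1 10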
s41m0n/dechainy
dechainy/plugins.py
Mitigator.get
python
def get(self) -> str: return dumps(self.__rules)
Function to retrieve all the rules Returns: str: The string representation of the rules array
https://github.com/s41m0n/dechainy/blob/8a1c4f71b5d13f984048bf64039e0d7f76e386a8/dechainy/plugins.py#L289-L295
import ctypes as ct from math import ceil from types import ModuleType from typing import List, Union from json import dumps from bcc.table import QueueStack, ArrayBase, TableBase from .exceptions import HookDisabledException, MetricUnspecifiedException from .configurations import ClusterConfig, FirewallRule, MetricFeatures, MitigatorRule, ProbeConfig from .ebpf import BPF, LpmKey, Program, SwapStateCompile, ProbeCompilation, ClusterCompilation, is_batch_supp from .utility import Dict, CPThread, protocol_to_int, ipv4_to_network_int, port_to_network_int, ctype_to_normal class BaseEntity: def __init__(self, config: Union[ProbeConfig, ClusterConfig], module: ModuleType, programs: Union[ProbeCompilation, ClusterCompilation]): self._is_destroyed: bool = False self._thread: CPThread = None self._module: ModuleType = module self._config: Union[ProbeConfig, ClusterConfig] = config self.programs: Union[ProbeCompilation, ClusterCompilation] = programs if hasattr(module, "post_compilation"): module.post_compilation(self) if hasattr(module, "reaction_function"): self._thread = CPThread( target=module.reaction_function, args=(self,), time_window=self._config.time_window) self._thread.start() def __del__(self): if self._is_destroyed: return self._is_destroyed = True if self._thread: self._thread.stop() def __repr__(self): return self._config def exec(self) -> any: if not hasattr(self._module, "reaction_function_rest"): raise AttributeError return self._module.reaction_function_rest(self) def __getitem__(self, key: str) -> Union[str, Program]: return self.programs[key] if not self._is_destroyed else exit(1) class Cluster(BaseEntity): class Plugin(BaseEntity): def __init__(self, config: ProbeConfig, module: ModuleType, programs: ProbeCompilation): super().__init__(config, module, programs) self.__active_hooks: List[str] = [] for hook in self.__class__.accepted_hooks(): if config[hook]: self.__active_hooks.append(hook) def _check_hook_active(self, program_type: str): if program_type not in self.__active_hooks: raise HookDisabledException( f"The hook {program_type} is not active for this probe") def is_in_cluster(self) -> bool: return self._config.is_in_cluster @staticmethod def accepted_hooks() -> List[str]: return ["ingress", "egress"] @staticmethod def is_programmable() -> bool: return False @staticmethod def get_cflags(config: ProbeConfig) -> List[str]: return [] class Adaptmon(Plugin): @staticmethod def is_programmable(): return True def __do_retrieve_metric(map_ref: Union[QueueStack, TableBase], features: List[MetricFeatures]) -> any: ret = [] if isinstance(map_ref, QueueStack): ret = [ctype_to_normal(x) for x in map_ref.values()] elif isinstance(map_ref, ArrayBase): ret = [ctype_to_normal(v) for _, v in map_ref.items_lookup_batch( )] if is_batch_supp() else [ctype_to_normal(v) for v in map_ref.values()] if features.empty: length = len(ret) keys = (map_ref.Key * length)() new_values = (map_ref.Leaf * length)() holder = map_ref.Leaf() for i in range(length): keys[i] = ct.c_int(i) new_values[i] = holder map_ref.items_update_batch(keys, new_values) else: exception = False if is_batch_supp(): try: for k, v in map_ref.items_lookup_and_delete_batch() if features.empty else map_ref.items_lookup_batch(): ret.append((ctype_to_normal(k), ctype_to_normal(v))) except Exception: exception = True if not ret and exception: for k, v in map_ref.items(): ret.append((ctype_to_normal(k), ctype_to_normal(v))) if features.empty: del map_ref[k] return str(ret[0]) if len(ret) == 1 else ret def retrieve_metric(self, 
program_type: str, metric_name: str) -> any: self._check_hook_active(program_type) if isinstance(self.programs[program_type], SwapStateCompile): self.programs[program_type].trigger_read() features = self.programs[program_type].features[metric_name] if not features.export: raise MetricUnspecifiedException( f"Metric {metric_name} unspecified") return self.__do_retrieve_metric(self.programs[program_type][metric_name], features) def retrieve_metrics(self, program_type: str) -> any: self._check_hook_active(program_type) if isinstance(self.programs[program_type], SwapStateCompile): self.programs[program_type].trigger_read() ret = {} for map_name, features in self.programs[program_type].features.items(): if not features.export: continue ret[map_name] = self.__do_retrieve_metric( self.programs[program_type][map_name], features) return ret class Mitigator(Plugin): _MAX_IPS = 1000 def __init__(self, config: ProbeConfig, module: ModuleType, programs: ProbeCompilation): super().__init__(config, module, programs) self.__rules: List[MitigatorRule] = [] self.__max_ips: int = Mitigator._MAX_IPS if not config.extra or "max_ips" not in config.extra else config.extra["max_ips"] def get_at(self, rule_id: int) -> MitigatorRule: if rule_id >= len(self.__rules): raise IndexError("The Rule ID provided is wrong") return self.__rules[rule_id]
Apache License 2.0
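A hedged usage sketch for Mitigator.get above. Constructing a Mitigator requires a ProbeConfig, a plugin module and compiled eBPF programs, which is out of scope here, so `mitigator` is assumed to be an already-created instance.

import json

rules_json = mitigator.get()        # JSON string produced by dumps(self.__rules)
rules = json.loads(rules_json)      # back to a Python list of rules
print(f"{len(rules)} mitigation rule(s) installed")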
sissaschool/xmlschema
xmlschema/documents.py
XmlDocument.write
python
def write(self, file: Union[str, TextIO, BinaryIO], encoding: str = 'us-ascii', xml_declaration: bool = False, default_namespace: Optional[str] = None, method: str = "xml") -> None: if self._lazy: raise XMLResourceError("cannot serialize a lazy XML document") kwargs: Dict[str, Any] = { 'xml_declaration': xml_declaration, 'encoding': encoding, 'method': method, } if not default_namespace: kwargs['namespaces'] = self.namespaces else: namespaces: Optional[Dict[Optional[str], str]] if self.namespaces is None: namespaces = {} else: namespaces = {k: v for k, v in self.namespaces.items()} if hasattr(self._root, 'nsmap'): namespaces[None] = default_namespace else: namespaces[''] = default_namespace kwargs['namespaces'] = namespaces fp: Union[TextIO, BinaryIO] _string = etree_tostring(self._root, **kwargs) if isinstance(file, str): if isinstance(_string, str): with open(file, 'w', encoding='utf-8') as fp: fp.write(_string) else: with open(file, 'wb') as fp: fp.write(_string) elif isinstance(file, TextIOBase): if isinstance(_string, bytes): file.write(_string.decode('utf-8')) else: file.write(_string) elif isinstance(file, IOBase): if isinstance(_string, str): file.write(_string.encode('utf-8')) else: file.write(_string) else: raise XMLSchemaTypeError(f"unexpected type {type(file)} for 'file' argument")
Serialize an XML resource to a file. Cannot be used with lazy resources.
https://github.com/sissaschool/xmlschema/blob/155c31aefe558399c4e6ce98aa8829ec67c18fe9/xmlschema/documents.py#L588-L639
import json from io import IOBase, TextIOBase from typing import Any, Dict, List, Optional, Type, Union, Tuple, IO, BinaryIO, TextIO, Iterator from .exceptions import XMLSchemaTypeError, XMLSchemaValueError, XMLResourceError from .names import XSD_NAMESPACE, XSI_TYPE from .etree import ElementTree, etree_tostring from .aliases import ElementType, XMLSourceType, NamespacesType, LocationsType, LazyType, SchemaSourceType, ConverterType, DecodeType, EncodeType, JsonDecodeType from .helpers import is_etree_document from .resources import fetch_schema_locations, XMLResource from .validators import XMLSchema10, XMLSchemaBase, XMLSchemaValidationError def get_context(xml_document: Union[XMLSourceType, XMLResource], schema: Optional[Union[XMLSchemaBase, SchemaSourceType]] = None, cls: Optional[Type[XMLSchemaBase]] = None, locations: Optional[LocationsType] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: LazyType = False, dummy_schema: bool = False) -> Tuple[XMLResource, XMLSchemaBase]: resource: XMLResource kwargs: Dict[Any, Any] if cls is None: cls = XMLSchema10 elif not issubclass(cls, XMLSchemaBase): raise XMLSchemaTypeError(f"invalid schema class {cls}") if isinstance(xml_document, XMLResource): resource = xml_document else: resource = XMLResource(xml_document, base_url, defuse=defuse, timeout=timeout, lazy=lazy) if isinstance(schema, XMLSchemaBase) and resource.namespace in schema.maps.namespaces: return resource, schema if isinstance(resource, XmlDocument) and isinstance(resource.schema, XMLSchemaBase): return resource, resource.schema try: schema_location, locations = fetch_schema_locations(resource, locations, base_url=base_url) except ValueError: if schema is None: if XSI_TYPE in resource.root.attrib and cls.meta_schema is not None: return resource, cls.meta_schema elif dummy_schema: return resource, get_dummy_schema(resource, cls) else: msg = "no schema can be retrieved for the provided XML data" raise XMLSchemaValueError(msg) from None elif isinstance(schema, XMLSchemaBase): return resource, schema else: return resource, cls(schema, locations=locations, base_url=base_url, defuse=defuse, timeout=timeout) else: kwargs = dict(locations=locations, defuse=defuse, timeout=timeout) if schema is None or isinstance(schema, XMLSchemaBase): return resource, cls(schema_location, **kwargs) else: return resource, cls(schema, **kwargs) def get_dummy_schema(resource: XMLResource, cls: Type[XMLSchemaBase]) -> XMLSchemaBase: tag = resource.root.tag if tag.startswith('{'): namespace, name = tag[1:].split('}') else: namespace, name = '', tag if namespace: return cls( '<xs:schema xmlns:xs="{0}" targetNamespace="{1}">\n' ' <xs:element name="{2}"/>\n' '</xs:schema>'.format(XSD_NAMESPACE, namespace, name) ) else: return cls( '<xs:schema xmlns:xs="{0}">\n' ' <xs:element name="{1}"/>\n' '</xs:schema>'.format(XSD_NAMESPACE, name) ) def get_lazy_json_encoder(errors: List[XMLSchemaValidationError]) -> Type[json.JSONEncoder]: class JSONLazyEncoder(json.JSONEncoder): def default(self, obj: Any) -> Any: if isinstance(obj, Iterator): while True: result = next(obj, None) if isinstance(result, XMLSchemaValidationError): errors.append(result) else: return result return json.JSONEncoder.default(self, obj) return JSONLazyEncoder def validate(xml_document: Union[XMLSourceType, XMLResource], schema: Optional[XMLSchemaBase] = None, cls: Optional[Type[XMLSchemaBase]] = None, path: Optional[str] = None, schema_path: Optional[str] = None, use_defaults: bool = True, namespaces: 
Optional[NamespacesType] = None, locations: Optional[LocationsType] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: LazyType = False) -> None: source, _schema = get_context( xml_document, schema, cls, locations, base_url, defuse, timeout, lazy ) _schema.validate(source, path, schema_path, use_defaults, namespaces) def is_valid(xml_document: Union[XMLSourceType, XMLResource], schema: Optional[XMLSchemaBase] = None, cls: Optional[Type[XMLSchemaBase]] = None, path: Optional[str] = None, schema_path: Optional[str] = None, use_defaults: bool = True, namespaces: Optional[NamespacesType] = None, locations: Optional[LocationsType] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: LazyType = False) -> bool: source, schema = get_context( xml_document, schema, cls, locations, base_url, defuse, timeout, lazy ) return schema.is_valid(source, path, schema_path, use_defaults, namespaces) def iter_errors(xml_document: Union[XMLSourceType, XMLResource], schema: Optional[XMLSchemaBase] = None, cls: Optional[Type[XMLSchemaBase]] = None, path: Optional[str] = None, schema_path: Optional[str] = None, use_defaults: bool = True, namespaces: Optional[NamespacesType] = None, locations: Optional[LocationsType] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: LazyType = False) -> Iterator[XMLSchemaValidationError]: source, schema = get_context( xml_document, schema, cls, locations, base_url, defuse, timeout, lazy ) return schema.iter_errors(source, path, schema_path, use_defaults, namespaces) def to_dict(xml_document: Union[XMLSourceType, XMLResource], schema: Optional[XMLSchemaBase] = None, cls: Optional[Type[XMLSchemaBase]] = None, path: Optional[str] = None, process_namespaces: bool = True, locations: Optional[LocationsType] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: LazyType = False, **kwargs: Any) -> DecodeType[Any]: source, _schema = get_context( xml_document, schema, cls, locations, base_url, defuse, timeout, lazy ) return _schema.decode(source, path=path, process_namespaces=process_namespaces, **kwargs) def to_json(xml_document: Union[XMLSourceType, XMLResource], fp: Optional[IO[str]] = None, schema: Optional[XMLSchemaBase] = None, cls: Optional[Type[XMLSchemaBase]] = None, path: Optional[str] = None, converter: Optional[ConverterType] = None, process_namespaces: bool = True, locations: Optional[LocationsType] = None, base_url: Optional[str] = None, defuse: str = 'remote', timeout: int = 300, lazy: LazyType = False, json_options: Optional[Dict[str, Any]] = None, **kwargs: Any) -> JsonDecodeType: source, _schema = get_context( xml_document, schema, cls, locations, base_url, defuse, timeout, lazy ) if json_options is None: json_options = {} if 'decimal_type' not in kwargs: kwargs['decimal_type'] = float kwargs['converter'] = converter kwargs['process_namespaces'] = process_namespaces errors: List[XMLSchemaValidationError] = [] if path is None and source.is_lazy() and 'cls' not in json_options: json_options['cls'] = get_lazy_json_encoder(errors) obj = _schema.decode(source, path=path, **kwargs) if isinstance(obj, tuple): errors.extend(obj[1]) if fp is not None: json.dump(obj[0], fp, **json_options) return tuple(errors) else: result = json.dumps(obj[0], **json_options) return result, tuple(errors) elif fp is not None: json.dump(obj, fp, **json_options) return None if not errors else tuple(errors) else: result = json.dumps(obj, **json_options) 
return result if not errors else (result, tuple(errors)) def from_json(source: Union[str, bytes, IO[str]], schema: XMLSchemaBase, path: Optional[str] = None, converter: Optional[ConverterType] = None, json_options: Optional[Dict[str, Any]] = None, **kwargs: Any) -> EncodeType[ElementType]: if not isinstance(schema, XMLSchemaBase): raise XMLSchemaTypeError("invalid type %r for argument 'schema'" % type(schema)) elif json_options is None: json_options = {} if isinstance(source, (str, bytes)): obj = json.loads(source, **json_options) else: obj = json.load(source, **json_options) return schema.encode(obj, path=path, converter=converter, **kwargs) class XmlDocument(XMLResource): schema: Optional[XMLSchemaBase] = None _fallback_schema: Optional[XMLSchemaBase] = None validation: str = 'skip' namespaces: Optional[NamespacesType] = None errors: Union[Tuple[()], List[XMLSchemaValidationError]] = () def __init__(self, source: XMLSourceType, schema: Optional[Union[XMLSchemaBase, SchemaSourceType]] = None, cls: Optional[Type[XMLSchemaBase]] = None, validation: str = 'strict', namespaces: Optional[NamespacesType] = None, locations: Optional[LocationsType] = None, base_url: Optional[str] = None, allow: str = 'all', defuse: str = 'remote', timeout: int = 300, lazy: LazyType = False) -> None: if cls is None: cls = XMLSchema10 self.validation = validation self._namespaces = namespaces super(XmlDocument, self).__init__(source, base_url, allow, defuse, timeout, lazy) if isinstance(schema, XMLSchemaBase) and self.namespace in schema.maps.namespaces: self.schema = schema elif schema is not None and not isinstance(schema, XMLSchemaBase): self.schema = cls( source=schema, base_url=base_url, allow=allow, defuse=defuse, timeout=timeout, ) else: try: schema_location, locations = fetch_schema_locations(self, locations, base_url) except ValueError: if XSI_TYPE in self._root.attrib: self.schema = cls.meta_schema elif validation != 'skip': msg = "no schema can be retrieved for the XML resource" raise XMLSchemaValueError(msg) from None else: self._fallback_schema = get_dummy_schema(self, cls) else: self.schema = cls( source=schema_location, validation='strict', locations=locations, defuse=defuse, allow=allow, timeout=timeout, ) if self.schema is None: pass elif validation == 'strict': self.schema.validate(self, namespaces=self.namespaces) elif validation == 'lax': self.errors = [e for e in self.schema.iter_errors(self, namespaces=self.namespaces)] elif validation != 'skip': raise XMLSchemaValueError("{!r}: not a validation mode".format(validation)) def parse(self, source: XMLSourceType, lazy: LazyType = False) -> None: super(XmlDocument, self).parse(source, lazy) self.namespaces = self.get_namespaces(self._namespaces) if self.schema is None: pass elif self.validation == 'strict': self.schema.validate(self, namespaces=self.namespaces) elif self.validation == 'lax': self.errors = [e for e in self.schema.iter_errors(self, namespaces=self.namespaces)] def getroot(self) -> ElementType: return self._root def get_etree_document(self) -> Any: if is_etree_document(self._source): return self._source elif self._lazy: msg = "cannot create an ElementTree from a lazy resource" raise XMLResourceError(msg) elif hasattr(self._root, 'nsmap'): return self._root.getroottree() else: return ElementTree.ElementTree(self._root) def tostring(self, indent: str = '', max_lines: Optional[int] = None, spaces_for_tab: int = 4, xml_declaration: bool = False, encoding: str = 'unicode', method: str = 'xml') -> str: if self._lazy: raise 
XMLResourceError("cannot serialize a lazy XML document") _string = etree_tostring( elem=self._root, namespaces=self.namespaces, xml_declaration=xml_declaration, encoding=encoding, method=method ) if isinstance(_string, bytes): return _string.decode('utf-8') return _string def decode(self, **kwargs: Any) -> DecodeType[Any]: if 'validation' not in kwargs: kwargs['validation'] = self.validation if 'namespaces' not in kwargs: kwargs['namespaces'] = self.namespaces schema = self.schema or self._fallback_schema if schema is None: return None obj = schema.to_dict(self, **kwargs) return obj[0] if isinstance(obj, tuple) else obj def to_json(self, fp: Optional[IO[str]] = None, json_options: Optional[Dict[str, Any]] = None, **kwargs: Any) -> JsonDecodeType: if json_options is None: json_options = {} path = kwargs.pop('path', None) if 'validation' not in kwargs: kwargs['validation'] = self.validation if 'namespaces' not in kwargs: kwargs['namespaces'] = self.namespaces if 'decimal_type' not in kwargs: kwargs['decimal_type'] = float errors: List[XMLSchemaValidationError] = [] if path is None and self._lazy and 'cls' not in json_options: json_options['cls'] = get_lazy_json_encoder(errors) kwargs['lazy_decode'] = True schema = self.schema or self._fallback_schema if schema is not None: obj = schema.decode(self, path=path, **kwargs) else: obj = None if isinstance(obj, tuple): if fp is not None: json.dump(obj[0], fp, **json_options) obj[1].extend(errors) return tuple(obj[1]) else: result = json.dumps(obj[0], **json_options) obj[1].extend(errors) return result, tuple(obj[1]) elif fp is not None: json.dump(obj, fp, **json_options) return None if not errors else tuple(errors) else: result = json.dumps(obj, **json_options) return result if not errors else (result, tuple(errors))
MIT License
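A hedged usage sketch for XmlDocument.write above: a small in-memory XML string is parsed with validation='skip' (so no schema is needed) and then serialized to a file; the file name is an arbitrary illustrative choice.

doc = XmlDocument('<root><a>1</a><b>2</b></root>', validation='skip')
doc.write('out.xml', encoding='utf-8', xml_declaration=True)

with open('out.xml', 'rb') as fp:    # a non-unicode encoding is written in binary mode
    print(fp.read())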
ricmoo/pycoind
pycoind/script/script.py
_math_op
python
def _math_op(stack, func, check_overflow = True): count = len(inspect.getargspec(func).args) if len(stack) < count: return False args = stack[-count:] stack[-count:] = [] if check_overflow: for arg in args: if len(arg) > 4: return False result = func(*args) if result == True: result = One elif result == False: result = Zero if result is not None: stack.append(result) return True
Replaces the top N items from the stack with the result of the callable func; N is func's argument count. A boolean result will push either a 0 or 1 on the stack. None will push nothing. Otherwise, the result must be a ByteVector. False is returned on error, otherwise True.
https://github.com/ricmoo/pycoind/blob/33a600e617ec89c5a0f680ce7725b777d6176ea8/pycoind/script/script.py#L131-L163
import inspect import struct from .bytevector import ByteVector from . import opcodes from .. import coins from .. import protocol from .. import util from ..protocol import format __all__ = ['Script', 'Tokenizer'] Zero = ByteVector.from_value(0) One = ByteVector.from_value(1) def _is_pubkey(opcode, bytes, data): if opcode != Tokenizer.OP_LITERAL: return False if len(data) != 65 or data[0] != chr(0x04): return False return True def _is_hash160(opcode, bytes, data): if opcode != Tokenizer.OP_LITERAL: return False if len(data) != 20: return False return True def _is_hash256(opcode, bytes, data): if opcode != Tokenizer.OP_LITERAL: return False if len(data) != 32: return False return True def _too_long(opcode, bytes, data): return False SCRIPT_FORM_NON_STANDARD = 'non-standard' SCRIPT_FORM_PAY_TO_PUBKEY_HASH = 'pay-to-pubkey-hash' SCRIPT_FORM_PAY_TO_PUBKEY = 'pay-to-pubkey' SCRIPT_FORM_UNSPENDABLE = 'unspendable' SCRIPT_FORM_ANYONE_CAN_SPEND = 'anyone-can-spend' SCRIPT_FORM_TRANSACTION_PUZZLE_HASH256 = 'transaction-puzzle-hash256' STANDARD_SCRIPT_FORMS = [ SCRIPT_FORM_PAY_TO_PUBKEY_HASH, SCRIPT_FORM_PAY_TO_PUBKEY ] TEMPLATE_PAY_TO_PUBKEY_HASH = (lambda t: len(t) == 5, opcodes.OP_DUP, opcodes.OP_HASH160, _is_hash160, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG) TEMPLATE_PAY_TO_PUBKEY = (lambda t: len(t) == 2, _is_pubkey, opcodes.OP_CHECKSIG) Templates = [ (SCRIPT_FORM_PAY_TO_PUBKEY_HASH, TEMPLATE_PAY_TO_PUBKEY_HASH), (SCRIPT_FORM_PAY_TO_PUBKEY, TEMPLATE_PAY_TO_PUBKEY), ] def _stack_op(stack, func): count = len(inspect.getargspec(func).args) if len(stack) < count: return False args = stack[-count:] stack[-count:] = [] for item in func(*args): stack.append(item) return True
MIT License
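A hedged usage sketch for _math_op above, mimicking an OP_ADD-style arithmetic opcode. It assumes ByteVector exposes a numeric .value accessor and supports len(), as the surrounding module's use of from_value and the overflow check suggest.

stack = [ByteVector.from_value(2), ByteVector.from_value(3)]
ok = _math_op(stack, lambda a, b: ByteVector.from_value(a.value + b.value))
print(ok, len(stack), stack[-1].value)    # expected: True 1 5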
david-ds/adventofcode-2020
day-02/part-2/coco.py
CocoSubmission.run
python
def run(self, s): inputs = [l.strip().split(" ") for l in s.split("\n")] N = 0 for policy, letter, password in inputs: a, b = policy.split("-") a, b = int(a) - 1, int(b) - 1 letter = letter[0] if (password[a] == letter) != (password[b] == letter): N += 1 return N
:param s: input in string format :return: solution flag
https://github.com/david-ds/adventofcode-2020/blob/dfa320e6b36a586b80cc174a8c9862673ea58374/day-02/part-2/coco.py#L5-L18
from tool.runners.python import SubmissionPy class CocoSubmission(SubmissionPy):
MIT License
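A hedged usage sketch for CocoSubmission.run above, using the three example password lines from the day-2 puzzle statement and assuming SubmissionPy's constructor takes no required arguments; only the first line satisfies the positional policy, so the expected answer is 1.

sample = "1-3 a: abcde\n1-3 b: cdefg\n2-9 c: ccccccccc"
print(CocoSubmission().run(sample))    # expected: 1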
alliefitter/boto3_type_annotations
boto3_type_annotations_with_docs/boto3_type_annotations/greengrass/client.py
Client.create_function_definition
python
def create_function_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict: pass
Creates a Lambda function definition which contains a list of Lambda functions and their configurations to be used in a group. You can create an initial version of the definition by providing a list of Lambda functions and their configurations now, or use ''CreateFunctionDefinitionVersion'' later. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/CreateFunctionDefinition>`_ **Request Syntax** :: response = client.create_function_definition( AmznClientToken='string', InitialVersion={ 'DefaultConfig': { 'Execution': { 'IsolationMode': 'GreengrassContainer'|'NoContainer', 'RunAs': { 'Gid': 123, 'Uid': 123 } } }, 'Functions': [ { 'FunctionArn': 'string', 'FunctionConfiguration': { 'EncodingType': 'binary'|'json', 'Environment': { 'AccessSysfs': True|False, 'Execution': { 'IsolationMode': 'GreengrassContainer'|'NoContainer', 'RunAs': { 'Gid': 123, 'Uid': 123 } }, 'ResourceAccessPolicies': [ { 'Permission': 'ro'|'rw', 'ResourceId': 'string' }, ], 'Variables': { 'string': 'string' } }, 'ExecArgs': 'string', 'Executable': 'string', 'MemorySize': 123, 'Pinned': True|False, 'Timeout': 123 }, 'Id': 'string' }, ] }, Name='string', tags={ 'string': 'string' } ) **Response Syntax** :: { 'Arn': 'string', 'CreationTimestamp': 'string', 'Id': 'string', 'LastUpdatedTimestamp': 'string', 'LatestVersion': 'string', 'LatestVersionArn': 'string', 'Name': 'string' } **Response Structure** - *(dict) --* - **Arn** *(string) --* The ARN of the definition. - **CreationTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was created. - **Id** *(string) --* The ID of the definition. - **LastUpdatedTimestamp** *(string) --* The time, in milliseconds since the epoch, when the definition was last updated. - **LatestVersion** *(string) --* The latest version of the definition. - **LatestVersionArn** *(string) --* The ARN of the latest version of the definition. - **Name** *(string) --* The name of the definition. :type AmznClientToken: string :param AmznClientToken: A client token used to correlate requests and responses. :type InitialVersion: dict :param InitialVersion: Information about the initial version of the function definition. - **DefaultConfig** *(dict) --* The default configuration that applies to all Lambda functions in this function definition version. Individual Lambda functions can override these settings. - **Execution** *(dict) --* Configuration information that specifies how a Lambda function runs. - **IsolationMode** *(string) --* Specifies whether the Lambda function runs in a Greengrass container (default) or without containerization. Unless your scenario requires that you run without containerization, we recommend that you run in a Greengrass container. Omit this value to run the Lambda function with the default containerization for the group. - **RunAs** *(dict) --* Specifies the user and group whose permissions are used when running the Lambda function. You can specify one or both values to override the default values. We recommend that you avoid running as root unless absolutely necessary to minimize the risk of unintended changes or malicious attacks. To run as root, you must set \'\'IsolationMode\'\' to \'\'NoContainer\'\' and update config.json in \'\'greengrass-root/config\'\' to set \'\'allowFunctionsToRunAsRoot\'\' to \'\'yes\'\'. - **Gid** *(integer) --* The group ID whose permissions are used to run a Lambda function. - **Uid** *(integer) --* The user ID whose permissions are used to run a Lambda function. 
- **Functions** *(list) --* A list of Lambda functions in this function definition version. - *(dict) --* Information about a Lambda function. - **FunctionArn** *(string) --* The ARN of the Lambda function. - **FunctionConfiguration** *(dict) --* The configuration of the Lambda function. - **EncodingType** *(string) --* The expected encoding type of the input payload for the function. The default is \'\'json\'\'. - **Environment** *(dict) --* The environment configuration of the function. - **AccessSysfs** *(boolean) --* If true, the Lambda function is allowed to access the host\'s /sys folder. Use this when the Lambda function needs to read device information from /sys. This setting applies only when you run the Lambda function in a Greengrass container. - **Execution** *(dict) --* Configuration related to executing the Lambda function - **IsolationMode** *(string) --* Specifies whether the Lambda function runs in a Greengrass container (default) or without containerization. Unless your scenario requires that you run without containerization, we recommend that you run in a Greengrass container. Omit this value to run the Lambda function with the default containerization for the group. - **RunAs** *(dict) --* Specifies the user and group whose permissions are used when running the Lambda function. You can specify one or both values to override the default values. We recommend that you avoid running as root unless absolutely necessary to minimize the risk of unintended changes or malicious attacks. To run as root, you must set \'\'IsolationMode\'\' to \'\'NoContainer\'\' and update config.json in \'\'greengrass-root/config\'\' to set \'\'allowFunctionsToRunAsRoot\'\' to \'\'yes\'\'. - **Gid** *(integer) --* The group ID whose permissions are used to run a Lambda function. - **Uid** *(integer) --* The user ID whose permissions are used to run a Lambda function. - **ResourceAccessPolicies** *(list) --* A list of the resources, with their permissions, to which the Lambda function will be granted access. A Lambda function can have at most 10 resources. ResourceAccessPolicies apply only when you run the Lambda function in a Greengrass container. - *(dict) --* A policy used by the function to access a resource. - **Permission** *(string) --* The permissions that the Lambda function has to the resource. Can be one of \'\'rw\'\' (read/write) or \'\'ro\'\' (read-only). - **ResourceId** *(string) --* The ID of the resource. (This ID is assigned to the resource when you create the resource definiton.) - **Variables** *(dict) --* Environment variables for the Lambda function\'s configuration. - *(string) --* - *(string) --* - **ExecArgs** *(string) --* The execution arguments. - **Executable** *(string) --* The name of the function executable. - **MemorySize** *(integer) --* The memory size, in KB, which the function requires. This setting is not applicable and should be cleared when you run the Lambda function without containerization. - **Pinned** *(boolean) --* True if the function is pinned. Pinned means the function is long-lived and starts when the core starts. - **Timeout** *(integer) --* The allowed function execution time, after which Lambda should terminate the function. This timeout still applies to pinned Lambda functions for each request. - **Id** *(string) --* A descriptive or arbitrary ID for the function. This value must be unique within the function definition version. Max length is 128 characters with pattern \'\'[a-zA-Z0-9:_-]+\'\'. 
:type Name: string :param Name: The name of the function definition. :type tags: dict :param tags: Tag(s) to add to the new resource - *(string) --* - *(string) --* :rtype: dict :returns:
https://github.com/alliefitter/boto3_type_annotations/blob/2a88aa562b1aee6e8a6cc30402980884b3707fbb/boto3_type_annotations_with_docs/boto3_type_annotations/greengrass/client.py#L482-L606
from typing import Optional from botocore.client import BaseClient from typing import Dict from botocore.paginate import Paginator from botocore.waiter import Waiter from typing import Union from typing import List class Client(BaseClient): def associate_role_to_group(self, GroupId: str, RoleArn: str = None) -> Dict: pass def associate_service_role_to_account(self, RoleArn: str = None) -> Dict: pass def can_paginate(self, operation_name: str = None): pass def create_connector_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict: pass def create_connector_definition_version(self, ConnectorDefinitionId: str, AmznClientToken: str = None, Connectors: List = None) -> Dict: pass def create_core_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict: pass def create_core_definition_version(self, CoreDefinitionId: str, AmznClientToken: str = None, Cores: List = None) -> Dict: pass def create_deployment(self, GroupId: str, AmznClientToken: str = None, DeploymentId: str = None, DeploymentType: str = None, GroupVersionId: str = None) -> Dict: pass def create_device_definition(self, AmznClientToken: str = None, InitialVersion: Dict = None, Name: str = None, tags: Dict = None) -> Dict: pass def create_device_definition_version(self, DeviceDefinitionId: str, AmznClientToken: str = None, Devices: List = None) -> Dict: pass
MIT License
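A hedged usage sketch for create_function_definition above: the class shown is only a type-annotation stub, so a real call goes through boto3's generated client. The region, ARN, IDs and names below are illustrative placeholders, not values from the source.

import boto3

client = boto3.client('greengrass', region_name='us-east-1')
response = client.create_function_definition(
    Name='example-functions',
    InitialVersion={
        'Functions': [{
            'Id': 'example-fn',
            'FunctionArn': 'arn:aws:lambda:us-east-1:123456789012:function:example:1',
            'FunctionConfiguration': {'MemorySize': 16384, 'Pinned': False, 'Timeout': 3},
        }]
    },
)
print(response['Id'], response['LatestVersionArn'])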
kawaboongawa/zolver
src/Puzzle/Edge.py
Edge.is_compatible
python
def is_compatible(self, e2): return (self.type == TypeEdge.HOLE and e2.type == TypeEdge.HEAD) or (self.type == TypeEdge.HEAD and e2.type == TypeEdge.HOLE) or self.type == TypeEdge.UNDEFINED or e2.type == TypeEdge.UNDEFINED
Helper to determine if two edges are compatible
https://github.com/kawaboongawa/zolver/blob/c4ea07041e7fa617ebfe2c1cbf117eb345c7ce1e/src/Puzzle/Edge.py#L45-L49
import numpy as np from Puzzle.Enums import TypeEdge, Directions class Edge: def __init__(self, shape, color, type=TypeEdge.HOLE, connected=False, direction=Directions.N): self.shape = shape self.shape_backup = shape self.color = color self.type = type self.connected = connected self.direction = direction def is_border(self, threshold): def dist_to_line(p1, p2, p3): return np.linalg.norm(np.cross(p2 - p1, p1 - p3)) / np.linalg.norm(p2 - p1) total_dist = 0 for p in self.shape: total_dist += dist_to_line(self.shape[0], self.shape[-1], p) return total_dist < threshold def backup_shape(self): self.shape_backup = np.copy(self.shape) def restore_backup_shape(self): self.shape = self.shape_backup
MIT License
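A hedged usage sketch for Edge.is_compatible above: the shape and color payloads are dummy values because only the `type` fields are inspected.

import numpy as np
from Puzzle.Enums import TypeEdge

head = Edge(shape=np.zeros((4, 2)), color=None, type=TypeEdge.HEAD)
hole = Edge(shape=np.zeros((4, 2)), color=None, type=TypeEdge.HOLE)
print(head.is_compatible(hole))   # True  (head/hole pair)
print(head.is_compatible(head))   # False (two heads never fit)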
seanlee97/clfzoo
clfzoo/rcnn/model.py
RCNN.run_epoch
python
def run_epoch(self, train, dev, epoch): total_loss, total_accu = 0.0, 0.0 for idx, batch in enumerate(train, 1): feed_dict = { self.s: batch['token_ids'], self.sh: batch['token_char_ids'], self.label: batch['label_ids'], self.dropout: self.config.dropout, } try: _, loss, accu = self.sess.run([self.train_op, self.loss, self.accu], feed_dict) total_loss += loss total_accu += accu except Exception as e: print(e) continue if idx % self.config.log_per_batch == 0: self.logger.info("Average loss from batch {} to {} is {}, accuracy is {}".format( idx-self.config.log_per_batch+1, idx, total_loss/self.config.log_per_batch, total_accu/self.config.log_per_batch )) total_loss = 0 total_accu = 0 _, metrics = self.run_evaluate(dev) msg = " - ".join(["{} {:04.2f}".format(k, v) for k, v in metrics.items()]) self.logger.info(msg) return metrics
Train one epoch. Args: train: train dataset dev: dev dataset epoch: current epoch
https://github.com/seanlee97/clfzoo/blob/8c51ee316d51a4ec1d3e0c5c91a64248d6705214/clfzoo/rcnn/model.py#L142-L186
import tensorflow as tf import clfzoo.libs as libs import clfzoo.libs.loss as loss from clfzoo.libs.conv import Conv from clfzoo.libs.rnn import CudnnRNN, RNN from clfzoo.libs.highway import Highway from clfzoo.base import BaseModel class RCNN(BaseModel): def __init__(self, vocab, config): super(RCNN, self).__init__(config) self.vocab = vocab self.n_classes = len(vocab.label2idx) self.build_graph() def build_graph(self): self.layer_placeholder() self.layer_embedding() self.layer_encoder() self.layer_predict() self.init_session() def layer_placeholder(self): self.s = tf.placeholder(tf.int32, [None, self.config.max_sent_len], name="sentence") self.sh = tf.placeholder(tf.int32, [None, self.config.max_sent_len, self.config.max_char_len], name="sentence_char") self.label = tf.placeholder(tf.int32, [None], name="label") self.dropout = tf.placeholder(dtype=tf.float32, shape=[], name="dropout") self.s_mask = tf.cast(self.s, tf.bool) self.s_len = tf.reduce_sum(tf.cast(self.s_mask, tf.int32), axis=1) self.sh_len = tf.reshape(tf.reduce_sum( tf.cast(tf.cast(self.sh, tf.bool), tf.int32), axis=2), [-1]) def layer_embedding(self): with tf.variable_scope('embeddings'): self.word_mat = tf.get_variable( 'word_embeddings', shape=[self.vocab.word_size(), self.vocab.word_embed_dim], initializer=tf.constant_initializer(self.vocab.word_embeddings), trainable=False) self.char_mat = tf.get_variable( 'char_embeddins', shape=[self.vocab.char_size(), self.vocab.char_embed_dim], initializer=tf.constant_initializer(self.vocab.char_embeddings), trainable=False) sh_emb = tf.reshape(tf.nn.embedding_lookup(self.char_mat, self.sh), [self.config.batch_size*self.config.max_sent_len, self.config.max_char_len, self.vocab.char_embed_dim]) sh_emb = Conv(bias=True)(sh_emb, self.config.filter_size, scope="sh_conv") sh_emb = tf.reduce_max(sh_emb, axis=1) sh_emb = tf.reshape(sh_emb, [self.config.batch_size, self.config.max_sent_len, -1]) s_emb = tf.nn.embedding_lookup(self.word_mat, self.s) s_emb = tf.concat([s_emb, sh_emb], axis=2) s_emb = Conv(bias=True)(s_emb, self.config.filter_size, scope="s_proj") self.s_emb = Highway(activation=tf.nn.relu, kernel='conv', dropout=self.dropout)(s_emb, scope='highway') def layer_encoder(self): with tf.variable_scope('cnn_encoder'): outputs = [] for kernel_size in self.config.kernel_sizes: with tf.variable_scope('conv-%d' % kernel_size) as scope: output = tf.layers.conv1d(self.s_emb, self.config.filter_size, kernel_size, padding='SAME', name='conv1d-%d' % kernel_size) output = tf.nn.elu(output) outputs.append(output) self.output = tf.concat(outputs, -1, name='outputs_concate') """ RNN Encoder """ with tf.variable_scope('rnn_encoder'): shape = self.output.get_shape() if self.config.gpu >= 0: rnn_kernel = CudnnRNN else: rnn_kernel = RNN rnn = rnn_kernel(self.config.hidden_dim, shape[0], shape[-1], dropout=self.dropout, kernel='gru') output, _ = rnn(self.output, seq_len=self.s_len) self.output = tf.reduce_max(output, axis=1) def layer_predict(self): with tf.variable_scope('predict'): output = tf.layers.batch_normalization(self.output) h = tf.layers.dense(output, self.config.filter_size//2) h = tf.tanh(h) self.logit = tf.layers.dense(h, self.n_classes) if self.config.loss_type == 'cross_entropy': self.loss = loss.cross_entropy(self.label, self.logit) elif self.config.loss_type == 'focal_loss': self.loss = loss.focal_loss(self.label, self.logit) else: raise NotImplementedError("No loss type named {}".format(self.config.loss_type)) self.predict = tf.argmax(tf.nn.softmax(self.logit), axis=1) self.probs = 
tf.nn.top_k(tf.nn.softmax(self.logit), 1)[0] true_pred = tf.equal(tf.cast(self.label, tf.int64), self.predict) self.accu = tf.reduce_mean(tf.cast(true_pred, tf.float32)) self.add_train_op(self.loss)
MIT License
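A hedged usage sketch of an outer training loop around RCNN.run_epoch above. The vocab/config objects and the `make_batches` helper are placeholders for clfzoo's actual data pipeline, whose construction is not shown in this excerpt.

model = RCNN(vocab, config)                                       # vocab/config come from clfzoo's pipeline (assumed)
for epoch in range(config.epochs):                                # `epochs` is an assumed config field
    train_batches = make_batches(train_set, config.batch_size)    # hypothetical: yields dicts with token_ids / token_char_ids / label_ids
    dev_batches = make_batches(dev_set, config.batch_size)        # hypothetical batch generator
    metrics = model.run_epoch(train_batches, dev_batches, epoch)
    print(f"epoch {epoch}: {metrics}")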
deepmind/jax_verify
jax_verify/src/synthetic_primitives.py
group_posbilinear
python
def group_posbilinear(graph: jax.core.Jaxpr, var_is_bound: VarIsBoundDict, ) -> jax.core.Jaxpr: new_eqns = [] for eqn in graph.eqns: if _is_posbilinear_eqn(eqn, var_is_bound): non_literal_invars = [invar for invar in eqn.invars if isinstance(invar, jax.core.Var)] posbilinear_jaxpr = jax.core.Jaxpr( constvars=[], invars=non_literal_invars, outvars=eqn.outvars, eqns=[eqn]) new_eqns.append(jax.core.new_jaxpr_eqn( posbilinear_jaxpr.invars, posbilinear_jaxpr.outvars, posbilinear_p, {'jax_verify_subgraph': posbilinear_jaxpr, 'jax_verify_keepjvargs': True})) else: new_eqns.append(eqn) return jax.core.Jaxpr(graph.constvars, graph.invars, graph.outvars, new_eqns)
Simplifier identifying the PosBilinear terms in the graph. A PosBilinear equation can be written in the form of: x^T M y where x and y are variable for which we have bound and M is a matrix with positive entries. Args: graph: Jaxpr to simplify var_is_bound: Dict mapping whether a given variable is a bound or not. Returns: Simplified Jaxpr, where all the PosBilinear have been identified.
https://github.com/deepmind/jax_verify/blob/96e4abb160f5022af4bf1aa8bb854822eb45a59b/jax_verify/src/synthetic_primitives.py#L537-L569
import collections import functools from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, TypeVar, Union import jax from jax import lax import jax.numpy as jnp import numpy as np VarIsBoundDict = Dict[jax.core.Var, bool] Tensor = jnp.ndarray T = TypeVar('T') ListNest = Union[T, List[Any]] GraphSimplifier = ListNest[ Callable[[jax.core.Jaxpr, VarIsBoundDict], jax.core.Jaxpr]] SimpleSimplifier = Callable[[jax.core.Jaxpr], jax.core.Jaxpr] def simplify_graph( graph_simplifier: GraphSimplifier, graph: jax.core.Jaxpr, var_is_bound: VarIsBoundDict, ) -> jax.core.Jaxpr: _propagate_var_is_bound(graph, var_is_bound) return _simplify_graph(var_is_bound, graph, graph_simplifier) def _simplify_graph( var_is_bound: VarIsBoundDict, graph: jax.core.Jaxpr, graph_simplifier: GraphSimplifier, ) -> jax.core.Jaxpr: if isinstance(graph_simplifier, list): return functools.reduce( functools.partial(_simplify_graph, var_is_bound), graph_simplifier, graph) else: simplified_graph = graph_simplifier(graph, var_is_bound) for eqn in simplified_graph.eqns: if eqn.primitive == jax.custom_derivatives.custom_jvp_call_jaxpr_p: eqn.params['fun_jaxpr'].jaxpr = _simplify_graph( var_is_bound, eqn.params['fun_jaxpr'].jaxpr, graph_simplifier) elif eqn.primitive == jax.xla.xla_call_p: eqn.params['call_jaxpr'] = _simplify_graph( var_is_bound, eqn.params['call_jaxpr'], graph_simplifier) return simplified_graph capture_float32 = object() class SyntheticPrimitiveSpec: def __init__( self, fn: Callable[..., Tensor], upstream_simplifier: Optional[SimpleSimplifier], primitive: 'FakePrimitive', *arg_shapes: Sequence[int], **params): subscript = lambda key, captures: captures[key] params = {key: functools.partial(subscript, key) if param is capture_float32 else param for key, param in params.items()} capture_keys = [key for key, param in params.items() if callable(param)] placeholder_inputs = list(map(jnp.zeros, arg_shapes)) placeholder_params = {key: 7. for key in capture_keys} make_jaxpr = jax.make_jaxpr(functools.partial(fn, **placeholder_params)) self._graph = make_jaxpr(*placeholder_inputs).jaxpr self._capture_literals = {} for capture_key in capture_keys: alt_params = { key: 8. if key == capture_key else 7. 
for key in capture_keys} make_alt_jaxpr = jax.make_jaxpr(functools.partial(fn, **alt_params)) alt_graph = make_alt_jaxpr(*placeholder_inputs).jaxpr for literal in _differing_literals(self._graph, alt_graph): assert id(literal) not in self._capture_literals self._capture_literals[id(literal)] = capture_key if upstream_simplifier: self._graph = upstream_simplifier(self._graph) self._primitive = primitive self._params = params @property def graph(self) -> jax.core.Jaxpr: return self._graph @property def capture_literals(self) -> Dict[int, str]: return self._capture_literals @property def primitive(self) -> 'FakePrimitive': return self._primitive @property def params(self) -> Dict[str, Any]: return self._params def simplify(self, graph_simplifier: SimpleSimplifier): self._graph = graph_simplifier(self._graph) def _mark_outputs_whether_bounds(eqn, var_is_bound): non_literal_inps = [invar for invar in eqn.invars if not isinstance(invar, jax.core.Literal)] outputs_are_bounds = any(var_is_bound[invar] for invar in non_literal_inps) for outvar in eqn.outvars: var_is_bound[outvar] = outputs_are_bounds def jax_primitive_subgraph(eqn: jax.core.JaxprEqn) -> Optional[jax.core.Jaxpr]: if eqn.primitive == jax.custom_derivatives.custom_jvp_call_jaxpr_p: return eqn.params['fun_jaxpr'].jaxpr elif eqn.primitive == jax.xla.xla_call_p: return eqn.params['call_jaxpr'] def _propagate_var_is_bound(graph: jax.core.Jaxpr, var_is_bound: VarIsBoundDict): for cvar in graph.constvars: var_is_bound[cvar] = False for eqn in graph.eqns: _mark_outputs_whether_bounds(eqn, var_is_bound) eqn_subgraph = jax_primitive_subgraph(eqn) if eqn_subgraph: subgraph_var_is_bound = {} for subgraph_invar, eqn_invar in zip(eqn_subgraph.invars, eqn.invars): if isinstance(eqn_invar, jax.core.Var): subgraph_var_is_bound[subgraph_invar] = var_is_bound[eqn_invar] else: subgraph_var_is_bound[subgraph_invar] = False _propagate_var_is_bound(eqn_subgraph, subgraph_var_is_bound) var_is_bound.update(subgraph_var_is_bound) def detect( synthetic_primitive_specs: Sequence[SyntheticPrimitiveSpec], graph: jax.core.Jaxpr, ) -> jax.core.Jaxpr: new_eqns = [] eqn_idx = 0 while eqn_idx < len(graph.eqns): eqn, eqn_idx = _next_equation(synthetic_primitive_specs, graph, eqn_idx) new_eqns.append(eqn) return jax.core.Jaxpr(graph.constvars, graph.invars, graph.outvars, new_eqns) def _next_equation( synthetic_primitive_specs: Sequence[SyntheticPrimitiveSpec], graph: jax.core.Jaxpr, eqn_idx: int, ) -> Tuple[jax.core.JaxprEqn, int]: for spec in synthetic_primitive_specs: ( spec_matches, match_len, primitive_invars, primitive_outvars, captures ) = _matches(spec.graph, spec.capture_literals, graph, eqn_idx) if spec_matches: sub_jaxpr = jax.core.Jaxpr( constvars=[], invars=primitive_invars, outvars=primitive_outvars, eqns=graph.eqns[eqn_idx:(eqn_idx + match_len)]) spec_params = { key: param(captures) if callable(param) else param for key, param in spec.params.items()} return jax.core.new_jaxpr_eqn( primitive_invars, primitive_outvars, spec.primitive, {'jax_verify_subgraph': sub_jaxpr, **spec_params}, graph.eqns[eqn_idx].source_info), eqn_idx + match_len return graph.eqns[eqn_idx], eqn_idx + 1 def _equal_literal_values(lhs, rhs) -> bool: if isinstance(lhs, jnp.ndarray): return np.all(lhs.item() == rhs.item()) else: return lhs == rhs def _matches( spec: jax.core.Jaxpr, capture_literals: Dict[int, str], graph: jax.core.Jaxpr, eqn_idx: int, ) -> Tuple[ bool, int, Sequence[jax.core.Var], Sequence[jax.core.Var], Dict[str, Any]]: no_match = False, 0, [], [], {} eqn_idx_orig = 
eqn_idx spec_invar_indices = {invar: j for j, invar in enumerate(spec.invars)} graph_vars_by_spec_var = {} captures = {} for spec_eqn in spec.eqns: if eqn_idx >= len(graph.eqns): return no_match graph_eqn = graph.eqns[eqn_idx] eqn_idx += 1 if graph_eqn.primitive != spec_eqn.primitive: return no_match for spec_eqn_invar, graph_eqn_invar in zip( spec_eqn.invars, graph_eqn.invars): if (isinstance(spec_eqn_invar, jax.core.Literal) != isinstance(graph_eqn_invar, jax.core.Literal)): return no_match if isinstance(spec_eqn_invar, jax.core.Literal): if not isinstance(spec_eqn_invar.val, type(graph_eqn_invar.val)): return no_match if id(spec_eqn_invar) in capture_literals: key = capture_literals[id(spec_eqn_invar)] if key in captures and not _equal_literal_values( spec_eqn_invar.item(), captures[key]): return no_match captures[key] = graph_eqn_invar.val elif not _equal_literal_values(spec_eqn_invar.val, graph_eqn_invar.val): return no_match else: if (spec_eqn_invar in spec_invar_indices and spec_eqn_invar not in graph_vars_by_spec_var): graph_vars_by_spec_var[spec_eqn_invar] = graph_eqn_invar if graph_vars_by_spec_var[spec_eqn_invar] != graph_eqn_invar: return no_match if set(spec_eqn.params) != set(graph_eqn.params): return no_match for key in set(spec_eqn.params): spec_param = spec_eqn.params[key] graph_param = graph_eqn.params[key] if key in ('fun_jaxpr', 'call_jaxpr', 'jax_verify_subgraph'): subspec = spec_param.jaxpr if key == 'fun_jaxpr' else spec_param subgraph = graph_param.jaxpr if key == 'fun_jaxpr' else graph_param ( subgraph_matches, _, subgraph_invars, subgraph_outvars, subgraph_captures ) = _matches(subspec, capture_literals, subgraph, 0) captures.update(subgraph_captures) if not subgraph_matches: return no_match if subgraph.invars != subgraph_invars: return no_match if subgraph.outvars != subgraph_outvars: return no_match if any(key in captures and not _equal_literal_values(capture, captures[key]) for key, capture in subgraph_captures.items()): return no_match elif key in ('shape', 'new_sizes'): if len(spec_param) != len(graph_param): return no_match elif not callable(spec_param): if spec_param != graph_param: return no_match graph_vars_by_spec_var.update({ spec_eqn_outvar: graph_eqn_outvar for spec_eqn_outvar, graph_eqn_outvar in zip( spec_eqn.outvars, graph_eqn.outvars)}) graph_invars = [ graph_vars_by_spec_var[spec_invar] for spec_invar in spec.invars] assert all(graph_invar is not None for graph_invar in graph_invars) graph_outvars = [ graph_vars_by_spec_var[spec_outvar] for spec_outvar in spec.outvars] assert all(graph_outvar is not None for graph_outvar in graph_outvars) return True, eqn_idx - eqn_idx_orig, graph_invars, graph_outvars, captures def _differing_literals( graph: jax.core.Jaxpr, alt_graph: jax.core.Jaxpr, ) -> Sequence[jax.core.Literal]: literals = [] assert len(graph.eqns) == len(alt_graph.eqns), 'Different number of equations' for eqn, alt_eqn in zip(graph.eqns, alt_graph.eqns): assert eqn.primitive == alt_eqn.primitive, 'Different primitives' for eqn_invar, alt_eqn_invar in zip(eqn.invars, alt_eqn.invars): assert ( isinstance(eqn_invar, jax.core.Literal) == isinstance(alt_eqn_invar, jax.core.Literal) ), 'Different literal occurrences' if (isinstance(eqn_invar, jax.core.Literal) and not _equal_literal_values(eqn_invar.val, alt_eqn_invar.val)): literals.append(eqn_invar) assert set(eqn.params) == set(alt_eqn.params), 'Different param key sets' for key in set(eqn.params): param = eqn.params[key] alt_param = alt_eqn.params[key] if key in ('fun_jaxpr', 'call_jaxpr', 
'jax_verify_subgraph'): subgraph = param.jaxpr if key == 'fun_jaxpr' else param alt_subgraph = alt_param.jaxpr if key == 'fun_jaxpr' else alt_param literals.extend(_differing_literals(subgraph, alt_subgraph)) return literals def _is_linear_eqn(eqn: jax.core.JaxprEqn, var_is_bound: VarIsBoundDict): subgraph = jax_primitive_subgraph(eqn) if subgraph: grouped_subgraph = group_linear_sequence(subgraph, var_is_bound) return (len(grouped_subgraph.eqns) == 1 and grouped_subgraph.eqns[0].primitive is linear_p) prim = eqn.primitive non_literal_inps = [invar for invar in eqn.invars if not isinstance(invar, jax.core.Literal)] nb_bound_input = sum(var_is_bound[invar] for invar in non_literal_inps) if not any(var_is_bound[outvar] for outvar in eqn.outvars): return False assert len(eqn.outvars) == 1 return (prim in LINEAR_OP or (prim in BILINEAR_OP and nb_bound_input == 1) or (prim is jax.lax.div_p and nb_bound_input == 1 and var_is_bound[eqn.invars[0]])) def _is_posbilinear_eqn(eqn, var_is_bound): subgraph = jax_primitive_subgraph(eqn) if subgraph: grouped_subgraph = group_posbilinear(subgraph, var_is_bound) return (len(grouped_subgraph.eqns) == 1 and grouped_subgraph.eqns[0].primitive is posbilinear_p) prim = eqn.primitive non_literal_inps = [invar for invar in eqn.invars if not isinstance(invar, jax.core.Literal)] nb_bound_input = sum(var_is_bound[invar] for invar in non_literal_inps) return (prim in BILINEAR_OP) and (nb_bound_input == 2) def _find_eqn(eqn_list: List[jax.core.JaxprEqn], var: jax.core.Var) -> int: eqn_idx = 0 for eqn_idx, eqn in enumerate(eqn_list): if eqn.outvars[0] == var: return eqn_idx else: assert False
Apache License 2.0
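A hedged usage sketch for group_posbilinear above: a jaxpr for a plain matrix product is built with jax.make_jaxpr, both inputs are marked as bound variables by hand (normally _propagate_var_is_bound fills this in), and the dot_general equation should come back wrapped in a single posbilinear equation, assuming dot_general is registered in the module's BILINEAR_OP set. Shapes are arbitrary illustrative choices.

import jax
import jax.numpy as jnp

graph = jax.make_jaxpr(lambda x, y: x @ y)(jnp.ones((2, 3)), jnp.ones((3, 4))).jaxpr
var_is_bound = {v: True for v in graph.invars}     # both operands carry bounds
simplified = group_posbilinear(graph, var_is_bound)
print(simplified.eqns)                             # expect one posbilinear equation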
zimeon/ocfl-py
ocfl-sidecar.py
main
python
def main(): args = parse_arguments() logging.basicConfig(level=logging.INFO if args.verbose else logging.WARN) paths = ["."] if len(args.path) == 0 else args.path for path in paths: logging.info("Looking at path %s", path) if os.path.isdir(path): create_sidecar(args, path) else: (directory, filename) = os.path.split(path) if filename == INVENTORY_NAME: create_sidecar(args, directory) else: logging.error("Ignoring path %s with filename that is not inventory.json", path)
Run from command line.
https://github.com/zimeon/ocfl-py/blob/3d696d797aa89a90a2a471d907acff07ef4a364d/ocfl-sidecar.py#L44-L58
import argparse import logging import os.path import ocfl INVENTORY_NAME = "inventory.json" def parse_arguments(): parser = argparse.ArgumentParser(description="Update OCFL inventory sidecar file", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("path", type=str, nargs="*", help="OCFL inventory files or directories containing them") parser.add_argument("--digest", default=None, help="Digest algorithm to use overriding any in inventory") ocfl.add_shared_args(parser) args = parser.parse_args() ocfl.check_shared_args(args) return args def create_sidecar(args, directory): inventory_path = os.path.join(directory, INVENTORY_NAME) if not os.path.isfile(inventory_path): logging.error("Ignoring path %s because there is no inventory file %s.", directory, inventory_path) else: obj = ocfl.Object(path=directory) if args.digest is not None: obj.digest_algorithm = args.digest else: try: obj.parse_inventory() except ocfl.ObjectException as e: logging.warning("Failed to read inventory in directory %s (%s)", directory, e) sidecar = obj.write_inventory_sidecar() logging.info("Written sidecar file %s", sidecar)
MIT License
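A hedged usage sketch for the sidecar update above, calling create_sidecar directly rather than going through the command line; the object path is a placeholder for a real OCFL object directory containing an inventory.json.

import argparse

args = argparse.Namespace(digest='sha512')        # only args.digest is read by create_sidecar
create_sidecar(args, '/path/to/ocfl_object')      # writes the inventory.json.sha512 sidecar next to the inventory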
pytorchlightning/lightning-bolts
pl_bolts/models/rl/dqn_model.py
DQN.train_batch
python
def train_batch( self, ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: episode_reward = 0 episode_steps = 0 while True: self.total_steps += 1 action = self.agent(self.state, self.device) next_state, r, is_done, _ = self.env.step(action[0]) episode_reward += r episode_steps += 1 exp = Experience(state=self.state, action=action[0], reward=r, done=is_done, new_state=next_state) self.agent.update_epsilon(self.global_step) self.buffer.append(exp) self.state = next_state if is_done: self.done_episodes += 1 self.total_rewards.append(episode_reward) self.total_episode_steps.append(episode_steps) self.avg_rewards = float(np.mean(self.total_rewards[-self.avg_reward_len :])) self.state = self.env.reset() episode_steps = 0 episode_reward = 0 states, actions, rewards, dones, new_states = self.buffer.sample(self.batch_size) for idx, _ in enumerate(dones): yield states[idx], actions[idx], rewards[idx], dones[idx], new_states[idx] if self.total_steps % self.batches_per_epoch == 0: break
Contains the logic for generating a new batch of data to be passed to the DataLoader. Returns: yields an Experience tuple containing the state, action, reward, done and next_state.
https://github.com/pytorchlightning/lightning-bolts/blob/f4f6d53a039c521f3441750fa5297c7694320119/pl_bolts/models/rl/dqn_model.py#L210-L252
import argparse from collections import OrderedDict from typing import Dict, List, Optional, Tuple import numpy as np import torch from pytorch_lightning import LightningModule, Trainer, seed_everything from pytorch_lightning.callbacks import ModelCheckpoint from torch import Tensor, optim from torch.optim.optimizer import Optimizer from torch.utils.data import DataLoader from pl_bolts.datamodules.experience_source import Experience, ExperienceSourceDataset from pl_bolts.losses.rl import dqn_loss from pl_bolts.models.rl.common.agents import ValueAgent from pl_bolts.models.rl.common.gym_wrappers import make_environment from pl_bolts.models.rl.common.memory import MultiStepBuffer from pl_bolts.models.rl.common.networks import CNN from pl_bolts.utils import _GYM_AVAILABLE, _PL_GREATER_EQUAL_1_4 from pl_bolts.utils.warnings import warn_missing_pkg if _GYM_AVAILABLE: from gym import Env else: warn_missing_pkg("gym") Env = object class DQN(LightningModule): def __init__( self, env: str, eps_start: float = 1.0, eps_end: float = 0.02, eps_last_frame: int = 150000, sync_rate: int = 1000, gamma: float = 0.99, learning_rate: float = 1e-4, batch_size: int = 32, replay_size: int = 100000, warm_start_size: int = 10000, avg_reward_len: int = 100, min_episode_reward: int = -21, seed: int = 123, batches_per_epoch: int = 1000, n_steps: int = 1, **kwargs, ): super().__init__() self.exp = None self.env = self.make_environment(env, seed) self.test_env = self.make_environment(env) self.obs_shape = self.env.observation_space.shape self.n_actions = self.env.action_space.n self.buffer = None self.dataset = None self.net = None self.target_net = None self.build_networks() self.agent = ValueAgent( self.net, self.n_actions, eps_start=eps_start, eps_end=eps_end, eps_frames=eps_last_frame, ) self.sync_rate = sync_rate self.gamma = gamma self.lr = learning_rate self.batch_size = batch_size self.replay_size = replay_size self.warm_start_size = warm_start_size self.batches_per_epoch = batches_per_epoch self.n_steps = n_steps self.save_hyperparameters() self.total_episode_steps = [0] self.total_rewards = [0] self.done_episodes = 0 self.total_steps = 0 self.avg_reward_len = avg_reward_len for _ in range(avg_reward_len): self.total_rewards.append(torch.tensor(min_episode_reward, device=self.device)) self.avg_rewards = float(np.mean(self.total_rewards[-self.avg_reward_len :])) self.state = self.env.reset() def run_n_episodes(self, env, n_epsiodes: int = 1, epsilon: float = 1.0) -> List[int]: total_rewards = [] for _ in range(n_epsiodes): episode_state = env.reset() done = False episode_reward = 0 while not done: self.agent.epsilon = epsilon action = self.agent(episode_state, self.device) next_state, reward, done, _ = env.step(action[0]) episode_state = next_state episode_reward += reward total_rewards.append(episode_reward) return total_rewards def populate(self, warm_start: int) -> None: if warm_start > 0: self.state = self.env.reset() for _ in range(warm_start): self.agent.epsilon = 1.0 action = self.agent(self.state, self.device) next_state, reward, done, _ = self.env.step(action[0]) exp = Experience(state=self.state, action=action[0], reward=reward, done=done, new_state=next_state) self.buffer.append(exp) self.state = next_state if done: self.state = self.env.reset() def build_networks(self) -> None: self.net = CNN(self.obs_shape, self.n_actions) self.target_net = CNN(self.obs_shape, self.n_actions) def forward(self, x: Tensor) -> Tensor: output = self.net(x) return output
Apache License 2.0
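A minimal usage sketch for the DQN entry above, assuming an Atari environment id and trainer settings that are not taken from the repository:

from pytorch_lightning import Trainer
from pl_bolts.models.rl.dqn_model import DQN

# Environment id and hyperparameters here are placeholders, not repo defaults.
model = DQN("PongNoFrameskip-v4", batch_size=32, warm_start_size=1000)
trainer = Trainer(max_epochs=1)
trainer.fit(model)  # train_batch() above yields the experience tuples consumed during fitting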
openstack/tempest
tempest/lib/services/identity/v3/groups_client.py
GroupsClient.delete_group_user
python
def delete_group_user(self, group_id, user_id): resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id)) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body)
Delete user in group.
https://github.com/openstack/tempest/blob/d458bf329739ae7b7652d329e6415ad6ba54e490/tempest/lib/services/identity/v3/groups_client.py#L106-L110
from urllib import parse as urllib from oslo_serialization import jsonutils as json from tempest.lib.common import rest_client class GroupsClient(rest_client.RestClient): api_version = "v3" def create_group(self, **kwargs): post_body = json.dumps({'group': kwargs}) resp, body = self.post('groups', post_body) self.expected_success(201, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def show_group(self, group_id): resp, body = self.get('groups/%s' % group_id) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_groups(self, **params): url = 'groups' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def update_group(self, group_id, **kwargs): post_body = json.dumps({'group': kwargs}) resp, body = self.patch('groups/%s' % group_id, post_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def delete_group(self, group_id): resp, body = self.delete('groups/%s' % group_id) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def add_group_user(self, group_id, user_id): resp, body = self.put('groups/%s/users/%s' % (group_id, user_id), None) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def list_group_users(self, group_id, **params): url = 'groups/%s/users' % group_id if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body)
Apache License 2.0
ufora/ufora
ufora/util/TypeAwareComparison.py
typecmp
python
def typecmp(self, other, ownTypeComp): if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__) return ownTypeComp(self, other)
Compares objects of varying types. If they are of different types, it returns the lexical comparison of their type strings. Otherwise it uses the provided type comparison callable.
https://github.com/ufora/ufora/blob/04db96ab049b8499d6d6526445f4f9857f1b6c7e/ufora/util/TypeAwareComparison.py#L16-L23
Apache License 2.0
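A small illustrative sketch (Python 2, since typecmp relies on the built-in cmp) of a class that delegates ordering to typecmp; the Interval class is hypothetical:

from ufora.util.TypeAwareComparison import typecmp

class Interval(object):
    # Hypothetical example class; orders by class first, then by endpoints.
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi

    def __cmp__(self, other):
        return typecmp(self, other,
                       lambda a, b: cmp((a.lo, a.hi), (b.lo, b.hi)))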
ronreiter/interactive-tutorials
markdown/extensions/fenced_code.py
FencedCodeExtension.extendMarkdown
python
def extendMarkdown(self, md, md_globals): md.preprocessors.add('fenced_code_block', FencedBlockPreprocessor(md), "_begin")
Add FencedBlockPreprocessor to the Markdown instance.
https://github.com/ronreiter/interactive-tutorials/blob/aee01e7198f454fc8ca37b2f9575f7d12b94eced/markdown/extensions/fenced_code.py#L76-L81
import markdown, re FENCED_BLOCK_RE = re.compile( r'(?P<fence>^~{3,})[ ]*(\{?\.(?P<lang>[a-zA-Z0-9_-]*)\}?)?[ ]*\n(?P<code>.*?)(?P=fence)[ ]*$', re.MULTILINE|re.DOTALL ) CODE_WRAP = '<pre><code%s>%s</code></pre>' LANG_TAG = ' class="%s"' class FencedCodeExtension(markdown.Extension):
Apache License 2.0
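A hedged sketch of registering the extension above with a Markdown 2.x-style API; the sample text is made up:

import markdown
from markdown.extensions.fenced_code import FencedCodeExtension

text = "~~~.python\nprint('hi')\n~~~"
# extendMarkdown() above is invoked when the extension instance is registered.
html = markdown.markdown(text, extensions=[FencedCodeExtension()])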
sonibla/pytorch_keras_converter
pytorch_keras_converter/utility/t2k_equivalents/batchnorm.py
BatchNorm2d
python
def BatchNorm2d(model, file=False, weights=True): if keras is None: raise ImportError("Could not import keras. Conversion failed !") pytorchLayer = model.equivalent['torch'] name = model.completeName() argumentsBatchNorm = {'axis': 1, 'momentum': pytorchLayer.momentum, 'epsilon': pytorchLayer.eps, 'center': pytorchLayer.affine, 'scale': pytorchLayer.affine, 'input_shape': model.input_shape, 'name': name} if weights: parameters = dict() for key, val in dict(pytorchLayer.state_dict()).items(): parameters[key] = val.detach().numpy() paramList = [parameters['weight'], parameters['bias'], parameters['running_mean'], parameters['running_var']] if not file: BatchNormLayer = keras.layers.BatchNormalization(**argumentsBatchNorm) kerasLayer = keras.Sequential() kerasLayer.add(BatchNormLayer) if weights: kerasLayer.layers[0].set_weights(paramList) return kerasLayer else: outstr = 'keras.layers.BatchNormalization(' for arg, val in argumentsBatchNorm.items(): outstr = outstr + arg + '=' + str(val) + ', ' outstr = outstr[:-2] + ')' return outstr
Converts a torch.nn.BatchNorm2d layer Arguments: -model: A LayerRepresentation object of the layer BatchNorm2d to convert -file (bool): If we want to write the equivalent in a python file -weights (bool): Also convert weights Raises: -ImportError: If Keras import failed Returns: Keras equivalent. If file is True, returns as a str to put in a python file Else, return the keras layer
https://github.com/sonibla/pytorch_keras_converter/blob/21925b67b6eb3cbbfa8eb6d33f682d57dafd357d/pytorch_keras_converter/utility/t2k_equivalents/batchnorm.py#L10-L72
try: import tensorflow.keras as keras except ImportError: try: import keras except ImportError: keras = None
MIT License
gridai-labs/aavae
src/datamodules/cifar10.py
CIFAR10DataModule.train_dataloader
python
def train_dataloader(self): transforms = self._default_transforms() if self.train_transforms is None else self.train_transforms dataset = self.DATASET(self.data_dir, train=True, download=False, transform=transforms, **self.extra_args) train_length = len(dataset) dataset_train, _ = random_split( dataset, [train_length - self.val_split, self.val_split], generator=torch.Generator().manual_seed(self.seed) ) loader = DataLoader( dataset_train, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, drop_last=True, pin_memory=True ) return loader
CIFAR train set with a subset removed to use for validation.
https://github.com/gridai-labs/aavae/blob/87692a5d0383169b6aae35db6409d6261f799e1e/src/datamodules/cifar10.py#L61-L82
import os from typing import Optional, Sequence import torch from pytorch_lightning import LightningDataModule from torch.utils.data import DataLoader, random_split import torchvision.transforms as transforms from torchvision.datasets import CIFAR10 from src.datamodules import cifar10_normalization class CIFAR10DataModule(LightningDataModule): name = 'cifar10' extra_args = {} def __init__( self, data_dir: Optional[str] = None, val_split: int = 5000, num_workers: int = 16, batch_size: int = 32, seed: int = 42, *args, **kwargs, ): super().__init__(*args, **kwargs) self.dims = (3, 32, 32) self.DATASET = CIFAR10 self.val_split = val_split self.num_workers = num_workers self.batch_size = batch_size self.seed = seed self.data_dir = data_dir if data_dir is not None else os.getcwd() self.num_samples = 50000 - val_split @property def num_classes(self): return 10 def prepare_data(self): self.DATASET(self.data_dir, train=True, download=True, transform=transforms.ToTensor(), **self.extra_args) self.DATASET(self.data_dir, train=False, download=True, transform=transforms.ToTensor(), **self.extra_args)
Apache License 2.0
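A minimal sketch of driving the datamodule above; the data path and batch size are placeholders:

from src.datamodules.cifar10 import CIFAR10DataModule

dm = CIFAR10DataModule(data_dir="./data", val_split=5000, batch_size=256, num_workers=4)
dm.prepare_data()                # downloads CIFAR10 once
loader = dm.train_dataloader()   # 45,000-sample training split, shuffled, drop_last=True
images, labels = next(iter(loader))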
shsiders/oclubs
oclubs/blueprints/userblueprint.py
personalsubmitinfo
python
def personalsubmitinfo(): if request.form['name']: current_user.nickname = request.form['name'] current_user.email = request.form['email'] phone = request.form['phone'] try: phone = int(phone) except ValueError: phone = None current_user.phone = phone if 'picture' in request.form: pic = int(request.form['picture']) if -pic in range(1, 21): current_user.picture = Upload(pic) if 'receive_email' in request.form: current_user.set_preference('receive_email', True) else: current_user.set_preference('receive_email', False) flash('Your information has been successfully changed.', 'status_info') return redirect(url_for('.personal'))
Change user's information in database
https://github.com/shsiders/oclubs/blob/1003f5079fbea1415db1c70e3aabfc2e1afd4236/oclubs/blueprints/userblueprint.py#L88-L109
from __future__ import absolute_import, unicode_literals from collections import defaultdict from datetime import date from flask import ( Blueprint, render_template, url_for, request, redirect, abort, flash, jsonify ) from flask_login import current_user, login_required, fresh_login_required from oclubs.objs import User, Club, Upload from oclubs.enums import UserType, ActivityTime from oclubs.shared import ( special_access_required, download_xlsx, render_email_template, Pagination, fail, read_xlsx ) from oclubs.exceptions import PasswordTooShort from oclubs.access import siteconfig, email userblueprint = Blueprint('userblueprint', __name__) @userblueprint.route('/') @login_required def personal(): pictures = [Upload(-num) for num in range(1, 21)] allow_club_creation = siteconfig.get_config('allow_club_creation') receive_email = current_user.get_preference('receive_email') if current_user.type == UserType.STUDENT: clubs = current_user.clubs attendances = current_user.attendance cas = defaultdict(float) for attendance in attendances: cas[attendance.club] += attendance.cas for club in clubs: if club.is_active: cas[club] meetings_obj = current_user.activities_reminder( [ActivityTime.NOON, ActivityTime.AFTERSCHOOL]) meetings = [] meetings.extend([meeting for meeting in meetings_obj]) acts_obj = current_user.activities_reminder([ActivityTime.UNKNOWN, ActivityTime.HONGMEI, ActivityTime.OTHERS]) activities = [] activities.extend([act for act in acts_obj]) leader_club = filter(lambda club_obj: current_user == club_obj.leader, clubs) info = {} for club in clubs: info[club.name] = club.activities()[0] if club.activities() else None return render_template('user/student.html.j2', pictures=pictures, clubs=clubs, info=info, cas=cas, meetings=meetings, activities=activities, leader_club=leader_club, allow_club_creation=allow_club_creation, receive_email=receive_email, is_user=True) else: years = (lambda m: map(lambda n: m + n, range(2)))(date.today().year) return render_template('user/admin.html.j2', pictures=pictures, years=years, receive_email=receive_email, is_user=True) @userblueprint.route('/submit_info', methods=['POST']) @login_required
MIT License
python-diamond/diamond
src/collectors/netapp/netappDisk.py
netappDiskCol.__init__
python
def __init__(self, device, ip, user, password, parent): self.device = device self.ip = ip self.netapp_user = user self.netapp_password = password self.path_prefix = parent[0] self.publish_metric = parent[1] self.log = parent[2] self._netapp_login() disk_xml = self.get_netapp_elem( NaElement('disk-list-info'), 'disk-details') storage_disk_xml = self.get_netapp_elem( NaElement('storage-disk-get-iter'), 'attributes-list') self.zero_disk(disk_xml) self.spare_disk(disk_xml) self.maintenance_center(storage_disk_xml) self.consistency_point() self.agr_busy()
Collects our metrics for our NetApp filer.
https://github.com/python-diamond/diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/netapp/netappDisk.py#L45-L69
from __future__ import print_function import diamond.collector import time from diamond.metric import Metric try: import xml.etree.ElementTree as ET except ImportError: import cElementTree as ET try: from netappsdk.NaServer import * from netappsdk.NaElement import * netappsdk = 0 except ImportError: netappsdk = 1 __author__ = 'peter@phyn3t.com' class netappDiskCol(object):
MIT License
california-planet-search/radvel
radvel/plot/mcmc_plots.py
AutoPlot.plot
python
def plot(self): fig = pl.figure(figsize=(6, 4)) pl.scatter(self.auto['autosamples'], self.auto['automin'], color = 'blue', label='Minimum Autocorrelation Time') pl.scatter(self.auto['autosamples'], self.auto['automean'], color = 'black', label='Mean Autocorrelation Time') pl.scatter(self.auto['autosamples'], self.auto['automax'], color = 'red', label='Maximum Autocorrelation Time') pl.plot(self.auto['autosamples'], self.auto['autosamples']/self.auto['factor'][0], linestyle=':', color='gray', label='Autocorrelation Factor Criterion (N/{})'.format(self.auto['factor'][0])) pl.xlim(self.auto['autosamples'].min(), self.auto['autosamples'].max()) if (self.auto['autosamples']/self.auto['factor']).max() > self.auto['automax'].max(): pl.ylim(self.auto['automin'].min(), (self.auto['autosamples']/self.auto['factor']).max()) else: pl.ylim(self.auto['automin'].min(), self.auto['automax'].max()) pl.xlabel('Steps per Parameter') pl.ylabel('Autocorrelation Time') pl.legend() fig.tight_layout() if self.saveplot is not None: fig.savefig(self.saveplot, dpi=150) print("Auto plot saved to %s" % self.saveplot) else: fig.show()
Make the autocorrelation plot and either save or display it.
https://github.com/california-planet-search/radvel/blob/c68b8893eb999fa3a870f4a18608e0312d83c723/radvel/plot/mcmc_plots.py#L92-L118
import numpy as np import corner from matplotlib.backends.backend_pdf import PdfPages from matplotlib import pyplot as pl from matplotlib import rcParams from radvel import plot class TrendPlot(object): def __init__(self, post, chains, nwalkers, nensembles, outfile=None): self.chains = chains self.outfile = outfile self.nwalkers = nwalkers self.nensembles = nensembles self.labels = sorted([k for k in post.params.keys() if post.params[k].vary]) self.texlabels = [post.params.tex_labels().get(l, l) for l in self.labels] self.colors = [plot.cmap(x) for x in np.linspace(0.05, 0.95, nwalkers)] def plot(self): with PdfPages(self.outfile) as pdf: for param, tex in zip(self.labels, self.texlabels): flatchain = self.chains[param].values wchain = flatchain.reshape((self.nwalkers, self.nensembles, -1)) _ = pl.figure(figsize=(18, 10)) for w in range(self.nwalkers): for e in range(self.nensembles): pl.plot( wchain[w][e], '.', rasterized=True, color=self.colors[w], markersize=4 ) pl.xlim(0, wchain.shape[2]) pl.xlabel('Step Number') try: pl.ylabel(tex) except ValueError: pl.ylabel(param) ax = pl.gca() ax.set_rasterized(True) pdf.savefig() pl.close() print("Trend plot saved to %s" % self.outfile) class AutoPlot(object): def __init__(self, auto, saveplot=None): self.auto = auto self.saveplot = saveplot
MIT License
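A hedged sketch of calling AutoPlot.plot(); plot() only indexes auto by the keys shown above, so a dict of arrays with fabricated numbers is enough to exercise it:

import numpy as np
from radvel.plot.mcmc_plots import AutoPlot

auto = {
    'autosamples': np.array([1000, 2000, 4000, 8000]),   # fabricated placeholder values
    'automin': np.array([20.0, 22.0, 25.0, 26.0]),
    'automean': np.array([30.0, 33.0, 35.0, 36.0]),
    'automax': np.array([45.0, 48.0, 50.0, 51.0]),
    'factor': np.array([50, 50, 50, 50]),
}
AutoPlot(auto, saveplot='autocorr.pdf').plot()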
learningequality/ricecooker
ricecooker/commands.py
process_tree_files
python
def process_tree_files(tree): config.LOGGER.info("Processing content...") files_to_diff = tree.process_tree(tree.channel) tree.check_for_files_failed() return files_to_diff, config.FAILED_FILES
process_tree_files: Download files from nodes Args: tree (ChannelManager): manager to handle communication to Kolibri Studio Returns: None
https://github.com/learningequality/ricecooker/blob/eb74437b489b79aa2151b64394cefba500a15fc7/ricecooker/commands.py#L243-L253
import json import random import requests from requests.exceptions import HTTPError import sys import webbrowser import os import csv from . import config, __version__ from .classes.nodes import ChannelNode from .managers.progress import RestoreManager, Status from .managers.tree import ChannelManager try: input = raw_input except NameError: pass def uploadchannel_wrapper(chef, args, options): args_and_options = args.copy() args_and_options.update(options) uploadchannel(chef, **args_and_options) def uploadchannel(chef, command='uploadchannel', update=False, thumbnails=False, download_attempts=3, resume=False, step=Status.LAST.name, token="#", prompt=False, publish=False, compress=False, stage=False, **kwargs): config.UPDATE = update config.COMPRESS = chef.get_setting('compress-videos', False) config.THUMBNAILS = chef.get_setting('generate-missing-thumbnails', False) config.STAGE = stage config.PUBLISH = publish config.DOWNLOAD_SESSION.mount('http://', requests.adapters.HTTPAdapter(max_retries=int(download_attempts))) config.DOWNLOAD_SESSION.mount('https://', requests.adapters.HTTPAdapter(max_retries=int(download_attempts))) config.init_file_mapping_store() if not command == 'dryrun': username, token = authenticate_user(token) config.LOGGER.info("Logged in with username {0}".format(username)) check_version_number() else: username = '' token = '' config.LOGGER.info("\n\n***** Starting channel build process *****\n\n") config.PROGRESS_MANAGER = RestoreManager() if (not resume or not config.PROGRESS_MANAGER.check_for_session()) and step.upper() != Status.DONE.name: config.PROGRESS_MANAGER.init_session() else: if resume or prompt_yes_or_no('Previous session detected. Would you like to resume your last session?'): config.LOGGER.info("Resuming your last session...") step = Status.LAST.name if step is None else step config.PROGRESS_MANAGER = config.PROGRESS_MANAGER.load_progress(step.upper()) else: config.PROGRESS_MANAGER.init_session() if hasattr(chef, 'download_content'): chef.download_content() metadata_dict = chef.load_channel_metadata_from_csv() if config.PROGRESS_MANAGER.get_status_val() <= Status.CONSTRUCT_CHANNEL.value: config.LOGGER.info("Calling construct_channel... 
") channel = chef.construct_channel(**kwargs) if 'sample' in kwargs and kwargs['sample']: channel = select_sample_nodes(channel, size=kwargs['sample']) config.PROGRESS_MANAGER.set_channel(channel) channel = config.PROGRESS_MANAGER.channel if config.PROGRESS_MANAGER.get_status_val() <= Status.CREATE_TREE.value: config.PROGRESS_MANAGER.set_tree(create_initial_tree(channel)) tree = config.PROGRESS_MANAGER.tree if config.PROGRESS_MANAGER.get_status_val() <= Status.DOWNLOAD_FILES.value: config.LOGGER.info("") config.LOGGER.info("Downloading files...") config.PROGRESS_MANAGER.set_files(*process_tree_files(tree)) chef.apply_modifications(channel, metadata_dict) chef.save_channel_tree_as_json(channel) chef.save_channel_metadata_as_csv(channel) if command == 'dryrun': config.LOGGER.info('Command is dryrun so we are not uploading chanel.') return files_to_diff = config.PROGRESS_MANAGER.files_downloaded config.FAILED_FILES = config.PROGRESS_MANAGER.files_failed if config.PROGRESS_MANAGER.get_status_val() <= Status.GET_FILE_DIFF.value: config.LOGGER.info("") config.LOGGER.info("Getting file diff...") config.PROGRESS_MANAGER.set_diff(get_file_diff(tree, files_to_diff)) file_diff = config.PROGRESS_MANAGER.file_diff tree.uploaded_files = config.PROGRESS_MANAGER.files_uploaded if config.PROGRESS_MANAGER.get_status_val() <= Status.UPLOADING_FILES.value: config.LOGGER.info("") config.LOGGER.info("Uploading files...") config.PROGRESS_MANAGER.set_uploaded(upload_files(tree, file_diff)) if config.PROGRESS_MANAGER.get_status_val() <= Status.UPLOAD_CHANNEL.value: config.LOGGER.info("") config.LOGGER.info("Creating channel...") config.PROGRESS_MANAGER.set_channel_created(*create_tree(tree)) channel_link = config.PROGRESS_MANAGER.channel_link channel_id = config.PROGRESS_MANAGER.channel_id if config.PUBLISH and config.PROGRESS_MANAGER.get_status_val() <= Status.PUBLISH_CHANNEL.value: config.LOGGER.info("") config.LOGGER.info("Publishing channel...") publish_tree(tree, channel_id) config.PROGRESS_MANAGER.set_published() config.LOGGER.info("\n\nDONE: Channel created at {0}\n".format(channel_link)) if prompt and prompt_yes_or_no('Would you like to open your channel now?'): config.LOGGER.info("Opening channel... 
") webbrowser.open_new_tab(channel_link) config.PROGRESS_MANAGER.set_done() return channel_link def authenticate_user(token): config.SESSION.headers.update({"Authorization": "Token {0}".format(token)}) auth_endpoint = config.authentication_url() try: response = config.SESSION.post(auth_endpoint) response.raise_for_status() user = json.loads(response._content.decode("utf-8")) return user['username'], token except HTTPError: config.LOGGER.error("Studio token rejected by server " + auth_endpoint) sys.exit() def check_version_number(): response = config.SESSION.post(config.check_version_url(), data=json.dumps({"version": __version__})) response.raise_for_status() result = json.loads(response._content.decode('utf-8')) if result['status'] == 0: config.LOGGER.info(result['message']) elif result['status'] == 1: config.LOGGER.warning(result['message']) elif result['status'] == 2: config.LOGGER.error(result['message']) if not prompt_yes_or_no("Continue anyways?"): sys.exit() else: config.LOGGER.error(result['message']) sys.exit() def prompt_yes_or_no(message): user_input = input("{} [y/n]:".format(message)).lower() if user_input.startswith("y"): return True elif user_input.startswith("n"): return False else: return prompt_yes_or_no(message) def create_initial_tree(channel): config.LOGGER.info(" Setting up initial channel structure... ") tree = ChannelManager(channel) config.LOGGER.info(" Validating channel structure...") channel.print_tree() tree.validate() config.LOGGER.info(" Tree is valid") return tree
MIT License
ambron60/l-system-drawing
venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/req/req_install.py
InstallRequirement.from_line
python
def from_line( cls, name, comes_from=None, isolated=False, options=None, wheel_cache=None, constraint=False): from pip.index import Link if is_url(name): marker_sep = '; ' else: marker_sep = ';' if marker_sep in name: name, markers = name.split(marker_sep, 1) markers = markers.strip() if not markers: markers = None else: markers = Marker(markers) else: markers = None name = name.strip() req = None path = os.path.normpath(os.path.abspath(name)) link = None extras = None if is_url(name): link = Link(name) else: p, extras = _strip_extras(path) if (os.path.isdir(p) and (os.path.sep in name or name.startswith('.'))): if not is_installable_dir(p): raise InstallationError( "Directory %r is not installable. File 'setup.py' " "not found." % name ) link = Link(path_to_url(p)) elif is_archive_file(p): if not os.path.isfile(p): logger.warning( 'Requirement %r looks like a filename, but the ' 'file does not exist', name ) link = Link(path_to_url(p)) if link: if link.scheme == 'file' and re.search(r'\.\./', link.url): link = Link( path_to_url(os.path.normpath(os.path.abspath(link.path)))) if link.is_wheel: wheel = Wheel(link.filename) req = "%s==%s" % (wheel.name, wheel.version) else: req = link.egg_fragment else: req = name options = options if options else {} res = cls(req, comes_from, link=link, markers=markers, isolated=isolated, options=options, wheel_cache=wheel_cache, constraint=constraint) if extras: res.extras = _safe_extras( Requirement('placeholder' + extras).extras) return res
Creates an InstallRequirement from a name, which might be a requirement, directory containing 'setup.py', filename, or URL.
https://github.com/ambron60/l-system-drawing/blob/3a4ecface1d862b87acd58ff2d5303cd4475370b/venv/Lib/site-packages/pip-9.0.1-py3.5.egg/pip/req/req_install.py#L164-L241
from __future__ import absolute_import import logging import os import re import shutil import sys import tempfile import traceback import warnings import zipfile from distutils import sysconfig from distutils.util import change_root from email.parser import FeedParser from pip._vendor import pkg_resources, six from pip._vendor.packaging import specifiers from pip._vendor.packaging.markers import Marker from pip._vendor.packaging.requirements import InvalidRequirement, Requirement from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.packaging.version import Version, parse as parse_version from pip._vendor.six.moves import configparser import pip.wheel from pip.compat import native_str, get_stdlib, WINDOWS from pip.download import is_url, url_to_path, path_to_url, is_archive_file from pip.exceptions import ( InstallationError, UninstallationError, ) from pip.locations import ( bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user, ) from pip.utils import ( display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir, dist_in_usersite, dist_in_site_packages, egg_link_path, call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir, get_installed_version, normalize_path, dist_is_local, ) from pip.utils.hashes import Hashes from pip.utils.deprecation import RemovedInPip10Warning from pip.utils.logging import indent_log from pip.utils.setuptools_build import SETUPTOOLS_SHIM from pip.utils.ui import open_spinner from pip.req.req_uninstall import UninstallPathSet from pip.vcs import vcs from pip.wheel import move_wheel_files, Wheel logger = logging.getLogger(__name__) operators = specifiers.Specifier._operators.keys() def _strip_extras(path): m = re.match(r'^(.+)(\[[^\]]+\])$', path) extras = None if m: path_no_extras = m.group(1) extras = m.group(2) else: path_no_extras = path return path_no_extras, extras def _safe_extras(extras): return set(pkg_resources.safe_extra(extra) for extra in extras) class InstallRequirement(object): def __init__(self, req, comes_from, source_dir=None, editable=False, link=None, as_egg=False, update=True, pycompile=True, markers=None, isolated=False, options=None, wheel_cache=None, constraint=False): self.extras = () if isinstance(req, six.string_types): try: req = Requirement(req) except InvalidRequirement: if os.path.sep in req: add_msg = "It looks like a path. Does it exist ?" elif '=' in req and not any(op in req for op in operators): add_msg = "= is not a valid operator. Did you mean == ?" 
else: add_msg = traceback.format_exc() raise InstallationError( "Invalid requirement: '%s'\n%s" % (req, add_msg)) self.extras = _safe_extras(req.extras) self.req = req self.comes_from = comes_from self.constraint = constraint self.source_dir = source_dir self.editable = editable self._wheel_cache = wheel_cache self.link = self.original_link = link self.as_egg = as_egg if markers is not None: self.markers = markers else: self.markers = req and req.marker self._egg_info_path = None self.satisfied_by = None self.conflicts_with = None self._temp_build_dir = None self._ideal_build_dir = None self.update = update self.install_succeeded = None self.uninstalled = None self.nothing_to_uninstall = False self.use_user_site = False self.target_dir = None self.options = options if options else {} self.pycompile = pycompile self.prepared = False self.isolated = isolated @classmethod def from_editable(cls, editable_req, comes_from=None, default_vcs=None, isolated=False, options=None, wheel_cache=None, constraint=False): from pip.index import Link name, url, extras_override = parse_editable( editable_req, default_vcs) if url.startswith('file:'): source_dir = url_to_path(url) else: source_dir = None res = cls(name, comes_from, source_dir=source_dir, editable=True, link=Link(url), constraint=constraint, isolated=isolated, options=options if options else {}, wheel_cache=wheel_cache) if extras_override is not None: res.extras = _safe_extras(extras_override) return res @classmethod
MIT License
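A sketch exercising the classmethod above with a specifier plus environment marker, and with a local archive path; both inputs are illustrative:

from pip.req.req_install import InstallRequirement

req = InstallRequirement.from_line("requests>=2.18; python_version >= '3'")
print(req.req, req.markers)        # parsed Requirement and Marker

local = InstallRequirement.from_line("./downloads/pip-9.0.1.tar.gz")
print(local.link)                  # file:// Link to the archive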
emontnemery/hatasmota
hatasmota/entity.py
TasmotaAvailability.get_availability_topics
python
def get_availability_topics(self) -> dict: def availability_message_received(msg: ReceiveMessage) -> None: if msg.payload == self._cfg.availability_online: self.poll_status() if not self._on_availability_callback: return if msg.payload == self._cfg.availability_online: self._on_availability_callback(True) if msg.payload == self._cfg.availability_offline: self._on_availability_callback(False) topics = { "availability_topic": { "event_loop_safe": True, "msg_callback": availability_message_received, "topic": self._cfg.availability_topic, } } return topics
Return MQTT topics to subscribe to for availability state.
https://github.com/emontnemery/hatasmota/blob/184d7df5004f8783f088076562953f2f57141b73/hatasmota/entity.py#L101-L122
from __future__ import annotations import logging from typing import Any, Callable import attr from .mqtt import ReceiveMessage, TasmotaMQTTClient _LOGGER = logging.getLogger(__name__) @attr.s(slots=True, frozen=True) class TasmotaEntityConfig: endpoint: str = attr.ib() idx: int | str | None = attr.ib() friendly_name: str | None = attr.ib() mac: str = attr.ib() platform: str = attr.ib() poll_payload: str = attr.ib() poll_topic: str = attr.ib() @property def unique_id(self) -> str: return f"{self.mac}_{self.platform}_{self.endpoint}_{self.idx}" @attr.s(slots=True, frozen=True) class TasmotaAvailabilityConfig(TasmotaEntityConfig): availability_topic: str = attr.ib() availability_offline: str = attr.ib() availability_online: str = attr.ib() class TasmotaEntity: def __init__(self, config: TasmotaEntityConfig, mqtt_client: TasmotaMQTTClient): self._cfg = config self._mqtt_client = mqtt_client self._on_state_callback: Callable | None = None super().__init__() def config_same(self, new_config: TasmotaEntityConfig) -> bool: return self._cfg == new_config def config_update(self, new_config: TasmotaEntityConfig) -> None: self._cfg = new_config def poll_status(self) -> None: self._mqtt_client.publish_debounced( self._cfg.poll_topic, self._cfg.poll_payload ) def set_on_state_callback(self, on_state_callback: Callable) -> None: self._on_state_callback = on_state_callback async def subscribe_topics(self) -> None: async def unsubscribe_topics(self) -> None: @property def mac(self) -> str: return self._cfg.mac @property def name(self) -> str | None: return self._cfg.friendly_name @property def unique_id(self) -> str: return self._cfg.unique_id class TasmotaAvailability(TasmotaEntity): _cfg: TasmotaAvailabilityConfig def __init__(self, **kwds: Any): self._on_availability_callback: Callable | None = None super().__init__(**kwds)
MIT License
hyperledger-archives/indy-agent
python/serializer/__init__.py
BaseSerializer.deserialize
python
def deserialize(dump: bytes) -> Message: raise NotImplementedError("Unpack method in serializer module \ is not implemented. Use the methods contained in a submodule of \ serializer, such as json_serializer.")
Deserialize to Message.
https://github.com/hyperledger-archives/indy-agent/blob/2b9f4bdddb3d6caf5f8c813bfb8a83590f320a2e/python/serializer/__init__.py#L15-L21
from python_agent_utils.messages.message import Message class BaseSerializer: @staticmethod
Apache License 2.0
mosen/salt-osx
_modules/swupd.py
ignore
python
def ignore(label): out = __salt__['cmd.run']('/usr/sbin/softwareupdate --ignore {0}'.format(label)) return out
Ignore a specific update by label CLI Example: .. code-block:: bash salt '*' swupd.ignore iTunesXPatch-12.1.2
https://github.com/mosen/salt-osx/blob/818d4ae89bb2853b28999a8ddb883c0fe1b1a657/_modules/swupd.py#L169-L180
import logging import salt.utils import re log = logging.getLogger(__name__) __virtualname__ = 'swupd' def __virtual__(): return __virtualname__ if salt.utils.platform.is_darwin() else False def _parse_packages(output): lines = output.splitlines() titles = [re.match('^\s*\*\s+(.*)', line).group(1) for line in lines if re.search('^\s*\*\s+', line)] descriptions = [re.match('^\t+(.*)', line).group(1) for line in lines if re.search('^\t+', line)] return dict(zip(titles, descriptions)) def scheduled(): out = __salt__['cmd.run']('/usr/sbin/softwareupdate --schedule') if re.search('on', out): return True else: return False def schedule(enabled): if enabled: out = __salt__['cmd.run']('/usr/sbin/softwareupdate --schedule on') else: out = __salt__['cmd.run']('/usr/sbin/softwareupdate --schedule off') return out def list(): log.debug('Fetching available updates, this may take some time') out = __salt__['cmd.run']('/usr/sbin/softwareupdate -l') packages = _parse_packages(out) return packages def install(label): out = __salt__['cmd.run']('/usr/sbin/softwareupdate -i {0}'.format(label)) return out def install_all(): out = __salt__['cmd.run']('/usr/sbin/softwareupdate -i -a') return out def install_recommended(): out = __salt__['cmd.run']('/usr/sbin/softwareupdate -i -r') return out def list_ignored(): out = __salt__['cmd.run']('/usr/sbin/softwareupdate --ignore') ignored = [] for line in out.splitlines(): if re.search('^\s{4}"(.*)"', line): ignored.append(re.match('^\s{4}"(.*)"', line).group(1)) return ignored def clear_ignored(): out = __salt__['cmd.run']('/usr/sbin/softwareupdate --reset-ignored') return out
MIT License
axerunners/electrum-axe
electrum_axe/storage.py
WalletStorage.set_password
python
def set_password(self, password, enc_version=None): if enc_version is None: enc_version = self._encryption_version if password and enc_version != STO_EV_PLAINTEXT: ec_key = self.get_eckey_from_password(password) self.pubkey = ec_key.get_public_key_hex() self._encryption_version = enc_version else: self.pubkey = None self._encryption_version = STO_EV_PLAINTEXT self.db.set_modified(True)
Set a password to be used for encrypting this storage.
https://github.com/axerunners/electrum-axe/blob/7ef05088c0edaf0688fb167df353d6da619ebf2f/electrum_axe/storage.py#L215-L227
import os import threading import stat import hashlib import base64 import zlib from . import ecc from .util import profiler, InvalidPassword, WalletFileException, bfh, standardize_path from .plugin import run_hook, plugin_loaders from .json_db import JsonDB from .logging import Logger def get_derivation_used_for_hw_device_encryption(): return ("m" "/4541509'" "/1112098098'") STO_EV_PLAINTEXT, STO_EV_USER_PW, STO_EV_XPUB_PW = range(0, 3) class WalletStorage(Logger): def __init__(self, path, *, manual_upgrades=False): Logger.__init__(self) self.lock = threading.RLock() self.path = standardize_path(path) self._file_exists = self.path and os.path.exists(self.path) DB_Class = JsonDB self.logger.info(f"wallet path {self.path}") self.pubkey = None if self.file_exists(): with open(self.path, "r", encoding='utf-8') as f: self.raw = f.read() self._encryption_version = self._init_encryption_version() if not self.is_encrypted(): self.db = DB_Class(self.raw, manual_upgrades=manual_upgrades) if self.db.upgrade_done: try: self.backup_old_version() except Exception as e: self.logger.error(f'backup failed: {str(e)}') self.load_plugins() else: self._encryption_version = STO_EV_PLAINTEXT self.db = DB_Class('', manual_upgrades=False) def load_plugins(self): wallet_type = self.db.get('wallet_type') if wallet_type in plugin_loaders: plugin_loaders[wallet_type]() def put(self, key,value): self.db.put(key, value) def get(self, key, default=None): return self.db.get(key, default) @profiler def write(self): with self.lock: self._write() def _write(self): if threading.currentThread().isDaemon(): self.logger.warning('daemon thread cannot write db') return if not self.db.modified(): return self.db.commit() s = self.encrypt_before_writing(self.db.dump()) temp_path = "%s.tmp.%s" % (self.path, os.getpid()) with open(temp_path, "w", encoding='utf-8') as f: f.write(s) f.flush() os.fsync(f.fileno()) mode = os.stat(self.path).st_mode if self.file_exists() else stat.S_IREAD | stat.S_IWRITE if not self.file_exists(): assert not os.path.exists(self.path) os.replace(temp_path, self.path) os.chmod(self.path, mode) self._file_exists = True self.logger.info(f"saved {self.path}") self.db.set_modified(False) def file_exists(self): return self._file_exists def is_past_initial_decryption(self): try: return bool(self.db.data) except AttributeError: return False def is_encrypted(self): return self.get_encryption_version() != STO_EV_PLAINTEXT def is_encrypted_with_user_pw(self): return self.get_encryption_version() == STO_EV_USER_PW def is_encrypted_with_hw_device(self): return self.get_encryption_version() == STO_EV_XPUB_PW def get_encryption_version(self): return self._encryption_version def _init_encryption_version(self): try: magic = base64.b64decode(self.raw)[0:4] if magic == b'BIE1': return STO_EV_USER_PW elif magic == b'BIE2': return STO_EV_XPUB_PW else: return STO_EV_PLAINTEXT except: return STO_EV_PLAINTEXT @staticmethod def get_eckey_from_password(password): secret = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), b'', iterations=1024) ec_key = ecc.ECPrivkey.from_arbitrary_size_secret(secret) return ec_key def _get_encryption_magic(self): v = self._encryption_version if v == STO_EV_USER_PW: return b'BIE1' elif v == STO_EV_XPUB_PW: return b'BIE2' else: raise WalletFileException('no encryption magic for version: %s' % v) def decrypt(self, password): ec_key = self.get_eckey_from_password(password) if self.raw: enc_magic = self._get_encryption_magic() s = zlib.decompress(ec_key.decrypt_message(self.raw, enc_magic)) else: s 
= None self.pubkey = ec_key.get_public_key_hex() s = s.decode('utf8') self.db = JsonDB(s, manual_upgrades=True) self.load_plugins() def encrypt_before_writing(self, plaintext: str) -> str: s = plaintext if self.pubkey: s = bytes(s, 'utf8') c = zlib.compress(s) enc_magic = self._get_encryption_magic() public_key = ecc.ECPubkey(bfh(self.pubkey)) s = public_key.encrypt_message(c, enc_magic) s = s.decode('utf8') return s def check_password(self, password): if not self.is_encrypted(): return if self.pubkey and self.pubkey != self.get_eckey_from_password(password).get_public_key_hex(): raise InvalidPassword() def set_keystore_encryption(self, enable): self.put('use_encryption', enable)
MIT License
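A hedged sketch of the password flow around set_password(); the wallet path and password are placeholders:

from electrum_axe.storage import WalletStorage, STO_EV_USER_PW

storage = WalletStorage('/tmp/test_wallet')
storage.put('wallet_type', 'standard')
storage.set_password('correct horse battery staple', enc_version=STO_EV_USER_PW)
storage.write()                     # file is written as base64 'BIE1' ciphertext
storage.check_password('correct horse battery staple')   # raises InvalidPassword if wrong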
seetaresearch/dragon
tensorflow/core/module/module.py
flatten_module
python
def flatten_module( module, recursive, predicate, attribute_traversal_key, attributes_to_ignore, with_path, module_path=(), seen=None, ): if seen is None: seen = {id(module)} module_dict = vars(module) submodules = [] for key in sorted(module_dict, key=attribute_traversal_key): if key in attributes_to_ignore: continue for leaf_path, leaf in nest.flatten_with_paths(module_dict[key]): leaf_path = (key,) + leaf_path if not with_path: leaf_id = id(leaf) if leaf_id in seen: continue seen.add(leaf_id) if predicate(leaf): if with_path: yield module_path + leaf_path, leaf else: yield leaf if recursive and _is_module(leaf): submodules.append((module_path + leaf_path, leaf)) for submodule_path, submodule in submodules: subvalues = flatten_module( submodule, recursive=recursive, predicate=predicate, attribute_traversal_key=attribute_traversal_key, attributes_to_ignore=submodule._MODULE_IGNORED_PROPERTIES, with_path=with_path, module_path=submodule_path, seen=seen, ) for subvalue in subvalues: yield subvalue
Flatten attributes according to the predicate.
https://github.com/seetaresearch/dragon/blob/3dfb6ea55d90d2fb2da9b1b471f5e1e7d7667810/tensorflow/core/module/module.py#L170-L221
from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from dragon.core.framework import context from dragon.core.framework import workspace from dragon.core.util import nest from dragon.vm.tensorflow.core.ops import variables class Module(object): _MODULE_IGNORED_PROPERTIES = frozenset(()) def __init__(self, name=None): self._name = name self._scope_name = None @property def name(self): if self._name is None: self._init_set_name() return self._name @property def name_scope(self): if self._scope_name is None: with context.name_scope(self._name) as scope_name: self._scope_name = scope_name return context.name_scope(self._scope_name) @property def submodules(self): return tuple(self.flatten(predicate=_is_module)) @property def trainable_variables(self): return tuple(self.flatten(predicate=_is_trainable_variable)) @property def variables(self): return tuple(self.flatten(predicate=_is_variable)) def flatten( self, recursive=True, predicate=None, attribute_traversal_key=None, with_path=False, ): return flatten_module( self, recursive=recursive, predicate=predicate if predicate is not None else (lambda _: True), attributes_to_ignore=self._MODULE_IGNORED_PROPERTIES, attribute_traversal_key=attribute_traversal_key, with_path=with_path, ) def _init_set_name(self, name=None, zero_based=True): if name is None: self._name = workspace.get_workspace().unique_name( name=camel_to_snake(self.__class__.__name__), namespace='Object', zero_based=zero_based, ) else: if not valid_identifier(name): raise ValueError('<name> should be a legal identifier.') self._name = name def camel_to_snake(value): intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', value) insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower() if insecure[0] != '_': return insecure return insecure[2:]
BSD 2-Clause Simplified License
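An illustrative sketch of what flatten_module walks over, assuming the module is importable as below; the Block class and predicate are made up:

from dragon.vm.tensorflow.core.module.module import Module   # import path assumed

class Block(Module):
    def __init__(self):
        super(Block, self).__init__()
        self.child = Module(name='child')
        self.tag = 'conv1'

block = Block()
print(block.submodules)                                        # nested Module instances
print(tuple(block.flatten(predicate=lambda x: isinstance(x, str))))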
autodesk/pyccc
pyccc/static/run_job.py
MappedUnpickler.find_class
python
def find_class(self, module, name): import pickle modname = self.RENAMETABLE.get(module, module) try: klass = pickle.Unpickler.find_class(self, modname, name) except (ImportError, RuntimeError): definition = getattr(source, name) newmod = _makemod(modname) sys.modules[modname] = newmod setattr(newmod, name, definition) klass = pickle.Unpickler.find_class(self, newmod.__name__, name) klass.__module__ = module return klass
This override is here to help pickle find the modules that classes are defined in. It does three things: 1) remaps the "PackagedFunction" class from pyccc to the `source.py` module. 2) remaps any classes created in the client's '__main__' to the `source.py` module. 3) creates on-the-fly modules to store any other classes present in source.py. References: This is a modified version of the Python 2-only recipe from https://wiki.python.org/moin/UsingPickle/RenamingModules. It's been modified for 2/3 cross-compatibility.
https://github.com/autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/static/run_job.py#L97-L124
from __future__ import print_function, unicode_literals, absolute_import, division import os import types import sys import pickle import traceback as tb import source PICKLE_PROTOCOL = 2 RENAMETABLE = {'pyccc.python': 'source', '__main__': 'source'} if sys.version_info.major == 2: PYVERSION = 2 import __builtin__ as BUILTINS else: assert sys.version_info.major == 3 PYVERSION = 3 import builtins as BUILTINS def main(): os.environ['IS_PYCCC_JOB'] = '1' try: funcpkg, func = load_job() result = funcpkg.run(func) serialize_output(result, persistrefs=funcpkg.persist_references) if funcpkg.is_imethod: with open('_object_state.pkl', 'wb') as ofp: pickle.dump(funcpkg.obj, ofp, PICKLE_PROTOCOL) except Exception as exc: capture_exceptions(exc) raise def load_job(): with open('function.pkl', 'rb') as pf: funcpkg = MappedUnpickler(pf).load() if hasattr(funcpkg, 'func_name'): try: func = getattr(source, funcpkg.func_name) except AttributeError: func = getattr(BUILTINS, funcpkg.func_name) else: func = None return funcpkg, func def serialize_output(result, persistrefs=False): with open('_function_return.pkl', 'wb') as rp: if persistrefs: pickler = source.ReturningPickler(rp, PICKLE_PROTOCOL) pickler.dump(result) else: pickle.dump(result, rp, PICKLE_PROTOCOL) def capture_exceptions(exc): with open('exception.pkl', 'wb') as excfile: pickle.dump(exc, excfile, protocol=PICKLE_PROTOCOL) with open('traceback.txt', 'w') as tbfile: tb.print_exc(file=tbfile) class MappedUnpickler(pickle.Unpickler): RENAMETABLE = {'pyccc.python': 'source', '__main__': 'source'}
Apache License 2.0
mabuchilab/qnet
src/qnet/convert/to_qutip.py
convert_to_qutip
python
def convert_to_qutip(expr, full_space=None, mapping=None): if full_space is None: full_space = expr.space if not expr.space.is_tensor_factor_of(full_space): raise ValueError( "expr '%s' must be in full_space %s" % (expr, full_space)) if full_space == TrivialSpace: raise AlgebraError( "Cannot convert object in TrivialSpace to qutip. " "You may pass a non-trivial `full_space`") if mapping is not None: if expr in mapping: ret = mapping[expr] if isinstance(ret, qutip.Qobj): return ret else: assert callable(ret) return ret(expr) if expr is IdentityOperator: local_spaces = full_space.local_factors if len(local_spaces) == 0: raise ValueError("full_space %s does not have local factors" % full_space) else: return qutip.tensor(*[qutip.qeye(s.dimension) for s in local_spaces]) elif expr is ZeroOperator: return qutip.tensor( *[qutip.Qobj(csr_matrix((s.dimension, s.dimension))) for s in full_space.local_factors] ) elif isinstance(expr, LocalOperator): return _convert_local_operator_to_qutip(expr, full_space, mapping) elif (isinstance(expr, Operator) and isinstance(expr, Operation)): return _convert_operator_operation_to_qutip(expr, full_space, mapping) elif isinstance(expr, OperatorTrace): raise NotImplementedError('Cannot convert OperatorTrace to ' 'qutip') elif isinstance(expr, State): return _convert_ket_to_qutip(expr, full_space, mapping) elif isinstance(expr, SuperOperator): return _convert_superoperator_to_qutip(expr, full_space, mapping) elif isinstance(expr, Operation): return _convert_state_operation_to_qutip(expr, full_space, mapping) elif isinstance(expr, SLH): raise ValueError("SLH objects can only be converted using " "SLH_to_qutip routine") else: raise ValueError("Cannot convert '%s' of type %s" % (str(expr), type(expr)))
Convert a QNET expression to a qutip object Args: expr: a QNET expression full_space (HilbertSpace): The Hilbert space in which `expr` is defined. If not given, ``expr.space`` is used. The Hilbert space must have a well-defined basis. mapping (dict): A mapping of any (sub-)expression to either a `qutip.Qobj` directly, or to a callable that will convert the expression into a `qutip.Qobj`. Useful for e.g. supplying objects for symbols. Raises: ValueError: if `expr` is not in `full_space`, or if `expr` cannot be converted.
https://github.com/mabuchilab/qnet/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/convert/to_qutip.py#L44-L118
import re from functools import reduce from sympy import symbols from sympy.utilities.lambdify import lambdify from scipy.sparse import csr_matrix from numpy import ( diag as np_diag, arange, cos as np_cos, sin as np_sin) from ..algebra.core.scalar_algebra import ScalarValue, is_scalar from ..algebra.core.exceptions import AlgebraError from ..algebra.core.circuit_algebra import SLH, move_drive_to_H from ..algebra.core.abstract_algebra import Operation from ..algebra.core.operator_algebra import ( Operator, IdentityOperator, ZeroOperator, LocalOperator, LocalSigma, OperatorPlus, OperatorTimes, ScalarTimesOperator, Adjoint, PseudoInverse, OperatorTrace, NullSpaceProjector) from ..algebra.library.spin_algebra import Jz, Jplus, Jminus from ..algebra.library.fock_operators import ( Destroy, Create, Phase, Displace, Squeeze) from ..algebra.core.state_algebra import ( State, BraKet, KetBra, BasisKet, CoherentStateKet, KetPlus, TensorKet, ScalarTimesKet, OperatorTimesKet) from ..algebra.core.hilbert_space_algebra import TrivialSpace from ..algebra.core.super_operator_algebra import ( SuperOperator, IdentitySuperOperator, SuperOperatorPlus, SuperOperatorTimes, ScalarTimesSuperOperator, SPre, SPost, SuperOperatorTimesOperator, ZeroSuperOperator) try: import qutip except ImportError: pass DENSE_DIMENSION_LIMIT = 1000 __all__ = ['convert_to_qutip', 'SLH_to_qutip']
MIT License
petuum/adaptdl
adaptdl/adaptdl/torch/data.py
ElasticSampler.__iter__
python
def __iter__(self): if self.shuffle: g = torch.Generator() g.manual_seed(hash((self.epoch, self.index // len(self.dataset)))) indices = torch.randperm(len(self.dataset), generator=g).tolist() else: indices = list(range(len(self.dataset))) base_index = self.index % len(self.dataset) local_indices = indices[base_index + self.rank::self.num_replicas] if len(local_indices) < len(self): local_indices.append(indices[self.rank]) assert len(local_indices) == len(self) return iter(local_indices)
Iterate through the samples in the dataset, in the order defined for a set epoch, starting at a set index. Produces only the indices for the local replica. Returns: Iterator over data sample indices.
https://github.com/petuum/adaptdl/blob/ac8db205b28ee42202e15da18f2cc9db56fefaa0/adaptdl/adaptdl/torch/data.py#L63-L88
from contextlib import contextmanager import collections import functools import logging import math import numpy as np import pickle import random import torch from torch.utils.data import DataLoader, Sampler import adaptdl.checkpoint import adaptdl.collective import adaptdl.env from adaptdl.torch.epoch import current_epoch from adaptdl.torch._metrics import ( profile_step_start, profile_step_commit, set_batch_size, get_goodput_fn, get_progress) from adaptdl._signal import get_exit_flag logging.basicConfig(level=logging.INFO) LOG = logging.getLogger(__name__) LOG.setLevel(logging.INFO) class ElasticSampler(Sampler): def __init__(self, dataset, shuffle=True): self.dataset = dataset self.shuffle = shuffle self.num_replicas = adaptdl.env.num_replicas() self.rank = adaptdl.env.replica_rank() self.epoch = 0 self.index = 0
Apache License 2.0
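A minimal sketch pairing ElasticSampler with a plain DataLoader, assuming a single replica outside a running AdaptDL job; AdaptDL's own adaptive data loader wrapper is normally used instead:

import torch
from torch.utils.data import DataLoader, TensorDataset
from adaptdl.torch.data import ElasticSampler

dataset = TensorDataset(torch.arange(100).float())
sampler = ElasticSampler(dataset, shuffle=True)
sampler.epoch, sampler.index = 3, 40       # resume mid-epoch after a restart
loader = DataLoader(dataset, batch_size=8, sampler=sampler)
for (batch,) in loader:
    pass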
westerndigitalcorporation/pyvcd
vcd/writer.py
StringVariable.format_value
python
def format_value(self, value: StringValue, check: bool = True) -> str: if value is None: value_str = '' elif check and not isinstance(value, str): raise ValueError(f'Invalid string value ({value!r})') else: value_str = value.translate( { 9: "\\t", 10: "\\n", 13: "\\r", 32: "\\x20", 92: "\\\\", } ) return f's{value_str} {self.ident}'
Format scalar value change for VCD stream. :param value: a string, str() :type value: str :raises ValueError: for invalid *value*. :returns: string representing value change for use in a VCD stream.
https://github.com/westerndigitalcorporation/pyvcd/blob/ecaa5bf0faebeb34a5b0eead41a0ad8c73abb8b4/vcd/writer.py#L590-L613
from datetime import datetime from itertools import zip_longest from numbers import Number from types import TracebackType from typing import ( IO, Dict, Generator, Generic, List, Optional, Sequence, Set, Tuple, Type, TypeVar, Union, ) from vcd.common import ScopeType, Timescale, TimescaleMagnitude, TimescaleUnit, VarType class VCDPhaseError(Exception): ScopeTuple = Tuple[str, ...] ScopeInput = Union[str, Sequence[str]] TimeValue = Union[int, float] TimescaleLike = Union[Timescale, Tuple[int, str], str] CompoundSize = Sequence[int] VariableSize = Union[int, CompoundSize] EventValue = Union[bool, int] RealValue = Union[float, int] ScalarValue = Union[int, bool, str, None] StringValue = Union[str, None] CompoundValue = Sequence[ScalarValue] VarValue = Union[EventValue, RealValue, ScalarValue, StringValue, CompoundValue] class VCDWriter: def __init__( self, file: IO[str], timescale: TimescaleLike = '1 us', date: Optional[str] = None, comment: str = '', version: str = '', default_scope_type: Union[ScopeType, str] = ScopeType.module, scope_sep: str = '.', check_values: bool = True, init_timestamp: TimeValue = 0, ) -> None: self._ofile = file self._header_keywords = { '$timescale': self._check_timescale(timescale), '$date': str(datetime.now()) if date is None else date, '$comment': comment, '$version': version, } self._default_scope_type = ScopeType(default_scope_type) self._scope_sep = scope_sep self._check_values = check_values self._registering = True self._closed = False self._dumping = True self._next_var_id: int = 1 self._scope_var_strs: Dict[ScopeTuple, List[str]] = {} self._scope_var_names: Dict[ScopeTuple, Set[str]] = {} self._scope_types: Dict[ScopeTuple, ScopeType] = {} self._vars: List[Variable] = [] self._timestamp = int(init_timestamp) self._last_dumped_ts: Optional[int] = None def set_scope_type( self, scope: ScopeInput, scope_type: Union[ScopeType, str] ) -> None: scope_type = ScopeType(scope_type) scope_tuple = self._get_scope_tuple(scope) self._scope_types[scope_tuple] = scope_type def register_var( self, scope: ScopeInput, name: str, var_type: Union[VarType, str], size: Optional[VariableSize] = None, init: VarValue = None, ) -> 'Variable': if self._closed: raise VCDPhaseError('Cannot register after close().') elif not self._registering: raise VCDPhaseError('Cannot register after time 0.') var_type = VarType(var_type) scope_tuple = self._get_scope_tuple(scope) scope_names = self._scope_var_names.setdefault(scope_tuple, set()) if name in scope_names: raise KeyError( f'Duplicate var {name} in scope {self._scope_sep.join(scope_tuple)}' ) if size is None: if var_type in [VarType.integer, VarType.real, VarType.realtime]: size = 64 elif var_type in [VarType.event, VarType.string]: size = 1 else: raise ValueError(f'Must supply size for {var_type} var_type') if isinstance(size, Sequence): size = tuple(size) var_size = sum(size) else: var_size = size ident = _encode_identifier(self._next_var_id) var_str = f'$var {var_type} {var_size} {ident} {name} $end' var: Variable if var_type == VarType.string: if init is None: init = '' elif not isinstance(init, str): raise ValueError('string init value must be str') var = StringVariable(ident, var_type, size, init) elif var_type == VarType.event: if init is None: init = True elif not isinstance(init, (bool, int)): raise ValueError('event init value must be int, bool, or None') var = EventVariable(ident, var_type, size, init) elif var_type == VarType.real: if init is None: init = 0.0 elif not isinstance(init, (float, int)): raise ValueError('real 
init value must be float, int, or None') var = RealVariable(ident, var_type, size, init) elif size == 1: if init is None: init = 'x' elif not isinstance(init, (int, bool, str)): raise ValueError('scalar init value must be int, bool, str, or None') var = ScalarVariable(ident, var_type, size, init) elif isinstance(size, tuple): if init is None: init = tuple('x' * len(size)) elif not isinstance(init, Sequence): raise ValueError('compount init value must be a sequence') elif len(init) != len(size): raise ValueError('compound init value must be same length as size') elif not all(isinstance(v, (int, bool, str)) for v in init): raise ValueError('compound init values must be int, bool, or str') var = CompoundVectorVariable(ident, var_type, size, init) else: if init is None: init = 'x' elif not isinstance(init, (int, bool, str)): raise ValueError('vector init value must be int, bool, str, or None') var = VectorVariable(ident, var_type, size, init) var.format_value(init, check=True) self._vars.append(var) self._next_var_id += 1 self._scope_var_strs.setdefault(scope_tuple, []).append(var_str) scope_names.add(name) return var def register_alias(self, scope: ScopeInput, name: str, var: 'Variable') -> None: if self._closed: raise VCDPhaseError('Cannot register after close().') elif not self._registering: raise VCDPhaseError('Cannot register after time 0.') scope_tuple = self._get_scope_tuple(scope) scope_names = self._scope_var_names.setdefault(scope_tuple, set()) if name in scope_names: raise KeyError( f'Duplicate var {name} in scope {self._scope_sep.join(scope_tuple)}' ) var_str = f'$var {var.type} {var.size} {var.ident} {name} $end' self._scope_var_strs.setdefault(scope_tuple, []).append(var_str) scope_names.add(name) def dump_off(self, timestamp: TimeValue) -> None: if self._registering: self._finalize_registration() self._set_timestamp(timestamp) if not self._dumping: return self._dump_timestamp() self._ofile.write('$dumpoff\n') for var in self._vars: val_str = var.dump_off() if val_str: self._ofile.write(val_str + '\n') self._ofile.write('$end\n') self._dumping = False def dump_on(self, timestamp: TimeValue) -> None: if self._registering: self._finalize_registration() self._set_timestamp(timestamp) if self._dumping: return self._dumping = True self._dump_timestamp() self._dump_values('$dumpon') def _dump_values(self, keyword: str) -> None: self._ofile.write(keyword + '\n') for var in self._vars: val_str = var.dump(self._check_values) if val_str: self._ofile.write(val_str + '\n') self._ofile.write('$end\n') def _set_timestamp(self, timestamp: TimeValue) -> None: if timestamp < self._timestamp: raise VCDPhaseError(f'Out of order timestamp: {timestamp}') elif timestamp > self._timestamp: self._timestamp = int(timestamp) def _dump_timestamp(self) -> None: if (self._timestamp != self._last_dumped_ts and self._dumping) or ( self._last_dumped_ts is None ): self._last_dumped_ts = self._timestamp self._ofile.write(f'#{self._timestamp}\n') def change(self, var: 'Variable', timestamp: TimeValue, value: VarValue) -> None: if self._closed: raise VCDPhaseError('Cannot change value after close()') if value != var.value or var.type == VarType.event: val_str = var.format_value(value, self._check_values) else: val_str = '' if timestamp < self._timestamp: raise VCDPhaseError(f'Out of order timestamp: {timestamp}') elif timestamp > self._timestamp: if self._registering: self._finalize_registration() self._timestamp = int(timestamp) if not val_str: return var.value = value if self._dumping and not self._registering: 
if self._timestamp != self._last_dumped_ts: self._last_dumped_ts = self._timestamp self._ofile.write(f'#{self._timestamp}\n{val_str}\n') else: self._ofile.write(f'{val_str}\n') def _get_scope_tuple(self, scope: ScopeInput) -> ScopeTuple: if isinstance(scope, str): return tuple(scope.split(self._scope_sep)) if isinstance(scope, Sequence): return tuple(scope) else: raise TypeError(f'Invalid scope {scope}') @classmethod def _check_timescale(cls, timescale: TimescaleLike) -> str: if isinstance(timescale, Timescale): return str(timescale) elif isinstance(timescale, (list, tuple)): if len(timescale) != 2: raise ValueError(f'Invalid timescale {timescale}') mag, unit = timescale return str(Timescale(TimescaleMagnitude(mag), TimescaleUnit(unit))) elif isinstance(timescale, str): return str(Timescale.from_str(timescale)) else: raise TypeError(f'Invalid timescale type {type(timescale).__name__}') def __enter__(self) -> 'VCDWriter': return self def __exit__( self, exc_type: Optional[Type[Exception]], exc_value: Optional[Exception], traceback: Optional[TracebackType], ) -> None: self.close() def close(self, timestamp: Optional[TimeValue] = None) -> None: if not self._closed: self.flush(timestamp) self._closed = True def flush(self, timestamp: Optional[TimeValue] = None) -> None: if self._closed: raise VCDPhaseError('Cannot flush() after close()') if self._registering: self._finalize_registration() if timestamp is not None: self._set_timestamp(timestamp) self._dump_timestamp() self._ofile.flush() def _gen_header(self) -> Generator[str, None, None]: for kwname, kwvalue in sorted(self._header_keywords.items()): if not kwvalue: continue lines = kwvalue.split('\n') if len(lines) == 1: yield f'{kwname} {lines[0]} $end' else: yield kwname for line in lines: yield '\t' + line yield '$end' prev_scope: ScopeTuple = () for scope in sorted(self._scope_var_strs): var_strs = self._scope_var_strs.pop(scope) for i, (prev, this) in enumerate(zip_longest(prev_scope, scope)): if prev != this: for _ in prev_scope[i:]: yield '$upscope $end' for j, name in enumerate(scope[i:]): scope_type = self._scope_types.get( scope[: i + j + 1], self._default_scope_type ) yield f'$scope {scope_type.value} {name} $end' break else: assert scope != prev_scope for var_str in var_strs: yield var_str prev_scope = scope for _ in prev_scope: yield '$upscope $end' yield '$enddefinitions $end' def _finalize_registration(self) -> None: assert self._registering self._ofile.write('\n'.join(self._gen_header()) + '\n') if self._vars: self._dump_timestamp() self._dump_values('$dumpvars') self._registering = False self._header_keywords.clear() self._scope_types.clear() self._scope_var_names.clear() ValueType = TypeVar('ValueType') class Variable(Generic[ValueType]): __slots__ = ('ident', 'type', 'size', 'value') def __init__(self, ident: str, type: VarType, size: VariableSize, init: ValueType): self.ident = ident self.type = type self.size = size self.value = init def format_value(self, value: ValueType, check: bool = True) -> str: raise NotImplementedError def dump(self, check: bool = True) -> Optional[str]: return self.format_value(self.value, check) def dump_off(self) -> Optional[str]: return None class ScalarVariable(Variable[ScalarValue]): __slots__ = () def format_value(self, value: ScalarValue, check: bool = True) -> str: if isinstance(value, str): if check and (len(value) != 1 or value not in '01xzXZ'): raise ValueError(f'Invalid scalar value ({value})') return value + self.ident elif value is None: return 'z' + self.ident elif value: return '1' 
+ self.ident else: return '0' + self.ident def dump_off(self) -> str: return 'x' + self.ident class EventVariable(Variable[EventValue]): def format_value(self, value: EventValue, check: bool = True) -> str: if value: return '1' + self.ident else: raise ValueError('Invalid event value') def dump(self, check: bool = True) -> Optional[str]: return None class StringVariable(Variable[StringValue]): __slots__ = ()
MIT License
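Usage sketch for the VCDWriter shown in the context above. It assumes this module is the pyvcd package, which exposes the class as vcd.VCDWriter; the scope name, variable name, and output filename are illustrative only.

from vcd import VCDWriter  # assumes this is pyvcd's vcd.VCDWriter

with open("counter.vcd", "w") as f:
    # timescale/date become header keywords; register_var() must happen before
    # the first change() at a later timestamp, as enforced by the class above.
    with VCDWriter(f, timescale="1 ns", date="today") as writer:
        counter = writer.register_var("top", "counter", "integer", size=8)
        for t in range(16):
            writer.change(counter, t, t & 0xFF)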
data61/anonlink
anonlink/stats.py
nonmatch_index_score
python
def nonmatch_index_score(
    candidate_pairs: _typechecking.CandidatePairs,
    n: int = 1
) -> int:
    if not _check_bipartite(candidate_pairs):
        raise ValueError('only 2-party matching is supported')
    _, _, (rec_is0, rec_is1) = candidate_pairs
    matched0: _typing.Set[int] = set()
    matched1: _typing.Set[int] = set()
    nonmatches = 0
    for i, rec_i0, rec_i1 in zip(itertools.count(), rec_is0, rec_is1):
        if rec_i0 not in matched0 and rec_i1 not in matched1:
            matched0.add(rec_i0)
            matched1.add(rec_i1)
        else:
            nonmatches += 1
            if nonmatches == n:
                return i
    raise ValueError('fewer than n definite nonmatches')
Find the index of the ``n``th definite nonmatch.

We use the 2-party greedy solver to split the candidate pairs into possible matches and definite nonmatches. The index (in decreasing order of similarity) of the ``n``th definite nonmatch is returned.

Smaller values of ``n`` introduce more noise to the heuristic, but larger values of ``n`` provide less information.

Raises ValueError if there are fewer than n definite nonmatches.

:param candidate pairs: The candidate pairs. Must represent a bipartite problem.
:param n: We return the index of the ``n``th definite nonmatch. Default 1.

:return: The index of the ``n``th definite nonmatch if there are at least ``n`` definite nonmatches in the candidate pairs.
https://github.com/data61/anonlink/blob/9cd4e2eb2984494e876dce5054d96749e702620f/anonlink/stats.py#L138-L177
import itertools import typing as _typing import numpy as _np import anonlink.typechecking as _typechecking def _similarities_as_nparray(candidate_pairs: _typechecking.CandidatePairs): sims, _, _ = candidate_pairs return _np.frombuffer(sims, dtype=sims.typecode) def _check_bipartite(candidate_pairs: _typechecking.CandidatePairs) -> bool: _, (dset_is0, dset_is1), _ = candidate_pairs np_dset_is0 = _np.frombuffer(dset_is0, dtype=dset_is0.typecode) np_dset_is1 = _np.frombuffer(dset_is1, dtype=dset_is1.typecode) return (np_dset_is0 == 0).all() and (np_dset_is1 == 1).all() def similarities_hist(candidate_pairs: _typechecking.CandidatePairs, bins: int = 100): return _np.histogram(_similarities_as_nparray(candidate_pairs), bins=bins) def _semiopen_hist_matches_nonmatches( candidate_pairs: _typechecking.CandidatePairs, steps: int = 100 ): if not _check_bipartite(candidate_pairs): raise ValueError('only 2-party matching is supported') thresholds = _np.histogram_bin_edges( _similarities_as_nparray(candidate_pairs), bins=steps) thresholds_enumerate_iter = zip( range(thresholds.shape[0] - 1, -1, -1), thresholds[::-1]) sims, _, (rec_is0, rec_is1) = candidate_pairs matched0: _typing.Set[int] = set() matched1: _typing.Set[int] = set() num_matches = _np.zeros_like(thresholds, dtype=int) num_nonmatches = _np.zeros_like(thresholds, dtype=int) try: i, threshold = next(thresholds_enumerate_iter) except StopIteration: return num_matches, num_nonmatches, thresholds for sim, rec_i0, rec_i1 in zip(sims, rec_is0, rec_is1): while sim < threshold: try: i, threshold = next(thresholds_enumerate_iter) except StopIteration: return num_matches, num_nonmatches, thresholds if rec_i0 not in matched0 and rec_i1 not in matched1: matched0.add(rec_i0) matched1.add(rec_i1) num_matches[i] += 1 else: num_nonmatches[i] += 1 return num_matches, num_nonmatches, thresholds def matches_nonmatches_hist( candidate_pairs: _typechecking.CandidatePairs, bins: int = 100 ): semiopen_hist = _semiopen_hist_matches_nonmatches(candidate_pairs, bins) num_matches, num_nonmatches, thresholds = semiopen_hist num_matches[-2] += num_matches[-1] num_matches = _np.array(num_matches[:-1]) num_nonmatches[-2] += num_nonmatches[-1] num_nonmatches = _np.array(num_nonmatches[:-1]) return num_matches, num_nonmatches, thresholds def cumul_number_matches_vs_threshold( candidate_pairs: _typechecking.CandidatePairs, steps: int = 100 ): num_matches, _, thresholds = _semiopen_hist_matches_nonmatches( candidate_pairs, steps) num_matches_rev = num_matches[::-1] _np.cumsum(num_matches_rev, out=num_matches_rev) return num_matches, thresholds
Apache License 2.0
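Usage sketch for nonmatch_index_score. The (similarities, dataset-index arrays, record-index arrays) tuple layout is inferred from the context above, which reads these fields with np.frombuffer; the concrete similarity values and record indices are made up for illustration, and the import path follows the record's file path.

from array import array
from anonlink.stats import nonmatch_index_score  # module path taken from the record

# Four candidate pairs between datasets 0 and 1, sorted by decreasing similarity.
sims = array("d", [0.95, 0.90, 0.80, 0.70])
dset_is0 = array("I", [0, 0, 0, 0])
dset_is1 = array("I", [1, 1, 1, 1])
rec_is0 = array("I", [0, 1, 0, 2])
rec_is1 = array("I", [0, 1, 1, 2])
candidate_pairs = (sims, (dset_is0, dset_is1), (rec_is0, rec_is1))

# Pair 2 reuses record 0 of dataset 0, so it is the first definite nonmatch.
print(nonmatch_index_score(candidate_pairs, n=1))  # 2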
neuralmagic/sparseml
src/sparseml/sparsification/modifier_lr.py
LearningRateModifier.lr_class
python
def lr_class(self, value: str):
    self._lr_class = value
    self.validate_lr_info()
:param value: The name of the lr scheduler class to use: [StepLR, MultiStepLR, ExponentialLR]
https://github.com/neuralmagic/sparseml/blob/e2dcb66bad713542158dfe54cba113a0cc02ed39/src/sparseml/sparsification/modifier_lr.py#L161-L167
from typing import Dict, List, Tuple, Union from sparseml.optim.modifier import ( BaseModifier, BaseScheduled, BaseUpdate, ModifierProp, ) from sparseml.utils import ALL_TOKEN __all__ = [ "SetLearningRateModifier", "LearningRateModifier", ] class SetLearningRateModifier(BaseModifier, BaseScheduled): def __init__( self, learning_rate: float, start_epoch: float = -1.0, log_types: Union[str, List[str]] = ALL_TOKEN, end_epoch: float = -1.0, **kwargs, ): kwargs["end_comparator"] = kwargs.get("end_comparator", None) super().__init__( log_types=log_types, start_epoch=start_epoch, end_epoch=-1.0, **kwargs, ) self._learning_rate = learning_rate self.validate_learning_rate() @ModifierProp() def learning_rate(self) -> float: return self._learning_rate @learning_rate.setter def learning_rate(self, value: float): self._learning_rate = value self.validate_learning_rate() def validate_learning_rate(self): if isinstance(self._learning_rate, str): self._learning_rate = float(self._learning_rate) if self._learning_rate <= 0.0: raise ValueError("learning_rate must be greater than 0") if self._learning_rate > 1.0: raise ValueError("learning_rate must be less than or equal to 1.0") class LearningRateModifier(BaseModifier, BaseScheduled, BaseUpdate): def __init__( self, lr_class: str, lr_kwargs: Dict, init_lr: float, start_epoch: float, end_epoch: float = -1.0, update_frequency: float = -1.0, log_types: Union[str, List[str]] = ALL_TOKEN, **kwargs, ): kwargs["update_frequency"] = kwargs.get("update_frequency", -1.0) kwargs["end_comparator"] = kwargs.get("end_comparator", -1) super().__init__( log_types=log_types, start_epoch=start_epoch, end_epoch=end_epoch, **kwargs, ) self._lr_class = lr_class self._lr_kwargs = lr_kwargs self._init_lr = init_lr self.validate_lr_info() @ModifierProp() def lr_class(self) -> str: return self._lr_class @lr_class.setter
Apache License 2.0
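Usage sketch for the lr_class property above; the import path follows the record's file path, and the StepLR keyword arguments and epoch values are placeholders.

from sparseml.sparsification.modifier_lr import LearningRateModifier

modifier = LearningRateModifier(
    lr_class="StepLR",
    lr_kwargs={"step_size": 1.0, "gamma": 0.9},  # placeholder scheduler kwargs
    init_lr=0.1,
    start_epoch=0.0,
    end_epoch=10.0,
)

# Assigning through the property re-runs validate_lr_info() on the new value
# (matching lr_kwargs may also be required; shown only to illustrate re-validation).
modifier.lr_class = "ExponentialLR"
print(modifier.lr_class)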
morganstanley/testplan
examples/App/FXConverter/converter.py
FXConverter.loop
python
def loop(self):
    host, port = self._server_init()
    self._logger.info("Listener on: {}:{}".format(host, port))
    downstream_addr = (
        self._config.get("Downstream", "Host"),
        int(self._config.get("Downstream", "Port")),
    )
    self._logger.info(
        "Connected to downstream: {}:{}".format(
            downstream_addr[0], downstream_addr[1]
        )
    )
    downstream_cli = socket.create_connection(downstream_addr)
    self._logger.info("Converter started.")
    upstream_conn, client_address = self._server.accept()
    self._logger.info("Client connected: {}".format(client_address))
    self._loop(upstream_conn, downstream_cli, downstream_addr)
Starts the application.

1. Connect to the downstream server with FX exchange rates.
2. Accept a client connection.
3. Start the loop to handle client requests.
https://github.com/morganstanley/testplan/blob/8cb6a0ed0682698b2d6af82382fbb66d8d9e3ff7/examples/App/FXConverter/converter.py#L106-L129
import os import re import sys import socket import logging from configparser import ConfigParser logging.basicConfig(stream=sys.stdout, format="%(message)s") class FXConverter(object): def __init__(self, config_file): self._logger = logging.getLogger() self._logger.setLevel(logging.INFO) self._config = self.load_config( os.path.join(os.getcwd(), "etc", config_file) ) self._server = None def load_config(self, filename): config = ConfigParser() config.read(filename) self._logger.info("Configuration read:") for section in ("Listener", "Downstream"): self._logger.info(section) self._logger.info("\tHost: {}".format(config.get(section, "Host"))) self._logger.info("\tPort: {}".format(config.get(section, "Port"))) return config def _server_init(self): self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._server.bind( ( self._config.get("Listener", "Host"), int(self._config.get("Listener", "Port")), ) ) self._server.listen(10) return self._server.getsockname() def _validate_request(self, msg): if not re.match(r"[A-Z]{3}:[A-Z]{3}:[0-9]+", msg): raise ValueError("Invalid request format ({}).".format(msg)) def _process_request(self, downstream_cli, msg): value = float(msg[8:]) if value == 0: return "0" currencies = msg[:7] source, target = currencies.split(":") if source == target: rate = 1 else: downstream_cli.sendall(bytes(currencies.encode("utf-8"))) rate = float(downstream_cli.recv(1024).decode("utf-8")) result = str(int(rate * value)) self._logger.info( "Request result for {} with rate {}: {}".format( msg[8:], rate, result ) ) return result def _loop(self, upstream_conn, downstream_cli, downstream_addr): while True: msg = str(upstream_conn.recv(1024).decode("utf-8")) self._logger.info("Client msg: {}".format(msg)) if msg == "Stop": self._server.close() self._logger.info("Converter stopped.") break else: try: self._validate_request(msg) except Exception as exc: upstream_conn.sendall(bytes(str(exc).encode("utf-8"))) continue else: self._logger.info( "Propagating query {} to {}".format( msg, downstream_addr ) ) result = self._process_request(downstream_cli, msg) upstream_conn.sendall(bytes(result.encode("utf-8")))
Apache License 2.0
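A minimal sketch of driving the converter above. The config section and option names come from load_config()/loop(); the host/port values are placeholders, the module name "converter" is assumed from the file path, and FXConverter expects the file under ./etc relative to the working directory.

import os
from converter import FXConverter  # module name assumed from the file path above

config_text = """[Listener]
Host = 127.0.0.1
Port = 9000

[Downstream]
Host = 127.0.0.1
Port = 9001
"""

os.makedirs("etc", exist_ok=True)
with open(os.path.join("etc", "converter.cfg"), "w") as f:
    f.write(config_text)

app = FXConverter("converter.cfg")  # reads ./etc/converter.cfg and logs the settings
# app.loop()  # would connect to the Downstream host, accept one client, then serve
#             # "EUR:USD:100"-style requests until the client sends "Stop".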
pyglet/pyglet
pyglet/com.py
_make_callback_func
python
def _make_callback_func(interface, name, method_func):
    if method_func is None:
        return _missing_impl(interface, name)
    return _found_impl(interface, name, method_func)
Create a callback function for ctypes if possible.
https://github.com/pyglet/pyglet/blob/b9a63ea179735c8f252ac31d51751bdf8a741c9d/pyglet/com.py#L290-L295
import sys import ctypes from pyglet.util import debug_print _debug_com = debug_print('debug_com') if sys.platform != 'win32': raise ImportError('pyglet.com requires a Windows build of Python') class GUID(ctypes.Structure): _fields_ = [ ('Data1', ctypes.c_ulong), ('Data2', ctypes.c_ushort), ('Data3', ctypes.c_ushort), ('Data4', ctypes.c_ubyte * 8) ] def __init__(self, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8): self.Data1 = l self.Data2 = w1 self.Data3 = w2 self.Data4[:] = (b1, b2, b3, b4, b5, b6, b7, b8) def __repr__(self): b1, b2, b3, b4, b5, b6, b7, b8 = self.Data4 return 'GUID(%x, %x, %x, %x, %x, %x, %x, %x, %x, %x, %x)' % ( self.Data1, self.Data2, self.Data3, b1, b2, b3, b4, b5, b6, b7, b8) def __cmp__(self, other): if isinstance(other, GUID): return ctypes.cmp(bytes(self), bytes(other)) return -1 def __eq__(self, other): return isinstance(other, GUID) and bytes(self) == bytes(other) def __hash__(self): return hash(bytes(self)) LPGUID = ctypes.POINTER(GUID) IID = GUID REFIID = ctypes.POINTER(IID) class METHOD: def __init__(self, restype, *args): self.restype = restype self.argtypes = args def get_field(self): return ctypes.WINFUNCTYPE(self.restype, *self.argtypes) class STDMETHOD(METHOD): def __init__(self, *args): super(STDMETHOD, self).__init__(ctypes.HRESULT, *args) class COMMethodInstance: def __init__(self, name, i, method): self.name = name self.i = i self.method = method def __get__(self, obj, tp): if obj is not None: def _call(*args): assert _debug_com('COM: #{} IN {}({}, {})'.format(self.i, self.name, obj.__class__.__name__, args)) ret = self.method.get_field()(self.i, self.name)(obj, *args) assert _debug_com('COM: #{} OUT {}({}, {})'.format(self.i, self.name, obj.__class__.__name__, args)) assert _debug_com('COM: RETURN {}'.format(ret)) return ret return _call raise AttributeError() class COMInterface(ctypes.Structure): _fields_ = [ ('lpVtbl', ctypes.c_void_p), ] class InterfacePtrMeta(type(ctypes.POINTER(COMInterface))): def __new__(cls, name, bases, dct): methods = [] for base in bases[::-1]: methods.extend(base.__dict__.get('_methods_', ())) methods.extend(dct.get('_methods_', ())) for i, (n, method) in enumerate(methods): dct[n] = COMMethodInstance(n, i, method) dct['_type_'] = COMInterface return super(InterfacePtrMeta, cls).__new__(cls, name, bases, dct) pInterface = InterfacePtrMeta(str('Interface'), (ctypes.POINTER(COMInterface),), {'__doc__': 'Base COM interface pointer.'}) class COMInterfaceMeta(type): def __new__(mcs, name, bases, dct): methods = dct.pop("_methods_", None) cls = type.__new__(mcs, name, bases, dct) if methods is not None: cls._methods_ = methods if not bases: _ptr_bases = (cls, COMPointer) else: _ptr_bases = (cls, ctypes.POINTER(bases[0])) from ctypes import _pointer_type_cache _pointer_type_cache[cls] = type(COMPointer)("POINTER({})".format(cls.__name__), _ptr_bases, {"__interface__": cls}) return cls def __get_subclassed_methodcount(self): try: result = 0 for itf in self.mro()[1:-1]: result += len(itf.__dict__["_methods_"]) return result except KeyError as err: (name,) = err.args if name == "_methods_": raise TypeError("Interface '{}' requires a _methods_ attribute.".format(itf.__name__)) raise class COMPointerMeta(type(ctypes.c_void_p), COMInterfaceMeta): class COMPointer(ctypes.c_void_p, metaclass=COMPointerMeta): @classmethod def from_param(cls, obj): if obj is None: return try: ptr_dct = obj._pointers except AttributeError: raise Exception("Interface method argument specified incorrectly, or passed wrong argument.", cls) else: try: return 
ptr_dct[cls.__interface__] except KeyError: raise TypeError("Interface {} doesn't have a pointer in this class.".format(cls.__name__)) def _missing_impl(interface_name, method_name): def missing_cb_func(*args): assert _debug_com("Undefined method: {0} was called in interface: {1}".format(method_name, interface_name)) return 0 return missing_cb_func def _found_impl(interface_name, method_name, method_func): def cb_func(*args, **kw): try: result = method_func(*args, **kw) except Exception as err: raise err if not result: return 0 return result return cb_func
BSD 3-Clause New or Revised License
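A small sketch of the helper above. pyglet.com refuses to import outside Windows (see the context), so this is Windows-only; the interface and method names are made up for illustration.

from pyglet.com import _make_callback_func  # Windows-only module

def on_event(this, value):  # hypothetical COM method implementation
    print("OnEvent called with", value)
    return 0

# Wraps on_event; falsy results become 0 and exceptions propagate.
cb = _make_callback_func("IMyInterface", "OnEvent", on_event)
# With no implementation given, a logging stub that always returns 0 is produced.
stub = _make_callback_func("IMyInterface", "OnEvent", None)

print(cb(None, 42))    # 0
print(stub(None, 42))  # 0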
brandontrabucco/design-bench
design_bench/oracles/approximate_oracle.py
ApproximateOracle.save_model_to_zip
python
def save_model_to_zip(self, model, zip_archive):
    raise NotImplementedError
A function that serializes a machine learning model and stores that model in a compressed zip file using the python ZipFile interface for sharing and future loading by an ApproximateOracle.

Arguments:

model: Any
    any format of machine learning model that will be stored in the self.model attribute for later use
zip_archive: ZipFile
    an instance of the python ZipFile interface that has loaded the file path specified by self.resource.disk_target
https://github.com/brandontrabucco/design-bench/blob/5152f50f23af4d68c0a1e2364705b9df648de28c/design_bench/oracles/approximate_oracle.py#L85-L102
from design_bench.oracles.oracle_builder import OracleBuilder
from design_bench.datasets.dataset_builder import DatasetBuilder
from design_bench.disk_resource import DiskResource, SERVER_URL
from scipy import stats
import numpy as np
import pickle as pkl
import abc
import zipfile


class ApproximateOracle(OracleBuilder, abc.ABC):

    default_split_kwargs = dict(val_fraction=0.1,
                                subset=None,
                                shard_size=5000,
                                to_disk=False,
                                disk_target=None,
                                is_absolute=None)

    default_model_kwargs = dict()

    @abc.abstractmethod
MIT License
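A sketch of what a concrete save_model_to_zip could do, since the method above is abstract. This is not design-bench's actual implementation; the entry name "model.pkl" and the dict stand-in for a model are assumptions.

import io
import pickle
import zipfile

def save_model_to_zip(model, zip_archive):
    # Pickle the model and store it as a single entry inside the open zip archive.
    zip_archive.writestr("model.pkl", pickle.dumps(model))

model = {"weights": [0.1, 0.2]}  # stand-in for a real machine learning model
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    save_model_to_zip(model, zf)
with zipfile.ZipFile(buf, "r") as zf:
    restored = pickle.loads(zf.read("model.pkl"))
assert restored == model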
nsls-ii/pyxrf
pyxrf/model/lineplot.py
LinePlotModel.change_incident_energy
python
def change_incident_energy(self, energy_new):
    margin = 0.8
    energy_new = round(energy_new, ndigits=6)
    self.incident_energy = energy_new + 1.0
    self.incident_energy = energy_new
    if "coherent_sct_energy" in self.param_model.param_new:
        self.param_model.param_new["coherent_sct_energy"]["value"] = energy_new
    self.param_model.energy_bound_high_buf = energy_new + 1.8
    upper_bound = energy_new + margin
    upper_bound = round(upper_bound, ndigits=5)
    self.param_model.energy_bound_high_buf = upper_bound
The function that changes the value of the incident energy and the upper bound of the fitted energy range.

Parameters
----------
incident_energy : float
    New value of incident energy
https://github.com/nsls-ii/pyxrf/blob/0aa4e175f541edfaa8f71daf54b54a07e4ab2b04/pyxrf/model/lineplot.py#L392-L422
from __future__ import absolute_import, division, print_function import math import numpy as np import matplotlib from matplotlib.figure import Figure from matplotlib.axes import Axes import matplotlib.pyplot as plt from matplotlib.lines import Line2D from matplotlib.collections import BrokenBarHCollection import matplotlib.ticker as mticker from matplotlib.colors import LogNorm from enum import Enum from mpl_toolkits.axes_grid1 import ImageGrid from atom.api import Atom, Str, observe, Typed, Int, List, Dict, Float, Bool from skbeam.core.fitting.xrf_model import K_TRANSITIONS, L_TRANSITIONS, M_TRANSITIONS from skbeam.fluorescence import XrfElement as Element from ..core.xrf_utils import get_eline_parameters import logging logger = logging.getLogger(__name__) def get_color_name(): first_ten = [ "indigo", "maroon", "green", "darkblue", "darkgoldenrod", "blue", "darkcyan", "sandybrown", "black", "darkolivegreen", ] nonred_list = [ v for v in matplotlib.colors.cnames.keys() if "pink" not in v and "fire" not in v and "sage" not in v and "tomato" not in v and "red" not in v ] return first_ten + nonred_list + list(matplotlib.colors.cnames.keys()) class PlotTypes(Enum): LINLOG = 0 LINEAR = 1 class EnergyRangePresets(Enum): SELECTED_RANGE = 0 FULL_SPECTRUM = 1 class MapTypes(Enum): LINEAR = 0 LOG = 1 class MapAxesUnits(Enum): PIXELS = 0 POSITIONS = 1 class LinePlotModel(Atom): exp_data_label = Str("experiment") number_pts_to_show = Int(3000) _fig_preview = Typed(Figure) _ax_preview = Typed(Axes) _lines_preview = List() _bahr_preview = Typed(BrokenBarHCollection) plot_type_preview = Typed(PlotTypes) energy_range_preview = Typed(EnergyRangePresets) min_v_preview = Float() max_v_preview = Float() min_e_preview = Float() max_e_preview = Float() _fig_maps = Typed(Figure) map_type_preview = Typed(MapTypes) map_axes_units_preview = Typed(MapAxesUnits) map_scatter_plot = Bool(False) map_preview_color_scheme = Str("viridis") map_preview_range_low = Float(-1) map_preview_range_high = Float(-1) _fig = Typed(Figure) _ax = Typed(Axes) _canvas = Typed(object) plot_fit_x_min = Float(0) plot_fit_x_max = Float(0) element_id = Int(0) elist = List() scale_opt = Int(0) prefit_x = Typed(object) plot_title = Str() plot_type_names = List() max_v = Float() incident_energy = Float(12.0) energy_range_names = List() energy_range_fitting = Str() eline_obj = List() plot_exp_opt = Bool(False) plot_exp_obj = Typed(Line2D) show_exp_opt = Bool(False) plot_energy_barh = Typed(BrokenBarHCollection) t_bar = Typed(object) plot_exp_list = List() auto_fit_obj = List() show_autofit_opt = Bool() plot_fit_obj = List() show_fit_opt = Bool(False) plot_style = Dict() roi_plot_dict = Dict() roi_dict = Typed(object) log_range = List() linear_range = List() plot_escape_line = Int(0) emission_line_window = Bool(True) det_materials = Int(0) escape_e = Float(1.73998) limit_cut = Int() param_model = Typed(object) io_model = Typed(object) vertical_marker_kev = Float(-1) line_vertical_marker = Typed(object) vertical_marker_is_visible = Bool(False) report_marker_state = Typed(object) def __init__(self, *, param_model, io_model): self.param_model = param_model self.io_model = io_model self._fig = plt.figure() self._ax = self._fig.add_subplot(111) try: self._ax.set_axis_bgcolor("lightgrey") except AttributeError: self._ax.set_facecolor("lightgrey") self._ax.set_xlabel("Energy (keV)") self._ax.set_ylabel("Spectrum (Counts)") self._ax.grid(which="both") self._ax.set_yscale("log") self.plot_type_names = ["LinLog", "Linear"] self.energy_range_names = 
["selected", "full"] self.energy_range_fitting = "selected" self._ax.autoscale_view(tight=True) self._ax.legend(loc=2) self._color_config() self._fig.tight_layout(pad=0.5) self.max_v = 1.0 self.limit_cut = 100 self._fig_preview = Figure() self.plot_type_preview = PlotTypes.LINLOG self.energy_range_preview = EnergyRangePresets.SELECTED_RANGE self._fig_maps = Figure() self.map_type_preview = MapTypes.LINEAR self.map_axes_units_preview = MapAxesUnits.PIXELS def _color_config(self): self.plot_style = { "experiment": {"color": "blue", "linestyle": "", "marker": ".", "label": self.exp_data_label}, "background": {"color": "indigo", "marker": "+", "markersize": 1, "label": "background"}, "emission_line": {"color": "black", "linewidth": 2}, "roi_line": {"color": "red", "linewidth": 2}, "k_line": {"color": "green", "label": "k lines"}, "l_line": {"color": "magenta", "label": "l lines"}, "m_line": {"color": "brown", "label": "m lines"}, "compton": {"color": "darkcyan", "linewidth": 1.5, "label": "compton"}, "elastic": {"color": "purple", "label": "elastic"}, "escape": {"color": "darkblue", "label": "escape"}, "pileup": {"color": "darkgoldenrod", "label": "pileup"}, "userpeak": {"color": "orange", "label": "userpeak"}, "fit": {"color": "red", "label": "fit", "linewidth": 2.5}, "residual": {"color": "black", "label": "residual", "linewidth": 2.0}, } def plot_exp_data_update(self, change): self.plot_exp_opt = False self.show_exp_opt = False self.show_fit_opt = False self.element_id = 0 def init_mouse_event(self): self.t_bar = self._fig.canvas.toolbar self._fig.canvas.mpl_connect("button_press_event", self.canvas_onpress) def _update_canvas(self): self.init_mouse_event() self.plot_vertical_marker() self._ax.legend(loc=2) try: self._ax.legend(framealpha=0.2).set_draggable(True) except AttributeError: self._ax.legend(framealpha=0.2) self._fig.tight_layout(pad=0.5) self._ax.relim(visible_only=True) self._fig.canvas.draw() def _update_ylimit(self): self.log_range = [self.max_v * 1e-5, self.max_v * 2] self.linear_range = [0, self.max_v * 1.2] def exp_label_update(self, change): self.exp_data_label = change["value"] self.plot_style["experiment"]["label"] = change["value"] @observe("parameters") def _update_energy(self, change): if "coherent_sct_energy" not in self.param_model.param_new: return self.incident_energy = self.param_model.param_new["coherent_sct_energy"]["value"] def set_energy_range_fitting(self, energy_range_name): if energy_range_name not in self.energy_range_names: raise ValueError( f"Unknown energy range name {energy_range_name}. Allowed names: {self.energy_range_names}" ) self.energy_range_fitting = energy_range_name self.plot_experiment() def set_incident_energy(self, change): self.change_incident_energy(change["value"])
BSD 3-Clause New or Revised License
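A standalone sketch of the bound arithmetic inside change_incident_energy (no pyxrf import): the incident energy is rounded to 6 digits, a fixed 0.8 keV margin is added, and the result is rounded to 5 digits before being used as the new upper bound. The helper name and the sample energies are illustrative only.

def fitted_energy_upper_bound(energy_new, margin=0.8):
    energy_new = round(energy_new, ndigits=6)
    return round(energy_new + margin, ndigits=5)

print(fitted_energy_upper_bound(12.0))         # 12.8
print(fitted_energy_upper_bound(7.654321987))  # 8.45432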
cartodb/carto-python
carto/auth.py
APIKeyAuthClient.send
python
def send(self, relative_path, http_method, **requests_args):
    try:
        http_method, requests_args = self.prepare_send(http_method, **requests_args)
        response = super(APIKeyAuthClient, self).send(relative_path, http_method, **requests_args)
    except Exception as e:
        raise CartoException(e)

    if CartoRateLimitException.is_rate_limited(response):
        raise CartoRateLimitException(response)

    return response
Makes an API-key-authorized request

:param relative_path: URL path relative to self.base_url
:param http_method: HTTP method
:param requests_args: kwargs to be sent to requests
:type relative_path: str
:type http_method: str
:type requests_args: kwargs

:return: A request response object

:raise: CartoException
https://github.com/cartodb/carto-python/blob/8122c5510f8f93fb30f1917187452b5469f8fb3c/carto/auth.py#L128-L154
from gettext import gettext as _ import re import sys import warnings import pkg_resources from pyrestcli.auth import BaseAuthClient, BasicAuthClient from .exceptions import CartoException, CartoRateLimitException if sys.version_info >= (3, 0): from urllib.parse import urlparse else: from urlparse import urlparse class _UsernameGetter: def get_user_name(self, base_url): try: url_info = urlparse(base_url) m = re.search('^/user/([^/]+)/.*$', url_info.path) if m is None: netloc = url_info.netloc if netloc.startswith('www.'): netloc = netloc.split('www.')[1] m = re.search('^(.*?)\..*', netloc) return m.group(1) except Exception: raise CartoException(_("Could not find a valid user_name in the " + "base URL provided. Please check that the" + "URL is one of " + "'https://{user_name}.carto.com', " + "'https://carto.com/user/{user_name}' " + "or a similar one based on your domain")) class _BaseUrlChecker: def check_base_url(self, base_url): if not base_url.startswith("https"): warnings.warn("You are using unencrypted API key \ authentication!!!") if not base_url.endswith('/'): base_url += '/' return base_url class _ClientIdentifier: CARTO_VERSION = pkg_resources.require('carto')[0].version def get_user_agent(self, name='carto-python-sdk'): return "{name}/{version}".format( name=name, version=self.CARTO_VERSION) def get_client_identifier(self, prefix='cps'): return "{prefix}-{version}".format( prefix=prefix, version=self.CARTO_VERSION) class APIKeyAuthClient(_UsernameGetter, _BaseUrlChecker, _ClientIdentifier, BaseAuthClient): def __init__(self, base_url, api_key, organization=None, session=None, client_id=None, user_agent=None): self.organization = organization self.api_key = api_key base_url = self.check_base_url(base_url) self.username = self.get_user_name(base_url) if user_agent is None: self.user_agent = self.get_user_agent() else: self.user_agent = user_agent if client_id is None: self.client_id = self.get_client_identifier() else: self.client_id = client_id super(APIKeyAuthClient, self).__init__(base_url, session=session)
BSD 3-Clause New or Revised License
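A hedged usage sketch for APIKeyAuthClient.send; the account URL, API key, endpoint path and query are placeholders, and the import path follows the record's file path.

from carto.auth import APIKeyAuthClient

auth_client = APIKeyAuthClient(
    base_url="https://your-user-name.carto.com/",  # placeholder account URL
    api_key="your-api-key",                        # placeholder key
)

# send() forwards extra kwargs to requests, so query parameters go in `params`.
response = auth_client.send("api/v2/sql", "get", params={"q": "SELECT 1"})
print(response.status_code)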
robinmahieu/modmail-plugins
autorole/autorole.py
Autorole.on_member_join
python
async def on_member_join(self, member: discord.Member):
    if member.guild.id != self.bot.guild_id:
        return

    config = await self.db.find_one({"_id": "autorole-config"})

    if config is None:
        return

    try:
        role_ids = config["roles"]
    except KeyError:
        return logger.error("Something went wrong in the database!")

    if not role_ids:
        return

    roles = [member.guild.get_role(role_id) for role_id in role_ids]
    roles = [role for role in roles if role is not None]

    await member.add_roles(*roles)
Function that gets invoked whenever a member joins a server. It will look for a configuration document in the database and, if one is found, assign the configured roles to the new member.
https://github.com/robinmahieu/modmail-plugins/blob/755ab5ce3d6412cb6c82819b3acfec03ab5c13cc/autorole/autorole.py#L19-L44
import discord
from discord.ext import commands

from core import checks
from core.models import PermissionLevel, getLogger

logger = getLogger(__name__)


class Autorole(commands.Cog):
    def __init__(self, bot: commands.Bot):
        self.bot = bot
        self.db = self.bot.api.get_plugin_partition(self)

    @commands.Cog.listener()
MIT License
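The listener above is driven entirely by one document in the plugin's database partition; the shape below is inferred from the code, and the role IDs are placeholders.

# Document the listener expects to find via self.db.find_one():
autorole_config = {
    "_id": "autorole-config",
    "roles": [123456789012345678, 234567890123456789],  # placeholder role IDs
}

# On member join, each ID is resolved with guild.get_role(); IDs that no longer
# resolve are silently dropped before member.add_roles(*roles) is awaited.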
wkiri/demud
demud/dataset_libs.py
LIBSData.filter_data
python
def filter_data(self, data, labels):
    n = data.shape[1]
    newdata = data
    remove_ind = []
    printt("Filtering out data with large, broad features.")
    for i in range(n):
        waves = range(data.shape[0])
        this_data = data[waves,i]
        peak_ind = this_data.argmax()
        peak_wave = self.xvals[waves[peak_ind]]
        min_peak = 0.15 * this_data[peak_ind]
        red_waves = []
        while this_data[peak_ind] >= min_peak:
            red_waves = [waves[peak_ind]]
            low_value = 0.1 * this_data[peak_ind]
            filter_item = True
            min_wave_ind = peak_ind
            max_wave_ind = peak_ind
            for j in range(1,401):
                min_wave_ind = max(min_wave_ind-1, 0)
                max_wave_ind = min(max_wave_ind+1, len(waves)-1)
                red_waves += [waves[min_wave_ind]]
                red_waves += [waves[max_wave_ind]]
                if ((self.xvals[waves[min_wave_ind]+1] -
                     self.xvals[waves[min_wave_ind]]) > 1):
                    min_wave_ind += 1
                if ((self.xvals[waves[max_wave_ind]] -
                     self.xvals[waves[max_wave_ind]-1]) > 1):
                    max_wave_ind -= 1
                if (((self.xvals[waves[peak_ind]] -
                      self.xvals[waves[min_wave_ind]]) > 10) or
                    ((self.xvals[waves[max_wave_ind]] -
                      self.xvals[waves[peak_ind]]) > 10)):
                    filter_item = True
                    break
                if (this_data[min_wave_ind] <= low_value or
                    this_data[max_wave_ind] <= low_value):
                    filter_item = False
                    break
            [waves.remove(w) for w in red_waves if w in waves]
            if filter_item:
                print "Filter item %d (%s) due to [%.2f, %.2f] nm " % (i, labels[i], self.xvals[min(red_waves)], self.xvals[max(red_waves)])
                remove_ind += [i]
                '''
                # generate a plot, highlighting the problematic feature in red_waves
                pylab.clf()
                pylab.plot(self.xvals, data[:,i], 'k-', linewidth=1)
                pylab.plot(self.xvals[min(red_waves):max(red_waves)+1],
                           data[min(red_waves):max(red_waves)+1,i],
                           'r-', linewidth=1)
                pylab.xlabel(self.xlabel, fontsize=16)
                pylab.ylabel(self.ylabel, fontsize=16)
                pylab.xticks(fontsize=16)
                pylab.yticks(fontsize=16)
                pylab.title('Filtered item %d, %s' % (i, labels[i]))
                if not os.path.exists('filtered'):
                    os.mkdir('filtered')
                pylab.savefig(os.path.join('filtered', '%s-filtered-%d.pdf' % i))
                '''
                break
            else:
                this_data = data[waves,i]
                peak_ind = this_data.argmax()

    newdata = np.array([data[:,i] for i in range(data.shape[1])
                        if i not in remove_ind]).T
    newlabels = np.array([labels[i] for i in range(len(labels))
                          if i not in remove_ind])
    printt(" ... from %d to %d items (%d removed)." %
           (n, newdata.shape[1], n-newdata.shape[1]))

    n = newdata.shape[1]
    printt("Filtering out low-SNR data.")
    remove_ind = []
    for i in range(n):
        if max(newdata[:,i]) < 0.01:
            remove_ind +=[i]

    newdata = np.array([newdata[:,i] for i in range(newdata.shape[1])
                        if i not in remove_ind]).T
    newlabels = np.array([newlabels[i] for i in range(len(newlabels))
                          if i not in remove_ind])
    print " ... from %d to %d items (%d removed)." % (n, newdata.shape[1], n-newdata.shape[1])

    return (newdata, newlabels)
filter_data(data, labels)

Filter out bad quality data, using criteria provided by Nina Lanza:
1) Large, broad features (don't correspond to narrow peaks)
2) Low SNR

For each item thus filtered, write out a plot of the data with an explanation:
1) Annotate in red the large, broad feature, or
2) Annotate in text the SNR.

Returns updated (filtered) data and label arrays.
https://github.com/wkiri/demud/blob/603e0a647e0da5fdc4bcaa2f3a0b21bd4155a41d/demud/dataset_libs.py#L176-L334
import os, sys, pickle, fnmatch import pylab, csv, math, copy import numpy as np from dataset import * from log import printt class LIBSData(Dataset): def __init__(self, inputname=None, initfilename=None, startsol=-1, endsol=-1, initpriorsols=False, shotnoisefilt=0): input_type = inputname[-3:] if input_type == 'csv': filename = inputname expname = 'libs-' + os.path.splitext(os.path.basename(filename))[0] elif input_type == 'pkl': if shotnoisefilt > 0: filename = os.path.splitext(inputname)[0] + ('-snf%d.pkl' % shotnoisefilt) else: filename = inputname expname = 'libs-' + os.path.splitext(os.path.basename(filename))[0] else: input_type = 'dir' filename = os.path.join(inputname, 'libs-mean-norm.pkl') if shotnoisefilt > 0: filename = os.path.splitext(inputname)[0] + ('-snf%d.pkl' % shotnoisefilt) expname = 'libs-' + os.path.basename(inputname) Dataset.__init__(self, filename, expname, initfilename) printt('Reading %s data from %s.' % (input_type, self.filename)) if input_type == 'dir' and not os.path.exists(filename): LIBSData.read_dir(inputname, filename, shotnoisefilt) self.readin(startsol, endsol, initpriorsols, shotnoisefilt) def readin(self, startsol=-1, endsol=-1, initpriorsols=False, shotnoisefilt=0): input_type = os.path.splitext(self.filename)[1][1:] self.data = [] self.initdata = [] self.xlabel = 'Wavelength (nm)' self.ylabel = 'Intensity' if input_type == 'csv': (self.data, self.labels) = LIBSData.read_csv_data(self.filename) wavelengths = self.data[:,0] self.xvals = wavelengths.reshape(-1,1) self.data = self.data[:,1:] (self.data, self.xvals) = LIBSData.prune_and_normalize(self.data, self.xvals, shotnoisefilt) (self.data, self.labels) = self.filter_data(self.data, self.labels) elif input_type == 'pkl': inf = open(self.filename, 'r') (self.data, self.labels, self.xvals) = pickle.load(inf) inf.close() use = np.where(np.logical_and(self.xvals >= 270, self.xvals < 820))[0] self.xvals = self.xvals[use] self.data = self.data[use,:] (self.data, self.labels) = self.filter_data(self.data, self.labels) else: printt(' Error: Unknown input type for %s; no data read in' % self.filename) if self.initfilename != '': printt('Reading initialization data set from %s' % self.initfilename) (self.initdata, unused_labels) = LIBSData.read_csv_data(self.initfilename) wavelengths = self.initdata[:,0] self.initdata = self.initdata[:,1:] (self.initdata, unused_xvals) = LIBSData.prune_and_normalize(self.initdata, wavelengths, shotnoisefilt) print self.initdata.shape (self.initdata, unused_labels) = self.filter_data(self.initdata, unused_labels) print self.initdata.shape if startsol > -1 and endsol >=-1: printt("Analyzing data from sols %d-%d only." % (startsol, endsol)) current_sols = [i for (i,s) in enumerate(self.labels) if (int(s.split('_')[0][3:]) >= startsol and int(s.split('_')[0][3:]) <= endsol)] if initpriorsols: previous_sols = [i for (i,s) in enumerate(self.labels) if int(s.split('_')[0][3:]) < startsol] printt("Putting previous sols' (before %d) data in initialization model." % startsol) if self.initdata != []: print self.initdata.shape print self.data[:,previous_sols].shape self.initdata = np.hstack((self.initdata, self.data[:,previous_sols])) else: self.initdata = self.data[:,previous_sols] self.data = self.data[:,current_sols] self.labels = self.labels[current_sols]
Apache License 2.0
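A standalone sketch of the second (low-SNR) criterion only: drop any spectrum whose maximum intensity is below 0.01. The data and labels are synthetic, and this is not DEMUD's full filter (the broad-feature criterion is omitted).

import numpy as np

data = np.array([[0.5, 0.005, 0.2],
                 [0.3, 0.008, 0.9]])   # rows: wavelengths, columns: items
labels = np.array(["sol100_a", "sol100_b", "sol101_c"])

keep = data.max(axis=0) >= 0.01        # boolean mask over items
print(data[:, keep])                   # the item whose max is 0.008 is removed
print(labels[keep])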
shibing624/pycorrector
pycorrector/transformers/modeling_utils.py
PreTrainedModel.tie_weights
python
def tie_weights(self):
    output_embeddings = self.get_output_embeddings()
    if output_embeddings is not None and self.config.tie_word_embeddings:
        self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())

    if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
        if hasattr(self, self.base_model_prefix):
            self = getattr(self, self.base_model_prefix)
        self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)
Tie the weights between the input embeddings and the output embeddings. If the :obj:`torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we clone the weights instead.
https://github.com/shibing624/pycorrector/blob/779dd35e58b9662492273d0564dffcd716aa3be4/pycorrector/transformers/modeling_utils.py#L480-L494
import inspect import os import re import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union import torch from torch import Tensor, device, dtype, nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from .activations import get_activation from .configuration_utils import PretrainedConfig from .file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, cached_path, hf_bucket_url, is_remote_url, is_torch_tpu_available, replace_return_docstrings, ) from .generation_utils import GenerationMixin from pycorrector.utils.logger import logger try: from torch.nn import Identity except ImportError: class Identity(nn.Module): def __init__(self, *args, **kwargs): super().__init__() def forward(self, input): return input def find_pruneable_heads_and_indices( heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int] ) -> Tuple[Set[int], torch.LongTensor]: mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads for head in heads: head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: torch.LongTensor = torch.arange(len(mask))[mask].long() return heads, index class ModuleUtilsMixin: @staticmethod def _hook_rss_memory_pre_forward(module, *args, **kwargs): try: import psutil except (ImportError): raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_pre_forward = mem.rss return None @staticmethod def _hook_rss_memory_post_forward(module, *args, **kwargs): try: import psutil except (ImportError): raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_post_forward = mem.rss mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0) return None def add_memory_hooks(self): for module in self.modules(): module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) module.register_forward_hook(self._hook_rss_memory_post_forward) self.reset_memory_hooks_state() def reset_memory_hooks_state(self): for module in self.modules(): module.mem_rss_diff = 0 module.mem_rss_post_forward = 0 module.mem_rss_pre_forward = 0 @property def device(self) -> device: try: return next(self.parameters()).device except StopIteration: def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = self._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device @property def dtype(self) -> dtype: try: return next(self.parameters()).dtype except StopIteration: def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = self._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = 
encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) if self.dtype == torch.float16: encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4 elif self.dtype == torch.float32: encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9 else: raise ValueError( "{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format( self.dtype ) ) return encoder_extended_attention_mask def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor: if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: if self.config.is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat( [ torch.ones( (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype ), causal_mask, ], axis=-1, ) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def get_head_mask( self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False ) -> Tensor: if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) if is_attention_chunked is True: head_mask = head_mask.unsqueeze(-1) else: head_mask = [None] * num_hidden_layers return head_mask def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" head_mask = head_mask.to(dtype=self.dtype) return head_mask def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int: def parameter_filter(x): return (x.requires_grad or not only_trainable) and not ( isinstance(x, torch.nn.Embedding) and exclude_embeddings ) params = filter(parameter_filter, self.parameters()) if only_trainable else self.parameters() return sum(p.numel() for p in params) def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int: token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key] if token_inputs: return sum([token_input.numel() for token_input in token_inputs]) else: warnings.warn( "Could not estimate the number of tokens of the input, floating-point operations will not be computed" ) return 0 def floating_point_ops( self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True ) -> int: return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings) class 
PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin): config_class = None base_model_prefix = "" _keys_to_ignore_on_load_missing = None _keys_to_ignore_on_load_unexpected = None _keys_to_ignore_on_save = None @property def dummy_inputs(self) -> Dict[str, torch.Tensor]: return {"input_ids": torch.tensor(DUMMY_INPUTS)} def __init__(self, config: PretrainedConfig, *inputs, **kwargs): super().__init__() if not isinstance(config, PretrainedConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. " "To create a model from a pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ ) ) self.config = config self.name_or_path = config.name_or_path @property def base_model(self) -> nn.Module: return getattr(self, self.base_model_prefix, self) def get_input_embeddings(self) -> nn.Module: base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: return base_model.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value: nn.Module): base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: base_model.set_input_embeddings(value) else: raise NotImplementedError def get_output_embeddings(self) -> nn.Module: return None
Apache License 2.0
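A minimal illustration of weight tying (the idea behind _tie_or_clone_weights), not the pycorrector/transformers implementation itself; the vocabulary and hidden sizes are arbitrary.

import torch
import torch.nn as nn

vocab, hidden = 100, 16
input_embeddings = nn.Embedding(vocab, hidden)
output_embeddings = nn.Linear(hidden, vocab, bias=False)

# Tie: make the output layer reuse the exact same Parameter object.
output_embeddings.weight = input_embeddings.weight
assert output_embeddings.weight.data_ptr() == input_embeddings.weight.data_ptr()

# An update to one is now visible through the other.
with torch.no_grad():
    input_embeddings.weight[0, 0] = 1.23
print(output_embeddings.weight[0, 0])  # reflects the update made above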
denisenkom/django-sqlserver
tests/raw_query/tests.py
RawQueryTests.assertNoAnnotations
python
def assertNoAnnotations(self, results):
    self.assertAnnotations(results, ())
The results of a raw query contain no annotations
https://github.com/denisenkom/django-sqlserver/blob/f5d5dc8637799746f1bd11bd8c479d3acd468581/tests/raw_query/tests.py#L78-L82
from __future__ import unicode_literals from datetime import date from decimal import Decimal from django.db.models.query import RawQuerySet from django.db.models.query_utils import InvalidQuery from django.test import TestCase, skipUnlessDBFeature from .models import Author, Book, BookFkAsPk, Coffee, FriendlyAuthor, Reviewer class RawQueryTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(first_name='Joe', last_name='Smith', dob=date(1950, 9, 20)) cls.a2 = Author.objects.create(first_name='Jill', last_name='Doe', dob=date(1920, 4, 2)) cls.a3 = Author.objects.create(first_name='Bob', last_name='Smith', dob=date(1986, 1, 25)) cls.a4 = Author.objects.create(first_name='Bill', last_name='Jones', dob=date(1932, 5, 10)) cls.b1 = Book.objects.create( title='The awesome book', author=cls.a1, paperback=False, opening_line='It was a bright cold day in April and the clocks were striking thirteen.', ) cls.b2 = Book.objects.create( title='The horrible book', author=cls.a1, paperback=True, opening_line=( 'On an evening in the latter part of May a middle-aged man ' 'was walking homeward from Shaston to the village of Marlott, ' 'in the adjoining Vale of Blakemore, or Blackmoor.' ), ) cls.b3 = Book.objects.create( title='Another awesome book', author=cls.a1, paperback=False, opening_line='A squat grey building of only thirty-four stories.', ) cls.b4 = Book.objects.create( title='Some other book', author=cls.a3, paperback=True, opening_line='It was the day my grandmother exploded.', ) cls.c1 = Coffee.objects.create(brand='dunkin doughnuts') cls.c2 = Coffee.objects.create(brand='starbucks') cls.r1 = Reviewer.objects.create() cls.r2 = Reviewer.objects.create() cls.r1.reviewed.add(cls.b2, cls.b3, cls.b4) def assertSuccessfulRawQuery(self, model, query, expected_results, expected_annotations=(), params=[], translations=None): results = list(model.objects.raw(query, params=params, translations=translations)) self.assertProcessed(model, results, expected_results, expected_annotations) self.assertAnnotations(results, expected_annotations) def assertProcessed(self, model, results, orig, expected_annotations=()): self.assertEqual(len(results), len(orig)) for index, item in enumerate(results): orig_item = orig[index] for annotation in expected_annotations: setattr(orig_item, *annotation) for field in model._meta.fields: self.assertEqual( getattr(item, field.attname), getattr(orig_item, field.attname) ) self.assertEqual( type(getattr(item, field.attname)), type(getattr(orig_item, field.attname)) )
MIT License
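How the helper above is typically used inside the same TestCase: run a raw query that selects only model columns and assert that no extra annotations were attached. The table name is assumed to follow Django's default <app>_<model> convention for this test app.

def test_simple_raw_query_has_no_annotations(self):
    # Selecting only the model's own columns should yield objects without annotations.
    results = list(Author.objects.raw("SELECT * FROM raw_query_author"))
    self.assertNoAnnotations(results)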
genomicsengland/gelreportmodels
protocols_utils/code_generation/process_schemas.py
SchemaProcessor.getSchemaFromGitHub
python
def getSchemaFromGitHub(self):
    url = "https://github.com/ga4gh/schemas/archive/{0}.tar.gz".format(
        self.version)
    self.download(url, self.sourceTar)
    with tarfile.open(self.sourceTar, "r") as tarball:
        tarball.extractall(self.sourceDir)
Downloads a tagged version of the schemas from the official GitHub repo.
https://github.com/genomicsengland/gelreportmodels/blob/879bf5dd6d16efc274257e1c3f527d6b7459fa45/protocols_utils/code_generation/process_schemas.py#L412-L421
from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import glob import json import os.path import re import sys import tarfile import textwrap import avro.schema from . import utils from protocols.util.dependency_manager import DependencyManager HEADER_COMMENT = """ DO NOT EDIT THIS FILE!! This file is automatically generated by the process_schemas.py program in the scripts directory. It is not intended to be edited directly. If you need to update the GEL protocol classes, please run the script on the appropriate schema version. """ class SchemaClass(object): def __init__(self, sourceFile): self.sourceFile = sourceFile with open(sourceFile) as sf: self.schemaSource = sf.read() if sys.version_info.major > 2: self.schema = avro.schema.Parse(self.schemaSource) else: self.schema = avro.schema.parse(self.schemaSource) self.name = self.schema.name def getFields(self): return sorted(self.schema.fields, key=lambda f: f.name) def isSearchRequest(self): return re.search('Search.+Request', self.name) is not None def isSearchResponse(self): return re.search('Search.+Response', self.name) is not None def getValueListName(self): assert self.isSearchResponse() names = [field.name for field in self.getFields()] names.remove('nextPageToken') assert len(names) == 1 return names[0] def getEmbeddedTypes(self): ret = [] if isinstance(self.schema, avro.schema.RecordSchema): for field in self.getFields(): if isinstance(field.type, avro.schema.ArraySchema): if isinstance(field.type.items, avro.schema.RecordSchema): ret.append((field.name, field.type.items.name)) elif isinstance(field.type, avro.schema.MapSchema): if isinstance(field.type.values, avro.schema.RecordSchema): ret.append((field.name, field.type.values.name)) elif isinstance(field.type, avro.schema.RecordSchema): ret.append((field.name, field.type.name)) elif isinstance(field.type, avro.schema.UnionSchema): t0 = field.type.schemas[0] t1 = field.type.schemas[1] if (isinstance(t0, avro.schema.PrimitiveSchema) and t0.type == "null"): if isinstance(t1, avro.schema.ArraySchema): if isinstance(t1.items, avro.schema.RecordSchema): ret.append((field.name, t1.items.name)) elif isinstance(t1, avro.schema.MapSchema): if isinstance(t1.values, avro.schema.RecordSchema): ret.append((field.name, t1.values.name)) elif isinstance(t1, avro.schema.RecordSchema): ret.append((field.name, t1.name)) else: raise Exception("Schema union assumptions violated") return ret def formatSchema(self): schema = json.loads(self.schemaSource) stack = [schema] while len(stack) > 0: elm = stack.pop() if "doc" in elm: elm["doc"] = "" for value in elm.values(): if isinstance(value, dict): stack.append(value) elif isinstance(value, list): for dic in value: if isinstance(dic, dict): stack.append(dic) jsonData = json.dumps(schema) output = "\n".join(textwrap.wrap(text=jsonData, width=100, break_long_words=False, break_on_hyphens=False)) + "\n" return output def writeRequiredFields(self, outputFile): fields = [] for field in self.getFields(): if not field.has_default: fields.append(field) if len(fields) < 1: self._writeWithIndent('requiredFields = {}', outputFile) else: self._writeWithIndent('requiredFields = {', outputFile) for field in fields: string_ = '"{0}",'.format(field.name) self._writeWithIndent(string_, outputFile, 2) self._writeWithIndent('}', outputFile) def writeConstructor(self, outputFile): slotString = "'" + "', '".join( [field.name for field in self.getFields()]) + "'" self._writeWithIndent("__slots__ = 
[", outputFile) self._writeWrappedWithIndent(slotString, outputFile, 2) self._writeWithIndent("]", outputFile) self._writeNewline(outputFile) self._writeWithIndent("def __init__(self, **kwargs):", outputFile) for field in self.getFields(): string_ = "self.{} = kwargs.get(".format(field.name) self._writeWithIndent(string_, outputFile, 2) if str(field.type) == '"string"': if field.has_default and field.default is not None: string_ = "'{}', '{}')".format(field.name, field.default) else: string_ = "'{}', {})".format(field.name, None) self._writeWithIndent(string_, outputFile, 3) elif type(field.type) == avro.schema.RecordSchema and field.type.type == 'record': string_ = "'{}', {}())".format(field.name, field.type.name) self._writeWithIndent(string_, outputFile, 3) else: if field.has_default and field.default is not None: string_ = "'{}', {})".format(field.name, field.default) else: string_ = "'{}', {})".format(field.name, None) self._writeWithIndent(string_, outputFile, 3) def writeEmbeddedTypesClassMethods(self, outputFile): def writeEmbeddedTypes(): et = self.getEmbeddedTypes() if len(et) == 0: string = "embeddedTypes = {}" self._writeWithIndent(string, outputFile, 2) else: string = "embeddedTypes = {" self._writeWithIndent(string, outputFile, 2) for fn, ft in self.getEmbeddedTypes(): string = "'{0}': {1},".format(fn, ft) self._writeWithIndent(string, outputFile, 3) self._writeWithIndent("}", outputFile, 2) self._writeWithIndent("@classmethod", outputFile) self._writeWithIndent("def isEmbeddedType(cls, fieldName):", outputFile) writeEmbeddedTypes() self._writeWithIndent("return fieldName in embeddedTypes", outputFile, 2) self._writeNewline(outputFile) self._writeWithIndent("@classmethod", outputFile) self._writeWithIndent("def getEmbeddedType(cls, fieldName):", outputFile) writeEmbeddedTypes() self._writeNewline(outputFile) self._writeWithIndent("return embeddedTypes[fieldName]", outputFile, 2) self._writeNewline(outputFile) def write(self, outputFile): superclass = "ProtocolElement" if isinstance(self.schema, avro.schema.EnumSchema): superclass = "object" elif self.isSearchRequest(): superclass = "SearchRequest" elif self.isSearchResponse(): superclass = "SearchResponse" self._writeNewline(outputFile, 2) string = "class {0}({1}):".format(self.schema.name, superclass) print(string, file=outputFile) doc = self.schema.doc if doc is None: doc = "No documentation" self._writeWithIndent('"""', outputFile) self._writeWrappedWithIndent(re.sub(r'[^\x00-\x7F]+', ' ', doc), outputFile) self._writeWithIndent('"""', outputFile) if isinstance(self.schema, avro.schema.RecordSchema): string = '_schemaSource = """\n{0}"""'.format( self.formatSchema()) self._writeWithIndent(string, outputFile) string = 'schema = avro_parse(_schemaSource)' self._writeWithIndent(string, outputFile) self.writeRequiredFields(outputFile) if self.isSearchResponse(): string = '_valueListName = "{0}"'.format( self.getValueListName()) self._writeWithIndent(string, outputFile) self._writeNewline(outputFile) self.writeEmbeddedTypesClassMethods(outputFile) self.writeConstructor(outputFile) elif isinstance(self.schema, avro.schema.EnumSchema): for symbol in self.schema.symbols: string = '{0} = "{0}"'.format(symbol, symbol) self._writeWithIndent(string, outputFile) self._writeNewline(outputFile) self._writeWithIndent("def __hash__(self):", outputFile, indentLevel=1) self._writeWithIndent("return str(self).__hash__()", outputFile, indentLevel=2) def _writeWithIndent(self, string_, outputFile, indentLevel=1): indent = " " * (indentLevel * 
4) toWrite = "{}{}".format(indent, string_) print(toWrite, file=outputFile) def _writeWrappedWithIndent(self, string_, outputFile, indentLevel=1): indent = " " * (indentLevel * 4) toWrite = textwrap.fill(string_, initial_indent=indent, subsequent_indent=indent, break_long_words=False, break_on_hyphens=False) print(toWrite, file=outputFile) def _writeNewline(self, outputFile, numNewlines=1): toWrite = "\n" * (numNewlines - 1) print(toWrite, file=outputFile) class SchemaGenerator(object): def __init__(self, version, schemaDir, outputFile, verbosity): self.version = version self.schemaDir = schemaDir self.outputFile = outputFile self.verbosity = verbosity self.classes = [] for avscFile in glob.glob(os.path.join(self.schemaDir, "*.avsc")): self.classes.append(SchemaClass(avscFile)) requestClassNames = [ cls.name for cls in self.classes if cls.isSearchRequest()] responseClassNames = [ cls.name for cls in self.classes if cls.isSearchResponse()] self.postSignatures = [] for request, response in zip( requestClassNames, responseClassNames): objname = re.search('Search(.+)Request', request).groups()[0] url = '/{0}/search'.format(objname.lower()) tup = (url, request, response) self.postSignatures.append(tup) self.postSignatures.sort() def writeHeader(self, outputFile): print('"""{0}"""'.format(HEADER_COMMENT), file=outputFile) print("from protocols.protocol import ProtocolElement", file=outputFile) print("from protocols.protocol import SearchRequest", file=outputFile) print("from protocols.protocol import SearchResponse", file=outputFile) print("from protocols.protocol import avro_parse", file=outputFile) print(file=outputFile) print("import avro.schema", file=outputFile) print(file=outputFile) if self.version[0].lower() == 'v' and self.version.find('.') != -1: versionStr = self.version[1:] else: versionStr = self.version print("version = '{0}'".format(versionStr), file=outputFile) def write(self): with open(self.outputFile, "w") as outputFile: self.writeHeader(outputFile) names = [cls.name for cls in self.classes] classes = dict([(cls.name, cls) for cls in self.classes]) for name in sorted(names): if self.verbosity > 1: utils.log(name) cls = classes[name] cls.write(outputFile) class SchemaProcessor(object): def __init__(self, args): self.version = args.version self.destinationFile = args.outputFile self.verbosity = args.verbose self.avroJarPath = args.avro_tools_jar self.sourceDir = args.inputSchemasDirectory self.schemaDir = self.sourceDir self.sourceTar = os.path.join(self.sourceDir, "schemas.tar.gz") self.avroJar = os.path.join(self.schemaDir, "avro-tools.jar") def cleanup(self): if self.verbosity > 0: utils.log("Cleaning up dir {}".format(self.sourceDir)) sources = os.listdir(self.sourceDir) for source in sources: if source.endswith(".avsc"): os.remove(os.path.join(self.sourceDir, source)) def download(self, url, destination): fileDownloader = utils.FileDownloader(url, destination) fileDownloader.download() def convertAvro(self, avdlFile): args = ["java", "-jar", self.avroJar, "idl2schemata", avdlFile] if self.verbosity > 0: utils.log("converting {}".format(avdlFile)) utils.log("running: {}".format(" ".join(args))) if self.verbosity > 0: utils.runCommandSplits(args) else: utils.runCommandSplits(args, silent=True) def setupAvroJar(self): if self.avroJarPath is not None: self.avroJar = os.path.abspath(self.avroJarPath) else: url = "http://www.carfab.com/apachesoftware/avro/stable/java/" "avro-tools-1.7.7.jar" self.download(url, self.avroJar)
Apache License 2.0
yandex-cloud/python-sdk
yandex/cloud/compute/v1/host_type_service_pb2_grpc.py
HostTypeServiceStub.__init__
python
def __init__(self, channel):
    self.Get = channel.unary_unary(
        '/yandex.cloud.compute.v1.HostTypeService/Get',
        request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_host__type__service__pb2.GetHostTypeRequest.SerializeToString,
        response_deserializer=yandex_dot_cloud_dot_compute_dot_v1_dot_host__type__pb2.HostType.FromString,
    )
    self.List = channel.unary_unary(
        '/yandex.cloud.compute.v1.HostTypeService/List',
        request_serializer=yandex_dot_cloud_dot_compute_dot_v1_dot_host__type__service__pb2.ListHostTypesRequest.SerializeToString,
        response_deserializer=yandex_dot_cloud_dot_compute_dot_v1_dot_host__type__service__pb2.ListHostTypesResponse.FromString,
    )
Constructor.

Args:
    channel: A grpc.Channel.
https://github.com/yandex-cloud/python-sdk/blob/6ddaaaf0ad01d8fc36cb72957f70a6e7943a5ce7/yandex/cloud/compute/v1/host_type_service_pb2_grpc.py#L13-L28
import grpc

from yandex.cloud.compute.v1 import host_type_pb2 as yandex_dot_cloud_dot_compute_dot_v1_dot_host__type__pb2
from yandex.cloud.compute.v1 import host_type_service_pb2 as yandex_dot_cloud_dot_compute_dot_v1_dot_host__type__service__pb2


class HostTypeServiceStub(object):
MIT License
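A minimal usage sketch for the HostTypeServiceStub above, not taken from the repository: the endpoint is a placeholder and real calls also need IAM-token metadata, which is omitted here. The request class names come from the serializers in the stub itself.

import grpc

from yandex.cloud.compute.v1 import host_type_service_pb2
from yandex.cloud.compute.v1.host_type_service_pb2_grpc import HostTypeServiceStub

# Placeholder endpoint; authentication metadata (IAM token) is omitted in this sketch.
channel = grpc.secure_channel("compute.api.cloud.yandex.net:443", grpc.ssl_channel_credentials())
stub = HostTypeServiceStub(channel)
response = stub.List(host_type_service_pb2.ListHostTypesRequest())
print(response)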
raiden-network/raiden
raiden/network/proxies/token_network_registry.py
TokenNetworkRegistry.get_token_network
python
def get_token_network(
    self, token_address: TokenAddress, block_identifier: BlockIdentifier
) -> Optional[TokenNetworkAddress]:
    typecheck(token_address, T_TargetAddress)

    address = self.proxy.functions.token_to_token_networks(token_address).call(
        block_identifier=block_identifier
    )
    address = to_canonical_address(address)

    if address == NULL_ADDRESS_BYTES:
        return None

    return address
Return the token network address for the given token or None if there is no corresponding address.
https://github.com/raiden-network/raiden/blob/3414e745f0bbbbd69b5893c1b53e3808110c870c/raiden/network/proxies/token_network_registry.py#L84-L100
from typing import Any, List, Optional import structlog from eth_utils import to_canonical_address from web3.exceptions import BadFunctionCallOutput from raiden.constants import BLOCK_ID_LATEST, NULL_ADDRESS_BYTES from raiden.exceptions import ( AddressWithoutCode, BrokenPreconditionError, InvalidChannelParticipantDepositLimit, InvalidToken, InvalidTokenAddress, InvalidTokenNetworkDepositLimit, MaxTokenNetworkNumberReached, RaidenRecoverableError, RaidenUnrecoverableError, ) from raiden.network.proxies.metadata import SmartContractMetadata from raiden.network.proxies.token import Token from raiden.network.proxies.utils import raise_on_call_returned_empty from raiden.network.rpc.client import ( JSONRPCClient, check_address_has_code_handle_pruned_block, check_transaction_failure, was_transaction_successfully_mined, ) from raiden.utils.formatting import format_block_id, to_checksum_address from raiden.utils.smart_contracts import safe_gas_limit from raiden.utils.typing import ( TYPE_CHECKING, Address, BlockIdentifier, BlockNumber, Dict, SecretRegistryAddress, T_TargetAddress, TokenAddress, TokenAmount, TokenNetworkAddress, TokenNetworkRegistryAddress, TransactionHash, Tuple, typecheck, ) from raiden_contracts.constants import CONTRACT_SECRET_REGISTRY, CONTRACT_TOKEN_NETWORK_REGISTRY if TYPE_CHECKING: from raiden.network.proxies.proxy_manager import ProxyManager log = structlog.get_logger(__name__) class TokenNetworkRegistry: def __init__( self, rpc_client: JSONRPCClient, metadata: SmartContractMetadata, proxy_manager: "ProxyManager", block_identifier: BlockIdentifier, ) -> None: check_address_has_code_handle_pruned_block( client=rpc_client, address=Address(metadata.address), contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY, given_block_identifier=block_identifier, ) proxy = rpc_client.new_contract_proxy( abi=metadata.abi, contract_address=Address(metadata.address) ) self.address = TokenNetworkRegistryAddress(metadata.address) self.proxy_manager = proxy_manager self.rpc_client = rpc_client self.gas_measurements = metadata.gas_measurements self.metadata = metadata self.node_address = self.rpc_client.address self.proxy = proxy
MIT License
conchylicultor/musicgenerator
deepmusic/songstruct.py
Note.get_relative_note
python
def get_relative_note(self):
    return self.note - MIDI_NOTES_RANGE[0]
Convert the absolute midi position into the range given by MIDI_NOTES_RANGE

Return
    int: The new position relative to the range (position on keyboard)
https://github.com/conchylicultor/musicgenerator/blob/adea76dccaba923b7d3807082ec6f5b512d16bb9/deepmusic/songstruct.py#L40-L45
import operator

MIDI_NOTES_RANGE = [21, 108]
NB_NOTES = MIDI_NOTES_RANGE[1] - MIDI_NOTES_RANGE[0] + 1
BAR_DIVISION = 16


class Note:
    def __init__(self):
        self.tick = 0
        self.note = 0
        self.duration = 32
Apache License 2.0
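A small worked example of the conversion above (values chosen here, not from the repository): with MIDI_NOTES_RANGE = [21, 108], middle C maps to keyboard position 39.

from deepmusic.songstruct import Note

note = Note()
note.note = 60                   # middle C as an absolute MIDI pitch
print(note.get_relative_note())  # 60 - 21 == 39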
doccano/doccano-client
doccano_api_client/__init__.py
DoccanoClient.get_document_detail
python
def get_document_detail(self, project_id: int, doc_id: int) -> requests.models.Response:
    return self.get(
        "v1/projects/{project_id}/docs/{doc_id}".format(project_id=project_id, doc_id=doc_id)
    )
Gets details of a given document.

Args:
    project_id (int): The project id.
    doc_id (int): A document ID to query.

Returns:
    requests.models.Response: The request response.
https://github.com/doccano/doccano-client/blob/bb4c9ba8462114c2d7c9c0b2fa837d85477867d9/doccano_api_client/__init__.py#L490-L502
import os import typing from urllib.parse import urljoin import requests class _Router: def get( self, endpoint: str, params: dict = {}, ) -> requests.models.Response: request_url = urljoin(self.baseurl, endpoint) return self._get(request_url, params=params).json() def get_file( self, endpoint: str, params: dict = {}, headers: dict = {}, ) -> requests.models.Response: request_url = urljoin(self.baseurl, endpoint) return self._get(request_url, params=params, headers=headers) def _get( self, url: str, params: dict = {}, headers: dict = {}, ) -> requests.models.Response: return self.session.get(url, params=params, headers=headers) def post( self, endpoint: str, data: dict = {}, json: dict = {}, files: dict = {}, headers: typing.Optional[dict] = None, as_json=True, ) -> requests.models.Response: if json and data: return "Error: cannot have both data and json" request_url = urljoin(self.baseurl, endpoint) result = self.session.post(request_url, data=data, files=files, json=json, headers=headers) if as_json: return result.json() return result def delete( self, endpoint: str, data: typing.Optional[dict] = None, files: typing.Optional[dict] = None, headers: typing.Optional[dict] = None, ) -> requests.models.Response: request_url = urljoin(self.baseurl, endpoint) return self.session.delete(request_url, data=data, files=files, headers=headers) def update(self, endpoint: str, data: dict = {}) -> requests.models.Response: request_url = urljoin(self.baseurl, endpoint) return self.session.patch(request_url, data=data) def build_url_parameter(self, url_parameter: dict) -> str: return "".join( [ "?", "&".join( [ "&".join(["=".join([tup[0], str(value)]) for value in tup[1]]) for tup in url_parameter.items() ] ), ] ) class DoccanoClient(_Router): def __init__(self, baseurl: str, username: str, password: str): self.baseurl = baseurl if baseurl[-1] == "/" else baseurl + "/" self.session = requests.Session() self._login(username, password) def _login(self, username: str, password: str) -> requests.models.Response: url = "v1/auth/login/" auth = {"username": username, "password": password} response = self.post(url, auth) self._set_csrf_header() return response def _set_csrf_header(self): csrf = self.session.cookies.get("csrftoken") self.session.headers["X-CSRFToken"] = csrf def get_me(self) -> requests.models.Response: return self.get("v1/me") def get_features(self) -> requests.models.Response: return self.get("v1/features") def get_project_list(self) -> requests.models.Response: return self.get("v1/projects") def create_project( self, name: str, description: str = "", project_type: str = "DocumentClassification", guideline: str = "", resourcetype: str = "TextClassificationProject", randomize_document_order: bool = False, collaborative_annotation: bool = False, ) -> requests.models.Response: payload = { "name": name, "description": description, "project_type": project_type, "guideline": guideline, "resourcetype": resourcetype, "randomize_document_order": randomize_document_order, "collaborative_annotation": collaborative_annotation, } return self.post("v1/projects", data=payload) def delete_project(self, project_id: int) -> requests.models.Response: url = "v1/projects/{}".format(project_id) return self.delete(url) def update_project( self, project_id: int, name: str, description: str = "", project_type: str = "DocumentClassification", guideline: str = "", resourcetype: str = "TextClassificationProject", randomize_document_order: bool = False, collaborative_annotation: bool = False, ) -> requests.models.Response: 
url = "v1/projects/{}".format(project_id) payload = { "name": name, "description": description, "project_type": project_type, "guideline": guideline, "resourcetype": resourcetype, "randomize_document_order": randomize_document_order, "collaborative_annotation": collaborative_annotation, } return self.update(url, data=payload) def create_document( self, project_id: int, text: str, annotations: list = [], annotation_approver: str = None ) -> requests.models.Response: url = "v1/projects/{}/docs".format(project_id) data = { "text": text, "annotations": annotations, "annotation_approver": annotation_approver, } return self.post(url, data=data) def delete_document( self, project_id: int, document_id: int, ) -> requests.models.Response: url = "v1/projects/{}/docs/{}".format(project_id, document_id) return self.delete(url) def delete_annotation( self, project_id: int, document_id: int, annotation_id: int, ) -> requests.models.Response: url = "v1/projects/{}/docs/{}/annotations/{}".format(project_id, document_id, annotation_id) return self.delete(url) def create_label( self, project_id: int, text: str, text_color: str = "#ffffff", background_color: str = "#cdcdcd", prefix_key: str = None, suffix_key: str = None, ) -> requests.models.Response: url = "v1/projects/{}/labels".format(project_id) label_payload = { "projectId": project_id, "text": text, "prefix_key": prefix_key, "suffix_key": suffix_key, "background_color": background_color, "text_color": text_color, } try: return self.post(url, data=label_payload) except Exception as e: return "Failed (duplicate?): {}".format(e) def add_annotation( self, project_id: int, annotation_id: int, document_id: int, **kwargs ) -> requests.models.Response: url = "/v1/projects/{p_id}/docs/{d_id}/annotations".format( p_id=project_id, d_id=document_id ) payload = {"label": annotation_id, "projectId": project_id, **kwargs} return self.post(url, json=payload) def get_user_list(self) -> requests.models.Response: return self.get("v1/users") def get_roles(self) -> requests.models.Response: return self.get("v1/roles") def get_project_detail(self, project_id: int) -> requests.models.Response: return self.get("v1/projects/{project_id}".format(project_id=project_id)) def get_project_statistics(self, project_id: int) -> requests.models.Response: return self.get("v1/projects/{project_id}/statistics".format(project_id=project_id)) def get_label_list(self, project_id: int) -> requests.models.Response: return self.get("v1/projects/{project_id}/labels".format(project_id=project_id)) def get_label_detail(self, project_id: int, label_id: int) -> requests.models.Response: return self.get( "v1/projects/{project_id}/labels/{label_id}".format( project_id=project_id, label_id=label_id ) ) def get_document_list( self, project_id: int, url_parameters: dict = {} ) -> requests.models.Response: return self.get( "v1/projects/{project_id}/docs{url_parameters}".format( project_id=project_id, url_parameters=self.build_url_parameter(url_parameters) ) )
MIT License
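A usage sketch for get_document_detail above; the host, credentials, and IDs are placeholders invented for illustration. The constructor signature (baseurl, username, password) is taken from the class shown in the context.

from doccano_api_client import DoccanoClient

# Placeholder host, credentials, and IDs.
client = DoccanoClient("http://localhost:8000", "admin", "password")
doc = client.get_document_detail(project_id=1, doc_id=42)
print(doc)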
flags/reactor-3
bad_numbers.py
clip
python
def clip(number, start, end):
    return max(start, min(number, end))
Returns `number`, but makes sure it's in the range of [start..end]
https://github.com/flags/reactor-3/blob/b41a2904c9ec8cc14bcee03611602d0e568acf12/bad_numbers.py#L12-L14
from globals import *
from math import *

import pathfinding
import render_los
import logging
import random
import numpy
import tiles
import time
import maps
MIT License
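A quick illustration of the clamp above, with values chosen here rather than taken from the game code:

from bad_numbers import clip

clip(15, 0, 10)   # -> 10 (clamped to the upper bound)
clip(-3, 0, 10)   # -> 0  (clamped to the lower bound)
clip(7, 0, 10)    # -> 7  (already inside the range)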
compas-dev/compas
src/compas/topology/combinatorics.py
vertex_coloring
python
def vertex_coloring(adjacency):
    key_to_color = {}
    key_to_degree = {key: len(adjacency[key]) for key in adjacency}
    vertices = sorted(adjacency.keys(), key=lambda key: key_to_degree[key])
    uncolored = deque(vertices[::-1])
    current_color = 0
    while uncolored:
        a = uncolored.popleft()
        key_to_color[a] = current_color
        colored_with_current = [a]
        for b in uncolored:
            if not any(b in adjacency[key] for key in colored_with_current):
                key_to_color[b] = current_color
                colored_with_current.append(b)
        for key in colored_with_current[1:]:
            uncolored.remove(key)
        current_color += 1
    return key_to_color
Color the vertices of a network such that no two adjacent vertices have the same color.

Parameters
----------
adjacency : dict
    An adjacency dictionary mapping each vertex to its neighbours
    (for example the ``adjacency`` attribute of a :class:`compas.datastructures.Network`).

Notes
-----
For more info, see [1]_.

References
----------
.. [1] Chu-Carroll, M. *Graph Coloring Algorithms*.
       Available at: http://scienceblogs.com/goodmath/2007/06/28/graph-coloring-algorithms-1/.

Warnings
--------
This is a greedy algorithm, so it might be slow for large networks.

Examples
--------
>>> import compas
>>> from compas.datastructures import Network
>>> network = Network.from_obj(compas.get('lines.obj'))
>>> key_color = vertex_coloring(network.adjacency)
>>> key = network.get_any_node()
>>> color = key_color[key]
>>> any(key_color[nbr] == color for nbr in network.neighbors(key))
False
https://github.com/compas-dev/compas/blob/d795a8bfe9f21ffa124d09e37e9c0ed2e3520057/src/compas/topology/combinatorics.py#L15-L63
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

from collections import deque

from compas.topology import breadth_first_traverse

__all__ = [
    'vertex_coloring',
    'connected_components',
]
MIT License
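A self-contained sketch of the greedy coloring above on a plain adjacency dict, avoiding the Network object used in the docstring example; the import path is assumed from the file path src/compas/topology/combinatorics.py, and the graph is invented here.

from compas.topology.combinatorics import vertex_coloring

# A 4-cycle: 0-1, 1-2, 2-3, 3-0. Plain dicts and lists are enough.
adjacency = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [2, 0]}
key_to_color = vertex_coloring(adjacency)
print(key_to_color)  # two color classes; the exact assignment depends on iteration order
assert all(key_to_color[u] != key_to_color[v] for u in adjacency for v in adjacency[u])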
openmined/pyariesfl
aries_cloudagent/messaging/models/base.py
BaseModel.__init__
python
def __init__(self):
    if not self.Meta.schema_class:
        raise TypeError(
            "Can't instantiate abstract class {} with no schema_class".format(
                self.__class__.__name__
            )
        )
Initialize BaseModel.

Raises:
    TypeError: If schema_class is not set on Meta
https://github.com/openmined/pyariesfl/blob/dd78dcebc771971abfee301b80cdd5d246c14840/aries_cloudagent/messaging/models/base.py#L76-L89
import logging from abc import ABC import json from typing import Union from marshmallow import Schema, post_dump, pre_load, post_load, ValidationError from ...classloader import ClassLoader from ...error import BaseError LOGGER = logging.getLogger(__name__) def resolve_class(the_cls, relative_cls: type = None): resolved = None if isinstance(the_cls, type): resolved = the_cls elif isinstance(the_cls, str): default_module = relative_cls and relative_cls.__module__ resolved = ClassLoader.load_class(the_cls, default_module) return resolved def resolve_meta_property(obj, prop_name: str, defval=None): cls = obj.__class__ found = defval while cls: Meta = getattr(cls, "Meta", None) if Meta and hasattr(Meta, prop_name): found = getattr(Meta, prop_name) break cls = cls.__bases__[0] if cls is object: break return found class BaseModelError(BaseError): class BaseModel(ABC): class Meta: schema_class = None
Apache License 2.0
commvault/cvpysdk
cvpysdk/alert.py
Alert.alert_name
python
def alert_name(self, name):
    if not isinstance(name, basestring):
        raise SDKException('Alert', '101')

    self._alert_name = name
    self._modify_alert_properties()
Modifies the Alert name
https://github.com/commvault/cvpysdk/blob/66df30e6e31d619812b7756cb4f7e130b220a08f/cvpysdk/alert.py#L942-L948
from __future__ import absolute_import from __future__ import unicode_literals import xml.etree.ElementTree as ET from past.builtins import basestring from .exception import SDKException class Alerts(object): def __init__(self, commcell_object): self._commcell_object = commcell_object self._ALERTS = commcell_object._services['GET_ALL_ALERTS'] self._cvpysdk_object = commcell_object._cvpysdk_object self._services = commcell_object._services self._update_response_ = commcell_object._update_response_ self._alerts = None self._notification_types = { 'email': 1, 'snmp': 4, 'event viewer': 8, 'save to disk': 512, 'rss feeds': 1024, 'console alerts': 8192, 'scom': 32768, 'workflow': 65536, 'content indexing': 131072 } self.refresh() def __str__(self): representation_string = "{:^5}\t{:^50}\t{:^80}\t{:^30}\n\n".format( 'S. No.', 'Alert', 'Description', 'Category' ) for index, alert_name in enumerate(self._alerts): alert_description = self._alerts[alert_name]['description'] alert_category = self._alerts[alert_name]['category'] sub_str = '{:^5}\t{:50}\t{:80}\t{:30}\n'.format( index + 1, alert_name, alert_description, alert_category ) representation_string += sub_str return representation_string.strip() def __repr__(self): return "Alerts class instance for Commcell: '{0}'".format( self._commcell_object.commserv_name ) def __len__(self): return len(self.all_alerts) def __getitem__(self, value): value = str(value) if value in self.all_alerts: return self.all_alerts[value] else: try: return list(filter(lambda x: x[1]['id'] == value, self.all_alerts.items()))[0][0] except IndexError: raise IndexError('No alert exists with the given Name / Id') def _get_alerts(self): flag, response = self._cvpysdk_object.make_request('GET', self._ALERTS) if flag: if response.json() and 'alertList' in response.json(): alerts_dict = {} for dictionary in response.json()['alertList']: temp_dict = {} temp_name = dictionary['alert']['name'].lower() temp_id = str(dictionary['alert']['id']).lower() temp_description = dictionary['description'].lower() temp_category = dictionary['alertCategory']['name'].lower() temp_dict['id'] = temp_id temp_dict['description'] = temp_description temp_dict['category'] = temp_category alerts_dict[temp_name] = temp_dict del temp_dict return alerts_dict else: raise SDKException('Response', '102') else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) def _get_entities(self, entities): if not isinstance(entities, dict): raise SDKException('Alert', '101') entity_dict = { "clients": { "clientName": "client_name", "clientId": "client_id", "_type_": 3 }, "client_groups": { "clientGroupName": "clientgroup_name", "clientGroupId": "clientgroup_id", "_type_": 28 }, "users": { "userName": "user_name", "userId": "user_id", "_type_": 13 }, "user_groups": { "userGroupName": "user_group_name", "userGroupId": "user_group_id", "_type_": 15 }, "disk_libraries": { "libraryName": "library_name", "libraryId": "library_id", "_type_": 9 }, "media_agents": { "mediaAgentName": "media_agent_name", "mediaAgentId": "media_agent_id", "_type_": 11 }, "storage_policies": { "storagePolicyName": "storage_policy_name", "storagePolicyId": "storage_policy_id", "_type_": 17 } } associations = [] for entity, values in entities.items(): entity_obj = getattr(self._commcell_object, entity) values = values.split() if not isinstance(values, list) else values for value in values: temp_dict = entity_dict[entity].copy() for name, entity_attr in temp_dict.items(): if name != "_type_": 
try: temp_dict[name] = int(getattr(entity_obj.get(value), entity_attr)) except ValueError: temp_dict[name] = getattr(entity_obj.get(value), entity_attr) associations.append(temp_dict) return associations def _get_alert_json(self, alert_json): alert_detail = { "alertDetail": { "alertType": alert_json.get("alert_type"), "notifType": [n_type for n_type in alert_json.get("notif_type", [8192])], "notifTypeListOperationType": alert_json.get("notifTypeListOperationType", 0), "alertSeverity": alert_json.get("alertSeverity", 0), "EscnonGalaxyUserList":{ "nonGalaxyUserOperationType": alert_json.get("nonGalaxyUserOperationType", 0) }, "locale":{ "localeID":alert_json.get("localeID", 0) }, "EscUserList":{ "userListOperationType":alert_json.get("userListOperationType", 0) }, "EscUserGroupList":{ "userGroupListOperationType": alert_json.get("userGroupListOperationType", 0) }, "alertrule":{ "alertName": alert_json.get("alert_name") }, "criteria":{ "criteria": alert_json.get("criteria") }, "userList":{ "userListOperationType":alert_json.get("userListOperationType", 0), "userList":[{"userName":user} for user in alert_json.get("users", ["admin"])] }, "EntityList":{ "associationsOperationType":alert_json.get("associationsOperationType", 0), "associations": self._get_entities(alert_json.get("entities", dict())) } } } if alert_json.get("paramsList"): alert_detail["alertDetail"]["criteria"]["paramsList"] = alert_json.get("paramsList") if alert_json.get("nonGalaxyList"): alert_detail["alertDetail"]["nonGalaxyList"] = alert_json.get("nonGalaxyList") if alert_json.get("user_groups"): alert_detail["alertDetail"]["userGroupList"] = { "userGroupListOperationType":alert_json.get("userGroupListOperationType", 0), "userGroupList":[ { "userGroupName":user_grp } for user_grp in alert_json.get("user_groups") ] } return alert_detail def get_alert_sender(self): get_alert = self._services['EMAIL_SERVER'] flag, response = self._cvpysdk_object.make_request('GET', get_alert) if flag: if response.json(): sender = response.json()["senderInfo"]['senderName'] if not sender: sender = response.json()["senderInfo"]['senderAddress'] return sender else: raise SDKException('Alert', '102', "Failed to get sender address") else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) def create_alert(self, alert_dict): if not isinstance(alert_dict, dict): raise SDKException('Alert', '101') alert_json = self._get_alert_json(alert_dict) alert_name = alert_json["alertDetail"]["alertrule"]["alertName"] if self.has_alert(alert_name): raise SDKException('Alert', '102', 'Alert "{0}" already exists.'. 
format(alert_name)) post_alert = self._services['GET_ALL_ALERTS'] flag, response = self._cvpysdk_object.make_request( 'POST', post_alert, alert_json) if flag: if response.json(): error_dict = response.json()["errorResp"] error_code = str(error_dict["errorCode"]) if error_code == "0": self.refresh() return self.get(alert_name) else: error_message = "" if 'errorMessage' in error_dict: error_message = error_dict['errorMessage'] if error_message: raise SDKException( 'Alert', '102', 'Failed to create Alert\nError: "{}"'.format( error_message ) ) else: raise SDKException( 'Alert', '102', "Failed to create Alert") else: raise SDKException('Response', '102') else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) @property def all_alerts(self): return self._alerts def has_alert(self, alert_name): if not isinstance(alert_name, basestring): raise SDKException('Alert', '101') return self._alerts and alert_name.lower() in self._alerts def get(self, alert_name): if not isinstance(alert_name, basestring): raise SDKException('Alert', '101') else: alert_name = alert_name.lower() if self.has_alert(alert_name): return Alert( self._commcell_object, alert_name, self._alerts[alert_name]['id'], self._alerts[alert_name]['category'] ) raise SDKException('Alert', '102', 'No Alert exists with name: {0}'.format(alert_name)) def console_alerts(self, page_number=1, page_count=1): if not (isinstance(page_number, int) and isinstance(page_count, int)): raise SDKException('Alert', '101') console_alerts = self._services['GET_ALL_CONSOLE_ALERTS'] % ( page_number, page_count) flag, response = self._cvpysdk_object.make_request('GET', console_alerts) if flag: if response.json() and 'totalNoOfAlerts' in response.json(): if self._commcell_object.commserv_version >= 23: return response.json() o_str = "Total Console Alerts found: {0}".format( response.json()['totalNoOfAlerts'] ) o_str += "\n{:^5}\t{:^50}\t{:^50}\t{:^50}\n\n".format( 'S. 
No.', 'Alert', 'Type', 'Criteria' ) for index, dictionary in enumerate(response.json()['feedsList']): o_str += '{:^5}\t{:50}\t{:^50}\t{:^50}\n'.format( index + 1, dictionary['alertName'], dictionary['alertType'], dictionary['alertcriteria'] ) return o_str else: raise SDKException('Response', '102') else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) def console_alert(self, live_feed_id): if not (isinstance(live_feed_id, int)): raise SDKException('Alert', '101') console_alerts = self._services['GET_CONSOLE_ALERT'] % ( live_feed_id) flag, response = self._cvpysdk_object.make_request('GET', console_alerts) if flag: if response and response.json() and 'description' in response.json(): return response.json() else: raise SDKException('Response', '102') else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) def delete(self, alert_name): if not isinstance(alert_name, basestring): raise SDKException('Alert', '101') alert_name = alert_name.lower() if self.has_alert(alert_name): alert_id = self._alerts[alert_name]['id'] alert = self._services['ALERT'] % (alert_id) flag, response = self._cvpysdk_object.make_request( 'DELETE', alert ) if flag: if response.json(): if 'errorCode' in response.json(): if response.json()['errorCode'] == 0: self.refresh() else: raise SDKException('Alert', '102', response.json()['errorMessage']) else: raise SDKException('Response', '102') else: response_string = self._update_response_(response.text) exception_message = 'Failed to delete alert\nError: "{0}"'.format( response_string ) raise SDKException('Alert', '102', exception_message) else: raise SDKException( 'Alert', '102', 'No alert exists with name: {0}'.format(alert_name) ) def refresh(self): self._alerts = self._get_alerts() class Alert(object): def __init__(self, commcell_object, alert_name, alert_id=None, alert_category=None): self._commcell_object = commcell_object self._cvpysdk_object = commcell_object._cvpysdk_object self._services = commcell_object._services self._update_response_ = commcell_object._update_response_ self._alerts_obj = Alerts(self._commcell_object) self._alert_name = alert_name.lower() self._alert_detail = None if alert_id: self._alert_id = str(alert_id) else: self._alert_id = self._get_alert_id() if alert_category: self._alert_category = alert_category else: self._alert_category = self._get_alert_category() self._ALERT = self._services['ALERT'] % (self.alert_id) self._all_notification_types = { 'email': 1, 'snmp': 4, 'event viewer': 8, 'save to disk': 512, 'rss feeds': 1024, 'console alerts': 8192, 'scom': 32768, 'workflow': 65536, 'content indexing': 131072 } self._alert_severity = None self._alert_type = None self._alert_type_id = None self._description = None self._criteria = [] self._entities_list = [] self._users_list = [] self._user_group_list = [] self._notification_types = [] self.refresh() def __repr__(self): representation_string = 'Alert class instance for Alert: "{0}", Alert Type: "{1}"' return representation_string.format(self.alert_name, self._alert_type) def _get_alert_id(self): return self._alerts_obj.get(self.alert_name).alert_id def _get_alert_category(self): return self._alerts_obj.get(self.alert_name).alert_category def _get_alert_properties(self): flag, response = self._cvpysdk_object.make_request('GET', self._ALERT) if flag: if response.json() and 'alertDetail' in response.json().keys(): self._alert_detail = response.json()['alertDetail'] if 
'alertSeverity' in self._alert_detail: self._alert_severity = self._alert_detail['alertSeverity'] if 'criteria' in self._alert_detail: criterias = self._alert_detail['criteria'] for criteria in criterias: self._criteria.append({ 'criteria_value': criteria['value'] if 'value' in criteria else None, 'criteria_id': str(criteria['criteriaId']) if 'criteriaId' in criteria else None, 'esclation_level': criteria['esclationLevel'] if 'esclationLevel' in criteria else None }) if 'alert' in self._alert_detail: alert = self._alert_detail['alert'] if 'description' in alert: self._description = alert['description'] if 'alertType' in alert and 'name' in alert['alertType']: self._alert_type = alert['alertType']['name'] self._alert_type_id = alert['alertType']['id'] if 'xmlEntityList' in self._alert_detail: entity_xml = ET.fromstring(self._alert_detail['xmlEntityList']) self._entities_list = [] for entity in entity_xml.findall("associations"): if entity.find("flags") is not None: if entity.find("flags").attrib["exclude"] != "1": self._entities_list.append(entity.attrib) else: self._entities_list.append(entity.attrib) for entity in self._entities_list: for key, value in entity.items(): try: entity[key] = int(value) except ValueError: pass if 'regularNotifications' in self._alert_detail: self._notification_types = self._alert_detail["regularNotifications"] if 'userList' in self._alert_detail: self._users_list = [user['name'] for user in self._alert_detail['userList']] if 'userGroupList' in self._alert_detail: self._user_group_list = [grp['name'] for grp in self._alert_detail['userGroupList']] self._email_recipients = [] if 'nonGalaxyUserList' in self._alert_detail: self._email_recipients = [email['name'] for email in self._alert_detail['nonGalaxyUserList']] else: raise SDKException('Response', '102') else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) def _modify_alert_properties(self): request_json = { "alertDetail":{ "alertDetail": { "alertType": self._alert_type_id, "notifType": self._notification_types, "alertSeverity": self._alert_severity, "alertrule": { "alertName": self._alert_name }, "criteria": { "criteria": int(self._criteria[0]['criteria_id']) }, "userList": { "userList": [{"userName": user} for user in self._users_list] }, "nonGalaxyList": { "nonGalaxyUserList": [{"nonGalaxyUser": email} for email in self._email_recipients] }, "EntityList": { "associations": self._entities_list } } } } modify_alert = self._services['MODIFY_ALERT'] % (self.alert_id) flag, response = self._cvpysdk_object.make_request( 'POST', modify_alert, request_json ) if flag: if response.json(): error_code = str(response.json()['errorCode']) if error_code == '0': self.refresh() return else: o_str = 'Failed to update properties of Alert\nError: "{0}"' o_str = o_str.format(response.json()['errorMessage']) raise SDKException('Alert', '102', o_str) else: raise SDKException('Response', '102') else: response_string = self._update_response_(response.text) raise SDKException('Response', '101', response_string) @property def name(self): return self._alert_detail['alert']['alert']['name'] @property def alert_name(self): return self._alert_name @alert_name.setter
Apache License 2.0
sdatkinson/arxiv-converter
convert/files.py
read_file
python
def read_file(fname):
    with open(fname) as f:
        lines = f.readlines()
    return lines
Get a file into a list of strings (one entry=one line)
https://github.com/sdatkinson/arxiv-converter/blob/b12abf80bda3e0d64e2437e74356fa386c5c5b18/convert/files.py#L48-L54
from __future__ import absolute_import import os import re from subprocess import call _comment_re = re.compile("^\s{0,}%") _input_re = re.compile("\\\\input\\{[0-9a-zA-Z\/\_]{1,}(\.tex)?\}") _graphics_re = re.compile("\\\\includegraphics" + "(\[[0-9a-zA-Z,\s\-\=\.\\\\]{1,}\])?" + "\{[a-zA-Z0-9\/\_\.]{1,}" + "[(\.eps)(\.pdf)]\}") _bibliography_re = re.compile("\\\\bibliography\{[a-zA-Z0-9\/\_\.]{1,}\}") _sty_re = re.compile("\\\\usepackage" + "(\[[0-9a-zA-Z,\s\-\=\.\\\\]{1,}\])?" + "\{[a-zA-Z0-9\/\_\.]{1,}" + "(\.sty)?\}") def main_file(dir): possible_main_files = {"Main.tex", "main.tex"} for main_file in possible_main_files: mf_with_dir = dir + "/" + main_file if os.path.isfile(mf_with_dir): print("Found main file at {}".format(mf_with_dir)) return main_file raise FileNotFoundError("Failed to find a main file in {}".format(dir))
MIT License
open-eo/openeo-python-client
openeo/rest/rest_capabilities.py
RESTCapabilities.currency
python
def currency(self):
    return self.capabilities.get('billing', {}).get('currency')
Get default billing currency.
https://github.com/open-eo/openeo-python-client/blob/bde2d0f992bd52fc244c8bfeceac4e58d6b12c2d/openeo/rest/rest_capabilities.py#L36-L38
from openeo.capabilities import Capabilities from openeo.internal.jupyter import render_component class RESTCapabilities(Capabilities): def __init__(self, data: dict, url: str = None): super(RESTCapabilities, self).__init__(data) self.capabilities = data self.url = url def api_version(self) -> str: if 'api_version' in self.capabilities: return self.capabilities.get('api_version') else: return self.capabilities.get('version') def list_features(self): return self.capabilities.get('endpoints') def has_features(self, method_name): pass def supports_endpoint(self, path: str, method="GET"): return any( endpoint.get("path") == path and method.upper() in endpoint.get("methods", []) for endpoint in self.capabilities.get("endpoints", []) )
Apache License 2.0
coarse-graining/cgnet
cgnet/feature/schnet_utils.py
CGBeadEmbedding.forward
python
def forward(self, embedding_property):
    return self.embedding(embedding_property)
Parameters
----------
embedding_property : torch.Tensor
    Some property that should be embedded. Can be nuclear charge
    or maybe an arbitrary number assigned for amino-acids. Passing a
    zero will produce an embedding vector filled with zeroes (necessary
    in the case of zero padded batches). The properties to be embedded
    should be integers (torch type long).
    Size [n_frames, n_beads]

Returns
-------
embedding_vector : torch.Tensor
    Corresponding embedding vector to the passed indices.
    Size [n_frames, n_beads, embedding_dim]
https://github.com/coarse-graining/cgnet/blob/ce7dadb1f8e66771032275ef87b8193ad234d495/cgnet/feature/schnet_utils.py#L101-L120
import torch
import torch.nn as nn

from cgnet.feature.utils import ShiftedSoftplus, LinearLayer


class SimpleNormLayer(nn.Module):
    def __init__(self, normalization_strength):
        super(SimpleNormLayer, self).__init__()
        self.normalization_strength = normalization_strength

    def forward(self, input_features):
        return input_features / self.normalization_strength


class NeighborNormLayer(nn.Module):
    def __init__(self):
        super(NeighborNormLayer, self).__init__()

    def forward(self, input_features, n_neighbors):
        return input_features / n_neighbors


class CGBeadEmbedding(nn.Module):
    def __init__(self, n_embeddings, embedding_dim):
        super(CGBeadEmbedding, self).__init__()
        self.embedding = nn.Embedding(num_embeddings=n_embeddings,
                                      embedding_dim=embedding_dim,
                                      padding_idx=0)
BSD 3-Clause New or Revised License
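A small sketch of the embedding lookup above; the module path is assumed from the file cgnet/feature/schnet_utils.py, and the sizes and bead types are invented. Integer bead types of shape [n_frames, n_beads] come back as vectors of shape [n_frames, n_beads, embedding_dim], with type 0 reserved for padding.

import torch

from cgnet.feature.schnet_utils import CGBeadEmbedding

embedding_layer = CGBeadEmbedding(n_embeddings=10, embedding_dim=8)
bead_types = torch.tensor([[1, 2, 3, 0],
                           [4, 4, 1, 0]])  # 2 frames, 4 beads, 0 = zero padding
vectors = embedding_layer(bead_types)
print(vectors.shape)              # torch.Size([2, 4, 8])
print(vectors[0, 3].abs().sum())  # padded positions embed to all zeros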
santoshphilip/eppy
eppy/geometry/surface.py
unit_normal
python
def unit_normal(pt_a, pt_b, pt_c):
    x_val = np.linalg.det(
        [[1, pt_a[1], pt_a[2]], [1, pt_b[1], pt_b[2]], [1, pt_c[1], pt_c[2]]]
    )
    y_val = np.linalg.det(
        [[pt_a[0], 1, pt_a[2]], [pt_b[0], 1, pt_b[2]], [pt_c[0], 1, pt_c[2]]]
    )
    z_val = np.linalg.det(
        [[pt_a[0], pt_a[1], 1], [pt_b[0], pt_b[1], 1], [pt_c[0], pt_c[1], 1]]
    )
    magnitude = (x_val ** 2 + y_val ** 2 + z_val ** 2) ** 0.5
    mag = (x_val / magnitude, y_val / magnitude, z_val / magnitude)
    if magnitude < 0.00000001:
        mag = (0, 0, 0)
    return mag
unit normal vector of plane defined by points pt_a, pt_b, and pt_c
https://github.com/santoshphilip/eppy/blob/2d10381bf0b8f4dbc8bee17ce10cd61f77365517/eppy/geometry/surface.py#L52-L67
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals try: import numpy as np from numpy import arccos as acos except ImportError as err: from tinynumpy import tinynumpy as np from tinynumpy import tinylinalg as linalg from math import acos as acos import math def area(poly): if len(poly) < 3: return 0 total = [0, 0, 0] num = len(poly) for i in range(num): vi1 = poly[i] vi2 = poly[(i + 1) % num] prod = np.cross(vi1, vi2) total[0] += prod[0] total[1] += prod[1] total[2] += prod[2] if total == [0, 0, 0]: return 0 result = np.dot(total, unit_normal(poly[0], poly[1], poly[2])) return abs(result / 2)
MIT License
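A quick numeric check of the normal computation above, with points chosen here: three points lying in the XY plane, ordered counter-clockwise, should give the +Z unit normal.

from eppy.geometry.surface import unit_normal

# Triangle in the XY plane.
print(unit_normal((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # -> (0.0, 0.0, 1.0)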
unofficial-memsource/memsource-cli-client
memsource_cli/models/page_dto_translation_price_list_dto.py
PageDtoTranslationPriceListDto.__eq__
python
def __eq__(self, other):
    if not isinstance(other, PageDtoTranslationPriceListDto):
        return False

    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/page_dto_translation_price_list_dto.py#L238-L243
import pprint import re import six from memsource_cli.models.translation_price_list_dto import TranslationPriceListDto class PageDtoTranslationPriceListDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'total_elements': 'int', 'total_pages': 'int', 'page_size': 'int', 'page_number': 'int', 'number_of_elements': 'int', 'content': 'list[TranslationPriceListDto]' } attribute_map = { 'total_elements': 'totalElements', 'total_pages': 'totalPages', 'page_size': 'pageSize', 'page_number': 'pageNumber', 'number_of_elements': 'numberOfElements', 'content': 'content' } def __init__(self, total_elements=None, total_pages=None, page_size=None, page_number=None, number_of_elements=None, content=None): self._total_elements = None self._total_pages = None self._page_size = None self._page_number = None self._number_of_elements = None self._content = None self.discriminator = None if total_elements is not None: self.total_elements = total_elements if total_pages is not None: self.total_pages = total_pages if page_size is not None: self.page_size = page_size if page_number is not None: self.page_number = page_number if number_of_elements is not None: self.number_of_elements = number_of_elements if content is not None: self.content = content @property def total_elements(self): return self._total_elements @total_elements.setter def total_elements(self, total_elements): self._total_elements = total_elements @property def total_pages(self): return self._total_pages @total_pages.setter def total_pages(self, total_pages): self._total_pages = total_pages @property def page_size(self): return self._page_size @page_size.setter def page_size(self, page_size): self._page_size = page_size @property def page_number(self): return self._page_number @page_number.setter def page_number(self, page_number): self._page_number = page_number @property def number_of_elements(self): return self._number_of_elements @number_of_elements.setter def number_of_elements(self, number_of_elements): self._number_of_elements = number_of_elements @property def content(self): return self._content @content.setter def content(self, content): self._content = content def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(PageDtoTranslationPriceListDto, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
Apache License 2.0
afterwake/ircbots
pointBot2/plugins/pointBot.py
resetcommand
python
def resetcommand(bot, trigger):
    bot.say("The game has reset, everyone has 10 gift points now")
    players = getPlayers(bot.db, bot.users)
    resetGame(bot.db, players)
Reset game for the day. Sets all users' gift points to 10
https://github.com/afterwake/ircbots/blob/1a91e6f4f47fec2cd75754c8db0fa0947b0694b0/pointBot2/plugins/pointBot.py#L117-L123
from sopel import module, tools from datetime import datetime pluginName = "pointBot" startHour = 8 stopHour = 17 ptsExpr = r"^(\+|-)(\d+)\s(?:to\s)?(\w+).*$" def setup(bot): updateGameRunning(bot) pass def updateGameRunning(bot): currentHour = datetime.now().hour workhour = currentHour >= startHour and currentHour < stopHour setGameRunning(bot.db, pluginName, workhour) @module.interval(300) def checkGameRunning(bot): updateGameRunning(bot) @module.rule(ptsExpr) def addGPoints(bot, trigger): if not trigger.is_privmsg: gameRunning = getGameRunning(bot.db, trigger.sender) if not gameRunning: bot.reply("The game is not running right now") return groups = trigger.groups() points = int(groups[1]) if groups[0] == '-': amount = -points else: amount = points user = groups[len(groups)-1] if user == trigger.nick: bot.reply("YOU CAN'T GIVE YOURSELF POINTS!!!") return players = getPlayers(bot.db, bot.users) buser = user in players if not buser: bot.say("Invalid player", trigger.nick) return checkPlayerReset(bot.db, trigger.nick) gpts = getgpts(bot.db, trigger.nick) if points > gpts: bot.say("You don't have enough gift points", trigger.nick) return addpts(bot.db, user, amount) addgpts(bot.db, trigger.nick, -points) @module.nickname_commands(r'help') def help(bot, trigger): print("Admin Commands: start, stop, auto, reset, save, restore, say <msg>, me <action>, msg <nick> <msg>, status <user>, setpts <user/all> <points>, setgp <user/all> <gp>, ignore <user>, unignore <user>") bot.say("User Commands: help, rules, points, [e.g. pointBot, help]. PM anything for your status.") bot.say("Point Exchanges: +/-<pts> [to] <user> [reason] (e.g. +1 to user for being awesome)") @module.nickname_commands(r'rules') def rules(bot, trigger): bot.say("Hello, it's me, pointBot. I keep track of +s and -s handed out in the IRC. You get 10 points to give away every day, and these points are refreshed every morning at 8 AM. Using bots is not allowed. If you run into any issues, talk to the admin (JSON). Have a day.") @module.require_admin() @module.nickname_commands(r'adminhelp') def helpadmincommand(bot, trigger): bot.say("Admin Commands: status [<user>], setpts <user/all> <points>, setgpts <user/all> <gpts>, ignore <user>, unignore <user>, setbot <user> <true/false>, setalias <user> <alias> (will use user's pts if both nicks have pts), unalias <alias>", trigger.nick) @module.nickname_commands(r'points') def displaypoints(bot, trigger): players = getPlayers(bot.db, bot.users) ptsstring = displayPoints(bot.db, players) bot.say(ptsstring) @module.require_admin() @module.nickname_commands(r'reset')
MIT License
ucbdrive/hd3
utils/flowlib.py
flow_to_image
python
def flow_to_image(flow, maxrad=-1):
    u = flow[:, :, 0]
    v = flow[:, :, 1]

    maxu = -999.
    maxv = -999.
    minu = 999.
    minv = 999.

    idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
    u[idxUnknow] = 0
    v[idxUnknow] = 0

    if maxrad == -1:
        rad = np.sqrt(u**2 + v**2)
        maxrad = max(-1, np.max(rad))

    u = u / (maxrad + np.finfo(float).eps)
    v = v / (maxrad + np.finfo(float).eps)

    img = compute_color(u, v)

    idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
    img[idx] = 0

    return np.uint8(img)
Convert flow into Middlebury color code image.

:param flow: optical flow map
:param maxrad: maximum flow magnitude used for normalization; the default of -1 means it is computed from the flow field
:return: optical flow image in Middlebury color
https://github.com/ucbdrive/hd3/blob/84792e27eec81ed27671018c231538e6de8cd6be/utils/flowlib.py#L178-L208
from . import png import numpy as np from PIL import Image import cv2 import re UNKNOWN_FLOW_THRESH = 1e7 SMALLFLOW = 0.0 LARGEFLOW = 1e8 def read_flow(filename): if filename.endswith('.flo'): flow = read_flo_file(filename) elif filename.endswith('.png'): flow = read_kitti_png_file(filename) elif filename.endswith('.pfm'): flow = read_pfm_file(filename)[:, :, :2].astype(np.float32) else: raise Exception('Invalid flow file format!') return flow def write_flow(flow, filename): f = open(filename, 'wb') magic = np.array([202021.25], dtype=np.float32) (height, width) = flow.shape[0:2] w = np.array([width], dtype=np.int32) h = np.array([height], dtype=np.int32) magic.tofile(f) w.tofile(f) h.tofile(f) flow.tofile(f) f.close() def save_flow_image(flow, image_file): flow_img = flow_to_image(flow) img_out = Image.fromarray(flow_img) img_out.save(image_file) def flowfile_to_imagefile(flow_file, image_file): flow = read_flow(flow_file) save_flow_image(flow, image_file) def flow_error(tu, tv, u, v): smallflow = 0.0 stu = tu[:] stv = tv[:] su = u[:] sv = v[:] idxUnknow = (abs(stu) > UNKNOWN_FLOW_THRESH) | ( abs(stv) > UNKNOWN_FLOW_THRESH) stu[idxUnknow] = 0 stv[idxUnknow] = 0 su[idxUnknow] = 0 sv[idxUnknow] = 0 ind2 = [(np.absolute(stu) > smallflow) | (np.absolute(stv) > smallflow)] index_su = su[ind2] index_sv = sv[ind2] an = 1.0 / np.sqrt(index_su**2 + index_sv**2 + 1) index_stu = stu[ind2] index_stv = stv[ind2] tn = 1.0 / np.sqrt(index_stu**2 + index_stv**2 + 1) epe = np.sqrt((stu - su)**2 + (stv - sv)**2) epe = epe[ind2] mepe = np.mean(epe) return mepe def flow_kitti_error(tu, tv, u, v, mask): tau = [3, 0.05] stu = tu[:] stv = tv[:] su = u[:] sv = v[:] smask = mask[:] ind_valid = (smask != 0) n_total = np.sum(ind_valid) epe = np.sqrt((stu - su)**2 + (stv - sv)**2) mag = np.sqrt(stu**2 + stv**2) + 1e-5 epe = epe[ind_valid] mag = mag[ind_valid] err = np.logical_and((epe > tau[0]), (epe / mag) > tau[1]) n_err = np.sum(err) mean_epe = np.mean(epe) mean_acc = 1 - (float(n_err) / float(n_total)) return (mean_epe, mean_acc)
BSD 3-Clause New or Revised License
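A usage sketch for the conversion above, with a synthetic flow field invented here; it assumes the module is importable as utils.flowlib (matching the file path) and relies on the companion compute_color helper defined elsewhere in that module.

import numpy as np

from utils.flowlib import flow_to_image

# Synthetic 64x64 flow field: constant motion to the right and slightly down.
flow = np.zeros((64, 64, 2), dtype=np.float32)
flow[:, :, 0] = 3.0  # u component
flow[:, :, 1] = 1.0  # v component

color = flow_to_image(flow)
print(color.shape, color.dtype)  # (64, 64, 3) uint8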
sfdo-tooling/metadeploy
metadeploy/api/models.py
PreflightResult.optional_step_ids
python
def optional_step_ids(self):
    optional_step_pks = []
    for step_id, results in self.results.items():
        for result in results:
            if result["status"] in (OPTIONAL, HIDE, SKIP):
                optional_step_pks.append(step_id)
    return optional_step_pks
self.results is a dict mapping a unique identifier for a step to a list of errors, warnings, and other outcomes of preflighting that step. Right now, the unique identifier is the step's PK in the Metadeploy database, but we may change that if we reconsider it. However, currently, this is most convenient for the frontend. This key is set by PreflightFlow._get_step_id. So this will return a list of step PKs, for now.
https://github.com/sfdo-tooling/metadeploy/blob/25d10b491f35b24c317a3613f8f444c05a7ccc15/metadeploy/api/models.py#L909-L926
import logging import uuid from statistics import median from typing import Union from asgiref.sync import async_to_sync from colorfield.fields import ColorField from cumulusci.core.config import FlowConfig from cumulusci.core.flowrunner import ( FlowCoordinator, PreflightFlowCoordinator, StepSpec, ) from cumulusci.core.tasks import BaseTask from cumulusci.core.utils import import_class from django.conf import settings from django.contrib.auth.models import AbstractUser from django.contrib.auth.models import UserManager as BaseUserManager from django.contrib.postgres.fields import ArrayField from django.contrib.sites.models import Site from django.core.exceptions import ValidationError from django.core.serializers.json import DjangoJSONEncoder from django.core.validators import MaxValueValidator, MinValueValidator, RegexValidator from django.db import models from django.db.models import Count, F, Func, JSONField, Q from django.utils.translation import gettext_lazy as _ from hashid_field import HashidAutoField from model_utils import Choices, FieldTracker from parler.managers import TranslatableQuerySet from parler.models import TranslatableModel, TranslatedFields from sfdo_template_helpers.crypto import fernet_decrypt from sfdo_template_helpers.fields import MarkdownField as BaseMarkdownField from sfdo_template_helpers.slugs import AbstractSlug, SlugMixin from .belvedere_utils import convert_to_18 from .constants import ERROR, HIDE, OPTIONAL, ORGANIZATION_DETAILS, SKIP from .flows import JobFlowCallback, PreflightFlowCallback from .push import ( notify_org_changed, notify_org_result_changed, notify_post_job, notify_post_task, preflight_canceled, preflight_completed, preflight_failed, preflight_invalidated, ) from .salesforce import refresh_access_token logger = logging.getLogger(__name__) VERSION_STRING = r"^[a-zA-Z0-9._+-]+$" STEP_NUM = r"^[\d\./]+$" WorkableModel = Union["Job", "PreflightResult"] ORG_TYPES = Choices("Production", "Scratch", "Sandbox", "Developer") SUPPORTED_ORG_TYPES = Choices("Persistent", "Scratch", "Both") PRODUCT_LAYOUTS = Choices("Default", "Card") class HashIdMixin(models.Model): class Meta: abstract = True id = HashidAutoField(primary_key=True) class MarkdownField(BaseMarkdownField): def __init__(self, *args, **kwargs): kwargs["property_suffix"] = kwargs.get("property_suffix", "_markdown") kwargs["blank"] = kwargs.get("blank", True) kwargs["help_text"] = kwargs.get("help_text", "Markdown is supported") super().__init__(*args, **kwargs) class AllowedList(models.Model): title = models.CharField(max_length=128, unique=True) description = MarkdownField() org_type = ArrayField( models.CharField(max_length=64, choices=ORG_TYPES), blank=True, size=4, default=list, help_text="All orgs of these types will be automatically allowed.", ) list_for_allowed_by_orgs = models.BooleanField( default=False, help_text=( "If a user is allowed only because they have the right Org Type, should " "this be listed for them? If not, they can still find it if they happen to " "know the address." 
), ) def __str__(self): return self.title class AllowedListOrg(models.Model): allowed_list = models.ForeignKey( AllowedList, related_name="orgs", on_delete=models.CASCADE ) org_id = models.CharField(max_length=18) description = models.TextField( help_text=("A description of the org for future reference",) ) created_by = models.ForeignKey( settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL ) created_at = models.DateTimeField(auto_now_add=True) def save(self, *args, **kwargs): if len(self.org_id) == 15: self.org_id = convert_to_18(self.org_id) return super().save(*args, **kwargs) class AllowedListAccessMixin(models.Model): class Meta: abstract = True visible_to = models.ForeignKey( AllowedList, on_delete=models.PROTECT, null=True, blank=True ) def is_visible_to(self, user): return not self.visible_to or ( user.is_authenticated and ( user.is_superuser or user.full_org_type in self.visible_to.org_type or self.visible_to.orgs.filter(org_id=user.org_id).exists() ) ) def is_listed_by_org_only(self, user): return self.visible_to and ( user.is_authenticated and user.full_org_type in self.visible_to.org_type and not self.visible_to.list_for_allowed_by_orgs ) class UserManager(BaseUserManager): pass class User(HashIdMixin, AbstractUser): objects = UserManager() def subscribable_by(self, user, session): return self == user @property def sf_username(self): if self.social_account: return self.social_account.extra_data.get("preferred_username") def _get_org_property(self, key): try: return self.social_account.extra_data[ORGANIZATION_DETAILS][key] except (AttributeError, KeyError): return None @property def org_id(self): if self.social_account: return self.social_account.extra_data.get("organization_id") @property def oauth_id(self): if self.social_account: return self.social_account.extra_data.get("id") @property def org_name(self): return self._get_org_property("Name") @property def org_type(self): return self._get_org_property("OrganizationType") @property def full_org_type(self): org_type = self._get_org_property("OrganizationType") is_sandbox = self._get_org_property("IsSandbox") has_expiration = self._get_org_property("TrialExpirationDate") is not None if org_type is None or is_sandbox is None: return None if org_type == "Developer Edition" and not is_sandbox: return ORG_TYPES.Developer if org_type != "Developer Edition" and not is_sandbox: return ORG_TYPES.Production if is_sandbox and not has_expiration: return ORG_TYPES.Sandbox if is_sandbox and has_expiration: return ORG_TYPES.Scratch @property def instance_url(self): try: return self.social_account.extra_data["instance_url"] except (AttributeError, KeyError): return None @property def token(self): account = self.social_account if account and account.socialtoken_set.exists(): token = self.social_account.socialtoken_set.first() return (fernet_decrypt(token.token), fernet_decrypt(token.token_secret)) return (None, None) @property def social_account(self): return self.socialaccount_set.first() @property def valid_token_for(self): if all(self.token) and self.org_id: return self.org_id return None class ProductCategory(TranslatableModel): class Meta: verbose_name_plural = "product categories" ordering = ("order_key",) order_key = models.PositiveIntegerField(default=0) is_listed = models.BooleanField(default=True) translations = TranslatedFields( title=models.CharField(max_length=256), description=MarkdownField(), ) @property def description_markdown(self): return 
self._get_translated_model(use_fallback=True).description_markdown def __str__(self): return self.title def get_translation_strategy(self): return "fields", f"{self.title}:product_category" class ProductSlug(AbstractSlug): parent = models.ForeignKey("Product", on_delete=models.CASCADE) class ProductQuerySet(TranslatableQuerySet): def published(self): return ( self.annotate(version__count=Count("version")) .filter(version__count__gte=1) .order_by("order_key") ) class Product(HashIdMixin, SlugMixin, AllowedListAccessMixin, TranslatableModel): SLDS_ICON_CHOICES = ( ("", ""), ("action", "action"), ("custom", "custom"), ("doctype", "doctype"), ("standard", "standard"), ("utility", "utility"), ) class Meta: ordering = ("category__order_key", "order_key") objects = ProductQuerySet.as_manager() translations = TranslatedFields( title=models.CharField(max_length=256), short_description=models.TextField(blank=True), description=MarkdownField(), click_through_agreement=MarkdownField(), error_message=MarkdownField(), ) @property def description_markdown(self): return self._get_translated_model(use_fallback=True).description_markdown @property def click_through_agreement_markdown(self): return self._get_translated_model( use_fallback=True ).click_through_agreement_markdown @property def error_message_markdown(self): return self._get_translated_model(use_fallback=True).error_message_markdown category = models.ForeignKey(ProductCategory, on_delete=models.PROTECT) color = ColorField(blank=True) image = models.ImageField(blank=True) icon_url = models.URLField( blank=True, help_text=_("This will take precedence over Color and the SLDS Icons."), ) slds_icon_category = models.CharField( choices=SLDS_ICON_CHOICES, default="", blank=True, max_length=32 ) slds_icon_name = models.CharField(max_length=64, blank=True) repo_url = models.URLField(blank=True) is_listed = models.BooleanField(default=True) order_key = models.PositiveIntegerField(default=0) layout = models.CharField( choices=PRODUCT_LAYOUTS, default=PRODUCT_LAYOUTS.Default, max_length=64 ) slug_class = ProductSlug slug_field_name = "title" @property def slug_queryset(self): return self.productslug_set def __str__(self): return self.title @property def most_recent_version(self): return self.version_set.exclude(is_listed=False).order_by("-created_at").first() @property def icon(self): if self.icon_url: return {"type": "url", "url": self.icon_url} if self.slds_icon_name and self.slds_icon_category: return { "type": "slds", "category": self.slds_icon_category, "name": self.slds_icon_name, } return None def get_translation_strategy(self): return "fields", f"{self.slug}:product" def get_absolute_url(self): return f"/products/{self.slug}" class VersionQuerySet(TranslatableQuerySet): def get_by_natural_key(self, *, product, label): return self.get(product=product, label=label) class Version(HashIdMixin, TranslatableModel): objects = VersionQuerySet.as_manager() translations = TranslatedFields(description=models.TextField(blank=True)) product = models.ForeignKey(Product, on_delete=models.PROTECT) label = models.CharField( max_length=1024, validators=[RegexValidator(regex=VERSION_STRING)] ) created_at = models.DateTimeField(auto_now_add=True) is_production = models.BooleanField(default=True) commit_ish = models.CharField( max_length=256, default="master", help_text=_("This is usually a tag, sometimes a branch."), ) is_listed = models.BooleanField(default=True) class Meta: unique_together = (("product", "label"),) def natural_key(self): return (self.product, 
self.label) def __str__(self): return "{}, Version {}".format(self.product, self.label) @property def primary_plan(self): return ( self.plan_set.filter(tier=Plan.Tier.primary).order_by("-created_at").first() ) @property def secondary_plan(self): return ( self.plan_set.filter(tier=Plan.Tier.secondary) .order_by("-created_at") .first() ) @property def additional_plans(self): return ( self.plan_set.filter(tier=Plan.Tier.additional) .order_by("plan_template_id", "order_key", "-created_at") .distinct("plan_template_id") ) def get_translation_strategy(self): return "fields", f"{self.product.slug}:version:{self.label}" def get_absolute_url(self): return f"/products/{self.product.slug}/{self.label}" class PlanSlug(AbstractSlug): slug = models.SlugField() parent = models.ForeignKey("PlanTemplate", on_delete=models.CASCADE) def validate_unique(self, *args, **kwargs): super().validate_unique(*args, **kwargs) qs = PlanSlug.objects.filter( parent__product__in=self.get_associated_products(), slug=self.slug ) if qs.exists(): raise ValidationError( {"slug": [_("This must be unique for the Plan's Version.")]} ) def get_associated_products(self): return Product.objects.filter(version__plan__plan_template=self.parent) class PlanTemplate(SlugMixin, TranslatableModel): name = models.CharField(max_length=100, blank=True) translations = TranslatedFields( preflight_message=MarkdownField(), post_install_message=MarkdownField(), error_message=MarkdownField(), ) product = models.ForeignKey(Product, on_delete=models.PROTECT) regression_test_opt_out = models.BooleanField(default=False) slug_class = PlanSlug @property def preflight_message_markdown(self): return self._get_translated_model(use_fallback=True).preflight_message_markdown @property def post_install_message_markdown(self): return self._get_translated_model( use_fallback=True ).post_install_message_markdown @property def error_message_markdown(self): return self._get_translated_model(use_fallback=True).error_message_markdown def __str__(self): return f"{self.product.title}: {self.name}" def get_translation_strategy(self): return "fields", f"{self.product.slug}:plan:{self.name}" class Plan(HashIdMixin, SlugMixin, AllowedListAccessMixin, TranslatableModel): Tier = Choices("primary", "secondary", "additional") translations = TranslatedFields( title=models.CharField(max_length=128), preflight_message_additional=MarkdownField(), post_install_message_additional=MarkdownField(), ) plan_template = models.ForeignKey(PlanTemplate, on_delete=models.PROTECT) version = models.ForeignKey(Version, on_delete=models.PROTECT) commit_ish = models.CharField( max_length=256, null=True, blank=True, help_text=_( "This is usually a tag, sometimes a branch. " "Use this to optionally override the Version's commit_ish." ), ) order_key = models.PositiveIntegerField(default=0) tier = models.CharField(choices=Tier, default=Tier.primary, max_length=64) is_listed = models.BooleanField(default=True) preflight_checks = JSONField(default=list, blank=True) supported_orgs = models.CharField( max_length=32, choices=SUPPORTED_ORG_TYPES, default=SUPPORTED_ORG_TYPES.Persistent, ) org_config_name = models.CharField(max_length=64, default="release", blank=True) scratch_org_duration_override = models.IntegerField( "Scratch Org duration (days)", null=True, blank=True, validators=[MinValueValidator(1), MaxValueValidator(30)], help_text="Lifetime of Scratch Orgs created for this plan. 
Will inherit the " "global default value if left blank.", ) calculated_average_duration = models.IntegerField( "Average duration of a plan (seconds)", null=True, blank=True, validators=[MinValueValidator(0)], help_text="The duration between the enqueueing of a job and its successful completion.", ) created_at = models.DateTimeField(auto_now_add=True) slug_class = PlanSlug slug_field_name = "title" @property def preflight_message_additional_markdown(self): return self._get_translated_model( use_fallback=True ).preflight_message_additional_markdown @property def post_install_message_additional_markdown(self): return self._get_translated_model( use_fallback=True ).post_install_message_additional_markdown @property def required_step_ids(self): return self.steps.filter(is_required=True).values_list("id", flat=True) @property def slug_parent(self): return self.plan_template @property def slug_queryset(self): return self.plan_template.planslug_set @property def average_duration(self): durations = [ (job.success_at - job.enqueued_at) for job in Job.objects.filter(plan=self, status=Job.Status.complete) .exclude(Q(success_at__isnull=True) | Q(enqueued_at__isnull=True)) .order_by("-created_at")[: settings.AVERAGE_JOB_WINDOW] ] if len(durations) < settings.MINIMUM_JOBS_FOR_AVERAGE: return None return median(durations).total_seconds() @property def scratch_org_duration(self): return self.scratch_org_duration_override or settings.SCRATCH_ORG_DURATION_DAYS def natural_key(self): return (self.version, self.title) def __str__(self): return "{}, Plan {}".format(self.version, self.title) @property def requires_preflight(self): has_plan_checks = bool(self.preflight_checks) has_step_checks = any( step.task_config.get("checks") for step in self.steps.iterator() ) return has_plan_checks or has_step_checks def get_translation_strategy(self): return ( "fields", f"{self.plan_template.product.slug}:plan:{self.plan_template.name}", ) def get_absolute_url(self): return f"/products/{self.version.product.slug}/{self.version.label}/{self.slug}" def is_visible_to(self, *args, **kwargs): if self.supported_orgs != SUPPORTED_ORG_TYPES.Persistent: return True return super().is_visible_to(*args, **kwargs) def clean(self): if self.visible_to and self.supported_orgs != SUPPORTED_ORG_TYPES.Persistent: raise ValidationError( { "supported_orgs": _( 'Restricted plans (with a "visible to" AllowedList) can only support persistent org types.' ) } ) def save(self, *args, **kwargs): super().save(*args, **kwargs) from ..adminapi.translations import update_translations update_translations(self.plan_template.product) update_translations(self.plan_template) update_translations(self) class DottedArray(Func): function = "string_to_array" template = ( "%(function)s(replace(replace(%(expressions)s, '.', '|-2|')" ", '/', '|-1|'), '|')::int[]" ) class Step(HashIdMixin, TranslatableModel): Kind = Choices( ("metadata", _("Metadata")), ("onetime", _("One Time Apex")), ("managed", _("Package")), ("data", _("Data")), ("other", _("Other")), ) translations = TranslatedFields( name=models.CharField(max_length=1024, help_text="Customer facing label"), description=models.TextField(blank=True), ) plan = models.ForeignKey(Plan, on_delete=models.CASCADE, related_name="steps") is_required = models.BooleanField(default=True) is_recommended = models.BooleanField(default=True) kind = models.CharField(choices=Kind, default=Kind.metadata, max_length=64) path = models.CharField( max_length=2048, help_text="dotted path e.g. 
flow1.flow2.task_name" ) step_num = models.CharField( max_length=64, help_text="dotted step number for CCI task", validators=[RegexValidator(regex=STEP_NUM)], ) task_class = models.CharField( max_length=2048, help_text="dotted module path to BaseTask implementation" ) task_config = JSONField(default=dict, blank=True) source = JSONField(blank=True, null=True) class Meta: ordering = (DottedArray(F("step_num")),) @property def kind_icon(self): if self.kind == self.Kind.metadata: return "package" if self.kind == self.Kind.onetime: return "apex" if self.kind == self.Kind.managed: return "archive" if self.kind == self.Kind.data: return "paste" return None def to_spec(self, project_config, skip: bool = False): if self.source: project_config = project_config.include_source(self.source) task_class = import_class(self.task_class) assert issubclass(task_class, BaseTask) return StepSpec( step_num=self.step_num, task_name=self.path, task_config=self.task_config or {"options": {}}, task_class=task_class, skip=skip, project_config=project_config, ) def __str__(self): return f"Step {self.name} of {self.plan.title} ({self.step_num})" def get_translation_strategy(self): return "text", f"{self.plan.plan_template.product.slug}:steps" def save(self, *args, **kwargs): super().save(*args, **kwargs) from ..adminapi.translations import update_translations update_translations(self) class ClickThroughAgreement(models.Model): text = models.TextField() class Job(HashIdMixin, models.Model): Status = Choices("started", "complete", "failed", "canceled") tracker = FieldTracker(fields=("results", "status")) user = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True ) plan = models.ForeignKey(Plan, on_delete=models.PROTECT) steps = models.ManyToManyField(Step) results = JSONField(default=dict, blank=True) created_at = models.DateTimeField(auto_now_add=True) edited_at = models.DateTimeField(auto_now=True) enqueued_at = models.DateTimeField(null=True) job_id = models.UUIDField(null=True) status = models.CharField(choices=Status, max_length=64, default=Status.started) org_id = models.CharField(null=True, blank=True, max_length=18) org_type = models.CharField(blank=True, max_length=256) full_org_type = models.CharField(null=True, blank=True, max_length=256) is_public = models.BooleanField(default=False) success_at = models.DateTimeField( null=True, blank=True, help_text=("If the job completed successfully, the time of that success."), ) canceled_at = models.DateTimeField( null=True, blank=True, help_text=( "The time at which the Job canceled itself, likely just a bit after it was " "told to cancel itself." 
), ) exception = models.TextField(null=True, blank=True) log = models.TextField(blank=True) click_through_agreement = models.ForeignKey( ClickThroughAgreement, on_delete=models.PROTECT, null=True ) is_release_test = models.BooleanField(default=False) @property def org_name(self): if self.user: return self.user.org_name @property def instance_url(self): if self.user: return self.user.instance_url def get_absolute_url(self): return ( f"/products/{self.plan.version.product.slug}/{self.plan.version.label}/" f"{self.plan.slug}/jobs/{self.id}" ) def subscribable_by(self, user, session): if self.is_public or user.is_staff or user == self.user: return True scratch_org = ScratchOrg.objects.get_from_session(session) return scratch_org and scratch_org.org_id == self.org_id def skip_steps(self): return [ step.step_num for step in set(self.plan.steps.all()) - set(self.steps.all()) ] def _push_if_condition(self, condition, fn): if condition: async_to_sync(fn)(self) def push_to_org_subscribers(self, is_new, changed): self._push_if_condition( is_new or "status" in changed, notify_org_result_changed ) def push_if_results_changed(self, changed): results_has_changed = "results" in changed and self.results != {} self._push_if_condition(results_has_changed, notify_post_task) def push_if_has_stopped_running(self, changed): has_stopped_running = "status" in changed and self.status != Job.Status.started self._push_if_condition(has_stopped_running, notify_post_job) def save(self, *args, **kwargs): is_new = self._state.adding changed = self.tracker.changed() if is_new: ctt, _ = ClickThroughAgreement.objects.get_or_create( text=self.plan.version.product.click_through_agreement ) self.click_through_agreement = ctt ret = super().save(*args, **kwargs) try: self.push_to_org_subscribers(is_new, changed) self.push_if_results_changed(changed) self.push_if_has_stopped_running(changed) except RuntimeError as error: logger.warn(f"RuntimeError: {error}") return ret @property def error_message(self): return ( self.plan.plan_template.error_message_markdown or self.plan.version.product.error_message_markdown ) def invalidate_related_preflight(self): preflights = PreflightResult.objects.filter( org_id=self.org_id, user=self.user, plan=self.plan, is_valid=True ) for preflight in preflights: preflight.is_valid = False preflight.save() def run(self, ctx, plan, steps, org): flow_coordinator = FlowCoordinator.from_steps( ctx.project_config, steps, name="default", callbacks=JobFlowCallback(self) ) flow_coordinator.run(org) class PreflightResultQuerySet(models.QuerySet): def most_recent(self, *, org_id, plan, is_valid_and_complete=True): kwargs = {"org_id": org_id, "plan": plan} if is_valid_and_complete: kwargs.update({"is_valid": True, "status": PreflightResult.Status.complete}) return self.filter(**kwargs).order_by("-created_at").first() class PreflightResult(models.Model): Status = Choices("started", "complete", "failed", "canceled") tracker = FieldTracker(fields=("status", "is_valid")) objects = PreflightResultQuerySet.as_manager() org_id = models.CharField(null=True, blank=True, max_length=18) user = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True ) plan = models.ForeignKey(Plan, on_delete=models.PROTECT) created_at = models.DateTimeField(auto_now_add=True) edited_at = models.DateTimeField(auto_now=True) is_valid = models.BooleanField(default=True) status = models.CharField(choices=Status, max_length=64, default=Status.started) canceled_at = models.DateTimeField( null=True, help_text=( 
"The time at which the Job canceled itself, likely just a bit after it was " "told to cancel itself." ), ) log = models.TextField(blank=True) results = JSONField(default=dict, blank=True) exception = models.TextField(null=True) is_release_test = models.BooleanField(default=False) @property def instance_url(self): if self.user: return self.user.instance_url def subscribable_by(self, user, session): if user.is_staff or self.user == user: return True scratch_org = ScratchOrg.objects.get_from_session(session) return scratch_org and scratch_org.org_id == self.org_id def has_any_errors(self): for results in self.results.values(): if any( (result for result in results if result.get("status", None) == ERROR) ): return True return False @property
BSD 3-Clause New or Revised License
weinbe58/quspin
quspin/operators/quantum_LinearOperator_core.py
quantum_LinearOperator.diagonal
python
def diagonal(self):
    if self._diagonal is not None:
        diagonal_view = self._diagonal[:]
        diagonal_view.setflags(write=0, uic=0)
        return diagonal_view
    else:
        return None
numpy.ndarray: static diagonal part of the linear operator.
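A minimal usage sketch of the property above. The basis construction and the static operator list follow the usual QuSpin conventions and are assumptions beyond this record; only the read-only behaviour of the returned view comes directly from the code.

from quspin.basis import spin_basis_1d
from quspin.operators import quantum_LinearOperator

L = 4
basis = spin_basis_1d(L)
# nearest-neighbour Ising couplings; "zz" terms are diagonal, so the static diagonal gets populated
static = [["zz", [[1.0, i, i + 1] for i in range(L - 1)]]]
op = quantum_LinearOperator(static, basis=basis)

d = op.diagonal            # NumPy view of the static diagonal part, shape (basis.Ns,)
print(d.shape)
try:
    d[0] = 1.0             # the view was returned with write=0, so this raises
except ValueError as err:
    print(err)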
https://github.com/weinbe58/quspin/blob/5bbc3204dbf5c227a87a44f0dacf39509cba580c/quspin/operators/quantum_LinearOperator_core.py#L192-L199
from __future__ import print_function, division, absolute_import from .hamiltonian_core import ishamiltonian from .hamiltonian_core import _check_static from .hamiltonian_core import supported_dtypes from .hamiltonian_core import hamiltonian from ._make_hamiltonian import _consolidate_static from ..basis import spin_basis_1d as _default_basis from ..basis.base import _is_diagonal,_update_diag from ..basis import isbasis as _isbasis import scipy.sparse.linalg as _sla import scipy.sparse as _sp import numpy as _np from scipy.sparse.linalg import LinearOperator from six import iteritems from six.moves import zip __all__=["quantum_LinearOperator","isquantum_LinearOperator"] class quantum_LinearOperator(LinearOperator): def __init__(self,static_list,N=None,basis=None,diagonal=None,check_symm=True,check_herm=True,check_pcon=True,dtype=_np.complex128,copy=False,**basis_args): if type(static_list) in [list,tuple]: for ele in static_list: if not _check_static(ele): raise ValueError("quantum_LinearOperator only supports operator string representations.") else: raise TypeError('expecting list/tuple of lists/tuples containing opstr and list of indx') if dtype not in supported_dtypes: raise TypeError('hamiltonian does not support type: '+str(dtype)) else: self._dtype=dtype if N==[]: raise ValueError("second argument of `quantum_LinearOperator()` canNOT be an empty list.") elif type(N) is int and basis is None: self._basis = _default_basis(N,**basis_args) elif N is None and _isbasis(basis): self._basis = basis else: raise ValueError("expecting integer for N or basis object for basis.") self._unique_me = self.basis._unique_me self._transposed = False self._conjugated = False self._scale = _np.array(1.0,dtype=dtype) self._dtype = dtype self._ndim = 2 self._shape = (self._basis.Ns,self._basis.Ns) if check_herm: self.basis.check_hermitian(static_list, []) if check_symm: self.basis.check_symm(static_list,[]) if check_pcon: self.basis.check_pcon(static_list,[]) if diagonal is not None: self.set_diagonal(diagonal,copy=copy) else: self._diagonal = None self._public_static_list = list(static_list) static_list = _consolidate_static(static_list) self._static_list = [] for opstr,indx,J in static_list: ME,row,col = self.basis.Op(opstr,indx,J,self._dtype) if _is_diagonal(row,col): if self._diagonal is None: self._diagonal = _np.zeros((self.Ns,),dtype=ME.dtype) _update_diag(self._diagonal,row,ME) else: self._static_list.append((opstr,indx,J)) @property def shape(self): return self._shape @property def basis(self): return self._basis @property def ndim(self): return self._ndim @property def static_list(self): return self._public_static_list @property def get_shape(self): return self._shape @property def Ns(self): return self._shape[0] @property def dtype(self): return _np.dtype(self._dtype) @property def T(self): return self.transpose(copy = False) @property def H(self): return self.getH(copy = False) @property
BSD 3-Clause New or Revised License
icb-dcm/pyabc
test/visualization/test_visserver.py
client
python
def client():
    history = pyabc.History(db_path)
    server.app.config["HISTORY"] = history
    with server.app.test_client() as client:
        yield client
A fake server client.
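A short sketch of a test that consumes the fixture; the "/" route and the expected status code are assumptions about the visserver's URL map, not taken from this record.

def test_index_page(client):
    # the fixture yields a Flask test client wired to the temporary ABC database
    response = client.get("/")   # assumed index route
    assert response.status_code == 200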
https://github.com/icb-dcm/pyabc/blob/3cef3237a819caba40efe6eb4f775822b4d66955/test/visualization/test_visserver.py#L47-L52
import os import tempfile import pytest import numpy as np import pyabc import pyabc.visserver.server as server db_path = "sqlite:///" + tempfile.mkstemp(suffix='.db')[1] def setup_module(): def model(p): return {'ss0': p['p0'] + 0.1 * np.random.uniform(), 'ss1': p['p1'] + 0.1 * np.random.uniform()} p_true = {'p0': 3, 'p1': 4} observation = {'ss0': p_true['p0'], 'ss1': p_true['p1']} limits = {'p0': (0, 5), 'p1': (1, 8)} prior = pyabc.Distribution(**{ key: pyabc.RV( 'uniform', limits[key][0], limits[key][1] - limits[key][0]) for key in p_true.keys()}) distance = pyabc.PNormDistance(p=2) abc = pyabc.ABCSMC(model, prior, distance, population_size=50) abc.new(db_path, observation) abc.run(minimum_epsilon=.1, max_nr_populations=4) def teardown_module(): os.remove(db_path[len("sqlite:///"):]) @pytest.fixture
BSD 3-Clause New or Revised License
databiosphere/azul
src/azul/service/elasticsearch_service.py
ElasticsearchService.transform_autocomplete_request
python
def transform_autocomplete_request(self,
                                   catalog: CatalogName,
                                   pagination: Pagination,
                                   filters=None,
                                   _query='',
                                   search_field='fileId',
                                   entry_format='file'):
    service_config = self.service_config(catalog)
    mapping_config = service_config.autocomplete_mapping_config
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('Entry is: %s', entry_format)
        logger.debug('Printing the mapping_config: \n%s', json_pp(mapping_config))
    mapping_config = mapping_config[entry_format]
    if not filters:
        filters = {}

    entity_type = 'files' if entry_format == 'file' else 'donor'
    es_search = self._create_autocomplete_request(
        catalog,
        filters,
        self.es_client,
        _query,
        search_field,
        entity_type=entity_type)

    logger.info('Handling pagination')
    pagination.sort = '_score'
    pagination.order = 'desc'
    es_search = self.apply_paging(catalog, es_search, pagination)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('Printing ES_SEARCH request dict:\n %s', json.dumps(es_search.to_dict()))
    es_response = es_search.execute(ignore_cache=True)
    es_response_dict = es_response.to_dict()
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('Printing ES_SEARCH response dict:\n %s', json.dumps(es_response_dict))
    hits = [x['_source'] for x in es_response_dict['hits']['hits']]
    logger.debug('Generating pagination')
    paging = self._generate_paging_dict(catalog, filters, es_response_dict, pagination)
    logger.info('Creating AutoCompleteResponse')
    final_response = AutoCompleteResponse(
        mapping_config, hits, paging, _type=entry_format)
    final_response = final_response.apiResponse.to_json()
    logger.info('Returning the final response for transform_autocomplete_request')
    return final_response
This function does the whole transformation process. It takes the path of
the config file, the filters, and pagination, if any. Excluding filters
will do a match_all request. Excluding pagination will exclude pagination
from the output.

:param catalog: The name of the catalog to transform the autocomplete request for.
:param filters: Filter parameter from the API to be used in the query
:param pagination: Pagination to be used for the API
:param _query: String query to use on the search
:param search_field: Field to perform the search on
:param entry_format: Tells the method which _type of entry format to use
:return: Returns the transformed request
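A hedged sketch of a direct call. It needs a reachable Elasticsearch cluster behind the service; the catalog name 'dcp2' and the self_url are illustrative placeholders, not values from this record.

from azul.service.elasticsearch_service import ElasticsearchService, Pagination

service = ElasticsearchService()
pagination = Pagination(order='desc',
                        size=10,
                        sort='fileId',   # overridden to '_score'/'desc' inside the method
                        self_url='https://service.example.org/index/files')
response = service.transform_autocomplete_request('dcp2',
                                                  pagination,
                                                  filters={},
                                                  _query='SRR',
                                                  search_field='fileId',
                                                  entry_format='file')
print(response)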
https://github.com/databiosphere/azul/blob/3a0ed9a2c9b2b068c9a0c9f82ce40ca839f6faa1/src/azul/service/elasticsearch_service.py#L663-L735
import json import logging from typing import ( List, Optional, ) from urllib.parse import ( urlencode, ) import attr import elasticsearch from elasticsearch import ( Elasticsearch, ) from elasticsearch_dsl import ( A, Q, Search, ) from elasticsearch_dsl.aggs import ( Agg, Terms, ) from elasticsearch_dsl.response import ( AggResponse, Response, ) from elasticsearch_dsl.response.aggs import ( Bucket, BucketData, FieldBucket, FieldBucketData, ) from more_itertools import ( one, ) from azul import ( CatalogName, cached_property, config, ) from azul.es import ( ESClientFactory, ) from azul.indexer.document_service import ( DocumentService, ) from azul.plugins import ( ServiceConfig, ) from azul.service import ( AbstractService, BadArgumentException, Filters, MutableFilters, ) from azul.service.hca_response_v5 import ( AutoCompleteResponse, FileSearchResponse, KeywordSearchResponse, ) from azul.service.utilities import ( json_pp, ) from azul.types import ( JSON, MutableJSON, ) logger = logging.getLogger(__name__) class IndexNotFoundError(Exception): def __init__(self, missing_index: str): super().__init__(f'Index `{missing_index}` was not found') SourceFilters = List[str] @attr.s(auto_attribs=True, kw_only=True, frozen=False) class Pagination: order: str size: int sort: str self_url: str search_after: Optional[List[str]] = None search_before: Optional[List[str]] = None class ElasticsearchService(DocumentService, AbstractService): @cached_property def es_client(self) -> Elasticsearch: return ESClientFactory.get() def __init__(self, service_config: Optional[ServiceConfig] = None): self._service_config = service_config def service_config(self, catalog: CatalogName): return self._service_config or self.metadata_plugin(catalog).service_config() def _translate_filters(self, catalog: CatalogName, filters: Filters, field_mapping: JSON ) -> MutableFilters: translated_filters = {} for key, value in filters.items(): key = field_mapping[key] assert isinstance(value, dict) assert isinstance(one(value.values()), list) field_type = self.field_type(catalog, tuple(key.split('.'))) value = { key: [field_type.to_index(v) for v in val] for key, val in value.items() } translated_filters[key] = value return translated_filters def _create_query(self, catalog: CatalogName, filters): filter_list = [] for facet, values in filters.items(): relation, value = one(values.items()) if relation == 'is': query = Q('terms', **{facet + '.keyword': value}) field_type = self.field_type(catalog, tuple(facet.split('.'))) translated_none = field_type.to_index(None) if translated_none in value: absent_query = Q('bool', must_not=[Q('exists', field=facet)]) query = Q('bool', should=[query, absent_query]) filter_list.append(query) elif relation in ('contains', 'within', 'intersects'): for min_value, max_value in value: range_value = { 'gte': min_value, 'lte': max_value, 'relation': relation } filter_list.append(Q('range', **{facet: range_value})) else: assert False query_list = [Q('constant_score', filter=f) for f in filter_list] return Q('bool', must=query_list) def _create_aggregate(self, catalog: CatalogName, filters: MutableFilters, facet_config, agg): excluded_filter = filters.pop(facet_config[agg], None) filter_query = self._create_query(catalog, filters) aggregate = A('filter', filter_query) _field = f'{facet_config[agg]}.keyword' service_config = self.service_config(catalog) if agg == 'project': _sub_field = service_config.translation['projectId'] + '.keyword' aggregate.bucket('myTerms', 'terms', field=_field, 
size=config.terms_aggregation_size).bucket( 'myProjectIds', 'terms', field=_sub_field, size=config.terms_aggregation_size) else: aggregate.bucket('myTerms', 'terms', field=_field, size=config.terms_aggregation_size) aggregate.bucket('untagged', 'missing', field=_field) if agg == 'fileFormat': def set_summary_agg(field: str, bucket: str) -> None: field_full = service_config.translation[field] + '_' aggregate.aggs['myTerms'].metric(bucket, 'sum', field=field_full) aggregate.aggs['untagged'].metric(bucket, 'sum', field=field_full) set_summary_agg(field='fileSize', bucket='size_by_type') set_summary_agg(field='matrixCellCount', bucket='matrix_cell_count_by_type') if excluded_filter is not None: filters[facet_config[agg]] = excluded_filter return aggregate def _annotate_aggs_for_translation(self, es_search: Search): def annotate(agg: Agg): if isinstance(agg, Terms): path = agg.field.split('.') if path[-1] == 'keyword': path.pop() if not hasattr(agg, 'meta'): agg.meta = {} agg.meta['path'] = path if hasattr(agg, 'aggs'): subs = agg.aggs for sub_name in subs: annotate(subs[sub_name]) for agg_name in es_search.aggs: annotate(es_search.aggs[agg_name]) def _translate_response_aggs(self, catalog: CatalogName, es_response: Response): def translate(agg: AggResponse): if isinstance(agg, FieldBucketData): field_type = self.field_type(catalog, tuple(agg.meta['path'])) for bucket in agg: bucket['key'] = field_type.from_index(bucket['key']) translate(bucket) elif isinstance(agg, BucketData): for name in dir(agg): value = getattr(agg, name) if isinstance(value, AggResponse): translate(value) elif isinstance(agg, (FieldBucket, Bucket)): for sub in agg: translate(sub) for agg in es_response.aggs: translate(agg) def _create_request(self, catalog: CatalogName, filters: Filters, post_filter: bool = False, source_filter: SourceFilters = None, enable_aggregation: bool = True, entity_type='files') -> Search: service_config = self.service_config(catalog) field_mapping = service_config.translation facet_config = {key: field_mapping[key] for key in service_config.facets} es_search = Search(using=self.es_client, index=config.es_index_name(catalog=catalog, entity_type=entity_type, aggregate=True)) filters = self._translate_filters(catalog, filters, field_mapping) es_query = self._create_query(catalog, filters) if post_filter: es_search = es_search.post_filter(es_query) else: es_search = es_search.query(es_query) if source_filter: es_search = es_search.source(includes=source_filter) elif entity_type not in ("files", "bundles"): es_search = es_search.source(excludes="bundles") if enable_aggregation: for agg, translation in facet_config.items(): es_search.aggs.bucket(agg, self._create_aggregate(catalog, filters, facet_config, agg)) return es_search def _create_autocomplete_request(self, catalog: CatalogName, filters: Filters, es_client, _query, search_field, entity_type='files'): service_config = self.service_config(catalog) field_mapping = service_config.autocomplete_translation[entity_type] es_search = Search(using=es_client, index=config.es_index_name(catalog=catalog, entity_type=entity_type, aggregate=True)) filters = self._translate_filters(catalog, filters, field_mapping) search_field = field_mapping[search_field] if search_field in field_mapping else search_field es_filter_query = self._create_query(catalog, filters) es_search = es_search.post_filter(es_filter_query) es_search = es_search.query(Q('prefix', **{str(search_field): _query})) return es_search def apply_paging(self, catalog: CatalogName, es_search: Search, 
pagination: Pagination, peek_ahead: bool = True ) -> Search: sort_field = pagination.sort + '.keyword' sort_order = pagination.order field_type = self.field_type(catalog, tuple(pagination.sort.split('.'))) sort_mode = field_type.es_sort_mode def sort(order): assert order in ('asc', 'desc'), order return ( { sort_field: { 'order': order, 'mode': sort_mode, 'missing': '_last' if order == 'asc' else '_first', **( {} if field_type.es_type is None else {'unmapped_type': field_type.es_type} ) } }, { '_uid': { 'order': order } } ) if pagination.search_after is not None: es_search = es_search.extra(search_after=pagination.search_after) es_search = es_search.sort(*sort(sort_order)) elif pagination.search_before is not None: es_search = es_search.extra(search_after=pagination.search_before) rev_order = 'asc' if sort_order == 'desc' else 'desc' es_search = es_search.sort(*sort(rev_order)) else: es_search = es_search.sort(*sort(sort_order)) if peek_ahead: es_search = es_search.extra(size=pagination.size + 1) return es_search def _generate_paging_dict(self, catalog: CatalogName, filters: Filters, es_response: JSON, pagination: Pagination ) -> MutableJSON: def page_link(**kwargs) -> str: params = dict(catalog=catalog, filters=json.dumps(filters), sort=pagination.sort, order=pagination.order, size=pagination.size, **kwargs) return pagination.self_url + '?' + urlencode(params) pages = -(-es_response['hits']['total'] // pagination.size) es_hits = es_response['hits']['hits'] count = len(es_hits) if pagination.search_before is not None: if count > pagination.size: count -= 1 search_before = es_hits[count - 1]['sort'] else: search_before = [None, None] search_after = es_hits[0]['sort'] else: if count > pagination.size: count -= 1 search_after = es_hits[count - 1]['sort'] else: search_after = [None, None] if pagination.search_after is not None: search_before = es_hits[0]['sort'] else: search_before = [None, None] if search_after[1] is not None: search_after[0] = json.dumps(search_after[0]) if search_before[1] is not None: search_before[0] = json.dumps(search_before[0]) next_ = page_link(search_after=search_after[0], search_after_uid=search_after[1]) if search_after[1] else None previous = page_link(search_before=search_before[0], search_before_uid=search_before[1]) if search_before[1] else None page_field = { 'count': count, 'total': es_response['hits']['total'], 'size': pagination.size, 'next': next_, 'previous': previous, 'pages': pages, 'sort': pagination.sort, 'order': pagination.order } return page_field def transform_summary(self, catalog: CatalogName, filters=None, entity_type=None): if not filters: filters = {} es_search = self._create_request(catalog=catalog, filters=filters, post_filter=False, entity_type=entity_type) if entity_type == 'files': es_search.aggs.metric('totalFileSize', 'sum', field='contents.files.size_') elif entity_type == 'cell_suspensions': es_search.aggs.bucket( 'cellCountSummaries', 'terms', field='contents.cell_suspensions.organ.keyword', size=config.terms_aggregation_size ).bucket( 'cellCount', 'sum', field='contents.cell_suspensions.total_estimated_cells_' ) es_search.aggs.metric('totalCellCount', 'sum', field='contents.cell_suspensions.total_estimated_cells_') elif entity_type == 'samples': es_search.aggs.bucket('organTypes', 'terms', field='contents.samples.effective_organ.keyword', size=config.terms_aggregation_size) elif entity_type == 'projects': es_search.aggs.metric('projectEstimatedCellCount', 'sum', field='contents.projects.estimated_cell_count_') else: assert False, 
entity_type cardinality_aggregations = { 'samples': { 'specimenCount': 'contents.specimens.document_id', 'speciesCount': 'contents.donors.genus_species', 'donorCount': 'contents.donors.document_id', }, 'projects': { 'labCount': 'contents.projects.laboratory', } }.get(entity_type, {}) threshold = config.precision_threshold for agg_name, cardinality in cardinality_aggregations.items(): es_search.aggs.metric(agg_name, 'cardinality', field=cardinality + '.keyword', precision_threshold=str(threshold)) self._annotate_aggs_for_translation(es_search) es_search = es_search.extra(size=0) es_response = es_search.execute(ignore_cache=True) assert len(es_response.hits) == 0 self._translate_response_aggs(catalog, es_response) if config.debug == 2 and logger.isEnabledFor(logging.DEBUG): logger.debug('Elasticsearch request: %s', json.dumps(es_search.to_dict(), indent=4)) result = es_response.aggs.to_dict() for agg_name in cardinality_aggregations: agg_value = result[agg_name]['value'] assert agg_value <= threshold / 2, (agg_name, agg_value, threshold) return result def transform_request(self, catalog: CatalogName, entity_type: str, filters: Filters, pagination: Optional[Pagination] = None) -> MutableJSON: service_config = self.service_config(catalog) translation = service_config.translation inverse_translation = {v: k for k, v in translation.items()} for facet in filters.keys(): if facet not in translation: raise BadArgumentException(f"Unable to filter by undefined facet {facet}.") if pagination is not None: facet = pagination.sort if facet not in translation: raise BadArgumentException(f"Unable to sort by undefined facet {facet}.") es_search = self._create_request(catalog=catalog, filters=filters, post_filter=True, entity_type=entity_type) if pagination is None: self._annotate_aggs_for_translation(es_search) es_response = es_search.execute(ignore_cache=True) self._translate_response_aggs(catalog, es_response) es_response_dict = es_response.to_dict() hits = [hit['_source'] for hit in es_response_dict['hits']['hits']] hits = self.translate_fields(catalog, hits, forward=False) final_response = KeywordSearchResponse(hits, entity_type, catalog) else: if pagination.sort in translation: pagination.sort = translation[pagination.sort] es_search = self.apply_paging(catalog, es_search, pagination) self._annotate_aggs_for_translation(es_search) try: es_response = es_search.execute(ignore_cache=True) except elasticsearch.NotFoundError as e: raise IndexNotFoundError(e.info["error"]["index"]) self._translate_response_aggs(catalog, es_response) es_response_dict = es_response.to_dict() es_hits = es_response_dict['hits']['hits'] list_adjustment = 1 if len(es_hits) > pagination.size else 0 if pagination.search_before is not None: hits = reversed(es_hits[0:len(es_hits) - list_adjustment]) else: hits = es_hits[0:len(es_hits) - list_adjustment] hits = [hit['_source'] for hit in hits] hits = self.translate_fields(catalog, hits, forward=False) facets = es_response_dict['aggregations'] if 'aggregations' in es_response_dict else {} pagination.sort = inverse_translation[pagination.sort] paging = self._generate_paging_dict(catalog, filters, es_response_dict, pagination) final_response = FileSearchResponse(hits, paging, facets, entity_type, catalog) final_response = final_response.apiResponse.to_json() return final_response
Apache License 2.0
enckse/freeradius
mods-config/python/freepydius.py
_convert_mac
python
def _convert_mac(mac):
    using = mac.lower()
    for c in [":", "-"]:
        using = using.replace(c, "")
    return using
Convert a MAC address to a lowercase value with ':' and '-' separators removed.
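The behaviour is easy to check on concrete inputs; this sketch also shows how the result satisfies the _mac() validator from the surrounding module.

assert _convert_mac("AA:BB:CC:DD:EE:FF") == "aabbccddeeff"
assert _convert_mac("aa-bb-cc-dd-ee-ff") == "aabbccddeeff"
# the cleansed value is what _mac() expects: exactly 12 lowercase hex characters
assert _mac(_convert_mac("AA:BB:CC:DD:EE:FF"))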
https://github.com/enckse/freeradius/blob/6574df4aa2819e3a2ff1a237cb9cdfb56c74f8d4/mods-config/python/freepydius.py#L244-L249
import radiusd import json import logging import os import threading import uuid import random import sys from ctypes import * from logging.handlers import TimedRotatingFileHandler PASS_KEY = "pass" MAC_KEY = "macs" USER_KEY = "users" VLAN_KEY = "vlans" BYPASS_KEY = "bypass" ATTR_KEY = "attr" PORT_BYPASS_KEY = "port" WILDCARD_KEY = "wildcard" _IS_BYPASS = "isbypass" rlock = threading.RLock() logger = None _CONFIG_FILE_NAME="network.json" _PY_CONF = '/etc/raddb/mods-config/python/' _CONFIG_FILE = _PY_CONF + _CONFIG_FILE_NAME _LOG_FILE_NAME = 'trace.log' _LOG_FILE = "/var/log/radius/freepydius/" + _LOG_FILE_NAME _DOMAIN_SLASH = "\\" _ENC_KEY_FILE = _PY_CONF + 'keyfile' _ENC_DELIMITER = "." _ENC_KEY = "|" _ENC_PAD = ord(":") _ISPY2 = sys.version_info < (3, 0) def byteify(input): if isinstance(input, dict): return {byteify(key): byteify(value) for key, value in input.iteritems()} elif isinstance(input, list): return [byteify(element) for element in input] elif isinstance(input, unicode): return input.encode('utf-8') else: return input def _convert_user_name(name): user_name = name if _DOMAIN_SLASH in user_name: idx = user_name.index(_DOMAIN_SLASH) user_name = user_name[idx + len(_DOMAIN_SLASH):] return user_name def _mac(possible_mac): valid = False if len(possible_mac) == 12: valid = True for c in possible_mac: if c not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']: valid = False break return valid def _config(input_name): user_name = _convert_user_name(input_name) with open(_CONFIG_FILE) as f: if _ISPY2: obj = byteify(json.loads(f.read())) else: obj = json.loads(f.read()) users = obj[USER_KEY] vlans = obj[VLAN_KEY] bypass = obj[BYPASS_KEY] user_obj = None vlan_obj = None if "." in user_name: parts = user_name.split(".") vlan = parts[0] if user_name in users: user_obj = users[user_name] if vlan in vlans: vlan_obj = vlans[vlan] else: lowered = user_name.lower() valid = _mac(lowered) if valid and lowered in bypass: vlan_name = bypass[lowered] if vlan_name in vlans: user_obj = { PASS_KEY: input_name, MAC_KEY: [lowered], _IS_BYPASS: True } vlan_obj = vlans[vlan_name] return (user_obj, vlan_obj) def _convert_key(key): return [ord(x) for x in key] def _get_tea_key(): with open(_ENC_KEY_FILE, 'r') as f: return _convert_key(f.read().strip()) def _split_key(key): if _ENC_PAD not in key: raise Exception("invalid key input - no padding indicator") idx = key.index(_ENC_PAD) pad = "".join([chr(x) for ind, x in enumerate(key) if ind < idx]) keyed = [x for ind, x in enumerate(key) if ind > idx] if int(pad) % 2 != 0: raise Exception("pad must be divisible by 2") return (int(int(pad) / 2), keyed) def _pad(pad, res): if pad == 0: return res idx = 0 val = str(res) while idx < pad: val = val.ljust(len(val) + 1, str(random.randint(0, 9))) val = val.rjust(len(val) + 1, str(random.randint(0, 9))) idx = idx + 1 return val def _encrypt(v, key_input): if len(v) % 2 != 0: raise Exception("value must be divisible by 2") resulting = [] key_parts = _split_key(key_input) key = key_parts[1] pad = key_parts[0] for i in range(0, len(v)): if i % 2 == 1: continue k = key[i:i+4] cur = (ord(v[i]), ord(v[i + 1])) res = _tea_encrypt(cur, k) f_pad = _pad(pad, res[0]) l_pad = _pad(pad, res[1]) resulting.append("{}{}{}".format(f_pad, _ENC_DELIMITER, l_pad)) return _ENC_KEY.join(resulting) def _decrypt(v, key_input): split = v.split(_ENC_KEY) resulting = [] idx = 0 key_parts = _split_key(key_input) key = key_parts[1] pad = key_parts[0] end_pad = -1 * pad for item in split: k = key[idx:idx+4] parts 
= item.split(_ENC_DELIMITER) f_in = parts[0] l_in = parts[1] if end_pad != 0: f_in = f_in[pad:end_pad] l_in = l_in[pad:end_pad] res = _tea_decrypt((int(f_in), int(l_in)), k) resulting.append(chr(res[0])) resulting.append(chr(res[1])) idx = idx + 2 return "".join(resulting) def _tea_encrypt(v, k): y = c_uint32(v[0]); z = c_uint32(v[1]); s = c_uint32(0); delta = 0x9E3779B9; n = 32 w = [0,0] while (n > 0): s.value += delta y.value += ( z.value << 4 ) + k[0] ^ z.value + s.value ^ ( z.value >> 5 ) + k[1] z.value += ( y.value << 4 ) + k[2] ^ y.value + s.value ^ ( y.value >> 5 ) + k[3] n -= 1 w[0] = y.value w[1] = z.value return w def _tea_decrypt(v, k): y=c_uint32(v[0]) z=c_uint32(v[1]) sum=c_uint32(0xC6EF3720) delta=0x9E3779B9 n=32 w=[0,0] while(n>0): z.value -= ( y.value << 4 ) + k[2] ^ y.value + sum.value ^ ( y.value >> 5 ) + k[3] y.value -= ( z.value << 4 ) + k[0] ^ z.value + sum.value ^ ( z.value >> 5 ) + k[1] sum.value -= delta n -= 1 w[0]=y.value w[1]=z.value return w def _get_pass(user_name): config = _config(user_name) user = config[0] if user is not None: if PASS_KEY in user: if _IS_BYPASS in user and user[_IS_BYPASS]: return user[PASS_KEY] else: return _decrypt(user[PASS_KEY], _get_tea_key()) def _get_vlan(user_name, macs): config = _config(user_name) user = config[0] vlan = config[1] if user is not None and vlan is not None: if MAC_KEY in user: mac_set = user[MAC_KEY] for mac in macs: if mac in mac_set: return vlan
MIT License
taizan-hokuto/pytchat
pytchat/core_async/livechat.py
LiveChatAsync._startlisten
python
async def _startlisten(self):
    if not self.continuation:
        channel_id = await util.get_channelid_async(self._client, self._video_id)
        self.continuation = liveparam.getparam(
            self._video_id,
            channel_id,
            past_sec=3)

    await self._listen(self.continuation)
Fetch first continuation parameter, create and start _listen loop.
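A hedged sketch of driving the listener. Constructing LiveChatAsync schedules _startlisten() on the running event loop (see _setup in the context); the video id is a placeholder, and the top-level import and is_alive() are assumed from the library's public API.

import asyncio
from pytchat import LiveChatAsync

async def on_chat(chatdata):
    # processed chat data produced by the configured processor
    print(chatdata)

async def main():
    chat = LiveChatAsync("YOUR_VIDEO_ID", callback=on_chat)
    while chat.is_alive():
        await asyncio.sleep(3)

asyncio.run(main())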
https://github.com/taizan-hokuto/pytchat/blob/6c7dc03d069136d9ae124b7d47a26660a7c48a0c/pytchat/core_async/livechat.py#L152-L163
import asyncio import httpx import json import signal import time import traceback from asyncio import Queue from concurrent.futures import CancelledError from .buffer import Buffer from ..parser.live import Parser from .. import config from .. import exceptions from .. import util from ..paramgen import liveparam, arcparam from ..processors.default.processor import DefaultProcessor from ..processors.combinator import Combinator headers = config.headers MAX_RETRY = 10 class LiveChatAsync: _setup_finished = False def __init__(self, video_id, seektime=-1, processor=DefaultProcessor(), buffer=None, client = httpx.AsyncClient(http2=True), interruptable=True, callback=None, done_callback=None, exception_handler=None, direct_mode=False, force_replay=False, topchat_only=False, logger=config.logger(__name__), replay_continuation=None ): self._client:httpx.AsyncClient = client self._video_id = util.extract_video_id(video_id) self.seektime = seektime if isinstance(processor, tuple): self.processor = Combinator(processor) else: self.processor = processor self._buffer = buffer self._callback = callback self._done_callback = done_callback self._exception_handler = exception_handler self._direct_mode = direct_mode self._is_alive = True self._is_replay = force_replay or (replay_continuation is not None) self._parser = Parser(is_replay=self._is_replay) self._pauser = Queue() self._pauser.put_nowait(None) self._first_fetch = replay_continuation is None self._fetch_url = config._sml if replay_continuation is None else config._smr self._topchat_only = topchat_only self._dat = '' self._last_offset_ms = 0 self._logger = logger self.exception = None self.continuation = replay_continuation LiveChatAsync._logger = logger if exception_handler: self._set_exception_handler(exception_handler) if interruptable: signal.signal(signal.SIGINT, (lambda a, b: self._keyboard_interrupt())) self._setup() def _setup(self): if self._direct_mode: if self._callback is None: raise exceptions.IllegalFunctionCall( "When direct_mode=True, callback parameter is required.") else: if self._buffer is None: self._buffer = Buffer(maxsize=20) if self._callback is None: pass else: loop = asyncio.get_event_loop() loop.create_task(self._callback_loop(self._callback)) loop = asyncio.get_event_loop() self.listen_task = loop.create_task(self._startlisten()) if self._done_callback is None: self.listen_task.add_done_callback(self._finish) else: self.listen_task.add_done_callback(self._done_callback)
MIT License
google/gps_building_blocks
py/gps_building_blocks/airflow/hooks/ga_hook_test.py
PayloadBuilderTest.generate_expected_payload_str_list
python
def generate_expected_payload_str_list(self, payload_dict):
    return ['{k}={v}'.format(k=k, v=self.event_test_data[k])
            for k in payload_dict]
Generate the expected payload string list.

Generates payload key=value pair strings from the test data for result verification.

Args:
    payload_dict: payload dict object containing params and their corresponding values.

Returns:
    payload_str_list: list containing key=value pair strings.
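The transformation is easiest to see in isolation; this standalone sketch inlines the event_test_data from setUp above instead of going through the test class.

event_test_data = {'ec': 'ClientID', 'ea': 'test_event_action',
                   'el': '20190423', 'ev': 1, 'cid': '12345.456789'}

payload = ['{k}={v}'.format(k=k, v=event_test_data[k]) for k in event_test_data]
assert payload == ['ec=ClientID', 'ea=test_event_action',
                   'el=20190423', 'ev=1', 'cid=12345.456789']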
https://github.com/google/gps_building_blocks/blob/b96e855ccf1ec946e78dccde74eeaa39b0f9ad7a/py/gps_building_blocks/airflow/hooks/ga_hook_test.py#L49-L61
import re import parameterized from absl.testing import absltest from absl.testing.absltest import mock from gps_building_blocks.airflow.hooks import ga_hook from gps_building_blocks.airflow.utils import errors from gps_building_blocks.airflow.utils import retry_utils class PayloadBuilderTest(absltest.TestCase): def setUp(self): super(PayloadBuilderTest, self).setUp() self.test_tracking_id = 'UA-12323-4' self.payload_builder = ga_hook.PayloadBuilder(self.test_tracking_id) self.event_test_data = { 'ec': 'ClientID', 'ea': 'test_event_action', 'el': '20190423', 'ev': 1, 'cid': '12345.456789' } self.event_payload_list = self.generate_expected_payload_str_list( self.event_test_data) self.event_payload_list.append('tid={t}'.format(t=self.test_tracking_id))
Apache License 2.0
mindspore-ai/mindinsight
mindinsight/datavisual/data_transform/loader_generators/data_loader_generator.py
DataLoaderGenerator.generate_loaders
python
def generate_loaders(self, loader_pool):
    loader_dict = {}
    if not FileHandler.exists(self._summary_path):
        logger.warning("Summary path does not exist. It will not start loading events data. "
                       "Current path is %r.", self._summary_path)
        return loader_dict

    dir_map_mtime_dict = {}
    min_modify_time = None
    summaries_info = self._summary_watcher.list_summary_directories(self._summary_path)

    for item in summaries_info:
        relative_path = item.get("relative_path")
        current_dir = FileHandler.join(self._summary_path, relative_path)
        dataloader = DataLoader(current_dir)

        if not dataloader.has_valid_files():
            logger.debug("Can not find valid train log file in folder %s , "
                         "will ignore.", relative_path)
            continue

        modify_time = item.get("update_time").timestamp()
        loader_id = self._generate_loader_id(relative_path)
        loader = loader_pool.get(loader_id)
        # prefer the newer of the directory mtime and an existing loader's last update time
        if loader is not None and loader.latest_update_time > modify_time:
            modify_time = loader.latest_update_time

        if not min_modify_time:
            min_modify_time = modify_time

        # collect candidate directories; once the candidate set reaches
        # MAX_DATA_LOADER_SIZE, only sufficiently recent ones are added
        if len(dir_map_mtime_dict) < MAX_DATA_LOADER_SIZE:
            if modify_time < min_modify_time:
                min_modify_time = modify_time
            dir_map_mtime_dict.update({relative_path: modify_time})
        else:
            if modify_time >= min_modify_time:
                dir_map_mtime_dict.update({relative_path: modify_time})

    # the MAX_DATA_LOADER_SIZE most recently modified directories win
    sorted_dir_tuple = sorted(dir_map_mtime_dict.items(),
                              key=lambda d: d[1])[-MAX_DATA_LOADER_SIZE:]

    for relative_path, modify_time in sorted_dir_tuple:
        loader_id = self._generate_loader_id(relative_path)
        loader = self._generate_loader_by_relative_path(relative_path)
        loader_dict.update({loader_id: loader})

    return loader_dict
Generate loaders from the summary path; if the summary path does not exist or contains no valid summaries, an empty dict is returned.

Args:
    loader_pool (dict[str, LoaderStruct]): Current loader pool in data_manager.

Returns:
    dict[str, LoaderStruct], a dict of `Loader`.
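A hedged sketch of calling the generator directly; the summary path is a placeholder and must contain valid summary files for any loaders to be produced.

from mindinsight.datavisual.data_transform.loader_generators.data_loader_generator import DataLoaderGenerator

generator = DataLoaderGenerator("/path/to/summary_base_dir")
loaders = generator.generate_loaders(loader_pool={})   # empty pool on first scan
for loader_id, loader in loaders.items():
    print(loader_id, loader)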
https://github.com/mindspore-ai/mindinsight/blob/253a210719dbb1e55b826f2e489322f402d66676/mindinsight/datavisual/data_transform/loader_generators/data_loader_generator.py#L73-L134
import os from mindinsight.datavisual.common.log import logger from mindinsight.datavisual.common.exceptions import TrainJobNotExistError from mindinsight.datavisual.data_access.file_handler import FileHandler from mindinsight.datavisual.data_transform.data_loader import DataLoader from mindinsight.datavisual.data_transform.loader_generators.loader_generator import MAX_DATA_LOADER_SIZE from mindinsight.datavisual.data_transform.loader_generators.loader_struct import LoaderStruct from mindinsight.datavisual.data_transform.loader_generators.loader_generator import LoaderGenerator from mindinsight.datavisual.data_transform.summary_watcher import SummaryWatcher from mindinsight.utils.exceptions import ParamValueError from mindinsight.utils.exceptions import PathNotExistError class DataLoaderGenerator(LoaderGenerator): def __init__(self, summary_path): self._summary_path = self._check_and_normalize_summary_path(summary_path) self._summary_watcher = SummaryWatcher() def register_folder_analyzer(self, analyzer): self._summary_watcher.register_folder_analyzer(analyzer) def _check_and_normalize_summary_path(self, summary_path): if summary_path is None: logger.warning("Summary path is None. It will not init data loader generator.") raise ParamValueError("Summary path is None.") summary_path = os.path.realpath(summary_path) return summary_path
Apache License 2.0
netsys/kappa
compiler/rt/continuation.py
Continuation.__call__
python
def __call__(self, result):
    return self.run(result, *self.data)
Takes the result of the current computation and resumes execution.
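A toy subclass makes the dispatch concrete; it assumes run() is the abstract hook that __call__ forwards to, and is illustrative rather than taken from the repository.

class AddConstant(Continuation):
    """Resume by adding a captured constant to the incoming result."""

    def run(self, result, addend):
        return result + addend

k = AddConstant(5)     # the constructor stores (5,) in self.data
assert k(37) == 42     # __call__ expands to self.run(37, 5)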
https://github.com/netsys/kappa/blob/de1ab3393d1e6358f66427645c77833d4dc99693/compiler/rt/continuation.py#L11-L13
import abc class Continuation(abc.ABC): def __init__(self, *args) -> None: self.data = args
BSD 2-Clause Simplified License
nicolaslm/feedsubs
reader/tasks.py
synchronize_all_feeds
python
def synchronize_all_feeds():
    current_date = now()
    inactive_user_threshold = current_date - (timedelta(seconds=settings.SESSION_COOKIE_AGE) * 2)
    feeds_to_sync = models.Feed.objects.filter(
        is_sync_enabled=True,
        subscribers__user__is_active=True,
        subscribers__user__last_login__gte=inactive_user_threshold
    )

    ats = list()
    for i in range(0, 29):
        ats.append(current_date + timedelta(minutes=i))

    batch = Batch()
    for feed_id in feeds_to_sync.values_list('id', flat=True):
        batch.schedule_at('synchronize_feed', random.choice(ats), feed_id)
    tasks.schedule_batch(batch)
Synchronize feeds every 30 minutes. To avoid a spike of load, the synchronization is spread over the whole period. Feeds that have their sync explicitly disabled or that have no active subscribers are not synchronized.
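The spreading trick in isolation: each feed gets a start time drawn uniformly from 29 one-minute slots, so a large batch is smeared across the period instead of being enqueued all at once. This standalone sketch only illustrates that distribution; it is not the task itself.

import random
from collections import Counter
from datetime import datetime, timedelta

current_date = datetime.utcnow()
ats = [current_date + timedelta(minutes=i) for i in range(0, 29)]

# pretend there are 10,000 feeds to schedule
chosen = Counter(random.choice(ats) for _ in range(10_000))
print(len(chosen), "distinct start times;", max(chosen.values()), "feeds in the busiest slot")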
https://github.com/nicolaslm/feedsubs/blob/aa41222f25631866598316fc3b631f363f942c28/reader/tasks.py#L34-L58
from datetime import timedelta from logging import getLogger import random from typing import Optional, Tuple from atoma.exceptions import FeedDocumentError from atoma.simple import simple_parse_bytes, Feed as ParsedFeed from django.contrib.auth import get_user_model from django.conf import settings from django.core.files.base import File from django.core.files.storage import default_storage from django.db.models import Count, ObjectDoesNotExist from django.db.models.base import ModelBase from django.db.utils import IntegrityError from django.template.defaultfilters import filesizeformat from django.utils.timezone import now from psycopg2 import errorcodes as pg_error_codes import requests from spinach import Tasks, Batch from um import background_messages from . import ( models, html_processing, image_processing, http_fetcher, caching, utils ) from .settings import READER_CACHE_IMAGES, READER_FEED_ARTICLE_THRESHOLD tasks = Tasks() logger = getLogger(__name__) @tasks.task(name='synchronize_all_feeds', periodicity=timedelta(minutes=30), max_retries=2, max_concurrency=1)
MIT License
borda/pyimsegm
imsegm/classification.py
save_classifier
python
def save_classifier(path_out, classif, clf_name, params, feature_names=None, label_names=None):
    if not os.path.isdir(path_out):
        raise FileNotFoundError('missing folder: %s' % path_out)
    dict_classif = {
        'params': params,
        'name': clf_name,
        'clf_pipeline': classif,
        'features': feature_names,
        'label_names': label_names,
    }

    path_clf = os.path.join(path_out, TEMPLATE_NAME_CLF.format(clf_name))
    logging.info('export classif. of %s to "%s"', dict_classif, path_clf)
    with open(path_clf, 'wb') as f:
        pickle.dump(dict_classif, f)
    logging.debug('export finished')
    return path_clf
Export (pickle) the trained classifier together with its parameters and metadata.

:param str path_out: path for exporting the trained classifier
:param classif: sklearn classifier
:param str clf_name: name of the selected classifier
:param list(str) feature_names: list of string feature names
:param dict params: extra parameters
:param list(str) label_names: list of string label names
:return str: path of the exported classifier file

>>> clf = create_classifiers()['RandForest']
>>> p_clf = save_classifier('.', clf, 'TESTINNG', {})
>>> os.path.basename(p_clf)
'classifier_TESTINNG.pkl'
>>> d_clf = load_classifier(p_clf)
>>> sorted(d_clf.keys())
['clf_pipeline', 'features', 'label_names', 'name', 'params']
>>> d_clf['clf_pipeline']  # doctest: +ELLIPSIS
RandomForestClassifier(...)
>>> d_clf['name']
'TESTINNG'
>>> os.remove(p_clf)
https://github.com/borda/pyimsegm/blob/7463cfc7aad8781564dc84c8780f291cc3c17fe3/imsegm/classification.py#L547-L586
import collections import itertools import logging import os import pickle import random from functools import partial import numpy as np import pandas as pd from scipy import interp from scipy.stats import randint as sp_randint from scipy.stats import uniform as sp_random from sklearn import ( cluster, decomposition, ensemble, feature_selection, linear_model, metrics, model_selection, neighbors, neural_network, pipeline, preprocessing, svm, tree, ) from sklearn.base import clone from imsegm.utilities import ImageDimensionError try: from sklearn.grid_search import GridSearchCV, RandomizedSearchCV except Exception: from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from imsegm.labeling import relabel_max_overlap_unique from imsegm.utilities.experiments import get_nb_workers, WrapExecuteSequence TEMPLATE_NAME_CLF = 'classifier_{}.pkl' DEFAULT_CLASSIF_NAME = 'RandForest' DEFAULT_CLUSTERING = 'kMeans' NAME_CSV_FEATURES_SELECT = 'feature_selection.csv' NAME_CSV_CLASSIF_CV_SCORES = 'classif_{}_cross-val_scores-{}.csv' NAME_CSV_CLASSIF_CV_ROC = 'classif_{}_cross-val_ROC-{}.csv' NAME_TXT_CLASSIF_CV_AUC = 'classif_{}_cross-val_AUC-{}.txt' METRIC_AVERAGES = ('macro', 'weighted') METRIC_SCORING = ('f1_macro', 'accuracy', 'precision_macro', 'recall_macro') ROUND_UNIQUE_FTS_DIGITS = 3 NB_WORKERS_SERACH = get_nb_workers(0.5) DICT_SCORING = { 'f1': metrics.f1_score, 'accuracy': metrics.accuracy_score, 'precision': metrics.precision_score, 'recall': metrics.recall_score, } def create_classifiers(nb_workers=-1): clfs = { 'RandForest': ensemble.RandomForestClassifier( n_estimators=20, min_samples_leaf=2, min_samples_split=3, n_jobs=nb_workers ), 'GradBoost': ensemble.GradientBoostingClassifier( subsample=0.25, warm_start=False, max_depth=6, min_samples_leaf=6, n_estimators=200, min_samples_split=7, ), 'LogistRegr': linear_model.LogisticRegression(solver='sag', n_jobs=nb_workers), 'KNN': neighbors.KNeighborsClassifier(n_jobs=nb_workers), 'SVM': svm.SVC(kernel='rbf', probability=True, tol=2e-3, max_iter=5000), 'DecTree': tree.DecisionTreeClassifier(), 'AdaBoost': ensemble.AdaBoostClassifier(n_estimators=5), } return clfs def create_clf_pipeline(name_classif=DEFAULT_CLASSIF_NAME, pca_coef=0.95): components = [('scaler', preprocessing.StandardScaler())] if pca_coef is not None: components += [('reduce_dim', decomposition.PCA(pca_coef))] components += [('classif', create_classifiers()[name_classif])] clf_pipeline = pipeline.Pipeline(components) return clf_pipeline def create_clf_param_search_grid(name_classif=DEFAULT_CLASSIF_NAME): def _log_space(b, e, n): return np.unique(np.logspace(b, e, n).astype(int)).tolist() clf_params = { 'RandForest': { 'classif__n_estimators': _log_space(0, 2, 40), 'classif__min_samples_split': [2, 3, 5, 7, 9], 'classif__min_samples_leaf': [1, 2, 4, 6, 9], 'classif__criterion': ('gini', 'entropy'), }, 'KNN': { 'classif__n_neighbors': _log_space(0, 2, 20), 'classif__algorithm': ('ball_tree', 'kd_tree'), 'classif__weights': ('uniform', 'distance'), 'classif__leaf_size': _log_space(0, 1.5, 10), }, 'SVM': { 'classif__C': np.linspace(0.2, 1., 8).tolist(), 'classif__kernel': ('poly', 'rbf', 'sigmoid'), 'classif__degree': [1, 2, 4, 6, 9], }, 'DecTree': { 'classif__criterion': ('gini', 'entropy'), 'classif__min_samples_split': [2, 3, 5, 7, 9], 'classif__min_samples_leaf': range(1, 7, 2), }, 'GradBoost': { 'classif__n_estimators': _log_space(0, 2, 25), 'classif__max_depth': range(1, 7, 2), 'classif__min_samples_split': [2, 3, 5, 7, 9], 'classif__min_samples_leaf': range(1, 7, 
2), }, 'LogistRegr': { 'classif__C': np.linspace(0., 1., 5).tolist(), 'classif__solver': ('lbfgs', 'sag'), }, 'AdaBoost': { 'classif__n_estimators': _log_space(0, 2, 20), } } if name_classif not in clf_params.keys(): clf_params[name_classif] = {} logging.warning('not defined classifier name "%s"', name_classif) return clf_params[name_classif] def create_clf_param_search_distrib(name_classif=DEFAULT_CLASSIF_NAME): clf_params = { 'RandForest': { 'classif__n_estimators': sp_randint(2, 25), 'classif__min_samples_split': sp_randint(2, 9), 'classif__min_samples_leaf': sp_randint(1, 7), }, 'KNN': { 'classif__n_neighbors': sp_randint(5, 25), 'classif__algorithm': ('ball_tree', 'kd_tree'), 'classif__weights': ('uniform', 'distance'), }, 'SVM': { 'classif__C': sp_random(0., 1.), 'classif__kernel': ('poly', 'rbf', 'sigmoid'), 'classif__degree': sp_randint(2, 9), }, 'DecTree': { 'classif__criterion': ('gini', 'entropy'), 'classif__min_samples_split': sp_randint(2, 9), 'classif__min_samples_leaf': sp_randint(1, 7), }, 'GradBoost': { 'classif__n_estimators': sp_randint(10, 200), 'classif__max_depth': sp_randint(1, 7), 'classif__min_samples_split': sp_randint(2, 9), 'classif__min_samples_leaf': sp_randint(1, 7), }, 'LogistRegr': { 'classif__C': sp_random(0., 1.), 'classif__solver': ('newton-cg', 'lbfgs', 'sag'), }, 'AdaBoost': { 'classif__n_estimators': sp_randint(2, 100), } } if name_classif not in clf_params.keys(): clf_params[name_classif] = {} return clf_params[name_classif] def create_pipeline_neuron_net(): logistic = linear_model.LogisticRegression() rbm = neural_network.BernoulliRBM(learning_rate=0.05, n_components=35, n_iter=299, verbose=False) clf = pipeline.Pipeline(steps=[('rbm', rbm), ('logistic', logistic)]) return clf def compute_classif_metrics(y_true, y_pred, metric_averages=METRIC_AVERAGES): y_true = np.array(y_true) y_pred = np.array(y_pred) if y_true.shape != y_pred.shape: raise ValueError('prediction (%i) and annotation (%i) should be equal' % (len(y_true), len(y_pred))) logging.debug('unique lbs true: %r, predict %r', np.unique(y_true), np.unique(y_pred)) uq_labels = np.unique(np.hstack((y_true, y_pred))) if len(uq_labels) <= 2: y_true = relabel_sequential(y_true, uq_labels) y_pred = relabel_sequential(y_pred, uq_labels) eval_str = 'EVALUATION: {:<2} PRE: {:.3f} REC: {:.3f} F1: {:.3f} S: {:>6}' try: p, r, f, s = metrics.precision_recall_fscore_support(y_true, y_pred) for lb, _ in enumerate(p): logging.debug(eval_str.format(lb, p[lb], r[lb], f[lb], s[lb])) except Exception: logging.exception('metrics.precision_recall_fscore_support') dict_metrics = { 'ARS': metrics.adjusted_rand_score(y_true, y_pred), 'accuracy': metrics.accuracy_score(y_true, y_pred), 'confusion': metrics.confusion_matrix(y_true, y_pred).tolist(), } names = ['precision', 'recall', 'f1', 'support'] for avg in metric_averages: try: mtr = metrics.precision_recall_fscore_support(y_true, y_pred, average=avg) res = dict(zip(['{}_{}'.format(n, avg) for n in names], mtr)) except Exception: logging.exception('metrics.precision_recall_fscore_support') res = dict(zip(['{}_{}'.format(n, avg) for n in names], [-1] * 4)) dict_metrics.update(res) return dict_metrics def compute_classif_stat_segm_annot(annot_segm_name, drop_labels=None, relabel=False): annot, segm, name = annot_segm_name if segm.shape != annot.shape: raise ImageDimensionError('dimension do not match for segm: %r - annot: %r' % (segm.shape, annot.shape)) y_true, y_pred = annot.ravel(), segm.ravel() if drop_labels is not None: mask = np.ones(y_true.shape, dtype=bool) 
for lb in drop_labels: mask[y_true == lb] = 0 mask[y_pred == lb] = 0 y_true = y_true[mask] y_pred = y_pred[mask] if relabel: y_pred = relabel_max_overlap_unique(y_true, y_pred, keep_bg=False) dict_stat = compute_classif_metrics(y_true, y_pred, metric_averages=['macro']) if len(np.unique(y_pred)) == 2: dict_stat['(FP+FN)/(TP+FN)'] = compute_metric_fpfn_tpfn(y_true, y_pred) dict_stat['(TP+FP)/(TP+FN)'] = compute_metric_tpfp_tpfn(y_true, y_pred) dict_stat['name'] = name return dict_stat def compute_stat_per_image(segms, annots, names=None, nb_workers=2, drop_labels=None, relabel=False): if len(segms) != len(annots): raise RuntimeError('size of segment. (%i) amd annot. (%i) should be equal' % (len(segms), len(annots))) if not names: names = map(str, range(len(segms))) _compute_stat = partial(compute_classif_stat_segm_annot, drop_labels=drop_labels, relabel=relabel) iterate = WrapExecuteSequence( _compute_stat, zip(annots, segms, names), nb_workers=nb_workers, desc='statistic per image' ) list_stat = list(iterate) df_stat = pd.DataFrame(list_stat) df_stat.set_index('name', inplace=True) return df_stat def feature_scoring_selection(features, labels, names=None, path_out=''): logging.info('Feature selection for %s', names) features = np.array(features) if not isinstance(features, np.ndarray) else features labels = np.array(labels) if not isinstance(labels, np.ndarray) else labels logging.debug('Features: %r and labels: %r', features.shape, labels.shape) forest = ensemble.ExtraTreesClassifier(n_estimators=125, random_state=0) forest.fit(features, labels) f_test, _ = feature_selection.f_regression(features, labels) k_best = feature_selection.SelectKBest(feature_selection.f_classif, k='all') k_best.fit(features, labels) variances = feature_selection.VarianceThreshold().fit(features, labels) imp = { 'ExtTree': forest.feature_importances_, 'k-Best': k_best.scores_, 'variance': variances.variances_, 'F-test': f_test } indices = np.argsort(forest.feature_importances_)[::-1] if names is None or len(names) < features.shape[1]: names = map(str, range(1, features.shape[1] + 1)) df_scoring = pd.DataFrame() for i, n in enumerate(names): dict_scores = {k: imp[k][i] for k in imp} dict_scores['feature'] = n df_scoring = df_scoring.append(dict_scores, ignore_index=True) df_scoring.set_index(['feature'], inplace=True) logging.debug(df_scoring) if os.path.exists(path_out): path_csv = os.path.join(path_out, NAME_CSV_FEATURES_SELECT) logging.debug('export Feature scoting to "%s"', path_csv) df_scoring.to_csv(path_csv) return indices, df_scoring
BSD 3-Clause New or Revised License
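The hyper-parameter dictionaries defined in the context above all key on a pipeline step named 'classif'. A minimal sketch of plugging one of them into scikit-learn's RandomizedSearchCV, assuming scikit-learn and scipy are installed; the toy data and pipeline below are illustrative, not part of the source repository:

from scipy.stats import randint as sp_randint
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import Pipeline

# Toy data and a pipeline whose classifier step is named 'classif',
# matching the 'classif__*' keys used by create_clf_param_search_distrib.
X, y = make_classification(n_samples=200, n_features=10, random_state=0)
pipe = Pipeline([('classif', RandomForestClassifier(random_state=0))])

# Same shape as the 'RandForest' entry returned above.
param_distrib = {
    'classif__n_estimators': sp_randint(2, 25),
    'classif__min_samples_split': sp_randint(2, 9),
    'classif__min_samples_leaf': sp_randint(1, 7),
}

search = RandomizedSearchCV(pipe, param_distrib, n_iter=5, cv=3, random_state=0)
search.fit(X, y)
print(search.best_params_)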
lijiannuist/lightdsfd
layers/box_utils.py
sfd_match
python
def sfd_match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx): overlaps = jaccard( truths, point_form(priors) ) best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True) best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True) best_truth_idx.squeeze_(0) best_truth_overlap.squeeze_(0) best_prior_idx.squeeze_(1) best_prior_overlap.squeeze_(1) best_truth_overlap.index_fill_(0, best_prior_idx, 2) for j in range(best_prior_idx.size(0)): best_truth_idx[best_prior_idx[j]] = j conf = labels[best_truth_idx] + 1 conf[best_truth_overlap < threshold] = 0 average_onestage = 6 sort_overlaps, sort_id = overlaps.sort(1, descending=True) for gt_id in range(overlaps.size(0)): condition = best_truth_idx.eq(gt_id) * conf.byte() anchors_of_gt = condition.sum() if anchors_of_gt < average_onestage: num_plus = 0 for ac_id in range( priors.shape[0] ): if sort_overlaps[gt_id][ac_id] < 0.1: break elif not conf[sort_id[gt_id][ac_id]]: best_truth_idx[ sort_id[gt_id][ac_id] ]=gt_id conf[ sort_id[gt_id][ac_id] ] = 1 num_plus+=1 if num_plus == average_onestage - anchors_of_gt: break matches = truths[best_truth_idx] loc = encode(matches, priors, variances) loc_t[idx] = loc conf_t[idx] = conf
S3FD: Single Shot Scale-invariant Face Detector
https://github.com/lijiannuist/lightdsfd/blob/5f04ab89ac08eaf69d16c96f6c9e237701f80281/layers/box_utils.py#L264-L329
from __future__ import division import torch import math import pdb import numpy as np def point_form(boxes): return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, boxes[:, :2] + boxes[:, 2:]/2), 1) def center_size(boxes): return torch.cat([ (boxes[:, 2:] + boxes[:, :2])/2, boxes[:, 2:] - boxes[:, :2]], 1) def intersect(box_a, box_b): A = box_a.size(0) B = box_b.size(0) max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), box_b[:, 2:].unsqueeze(0).expand(A, B, 2)) min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), box_b[:, :2].unsqueeze(0).expand(A, B, 2)) inter = torch.clamp((max_xy - min_xy), min=0) return inter[:, :, 0] * inter[:, :, 1] def jaccard(box_a, box_b): inter = intersect(box_a, box_b) area_a = ((box_a[:, 2]-box_a[:, 0]) * (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) area_b = ((box_b[:, 2]-box_b[:, 0]) * (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) union = area_a + area_b - inter return inter / union def refine_match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx , arm_loc): decode_arm_loc = decode(arm_loc , priors = priors, variances = variances) overlaps = jaccard( truths, decode_arm_loc ) best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True) best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True) best_truth_idx.squeeze_(0) best_truth_overlap.squeeze_(0) best_prior_idx.squeeze_(1) best_prior_overlap.squeeze_(1) best_truth_overlap.index_fill_(0, best_prior_idx, 2) for j in range(best_prior_idx.size(0)): best_truth_idx[best_prior_idx[j]] = j matches = truths[best_truth_idx] conf = labels[best_truth_idx] + 1 conf[best_truth_overlap < threshold] = 0 loc = encode(matches, center_size(decode_arm_loc), variances) loc_t[idx] = loc conf_t[idx] = conf def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx): overlaps = jaccard( truths, point_form(priors) ) best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True) best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True) best_truth_idx.squeeze_(0) best_truth_overlap.squeeze_(0) best_prior_idx.squeeze_(1) best_prior_overlap.squeeze_(1) best_truth_overlap.index_fill_(0, best_prior_idx, 2) for j in range(best_prior_idx.size(0)): best_truth_idx[ best_prior_idx[j] ] = j matches = truths[best_truth_idx] conf = labels[best_truth_idx] + 1 if len(threshold) > 1: conf[best_truth_overlap < threshold[1]] = -1 conf[best_truth_overlap < threshold[0]] = 0 else: conf[best_truth_overlap < threshold[0]] = 0 loc = encode(matches, priors, variances) loc_t[idx] = loc conf_t[idx] = conf def pa_sfd_match(part, threshold, truths, priors, variances, labels, loc_t, conf_t, idx): overlaps = jaccard( truths, point_form(priors) ) best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True) best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True) best_truth_idx.squeeze_(0) best_truth_overlap.squeeze_(0) best_prior_idx.squeeze_(1) best_prior_overlap.squeeze_(1) best_truth_overlap.index_fill_(0, best_prior_idx, 2) for j in range(best_prior_idx.size(0)): best_truth_idx[best_prior_idx[j]] = j conf = labels[best_truth_idx] + 1 conf[best_truth_overlap < threshold] = 0 average_onestage = 6 sort_overlaps, sort_id = overlaps.sort(1, descending=True) for gt_id in range(overlaps.size(0)): condition = best_truth_idx.eq(gt_id) * conf.byte() anchors_of_gt = condition.sum() if anchors_of_gt < average_onestage: num_plus = 0 for ac_id in range( priors.shape[0] ): if sort_overlaps[gt_id][ac_id] < 0.1: break elif not conf[sort_id[gt_id][ac_id]]: 
best_truth_idx[ sort_id[gt_id][ac_id] ]=gt_id conf[ sort_id[gt_id][ac_id] ] = 1 num_plus+=1 if num_plus == average_onestage - anchors_of_gt: break matches = truths[best_truth_idx] loc = encode(matches, priors, variances) loc_t[idx] = loc conf_t[idx] = conf
MIT License
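sfd_match writes its results into loc_t and conf_t in place for one image of a batch. A hedged sketch of the tensor shapes it expects, assuming PyTorch is installed; the import path comes from this record, and the call itself is left commented because it also needs the rest of box_utils (e.g. encode) and mutates its arguments:

import torch
# from layers.box_utils import sfd_match   # path taken from this record

num_priors, num_objs, batch_idx = 16, 2, 0
truths = torch.tensor([[0.10, 0.10, 0.40, 0.40],
                       [0.55, 0.55, 0.90, 0.90]])        # ground-truth boxes (xmin, ymin, xmax, ymax)
labels = torch.zeros(num_objs, dtype=torch.long)         # single "face" class per box
priors = torch.rand(num_priors, 4)                       # anchors in center-size form (cx, cy, w, h)
variances = [0.1, 0.2]
loc_t = torch.zeros(1, num_priors, 4)                    # regression targets, filled in place
conf_t = torch.zeros(1, num_priors, dtype=torch.long)    # label targets, filled in place

# sfd_match(0.35, truths, priors, variances, labels, loc_t, conf_t, batch_idx)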
red-hat-storage/ocs-ci
ocs_ci/deployment/flexy.py
FlexyBase.build_container_args
python
def build_container_args(self, purpose=""): args = list() args.append(f"--env-file={constants.FLEXY_ENV_FILE_UPDATED_PATH}") args.append(f"-w={self.flexy_mnt_container_dir}") if purpose == "destroy": args.append( f"--mount=type=bind,source={self.flexy_host_dir}," f"destination={self.flexy_mnt_container_dir},relabel=shared" ) else: args.append( f"--mount=type=bind,source={self.flexy_host_dir}," f"destination={self.flexy_mnt_container_dir},relabel=shared" ) args.append(f"{self.flexy_img_url}") return args
Builds the most commonly used arguments for the flexy container. Args: purpose (str): purpose for which we are building these args, e.g. destroy, debug. By default it is an empty string, which turns into 'deploy' mode for flexy. Returns: list: of flexy container args
https://github.com/red-hat-storage/ocs-ci/blob/81bc3dd3c2bccbf875ffa8fa5fa2eb0ac9d52b7e/ocs_ci/deployment/flexy.py#L268-L300
import base64 import binascii import hcl import logging import os import time import yaml import io import configparser from semantic_version import Version import subprocess from subprocess import CalledProcessError import shlex import shutil from ocs_ci.framework import config, merge_dict from ocs_ci.ocs import constants from ocs_ci.utility.proxy import update_kubeconfig_with_proxy_url_for_client from ocs_ci.utility.utils import ( clone_repo, exec_cmd, expose_ocp_version, get_ocp_version, get_terraform, get_terraform_ignition_provider, login_to_mirror_registry, wait_for_machineconfigpool_status, ) from ocs_ci.utility.flexy import ( configure_allowed_domains_in_proxy, load_cluster_info, ) logger = logging.getLogger(__name__) class FlexyBase(object): def __init__(self): self.cluster_name = config.ENV_DATA["cluster_name"] self.cluster_path = config.ENV_DATA["cluster_path"] self.flexy_host_dir = os.path.expanduser(constants.FLEXY_HOST_DIR_PATH) self.flexy_prepare_work_dir() self.flexy_mnt_container_dir = config.ENV_DATA.get( "flexy_mnt_container_dir", constants.FLEXY_MNT_CONTAINER_DIR ) self.flexy_img_url = config.ENV_DATA.get( "flexy_img_url", constants.FLEXY_IMAGE_URL ) self.template_file = None if not config.ENV_DATA.get("flexy_env_file"): self.flexy_private_conf_url = config.ENV_DATA.get( "flexy_private_conf_repo", constants.FLEXY_DEFAULT_PRIVATE_CONF_REPO ) self.flexy_private_conf_branch = config.ENV_DATA.get( "flexy_private_conf_branch", constants.FLEXY_DEFAULT_PRIVATE_CONF_BRANCH ) self.flexy_host_private_conf_dir_path = os.path.join( self.flexy_host_dir, "flexy-ocs-private" ) self.flexy_env_file = os.path.join( self.flexy_host_private_conf_dir_path, constants.FLEXY_DEFAULT_ENV_FILE ) else: self.flexy_env_file = config.ENV_DATA["flexy_env_file"] if not config.ENV_DATA.get("flexy_env_file"): self.template_file = config.FLEXY.get( "VARIABLES_LOCATION", os.path.join( constants.OPENSHIFT_MISC_BASE, f"aos-{get_ocp_version('_')}", config.ENV_DATA.get("flexy_template", self.default_flexy_template), ), ) def deploy_prereq(self): if not config.ENV_DATA.get("flexy_env_file"): self.clone_and_unlock_ocs_private_conf() config.FLEXY["VARIABLES_LOCATION"] = self.template_file config.FLEXY["INSTANCE_NAME_PREFIX"] = self.cluster_name config.FLEXY["LAUNCHER_VARS"].update(self.get_installer_payload()) config.FLEXY["LAUNCHER_VARS"].update( { "vm_type_masters": config.ENV_DATA["master_instance_type"], "vm_type_workers": config.ENV_DATA["worker_instance_type"], "num_nodes": str(config.ENV_DATA["master_replicas"]), "num_workers": str(config.ENV_DATA["worker_replicas"]), "ssh_key_name": "openshift-dev", } ) if "master_num_cpus" in config.ENV_DATA: config.FLEXY["LAUNCHER_VARS"]["num_cpus"] = str( config.ENV_DATA["master_num_cpus"] ) if "worker_num_cpus" in config.ENV_DATA: config.FLEXY["LAUNCHER_VARS"]["node_num_cpus"] = str( config.ENV_DATA["worker_num_cpus"] ) if "master_memory" in config.ENV_DATA: config.FLEXY["LAUNCHER_VARS"]["memory"] = str( config.ENV_DATA["master_memory"] ) if "compute_memory" in config.ENV_DATA: config.FLEXY["LAUNCHER_VARS"]["node_memory"] = str( config.ENV_DATA["compute_memory"] ) config.FLEXY["AVAILABILITY_ZONE_COUNT"] = config.ENV_DATA.get( "availability_zone_count", "1" ) config.FLEXY["OPENSHIFT_SSHKEY_PATH"] = config.DEPLOYMENT["ssh_key_private"] if config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM: config.FLEXY["LAUNCHER_VARS"].update( { "iaas_name": "vsphere_config", "rhcos_ami": config.ENV_DATA["vm_template"], } ) if config.DEPLOYMENT.get("proxy"): 
config.FLEXY["LAUNCHER_VARS"].update( { "http_proxy": config.ENV_DATA["proxy_http_proxy"], "https_proxy": config.ENV_DATA.get( "proxy_https_proxy", config.ENV_DATA["proxy_http_proxy"] ), "proxy_for_client_on_install": config.ENV_DATA.get( "client_http_proxy", "" ), } ) config.FLEXY["BUSHSLICER_CONFIG"].update( { "services": { "vsphere_config": { "install_base_domain": config.ENV_DATA["base_domain"], "connect": { "host": config.ENV_DATA["vsphere_server"], "user": config.ENV_DATA["vsphere_user"], "password": config.ENV_DATA["vsphere_password"], }, "common": { "cluster": config.ENV_DATA["vsphere_cluster"], "datacenter": config.ENV_DATA["vsphere_datacenter"], "datastore": config.ENV_DATA["vsphere_datastore"], "CIDR": config.ENV_DATA["machine_cidr"], "internal_CIDR": config.ENV_DATA["machine_cidr"], "network": config.ENV_DATA["vm_network"], }, "create_opts": { "type": ":clone", "clone_opts": { "from_wm": "rhcos-latest", "target_resource_pool": "null", "edit": {}, }, }, "host_connect_opts": { "user": "root", "ssh_private_key": "config/keys/openshift-dev.pem", "class": "SSHAccessibleHost", }, "ipam_server": { "host": config.ENV_DATA["ipam"], "external_host": config.ENV_DATA["ipam"], "token": config.ENV_DATA["ipam_token"], }, "cloud_type": config.ENV_DATA["platform"].lower(), } } } ) self.merge_flexy_env() def get_installer_payload(self, version=None): payload_img = {"installer_payload_image": None} vers = version or config.DEPLOYMENT["installer_version"] installer_version = expose_ocp_version(vers) payload_img["installer_payload_image"] = ":".join( [constants.REGISTRY_SVC, installer_version] ) return payload_img def run_container(self, cmd_string): logger.info(f"Starting Flexy container with options {cmd_string}") with subprocess.Popen( cmd_string, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True, ) as p: for line in p.stdout: logger.info(line.strip()) p.communicate() if p.returncode: logger.error("Flexy command failed") raise CalledProcessError(p.returncode, p.args) logger.info("Flexy run finished successfully") def build_install_cmd(self): cmd = shlex.split("podman run --rm=true") flexy_container_args = self.build_container_args() return cmd + flexy_container_args def build_destroy_cmd(self): cmd = shlex.split("podman run --rm=true") flexy_container_args = self.build_container_args("destroy") return cmd + flexy_container_args + ["destroy"]
MIT License
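build_install_cmd simply prepends "podman run --rm=true" to these container args, so the assembled argv looks roughly like the list below; every path and the image URL are placeholders here, since the real values come from ocs-ci constants and config:

import shlex

flexy_container_args = [
    "--env-file=/home/user/flexy-env-file-updated",      # constants.FLEXY_ENV_FILE_UPDATED_PATH (placeholder)
    "-w=/mnt/flexy",                                      # flexy_mnt_container_dir (placeholder)
    "--mount=type=bind,source=/home/user/flexy-dir,"
    "destination=/mnt/flexy,relabel=shared",
    "quay.io/example/flexy:latest",                       # flexy_img_url (placeholder)
]
cmd = shlex.split("podman run --rm=true") + flexy_container_args
print(" ".join(cmd))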
ptrckhmmr/deal
DEAL/01_LeNet/sampling_methods/simulate_batch.py
SimulateBatchSampler.sampler_select_batch
python
def sampler_select_batch(self, sampler, N, already_selected, y, model, X_test, y_test, **kwargs): m = copy.deepcopy(model) kwargs['y'] = y kwargs['model'] = m kwargs['already_selected'] = copy.copy(already_selected) inds = [] kwargs['N'] = N inds.extend(sampler.select_batch(**kwargs)) kwargs['already_selected'] = sorted(kwargs['already_selected'] + inds) m.fit(self.X[kwargs['already_selected']], y[kwargs['already_selected']]) acc = m.score(X_test, y_test) del m del kwargs['already_selected'] return inds, acc
Calculate the performance of the model if the batch had been selected using the base method without simulation. Args: sampler: dict with two fields 'samplers': list of named samplers 'weights': percentage of batch to allocate to each sampler N: batch size already_selected: indices already selected y: labels to use for training model: model to use for training X_test, y_test: validation set Returns: - indices selected by base method - validation accuracy of model trained on new batch
https://github.com/ptrckhmmr/deal/blob/164ba36a21f7f779557e025bd5acc8a4a42f01a1/DEAL/01_LeNet/sampling_methods/simulate_batch.py#L182-L212
from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import math import numpy as np from sampling_methods.wrapper_sampler_def import AL_MAPPING from sampling_methods.wrapper_sampler_def import WrapperSamplingMethod class SimulateBatchSampler(WrapperSamplingMethod): def __init__(self, X, y, seed, samplers=[{'methods': ('margin', 'uniform'),'weight': (1, 0)}], n_sims=10, train_per_sim=10, return_type='best_sim'): self.name = 'simulate_batch' self.X = X self.y = y self.seed = seed self.n_sims = n_sims self.train_per_sim = train_per_sim self.return_type = return_type self.samplers_list = samplers self.initialize_samplers(self.samplers_list) self.trace = [] self.selected = [] np.random.seed(seed) def simulate_batch(self, sampler, N, already_selected, y, model, X_test, y_test, **kwargs): minibatch = max(int(math.ceil(N / self.train_per_sim)), 1) results = [] best_acc = 0 best_inds = [] self.selected = [] n_minibatch = int(N/minibatch) + (N % minibatch > 0) for _ in range(self.n_sims): inds = [] hallucinated_y = [] kwargs['already_selected'] = copy.copy(already_selected) kwargs['y'] = copy.copy(y) kwargs['model'] = copy.deepcopy(model) for _ in range(n_minibatch): batch_size = min(minibatch, N-len(inds)) if batch_size > 0: kwargs['N'] = batch_size new_inds = sampler.select_batch(**kwargs) inds.extend(new_inds) probs = kwargs['model'].predict_proba(self.X[new_inds]) try: classes = kwargs['model'].best_estimator_.classes_ except: classes = kwargs['model'].classes_ new_y = ([ np.random.choice(classes, p=probs[i, :]) for i in range(batch_size) ]) hallucinated_y.extend(new_y) kwargs['already_selected'] = sorted(kwargs['already_selected'] + new_inds) kwargs['y'][new_inds] = new_y kwargs['model'].fit(self.X[kwargs['already_selected']], kwargs['y'][kwargs['already_selected']]) acc_hallucinated = kwargs['model'].score(X_test, y_test) if acc_hallucinated > best_acc: best_acc = acc_hallucinated best_inds = inds kwargs['model'].fit(self.X[kwargs['already_selected']], y[kwargs['already_selected']]) acc_true = kwargs['model'].score(X_test, y_test) results.append([acc_hallucinated, acc_true]) print('Hallucinated acc: %.3f, Actual Acc: %.3f' % (acc_hallucinated, acc_true)) t = {} t['arm'] = sampler t['data_size'] = len(kwargs['already_selected']) t['inds'] = inds t['y_hal'] = hallucinated_y t['acc_hal'] = acc_hallucinated t['acc_true'] = acc_true self.trace.append(t) self.selected.extend(inds) del kwargs['model'] del kwargs['already_selected'] results = np.array(results) return np.mean(results, axis=0), best_inds, best_acc
Apache License 2.0
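sampler_select_batch only assumes a sampler exposing select_batch(**kwargs) with N and already_selected, plus an sklearn-style model with fit/score. A minimal sketch of that interface using a stand-in uniform sampler (not one of the project's samplers) and scikit-learn:

import numpy as np
from sklearn.linear_model import LogisticRegression

class UniformSampler:
    """Stand-in sampler: picks N random indices outside already_selected."""
    def __init__(self, X, seed=0):
        self.X = X
        self.rng = np.random.RandomState(seed)

    def select_batch(self, N, already_selected, **kwargs):
        pool = np.setdiff1d(np.arange(len(self.X)), already_selected)
        return self.rng.choice(pool, size=N, replace=False).tolist()

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = (X[:, 0] > 0).astype(int)

sampler = UniformSampler(X)
already_selected = list(range(10))
inds = sampler.select_batch(N=5, already_selected=already_selected)

model = LogisticRegression()
model.fit(X[already_selected + inds], y[already_selected + inds])
print(inds, model.score(X, y))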
polysimtools/pysimm
pysimm/apps/random_walk.py
constrained_opt
python
def constrained_opt(s, m, active): print("Constrained Opt...") sim = lmps.Simulation(s, name='constrained_opt') total_atoms = s.particles.count monomer_atoms = m.particles.count p = s.particles[total_atoms] sim.add_custom("group last_monomer id " + str(total_atoms - monomer_atoms) + ":" + str(total_atoms)) sim.add_custom( "group prev_two_monomers id " + str(total_atoms - 3 * monomer_atoms) + ":" + str(total_atoms - monomer_atoms)) sim.add_custom("group non_last_monomers subtract all last_monomer") sim.add_custom("region insertion_area sphere {0} {1} {2} 20 side out units box".format(p.x, p.y, p.z)) sim.add_custom("group 20_ang_away region insertion_area") sim.add_custom("group last_monomer_and_far union 20_ang_away last_monomer") if (active == "system"): sim.add_custom("fix freeze last_monomer setforce 0.0 0.0 0.0") elif (active == "monomer"): sim.add_custom("fix freeze non_last_monomers setforce 0.0 0.0 0.0") elif (active == "nearby"): sim.add_custom("fix freeze last_monomer_and_far setforce 0.0 0.0 0.0") sim.add_min(min_style="cg") sim.run()
pysimm.apps.random_walk.constrained_opt This function is called by redo_monomer_insertion and optimizes the polymer chain s while keeping the last monomer fixed. Args: s: :class:`~pysimm.system.System` is a polymer chain in which the last monomer insertion has generated a hardcore overlap m: reference monomer :class:`~pysimm.system.System`. Must be a capped monomer, with head_cap and tail_cap as the first and last atoms in the .mol file. Returns: nothing; all changes to the polymer chain are written to the argument s
https://github.com/polysimtools/pysimm/blob/175d112d7b54fa63ad4209ee0b3e03694a3750c9/pysimm/apps/random_walk.py#L603-L632
import random import sys from time import strftime from itertools import permutations import numpy as np from pysimm import system, lmps, calc from pysimm import error_print import math from scipy.spatial.transform import Rotation as R try: from itertools import izip as zip except ImportError: pass def displ_next_unit_default(m, s): head_pos = np.zeros(3) tail_pos = np.zeros(3) hcount = 0 tcount = 0 bnd_lngth = 1.8 for p in s: if p.linker == 'head': hcount += 1 head_pos += np.array([p.x, p.y, p.z]) elif p.linker == 'tail': tcount += 1 tail_pos += np.array([p.x, p.y, p.z]) displ = head_pos / hcount - tail_pos / tcount displ_dir = displ / np.linalg.norm(displ) for p, p_ in zip(s, m.particles): p_.x = p.x + displ[0] + bnd_lngth * displ_dir[0] p_.y = p.y + displ[1] + bnd_lngth * displ_dir[1] p_.z = p.z + displ[2] + bnd_lngth * displ_dir[2] return [] def find_last_backbone_vector(s, m): head_pos = [0, 0, 0] tail_pos = [0, 0, 0] for p in s.particles[-1*m.particles.count:]: if p.linker == 'head': head_pos = [p.x, p.y, p.z] elif p.linker == 'tail': tail_pos = [p.x, p.y, p.z] return [head_pos[0] - tail_pos[0], head_pos[1] - tail_pos[1], head_pos[2] - tail_pos[2]] def copolymer(m, nmon, s_=None, **kwargs): m = [x.copy() for x in m] settings = kwargs.get('settings', {}) density = kwargs.get('density', 0.3) f = kwargs.get('forcefield') capped = kwargs.get('capped') unwrap = kwargs.get('unwrap') traj = kwargs.get('traj', True) pattern = kwargs.get('pattern', [1 for _ in range(len(m))]) limit = kwargs.get('limit', 0.1) sim = kwargs.get('sim') for m_ in m: m_.add_particle_bonding() for p in m_.particles: if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'): p.linker = 'head' elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'): p.linker = 'tail' m_.remove_linker_types() if s_ is None: s = system.replicate(m[0], 1, density=density/nmon) else: s = system.replicate(m[0], 1, s_=s_, density=density/nmon) print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon)) for p in s.particles: if p.linker == 'head': last_head = p elif p.linker == 'tail': last_tail = p for m_ in m: if capped: m_.particles.remove(1) m_.remove_spare_bonding() m_.add_particle_bonding() s.add_particle_bonding() if traj: s.write_xyz('random_walk.xyz') temp_nmon = 1 while True: m_ = m.pop(0) m.append(m_) p_ = pattern.pop(0) pattern.append(p_) if temp_nmon == 1 and p_ == 1: m_ = m.pop(0) m.append(m_) p_ = pattern.pop(0) pattern.append(p_) elif temp_nmon == 1: p_ -= 1 for insert in range(p_): head = None tail = None backbone_vector = np.array([last_head.x - last_tail.x, last_head.y - last_tail.y, last_head.z - last_tail.z]) ref_head = None ref_tail = None for p in m_.particles: if p.linker == 'head': ref_head = p elif p.linker == 'tail': ref_tail = p if ref_head and ref_tail: ref_backbone_vector = np.array([ref_head.x - ref_tail.x, ref_head.y - ref_tail.y, ref_head.z - ref_tail.z]) rot_matrix = calc.find_rotation(ref_backbone_vector, backbone_vector) m_.rotate(around=ref_tail, rot_matrix=rot_matrix) translation_vector = [last_tail.x - ref_tail.x, last_tail.y - ref_tail.y, last_tail.z - ref_tail.z] for p in m_.particles: p.x = p.x + translation_vector[0] + 3*backbone_vector[0] p.y = p.y + translation_vector[1] + 3*backbone_vector[1] p.z = p.z + translation_vector[2] + 3*backbone_vector[2] else: print('reference molecule has no head or tail') n = m_.copy() if capped: s.particles.remove(s.particles.count) s.remove_spare_bonding() s.add_particle_bonding() s.add(n, change_dim=False) 
s.add_particle_bonding() head = last_head for p in s.particles[-1*n.particles.count:]: if p.linker == 'tail': tail = p s.make_new_bonds(head, tail, f) temp_nmon += 1 print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), temp_nmon, nmon)) if unwrap: s.unwrap() if sim is None: sim = lmps.Simulation(s, name='relax_%03d' % (temp_nmon), log='relax.log', **settings) sim.add_md(ensemble='nve', limit=limit, **settings) sim.add_min(**settings) if isinstance(sim, lmps.Simulation): sim.system = s sim.name = 'relax_%03d' % (temp_nmon) sim.run(np=settings.get('np')) if unwrap: s.unwrap() if unwrap: s.wrap() for p in s.particles[-1*n.particles.count:]: if p.linker == 'head': last_head = p elif p.linker == 'tail': last_tail = p if temp_nmon >= nmon: break if unwrap: if not s.unwrap(): error_print('something went wrong') return s if traj: s.write_xyz('random_walk.xyz', append=True) if unwrap: s.wrap() for p in s.particles: if p not in s.molecules[p.molecule.tag].particles: s.molecules[p.molecule.tag].particles.add(p) s.write_lammps('polymer.lmps') s.unwrap() s.write_xyz('polymer.xyz') return s def random_walk(m, nmon, s_=None, **kwargs): m = m.copy() extra_bonds = kwargs.get('extra_bonds', False) displ_next_unit = kwargs.get('geometry_rule', displ_next_unit_default) settings = kwargs.get('settings', {}) density = kwargs.get('density', 0.3) f = kwargs.get('forcefield') capped = kwargs.get('capped') unwrap = kwargs.get('unwrap') traj = kwargs.get('traj', True) limit = kwargs.get('limit', 0.1) sim = kwargs.get('sim') debug = kwargs.get('debug', False) m.add_particle_bonding() for p in m.particles: if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'): p.linker = 'head' elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'): p.linker = 'tail' m.remove_linker_types() if s_ is None: s = system.replicate(m, 1, density=density/nmon) else: s = system.replicate(m, 1, s_=s_, density=None) print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon)) if traj: s.write_xyz('random_walk.xyz') if capped: if __check_tags__(m, req_tags=['tail', 'tail_cap']): for p in m.particles: if p.linker == 'tail': for p_ in p.bonded_to: if p_.rnd_wlk_tag == 'tail_cap': p.charge += p_.charge m.particles.remove(p_.tag) m.remove_spare_bonding() break m.add_particle_bonding() else: sys.exit("The capped flag is on, however, the 'tail_cap' atom is not defined") for insertion in range(nmon - 1): head = None tail = None info = displ_next_unit(m, s.particles[-1 * m.particles.count:]) n = m.copy() if extra_bonds: heads = [] for p in s.particles[-1*n.particles.count:]: if p.linker == 'head': heads.append(p) else: for p in s.particles[-1*n.particles.count:]: if p.linker == 'head': head = p if capped: if __check_tags__(m, req_tags=['head_cap']): for p_ in s.particles[-m.particles.count:]: if p_.rnd_wlk_tag == 'head_cap': head.charge += p_.charge s.particles.remove(p_.tag) s.remove_spare_bonding() break s.add_particle_bonding() else: sys.exit("The capped flag is on, however, the 'head_cap' atom is not defined") s.add(n, change_dim=False) s.add_particle_bonding() if extra_bonds: tails = [] for p in s.particles[-1*n.particles.count:]: if p.linker == 'tail': tails.append(p) else: for p in s.particles[-1*n.particles.count:]: if p.linker == 'tail': tail = p if debug: for p in s.particles: if not p.bonded_to: print(p.tag) if head and tail: s.make_new_bonds(head, tail, f) print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion + 2, nmon)) elif extra_bonds and (len(heads) == len(tails)): order = [(0, 
0), (1, 1)] if len(info) == 2: order = [(0, info[0]), (1, info[1])] for elm in order: s.make_new_bonds(heads[elm[0]], tails[elm[1]], f) ''' for h, t, ord in zip(heads, tails, extra_bonds): s.make_new_bonds(h, tails[ord], f) ''' print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion + 2, nmon)) s.write_lammps('curr_progress.lmps') else: print('cannot find head and tail') if sim is None: sim = lmps.Simulation(s, name='relax_%03d' % (insertion + 2), log='relax.log', **settings) sim.add_md(ensemble='nve', limit=limit, **settings) sim.add_min(**settings) if isinstance(sim, lmps.Simulation): sim.system = s sim.name = 'relax_%03d' % (insertion + 2) sim.run(np=settings.get('np')) if traj: s.unwrap() s.write_xyz('random_walk.xyz', append=True) if unwrap: s.wrap() for p in s.particles: if p not in s.molecules[p.molecule.tag].particles: s.molecules[p.molecule.tag].particles.add(p) if debug: s.write_lammps('polymer.lmps') s.write_xyz('polymer.xyz') s.unwrap() return s def find_last_tail_vector(s): result = None if not (__check_tags__(s, req_tags=['head', 'head_cap'])): print("Error: find_last_tail_vector() requires a capped monomer!") for p in s: if p.linker == 'head': for p_ in p.bonded_to: if p_.rnd_wlk_tag == 'head_cap': result = [p_.x - p.x, p_.y - p.y, p_.z - p.z] return result def rot_mat_about_axis(v, theta): theta = theta * 2 * math.pi / 180 r = R.from_rotvec(theta * v) print("Rotating vector: " + str(r.as_rotvec())) return r.as_dcm() def define_plane(a1, a2, a3): p1 = np.array(a1.coords()) p2 = np.array(a2.coords()) p3 = np.array(a3.coords()) cp = np.cross(p3 - p1, p2 - p1) a, b, c = cp d = -np.dot(cp, p3) return np.array([a, b, c, d]) def reflect_coords_thru_plane(atom, plane): try: x1, y1, z1 = atom.coords() except: x1, y1, z1 = atom a, b, c, d = plane k = (-a * x1 - b * y1 - c * z1 - d) / float((a * a + b * b + c * c)) x2 = a * k + x1 y2 = b * k + y1 z2 = c * k + z1 x3 = 2 * x2 - x1 y3 = 2 * y2 - y1 z3 = 2 * z2 - z1 return x3, y3, z3 def scale_monomer(atom, origin, scale): try: x1, y1, z1 = atom.coords() x0, y0, z0 = origin.coords() except: x1, y1, z1 = atom x0, y0, z0 = origin return np.array([x0 + (x1 - x0) * scale, y0 + (y1 - y0) * scale, z0 + (z1 - z0) * scale]) def redo_monomer_insertion(s, m, i): for p in s.particles[-1 * m.particles.count:]: if p.linker == 'tail': tail = p scale_min = 0.1 s.unwrap() s.set_box(padding=10) s.wrap() for p in s.particles[-1 * m.particles.count:]: p.x, p.y, p.z = scale_monomer(p, tail, scale_min) scale = 1 while scale_min * scale * 1.05 < 0.91: print("Scaling up from %s to %s" % (str(scale_min * scale), str(scale * scale_min * 1.05))) scale = scale * 1.05 for p in s.particles[-1 * m.particles.count:]: p.x, p.y, p.z = scale_monomer(p, tail, 1.05) constrained_opt(s, m, "nearby") s.unwrap() s.write_xyz('bad_insertion_' + str(i) + '.xyz', append=True) s.wrap() if s.quality(tolerance=0.2) > 0: error_print("system is broken upon monomer reexpansion") constrained_opt(s, m, "monomer")
MIT License
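As the context shows, redo_monomer_insertion calls constrained_opt(s, m, "nearby") and then constrained_opt(s, m, "monomer") while re-expanding a collapsed monomer. A hedged sketch of the surrounding workflow, assuming a working pysimm + LAMMPS installation; the .mol file name and force-field choice are placeholders, not taken from the source:

from pysimm import forcefield, system
from pysimm.apps import random_walk

# Placeholder inputs: a capped monomer file and a force-field choice.
ff = forcefield.Gaff2()
monomer = system.read_mol('monomer_capped.mol')
monomer.apply_forcefield(ff)

# Grow a short chain with the random_walk app defined in this module.
polymer = random_walk.random_walk(monomer, nmon=5, forcefield=ff, density=0.3)

# Direct calls, as redo_monomer_insertion makes them (require LAMMPS via pysimm.lmps):
# random_walk.constrained_opt(polymer, monomer, "nearby")
# random_walk.constrained_opt(polymer, monomer, "monomer")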
thesadru/genshinstats
genshinstats/genshinstats.py
set_cookie
python
def set_cookie(cookie: Union[Mapping[str, Any], str] = None, **kwargs: Any) -> None: if bool(cookie) == bool(kwargs): raise ValueError("Cannot use both positional and keyword arguments at once") set_cookies(cookie or kwargs)
Logs in using a cookie. Usage: >>> set_cookie(ltuid=..., ltoken=...) >>> set_cookie(account_id=..., cookie_token=...) >>> set_cookie({'ltuid': ..., 'ltoken': ...}) >>> set_cookie("ltuid=...; ltoken=...")
https://github.com/thesadru/genshinstats/blob/60705db87f65e410952b60f9cddbff8363b3ac8d/genshinstats/genshinstats.py#L66-L78
import hashlib import json import random import string import time from http.cookies import SimpleCookie from typing import Any, Dict, List, Mapping, MutableMapping, Union from urllib.parse import urljoin import requests from requests.sessions import RequestsCookieJar, Session from .errors import NotLoggedIn, TooManyRequests, raise_for_error from .pretty import ( prettify_abyss, prettify_activities, prettify_characters, prettify_notes, prettify_stats, ) from .utils import USER_AGENT, is_chinese, recognize_server, retry __all__ = [ "set_cookie", "set_cookies", "get_browser_cookies", "set_cookies_auto", "set_cookie_auto", "fetch_endpoint", "get_user_stats", "get_characters", "get_spiral_abyss", "get_notes", "get_activities", "get_all_user_data", ] session = Session() session.headers.update( { "x-rpc-app_version": "", "x-rpc-client_type": "", "x-rpc-language": "en-us", "ds": "", "user-agent": USER_AGENT, } ) cookies: List[RequestsCookieJar] = [] OS_DS_SALT = "6cqshh5dhw73bzxn20oexa9k516chk7s" CN_DS_SALT = "xV8v4Qu54lUKrEYFZkJhB8cuOh9Asafs" OS_TAKUMI_URL = "https://api-os-takumi.mihoyo.com/" CN_TAKUMI_URL = "https://api-takumi.mihoyo.com/" OS_GAME_RECORD_URL = "https://api-os-takumi.mihoyo.com/game_record/" CN_GAME_RECORD_URL = "https://api-takumi.mihoyo.com/game_record/app/"
MIT License
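The usage lines in the docstring translate directly into a script. A short sketch, assuming the genshinstats package is installed; the credential values are placeholders you would copy from your own hoyolab.com browser cookies, and the network calls are left commented:

import genshinstats as gs

# Placeholder credentials -- copy ltuid / ltoken from your browser's hoyolab.com cookies.
gs.set_cookie(ltuid="123456789", ltoken="your_ltoken_here")

# Equivalent forms accepted by set_cookie:
# gs.set_cookie({"ltuid": "123456789", "ltoken": "your_ltoken_here"})
# gs.set_cookie("ltuid=123456789; ltoken=your_ltoken_here")

# With a valid cookie set, the exported endpoints become usable, e.g.:
# stats = gs.get_user_stats(710785423)   # any Genshin UID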
voxel-fox-ltd/novus
discord/voice_client.py
VoiceClient.guild
python
def guild(self) -> Optional[Guild]: return getattr(self.channel, 'guild', None)
Optional[:class:`Guild`]: The guild we're connected to, if applicable.
https://github.com/voxel-fox-ltd/novus/blob/4b3a3f918b6212ef2167002c4dbfe910727c04b0/discord/voice_client.py#L267-L269
from __future__ import annotations import asyncio import socket import logging import struct import threading from typing import Any, Callable, List, Optional, TYPE_CHECKING, Tuple from . import opus, utils from .backoff import ExponentialBackoff from .gateway import * from .errors import ClientException, ConnectionClosed from .player import AudioPlayer, AudioSource from .utils import MISSING if TYPE_CHECKING: from .client import Client from .guild import Guild from .state import ConnectionState from .user import ClientUser from .opus import Encoder from . import abc from .types.voice import ( GuildVoiceState as GuildVoiceStatePayload, VoiceServerUpdate as VoiceServerUpdatePayload, SupportedModes, ) has_nacl: bool try: import nacl.secret has_nacl = True except ImportError: has_nacl = False __all__ = ( 'VoiceProtocol', 'VoiceClient', ) _log = logging.getLogger(__name__) class VoiceProtocol: def __init__(self, client: Client, channel: abc.Connectable) -> None: self.client: Client = client self.channel: abc.Connectable = channel async def on_voice_state_update(self, data: GuildVoiceStatePayload) -> None: raise NotImplementedError async def on_voice_server_update(self, data: VoiceServerUpdatePayload) -> None: raise NotImplementedError async def connect(self, *, timeout: float, reconnect: bool) -> None: raise NotImplementedError async def disconnect(self, *, force: bool) -> None: raise NotImplementedError def cleanup(self) -> None: key_id, _ = self.channel._get_voice_client_key() self.client._connection._remove_voice_client(key_id) class VoiceClient(VoiceProtocol): endpoint_ip: str voice_port: int secret_key: List[int] ssrc: int def __init__(self, client: Client, channel: abc.Connectable): if not has_nacl: raise RuntimeError("PyNaCl library needed in order to use voice") super().__init__(client, channel) state = client._connection self.token: str = MISSING self.socket = MISSING self.loop: asyncio.AbstractEventLoop = state.loop self._state: ConnectionState = state self._connected: threading.Event = threading.Event() self._handshaking: bool = False self._potentially_reconnecting: bool = False self._voice_state_complete: asyncio.Event = asyncio.Event() self._voice_server_complete: asyncio.Event = asyncio.Event() self.mode: str = MISSING self._connections: int = 0 self.sequence: int = 0 self.timestamp: int = 0 self.timeout: float = 0 self._runner: asyncio.Task = MISSING self._player: Optional[AudioPlayer] = None self.encoder: Encoder = MISSING self._lite_nonce: int = 0 self.ws: DiscordVoiceWebSocket = MISSING warn_nacl = not has_nacl supported_modes: Tuple[SupportedModes, ...] = ( 'xsalsa20_poly1305_lite', 'xsalsa20_poly1305_suffix', 'xsalsa20_poly1305', ) @property
MIT License
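The property is just a getattr fallback, so channels without a guild attribute yield None. A tiny stand-alone illustration with stand-in objects (not novus classes):

class _GuildChannel:
    def __init__(self, guild):
        self.guild = guild

class _PrivateChannel:
    pass  # no .guild attribute, like a private/DM-style connectable

for channel in (_GuildChannel(guild="My Server"), _PrivateChannel()):
    print(getattr(channel, "guild", None))   # "My Server", then None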
mikeboers/pyhaml
haml/parse.py
Parser._prep_stack_for_depth
python
def _prep_stack_for_depth(self, depth): while depth <= self._stack[-1][0]: self._stack.pop()
Pop everything off the stack that is not shorter than the given depth.
https://github.com/mikeboers/pyhaml/blob/b12983dcb01d1cddf808c5bff68ee0d8d720fbb2/haml/parse.py#L381-L384
import itertools import re import tokenize from six import string_types, next from . import nodes def split_balanced_parens(line, depth=0): deltas = {'(': 1, ')': -1} pos = None for pos, char in enumerate(line): depth += deltas.get(char, 0) if not depth: break if pos: return line[:pos+1], line[pos+1:] else: return '', line class Parser(object): def __init__(self): self.root = nodes.Document() self._stack = [((-1, 0), self.root)] def parse_string(self, source): self.parse(source.splitlines()) @property def _topmost_node(self): return self._stack[-1][1] def _peek_buffer(self, i=0): while len(self._buffer) <= i: self._buffer.append(next(self._source)) return self._buffer[i] def _consume_buffer(self): if self._buffer: return self._buffer.pop(0) def _replace_buffer(self, line): self._buffer[0] = line def _make_readline_peeker(self): counter = itertools.count(0) def readline(): try: return self._peek_buffer(next(counter)) except StopIteration: return '' return readline def _peek_python_tokens(self): return tokenize.generate_tokens(self._make_readline_peeker()) def _consume_python_token(self, token): ret = [] line, col = token[3] if line > 1: ret = self._buffer[:line-1] self._buffer[:line-1] = [] ret.append(self._buffer[0][:col]) self._buffer[0] = self._buffer[0][col:] return ''.join(ret) def _match_python_expr(self, first=None, last=None): openers = set('({[') closers = set(')}]') close_to_open = {')': '(', '}': '{', ']': '['} stack = [] try: for token_i, token in enumerate(self._peek_python_tokens()): type_, value, _, _, _ = token if not token_i and first is not None: if value not in first: return if type_ == tokenize.OP: if value in openers: stack.append(token[1]) elif value in closers: if stack[-1] != close_to_open[value]: return stack.pop(-1) if not stack and (last is None or value in last): return self._consume_python_token(token) except IndentationError: return def parse(self, source): self._source = iter(source) self._buffer = [] self._parse_buffer() self._parse_context(self.root) def _parse_buffer(self): indent_str = '' raw_line = None while True: if raw_line is not None: self._consume_buffer() try: raw_line = self._peek_buffer() except StopIteration: break try: while raw_line.endswith('|'): raw_line = raw_line[:-1] if self._peek_buffer(1).endswith('|'): self._consume_buffer() raw_line += self._peek_buffer() except StopIteration: pass line = raw_line.lstrip() if line: inter_depth = len(raw_line) - len(line) intra_depth = 0 indent_str = raw_line[:inter_depth] self._prep_stack_for_depth((inter_depth, intra_depth)) else: inter_depth, intra_depth = self._stack[-1][0] if isinstance(self._topmost_node, nodes.FilterBase): self._topmost_node.add_line(indent_str, line) continue if isinstance(self._topmost_node, nodes.GreedyBase): self._add_node( self._topmost_node.__class__(line), (inter_depth, intra_depth) ) continue if not line: continue while line: self._replace_buffer(line) node, line = self._parse_statement() self._add_node(node, (inter_depth, intra_depth)) line = line.lstrip() intra_depth += 1 def _parse_statement(self): line = self._peek_buffer() if line.startswith('\\'): return ( nodes.Content(line[1:]), '' ) m = re.match(r'/(\[if[^\]]+])?(.*)$', line) if m: return ( nodes.HTMLComment(m.group(2).strip(), (m.group(1) or '').rstrip()), '' ) m = re.match(r''' (&?) # HTML escaping flag = (?:\|(\w+(?:,\w+)*))? 
# mako filters \s* (.*) # expression content $ ''', line, re.X) if m: add_escape, filters, content = m.groups() filters = filters or '' if add_escape: filters = filters + (',' if filters else '') + 'h' return ( nodes.Expression(content, filters), '' ) m = re.match(r'@(\w+)', line) if m: name = m.group(1) line = line[m.end():] self._replace_buffer(line) argspec = self._match_python_expr(first=set('('), last=set(')')) if argspec: argspec = argspec[1:-1] line = self._peek_buffer() return ( nodes.MixinDef(name, argspec), line ) m = re.match(r'\+([\w.]+)', line) if m: name = m.group(1) line = line[m.end():] self._replace_buffer(line) argspec = self._match_python_expr(first=set('('), last=set(')')) if argspec: argspec = argspec[1:-1] line = self._peek_buffer() return ( nodes.MixinCall(name, argspec), line ) m = re.match(r':(\w+)(?:\s+(.+))?$', line) if m: filter, content = m.groups() return ( nodes.Filter(content, filter), '' ) if line.startswith('-#'): return ( nodes.HAMLComment(line[2:].lstrip()), '' ) if line.startswith('!!!'): return ( nodes.Doctype(*line[3:].strip().split()), '' ) m = re.match(r''' (?:%(%?(?:\w+:)?[\w-]*))? # tag name. the extra % is for mako (?: \[(.+?)(?:,(.+?))?\] # object reference and prefix )? ( (?:\#[\w-]+|\.[\w-]+)+ # id/class )? ''', line, re.X) if m and (m.group(1) is not None or m.group(4)): name, object_reference, object_reference_prefix, raw_id_class = m.groups() id, class_ = None, [] for m2 in re.finditer(r'(#|\.)([\w-]+)', raw_id_class or ''): type, value = m2.groups() if type == '#': id = value else: class_.append(value) line = line[m.end():] self._replace_buffer(line) kwargs_expr = self._match_python_expr(first=set('('), last=set(')')) if kwargs_expr: kwargs_expr = kwargs_expr[1:-1] line = self._peek_buffer() m2 = re.match(r'([<>]+)', line) strip_outer = strip_inner = False if m2: strip_outer = '>' in m2.group(1) strip_inner = '<' in m2.group(1) line = line[m2.end():] self_closing = bool(line and line[0] == '/') line = line[int(self_closing):].lstrip() return ( nodes.Tag( name=name, id=id, class_=' '.join(class_), kwargs_expr=kwargs_expr, object_reference=object_reference, object_reference_prefix=object_reference_prefix, self_closing=self_closing, strip_inner=strip_inner, strip_outer=strip_outer, ), line ) m = re.match(r''' - \s* (for|if|while|elif) # control type ''', line, re.X) if m: control_type = m.group(1) self._replace_buffer(line[m.end():].lstrip()) expr = self._match_python_expr(last=set(':')) return ( nodes.Control(control_type, expr[:-1]), self._peek_buffer() ) m = re.match(r'-\s*else\s*:', line, re.X) if m: return ( nodes.Control('else', None), line[m.end():].lstrip() ) if line.startswith('-'): if line.startswith('-!'): return ( nodes.Python(line[2:].lstrip(), module=True), '' ) else: return ( nodes.Python(line[1:].lstrip(), module=False), '' ) return ( nodes.Content(line), '' )
BSD 3-Clause New or Revised License
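The stack holds ((inter_depth, intra_depth), node) pairs, and the tuples compare lexicographically, so everything at the new statement's depth or deeper is popped before the new node is attached. A stand-alone illustration of that loop (toy node names, not real parser nodes):

stack = [((-1, 0), "Document"), ((0, 0), "%html"), ((2, 0), "%body"), ((4, 0), "%p")]

def prep_stack_for_depth(stack, depth):
    # Same loop as Parser._prep_stack_for_depth, on a plain list.
    while depth <= stack[-1][0]:
        stack.pop()

prep_stack_for_depth(stack, (2, 0))   # a new statement indented two spaces
print([node for _, node in stack])    # ['Document', '%html']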
znick/anytask
svn/hooks/mailer.py
OutputBase.start
python
def start(self, group, params): raise NotImplementedError
Override this method. Begin writing an output representation. GROUP is the name of the configuration file group which is causing this output to be produced. PARAMS is a dictionary of any named subexpressions of regular expressions defined in the configuration file, plus the key 'author' contains the author of the action being reported.
https://github.com/znick/anytask/blob/6c609ff1a27fe21af0ede460c7cb71cdc1e15ebd/svn/hooks/mailer.py#L171-L178
import os import sys try: import configparser from urllib.parse import quote as urllib_parse_quote except ImportError: import ConfigParser as configparser from urllib import quote as urllib_parse_quote import time import subprocess if sys.version_info[0] >= 3: from io import StringIO else: from cStringIO import StringIO import smtplib import re import tempfile _MIN_SVN_VERSION = [1, 5, 0] try: import svn.fs import svn.delta import svn.repos import svn.core except ImportError: sys.stderr.write( "You need version %s or better of the Subversion Python bindings.\n" % ".".join([str(x) for x in _MIN_SVN_VERSION])) sys.exit(1) if _MIN_SVN_VERSION > [svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR, svn.core.SVN_VER_PATCH]: sys.stderr.write( "You need version %s or better of the Subversion Python bindings.\n" % ".".join([str(x) for x in _MIN_SVN_VERSION])) sys.exit(1) SEPARATOR = '=' * 78 def main(pool, cmd, config_fname, repos_dir, cmd_args): if cmd == 'commit': revision = int(cmd_args[0]) repos = Repository(repos_dir, revision, pool) cfg = Config(config_fname, repos, {'author': repos.author, 'repos_basename': os.path.basename(repos.repos_dir) }) messenger = Commit(pool, cfg, repos) elif cmd == 'propchange' or cmd == 'propchange2': revision = int(cmd_args[0]) author = cmd_args[1] propname = cmd_args[2] action = (cmd == 'propchange2' and cmd_args[3] or 'A') repos = Repository(repos_dir, revision, pool) repos.author = author cfg = Config(config_fname, repos, {'author': author, 'repos_basename': os.path.basename(repos.repos_dir) }) messenger = PropChange(pool, cfg, repos, author, propname, action) elif cmd == 'lock' or cmd == 'unlock': author = cmd_args[0] repos = Repository(repos_dir, 0, pool) repos.author = author cfg = Config(config_fname, repos, {'author': author, 'repos_basename': os.path.basename(repos.repos_dir) }) messenger = Lock(pool, cfg, repos, author, cmd == 'lock') else: raise UnknownSubcommand(cmd) messenger.generate() def remove_leading_slashes(path): while path and path[0] == '/': path = path[1:] return path class OutputBase: def __init__(self, cfg, repos, prefix_param): self.cfg = cfg self.repos = repos self.prefix_param = prefix_param self._CHUNKSIZE = 128 * 1024 self.subject = "" def make_subject(self, group, params): prefix = self.cfg.get(self.prefix_param, group, params) if prefix: subject = prefix + ' ' + self.subject else: subject = self.subject try: truncate_subject = int( self.cfg.get('truncate_subject', group, params)) except ValueError: truncate_subject = 0 if truncate_subject and len(subject) > truncate_subject: subject = subject[:(truncate_subject - 3)] + "..." return subject
MIT License
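Concrete outputs subclass OutputBase and override start (plus write/finish). A hedged sketch of that contract with a toy config stand-in; the option names mirror what make_subject reads, but the subclass and config values are illustrative, not one of mailer.py's real outputs:

from io import StringIO

class _StubConfig:
    """Toy stand-in for mailer.py's Config: only what make_subject() needs."""
    def get(self, option, group, params):
        return {'commit_subject_prefix': '[svn]', 'truncate_subject': '0'}.get(option, '')

class StdoutOutput(OutputBase):
    def start(self, group, params):
        self.buffer = StringIO()
        self.buffer.write('Subject: %s\n\n' % self.make_subject(group, params))

    def write(self, output):
        self.buffer.write(output)

    def finish(self):
        print(self.buffer.getvalue())

out = StdoutOutput(_StubConfig(), repos=None, prefix_param='commit_subject_prefix')
out.subject = 'r42 - trunk'
out.start('defaults', {'author': 'alice'})
out.write('Changed paths:\n   M /trunk/foo.py\n')
out.finish()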
randolphvi/question-difficulty-prediction
TF/C-MIDP/train_cmidp.py
train_cmidp
python
def train_cmidp(): dh.tab_printer(args, logger) logger.info("Loading data...") logger.info("Data processing...") train_data = dh.load_data_and_labels(args.train_file, args.word2vec_file, data_aug_flag=False) val_data = dh.load_data_and_labels(args.validation_file, args.word2vec_file, data_aug_flag=False) logger.info("Data padding...") x_train_content, x_train_question, x_train_option, y_train = dh.pad_data(train_data, args.pad_seq_len) x_val_content, x_val_question, x_val_option, y_val = dh.pad_data(val_data, args.pad_seq_len) VOCAB_SIZE, EMBEDDING_SIZE, pretrained_word2vec_matrix = dh.load_word2vec_matrix(args.word2vec_file) with tf.Graph().as_default(): session_conf = tf.ConfigProto( allow_soft_placement=args.allow_soft_placement, log_device_placement=args.log_device_placement) session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth sess = tf.Session(config=session_conf) with sess.as_default(): cmidp = TextCMIDP( sequence_length=args.pad_seq_len, vocab_size=VOCAB_SIZE, embedding_type=args.embedding_type, embedding_size=EMBEDDING_SIZE, filter_sizes=args.filter_sizes, num_filters=args.num_filters, pooling_size=args.pooling_size, fc_hidden_size=args.fc_dim, l2_reg_lambda=args.l2_lambda, pretrained_embedding=pretrained_word2vec_matrix) with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate, global_step=cmidp.global_step, decay_steps=args.decay_steps, decay_rate=args.decay_rate, staircase=True) optimizer = tf.train.AdamOptimizer(learning_rate) grads, vars = zip(*optimizer.compute_gradients(cmidp.loss)) grads, _ = tf.clip_by_global_norm(grads, clip_norm=args.norm_ratio) train_op = optimizer.apply_gradients(zip(grads, vars), global_step=cmidp.global_step, name="train_op") grad_summaries = [] for g, v in zip(grads, vars): if g is not None: grad_hist_summary = tf.summary.histogram("{0}/grad/hist".format(v.name), g) sparsity_summary = tf.summary.scalar("{0}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g)) grad_summaries.append(grad_hist_summary) grad_summaries.append(sparsity_summary) grad_summaries_merged = tf.summary.merge(grad_summaries) out_dir = dh.get_out_dir(OPTION, logger) checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints")) best_checkpoint_dir = os.path.abspath(os.path.join(out_dir, "bestcheckpoints")) loss_summary = tf.summary.scalar("loss", cmidp.loss) train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged]) train_summary_dir = os.path.join(out_dir, "summaries", "train") train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph) validation_summary_op = tf.summary.merge([loss_summary]) validation_summary_dir = os.path.join(out_dir, "summaries", "validation") validation_summary_writer = tf.summary.FileWriter(validation_summary_dir, sess.graph) saver = tf.train.Saver(tf.global_variables(), max_to_keep=args.num_checkpoints) best_saver = cm.BestCheckpointSaver(save_dir=best_checkpoint_dir, num_to_keep=3, maximize=False) if OPTION == 'R': logger.info("Loading model...") checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir) logger.info(checkpoint_file) saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file)) saver.restore(sess, checkpoint_file) if OPTION == 'T': if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) config = projector.ProjectorConfig() embedding_conf = config.embeddings.add() 
embedding_conf.tensor_name = "embedding" embedding_conf.metadata_path = args.metadata_file projector.visualize_embeddings(train_summary_writer, config) projector.visualize_embeddings(validation_summary_writer, config) saver.save(sess, os.path.join(out_dir, "embedding", "embedding.ckpt")) current_step = sess.run(cmidp.global_step) def train_step(x_batch_content, x_batch_question, x_batch_option, y_batch): feed_dict = { cmidp.input_x_content: x_batch_content, cmidp.input_x_question: x_batch_question, cmidp.input_x_option: x_batch_option, cmidp.input_y: y_batch, cmidp.dropout_keep_prob: args.dropout_rate, cmidp.is_training: True } _, step, summaries, loss = sess.run( [train_op, cmidp.global_step, train_summary_op, cmidp.loss], feed_dict) logger.info("step {0}: loss {1:g}".format(step, loss)) train_summary_writer.add_summary(summaries, step) def validation_step(x_val_content, x_val_question, x_val_option, y_val, writer=None): batches_validation = dh.batch_iter(list(zip(x_val_content, x_val_question, x_val_option, y_val)), args.batch_size, 1) eval_counter, eval_loss = 0, 0.0 true_labels = [] predicted_scores = [] for batch_validation in batches_validation: x_batch_content, x_batch_question, x_batch_option, y_batch = zip(*batch_validation) feed_dict = { cmidp.input_x_content: x_batch_content, cmidp.input_x_question: x_batch_question, cmidp.input_x_option: x_batch_option, cmidp.input_y: y_batch, cmidp.dropout_keep_prob: 1.0, cmidp.is_training: False } step, summaries, scores, cur_loss = sess.run( [cmidp.global_step, validation_summary_op, cmidp.scores, cmidp.loss], feed_dict) for i in y_batch: true_labels.append(i) for j in scores: predicted_scores.append(j) eval_loss = eval_loss + cur_loss eval_counter = eval_counter + 1 if writer: writer.add_summary(summaries, step) eval_loss = float(eval_loss / eval_counter) pcc, doa = dh.evaluation(true_labels, predicted_scores) rmse = mean_squared_error(true_labels, predicted_scores) ** 0.5 r2 = r2_score(true_labels, predicted_scores) return eval_loss, pcc, doa, rmse, r2 batches_train = dh.batch_iter(list(zip(x_train_content, x_train_question, x_train_option, y_train)), args.batch_size, args.epochs) num_batches_per_epoch = int((len(y_train) - 1) / args.batch_size) + 1 for batch_train in batches_train: x_batch_train_content, x_batch_train_question, x_batch_train_option, y_batch_train = zip(*batch_train) train_step(x_batch_train_content, x_batch_train_question, x_batch_train_option, y_batch_train) current_step = tf.train.global_step(sess, cmidp.global_step) if current_step % args.evaluate_steps == 0: logger.info("\nEvaluation:") eval_loss, pcc, doa, rmse, r2 = validation_step(x_val_content, x_val_question, x_val_option, y_val, writer=validation_summary_writer) logger.info("All Validation set: Loss {0:g} | PCC {1:g} | DOA {2:g} | RMSE {3:g} | R2 {4:g}" .format(eval_loss, pcc, doa, rmse, r2)) best_saver.handle(rmse, sess, current_step) if current_step % args.checkpoint_steps == 0: checkpoint_prefix = os.path.join(checkpoint_dir, "model") path = saver.save(sess, checkpoint_prefix, global_step=current_step) logger.info("Saved model checkpoint to {0}\n".format(path)) if current_step % num_batches_per_epoch == 0: current_epoch = current_step // num_batches_per_epoch logger.info("Epoch {0} has finished!".format(current_epoch)) logger.info("All Done.")
Training CMIDP model.
https://github.com/randolphvi/question-difficulty-prediction/blob/77b4b83b5bc747c5074926d7a37545a5d46ed343/TF/C-MIDP/train_cmidp.py#L25-L220
__author__ = 'Randolph' import os import sys import time import logging sys.path.append('../') logging.getLogger('tensorflow').disabled = True import tensorflow as tf from text_cmidp import TextCMIDP from utils import checkmate as cm from utils import data_helpers as dh from utils import param_parser as parser from tensorboard.plugins import projector from sklearn.metrics import mean_squared_error, r2_score args = parser.parameter_parser() OPTION = dh.option(pattern=0) logger = dh.logger_fn("tflog", "logs/{0}-{1}.log".format('Train' if OPTION == 'T' else 'Restore', time.asctime()))
Apache License 2.0
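The optimizer above wraps tf.train.exponential_decay with staircase=True, i.e. learning_rate * decay_rate ** floor(global_step / decay_steps). Reproduced in plain Python with illustrative numbers (the real base rate, decay_steps and decay_rate come from args):

def decayed_lr(base_lr, global_step, decay_steps, decay_rate, staircase=True):
    p = global_step / decay_steps
    if staircase:
        p = global_step // decay_steps
    return base_lr * decay_rate ** p

for step in (0, 500, 1000, 5000):
    print(step, decayed_lr(0.001, step, decay_steps=1000, decay_rate=0.95))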
jsfehler/flake8-multiline-containers
flake8_multiline_containers.py
get_left_pad
python
def get_left_pad(line: str) -> int: return len(line) - len(line.lstrip(' '))
Get the amount of whitespace before the first character in a line.
https://github.com/jsfehler/flake8-multiline-containers/blob/a57f923b0c3a3e104594fe146533e42499967c66/flake8_multiline_containers.py#L46-L48
import enum import re from typing import List, Tuple import attr STRING_REGEX = re.compile( r'"([^"\\]*(\\.[^"\\]*)*)"|\'([^\'\\]*(\\.[^\'\\]*)*)\'', ) FUNCTION_CALL_REGEX = r'\w+\s*[(]' CONDITIONAL_BLOCK_REGEX = re.compile( r'if\s*[(]|elif\s*[(]|or\s*[(]*[(]|and\s*[(]|not\s*[(]') ASSIGNMENT_REGEX = re.compile(r'(^\s*[A-Za-z_]\w+|\])\s*=\s*([^=]*$)') ONLY_COMMENTS_STRING = '__only_comments__' FUNCTION_STRING = '__func__' class ErrorCodes(enum.Enum): JS101 = "Multi-line container not broken after opening character" JS102 = "Multi-line container does not close on same column as opening" def _error( line_number: int, column: int, error_code: ErrorCodes, ) -> Tuple[int, int, str, None]: return (line_number, column, f'{error_code.name} {error_code.value}', None)
MIT License
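A quick check of what get_left_pad returns, assuming the plugin module is installed (note that lstrip(' ') only strips spaces, so a leading tab is not counted):

from flake8_multiline_containers import get_left_pad

assert get_left_pad("    x = [") == 4
assert get_left_pad("x = [") == 0
assert get_left_pad("\tx = [") == 0   # tabs are not stripped by lstrip(' ')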
huxiaoling/imageseg-2.5d_topo
TopologyForceV1/venv/lib64/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/index.py
PackageFinder.find_all_candidates
python
def find_all_candidates(self, project_name): index_locations = self._get_index_urls_locations(project_name) index_file_loc, index_url_loc = self._sort_locations(index_locations) fl_file_loc, fl_url_loc = self._sort_locations( self.find_links, expand_dir=True, ) file_locations = (Link(url) for url in itertools.chain( index_file_loc, fl_file_loc, )) url_locations = [ link for link in itertools.chain( (Link(url) for url in index_url_loc), (Link(url) for url in fl_url_loc), ) if self._validate_secure_origin(logger, link) ] logger.debug('%d location(s) to search for versions of %s:', len(url_locations), project_name) for location in url_locations: logger.debug('* %s', location) canonical_name = canonicalize_name(project_name) formats = self.format_control.get_allowed_formats(canonical_name) search = Search(project_name, canonical_name, formats) find_links_versions = self._package_versions( (Link(url, '-f') for url in self.find_links), search ) page_versions = [] for page in self._get_pages(url_locations, project_name): logger.debug('Analyzing links from page %s', page.url) with indent_log(): page_versions.extend( self._package_versions(page.iter_links(), search) ) file_versions = self._package_versions(file_locations, search) if file_versions: file_versions.sort(reverse=True) logger.debug( 'Local files found: %s', ', '.join([ url_to_path(candidate.location.url) for candidate in file_versions ]) ) return file_versions + find_links_versions + page_versions
Find all available InstallationCandidate for project_name. This checks index_urls and find_links. All versions found are returned as an InstallationCandidate list. See _link_package_versions for details on which files are accepted.
https://github.com/huxiaoling/imageseg-2.5d_topo/blob/86ca52e53f838309132a67f2a3e58cf69d314770/TopologyForceV1/venv/lib64/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/index.py#L564-L629
from __future__ import absolute_import import cgi import itertools import logging import mimetypes import os import posixpath import re import sys from collections import namedtuple from pip._vendor import html5lib, requests, six from pip._vendor.distlib.compat import unescape from pip._vendor.packaging import specifiers from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.packaging.version import parse as parse_version from pip._vendor.requests.exceptions import RetryError, SSLError from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib import request as urllib_request from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path from pip._internal.exceptions import ( BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename, UnsupportedWheel, ) from pip._internal.models.candidate import InstallationCandidate from pip._internal.models.format_control import FormatControl from pip._internal.models.index import PyPI from pip._internal.models.link import Link from pip._internal.pep425tags import get_supported from pip._internal.utils.compat import ipaddress from pip._internal.utils.logging import indent_log from pip._internal.utils.misc import ( ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, WHEEL_EXTENSION, normalize_path, redact_password_from_url, ) from pip._internal.utils.packaging import check_requires_python from pip._internal.utils.typing import MYPY_CHECK_RUNNING from pip._internal.wheel import Wheel if MYPY_CHECK_RUNNING: from logging import Logger from typing import ( Tuple, Optional, Any, List, Union, Callable, Set, Sequence, Iterable, MutableMapping ) from pip._vendor.packaging.version import _BaseVersion from pip._vendor.requests import Response from pip._internal.req import InstallRequirement from pip._internal.download import PipSession SecureOrigin = Tuple[str, str, Optional[str]] BuildTag = Tuple[Any, ...] 
CandidateSortingKey = Tuple[int, _BaseVersion, BuildTag, Optional[int]] __all__ = ['FormatControl', 'PackageFinder'] SECURE_ORIGINS = [ ("https", "*", "*"), ("*", "localhost", "*"), ("*", "127.0.0.0/8", "*"), ("*", "::1/128", "*"), ("file", "*", None), ("ssh", "*", "*"), ] logger = logging.getLogger(__name__) def _match_vcs_scheme(url): from pip._internal.vcs import VcsSupport for scheme in VcsSupport.schemes: if url.lower().startswith(scheme) and url[len(scheme)] in '+:': return scheme return None def _is_url_like_archive(url): filename = Link(url).filename for bad_ext in ARCHIVE_EXTENSIONS: if filename.endswith(bad_ext): return True return False class _NotHTML(Exception): def __init__(self, content_type, request_desc): super(_NotHTML, self).__init__(content_type, request_desc) self.content_type = content_type self.request_desc = request_desc def _ensure_html_header(response): content_type = response.headers.get("Content-Type", "") if not content_type.lower().startswith("text/html"): raise _NotHTML(content_type, response.request.method) class _NotHTTP(Exception): pass def _ensure_html_response(url, session): scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) if scheme not in {'http', 'https'}: raise _NotHTTP() resp = session.head(url, allow_redirects=True) resp.raise_for_status() _ensure_html_header(resp) def _get_html_response(url, session): if _is_url_like_archive(url): _ensure_html_response(url, session=session) logger.debug('Getting page %s', url) resp = session.get( url, headers={ "Accept": "text/html", "Cache-Control": "max-age=0", }, ) resp.raise_for_status() _ensure_html_header(resp) return resp def _handle_get_page_fail( link, reason, meth=None ): if meth is None: meth = logger.debug meth("Could not fetch URL %s: %s - skipping", link, reason) def _get_html_page(link, session=None): if session is None: raise TypeError( "_get_html_page() missing 1 required keyword argument: 'session'" ) url = link.url.split('#', 1)[0] vcs_scheme = _match_vcs_scheme(url) if vcs_scheme: logger.debug('Cannot look at %s URL %s', vcs_scheme, link) return None scheme, _, path, _, _, _ = urllib_parse.urlparse(url) if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): if not url.endswith('/'): url += '/' url = urllib_parse.urljoin(url, 'index.html') logger.debug(' file: URL is directory, getting %s', url) try: resp = _get_html_response(url, session=session) except _NotHTTP as exc: logger.debug( 'Skipping page %s because it looks like an archive, and cannot ' 'be checked by HEAD.', link, ) except _NotHTML as exc: logger.debug( 'Skipping page %s because the %s request got Content-Type: %s', link, exc.request_desc, exc.content_type, ) except requests.HTTPError as exc: _handle_get_page_fail(link, exc) except RetryError as exc: _handle_get_page_fail(link, exc) except SSLError as exc: reason = "There was a problem confirming the ssl certificate: " reason += str(exc) _handle_get_page_fail(link, reason, meth=logger.info) except requests.ConnectionError as exc: _handle_get_page_fail(link, "connection error: %s" % exc) except requests.Timeout: _handle_get_page_fail(link, "timed out") else: return HTMLPage(resp.content, resp.url, resp.headers) return None class PackageFinder(object): def __init__( self, find_links, index_urls, allow_all_prereleases=False, trusted_hosts=None, session=None, format_control=None, platform=None, versions=None, abi=None, implementation=None, prefer_binary=False ): if session is None: raise TypeError( "PackageFinder() missing 1 required keyword 
argument: " "'session'" ) self.find_links = [] for link in find_links: if link.startswith('~'): new_link = normalize_path(link) if os.path.exists(new_link): link = new_link self.find_links.append(link) self.index_urls = index_urls self.logged_links = set() self.format_control = format_control or FormatControl(set(), set()) self.secure_origins = [ ("*", host, "*") for host in (trusted_hosts if trusted_hosts else []) ] self.allow_all_prereleases = allow_all_prereleases self.session = session self.valid_tags = get_supported( versions=versions, platform=platform, abi=abi, impl=implementation, ) self.prefer_binary = prefer_binary if not HAS_TLS: for link in itertools.chain(self.index_urls, self.find_links): parsed = urllib_parse.urlparse(link) if parsed.scheme == "https": logger.warning( "pip is configured with locations that require " "TLS/SSL, however the ssl module in Python is not " "available." ) break def get_formatted_locations(self): lines = [] if self.index_urls and self.index_urls != [PyPI.simple_url]: lines.append( "Looking in indexes: {}".format(", ".join( redact_password_from_url(url) for url in self.index_urls)) ) if self.find_links: lines.append( "Looking in links: {}".format(", ".join(self.find_links)) ) return "\n".join(lines) @staticmethod def _sort_locations(locations, expand_dir=False): files = [] urls = [] def sort_path(path): url = path_to_url(path) if mimetypes.guess_type(url, strict=False)[0] == 'text/html': urls.append(url) else: files.append(url) for url in locations: is_local_path = os.path.exists(url) is_file_url = url.startswith('file:') if is_local_path or is_file_url: if is_local_path: path = url else: path = url_to_path(url) if os.path.isdir(path): if expand_dir: path = os.path.realpath(path) for item in os.listdir(path): sort_path(os.path.join(path, item)) elif is_file_url: urls.append(url) else: logger.warning( "Path '{0}' is ignored: " "it is a directory.".format(path), ) elif os.path.isfile(path): sort_path(path) else: logger.warning( "Url '%s' is ignored: it is neither a file " "nor a directory.", url, ) elif is_url(url): urls.append(url) else: logger.warning( "Url '%s' is ignored. It is either a non-existing " "path or lacks a specific scheme.", url, ) return files, urls def _candidate_sort_key(self, candidate): support_num = len(self.valid_tags) build_tag = tuple() binary_preference = 0 if candidate.location.is_wheel: wheel = Wheel(candidate.location.filename) if not wheel.supported(self.valid_tags): raise UnsupportedWheel( "%s is not a supported wheel for this platform. It " "can't be sorted." 
% wheel.filename ) if self.prefer_binary: binary_preference = 1 pri = -(wheel.support_index_min(self.valid_tags)) if wheel.build_tag is not None: match = re.match(r'^(\d+)(.*)$', wheel.build_tag) build_tag_groups = match.groups() build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) else: pri = -(support_num) return (binary_preference, candidate.version, build_tag, pri) def _validate_secure_origin(self, logger, location): parsed = urllib_parse.urlparse(str(location)) origin = (parsed.scheme, parsed.hostname, parsed.port) protocol = origin[0].rsplit('+', 1)[-1] for secure_origin in (SECURE_ORIGINS + self.secure_origins): if protocol != secure_origin[0] and secure_origin[0] != "*": continue try: addr = ipaddress.ip_address( origin[1] if ( isinstance(origin[1], six.text_type) or origin[1] is None ) else origin[1].decode("utf8") ) network = ipaddress.ip_network( secure_origin[1] if isinstance(secure_origin[1], six.text_type) else secure_origin[1].decode("utf8") ) except ValueError: if (origin[1] and origin[1].lower() != secure_origin[1].lower() and secure_origin[1] != "*"): continue else: if addr not in network: continue if (origin[2] != secure_origin[2] and secure_origin[2] != "*" and secure_origin[2] is not None): continue return True logger.warning( "The repository located at %s is not a trusted or secure host and " "is being ignored. If this repository is available via HTTPS we " "recommend you use HTTPS instead, otherwise you may silence " "this warning and allow it anyway with '--trusted-host %s'.", parsed.hostname, parsed.hostname, ) return False def _get_index_urls_locations(self, project_name): def mkurl_pypi_url(url): loc = posixpath.join( url, urllib_parse.quote(canonicalize_name(project_name))) if not loc.endswith('/'): loc = loc + '/' return loc return [mkurl_pypi_url(url) for url in self.index_urls]
MIT License
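A minimal usage sketch for the find_all_candidates record above, assuming the vendored pip 19.0.3 internals shown in the context field. PipSession, PackageFinder, and InstallationCandidate are private pip APIs whose import paths and signatures differ across pip versions; the project name 'requests' is an arbitrary example.

from pip._internal.download import PipSession
from pip._internal.index import PackageFinder

# Build a finder against the public index only (no --find-links entries).
session = PipSession()
finder = PackageFinder(
    find_links=[],
    index_urls=['https://pypi.org/simple'],
    session=session,
)

# Every wheel/sdist discovered for the project, gathered from index pages,
# find-links, and local files, returned as one unsorted candidate list.
candidates = finder.find_all_candidates('requests')
for candidate in candidates[:5]:
    print(candidate.project, candidate.version, candidate.location.url)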
darkenergysurvey/ugali
ugali/utils/healpix.py
d_grade_ipix
python
def d_grade_ipix(ipix, nside_in, nside_out, nest=False):
    if nside_in == nside_out:
        return ipix
    if not (nside_in > nside_out):
        raise ValueError("nside_out must be less than nside_in")
    return hp.vec2pix(nside_out, *hp.pix2vec(nside_in, ipix, nest), nest=nest)
Return the indices of the super-pixels which contain each of the sub-pixels
(nside_in > nside_out).

Parameters:
-----------
ipix      : index of the input subpixels
nside_in  : nside of the input subpix
nside_out : nside of the desired superpixels

Returns:
--------
ipix_out : superpixels for each subpixel
https://github.com/darkenergysurvey/ugali/blob/82abffcc92bddf830d89f85cb3966870f7d9f720/ugali/utils/healpix.py#L44-L64
from collections import OrderedDict as odict

import numpy
import numpy as np
import healpy as hp
import fitsio

import ugali.utils.projector
from ugali.utils import fileio
from ugali.utils.logger import logger
import ugali.utils.fileio


def superpixel(subpix, nside_subpix, nside_superpix):
    if nside_subpix == nside_superpix:
        return subpix
    theta, phi = hp.pix2ang(nside_subpix, subpix)
    return hp.ang2pix(nside_superpix, theta, phi)


def subpixel(superpix, nside_superpix, nside_subpix):
    if nside_superpix == nside_subpix:
        return superpix
    vec = hp.pix2vec(nside_superpix, superpix)
    radius = np.degrees(2. * hp.max_pixrad(nside_superpix))
    subpix = query_disc(nside_subpix, vec, radius)
    pix_for_subpix = superpixel(subpix, nside_subpix, nside_superpix)
    return subpix[pix_for_subpix == superpix]
MIT License
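A minimal usage sketch for d_grade_ipix above, assuming the ugali package (and its healpy/numpy dependencies) is installed; the nside values are arbitrary examples.

import numpy as np
import healpy as hp
from ugali.utils.healpix import d_grade_ipix  # assumes ugali is installed

nside_in, nside_out = 256, 64

# Every sub-pixel at nside=256, in RING ordering (nest=False).
subpix = np.arange(hp.nside2npix(nside_in))

# Parent super-pixel at nside=64 for each sub-pixel.
superpix = d_grade_ipix(subpix, nside_in, nside_out)

# Each super-pixel should contain (nside_in // nside_out)**2 = 16 sub-pixels.
counts = np.bincount(superpix, minlength=hp.nside2npix(nside_out))
assert (counts == (nside_in // nside_out) ** 2).all()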
cgatoxford/cgatpipelines
CGATPipelines/pipeline_genesets.py
buildTranscriptTSS
python
def buildTranscriptTSS(infile, outfile):
    job_memory = PARAMS["job_memory"]

    statement = """
    gunzip < %(infile)s
    | cgat gtf2gtf --method=join-exons --log=%(outfile)s.log
    | cgat gtf2gff --method=promotors --promotor-size=1
                   --genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
    | cgat gff2bed --is-gtf --set-name=transcript_id --log=%(outfile)s.log
    | gzip > %(outfile)s
    """
    P.run()
Build a :term:`bed` file with transcription start sites.

This method outputs all transcription start sites within a geneset.
The transcription start site is derived from the most upstream coordinate
of each transcript. The name column of the :term:`bed` file is set to the
`transcript_id`.

Arguments
---------
infile : list
    Input filename with geneset in :term:`gtf` format.
outfile : string
    Output filename with genomic regions in :term:`bed` format.
https://github.com/cgatoxford/cgatpipelines/blob/a34d460b5fc64984f6da0acb18aee43c5e02d5fc/CGATPipelines/pipeline_genesets.py#L901-L933
import sys import re import os import sqlite3 import glob import pandas as pd from ruffus import follows, transform, merge, mkdir, files, jobs_limit, suffix, regex, add_inputs, originate import CGAT.IndexedFasta as IndexedFasta import CGAT.Experiment as E import CGAT.IOTools as IOTools import CGATPipelines.Pipeline as P import CGATPipelines.PipelineGtfsubset as PipelineGtfsubset import CGATPipelines.PipelineUCSC as PipelineUCSC import CGATPipelines.PipelineGeneset as PipelineGeneset import CGATPipelines.PipelineGO as PipelineGO PARAMS = P.getParameters( ["%s/pipeline.ini" % os.path.splitext(__file__)[0], "../pipeline.ini", "pipeline.ini"]) PARAMS.update(dict([ ("interface_geneset_%s" % re.sub("[.]", "_", os.path.basename(P.snip(x, ".gz"))), x) for x in glob.glob('geneset.dir/*.bed.gz')])) def connect(): dbh = sqlite3.connect(PARAMS["database_name"]) return dbh def connectToUCSC(): return PipelineGtfsubset.connectToUCSC( host=PARAMS["ucsc_host"], user=PARAMS["ucsc_user"], database=PARAMS["ucsc_database"]) @follows(mkdir('assembly.dir')) @files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"), PARAMS['interface_contigs']) def buildContigSizes(infile, outfile): prefix = P.snip(infile, ".fasta") fasta = IndexedFasta.IndexedFasta(prefix) contigs = [] for contig, size in fasta.getContigSizes(with_synonyms=False).items(): contigs.append([contig, size]) df_contig = pd.DataFrame(contigs, columns=['contigs', 'size']) df_contig.sort_values('contigs', inplace=True) df_contig.to_csv(outfile, sep="\t", header=False, index=False) @follows(buildContigSizes) @follows(mkdir('assembly.dir')) @files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"), PARAMS['interface_contigs_bed']) def buildContigBed(infile, outfile): prefix = P.snip(infile, ".fasta") fasta = IndexedFasta.IndexedFasta(prefix) outs = IOTools.openFile(outfile, "w") for contig, size in fasta.getContigSizes(with_synonyms=False).items(): outs.write("%s\t%i\t%i\n" % (contig, 0, size)) outs.close() @follows(buildContigBed) @files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"), (PARAMS['interface_contigs_ungapped_bed'], PARAMS['interface_gaps_bed'], )) def buildUngappedContigBed(infile, outfiles): prefix = P.snip(infile, ".fasta") fasta = IndexedFasta.IndexedFasta(prefix) outs_nogap = IOTools.openFile(outfiles[0], "w") outs_gap = IOTools.openFile(outfiles[1], "w") min_gap_size = PARAMS["assembly_gaps_min_size"] for contig, size in fasta.getContigSizes(with_synonyms=False).items(): seq = fasta.getSequence(contig) def gapped_regions(seq): is_gap = seq[0] == "N" last = 0 for x, c in enumerate(seq): if c == "N": if not is_gap: last = x is_gap = True else: if is_gap: yield(last, x) last = x is_gap = False if is_gap: yield last, size last_end = 0 for start, end in gapped_regions(seq): if end - start < min_gap_size: continue if last_end != 0: outs_nogap.write("%s\t%i\t%i\n" % (contig, last_end, start)) outs_gap.write("%s\t%i\t%i\n" % (contig, start, end)) last_end = end if last_end < size: outs_nogap.write("%s\t%i\t%i\n" % (contig, last_end, size)) outs_nogap.close() outs_gap.close() @follows(buildUngappedContigBed) @files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"), PARAMS['interface_cpg_bed']) def buildCpGBed(infile, outfile): job_memory = PARAMS["job_highmemory"] statement = ''' cgat fasta2bed --method=cpg --log=%(outfile)s.log < %(infile)s | bgzip > %(outfile)s ''' P.run() statement = ''' tabix -p bed %(outfile)s ''' P.run() @follows(mkdir('ensembl.dir')) 
@transform(PARAMS["ensembl_filename_gtf"], regex("(\S+)"), r"%s" % PARAMS['interface_geneset_all_gtf']) def buildUCSCGeneSet(infile, outfile): job_memory = PARAMS["job_memory"] statement = ['''zcat %(infile)s | grep 'transcript_id' | cgat gff2gff --method=sanitize --sanitize-method=ucsc --assembly-report="%(ncbi_assembly_report)s" --log=%(outfile)s.log '''] if PARAMS["ncbi_remove_contigs"]: statement.append( ''' --contig-pattern="%(ncbi_remove_contigs)s" ''') statement.append( ''' | cgat gtf2gtf --method=set-gene_biotype-to-source --log=%(outfile)s.log | gzip > %(outfile)s ''') statement = " ".join(statement) P.run() @transform(buildUCSCGeneSet, suffix("ensembl.dir/geneset_all.gtf.gz"), PARAMS['interface_geneset_cds_gtf']) def buildCdsTranscript(infile, outfile): m = PipelineGtfsubset.SubsetGTF(infile) filteroption = PARAMS['ensembl_cgat_feature'] filteritem = ["CDS"] m.filterGTF(outfile, filteroption, filteritem, operators=None) @transform(buildUCSCGeneSet, suffix("ensembl.dir/geneset_all.gtf.gz"), PARAMS['interface_geneset_exons_gtf']) def buildExonTranscript(infile, outfile): m = PipelineGtfsubset.SubsetGTF(infile) filteroption = PARAMS['ensembl_cgat_feature'] filteritem = ["exon"] m.filterGTF(outfile, filteroption, filteritem, operators=None) @transform(buildUCSCGeneSet, suffix("ensembl.dir/geneset_all.gtf.gz"), PARAMS['interface_geneset_coding_exons_gtf']) def buildCodingExonTranscript(infile, outfile): m = PipelineGtfsubset.SubsetGTF(infile) filteroption = [PARAMS['ensembl_cgat_feature'], PARAMS['ensembl_cgat_gene_biotype']] filteritem = ["exon", "protein_coding"] m.filterGTF(outfile, filteroption, filteritem, operators="and") @transform(buildUCSCGeneSet, suffix("ensembl.dir/geneset_all.gtf.gz"), PARAMS['interface_geneset_lincrna_exons_gtf']) def buildLincRNAExonTranscript(infile, outfile): m = PipelineGtfsubset.SubsetGTF(infile) filteroptions = [PARAMS['ensembl_cgat_feature'], PARAMS['ensembl_cgat_gene_biotype']] filteritem = ["exon", "lincRNA"] m.filterGTF(outfile, filteroptions, filteritem, operators="and") @transform(buildUCSCGeneSet, suffix("ensembl.dir/geneset_all.gtf.gz"), PARAMS['interface_geneset_noncoding_exons_gtf']) def buildNonCodingExonTranscript(infile, outfile): m = PipelineGtfsubset.SubsetGTF(infile) filteroptions = [PARAMS['ensembl_cgat_feature'], PARAMS['ensembl_cgat_gene_biotype']] filteritem = ["exon", "protein_coding"] m.filterGTF(outfile, filteroptions, filteritem, operators="and not") @transform((buildUCSCGeneSet, buildCdsTranscript, buildExonTranscript, buildCodingExonTranscript, buildNonCodingExonTranscript, buildLincRNAExonTranscript), suffix(".gtf.gz"), "_gtf.load") def loadTranscripts(infile, outfile): job_memory = PARAMS["job_highmemory"] load_statement = P.build_load_statement( P.toTable(outfile), options="--add-index=gene_id " "--add-index=transcript_id " "--allow-empty-file ") statement = ''' gunzip < %(infile)s | cgat gtf2tsv -f | %(load_statement)s > %(outfile)s''' P.run() @P.add_doc(PipelineGtfsubset.buildFlatGeneSet) @transform(buildUCSCGeneSet, suffix("ensembl.dir/geneset_all.gtf.gz"), PARAMS['interface_geneset_flat_gtf']) def buildFlatGeneSet(infile, outfile): PipelineGtfsubset.buildFlatGeneSet(infile, outfile, job_memory=PARAMS["job_highmemory"]) @follows(mkdir("geneset.dir")) @transform(buildUCSCGeneSet, suffix("ensembl.dir/geneset_all.gtf.gz"), PARAMS["interface_ref_flat"]) def buildRefFlat(infile, outfile): tmpflat = P.getTempFilename(".") job_memory = PARAMS["job_memory"] statement = ''' gtfToGenePred -genePredExt -geneNameAsName2 
%(infile)s %(tmpflat)s; paste <(cut -f 12 %(tmpflat)s) <(cut -f 1-10 %(tmpflat)s) > %(outfile)s ''' P.run() os.unlink(tmpflat) @transform((buildCodingExonTranscript, buildNonCodingExonTranscript, buildLincRNAExonTranscript), regex('.*geneset_(.*)_exons.gtf.gz'), r'geneset.dir/\1_transcript_region.bed.gz') def buildTranscriptRegions(infile, outfile): job_memory = PARAMS["job_memory"] statement = """ gunzip < %(infile)s | cgat gtf2gtf --method=join-exons --log=%(outfile)s.log | cgat gff2bed --is-gtf --set-name=transcript_id --log=%(outfile)s.log | gzip > %(outfile)s """ P.run() @transform((buildCodingExonTranscript, buildNonCodingExonTranscript, buildLincRNAExonTranscript), regex('.*geneset_(.*)_exons.gtf.gz'), r'geneset.dir/\1_gene_region.bed.gz') def buildGeneRegions(infile, outfile): job_memory = PARAMS["job_memory"] statement = """ gunzip < %(infile)s | cgat gtf2gtf --method=merge-transcripts --log=%(outfile)s.log | cgat gff2bed --is-gtf --set-name=gene_id --log=%(outfile)s.log | gzip > %(outfile)s """ P.run() @follows(mkdir("geneset.dir")) @transform((buildCodingExonTranscript, buildNonCodingExonTranscript, buildLincRNAExonTranscript), regex('.*geneset_(.*)_exons.gtf.gz'), r'geneset.dir/\1_transcript_tss.bed.gz')
MIT License
catalyst-cooperative/pudl
src/pudl/transform/ferc714.py
demand_monthly_ba
python
def demand_monthly_ba(tfr_dfs):
    return tfr_dfs
A stub transform function.
https://github.com/catalyst-cooperative/pudl/blob/6a75069b90219a2da55262737b92fe0a024c4fb8/src/pudl/transform/ferc714.py#L528-L530
import logging import re import numpy as np import pandas as pd import pudl from pudl import constants as pc logger = logging.getLogger(__name__) OFFSET_CODE_FIXES = { 102: {"CPT": "CST"}, 110: {"CPT": "EST"}, 115: {"MS": "MST"}, 118: { "CS": "CST", "CD": "CDT", }, 120: { "CTR": "CST", "CSR": "CST", "CPT": "CST", "DST": "CST", np.nan: "CST", }, 133: { "AKS": "AKST", "AST": "AKST", "AKD": "AKDT", "ADT": "AKDT", }, 134: {np.nan: "EST"}, 137: {np.nan: "CST"}, 140: { "1": "EST", "2": "EDT", np.nan: "EST", }, 141: {np.nan: "CST"}, 143: {"MS": "MST"}, 146: {"DST": "EST"}, 148: {np.nan: "CST"}, 151: { "DST": "CDT", np.nan: "CST", }, 153: {np.nan: "MST"}, 154: {np.nan: "MST"}, 156: {np.nan: "CST"}, 157: {"DST": "EDT"}, 161: {"CPT": "CST"}, 163: {"CPT": "CST"}, 164: {np.nan: "CST"}, 165: {"CS": "CST"}, 173: { "CPT": "CST", np.nan: "CST", }, 174: { "CS": "CDT", "CD": "CST", "433": "CDT", }, 176: { "E": "EST", np.nan: "EST", }, 182: {"PPT": "PDT"}, 186: {"EAS": "EST"}, 189: {"CPT": "CST"}, 190: {"CPT": "CST"}, 193: { "CS": "CST", "CD": "CDT", }, 194: {"PPT": "PST"}, 195: {"CPT": "CST"}, 208: {np.nan: "CST"}, 211: { "206": "EST", "DST": "EDT", np.nan: "EST", }, 213: {"CDS": "CDT"}, 216: {np.nan: "CDT"}, 217: { "MPP": "MST", "MPT": "MST", }, 224: {"DST": "EST"}, 225: { "EDS": "EDT", "DST": "EDT", "EPT": "EST", }, 226: {"DST": "CDT"}, 230: {"EPT": "EST"}, 233: {"DST": "EDT"}, 234: { "1": "EST", "2": "EDT", "DST": "EDT", }, 239: {"PPT": "PST"}, 243: {"DST": "PST"}, 245: {"CDS": "CDT"}, 248: {"DST": "EDT"}, 253: {"CPT": "CST"}, 254: {"DST": "CDT"}, 257: {"CPT": "CST"}, 259: {"DST": "CDT"}, 264: {"CDS": "CDT"}, 271: {"EDS": "EDT"}, 275: {"CPT": "CST"}, 277: { "CPT": "CST", np.nan: "CST", }, 281: {"CEN": "CST"}, 288: {np.nan: "EST"}, 293: {np.nan: "MST"}, 294: {np.nan: "EST"}, 296: {"CPT": "CST"}, 297: {"CPT": "CST"}, 298: {"CPT": "CST"}, 299: {"CPT": "CST"}, 307: {"PPT": "PST"}, 308: { "DST": "EDT", "EDS": "EDT", "EPT": "EST", }, 328: { "EPT": "EST", }, } OFFSET_CODE_FIXES_BY_YEAR = [ { "respondent_id_ferc714": 139, "report_year": 2006, "utc_offset_code": "PST" }, { "respondent_id_ferc714": 235, "report_year": 2015, "utc_offset_code": "MST" }, { "respondent_id_ferc714": 289, "report_year": 2011, "utc_offset_code": "CST" }, { "respondent_id_ferc714": 292, "report_year": 2011, "utc_offset_code": "CST" }, ] BAD_RESPONDENTS = [ 319, 99991, 99992, 99993, 99994, 99995, ] OFFSET_CODES = { "EST": pd.Timedelta(-5, unit="hours"), "EDT": pd.Timedelta(-5, unit="hours"), "CST": pd.Timedelta(-6, unit="hours"), "CDT": pd.Timedelta(-6, unit="hours"), "MST": pd.Timedelta(-7, unit="hours"), "MDT": pd.Timedelta(-7, unit="hours"), "PST": pd.Timedelta(-8, unit="hours"), "PDT": pd.Timedelta(-8, unit="hours"), "AKST": pd.Timedelta(-9, unit="hours"), "AKDT": pd.Timedelta(-9, unit="hours"), "HST": pd.Timedelta(-10, unit="hours"), } TZ_CODES = { "EST": "America/New_York", "EDT": "America/New_York", "CST": "America/Chicago", "CDT": "America/Chicago", "MST": "America/Denver", "MDT": "America/Denver", "PST": "America/Los_Angeles", "PDT": "America/Los_Angeles", "AKST": "America/Anchorage", "AKDT": "America/Anchorage", "HST": "Pacific/Honolulu", } EIA_CODE_FIXES = { 125: 2775, 134: 5416, 203: 12341, 257: 59504, 292: 20382, 295: 40229, 301: 14725, 302: 14725, 303: 14725, 304: 14725, 305: 14725, 306: 14725, 307: 14379, 309: 12427, 315: 56090, 323: 58790, 324: 58791, 329: 39347, } RENAME_COLS = { "respondent_id_ferc714": { "respondent_id": "respondent_id_ferc714", "respondent_name": "respondent_name_ferc714", }, "demand_hourly_pa_ferc714": 
{ "report_yr": "report_year", "plan_date": "report_date", "respondent_id": "respondent_id_ferc714", "timezone": "utc_offset_code", }, "description_pa_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", "elec_util_name": "respondent_name_ferc714", "peak_summer": "peak_demand_summer_mw", "peak_winter": "peak_demand_winter_mw", }, "id_certification_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", }, "gen_plants_ba_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", }, "demand_monthly_ba_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", }, "net_energy_load_ba_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", }, "adjacency_ba_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", }, "interchange_ba_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", }, "lambda_hourly_ba_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", }, "lambda_description_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", }, "demand_forecast_pa_ferc714": { "report_yr": "report_year", "respondent_id": "respondent_id_ferc714", }, } def _standardize_offset_codes(df, offset_fixes): logger.debug("Standardizing UTC offset codes.") is_blank = df["utc_offset_code"] == "" code = df["utc_offset_code"].mask(is_blank) return code.groupby(df['respondent_id_ferc714']).apply( lambda x: x.replace(offset_fixes[x.name]) if x.name in offset_fixes else x) def _log_dupes(df, dupe_cols): n_dupes = len(df[df.duplicated(dupe_cols)]) logger.debug(f"Found {n_dupes} duplicated hours.") def respondent_id(tfr_dfs): df = ( tfr_dfs["respondent_id_ferc714"].assign( respondent_name_ferc714=lambda x: x.respondent_name_ferc714.str.strip(), eia_code=lambda x: x.eia_code.replace(to_replace=0, value=pd.NA)) .query("respondent_id_ferc714 not in @BAD_RESPONDENTS") ) for rid in EIA_CODE_FIXES: df.loc[df.respondent_id_ferc714 == rid, "eia_code"] = EIA_CODE_FIXES[rid] tfr_dfs["respondent_id_ferc714"] = df return tfr_dfs def demand_hourly_pa(tfr_dfs): logger.debug("Converting dates into pandas Datetime types.") df = tfr_dfs["demand_hourly_pa_ferc714"].copy() df["report_date"] = pd.to_datetime( df["report_date"], format="%m/%d/%Y", exact=False ) all_dates = { year: set(pd.date_range(f"{year}-01-01", f"{year}-12-31", freq="1D")) for year in range(df["report_year"].min(), df["report_year"].max() + 1) } assert df.groupby(["respondent_id_ferc714", "report_year"]).apply( lambda x: set(x["report_date"]) == all_dates[x.name[1]] ).all() df["utc_offset_code"] = df["utc_offset_code"].str.strip().str.upper() df["utc_offset_code"] = df.pipe( _standardize_offset_codes, OFFSET_CODE_FIXES) for fix in OFFSET_CODE_FIXES_BY_YEAR: mask = ( (df["report_year"] == fix["report_year"]) & (df["respondent_id_ferc714"] == fix["respondent_id_ferc714"]) ) df.loc[mask, "utc_offset_code"] = fix["utc_offset_code"] df["utc_offset"] = df["utc_offset_code"].map(OFFSET_CODES) df["timezone"] = df["utc_offset_code"].map(TZ_CODES) df.drop(columns="utc_offset_code", inplace=True) df.drop(columns="hour25", inplace=True) logger.debug("Melting daily FERC 714 records into hourly records.") df.rename( columns=lambda x: int(re.sub(r"^hour", "", x)) - 1 if "hour" in x else x, inplace=True ) df = df.melt( id_vars=[ "respondent_id_ferc714", "report_year", "report_date", "utc_offset", "timezone", ], value_vars=range(24), var_name="hour", 
value_name="demand_mwh" ) missing_offset = df["utc_offset"].isna() assert df.loc[missing_offset, "demand_mwh"].eq(0).all() df.query("~@missing_offset", inplace=True) logger.debug("Converting local time + offset code to UTC + timezone.") hour_timedeltas = {i: pd.to_timedelta(i, unit="h") for i in range(24)} df["report_date"] += df["hour"].map(hour_timedeltas) df["utc_datetime"] = df["report_date"] - df["utc_offset"] df.drop(columns=["hour", "utc_offset"], inplace=True) duplicated = df.duplicated(["respondent_id_ferc714", "utc_datetime"]) logger.debug( f"Found {np.count_nonzero(duplicated)} duplicate UTC datetimes.") df.query("~@duplicated", inplace=True) mask = ( ( df["report_year"].isin([2006, 2007, 2008, 2009]) & (df["respondent_id_ferc714"] == 156) ) | ( df["report_year"].isin([2006, 2007, 2008, 2009, 2010]) & (df["respondent_id_ferc714"] == 289) ) ) df.loc[mask, "demand_mwh"] *= -1 df["report_date"] = df["report_date"].astype("datetime64[Y]") columns = [ "respondent_id_ferc714", "report_date", "utc_datetime", "timezone", "demand_mwh" ] df.drop(columns=set(df.columns) - set(columns), inplace=True) tfr_dfs["demand_hourly_pa_ferc714"] = df[columns] return tfr_dfs def id_certification(tfr_dfs): return tfr_dfs def gen_plants_ba(tfr_dfs): return tfr_dfs
MIT License
openshift/openshift-client-python
packages/openshift/apiobject.py
APIObject.logs
python
def logs(self, timestamps=False, previous=False, since=None, limit_bytes=None, tail=-1,
         cmd_args=None, try_longshots=True):

    log_aggregation = {}

    def add_entry(collection, entry_key, action):
        entry = action.out
        if action.status != 0:
            entry += u'\n>>>>Error during log collection rc={}<<<<\n{}\n'.format(action.status,
                                                                                 action.err)
        entry = entry.strip().replace('\r\n', '\n')
        collection[entry_key] = entry

    base_args = list()

    if previous:
        base_args.append('-p')

    if since:
        base_args.append('--since={}'.format(since))

    if limit_bytes:
        base_args.append('--limit-bytes={}'.format(limit_bytes))

    if timestamps:
        base_args.append('--timestamps')

    base_args.append('--tail={}'.format(tail))

    pod_list = []

    if kind_matches(self.kind(), 'pod'):
        pod_list.append(self)

    elif kind_matches(self.kind(), ['ds', 'statefulset']):
        pod_list.extend(self.get_owned('pod'))

    elif kind_matches(self.kind(), 'deployment'):
        for rs in self.get_owned('rs'):
            pod_list.extend(rs.get_owned('pod'))

    elif kind_matches(self.kind(), 'dc'):
        for rc in self.get_owned('rc'):
            pod_list.extend(rc.get_owned('pod'))

    elif kind_matches(self.kind(), ['rs', 'rc']):
        pod_list.extend(self.get_owned('pod'))

    elif kind_matches(self.kind(), ['bc', 'build']):
        action = oc_action(self.context, "logs",
                           cmd_args=[base_args, cmd_args, self.qname()],
                           namespace=self.namespace(if_missing=None))
        add_entry(log_aggregation, self.fqname(), action)

    else:
        if try_longshots:
            pod_list.extend(self.get_owned('pod'))
            if not pod_list:
                action = oc_action(self.context, "logs",
                                   cmd_args=[base_args, cmd_args, self.qname()],
                                   namespace=self.namespace(if_missing=None))
                add_entry(log_aggregation, self.fqname(), action)
        else:
            return dict()

    for pod in pod_list:
        for container in pod.model.spec.containers:
            action = oc_action(self.context, "logs",
                               cmd_args=[base_args, cmd_args, pod.qname(),
                                         '-c', container.name,
                                         '--namespace={}'.format(pod.namespace())],
                               no_namespace=True)
            key = '{}->{}({})'.format(self.fqname(), pod.qname(), container.name)
            add_entry(log_aggregation, key, action)

    return log_aggregation
Attempts to collect logs from running pods associated with this resource. Supports daemonset,
statefulset, deploymentconfig, deployment, replicationcontroller, replicationset,
buildconfig, build, pod.

If a resource is associated with many pods, all pods owned by that resource will be
individually scraped for logs. For example, if a daemonset is specified, an invocation of
`oc logs ...` will be made for each pod associated with that daemonset -- this is different
from the output of `oc logs ds/name`.

If try_longshots==True, logs can also be collected for any object which directly owns pods
or responds successfully to "oc logs kind/name".

Since pods can be pending or otherwise unable to deliver logs, if an error is encountered
during an 'oc logs' invocation, the stderr will be considered the 'logs' of the object.
In other words, oc returning an error will not terminate this function.

:param cmd_args: An optional list of additional arguments to pass on the command line
:param try_longshots: If True, an attempt will be made to collect logs from resources
    which the library does not natively understand to possess logs. If False and the
    object is not recognized, an empty dict will be returned.
:return: Returns a dict of {<fully-qualified-name> -> <log output>}. The fully qualified
    name will be a human readable, unique identifier containing namespace, object, and
    container-name (when applicable).
https://github.com/openshift/openshift-client-python/blob/39fcf605f0e6774c13e2219d8051db87d90f51f6/packages/openshift/apiobject.py#L355-L456
from __future__ import absolute_import import yaml import sys import copy from .action import * from .model import * from .result import * from .naming import kind_matches from .context import cur_context from .selector import selector from . import util _DEFAULT = object() def _obj_to_primitive(obj): if isinstance(obj, APIObject): return _obj_to_primitive(obj.model._primitive()) if isinstance(obj, Model): return _obj_to_primitive(obj._primitive()) if isinstance(obj, list): l = [] for e in obj: l.append(_obj_to_primitive(e)) return l if isinstance(obj, dict): return obj raise ValueError("Unknown how to transform into dict: {}".format(type(obj))) def _as_model(obj): if isinstance(obj, (Model, ListModel)): return obj if isinstance(obj, list): return ListModel(obj) return Model(obj) def _access_field(val, err_msg, if_missing=_DEFAULT, lowercase=False): if val is Missing or val == '': if if_missing is _DEFAULT: raise ModelError(err_msg) else: return if_missing elif lowercase: val = val.lower() return val class APIObject: def __init__(self, dict_to_model=None, string_to_model=None, context=None): if string_to_model is not None: string_to_model = string_to_model.strip() if string_to_model == "": dict_to_model = { "apiVersion": "v1", "kind": "List", "metadata": {}, "items": [] } elif string_to_model.startswith("{"): dict_to_model = json.loads(string_to_model) elif "\n" in string_to_model: dict_to_model = yaml.safe_load(string_to_model) else: raise ValueError("Unable to detect markup format (not yaml or json)") self.model = Model(dict_to_model) self.context = copy.copy(context if context else cur_context()) self.context.project_name = self.namespace(None) def as_dict(self): return self.model._primitive() def as_json(self, indent=4): return json.dumps(self.model._primitive(), indent=indent).strip() def kind(self, lowercase=True, if_missing=_DEFAULT): return _access_field(self.model.kind, "Object model does not contain .kind", if_missing=if_missing, lowercase=lowercase) def qkind(self, lowercase=True, if_missing=_DEFAULT): return '{kind}{group}'.format(kind=self.kind(if_missing=if_missing, lowercase=lowercase), group=self.group(prefix_dot=True, if_missing='', lowercase=lowercase)) def apiVersion(self, lowercase=True, if_missing=_DEFAULT): return _access_field(self.model.apiVersion, "Object model does not contain .apiVersion", if_missing=if_missing, lowercase=lowercase) def group(self, prefix_dot=False, lowercase=True, if_missing=_DEFAULT): apiVersion = self.apiVersion(lowercase=lowercase, if_missing=None) if apiVersion is None: if if_missing is _DEFAULT: raise ModelError("Unable to find apiVersion in object") else: return if_missing if '/' not in apiVersion: return '' group = apiVersion.split('/')[0] if prefix_dot: return '.{}'.format(group) return group def is_kind(self, test_kind_or_kind_list): return kind_matches(self.kind(), test_kind_or_kind_list) def uid(self, if_missing=_DEFAULT): return _access_field(self.model.metadata.uid, "Object model does not contain .metadata.uid", if_missing=if_missing, lowercase=False) def resource_version(self, if_missing=_DEFAULT): return _access_field(self.model.metadata.resourceVersion, "Object model does not contain .metadata.resourceVersion", if_missing=if_missing, lowercase=False) def api_version(self, if_missing=_DEFAULT): return _access_field(self.model.apiVersion, "Object model does not contain apiVersion", if_missing=if_missing, lowercase=False) def name(self, if_missing=_DEFAULT): return _access_field(self.model.metadata.name, "Object model does not 
contain .metadata.name", if_missing=if_missing, lowercase=False) def namespace(self, if_missing=_DEFAULT): return _access_field(self.model.metadata.namespace, "Object model does not contain .metadata.namespace", if_missing=if_missing, lowercase=True) def fqname(self): return '{ns}:{kind}{group}/{name}'.format(ns=self.namespace(if_missing=''), group=self.group(prefix_dot=True), kind=self.kind(), name=self.name() ) def qname(self): return self.qkind() + '/' + self.name() def _object_def_action(self, verb, auto_raise=True, cmd_args=None): content = self.as_dict() base_args = list() base_args.extend(["-o=name", "-f", "-"]) result = Result(verb) result.add_action(oc_action(self.context, verb, cmd_args=[base_args, cmd_args], stdin_obj=content, namespace=self.namespace(if_missing=None))) if auto_raise: result.fail_if("Error during object {}".format(verb)) return result def self_selector(self): return selector(self.qname(), static_context=self.context) def exists(self, on_exists_func=_DEFAULT, on_absent_func=_DEFAULT): does_exist = self.self_selector().count_existing() == 1 ret = None if does_exist: if on_exists_func is not _DEFAULT: ret = on_exists_func(self) elif on_absent_func is not _DEFAULT: ret = on_absent_func(self) return does_exist, ret def create(self, cmd_args=None): return self._object_def_action("create", cmd_args=cmd_args) def replace(self, cmd_args=None): return self._object_def_action("replace", cmd_args=cmd_args) def create_or_replace(self, cmd_args=None): _, action = self.exists(on_exists_func=lambda: self.replace(cmd_args=cmd_args), on_absent_func=lambda: self.create(cmd_args=cmd_args)) return action def describe(self, auto_raise=True): r = Result('describe') r.add_action(oc_action(self.context, "describe", cmd_args=[self.qname()], namespace=self.namespace(if_missing=None))) if auto_raise: r.fail_if('Error describing object') return (r.out() + '\n' + r.err()).strip()
Apache License 2.0
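A minimal usage sketch for APIObject.logs() above, using the library's selector API shown in the context field. It assumes a reachable cluster plus an existing project 'my-project' and deployment 'my-app' (both hypothetical names).

import openshift as oc  # openshift-client-python

with oc.project('my-project'):
    for obj in oc.selector('deployment/my-app').objects():
        # logs() fans out to every pod owned by the deployment and returns
        # a dict of {fully-qualified-name -> log text}; errors from
        # 'oc logs' are captured in the text rather than raised.
        for name, text in obj.logs(tail=50, timestamps=True).items():
            print('==== {} ===='.format(name))
            print(text)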