Dataset columns (string length range or number of distinct values per column):

  repository_name      string, lengths 7 to 107
  function_path        string, lengths 4 to 190
  function_identifier  string, lengths 1 to 236
  language             string, 1 distinct value
  function             string, lengths 9 to 647k
  docstring            string, lengths 5 to 488k
  function_url         string, lengths 71 to 285
  context              string, lengths 0 to 2.51M
  license              string, 5 distinct values
hoxmark/deep_reinforcement_active_learning
reinforcement/logger.py
ExternalLogger.__init__
python
def __init__(self, external_logger_name):
    self.external_logger_name = external_logger_name
Create a summary writer logging to log_dir.
https://github.com/hoxmark/deep_reinforcement_active_learning/blob/7458916d6f75c7fbfcfd4bc81763ab5ba16208ad/reinforcement/logger.py#L64-L66
import requests import tensorflow as tf import numpy as np import scipy.misc from visdom import Visdom from config import opt try: from StringIO import StringIO except ImportError: from io import BytesIO class LocalLogger(object): def __init__(self, log_dir): self.log_dir = log_dir self.writer = tf.summary.FileWriter(log_dir) def scalar_summary(self, tag, value, step): summary = tf.Summary( value=[tf.Summary.Value(tag=tag, simple_value=value)]) self.writer.add_summary(summary, step) def dict_scalar_summary(self, prefix, values, step): for key in values: tag = "{}/{}".format(prefix, key) self.scalar_summary(tag, values[key], step) def histo_summary(self, tag, values, step, bins=1000): counts, bin_edges = np.histogram(values, bins=bins) hist = tf.HistogramProto() hist.min = float(np.min(values)) hist.max = float(np.max(values)) hist.num = int(np.prod(values.shape)) hist.sum = float(np.sum(values)) hist.sum_squares = float(np.sum(values**2)) bin_edges = bin_edges[1:] for edge in bin_edges: hist.bucket_limit.append(edge) for c in counts: hist.bucket.append(c) summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)]) self.writer.add_summary(summary, step) self.writer.flush() class ExternalLogger(object):
MIT License
fausecteam/ctf-gameserver
src/ctf_gameserver/web/registration/views.py
delete_team
python
def delete_team(request):
    try:
        team = request.user.team
    except Team.DoesNotExist:
        team = None

    user_form = forms.UserForm(prefix='user', instance=request.user)
    team_form = forms.TeamForm(prefix='team', instance=team)

    if request.method == 'POST':
        delete_form = forms.DeleteForm(request.POST, user=request.user, prefix='delete')

        if delete_form.is_valid():
            request.user.delete()
            logout(request)
            messages.success(request, _('Your team has been deleted.'))
            return redirect(settings.HOME_URL)
    else:
        delete_form = forms.DeleteForm(user=request.user, prefix='delete')

    return render(request, 'edit_team.html', {
        'user_form': user_form,
        'team_form': team_form,
        'delete_form': delete_form
    })
View for deletion of a User and the associated Team. This renders the 'edit_team' template with a modal overlay for deletion. The modal is rendered in static HTML instead of showing it dynamically to avoid the need for (custom) JavaScript, especially when handling form errors in the modal.
https://github.com/fausecteam/ctf-gameserver/blob/00290cf103e5aa4eea9d9157aba2ab9d07ab3282/src/ctf_gameserver/web/registration/views.py#L111-L144
import logging import random from django.db import transaction, IntegrityError from django.views.generic import ListView from django.shortcuts import render, redirect from django.conf import settings from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _ from django.contrib import messages from django.contrib.auth import logout, get_user_model from django.contrib.auth.decorators import login_required from django.contrib.admin.views.decorators import staff_member_required from ctf_gameserver.web.scoring.decorators import before_competition_required, registration_open_required import ctf_gameserver.web.scoring.models as scoring_models from . import forms from .models import Team from .util import email_token_generator User = get_user_model() class TeamList(ListView): queryset = Team.active_not_nop_objects.order_by('user__username') context_object_name = 'teams' template_name = 'team_list.html' @registration_open_required @transaction.atomic def register(request): if request.method == 'POST': user_form = forms.UserForm(request.POST, prefix='user') team_form = forms.TeamForm(request.POST, request.FILES, prefix='team') if user_form.is_valid() and team_form.is_valid(): user = user_form.save() team_form.save(user) user_form.send_confirmation_mail(request) messages.success(request, mark_safe(_('Successful registration! A confirmation mail has been sent to ' 'your formal email address. <strong>You must click the link inside ' 'that email to complete your sign-up, otherwise you will not be ' 'able to participate.</strong>'))) return redirect(settings.HOME_URL) else: user_form = forms.UserForm(prefix='user') team_form = forms.TeamForm(prefix='team') return render(request, 'register.html', {'user_form': user_form, 'team_form': team_form}) @login_required @registration_open_required @transaction.atomic def edit_team(request): try: team = request.user.team except Team.DoesNotExist: team = None if request.method == 'POST': user_form = forms.UserForm(request.POST, prefix='user', instance=request.user) team_form = forms.TeamForm(request.POST, request.FILES, prefix='team', instance=team) if user_form.is_valid() and team_form.is_valid(): user = user_form.save() team_form.save(user) if 'email' in user_form.changed_data: user_form.send_confirmation_mail(request) logout(request) messages.warning(request, _('A confirmation mail has been sent to your new formal email ' 'address. Please visit the link inside that email. Until then, ' 'your team has been deactivated and you have been logged out.')) return redirect(settings.HOME_URL) team_form = forms.TeamForm(prefix='team', instance=team) else: user_form = forms.UserForm(prefix='user', instance=request.user) team_form = forms.TeamForm(prefix='team', instance=team) game_control = scoring_models.GameControl.get_instance() show_delete_button = not game_control.competition_started() return render(request, 'edit_team.html', { 'team': team, 'user_form': user_form, 'team_form': team_form, 'show_delete_button': show_delete_button, 'delete_form': None }) @login_required @before_competition_required @registration_open_required @transaction.atomic
ISC License
gregdavill/kibuzzard
KiBuzzard/deps/fonttools/Lib/fontTools/t1Lib/__init__.py
readPFB
python
def readPFB(path, onlyHeader=False):
    data = []
    with open(path, "rb") as f:
        while True:
            if f.read(1) != bytechr(128):
                raise T1Error('corrupt PFB file')
            code = byteord(f.read(1))
            if code in [1, 2]:
                chunklen = stringToLong(f.read(4))
                chunk = f.read(chunklen)
                assert len(chunk) == chunklen
                data.append(chunk)
            elif code == 3:
                break
            else:
                raise T1Error('bad chunk code: ' + repr(code))
            if onlyHeader:
                break
    data = bytesjoin(data)
    assertType1(data)
    return data
reads a PFB font file, returns raw data
https://github.com/gregdavill/kibuzzard/blob/22e7358f7d5eb4d802700334988d2c2f68ad7869/KiBuzzard/deps/fonttools/Lib/fontTools/t1Lib/__init__.py#L180-L201
from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools.misc import eexec from fontTools.misc.macCreatorType import getMacCreatorAndType import os import re __author__ = "jvr" __version__ = "1.0b2" DEBUG = 0 try: try: from Carbon import Res except ImportError: import Res except ImportError: haveMacSupport = 0 else: haveMacSupport = 1 class T1Error(Exception): pass class T1Font(object): def __init__(self, path, encoding="ascii", kind=None): if kind is None: self.data, _ = read(path) elif kind == "LWFN": self.data = readLWFN(path) elif kind == "PFB": self.data = readPFB(path) elif kind == "OTHER": self.data = readOther(path) else: raise ValueError(kind) self.encoding = encoding def saveAs(self, path, type, dohex=False): write(path, self.getData(), type, dohex) def getData(self): return self.data def getGlyphSet(self): return self["CharStrings"] def __getitem__(self, key): if not hasattr(self, "font"): self.parse() return self.font[key] def parse(self): from fontTools.misc import psLib from fontTools.misc import psCharStrings self.font = psLib.suckfont(self.data, self.encoding) charStrings = self.font["CharStrings"] lenIV = self.font["Private"].get("lenIV", 4) assert lenIV >= 0 subrs = self.font["Private"]["Subrs"] for glyphName, charString in charStrings.items(): charString, R = eexec.decrypt(charString, 4330) charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) for i in range(len(subrs)): charString, R = eexec.decrypt(subrs[i], 4330) subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) del self.data def read(path, onlyHeader=False): _, ext = os.path.splitext(path) ext = ext.lower() creator, typ = getMacCreatorAndType(path) if typ == 'LWFN': return readLWFN(path, onlyHeader), 'LWFN' if ext == '.pfb': return readPFB(path, onlyHeader), 'PFB' else: return readOther(path), 'OTHER' def write(path, data, kind='OTHER', dohex=False): assertType1(data) kind = kind.upper() try: os.remove(path) except os.error: pass err = 1 try: if kind == 'LWFN': writeLWFN(path, data) elif kind == 'PFB': writePFB(path, data) else: writeOther(path, data, dohex) err = 0 finally: if err and not DEBUG: try: os.remove(path) except os.error: pass LWFNCHUNKSIZE = 2000 HEXLINELENGTH = 80 def readLWFN(path, onlyHeader=False): from fontTools.misc.macRes import ResourceReader reader = ResourceReader(path) try: data = [] for res in reader.get('POST', []): code = byteord(res.data[0]) if byteord(res.data[1]) != 0: raise T1Error('corrupt LWFN file') if code in [1, 2]: if onlyHeader and code == 2: break data.append(res.data[2:]) elif code in [3, 5]: break elif code == 4: with open(path, "rb") as f: data.append(f.read()) elif code == 0: pass else: raise T1Error('bad chunk code: ' + repr(code)) finally: reader.close() data = bytesjoin(data) assertType1(data) return data
MIT License
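A minimal usage sketch for the `readPFB` entry above (not part of the dataset row): it assumes a placeholder font file `MyFont.pfb` on disk and uses only the module-level function shown in this record.

```python
# Hedged usage sketch; "MyFont.pfb" is a placeholder path, not a real asset.
from fontTools import t1Lib

data = t1Lib.readPFB("MyFont.pfb")                     # all PFB chunks joined into one bytes object
header = t1Lib.readPFB("MyFont.pfb", onlyHeader=True)  # stop after the first (cleartext) chunk
print(data[:2])  # a valid Type 1 program starts with b"%!"
```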
letolab/airy
airy/core/web.py
UIProxy.set_meta_description
python
def set_meta_description(self, text):
    return self.execute('airy.ui.meta.description("%s")' % text)
Set page meta description to ``text``.
https://github.com/letolab/airy/blob/4dc53bec4aa0cd9b983a0a626fecdf49d14bdf94/airy/core/web.py#L170-L174
import calendar from airy.core.conf import settings from airy.core.exceptions import Http404 from airy.core.files.uploadedfile import SimpleUploadedFile from airy.core.reportbug import report_on_fail from airy.utils.encoding import smart_unicode from airy.utils.functional import curry from tornado.web import * from tornado.escape import * from tornadio2 import TornadioRouter, SocketConnection, event from urlparse import urlparse, parse_qs from urllib2 import unquote import logging import base64 class ConnectionSet(set): def _get_filter_condition(self, item, key, value): try: key, tester = key.split('__', 1) except ValueError: tester = None if tester: if callable(getattr(getattr(item, key), tester)): condition = getattr(getattr(item, key), tester)(value) else: condition = getattr(getattr(item, key), tester) == value else: condition = getattr(item, key) == value return condition def filter(self, **kwargs): filtered_set = ConnectionSet(self) for item in self: for key, value in kwargs.iteritems(): condition = self._get_filter_condition(item, key, value) if not condition: filtered_set = filtered_set - set([item]) break return filtered_set def exclude(self, *args, **kwargs): if len(args): for conn in args: if isinstance(conn, AiryHandler): kwargs['session__session_id'] = conn.connection.session.session_id else: kwargs['session__session_id'] = conn.session.session_id filtered_set = ConnectionSet(self) for item in self: for key, value in kwargs.iteritems(): condition = self._get_filter_condition(item, key, value) if condition: filtered_set = filtered_set - set([item]) break return filtered_set @property def ui(self): return UIProxySet(self) def __getattr__(self, name): def filtered_func(conn_set, name, *args, **kwargs): for item in conn_set: getattr(item, name)(*args, **kwargs) return conn_set return curry(filtered_func, self, name) class UIProxy(object): _data = '' def __init__(self, connection): self.connection = connection def execute(self, data): self.connection.emit('execute', data) self._data = '' return self def redirect(self, url): return self.execute('airy.ui.redirect("%s");' % url) def insert(self, target, data=None): data = data or self._data return self.execute('airy.ui.insert("%s", %s);' % (target, json_encode(data))) def after(self, target, data=None): data = data or self._data return self.execute('airy.ui.after("%s", %s);' % (target, json_encode(data))) def append(self, target, data=None): data = data or self._data return self.execute('airy.ui.append("%s", %s);' % (target, json_encode(data))) def prepend(self, target, data=None): data = data or self._data return self.execute('airy.ui.prepend("%s", %s);' % (target, json_encode(data))) def remove(self, target): return self.execute('airy.ui.remove("%s")' % target) def set_title(self, text): return self.execute('airy.ui.title("%s")' % text)
BSD 2-Clause Simplified License
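A hypothetical snippet (not from the airy repo) showing how `set_meta_description` chains with the other `UIProxy` helpers visible in this record's context; `ui` stands in for a `UIProxy` bound to the current connection.

```python
# Assumes `ui` is a UIProxy instance for an active connection (hypothetical).
ui.set_title("Dashboard")
ui.set_meta_description("Realtime dashboard rendered over a socket")
ui.insert("#content", "<h1>Hello</h1>")  # emits airy.ui.insert(...) on the client
```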
google/clusterfuzz
src/clusterfuzz/_internal/build_management/revisions.py
get_components_list
python
def get_components_list(component_revisions_dict, job_type):
  components = sorted(component_revisions_dict.keys())
  if utils.is_chromium():
    return components

  project_name = data_handler.get_project_name(job_type)
  if not project_name:
    return components

  main_repo = data_handler.get_main_repo(job_type)
  project_src = '/src/' + project_name
  for component in components.copy():
    if component_revisions_dict[component]['url'] == main_repo:
      components.remove(component)
      components.insert(0, component)
      break

    if component == project_src:
      components.remove(component)
      components.insert(0, component)
      break

    if project_name.lower() in os.path.basename(component).lower():
      components.remove(component)
      components.insert(0, component)

  return components
Return a prioritized order of components based on job type.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/clusterfuzz/_internal/build_management/revisions.py#L300-L332
import ast import base64 import bisect import os import re import time import urllib.parse import six from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.build_management import overrides from clusterfuzz._internal.build_management import source_mapper from clusterfuzz._internal.config import local_config from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment CHROMIUM_GIT_ROOT_URL = 'https://chromium.googlesource.com' CRREV_NUMBERING_URL = ( 'https://cr-rev.appspot.com/_ah/api/crrev/v1/get_numbering') CLANK_URL = 'https://chrome-internal.googlesource.com/clank/internal/apps.git' CLANK_REVISION_FILE_COMPONENT_REGEX = re.compile( r'.*["]([^"]+)["]\s*:\s*["]([^"]+)["]') COMPONENT_NAMES_BLACKLIST = [ 'api', 'bin', 'data', 'dist', 'lib', 'pylib', 'source', 'src' ] DISK_CACHE_SIZE = 1000 SOURCE_MAP_EXTENSION = '.srcmap.json' FIND_BRANCHED_FROM = re.compile(r'Cr-Branched-From:.*master@\{#(\d+)\}') def _add_components_from_dict(deps_dict, vars_dict, revisions_dict): if not deps_dict: return for key, value in six.iteritems(deps_dict): url = rev = None if isinstance(value, str): url, _, rev = value.partition('@') elif isinstance(value, dict): if 'revision' in value: url = value['url'] rev = value['revision'] elif 'url' in value and value['url'] is not None: url, _, rev = value['url'].partition('@') if url and rev: url = url.format(**vars_dict) rev = rev.format(**vars_dict) revisions_dict[key] = { 'name': _get_component_display_name(key), 'rev': rev, 'url': url } def _clank_revision_file_to_revisions_dict(content): component_revision_mappings = {} for line in content.splitlines(): match = CLANK_REVISION_FILE_COMPONENT_REGEX.match(line) if not match: continue component = match.group(1) revision = match.group(2) component_revision_mappings[component] = revision if not component_revision_mappings: logs.log_error('Failed to get component revision mappings for clank.') return None chromium_revision = component_revision_mappings['chromium_revision'] clank_revision = component_revision_mappings['clank_revision'] revisions_dict = get_component_revisions_dict(chromium_revision, None) if revisions_dict is None: logs.log_error( 'Failed to get chromium component revisions.', chromium_revision=chromium_revision, clank_revision=clank_revision) return None revisions_dict['/src/clank'] = { 'name': 'Clank', 'url': CLANK_URL, 'rev': clank_revision } return revisions_dict def _get_component_display_name(name, default=None): if default and name in ['', 'default', '/src']: return default.capitalize() names = name.split('/') name_index = -1 if len(names) > 1 and names[-1] in COMPONENT_NAMES_BLACKLIST: name_index -= 1 return names[name_index].capitalize() def _get_display_revision(component_revision_dict): if 'commit_pos' in component_revision_dict: return component_revision_dict['commit_pos'] return component_revision_dict['rev'] or '<empty>' def _get_link_text(start_component_revision_dict, end_component_revision_dict): start_revision = _get_display_revision(start_component_revision_dict) end_revision = _get_display_revision(end_component_revision_dict) if start_revision == end_revision: return str(start_revision) return '%s:%s' % (start_revision, end_revision) def _get_link_url(start_component_revision_dict, end_component_revision_dict): url = start_component_revision_dict['url'] if not url: 
return None vcs_viewer = source_mapper.get_vcs_viewer_for_url(url) if not vcs_viewer: return None start_revision = _get_revision(start_component_revision_dict) end_revision = _get_revision(end_component_revision_dict) if start_revision == end_revision: return vcs_viewer.get_source_url_for_revision(start_revision) return vcs_viewer.get_source_url_for_revision_diff(start_revision, end_revision) def _get_revision(component_revision_dict): return component_revision_dict['rev'] def _get_url_content(url): if url.startswith(storage.GS_PREFIX): url_data = storage.read_data(url) if url_data is None: return None url_content = url_data.decode('utf-8') else: url_content = utils.fetch_url(url) if url_content and url.endswith('format=text'): url_content = base64.b64decode(url_content) return url_content def _git_url_for_chromium_repository(repository): return '%s/%s.git' % (CHROMIUM_GIT_ROOT_URL, repository) def _is_clank(url): return '/chrome-test-builds/android' in url def _is_deps(url): return urllib.parse.urlparse(url).path.endswith('/DEPS') def _src_map_to_revisions_dict(src_map, project_name): revisions_dict = {} for key in src_map: if 'url' in src_map[key] and 'rev' in src_map[key]: revisions_dict[key] = { 'name': _get_component_display_name(key, project_name), 'rev': src_map[key]['rev'], 'url': src_map[key]['url'] } return revisions_dict @memoize.wrap(memoize.FifoOnDisk(DISK_CACHE_SIZE)) @memoize.wrap(memoize.Memcache(60 * 60 * 24 * 30)) def _git_commit_position_to_git_hash_for_chromium(revision, repository): request_variables = { 'number': revision, 'numbering_identifier': 'refs/heads/main', 'numbering_type': 'COMMIT_POSITION', 'project': 'chromium', 'repo': repository, 'fields': 'git_sha', } query_string = urllib.parse.urlencode(request_variables) query_url = '%s?%s' % (CRREV_NUMBERING_URL, query_string) url_content = _get_url_content(query_url) if url_content is None: logs.log_error('Failed to fetch git hash from url: ' + query_url) return None result_dict = _to_dict(url_content) if result_dict is None: logs.log_error('Failed to parse git hash from url: ' + query_url) return None return result_dict['git_sha'] def _to_dict(contents): try: result = ast.literal_eval(contents) if isinstance(result, dict): return result except (ValueError, TypeError): pass return None def deps_to_revisions_dict(content): local_context = {} global_context = { 'Var': lambda x: local_context.get('vars', {}).get(x), 'Str': str, } exec(content, global_context, local_context) revisions_dict = {} vars_dict = local_context.get('vars', {}) deps_dict = local_context.get('deps') if not deps_dict: logs.log_error('Deps format has changed, code needs fixing.') return None _add_components_from_dict(deps_dict, vars_dict, revisions_dict) deps_os_dict = local_context.get('deps_os') if deps_os_dict: for deps_os in list(deps_os_dict.values()): _add_components_from_dict(deps_os, vars_dict, revisions_dict) return revisions_dict
Apache License 2.0
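An illustrative, hypothetical input for the `get_components_list` entry above; the repository URLs and revisions are made up, and the `data_handler` lookups are described rather than executed, since they require a job configuration and datastore access.

```python
# Hypothetical component_revisions_dict for a non-Chromium job (made-up values).
component_revisions_dict = {
    '/src': {'name': 'Src', 'rev': 'aaa111',
             'url': 'https://github.com/example/oss-project.git'},
    '/src/oss-project': {'name': 'Oss-project', 'rev': 'bbb222',
                         'url': 'https://github.com/example/oss-project.git'},
    '/src/third_party/dep': {'name': 'Dep', 'rev': 'ccc333',
                             'url': 'https://github.com/example/dep.git'},
}
# If data_handler.get_main_repo(job_type) returned the oss-project URL, the
# first component whose 'url' matches it ('/src' in this made-up dict) would
# be moved to the front of the otherwise alphabetically sorted list and the
# loop would break, so the project's own component is reported first.
```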
hozn/stravalib
stravalib/client.py
Client.leave_club
python
def leave_club(self, club_id):
    self.protocol.post('clubs/{id}/leave', id=club_id)
Leave club on behalf of authenticated user. (Access token with write permissions required.) :param club_id:
https://github.com/hozn/stravalib/blob/d3bc7b8094ce318e8dfd94bf1aeba3f421258412/stravalib/client.py#L426-L434
from __future__ import division, absolute_import, print_function, unicode_literals import logging import warnings import functools import time import collections import calendar from io import BytesIO from datetime import datetime, timedelta import arrow import pytz import six from units.quantity import Quantity from stravalib import model, exc from stravalib.protocol import ApiV3 from stravalib.util import limiter from stravalib import unithelper class Client(object): def __init__(self, access_token=None, rate_limit_requests=True, rate_limiter=None, requests_session=None): self.log = logging.getLogger('{0.__module__}.{0.__name__}'.format(self.__class__)) if rate_limit_requests: if not rate_limiter: rate_limiter = limiter.DefaultRateLimiter() elif rate_limiter: raise ValueError("Cannot specify rate_limiter object when rate_limit_requests is False") self.protocol = ApiV3(access_token=access_token, requests_session=requests_session, rate_limiter=rate_limiter) @property def access_token(self): return self.protocol.access_token @access_token.setter def access_token(self, v): self.protocol.access_token = v def authorization_url(self, client_id, redirect_uri, approval_prompt='auto', scope=None, state=None): return self.protocol.authorization_url(client_id=client_id, redirect_uri=redirect_uri, approval_prompt=approval_prompt, scope=scope, state=state) def exchange_code_for_token(self, client_id, client_secret, code): return self.protocol.exchange_code_for_token(client_id=client_id, client_secret=client_secret, code=code) def refresh_access_token(self, client_id, client_secret, refresh_token): return self.protocol.refresh_access_token(client_id=client_id, client_secret=client_secret, refresh_token=refresh_token) def deauthorize(self): self.protocol.post("oauth/deauthorize") def _utc_datetime_to_epoch(self, activity_datetime): if isinstance(activity_datetime, str): activity_datetime = arrow.get(activity_datetime).datetime assert isinstance(activity_datetime, datetime) if activity_datetime.tzinfo: activity_datetime = activity_datetime.astimezone(pytz.utc) return calendar.timegm(activity_datetime.timetuple()) def get_activities(self, before=None, after=None, limit=None): if before: before = self._utc_datetime_to_epoch(before) if after: after = self._utc_datetime_to_epoch(after) params = dict(before=before, after=after) result_fetcher = functools.partial(self.protocol.get, '/athlete/activities', **params) return BatchedResultsIterator(entity=model.Activity, bind_client=self, result_fetcher=result_fetcher, limit=limit) def get_athlete(self, athlete_id=None): if athlete_id is None: raw = self.protocol.get('/athlete') else: raise NotImplementedError("The /athletes/{id} endpoint was removed by Strava. " "See https://developers.strava.com/docs/january-2018-update/") return model.Athlete.deserialize(raw, bind_client=self) def get_athlete_friends(self, athlete_id=None, limit=None): if athlete_id is None: result_fetcher = functools.partial(self.protocol.get, '/athlete/friends') else: raise NotImplementedError("The /athletes/{id}/friends endpoint was removed by Strava. 
" "See https://developers.strava.com/docs/january-2018-update/") return BatchedResultsIterator(entity=model.Athlete, bind_client=self, result_fetcher=result_fetcher, limit=limit) def update_athlete(self, city=None, state=None, country=None, sex=None, weight=None): params = {'city': city, 'state': state, 'country': country, 'sex': sex} params = {k: v for (k, v) in params.items() if v is not None} if weight is not None: params['weight'] = float(weight) raw_athlete = self.protocol.put('/athlete', **params) return model.Athlete.deserialize(raw_athlete, bind_client=self) def get_athlete_followers(self, athlete_id=None, limit=None): if athlete_id is None: result_fetcher = functools.partial(self.protocol.get, '/athlete/followers') else: raise NotImplementedError("The /athletes/{id}/followers endpoint was removed by Strava. " "See https://developers.strava.com/docs/january-2018-update/") return BatchedResultsIterator(entity=model.Athlete, bind_client=self, result_fetcher=result_fetcher, limit=limit) def get_both_following(self, athlete_id, limit=None): raise NotImplementedError("The /athletes/{id}/both-following endpoint was removed by Strava. " "See https://developers.strava.com/docs/january-2018-update/") def get_athlete_koms(self, athlete_id, limit=None): result_fetcher = functools.partial(self.protocol.get, '/athletes/{id}/koms', id=athlete_id) return BatchedResultsIterator(entity=model.SegmentEffort, bind_client=self, result_fetcher=result_fetcher, limit=limit) def get_athlete_stats(self, athlete_id=None): if athlete_id is None: athlete_id = self.get_athlete().id raw = self.protocol.get('/athletes/{id}/stats', id=athlete_id) return model.AthleteStats.deserialize(raw) def get_athlete_clubs(self): club_structs = self.protocol.get('/athlete/clubs') return [model.Club.deserialize(raw, bind_client=self) for raw in club_structs] def join_club(self, club_id): self.protocol.post('clubs/{id}/join', id=club_id)
Apache License 2.0
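A short usage sketch for `Client.leave_club`; the access token and club id are placeholders. `join_club`, used here for symmetry, appears at the end of this record's context.

```python
from stravalib.client import Client

client = Client(access_token="<token-with-write-scope>")  # placeholder token
client.join_club(club_id=123456)   # counterpart shown in the context above
client.leave_club(club_id=123456)  # POSTs clubs/{id}/leave
```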
luci/recipes-py
recipe_modules/step/api.py
StepApi.sub_build
python
def sub_build(self, name, cmd, build, raise_on_failure=True,
              output_path=None, timeout=None, step_test_data=None,
              cost=_ResourceCost()):
  self._validate_cmd_list(cmd)
  cmd = list(cmd)
  cmd[1:1] = ['--output', self._sub_build_output(output_path)]

  new_tmp_dir = str(self.m.path.mkdtemp())
  with self.m.context(
      env={
          var: new_tmp_dir for var in (
              'TEMPDIR', 'TMPDIR', 'TEMP', 'TMP', 'MAC_CHROMIUM_TMPDIR')
      },
      env_prefixes={'PATH': self._prefix_path}
  ):
    env = self.m.context.env
    env_prefixes = self.m.context.env_prefixes

    return self._run_or_raise_step(
        self.step_client.StepConfig(
            name=name,
            cmd=cmd,
            cost=self._normalize_cost(cost),
            cwd=self._normalize_cwd(self.m.context.cwd),
            env=env,
            env_prefixes=self._to_env_affix(env_prefixes),
            env_suffixes=self._to_env_affix(self.m.context.env_suffixes),
            timeout=timeout,
            luci_context=self.m.context.luci_context,
            stdin=self.m.proto.input(self._make_initial_build(build), 'BINARY'),
            infra_step=self.m.context.infra_step or False,
            raise_on_failure=raise_on_failure,
            merge_step=True,
            ok_ret=self.step_client.StepConfig.ALL_OK,
            step_test_data=step_test_data,
        ))
Launch a sub-build by invoking a LUCI executable. All steps in the
sub-build will appear as child steps of this step (Merge Step).

See protocol: https://go.chromium.org/luci/luciexe

Example:

```python
run_exe = api.cipd.ensure_tool(...) # Install LUCI executable `run_exe`

# Basic Example: launch `run_exe` with empty initial build and
# default options.
ret = api.sub_build("launch sub build", [run_exe], build_pb2.Build())
sub_build = ret.step.sub_build # access final build proto result

# Example: launch `run_exe` with input build to recipe and customized
# output path, cwd and cache directory.
with api.context(
    # Change the cwd of the launched LUCI executable
    cwd=api.path['start_dir'].join('subdir'),
    # Change the cache_dir of the launched LUCI executable. Defaults to
    # api.path['cache'] if unchanged.
    luciexe=sections_pb2.LUCIExe(cache_dir=api.path['cache'].join('sub')),
):
  # Command executed:
  # `/path/to/run_exe --output [CLEANUP]/build.json --foo bar baz`
  ret = api.sub_build("launch sub build",
                      [run_exe, '--foo', 'bar', 'baz'],
                      api.buildbucket.build,
                      output_path=api.path['cleanup'].join('build.json'))
  sub_build = ret.step.sub_build # access final build proto result
```

Args:
  * name (str): The name of this step.
  * cmd (List[int|string|Placeholder|Path]): Same as the `cmd` parameter in
    `__call__` method except that None is NOT allowed. cmd[0] MUST denote a
    LUCI executable. The `--output` flag and its value should NOT be
    provided in the list. It should be provided via keyword arg
    `output_path` instead.
  * build (build_pb2.Build): The initial build state that the launched
    luciexe will start with. This method will clone the input build, modify
    the clone's fields and pass the clone to luciexe (see 'Invocation'
    section in http://go.chromium.org/luci/luciexe for what modification
    will be done).
  * raise_on_failure: Whether or not the step will raise on failure. If
    True, a StepFailure will be raised if the step's status is FAILURE, an
    InfraFailure will be raised if the step's status is EXCEPTION and a
    StepWarning will be raised if the step's status is WARNING. Regardless
    of the value of this argument, an InfraFailure will be raised if the
    step is canceled.
  * output_path (None|str|Path): The value of the `--output` flag. If
    provided, it should be a path to a non-existent file (its directory
    MUST exist). The extension of the path dictates the encoding format of
    final build proto (See `EXT_TO_CODEC`). If not provided, the output
    will be a temp file with binary encoding.
  * timeout (None|int|float|datetime.timedelta): Same as the `timeout`
    parameter in `__call__` method.
  * step_test_data(Callable[[], recipe_test_api.StepTestData]): Same as the
    `step_test_data` parameter in `__call__` method.
  * cost (None|ResourceCost): Same as the `cost` parameter in `__call__`
    method.

Returns a `step_data.StepData` for the finished step. The final build proto
object can be accessed via `ret.step.sub_build`. The build is guaranteed to
be present (i.e. not None) with a terminal build status.

Raises `StepFailure` if the sub-build reports FAILURE status.
Raises `InfraFailure` if the sub-build reports INFRA_FAILURE or CANCELED status.
https://github.com/luci/recipes-py/blob/32f0255a6910af47c6cb35546032ae4d60fe9a92/recipe_modules/step/api.py#L449-L564
import contextlib import multiprocessing import sys import types from builtins import int from future.utils import iteritems from past.builtins import basestring import enum from recipe_engine import recipe_api from recipe_engine.config_types import Path from recipe_engine.engine_types import StepPresentation from recipe_engine.engine_types import ResourceCost as _ResourceCost from recipe_engine.util import Placeholder, returns_placeholder from PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2 from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2 class StepApi(recipe_api.RecipeApiPlain): step_client = recipe_api.RequireClient('step') def __init__(self, step_properties, **kwargs): super(StepApi, self).__init__(**kwargs) self._prefix_path = step_properties.get('prefix_path', []) EXCEPTION = 'EXCEPTION' FAILURE = 'FAILURE' SUCCESS = 'SUCCESS' WARNING = 'WARNING' EXT_TO_CODEC = { '.pb': 'BINARY', '.json': 'JSONPB', '.textpb': 'TEXTPB', } def ResourceCost(self, cpu=500, memory=50, disk=0, net=0): return _ResourceCost( min(cpu, self.MAX_CPU), min(memory, self.MAX_MEMORY), disk, net) CPU_CORE = 1000 @property def MAX_CPU(self): return self.m.platform.cpu_count * self.CPU_CORE @property def MAX_MEMORY(self): return self.m.platform.total_memory @property def StepFailure(self): return recipe_api.StepFailure @property def StepWarning(self): return recipe_api.StepWarning @property def InfraFailure(self): return recipe_api.InfraFailure @property def active_result(self): return self.step_client.previous_step_result() def close_non_nest_step(self): return self.step_client.close_non_parent_step() class _StepPresentationProxy(object): def __init__(self, presentation): object.__setattr__(self, 'presentation', presentation) def __getattr__(self, name): return getattr(self.presentation, name) def __setattr__(self, name, value): setattr(self.presentation, name, value) @contextlib.contextmanager def nest(self, name, status='worst'): assert status in ('worst', 'last'), 'Got bad status: %r' % (status,) with self.step_client.parent_step(name) as (pres, children_presentations): caught_exc = None try: yield self._StepPresentationProxy(pres) except: caught_exc = sys.exc_info()[0] raise finally: if pres.status is None: if caught_exc: pres.status = { recipe_api.StepFailure: self.FAILURE, recipe_api.StepWarning: self.WARNING, }.get(caught_exc, self.EXCEPTION) elif children_presentations: if status == 'worst': worst = self.SUCCESS for cpres in children_presentations: worst = StepPresentation.status_worst(worst, cpres.status) pres.status = worst else: pres.status = children_presentations[-1].status else: pres.status = self.SUCCESS @property def defer_results(self): return recipe_api.defer_results @staticmethod def _validate_cmd_list(cmd): if not isinstance(cmd, list): raise ValueError('cmd must be a list, got %r' % (cmd,)) for arg in cmd: if not isinstance(arg, (int, basestring, Path, Placeholder)): raise ValueError('Type %s is not permitted. 
' 'cmd is %r' % (type(arg), cmd)) @staticmethod def _normalize_cost(cost): if not isinstance(cost, (type(None), _ResourceCost)): raise ValueError('cost must be a None or ResourceCost , got %r' % (cost,)) return cost or _ResourceCost.zero() def _normalize_cwd(self, cwd): if cwd and cwd == self.m.path['start_dir']: cwd = None elif cwd is not None: cwd = str(cwd) return cwd def _to_env_affix(self, affix): return self.step_client.EnvAffix( mapping={k: [str(v) for v in vs] for k, vs in iteritems(affix)}, pathsep=self.m.path.pathsep, ) @returns_placeholder('sub_build') def _sub_build_output(self, output_path): if not isinstance(output_path, (type(None), str, Path)): raise ValueError('expected None, str or Path; got %r' % (output_path,)) ext = '.pb' if output_path is None: output_path = self.m.path.mkdtemp().join('sub_build' + ext) else: if self.m.path.exists(output_path): raise ValueError('expected non-existent output path; ' 'got path %s' % (output_path,)) _, ext = self.m.path.splitext(output_path) if ext not in self.EXT_TO_CODEC: raise ValueError('expected extension of output path to be ' 'one of %s; got %s' % (tuple(self.EXT_TO_CODEC), ext)) dir_name = self.m.path.dirname(output_path) self.m.path.mock_add_paths(dir_name) if not self.m.path.exists(dir_name): raise ValueError('expected directory of output path exists; ' 'got dir: %s' % (dir_name,)) return self.m.proto.output(build_pb2.Build, self.EXT_TO_CODEC[ext], leak_to=output_path, add_json_log=True) def _make_initial_build(self, input_build): build = build_pb2.Build() build.CopyFrom(input_build) build.status = common_pb2.STARTED if self._test_data.enabled: build.create_time.FromSeconds( self._test_data.get('initial_build_create_time', 1577836800)) build.start_time.FromSeconds( self._test_data.get('initial_build_start_time', 1577836801)) else: build.create_time.GetCurrentTime() build.start_time.GetCurrentTime() for f in ('end_time', 'output', 'status_details', 'steps', 'summary_markdown', 'update_time'): build.ClearField(f) return build def _raise_on_disallowed_statuses(self, result, allowed_statuses, status_override=None): status = status_override or result.presentation.status if status in allowed_statuses: return result exc = { 'FAILURE': self.StepFailure, 'WARNING': self.StepWarning, 'EXCEPTION': self.InfraFailure, 'CANCELED': self.InfraFailure, }[status] raise exc('.'.join(result.name_tokens), result) def raise_on_failure(self, result, status_override=None): return self._raise_on_disallowed_statuses( result, [self.SUCCESS], status_override=status_override) def _run_or_raise_step(self, step_config): ret = self.step_client.run_step(step_config) allowed_statuses = [self.SUCCESS] if not step_config.raise_on_failure: allowed_statuses += [self.WARNING, self.FAILURE, self.EXCEPTION] return self._raise_on_disallowed_statuses(ret, allowed_statuses) @recipe_api.composite_step
Apache License 2.0
kevin-ssy/vip
utils/flop_count/jit_analysis.py
JitModelAnalysis.__init__
python
def __init__(
    self,
    model: nn.Module,
    inputs: Union[Tensor, Tuple[Tensor, ...]],
) -> None:
    self._model = model
    self._inputs = inputs
    self._op_handles: Dict[str, Handle] = {}
    self._named_modules: Dict[str, nn.Module] = dict(_named_modules_with_dup(model))
    self._aliases: Dict[Union[nn.Module, str], str] = self._get_aliases(model)
    self._stats: Optional[Statistics] = None
    self._enable_warn_unsupported_ops = True
    self._enable_warn_uncalled_mods = True
    self._warn_trace = "no_tracer_warning"
    self._ignored_ops: Set[str] = copy(_IGNORED_OPS)
Args: model: The model to analyze inputs: The inputs to the model for analysis.
https://github.com/kevin-ssy/vip/blob/dedc27616ab24e78685e21291d1e6ef9b56de889/utils/flop_count/jit_analysis.py#L198-L219
import logging import typing import warnings from collections import Counter from copy import copy from dataclasses import dataclass from typing import Any, Dict, List, Optional, Set, Tuple, Union, Iterable import torch import torch.nn as nn from torch import Tensor from torch.jit import TracerWarning, _get_trace_graph from .jit_handles import Handle def _named_modules_with_dup( model: nn.Module, prefix: str = "" ) -> Iterable[Tuple[str, nn.Module]]: yield prefix, model for name, module in model._modules.items(): if module is None: continue submodule_prefix = prefix + ("." if prefix else "") + name yield from _named_modules_with_dup(module, submodule_prefix) _IGNORED_OPS: Set[str] = { "aten::Int", "aten::ScalarImplicit", "aten::__and__", "aten::arange", "aten::cat", "aten::chunk", "aten::clamp", "aten::clamp_", "aten::constant_pad_nd", "aten::contiguous", "aten::copy_", "aten::detach", "aten::dropout", "aten::empty", "aten::eq", "aten::expand", "aten::flatten", "aten::floor", "aten::floor_divide", "aten::full", "aten::ge", "aten::gt", "aten::index", "aten::index_put_", "aten::max", "aten::nonzero", "aten::permute", "aten::relu", "aten::relu_", "aten::remainder", "aten::reshape", "aten::select", "aten::size", "aten::slice", "aten::split", "aten::split_with_sizes", "aten::squeeze", "aten::narrow", "aten::unbind", "aten::full_like", "aten::stack", "aten::t", "aten::to", "aten::transpose", "aten::unsqueeze", "aten::unsqueeze_", "aten::view", "aten::zeros", "aten::zeros_like", } @dataclass class Statistics: counts: "Dict[str, Counter[str]]" unsupported_ops: "Dict[str, Counter[str]]" uncalled_mods: "Set[str]" def _get_scoped_trace_graph( module: nn.Module, inputs: Union[Tensor, Tuple[Tensor, ...]], aliases: Dict[Union[str, nn.Module], str], ) -> torch._C.Graph: class ScopePushHook: def __init__(self, name: str) -> None: self.name = name def __call__(self, module: nn.Module, inputs: Any) -> Any: tracing_state = torch._C._get_tracing_state() if tracing_state: tracing_state.push_scope(self.name) return inputs class ScopePopHook: def __call__(self, module: nn.Module, inputs: Any, outputs: Any) -> Any: tracing_state = torch._C._get_tracing_state() if tracing_state: tracing_state.pop_scope() return outputs seen = set() hook_handles: List[Any] = [] def register_hooks(mod: nn.Module, name: str) -> None: prehook = mod.register_forward_pre_hook(ScopePushHook(name)) posthook = mod.register_forward_hook(ScopePopHook()) hook_handles.append(prehook) hook_handles.append(posthook) if isinstance( module, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel) ): root_name = aliases[module] module = module.module register_hooks(module, root_name) for name, mod in _named_modules_with_dup(module): if mod not in seen: name = aliases[mod] register_hooks(mod, name) seen.add(mod) if hasattr(torch.jit, "get_trace_graph"): trace, _ = torch.jit.get_trace_graph(module, inputs) graph = trace.graph() else: graph, _ = _get_trace_graph(module, inputs) for handle in hook_handles: handle.remove() return graph class JitModelAnalysis:
MIT License
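A minimal construction sketch for `JitModelAnalysis.__init__`; only the constructor is shown in this entry, so the sketch stops after building the analysis object and relies on the rest of `jit_analysis.py` (e.g. the private `_get_aliases` helper called by the constructor) existing in the full file.

```python
import torch
import torch.nn as nn
from utils.flop_count.jit_analysis import JitModelAnalysis  # path from this record

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
inputs = (torch.randn(1, 16),)  # inputs may be a Tensor or a tuple of Tensors
analysis = JitModelAnalysis(model, inputs)  # op handles and stats are populated lazily
```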
amzn/differential-privacy-bayesian-optimization
experiments/output_perturbation/scikit-learn/sklearn/kernel_ridge.py
KernelRidge.fit
python
def fit(self, X, y=None, sample_weight=None):
    X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
                     y_numeric=True)
    if sample_weight is not None and not isinstance(sample_weight, float):
        sample_weight = check_array(sample_weight, ensure_2d=False)

    K = self._get_kernel(X)
    alpha = np.atleast_1d(self.alpha)

    ravel = False
    if len(y.shape) == 1:
        y = y.reshape(-1, 1)
        ravel = True

    copy = self.kernel == "precomputed"
    self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy)
    if ravel:
        self.dual_coef_ = self.dual_coef_.ravel()

    self.X_fit_ = X

    return self
Fit Kernel Ridge regression model

Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
    Training data. If kernel == "precomputed" this is instead
    a precomputed kernel matrix, shape = [n_samples, n_samples].

y : array-like, shape = [n_samples] or [n_samples, n_targets]
    Target values

sample_weight : float or array-like of shape [n_samples]
    Individual weights for each sample, ignored if None is passed.

Returns
-------
self : returns an instance of self.
https://github.com/amzn/differential-privacy-bayesian-optimization/blob/4f3c98d4b747e22ac4890089f46fd79137235492/experiments/output_perturbation/scikit-learn/sklearn/kernel_ridge.py#L131-L174
import numpy as np from .base import BaseEstimator, RegressorMixin, MultiOutputMixin from .metrics.pairwise import pairwise_kernels from .linear_model.ridge import _solve_cholesky_kernel from .utils import check_array, check_X_y from .utils.validation import check_is_fitted class KernelRidge(BaseEstimator, RegressorMixin, MultiOutputMixin): def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1, kernel_params=None): self.alpha = alpha self.kernel = kernel self.gamma = gamma self.degree = degree self.coef0 = coef0 self.kernel_params = kernel_params def _get_kernel(self, X, Y=None): if callable(self.kernel): params = self.kernel_params or {} else: params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0} return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params) @property def _pairwise(self): return self.kernel == "precomputed"
Apache License 2.0
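A usage sketch for `KernelRidge.fit`, assuming the surrounding class behaves like upstream scikit-learn (e.g. `predict` is defined elsewhere in `kernel_ridge.py` and is not used here); the data is synthetic.

```python
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)

model = KernelRidge(alpha=1.0, kernel="rbf", gamma=0.1).fit(X, y)
print(model.dual_coef_.shape)  # (50,) because the 1-D target is ravelled back
```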
dayyass/text-classification-baseline
text_clf/pr_roc_curve.py
_get_model_and_data
python
def _get_model_and_data(
    path_to_model_folder: str,
) -> Tuple[Pipeline, np.ndarray, np.ndarray]:
    path_to_model = os.path.join(path_to_model_folder, "model.joblib")
    path_to_target_names = os.path.join(path_to_model_folder, "target_names.json")

    with open(path_to_target_names, mode="r") as fp:
        target_names = json.load(fp)

    assert (
        len(target_names) == 2
    ), f"The model must have 2 classes, but has {len(target_names)} classes."

    path_to_model_folder_yaml_list = [
        file for file in os.listdir(path_to_model_folder) if file.endswith(".yaml")
    ]

    if len(path_to_model_folder_yaml_list) == 0:
        raise FileNotFoundError("There is no config file (with .yaml extension).")
    elif len(path_to_model_folder_yaml_list) > 1:
        raise FileNotFoundError(
            "There are more then one config files (with .yaml extension)."
        )

    path_to_config = os.path.join(
        path_to_model_folder, path_to_model_folder_yaml_list[0]
    )

    config = get_config(path_to_config)

    _, X_test, _, y_test = load_data(config)

    model = joblib.load(path_to_model)

    return model, X_test, y_test
Helper function to get model and data. Args: path_to_model_folder (str): Path to trained model folder. Raises: Exception: Not a binary classification error. FileNotFoundError: No config error. FileNotFoundError: More than 1 config error. Returns: Tuple[Pipeline, np.ndarray, np.ndarray]: model, X_test, y_test.
https://github.com/dayyass/text-classification-baseline/blob/c9d6876be567b05ffcae1a5e31bbf56c1e5cae78/text_clf/pr_roc_curve.py#L29-L80
import json import os from typing import Tuple import joblib import matplotlib.pyplot as plt import numpy as np from sklearn.metrics import ( PrecisionRecallDisplay, RocCurveDisplay, auc, precision_recall_curve, roc_curve, ) from sklearn.pipeline import Pipeline from .config import get_config from .data import load_data __all__ = [ "get_precision_recall_curve", "get_roc_curve", "plot_precision_recall_curve", "plot_roc_curve", "plot_precision_recall_f1_curves_for_thresholds", ]
MIT License
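A hypothetical call to `_get_model_and_data`; the folder name is a placeholder and must contain `model.joblib`, `target_names.json` and exactly one `.yaml` config, as the helper above checks. Whether the returned pipeline exposes `predict_proba` depends on its final estimator, so that line is an assumption.

```python
from text_clf.pr_roc_curve import _get_model_and_data

# "models/model-20210701-120000" is a placeholder for a trained-model folder.
model, X_test, y_test = _get_model_and_data("models/model-20210701-120000")
y_score = model.predict_proba(X_test)[:, 1]  # assumes a probabilistic final estimator
```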
scikit-hep/pyhf
src/pyhf/constraints.py
poisson_constraint_combined.logpdf
python
def logpdf(self, auxdata, pars):
    tensorlib, _ = get_backend()
    pdf = self.make_pdf(pars)
    if pdf is None:
        return (
            tensorlib.zeros(self.batch_size)
            if self.batch_size is not None
            else tensorlib.astensor(0.0)[0]
        )
    poisson_data = tensorlib.gather(auxdata, self.poisson_data)
    return pdf.log_prob(poisson_data)
Args: auxdata (:obj:`tensor`): The auxiliary data (a subset of the full data in a HistFactory model) pars (:obj:`tensor`): The model parameters Returns: log pdf value: The log of the pdf value of the Poisson constraints
https://github.com/scikit-hep/pyhf/blob/7ecaa63f6673108d23e4bd984e32504a150cfbb8/src/pyhf/constraints.py#L251-L269
import pyhf from pyhf.tensor.manager import get_backend from pyhf import events from pyhf import probability as prob from pyhf.parameters import ParamViewer __all__ = ["gaussian_constraint_combined", "poisson_constraint_combined"] def __dir__(): return __all__ class gaussian_constraint_combined: def __init__(self, pdfconfig, batch_size=None): default_backend = pyhf.default_backend self.batch_size = batch_size self.data_indices = list(range(len(pdfconfig.auxdata))) self.parsets = [pdfconfig.param_set(cname) for cname in pdfconfig.auxdata_order] pars_constrained_by_normal = [ constrained_parameter for constrained_parameter in pdfconfig.auxdata_order if pdfconfig.param_set(constrained_parameter).pdf_type == 'normal' ] parfield_shape = (self.batch_size or 1, pdfconfig.npars) self.param_viewer = ParamViewer( parfield_shape, pdfconfig.par_map, pars_constrained_by_normal ) start_index = 0 normal_constraint_data = [] normal_constraint_sigmas = [] for parset in self.parsets: end_index = start_index + parset.n_parameters thisauxdata = self.data_indices[start_index:end_index] start_index = end_index if not parset.pdf_type == 'normal': continue normal_constraint_data.append(thisauxdata) try: normal_constraint_sigmas.append(parset.sigmas) except AttributeError: normal_constraint_sigmas.append([1.0] * len(thisauxdata)) self._normal_data = None self._sigmas = None self._access_field = None if self.param_viewer.index_selection: self._normal_data = default_backend.astensor( default_backend.concatenate(normal_constraint_data), dtype='int' ) _normal_sigmas = default_backend.concatenate(normal_constraint_sigmas) if self.batch_size: sigmas = default_backend.reshape(_normal_sigmas, (1, -1)) self._sigmas = default_backend.tile(sigmas, (self.batch_size, 1)) else: self._sigmas = _normal_sigmas access_field = default_backend.concatenate( self.param_viewer.index_selection, axis=1 ) self._access_field = access_field self._precompute() events.subscribe('tensorlib_changed')(self._precompute) def _precompute(self): if not self.param_viewer.index_selection: return tensorlib, _ = get_backend() self.sigmas = tensorlib.astensor(self._sigmas) self.normal_data = tensorlib.astensor(self._normal_data, dtype='int') self.access_field = tensorlib.astensor(self._access_field, dtype='int') def has_pdf(self): return bool(self.param_viewer.index_selection) def make_pdf(self, pars): tensorlib, _ = get_backend() if not self.param_viewer.index_selection: return None if self.batch_size is None: flat_pars = pars else: flat_pars = tensorlib.reshape(pars, (-1,)) normal_means = tensorlib.gather(flat_pars, self.access_field) if self.batch_size is None: normal_means = normal_means[0] result = prob.Independent( prob.Normal(normal_means, self.sigmas), batch_size=self.batch_size ) return result def logpdf(self, auxdata, pars): tensorlib, _ = get_backend() pdf = self.make_pdf(pars) if pdf is None: return ( tensorlib.zeros(self.batch_size) if self.batch_size is not None else tensorlib.astensor(0.0)[0] ) normal_data = tensorlib.gather(auxdata, self.normal_data) return pdf.log_prob(normal_data) class poisson_constraint_combined: def __init__(self, pdfconfig, batch_size=None): default_backend = pyhf.default_backend self.batch_size = batch_size self.par_indices = list(range(pdfconfig.npars)) self.data_indices = list(range(len(pdfconfig.auxdata))) self.parsets = [pdfconfig.param_set(cname) for cname in pdfconfig.auxdata_order] pars_constrained_by_poisson = [ constrained_parameter for constrained_parameter in pdfconfig.auxdata_order if 
pdfconfig.param_set(constrained_parameter).pdf_type == 'poisson' ] parfield_shape = (self.batch_size or 1, pdfconfig.npars) self.param_viewer = ParamViewer( parfield_shape, pdfconfig.par_map, pars_constrained_by_poisson ) start_index = 0 poisson_constraint_data = [] poisson_constraint_rate_factors = [] for parset in self.parsets: end_index = start_index + parset.n_parameters thisauxdata = self.data_indices[start_index:end_index] start_index = end_index if not parset.pdf_type == 'poisson': continue poisson_constraint_data.append(thisauxdata) poisson_constraint_rate_factors.append(parset.factors) self._poisson_data = None self._access_field = None self._batched_factors = None if self.param_viewer.index_selection: self._poisson_data = default_backend.astensor( default_backend.concatenate(poisson_constraint_data), dtype='int' ) _poisson_rate_fac = default_backend.astensor( default_backend.concatenate(poisson_constraint_rate_factors), dtype='float', ) factors = default_backend.reshape(_poisson_rate_fac, (1, -1)) self._batched_factors = default_backend.tile( factors, (self.batch_size or 1, 1) ) access_field = default_backend.concatenate( self.param_viewer.index_selection, axis=1 ) self._access_field = access_field self._precompute() events.subscribe('tensorlib_changed')(self._precompute) def _precompute(self): if not self.param_viewer.index_selection: return tensorlib, _ = get_backend() self.poisson_data = tensorlib.astensor(self._poisson_data, dtype='int') self.access_field = tensorlib.astensor(self._access_field, dtype='int') self.batched_factors = tensorlib.astensor(self._batched_factors) def has_pdf(self): return bool(self.param_viewer.index_selection) def make_pdf(self, pars): if not self.param_viewer.index_selection: return None tensorlib, _ = get_backend() if self.batch_size is None: flat_pars = pars else: flat_pars = tensorlib.reshape(pars, (-1,)) nuispars = tensorlib.gather(flat_pars, self.access_field) pois_rates = tensorlib.product( tensorlib.stack([nuispars, self.batched_factors]), axis=0 ) if self.batch_size is None: pois_rates = pois_rates[0] return prob.Independent(prob.Poisson(pois_rates), batch_size=self.batch_size)
Apache License 2.0
googlecloudplatform/perfkitbenchmarker
perfkitbenchmarker/providers/aws/aws_virtual_machine.py
AwsVirtualMachine._InstallEfa
python
def _InstallEfa(self):
  if not self.TryRemoteCommand('ulimit -l | grep unlimited'):
    self.RemoteCommand(f'echo "{self.user_name} - memlock unlimited" | '
                       'sudo tee -a /etc/security/limits.conf')
  self.RemoteCommand('cd aws-efa-installer; sudo ./efa_installer.sh -y')
  if not self.TryRemoteCommand('ulimit -l | grep unlimited'):
    self.Reboot()
Installs AWS EFA packages. See https://aws.amazon.com/hpc/efa/
https://github.com/googlecloudplatform/perfkitbenchmarker/blob/c14a122016d414351d41167029c79c9a19709384/perfkitbenchmarker/providers/aws/aws_virtual_machine.py#L720-L731
import base64 import collections import json import logging import posixpath import re import threading import time import uuid from absl import flags from perfkitbenchmarker import disk from perfkitbenchmarker import errors from perfkitbenchmarker import linux_virtual_machine from perfkitbenchmarker import placement_group from perfkitbenchmarker import resource from perfkitbenchmarker import virtual_machine from perfkitbenchmarker import vm_util from perfkitbenchmarker import windows_virtual_machine from perfkitbenchmarker.configs import option_decoders from perfkitbenchmarker.providers import aws from perfkitbenchmarker.providers.aws import aws_disk from perfkitbenchmarker.providers.aws import aws_network from perfkitbenchmarker.providers.aws import util from six.moves import range FLAGS = flags.FLAGS HVM = 'hvm' PV = 'paravirtual' NON_HVM_PREFIXES = ['m1', 'c1', 't1', 'm2'] NON_PLACEMENT_GROUP_PREFIXES = frozenset(['t2', 'm3', 't3']) DRIVE_START_LETTER = 'b' TERMINATED = 'terminated' SHUTTING_DOWN = 'shutting-down' INSTANCE_EXISTS_STATUSES = frozenset(['running', 'stopping', 'stopped']) INSTANCE_DELETED_STATUSES = frozenset([SHUTTING_DOWN, TERMINATED]) INSTANCE_TRANSITIONAL_STATUSES = frozenset(['pending']) INSTANCE_KNOWN_STATUSES = (INSTANCE_EXISTS_STATUSES | INSTANCE_DELETED_STATUSES | INSTANCE_TRANSITIONAL_STATUSES) HOST_EXISTS_STATES = frozenset( ['available', 'under-assessment', 'permanent-failure']) HOST_RELEASED_STATES = frozenset(['released', 'released-permanent-failure']) KNOWN_HOST_STATES = HOST_EXISTS_STATES | HOST_RELEASED_STATES AWS_INITIATED_SPOT_TERMINATING_TRANSITION_STATUSES = frozenset( ['marked-for-termination', 'marked-for-stop']) AWS_INITIATED_SPOT_TERMINAL_STATUSES = frozenset( ['instance-terminated-by-price', 'instance-terminated-by-service', 'instance-terminated-no-capacity', 'instance-terminated-capacity-oversubscribed', 'instance-terminated-launch-group-constraint']) USER_INITIATED_SPOT_TERMINAL_STATUSES = frozenset( ['request-canceled-and-instance-running', 'instance-terminated-by-user']) AMAZON_LINUX_IMAGE_PROJECT = [ '137112412989', '210953353124', '910595266909', '071630900071', ] DEBIAN_9_IMAGE_PROJECT = ['379101102735'] DEBIAN_IMAGE_PROJECT = ['136693071363'] CENTOS_IMAGE_PROJECT = ['125523088429'] MARKETPLACE_IMAGE_PROJECT = ['679593333241'] RHEL_IMAGE_PROJECT = ['309956199498'] UBUNTU_IMAGE_PROJECT = ['099720109477'] WINDOWS_IMAGE_PROJECT = ['801119661308'] ARM = 'arm64' X86 = 'x86_64' _MACHINE_TYPE_PREFIX_TO_ARM_ARCH = { 'a1': 'cortex-a72', 'c6g': 'graviton2', 'm6g': 'graviton2', 'r6g': 'graviton2', } _EFA_PARAMS = { 'InterfaceType': 'efa', 'DeviceIndex': 0, 'NetworkCardIndex': 0, 'Groups': '', 'SubnetId': '' } _EFA_URL = ('https://s3-us-west-2.amazonaws.com/aws-efa-installer/' 'aws-efa-installer-{version}.tar.gz') class AwsTransitionalVmRetryableError(Exception): class AwsDriverDoesntSupportFeatureError(Exception): class AwsUnexpectedWindowsAdapterOutputError(Exception): class AwsUnknownStatusError(Exception): class AwsImageNotFoundError(Exception): def GetRootBlockDeviceSpecForImage(image_id, region): command = util.AWS_PREFIX + [ 'ec2', 'describe-images', '--region=%s' % region, '--image-ids=%s' % image_id, '--query', 'Images[]'] stdout, _ = util.IssueRetryableCommand(command) images = json.loads(stdout) assert images assert len(images) == 1, ( 'Expected to receive only one image description for %s' % image_id) image_spec = images[0] root_device_name = image_spec['RootDeviceName'] block_device_mappings = image_spec['BlockDeviceMappings'] 
root_block_device_dict = next((x for x in block_device_mappings if x['DeviceName'] == root_device_name)) return root_block_device_dict def GetBlockDeviceMap(machine_type, root_volume_size_gb=None, image_id=None, region=None): mappings = [] if root_volume_size_gb is not None: if image_id is None: raise ValueError( 'image_id must be provided if root_volume_size_gb is specified') if region is None: raise ValueError( 'region must be provided if image_id is specified') root_block_device = GetRootBlockDeviceSpecForImage(image_id, region) root_block_device['Ebs']['VolumeSize'] = root_volume_size_gb if not FLAGS.aws_vm_hibernate: root_block_device['Ebs'].pop('Encrypted') else: root_block_device['Ebs']['Encrypted'] = True mappings.append(root_block_device) if (machine_type in aws_disk.NUM_LOCAL_VOLUMES and not aws_disk.LocalDriveIsNvme(machine_type)): for i in range(aws_disk.NUM_LOCAL_VOLUMES[machine_type]): od = collections.OrderedDict() od['VirtualName'] = 'ephemeral%s' % i od['DeviceName'] = '/dev/xvd%s' % chr(ord(DRIVE_START_LETTER) + i) mappings.append(od) if mappings: return json.dumps(mappings) return None def IsPlacementGroupCompatible(machine_type): prefix = machine_type.split('.')[0] return prefix not in NON_PLACEMENT_GROUP_PREFIXES def GetArmArchitecture(machine_type): prefix = re.split(r'[dn]?\.', machine_type)[0] return _MACHINE_TYPE_PREFIX_TO_ARM_ARCH.get(prefix) def GetProcessorArchitecture(machine_type): if GetArmArchitecture(machine_type): return ARM else: return X86 class AwsDedicatedHost(resource.BaseResource): def __init__(self, machine_type, zone): super(AwsDedicatedHost, self).__init__() self.machine_type = machine_type self.zone = zone self.region = util.GetRegionFromZone(self.zone) self.client_token = str(uuid.uuid4()) self.id = None self.fill_fraction = 0.0 def _Create(self): create_cmd = util.AWS_PREFIX + [ 'ec2', 'allocate-hosts', '--region=%s' % self.region, '--client-token=%s' % self.client_token, '--instance-type=%s' % self.machine_type, '--availability-zone=%s' % self.zone, '--auto-placement=off', '--quantity=1'] vm_util.IssueCommand(create_cmd) def _Delete(self): if self.id: delete_cmd = util.AWS_PREFIX + [ 'ec2', 'release-hosts', '--region=%s' % self.region, '--host-ids=%s' % self.id] vm_util.IssueCommand(delete_cmd, raise_on_failure=False) @vm_util.Retry() def _Exists(self): describe_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-hosts', '--region=%s' % self.region, '--filter=Name=client-token,Values=%s' % self.client_token] stdout, _, _ = vm_util.IssueCommand(describe_cmd) response = json.loads(stdout) hosts = response['Hosts'] assert len(hosts) < 2, 'Too many hosts.' 
if not hosts: return False host = hosts[0] self.id = host['HostId'] state = host['State'] assert state in KNOWN_HOST_STATES, state return state in HOST_EXISTS_STATES class AwsVmSpec(virtual_machine.BaseVmSpec): CLOUD = aws.CLOUD @classmethod def _ApplyFlags(cls, config_values, flag_values): super(AwsVmSpec, cls)._ApplyFlags(config_values, flag_values) if flag_values['aws_boot_disk_size'].present: config_values['boot_disk_size'] = flag_values.aws_boot_disk_size if flag_values['aws_spot_instances'].present: config_values['use_spot_instance'] = flag_values.aws_spot_instances if flag_values['aws_spot_price'].present: config_values['spot_price'] = flag_values.aws_spot_price if flag_values['aws_spot_block_duration_minutes'].present: config_values['spot_block_duration_minutes'] = int( flag_values.aws_spot_block_duration_minutes) @classmethod def _GetOptionDecoderConstructions(cls): result = super(AwsVmSpec, cls)._GetOptionDecoderConstructions() result.update({ 'use_spot_instance': (option_decoders.BooleanDecoder, { 'default': False }), 'spot_price': (option_decoders.FloatDecoder, { 'default': None }), 'spot_block_duration_minutes': (option_decoders.IntDecoder, { 'default': None }), 'boot_disk_size': (option_decoders.IntDecoder, { 'default': None }) }) return result def _GetKeyfileSetKey(region): return (region, FLAGS.run_uri) class AwsKeyFileManager(object): _lock = threading.Lock() imported_keyfile_set = set() deleted_keyfile_set = set() @classmethod def ImportKeyfile(cls, region): with cls._lock: if _GetKeyfileSetKey(region) in cls.imported_keyfile_set: return cat_cmd = ['cat', vm_util.GetPublicKeyPath()] keyfile, _ = vm_util.IssueRetryableCommand(cat_cmd) formatted_tags = util.FormatTagSpecifications('key-pair', util.MakeDefaultTags()) import_cmd = util.AWS_PREFIX + [ 'ec2', '--region=%s' % region, 'import-key-pair', '--key-name=%s' % cls.GetKeyNameForRun(), '--public-key-material=%s' % keyfile, '--tag-specifications=%s' % formatted_tags, ] _, stderr, retcode = vm_util.IssueCommand( import_cmd, raise_on_failure=False) if retcode: if 'KeyPairLimitExceeded' in stderr: raise errors.Benchmarks.QuotaFailure( 'KeyPairLimitExceeded in %s: %s' % (region, stderr)) else: raise errors.Benchmarks.PrepareException(stderr) cls.imported_keyfile_set.add(_GetKeyfileSetKey(region)) if _GetKeyfileSetKey(region) in cls.deleted_keyfile_set: cls.deleted_keyfile_set.remove(_GetKeyfileSetKey(region)) @classmethod def DeleteKeyfile(cls, region): with cls._lock: if _GetKeyfileSetKey(region) in cls.deleted_keyfile_set: return delete_cmd = util.AWS_PREFIX + [ 'ec2', '--region=%s' % region, 'delete-key-pair', '--key-name=%s' % cls.GetKeyNameForRun()] util.IssueRetryableCommand(delete_cmd) cls.deleted_keyfile_set.add(_GetKeyfileSetKey(region)) if _GetKeyfileSetKey(region) in cls.imported_keyfile_set: cls.imported_keyfile_set.remove(_GetKeyfileSetKey(region)) @classmethod def GetKeyNameForRun(cls): return 'perfkit-key-{0}'.format(FLAGS.run_uri) class AwsVirtualMachine(virtual_machine.BaseVirtualMachine): CLOUD = aws.CLOUD IMAGE_NAME_FILTER = None IMAGE_NAME_REGEX = None IMAGE_OWNER = MARKETPLACE_IMAGE_PROJECT IMAGE_PRODUCT_CODE_FILTER = None IMAGE_DESCRIPTION_FILTER = None DEFAULT_ROOT_DISK_TYPE = 'gp2' DEFAULT_USER_NAME = 'ec2-user' _lock = threading.Lock() deleted_hosts = set() host_map = collections.defaultdict(list) def __init__(self, vm_spec): super(AwsVirtualMachine, self).__init__(vm_spec) self.region = util.GetRegionFromZone(self.zone) self.user_name = FLAGS.aws_user_name or self.DEFAULT_USER_NAME if 
self.machine_type in aws_disk.NUM_LOCAL_VOLUMES: self.max_local_disks = aws_disk.NUM_LOCAL_VOLUMES[self.machine_type] self.user_data = None self.network = aws_network.AwsNetwork.GetNetwork(self) self.placement_group = getattr(vm_spec, 'placement_group', self.network.placement_group) self.firewall = aws_network.AwsFirewall.GetFirewall() self.use_dedicated_host = vm_spec.use_dedicated_host self.num_vms_per_host = vm_spec.num_vms_per_host self.use_spot_instance = vm_spec.use_spot_instance self.spot_price = vm_spec.spot_price self.spot_block_duration_minutes = vm_spec.spot_block_duration_minutes self.boot_disk_size = vm_spec.boot_disk_size self.client_token = str(uuid.uuid4()) self.host = None self.id = None self.metadata.update({ 'spot_instance': self.use_spot_instance, 'spot_price': self.spot_price, 'spot_block_duration_minutes': self.spot_block_duration_minutes, 'placement_group_strategy': self.placement_group.strategy if self.placement_group else placement_group.PLACEMENT_GROUP_NONE, 'aws_credit_specification': FLAGS.aws_credit_specification if FLAGS.aws_credit_specification else 'none' }) self.spot_early_termination = False self.spot_status_code = None self._smp_affinity_script = 'smp_affinity.sh' if self.use_dedicated_host and util.IsRegion(self.zone): raise ValueError( 'In order to use dedicated hosts, you must specify an availability ' 'zone, not a region ("zone" was %s).' % self.zone) if self.use_dedicated_host and self.use_spot_instance: raise ValueError( 'Tenancy=host is not supported for Spot Instances') self.allocation_id = None self.association_id = None self.aws_tags = {} @property def host_list(self): return self.host_map[(self.machine_type, self.zone)] @property def group_id(self): return self.network.regional_network.vpc.default_security_group_id @classmethod def GetDefaultImage(cls, machine_type, region): if not cls.IMAGE_OWNER: raise NotImplementedError('AWS OSMixins require IMAGE_OWNER') if not cls.IMAGE_NAME_FILTER: raise NotImplementedError('AWS OSMixins require IMAGE_NAME_FILTER') if FLAGS.aws_image_name_filter: cls.IMAGE_NAME_FILTER = FLAGS.aws_image_name_filter if FLAGS.aws_image_name_regex: cls.IMAGE_NAME_REGEX = FLAGS.aws_image_name_regex prefix = machine_type.split('.')[0] virt_type = PV if prefix in NON_HVM_PREFIXES else HVM processor_architecture = GetProcessorArchitecture(machine_type) describe_cmd = util.AWS_PREFIX + [ '--region=%s' % region, 'ec2', 'describe-images', '--query', ('Images[*].{Name:Name,ImageId:ImageId,' 'CreationDate:CreationDate}'), '--filters', 'Name=name,Values=%s' % cls.IMAGE_NAME_FILTER, 'Name=block-device-mapping.volume-type,Values=%s' % cls.DEFAULT_ROOT_DISK_TYPE, 'Name=virtualization-type,Values=%s' % virt_type, 'Name=architecture,Values=%s' % processor_architecture] if cls.IMAGE_PRODUCT_CODE_FILTER: describe_cmd.extend(['Name=product-code,Values=%s' % cls.IMAGE_PRODUCT_CODE_FILTER]) if cls.IMAGE_DESCRIPTION_FILTER: describe_cmd.extend(['Name=description,Values=%s' % cls.IMAGE_DESCRIPTION_FILTER]) describe_cmd.extend(['--owners'] + cls.IMAGE_OWNER) stdout, _ = util.IssueRetryableCommand(describe_cmd) if not stdout: raise AwsImageNotFoundError('aws describe-images did not produce valid ' 'output.') if cls.IMAGE_NAME_REGEX: image_name_regex = cls.IMAGE_NAME_REGEX.format( virt_type=virt_type, disk_type=cls.DEFAULT_ROOT_DISK_TYPE, architecture=processor_architecture) images = [] excluded_images = [] for image in json.loads(stdout): if re.search(image_name_regex, image['Name']): images.append(image) else: excluded_images.append(image) if 
excluded_images: logging.debug('Excluded the following images with regex "%s": %s', image_name_regex, sorted(image['Name'] for image in excluded_images)) else: images = json.loads(stdout) if not images: raise AwsImageNotFoundError('No AMIs with given filters found.') return max(images, key=lambda image: image['CreationDate'])['ImageId'] @vm_util.Retry(max_retries=2) def _PostCreate(self): describe_cmd = util.AWS_PREFIX + [ 'ec2', 'describe-instances', '--region=%s' % self.region, '--instance-ids=%s' % self.id] logging.info('Getting instance %s public IP. This will fail until ' 'a public IP is available, but will be retried.', self.id) stdout, _ = util.IssueRetryableCommand(describe_cmd) response = json.loads(stdout) instance = response['Reservations'][0]['Instances'][0] self.internal_ip = instance['PrivateIpAddress'] if util.IsRegion(self.zone): self.zone = str(instance['Placement']['AvailabilityZone']) assert self.group_id == instance['SecurityGroups'][0]['GroupId'], ( self.group_id, instance['SecurityGroups'][0]['GroupId']) if FLAGS.aws_efa: self._ConfigureEfa(instance) elif 'PublicIpAddress' in instance: self.ip_address = instance['PublicIpAddress'] else: raise errors.Resource.RetryableCreationError('Public IP not ready.') def _ConfigureEfa(self, instance): if FLAGS.aws_efa_count > 1: self._ConfigureElasticIp(instance) else: self.ip_address = instance['PublicIpAddress'] if FLAGS.aws_efa_version: self.InstallPackages('curl') url = _EFA_URL.format(version=FLAGS.aws_efa_version) tarfile = posixpath.basename(url) self.RemoteCommand(f'curl -O {url}; tar -xzf {tarfile}') self._InstallEfa() self.RemoteCommand('cd aws-efa-installer; ' 'PATH=${PATH}:/opt/amazon/efa/bin ./efa_test.sh') def _ConfigureElasticIp(self, instance): network_interface_id = None for network_interface in instance['NetworkInterfaces']: if network_interface['Attachment']['DeviceIndex'] == 0: network_interface_id = network_interface['NetworkInterfaceId'] break assert network_interface_id is not None stdout, _, _ = vm_util.IssueCommand(util.AWS_PREFIX + ['ec2', 'allocate-address', f'--region={self.region}', '--domain=vpc']) response = json.loads(stdout) self.ip_address = response['PublicIp'] self.allocation_id = response['AllocationId'] util.AddDefaultTags(self.allocation_id, self.region) stdout, _, _ = vm_util.IssueCommand( util.AWS_PREFIX + ['ec2', 'associate-address', f'--region={self.region}', f'--allocation-id={self.allocation_id}', f'--network-interface-id={network_interface_id}']) response = json.loads(stdout) self.association_id = response['AssociationId']
Apache License 2.0
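The machine-type helpers in the context above (GetArmArchitecture, GetProcessorArchitecture, IsPlacementGroupCompatible) are pure functions; a brief hedged sketch of how they behave, assuming the module import path below (machine types are illustrative):

    from perfkitbenchmarker.providers.aws import aws_virtual_machine as avm   # assumed module path

    avm.GetArmArchitecture('m6g.medium')        # 'graviton2' (prefix 'm6g')
    avm.GetArmArchitecture('c5.large')          # None, so not an ARM machine type
    avm.GetProcessorArchitecture('a1.medium')   # 'arm64'
    avm.IsPlacementGroupCompatible('t2.micro')  # False: the 't2' prefix is excluded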
joeyespo/gitpress
gitpress/repository.py
init
python
def init(directory=None): repo = repo_path(directory) if os.path.isdir(repo): raise RepositoryAlreadyExistsError(directory, repo) shutil.copytree(default_template_path, repo) message = '"Default presentation content."' subprocess.call(['git', 'init', '-q', repo]) subprocess.call(['git', 'add', '.'], cwd=repo) subprocess.call(['git', 'commit', '-q', '-m', message], cwd=repo) return repo
Initializes a Gitpress presentation repository at the specified directory.
https://github.com/joeyespo/gitpress/blob/a23edb80b6e4a113d167217475344a01c92b5c6d/gitpress/repository.py#L45-L59
import os import re import shutil import fnmatch import subprocess repo_dir = '.gitpress' templates_path = os.path.join(os.path.dirname(__file__), 'templates') default_template_path = os.path.join(templates_path, 'default') specials = ['.*', '_*'] specials_re = re.compile('|'.join([fnmatch.translate(x) for x in specials])) class RepositoryAlreadyExistsError(Exception): def __init__(self, directory=None, repo=None): super(RepositoryAlreadyExistsError, self).__init__() self.directory = os.path.abspath(directory if directory else os.getcwd()) self.repo = os.path.abspath(repo or repo_path(self.directory)) class RepositoryNotFoundError(Exception): def __init__(self, directory=None): super(RepositoryNotFoundError, self).__init__() self.directory = os.path.abspath(directory if directory else os.getcwd()) def require_repo(directory=None): if directory and not os.path.isdir(directory): raise ValueError('Directory not found: ' + repr(directory)) repo = repo_path(directory) if not os.path.isdir(repo): raise RepositoryNotFoundError(directory) return repo def repo_path(directory=None): return os.path.join(directory, repo_dir) if directory else repo_dir
MIT License
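A hedged usage sketch for the init function above; the target directory name is hypothetical and git must be available on PATH:

    from gitpress.repository import init, RepositoryAlreadyExistsError

    try:
        repo = init('my-presentation')              # hypothetical directory
        print('Created presentation repo at', repo)
    except RepositoryAlreadyExistsError as exc:
        print('A .gitpress repo already exists at', exc.repo)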
kozea/weasyprint
tests/test_text.py
make_text
python
def make_text(text, width=None, **style): new_style = dict(INITIAL_VALUES) new_style['font_family'] = MONO_FONTS.split(',') new_style.update(style) return split_first_line( text, new_style, context=None, max_width=width, justification_spacing=0)
Wrapper for split_first_line() creating a style dict.
https://github.com/kozea/weasyprint/blob/a149af9aaf902901d5d19134f5393e2637bcd219/tests/test_text.py#L16-L23
import pytest from weasyprint.css.properties import INITIAL_VALUES from weasyprint.text.line_break import split_first_line from .testing_utils import MONO_FONTS, SANS_FONTS, assert_no_logs, render_pages
BSD 3-Clause New or Revised License
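A minimal hedged sketch for the make_text test helper, assuming it is run from a WeasyPrint source checkout where the tests package is importable; the style values are illustrative:

    from tests.test_text import make_text   # assumed import path, matching the row's function_path

    # Lay out one line of monospace text at most 200 CSS pixels wide;
    # extra keyword arguments are merged into the style dict.
    result = make_text('Hello, World!', width=200, font_size=19)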
laurentrdc/npstreams
npstreams/utils.py
deprecated
python
def deprecated(message): def decorator(func): @wraps(func) def newfunc(*args, **kwargs): full_message = f"""Calls to {func.__name__} deprecated: {message}. {func.__name__} will be removed in a future release.""" with contextwarnings("always", DeprecationWarning): warn(full_message, category=DeprecationWarning, stacklevel=2) return func(*args, **kwargs) return newfunc return decorator
Decorator factory that warns of deprecation Parameters ---------- message : str Message will be dressed up with the name of the function. Returns ------- decorator : callable
https://github.com/laurentrdc/npstreams/blob/730e77eed3ee594e212ccd500558558fc7f37642/npstreams/utils.py#L15-L40
from contextlib import contextmanager from functools import wraps from warnings import resetwarnings, simplefilter, warn @contextmanager def contextwarnings(*args, **kwargs): simplefilter(*args, **kwargs) yield resetwarnings()
BSD 3-Clause New or Revised License
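A hedged usage sketch for the deprecated decorator factory above; the wrapped function and message are illustrative:

    import warnings
    from npstreams.utils import deprecated   # assumed import path, matching the row's function_path

    @deprecated('use isum instead')
    def old_sum(values):
        return sum(values)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        total = old_sum([1, 2, 3])            # returns 6 and records a DeprecationWarning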
numba/numba
numba/np/arrayobj.py
mark_positive
python
def mark_positive(builder, load): upper_bound = (1 << (load.type.width - 1)) - 1 set_range_metadata(builder, load, 0, upper_bound)
Mark the result of a load instruction as positive (or zero).
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/np/arrayobj.py#L49-L54
import functools import math import operator from llvmlite import ir import llvmlite.llvmpy.core as lc from llvmlite.llvmpy.core import Constant import numpy as np from numba import pndindex, literal_unroll from numba.core import types, utils, typing, errors, cgutils, extending from numba.np.numpy_support import (as_dtype, carray, farray, is_contiguous, is_fortran, check_is_integer) from numba.np.numpy_support import type_can_asarray, is_nonelike from numba.core.imputils import (lower_builtin, lower_getattr, lower_getattr_generic, lower_setattr_generic, lower_cast, lower_constant, iternext_impl, impl_ret_borrowed, impl_ret_new_ref, impl_ret_untracked, RefType) from numba.core.typing import signature from numba.core.extending import (register_jitable, overload, overload_method, intrinsic) from numba.misc import quicksort, mergesort from numba.cpython import slicing from numba.cpython.unsafe.tuple import tuple_setitem, build_full_slice_tuple from numba.core.overload_glue import glue_lowering from numba.core.extending import overload_classmethod def set_range_metadata(builder, load, lower_bound, upper_bound): range_operands = [Constant.int(load.type, lower_bound), Constant.int(load.type, upper_bound)] md = builder.module.add_metadata(range_operands) load.set_metadata("range", md)
BSD 2-Clause Simplified License
caserec2018/caserecommender
caserec/recommenders/rating_prediction/base_knn.py
BaseKNN.compute_bu
python
def compute_bu(self): self.bu = dict() for user in self.users: count = 0 for item in self.train_set['items_seen_by_user'].get(user, []): self.bu[user] = self.bu.get(user, 0) + float(self.train_set['feedback'][user].get(item, 0)) - self.train_set['mean_value'] - self.bi.get(item, 0) count += 1 if count > 1: self.bu[user] = float(self.bu[user]) / float(self.reg_bu + count) elif count == 0: self.bu[user] = self.train_set['mean_value']
Method to compute bu values bu = (rui - mi - bi) / (regBu + number of interactions)
https://github.com/caserec2018/caserecommender/blob/1b63fe79aa26786c99f35e6b8f0a0dd9e591811b/caserec/recommenders/rating_prediction/base_knn.py#L111-L131
from caserec.recommenders.rating_prediction.base_rating_prediction import BaseRatingPrediction __author__ = 'removed for double blind review' class BaseKNN(BaseRatingPrediction): def __init__(self, train_file, test_file, output_file=None, reg_bi=10, reg_bu=15, similarity_metric='cosine', sep='\t', output_sep='\t'): super(BaseKNN, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file, similarity_metric=similarity_metric, sep=sep, output_sep=output_sep) self.reg_bi = reg_bi self.reg_bu = reg_bu self.number_users = None self.number_items = None self.bu = {} self.bi = {} self.bui = {} def init_model(self): self.number_users = len(self.users) self.number_items = len(self.items) self.create_matrix() def train_baselines(self): self.bu = {} self.bi = {} self.bui = {} for i in range(10): self.compute_bi() self.compute_bu() self.compute_bui() def compute_bi(self): self.bi = dict() for item in self.items: count = 0 for user in self.train_set['users_viewed_item'].get(item, []): self.bi[item] = self.bi.get(item, 0) + float(self.train_set['feedback'][user].get(item, 0)) - self.train_set['mean_value'] - self.bu.get(user, 0) count += 1 if count > 1: self.bi[item] = float(self.bi[item]) / float(self.reg_bi + count) elif count == 0: self.bi[item] = self.train_set['mean_value']
MIT License
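A small numeric illustration of the bu formula from the docstring above, in plain Python with toy values (no recommender objects involved):

    mean_value = 3.5                           # toy global mean rating
    bi = {'i1': 0.2, 'i2': -0.1}               # toy item biases
    ratings_u1 = {'i1': 4.0, 'i2': 3.0}        # feedback of user u1
    reg_bu = 15

    raw = sum(r - mean_value - bi[i] for i, r in ratings_u1.items())   # 0.3 + (-0.4) = -0.1
    bu_u1 = raw / (reg_bu + len(ratings_u1))                           # -0.1 / 17 ≈ -0.0059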
catalyst-cooperative/pudl
src/pudl/extract/ferc1.py
drop_tables
python
def drop_tables(engine): md = sa.MetaData() md.reflect(engine) md.drop_all(engine) conn = engine.connect() conn.execute("VACUUM") conn.close()
Drop all FERC Form 1 tables from the SQLite database. Creates an sa.schema.MetaData object reflecting the structure of the database that the passed in ``engine`` refers to, and uses that schema to drop all existing tables. Todo: Treat DB connection as a context manager (with/as). Args: engine (:class:`sqlalchemy.engine.Engine`): A DB Engine pointing at an exising SQLite database to be deleted. Returns: None
https://github.com/catalyst-cooperative/pudl/blob/6a75069b90219a2da55262737b92fe0a024c4fb8/src/pudl/extract/ferc1.py#L182-L205
import csv import importlib import io import logging from pathlib import Path from typing import Dict import dbfread import pandas as pd import sqlalchemy as sa from dbfread import DBF from sqlalchemy import or_ import pudl from pudl import constants as pc from pudl.workspace.datastore import Datastore logger = logging.getLogger(__name__) PUDL_RIDS = { 514: "AEP Texas", 519: "Upper Michigan Energy Resources Company", 522: "Luning Energy Holdings LLC, Invenergy Investments", 529: "Tri-State Generation and Transmission Association", 531: "Basin Electric Power Cooperative", } def missing_respondents(reported, observed, identified): records = [] for rid in observed: if rid in reported: continue elif rid in identified: records.append( { "respondent_id": rid, "respondent_name": f"{identified[rid]} (PUDL determined)" }, ) else: records.append( { "respondent_id": rid, "respondent_name": f"Missing Respondent {rid}" }, ) return records def observed_respondents(ferc1_engine): f1_table_meta = pudl.output.pudltabl.get_table_meta(ferc1_engine) observed = set([]) for table in f1_table_meta.values(): if table.name != "f1_respondent_id" and "respondent_id" in table.columns: observed = observed.union(set(pd.read_sql_table( table.name, ferc1_engine, columns=["respondent_id"]).respondent_id)) return observed class Ferc1Datastore: PACKAGE_PATH = "pudl.package_data.meta.ferc1_row_maps" def __init__(self, datastore: Datastore): self.datastore = datastore self._cache = {} self.dbc_path = {} with importlib.resources.open_text(self.PACKAGE_PATH, "file_map.csv") as f: for row in csv.DictReader(f): year = int(row["year"]) path = Path(row["path"]) self.dbc_path[year] = path def get_dir(self, year: int) -> Path: if year not in self.dbc_path: raise ValueError(f"No ferc1 data for year {year}") return self.dbc_path[year] def get_file(self, year: int, filename: str): if year not in self._cache: self._cache[year] = self.datastore.get_zipfile_resource("ferc1", year=year) archive = self._cache[year] try: return archive.open((self.get_dir(year) / filename).as_posix()) except KeyError: raise KeyError(f"{filename} not availabe for year {year} in ferc1.")
MIT License
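A hedged usage sketch for drop_tables; the SQLite path is hypothetical and the raw "VACUUM" string execution assumes the SQLAlchemy 1.x API that the function itself relies on:

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite:///ferc1.sqlite')   # hypothetical existing FERC Form 1 database
    drop_tables(engine)                                   # reflect the schema, drop every table, then VACUUM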
sepandhaghighi/pycm
pycm/pycm_class_func.py
dInd_calc
python
def dInd_calc(TNR, TPR): try: result = math.sqrt(((1 - TNR)**2) + ((1 - TPR)**2)) return result except (TypeError, ValueError): return "None"
Calculate dInd (Distance index). :param TNR: specificity or true negative rate :type TNR : float :param TPR: sensitivity, recall, hit rate, or true positive rate :type TPR : float :return: dInd as float
https://github.com/sepandhaghighi/pycm/blob/efca98f6dc92fd1eb5ac05a48aec01680cd344bc/pycm/pycm_class_func.py#L527-L541
from __future__ import division import math from .pycm_util import normal_quantile from .pycm_interpret import * def sensitivity_index_calc(TPR, FPR): try: return normal_quantile(TPR) - normal_quantile(FPR) except TypeError: return "None" def NB_calc(TP, FP, POP, w): try: NB = (TP - w * FP) / POP return NB except (ZeroDivisionError, TypeError): return "None" def TI_calc(TP, FP, FN, alpha, beta): try: TI = TP / (TP + alpha * FN + beta * FP) return TI except (ZeroDivisionError, TypeError): return "None" def OOC_calc(TP, TOP, P): try: OOC = TP / (math.sqrt(TOP * P)) return OOC except (ZeroDivisionError, TypeError, ValueError): return "None" def OC_calc(TP, TOP, P): try: overlap_coef = TP / min(TOP, P) return overlap_coef except (ZeroDivisionError, TypeError): return "None" def AGF_calc(TP, FP, FN, TN): try: F2 = F_calc(TP=TP, FP=FP, FN=FN, beta=2) F05_inv = F_calc(TP=TN, FP=FN, FN=FP, beta=0.5) AGF = math.sqrt(F2 * F05_inv) return AGF except (TypeError, ValueError): return "None" def AGM_calc(TPR, TNR, GM, N, POP): try: n = N / POP if TPR == 0: result = 0 else: result = (GM + TNR * n) / (1 + n) return result except (ZeroDivisionError, TypeError): return "None" def Q_calc(TP, TN, FP, FN): try: OR = (TP * TN) / (FP * FN) result = (OR - 1) / (OR + 1) return result except (ZeroDivisionError, TypeError): return "None" def TTPN_calc(item1, item2): try: result = item1 / (item1 + item2) return result except (ZeroDivisionError, TypeError): return "None" def FXR_calc(item): try: result = 1 - item return result except TypeError: return "None" def ACC_calc(TP, TN, FP, FN): try: result = (TP + TN) / (TP + TN + FN + FP) return result except (ZeroDivisionError, TypeError): return "None" def F_calc(TP, FP, FN, beta): try: result = ((1 + (beta)**2) * TP) / ((1 + (beta)**2) * TP + FP + (beta**2) * FN) return result except (ZeroDivisionError, TypeError): return "None" def MCC_calc(TP, TN, FP, FN): try: result = (TP * TN - FP * FN) / (math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))) return result except (ZeroDivisionError, TypeError, ValueError): return "None" def MK_BM_calc(item1, item2): try: result = item1 + item2 - 1 return result except TypeError: return "None" def LR_calc(item1, item2): try: result = item1 / item2 return result except (ZeroDivisionError, TypeError): return "None" def PRE_calc(P, POP): try: result = P / POP return result except (ZeroDivisionError, TypeError): return "None" def G_calc(item1, item2): try: result = math.sqrt(item1 * item2) return result except (TypeError, ValueError): return "None" def RACC_calc(TOP, P, POP): try: result = (TOP * P) / ((POP) ** 2) return result except (ZeroDivisionError, TypeError): return "None" def RACCU_calc(TOP, P, POP): try: result = ((TOP + P) / (2 * POP))**2 return result except (ZeroDivisionError, TypeError): return "None" def ERR_calc(ACC): try: return 1 - ACC except TypeError: return "None" def jaccard_index_calc(TP, TOP, P): try: return TP / (TOP + P - TP) except (ZeroDivisionError, TypeError): return "None" def IS_calc(TP, FP, FN, POP): try: result = -math.log(((TP + FN) / POP), 2) + math.log((TP / (TP + FP)), 2) return result except (ZeroDivisionError, TypeError, ValueError): return "None" def CEN_misclassification_calc( table, TOP, P, i, j, subject_class, modified=False): try: result = TOP + P if modified: result -= table[subject_class][subject_class] result = table[i][j] / result return result except (ZeroDivisionError, TypeError): return "None" def CEN_calc(classes, table, TOP, P, class_name, modified=False): try: result = 0 
class_number = len(classes) for k in classes: if k != class_name: P_j_k = CEN_misclassification_calc( table, TOP, P, class_name, k, class_name, modified) P_k_j = CEN_misclassification_calc( table, TOP, P, k, class_name, class_name, modified) if P_j_k != 0: result += P_j_k * math.log(P_j_k, 2 * (class_number - 1)) if P_k_j != 0: result += P_k_j * math.log(P_k_j, 2 * (class_number - 1)) if result != 0: result = result * (-1) return result except (ZeroDivisionError, TypeError, ValueError): return "None" def AUC_calc(item, TPR): try: return (item + TPR) / 2 except TypeError: return "None"
MIT License
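A worked numeric check of the dInd formula above:

    d = dInd_calc(0.9, 0.8)      # sqrt((1 - 0.9)**2 + (1 - 0.8)**2) = sqrt(0.05) ≈ 0.2236
    bad = dInd_calc(None, 0.8)   # invalid input hits the except branch and returns "None"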
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/gce_account_region_settings.py
GceAccountRegionSettings.subnet_id
python
def subnet_id(self, subnet_id): if subnet_id is not None: if not isinstance(subnet_id, string_types): raise TypeError("Invalid type for `subnet_id`, type has to be `string_types`") self._subnet_id = subnet_id
Sets the subnet_id of this GceAccountRegionSettings. Id of the subnet for encoding instances (required) :param subnet_id: The subnet_id of this GceAccountRegionSettings. :type: string_types
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/gce_account_region_settings.py#L110-L124
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.bitmovin_resource import BitmovinResource from bitmovin_api_sdk.models.google_cloud_region import GoogleCloudRegion import pprint import six class GceAccountRegionSettings(BitmovinResource): @poscheck_model def __init__(self, id_=None, name=None, description=None, created_at=None, modified_at=None, custom_data=None, network=None, subnet_id=None, region=None): super(GceAccountRegionSettings, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data) self._network = None self._subnet_id = None self._region = None self.discriminator = None if network is not None: self.network = network if subnet_id is not None: self.subnet_id = subnet_id if region is not None: self.region = region @property def openapi_types(self): types = {} if hasattr(super(GceAccountRegionSettings, self), 'openapi_types'): types = getattr(super(GceAccountRegionSettings, self), 'openapi_types') types.update({ 'network': 'string_types', 'subnet_id': 'string_types', 'region': 'GoogleCloudRegion' }) return types @property def attribute_map(self): attributes = {} if hasattr(super(GceAccountRegionSettings, self), 'attribute_map'): attributes = getattr(super(GceAccountRegionSettings, self), 'attribute_map') attributes.update({ 'network': 'network', 'subnet_id': 'subnetId', 'region': 'region' }) return attributes @property def network(self): return self._network @network.setter def network(self, network): if network is not None: if not isinstance(network, string_types): raise TypeError("Invalid type for `network`, type has to be `string_types`") self._network = network @property def subnet_id(self): return self._subnet_id @subnet_id.setter
MIT License
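A hedged sketch of the type check enforced by the subnet_id setter; the subnet id value is illustrative:

    from bitmovin_api_sdk.models.gce_account_region_settings import GceAccountRegionSettings

    settings = GceAccountRegionSettings()
    settings.subnet_id = 'default-subnet'   # any string is accepted
    try:
        settings.subnet_id = 123            # non-strings fail the isinstance check
    except TypeError:
        pass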
brainix/pottery
pottery/deque.py
RedisDeque.insert
python
def insert(self, index: int, value: JSONTypes) -> None: with self._watch() as pipeline: current_length = cast(int, pipeline.llen(self.key)) if self.maxlen is not None and current_length >= self.maxlen: raise IndexError( f'{self.__class__.__name__} already at its maximum size' ) super()._insert(index, value, pipeline=pipeline)
Insert an element into the RedisDeque before the given index. O(n)
https://github.com/brainix/pottery/blob/6432d21a3e8405a661fb1fab8c281d8bfe06f04c/pottery/deque.py#L79-L87
import collections from typing import Iterable from typing import Optional from typing import Tuple from typing import Union from typing import cast from redis import Redis from redis.client import Pipeline from .base import JSONTypes from .list import RedisList class RedisDeque(RedisList, collections.deque): def __init__(self, iterable: Iterable[JSONTypes] = tuple(), maxlen: Optional[int] = None, *, redis: Optional[Redis] = None, key: Optional[str] = None, ) -> None: if maxlen is not None and not isinstance(maxlen, int): raise TypeError('an integer is required') self._maxlen = maxlen super().__init__(iterable, redis=redis, key=key) if not iterable and self.maxlen is not None and len(self) > self.maxlen: raise IndexError( f'persistent {self.__class__.__name__} beyond its maximum size' ) def _populate(self, pipeline: Pipeline, iterable: Iterable[JSONTypes] = tuple(), ) -> None: if self.maxlen is not None: if self.maxlen: iterable = tuple(iterable)[-self.maxlen:] else: iterable = tuple() super()._populate(pipeline, iterable) @property def maxlen(self) -> Optional[int]: return self._maxlen @maxlen.setter def maxlen(self, value: int) -> None: raise AttributeError( f"attribute 'maxlen' of '{self.__class__.__name__}' objects is not " 'writable' )
Apache License 2.0
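A hedged usage sketch for RedisDeque.insert; it assumes a Redis server on localhost and that RedisDeque is importable from the pottery package (the key name is illustrative):

    from redis import Redis
    from pottery import RedisDeque   # assumed public import; the row's module is pottery/deque.py

    d = RedisDeque([1, 2], maxlen=3, redis=Redis(), key='demo-deque')
    d.insert(1, 'x')                 # fits: the deque is still below its maxlen of 3
    try:
        d.insert(0, 'y')             # now at maxlen, so insert raises IndexError
    except IndexError:
        pass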
earthlab/earthpy
earthpy/clip.py
clip_shp
python
def clip_shp(shp, clip_obj): raise Warning( "clip_shp is deprecated in earthpy and has been moved to Geopandas. " "Please use the Geopandas clip() function. Exiting..." ) sys.exit()
This function has been deprecated from earthpy. Please use the clip() function in GeoPandas instead.
https://github.com/earthlab/earthpy/blob/9ad455e85002a2b026c78685329f8c5b360fde5a/earthpy/clip.py#L65-L74
import sys def _clip_points(shp, clip_obj): raise Warning( "_clip_points is deprecated. Use the _clip_points() function in " "GeoPandas. Exiting..." ) sys.exit() def _clip_multi_point(shp, clip_obj): raise Warning( "_clip_multi_point is deprecated. Use the _clip_points() function in " "GeoPandas. Exiting..." ) sys.exit() def _clip_line_poly(shp, clip_obj): raise Warning( "_clip_line_poly is deprecated. Use the _clip_line_poly() function in " "GeoPandas. Exiting..." ) sys.exit() def _clip_multi_poly_line(shp, clip_obj): raise Warning( "_clip_multi_poly_line is deprecated. Use the _clip_line_poly() " "function in GeoPandas. Exiting..." ) sys.exit()
BSD 3-Clause New or Revised License
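The function above only raises a deprecation notice; a hedged sketch of the GeoPandas replacement it points to, with tiny hypothetical GeoDataFrames:

    import geopandas as gpd
    from shapely.geometry import Point, box

    points_gdf = gpd.GeoDataFrame(geometry=[Point(0.5, 0.5), Point(2, 2)])
    boundary_gdf = gpd.GeoDataFrame(geometry=[box(0, 0, 1, 1)])
    clipped = gpd.clip(points_gdf, boundary_gdf)   # keeps only the point inside the box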
suchydev/kivy-dynamic-screens-template
screens/resource_registers.py
register_kv_and_data
python
def register_kv_and_data(): global _registered if _registered: return for path in _data_paths + _kv_paths: p = os.path.join(_base_path, path) resource_add_path(p) _registered = True
Registers the data and kv paths in Kivy resources system.
https://github.com/suchydev/kivy-dynamic-screens-template/blob/7fbab74c5693430ea486ac359fa7a032596c232b/screens/resource_registers.py#L23-L31
import os.path from kivy.resources import resource_add_path _base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) _data_paths = ( 'data/images', 'data/fonts', 'data/audio' ) _kv_paths = ('kv', ) _registered = False
MIT License
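A brief hedged usage note for the registration helper above, which is idempotent thanks to the module-level flag:

    from screens.resource_registers import register_kv_and_data

    register_kv_and_data()   # adds the data/ and kv/ paths to Kivy's resource search path
    register_kv_and_data()   # no-op: the _registered flag short-circuits the second call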
thefourgreaterrors/alpha-rptr
src/binance_futures.py
BinanceFutures.get_market_price
python
def get_market_price(self): self.__init_client() if self.market_price != 0: return self.market_price else: self.market_price = float(retry(lambda: self.client .futures_symbol_ticker(symbol=self.pair))['price']) return self.market_price
get current price :return:
https://github.com/thefourgreaterrors/alpha-rptr/blob/17fc54b1fb044978aab337a0a4814840e328b6f9/src/binance_futures.py#L245-L256
import json import math import os import traceback from datetime import datetime, timezone import time import pandas as pd from bravado.exception import HTTPNotFound from pytz import UTC from src import logger, allowed_range, to_data_frame, resample, delta, FatalError, notify, ord_suffix from src import retry_binance_futures as retry from src.config import config as conf from src.binance_futures_api import Client from src.binance_futures_websocket import BinanceFuturesWs from src.orderbook import OrderBook class BinanceFutures: account = '' pair = 'BTCUSDT' wallet = None market_price = 0 order_update = None order_update_log = True position = None position_size = None entry_price = None margin = None account_information = None bin_size = '1h' client = None is_running = True crawler = None strategy = None enable_trade_log = True ohlcv_len = 100 data = None sltp_values = { 'profit_long': 0, 'profit_short': 0, 'stop_long': 0, 'stop_short': 0, 'eval_tp_next_candle': False, 'profit_long_callback': None, 'profit_short_callback': None, 'stop_long_callback': None, 'stop_short_callback': None } round_decimals = 2 exit_order = { 'profit': 0, 'loss': 0, 'trail_offset': 0, 'profit_callback': None, 'loss_callback': None, 'trail_callbak': None } trail_price = 0 last_action_time = None best_bid_price = None best_ask_price = None callbacks = {} def __init__(self, account, pair, demo=False, threading=True): self.account = account self.pair = pair self.demo = demo self.is_running = threading def __init_client(self): if self.client is not None: return api_key = conf['binance_keys'][self.account]['API_KEY'] api_secret = conf['binance_keys'][self.account]['SECRET_KEY'] self.client = Client(api_key=api_key, api_secret=api_secret) def now_time(self): return datetime.now().astimezone(UTC) def get_retain_rate(self): return 0.8 def lot_leverage(self): return 20 def get_lot(self, round_decimals=3): account_information = self.get_account_information() return round(float(account_information['totalMarginBalance']) / self.get_market_price() * self.lot_leverage(), round_decimals) def get_balance(self): self.__init_client() ret = self.get_margin() if len(ret) > 0: balances = [p for p in ret if p["asset"] == "USDT"] return float(balances[0]["balance"]) else: return None def get_margin(self): self.__init_client() if self.margin is not None: return self.margin else: self.margin = retry(lambda: self.client .futures_account_balance_v2()) return self.margin def get_leverage(self): self.__init_client() return float(self.get_position()["leverage"]) def get_account_information(self): self.account_information = retry(lambda: self.client .futures_account_v2()) return self.account_information def get_position(self): self.__init_client() ret = retry(lambda: self.client .futures_position_information()) if len(ret) > 0: self.position = [p for p in ret if p["symbol"] == self.pair] return self.position[0] else: return None def get_position_size(self): self.__init_client() if self.position_size is not None: return self.position_size position = self.get_position() if position['symbol'] == self.pair: return float(position['positionAmt']) else: return 0 def get_position_avg_price(self): self.__init_client() return float(self.get_position()['entryPrice'])
MIT License
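A hedged usage sketch for get_market_price; it assumes Binance API credentials are configured for the named account in the project's config (the account key is hypothetical):

    from src.binance_futures import BinanceFutures

    bot = BinanceFutures(account='binanceaccount1', pair='BTCUSDT')   # hypothetical account key
    price = bot.get_market_price()   # first call queries the ticker; later calls reuse the cached value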
henglan/siamfc-pytorch
ILSVRC15-curation/gen_imdb_VID.py
generate_image_imdb
python
def generate_image_imdb(vid_root_path, vid_curated_path): anno_str = "Annotations/VID/train/" data_str = "Data/VID/train/" vid_anno_path = os.path.join(vid_root_path, anno_str) vid_data_path = os.path.join(vid_root_path, data_str) num_videos = 0 all_dirs_level1 = os.listdir(vid_anno_path) for i in range(len(all_dirs_level1)): all_dirs_level2 = os.listdir(os.path.join(vid_anno_path, all_dirs_level1[i])) num_videos = num_videos + len(all_dirs_level2) train_video_num = round(num_videos * (1-validation_ratio)) val_video_num = num_videos - train_video_num imdb_video_train = dict() imdb_video_train['num_videos'] = train_video_num imdb_video_train['data_str'] = data_str imdb_video_val = dict() imdb_video_val['num_videos'] = val_video_num imdb_video_val['data_str'] = data_str videos_train = dict() videos_val = dict() vid_idx = 0 for i in range(len(all_dirs_level1)): all_dirs_level2 = os.listdir(os.path.join(vid_anno_path, all_dirs_level1[i])) for j in range(len(all_dirs_level2)): if vid_idx < train_video_num: if not videos_train.has_key(all_dirs_level2[j]): videos_train[all_dirs_level2[j]] = [] else: if not videos_val.has_key(all_dirs_level2[j]): videos_val[all_dirs_level2[j]] = [] frame_list = glob.glob(os.path.join(vid_anno_path, all_dirs_level1[i], all_dirs_level2[j], "*.xml")) frame_list.sort() video_ids = dict() for k in range(len(frame_list)): frame_id = k frame_xml_name = os.path.join(vid_anno_path, all_dirs_level1[i], all_dirs_level2[j], frame_list[k]) frame_xml_tree = ET.parse(frame_xml_name) frame_xml_root = frame_xml_tree.getroot() crop_path = os.path.join(all_dirs_level1[i], all_dirs_level2[j]) frame_filename = frame_xml_root.find('filename').text print ("processing: %s, %s, %s ..." % (all_dirs_level1[i], all_dirs_level2[j], frame_filename)) for object in frame_xml_root.iter("object"): id = object.find("trackid").text if not video_ids.has_key(id): video_ids[id] = [] bbox_node = object.find("bndbox") xmax = float(bbox_node.find('xmax').text) xmin = float(bbox_node.find('xmin').text) ymax = float(bbox_node.find('ymax').text) ymin = float(bbox_node.find('ymin').text) width = xmax - xmin + 1 height = ymax - ymin + 1 bbox = np.array([xmin, ymin, width, height]) tmp_instance = dict() tmp_instance['instance_path'] = os.path.join(all_dirs_level1[i], all_dirs_level2[j], '{}.{:02d}.crop.x.jpg'.format(frame_filename, int(id))) tmp_instance['bbox'] =bbox.tolist() video_ids[id].append(tmp_instance) tmp_keys = video_ids.keys() for ki in range(len(tmp_keys)): if len(video_ids[tmp_keys[ki]]) < 2: del video_ids[tmp_keys[ki]] tmp_keys = video_ids.keys() if len(tmp_keys) > 0: if vid_idx < train_video_num: videos_train[all_dirs_level2[j]].append(video_ids) else: videos_val[all_dirs_level2[j]].append(video_ids) vid_idx = vid_idx + 1 imdb_video_train['videos'] = videos_train imdb_video_val['videos'] = videos_val json.dump(imdb_video_train, open('imdb_video_train.json', 'w'), indent=2) json.dump(imdb_video_val, open('imdb_video_val.json', 'w'), indent=2)
# save image crops to the vid_curated_path
https://github.com/henglan/siamfc-pytorch/blob/6cb921f5ac58b612ebface176d9a84d5e033150a/ILSVRC15-curation/gen_imdb_VID.py#L14-L125
import numpy as np import os import glob import xml.etree.ElementTree as ET import json validation_ratio = 0.1
Apache License 2.0
tensorflow/tensor2tensor
tensor2tensor/models/evolved_transformer_test.py
get_var
python
def get_var(name): variables = [var for var in tf.trainable_variables() if var.name == name] if len(variables) == 1: return variables[0] raise ValueError("`name` must match exactly one variable. '%s' matched %d" % (name, len(variables)))
Get trainable variable by name.
https://github.com/tensorflow/tensor2tensor/blob/c22a226704e5887862bf9edd9f269892c9016ad4/tensor2tensor/models/evolved_transformer_test.py#L45-L51
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensor2tensor.data_generators import problem_hparams from tensor2tensor.models import evolved_transformer from tensor2tensor.models import transformer import tensorflow.compat.v1 as tf BATCH_SIZE = 3 INPUT_LENGTH = 5 TARGET_LENGTH = 7 VOCAB_SIZE = 10 DECODE_LENGTH = 3 def print_vars(all_vars=None): if not all_vars: all_vars = tf.trainable_variables() tf.logging.info("Format: <name>, <shape>, <(soft) device placement>") for var in all_vars: tf.logging.info(" %s, %s, %s" % (var.name, str(var.get_shape()), var.op.device))
Apache License 2.0
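A hedged sketch of get_var in TF1-style graph mode; the variable name is illustrative, and note the ':0' suffix TensorFlow appends to variable names:

    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()                 # get_var relies on graph-mode trainable_variables()
    tf.reset_default_graph()
    _ = tf.get_variable('kernel', shape=[3, 3])
    kernel = get_var('kernel:0')                 # exactly one match, so the variable is returned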
lehrblogger/where-do-you-go
geo/geocell.py
point_distance
python
def point_distance(cell, point): bbox = compute_box(cell) between_w_e = bbox.west <= point.lon and point.lon <= bbox.east between_n_s = bbox.south <= point.lat and point.lat <= bbox.north if between_w_e: if between_n_s: return min(geomath.distance(point, (bbox.south, point.lon)), geomath.distance(point, (bbox.north, point.lon)), geomath.distance(point, (point.lat, bbox.east)), geomath.distance(point, (point.lat, bbox.west))) else: return min(geomath.distance(point, (bbox.south, point.lon)), geomath.distance(point, (bbox.north, point.lon))) else: if between_n_s: return min(geomath.distance(point, (point.lat, bbox.east)), geomath.distance(point, (point.lat, bbox.west))) else: return min(geomath.distance(point, (bbox.south, bbox.east)), geomath.distance(point, (bbox.north, bbox.east)), geomath.distance(point, (bbox.south, bbox.west)), geomath.distance(point, (bbox.north, bbox.west)))
Returns the shortest distance between a point and a geocell bounding box. If the point is inside the cell, the shortest distance is always to a 'edge' of the cell rectangle. If the point is outside the cell, the shortest distance will be to either a 'edge' or 'corner' of the cell rectangle. Returns: The shortest distance from the point to the geocell's rectangle, in meters.
https://github.com/lehrblogger/where-do-you-go/blob/51c1fb3a66d8babe00a9412c72ebbd66fe348d88/geo/geocell.py#L333-L367
__author__ = 'api.roman.public@gmail.com (Roman Nurik)' import os.path import sys import geomath import geotypes _GEOCELL_GRID_SIZE = 4 _GEOCELL_ALPHABET = '0123456789abcdef' MAX_GEOCELL_RESOLUTION = 13 MAX_FEASIBLE_BBOX_SEARCH_CELLS = 300 NORTHWEST = (-1, 1) NORTH = (0, 1) NORTHEAST = (1, 1) EAST = (1, 0) SOUTHEAST = (1, -1) SOUTH = (0, -1) SOUTHWEST = (-1, -1) WEST = (-1, 0) def best_bbox_search_cells(bbox, cost_function): cell_ne = compute(bbox.north_east, resolution=MAX_GEOCELL_RESOLUTION) cell_sw = compute(bbox.south_west, resolution=MAX_GEOCELL_RESOLUTION) min_cost = 1e10000 min_cost_cell_set = None min_resolution = len(os.path.commonprefix([cell_sw, cell_ne])) for cur_resolution in range(min_resolution, MAX_GEOCELL_RESOLUTION + 1): cur_ne = cell_ne[:cur_resolution] cur_sw = cell_sw[:cur_resolution] num_cells = interpolation_count(cur_ne, cur_sw) if num_cells > MAX_FEASIBLE_BBOX_SEARCH_CELLS: continue cell_set = sorted(interpolate(cur_ne, cur_sw)) simplified_cells = [] cost = cost_function(num_cells=len(cell_set), resolution=cur_resolution) if cost <= min_cost: min_cost = cost min_cost_cell_set = cell_set else: break return min_cost_cell_set def collinear(cell1, cell2, column_test): for i in range(min(len(cell1), len(cell2))): x1, y1 = _subdiv_xy(cell1[i]) x2, y2 = _subdiv_xy(cell2[i]) if not column_test and y1 != y2: return False if column_test and x1 != x2: return False return True def interpolate(cell_ne, cell_sw): cell_set = [[cell_sw]] while not collinear(cell_set[0][-1], cell_ne, True): cell_tmp = adjacent(cell_set[0][-1], (1, 0)) if cell_tmp is None: break cell_set[0].append(cell_tmp) while cell_set[-1][-1] != cell_ne: cell_tmp_row = [adjacent(g, (0, 1)) for g in cell_set[-1]] if cell_tmp_row[0] is None: break cell_set.append(cell_tmp_row) return [g for inner in cell_set for g in inner] def interpolation_count(cell_ne, cell_sw): bbox_ne = compute_box(cell_ne) bbox_sw = compute_box(cell_sw) cell_lat_span = bbox_sw.north - bbox_sw.south cell_lon_span = bbox_sw.east - bbox_sw.west num_cols = int((bbox_ne.east - bbox_sw.west) / cell_lon_span) num_rows = int((bbox_ne.north - bbox_sw.south) / cell_lat_span) return num_cols * num_rows def all_adjacents(cell): return [adjacent(cell, d) for d in [NORTHWEST, NORTH, NORTHEAST, EAST, SOUTHEAST, SOUTH, SOUTHWEST, WEST]] def adjacent(cell, dir): if cell is None: return None dx = dir[0] dy = dir[1] cell_adj_arr = list(cell) i = len(cell_adj_arr) - 1 while i >= 0 and (dx != 0 or dy != 0): x, y = _subdiv_xy(cell_adj_arr[i]) if dx == -1: if x == 0: x = _GEOCELL_GRID_SIZE - 1 else: x -= 1 dx = 0 elif dx == 1: if x == _GEOCELL_GRID_SIZE - 1: x = 0 else: x += 1 dx = 0 if dy == 1: if y == _GEOCELL_GRID_SIZE - 1: y = 0 else: y += 1 dy = 0 elif dy == -1: if y == 0: y = _GEOCELL_GRID_SIZE - 1 else: y -= 1 dy = 0 cell_adj_arr[i] = _subdiv_char((x,y)) i -= 1 if dy != 0: return None return ''.join(cell_adj_arr) def contains_point(cell, point): return compute(point, len(cell)) == cell
MIT License
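A hedged usage sketch for point_distance; it assumes geotypes.Point(lat, lon) and the module-level compute() helper referenced elsewhere in this file (coordinates are illustrative):

    import geocell
    import geotypes

    point = geotypes.Point(40.75, -73.99)         # hypothetical query point
    cell = geocell.compute(point, resolution=8)   # geocell containing the point
    meters = geocell.point_distance(cell, geotypes.Point(40.80, -73.95))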
google/clusterfuzz
src/clusterfuzz/_internal/platforms/android/adb.py
run_command
python
def run_command(cmd, log_output=False, log_error=True, timeout=None, recover=True): if isinstance(cmd, list): cmd = ' '.join([str(i) for i in cmd]) if log_output: logs.log('Running: adb %s' % cmd) if not timeout: timeout = ADB_TIMEOUT output = execute_command(get_adb_command_line(cmd), timeout, log_error) if not recover or environment.is_android_emulator(): if log_output: logs.log('Output: (%s)' % output) return output device_not_found_string_with_serial = DEVICE_NOT_FOUND_STRING.format( serial=environment.get_value('ANDROID_SERIAL')) if (output in [ DEVICE_HANG_STRING, DEVICE_OFFLINE_STRING, device_not_found_string_with_serial ]): logs.log_warn('Unable to query device, resetting device connection.') if reset_device_connection(): output = execute_command(get_adb_command_line(cmd), timeout, log_error) else: output = DEVICE_HANG_STRING if output is DEVICE_HANG_STRING: logs.log_warn('Unable to query device, restarting device to recover.') hard_reset() wait_until_fully_booted() output = execute_command(get_adb_command_line(cmd), timeout, log_error) if log_output: logs.log('Output: (%s)' % output) return output
Run a command in adb shell.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/clusterfuzz/_internal/platforms/android/adb.py#L609-L654
import collections import os import re import signal import socket import subprocess import tempfile import threading import time from clusterfuzz._internal.base import persistent_cache from clusterfuzz._internal.base import utils from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import shell ADB_TIMEOUT = 1200 BAD_STATE_WAIT = 900 BOOT_WAIT_INTERVAL = 30 CUTTLEFISH_USER = 'vsoc-01' CUTTLEFISH_CVD_PORT = 6520 DEFAULT_DEVICE_MEMORY_MB = 2048 DEVICE = collections.namedtuple('Device', ['serial', 'path']) DEVICE_HANG_STRING = None DEVICE_NOT_FOUND_STRING = 'error: device \'{serial}\' not found' DEVICE_OFFLINE_STRING = 'error: device offline' FACTORY_RESET_WAIT = 60 KERNEL_LOG_FILES = [ '/proc/last_kmsg', '/sys/fs/pstore/console-ramoops', ] MONKEY_PROCESS_NAME = 'monkey' WAIT_FOR_DEVICE_TIMEOUT = 600 REBOOT_TIMEOUT = 3600 RECOVERY_CMD_TIMEOUT = 60 STOP_CVD_WAIT = 20 LSUSB_BUS_RE = re.compile(r'Bus\s+(\d+)\s+Device\s+(\d+):.*') LSUSB_SERIAL_RE = re.compile(r'\s+iSerial\s+\d\s+(.*)') USBDEVFS_RESET = ord('U') << 8 | 20 def bad_state_reached(): persistent_cache.clear_values() logs.log_fatal_and_exit( 'Device in bad state.', wait_before_exit=BAD_STATE_WAIT) def connect_to_cuttlefish_device(): logs.log('Connect to cuttlefish device.') device_serial = environment.get_value('ANDROID_SERIAL') connect_cmd = f'{get_adb_path()} connect {device_serial}' return execute_command(connect_cmd, timeout=RECOVERY_CMD_TIMEOUT) def copy_local_directory_to_remote(local_directory, remote_directory): create_directory_if_needed(remote_directory) if os.listdir(local_directory): run_command(['push', '%s/.' % local_directory, remote_directory]) def copy_local_file_to_remote(local_file_path, remote_file_path): create_directory_if_needed(os.path.dirname(remote_file_path)) run_command(['push', local_file_path, remote_file_path]) def copy_remote_directory_to_local(remote_directory, local_directory): run_command(['pull', '%s/.' % remote_directory, local_directory]) def copy_remote_file_to_local(remote_file_path, local_file_path): shell.create_directory( os.path.dirname(local_file_path), create_intermediates=True) run_command(['pull', remote_file_path, local_file_path]) def create_directory_if_needed(device_directory): run_shell_command(['mkdir', '-p', device_directory]) def directory_exists(directory_path): expected = '0' result = run_shell_command( '\'test -d "%s"; echo $?\'' % directory_path, log_error=False) return result == expected def execute_command(cmd, timeout=None, log_error=True, on_cuttlefish_host=False): if on_cuttlefish_host and environment.is_android_cuttlefish(): cmd = ('ssh -o StrictHostKeyChecking=no ' f'{get_cuttlefish_ssh_target()} "{cmd}"') so = [] output_dest = tempfile.TemporaryFile() pipe = subprocess.Popen( cmd, executable='/bin/bash', stdout=output_dest, stderr=subprocess.STDOUT, shell=True, preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL), bufsize=0) def run(): try: pipe.communicate() output_dest.seek(0) output = output_dest.read() output_dest.close() if output: so.append(output) except OSError as _: logs.log_warn('Failed to retrieve stdout from: %s' % cmd) if pipe.returncode: if log_error: logs.log_warn( '%s returned %d error code.' % (cmd, pipe.returncode), output=output) thread = threading.Thread(target=run) thread.start() thread.join(timeout) if thread.is_alive(): logs.log_warn('Command %s timed out. Killing process.' 
% cmd) try: pipe.kill() except OSError: pass return None bytes_output = b''.join(so) return bytes_output.strip().decode('utf-8', errors='ignore') def copy_to_cuttlefish(src_path, dest_path, timeout=None): cvd_address = get_cuttlefish_ssh_target() return execute_command( 'scp -o StrictHostKeyChecking=no ' f'-r {src_path} {cvd_address}:{dest_path}', timeout=timeout) def factory_reset(): if environment.is_android_cuttlefish() or environment.is_android_emulator(): bad_state_reached() revert_asan_device_setup_if_needed() run_as_root() run_shell_command([ 'am', 'broadcast', '-a', 'android.intent.action.MASTER_CLEAR', '-n', 'android/com.android.server.MasterClearReceiver' ]) time.sleep(FACTORY_RESET_WAIT) def file_exists(file_path): expected = '0' result = run_shell_command( '\'test -f "%s"; echo $?\'' % file_path, log_error=False) return result == expected def get_adb_command_line(adb_cmd): device_serial = environment.get_value('ANDROID_SERIAL') adb_cmd_line = '%s -s %s %s' % (get_adb_path(), device_serial, adb_cmd) return adb_cmd_line def get_adb_path(): adb_path = environment.get_value('ADB') if adb_path: return adb_path return os.path.join(environment.get_platform_resources_directory(), 'adb') def get_device_state(): state_cmd = get_adb_command_line('get-state') return execute_command(state_cmd, timeout=RECOVERY_CMD_TIMEOUT) def get_fastboot_command_line(fastboot_cmd): fastboot_cmd_line = '%s %s' % (get_fastboot_path(), fastboot_cmd) return fastboot_cmd_line def get_fastboot_path(): return os.path.join(environment.get_platform_resources_directory(), 'fastboot') def get_file_checksum(file_path): if not file_exists(file_path): return None return run_shell_command(['md5sum', '-b', file_path]) def get_file_size(file_path): if not file_exists(file_path): return None return int(run_shell_command(['stat', '-c%s', file_path])) def get_kernel_log_content(): kernel_log_content = '' for kernel_log_file in KERNEL_LOG_FILES: kernel_log_content += read_data_from_file(kernel_log_file) or '' return kernel_log_content def get_ps_output(): return run_shell_command(['ps', '-A']) def get_process_and_child_pids(process_name): pids = [] ps_output_lines = get_ps_output().splitlines() while True: old_pids_length = len(pids) for line in ps_output_lines: data = line.split() try: line_process_pid = int(data[1]) line_parent_pid = int(data[2]) except: continue if line_process_pid in pids: continue line_process_name = data[-1] if (process_name in line_process_name or MONKEY_PROCESS_NAME in line_process_name): if process_name == line_process_name: pids.insert(0, line_process_pid) else: pids.append(line_process_pid) continue if line_parent_pid in pids: pids.append(line_process_pid) new_pids_length = len(pids) if old_pids_length == new_pids_length: break return pids def get_property(property_name): return run_shell_command(['getprop', property_name]) def hard_reset(): if environment.is_android_cuttlefish() or environment.is_android_emulator(): bad_state_reached() hard_reset_sysrq_cmd = get_adb_command_line( 'shell echo b \\> /proc/sysrq-trigger') execute_command(hard_reset_sysrq_cmd, timeout=RECOVERY_CMD_TIMEOUT) soft_reset_cmd = get_adb_command_line('reboot') execute_command(soft_reset_cmd, timeout=RECOVERY_CMD_TIMEOUT) def kill_processes_and_children_matching_name(process_name): process_and_child_pids = get_process_and_child_pids(process_name) if not process_and_child_pids: return kill_command = ['kill', '-9'] + process_and_child_pids run_shell_command(kill_command) def read_data_from_file(file_path): if not 
file_exists(file_path): return None return run_shell_command(['cat', '"%s"' % file_path]) def reboot(): run_command('reboot') def start_cuttlefish_device(use_kernel=False): cvd_dir = environment.get_value('CVD_DIR') cvd_bin_dir = os.path.join(cvd_dir, 'bin') launch_cvd_path = os.path.join(cvd_bin_dir, 'launch_cvd') device_memory_mb = environment.get_value('DEVICE_MEMORY_MB', DEFAULT_DEVICE_MEMORY_MB) launch_cvd_command_line = ( f'{launch_cvd_path} -daemon -memory_mb {device_memory_mb} ' '-report_anonymous_usage_stats Y') if use_kernel: kernel_path = os.path.join(cvd_dir, 'bzImage') initramfs_path = os.path.join(cvd_dir, 'initramfs.img') launch_cvd_command_line += ( f' -kernel_path={kernel_path} -initramfs_path={initramfs_path}') execute_command(launch_cvd_command_line, on_cuttlefish_host=True) def stop_cuttlefish_device(): cvd_dir = environment.get_value('CVD_DIR') cvd_bin_dir = os.path.join(cvd_dir, 'bin') stop_cvd_cmd = os.path.join(cvd_bin_dir, 'stop_cvd') execute_command( stop_cvd_cmd, timeout=RECOVERY_CMD_TIMEOUT, on_cuttlefish_host=True) time.sleep(STOP_CVD_WAIT) def restart_cuttlefish_device(): cvd_dir = environment.get_value('CVD_DIR') cvd_bin_dir = os.path.join(cvd_dir, 'bin') restart_cvd_cmd = os.path.join(cvd_bin_dir, 'restart_cvd') execute_command(restart_cvd_cmd, on_cuttlefish_host=True) def recreate_cuttlefish_device(): logs.log('Reimaging cuttlefish device.') cvd_dir = environment.get_value('CVD_DIR') stop_cuttlefish_device() rm_cmd = f'rm -rf {cvd_dir}/*' execute_command(rm_cmd, timeout=RECOVERY_CMD_TIMEOUT, on_cuttlefish_host=True) image_directory = environment.get_value('IMAGES_DIR') for image_filename in os.listdir(image_directory): if image_filename.endswith('.zip') or image_filename.endswith('.tar.gz'): continue image_src = os.path.join(image_directory, image_filename) image_dest = os.path.join(cvd_dir, image_filename) copy_to_cuttlefish(image_src, image_dest) start_cuttlefish_device() def remount(): run_as_root() run_command('remount') wait_for_device() run_as_root() def remove_directory(device_directory, recreate=False): run_shell_command('rm -rf %s' % device_directory, root=True) if recreate: create_directory_if_needed(device_directory) def remove_file(file_path): run_shell_command('rm -f %s' % file_path, root=True) def reset_device_connection(): if environment.is_android_cuttlefish(): restart_cuttlefish_device() else: reset_usb() state = get_device_state() if state != 'device': logs.log_warn('Device state is %s, unable to recover using usb reset/' 'cuttlefish reconnect.' 
% str(state)) return False return True def get_cuttlefish_device_ip(): try: return socket.gethostbyname('cuttlefish') except socket.gaierror: logs.log_fatal_and_exit('Unable to get cvd ip address on cuttlefish host.') return None def set_cuttlefish_device_serial(): device_serial = '%s:%d' % (get_cuttlefish_device_ip(), CUTTLEFISH_CVD_PORT) environment.set_value('ANDROID_SERIAL', device_serial) logs.log('Set cuttlefish device serial: %s' % device_serial) def get_cuttlefish_ssh_target(): return f'{CUTTLEFISH_USER}@{get_cuttlefish_device_ip()}' def get_device_path(): def _get_usb_devices(): usb_list_cmd = 'lsusb -v' output = execute_command(usb_list_cmd, timeout=RECOVERY_CMD_TIMEOUT) if output is None: logs.log_error('Failed to populate usb devices using lsusb, ' 'host restart might be needed.') bad_state_reached() devices = [] path = None for line in output.splitlines(): match = LSUSB_BUS_RE.match(line) if match: path = '/dev/bus/usb/%s/%s' % (match.group(1), match.group(2)) continue match = LSUSB_SERIAL_RE.match(line) if path and match and match.group(1): serial = match.group(1) devices.append(DEVICE(serial, path)) return devices def _get_device_path_for_serial(): devices = _get_usb_devices() for device in devices: if device_serial == device.serial: return device.path return None def _get_device_path_for_usb(): device_id = device_serial[len('usb:'):] bus_number = int( open('/sys/bus/usb/devices/%s/busnum' % device_id).read().strip()) device_number = int( open('/sys/bus/usb/devices/%s/devnum' % device_id).read().strip()) return '/dev/bus/usb/%03d/%03d' % (bus_number, device_number) if environment.is_android_cuttlefish(): return None device_serial = environment.get_value('ANDROID_SERIAL') if device_serial.startswith('usb:'): return _get_device_path_for_usb() return _get_device_path_for_serial() def reset_usb(): if environment.is_android_cuttlefish() or environment.is_android_emulator(): return True import fcntl try: device_path = get_device_path() except IOError: device_path = None if not device_path: device_path = environment.get_value('DEVICE_PATH') if not device_path: logs.log_warn('No device path found, unable to reset usb.') return False try: with open(device_path, 'w') as f: fcntl.ioctl(f, USBDEVFS_RESET) except: logs.log_warn('Failed to reset usb.') return False wait_for_device(recover=False) return True def revert_asan_device_setup_if_needed(): if not environment.get_value('ASAN_DEVICE_SETUP'): return device_id = environment.get_value('ANDROID_SERIAL') device_argument = '--device %s' % device_id revert_argument = '--revert' asan_device_setup_script_path = os.path.join( environment.get_platform_resources_directory(), 'third_party', 'asan_device_setup.sh') command = '%s %s %s' % (asan_device_setup_script_path, device_argument, revert_argument) execute_command(command, timeout=RECOVERY_CMD_TIMEOUT) def run_as_root(): if get_property('service.adb.root') == '1': return wait_for_device() run_command('root') wait_for_device()
Apache License 2.0
yandex-cloud/python-sdk
yandex/cloud/mdb/redis/v1/cluster_service_pb2_grpc.py
ClusterServiceServicer.Delete
python
def Delete(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
Deletes the specified Redis cluster.
https://github.com/yandex-cloud/python-sdk/blob/6ddaaaf0ad01d8fc36cb72957f70a6e7943a5ce7/yandex/cloud/mdb/redis/v1/cluster_service_pb2_grpc.py#L177-L182
import grpc from yandex.cloud.mdb.redis.v1 import cluster_pb2 as yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__pb2 from yandex.cloud.mdb.redis.v1 import cluster_service_pb2 as yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2 from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2 class ClusterServiceStub(object): def __init__(self, channel): self.Get = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Get', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.GetClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__pb2.Cluster.FromString, ) self.List = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/List', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClustersRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClustersResponse.FromString, ) self.Create = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Create', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.CreateClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Update = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Update', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.UpdateClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Delete = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Delete', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.DeleteClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Start = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Start', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.StartClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Stop = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Stop', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.StopClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Move = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Move', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.MoveClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Backup = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Backup', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.BackupClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Restore = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Restore', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.RestoreClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.RescheduleMaintenance 
= channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/RescheduleMaintenance', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.RescheduleMaintenanceRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.StartFailover = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/StartFailover', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.StartClusterFailoverRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.ListLogs = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/ListLogs', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterLogsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterLogsResponse.FromString, ) self.StreamLogs = channel.unary_stream( '/yandex.cloud.mdb.redis.v1.ClusterService/StreamLogs', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.StreamClusterLogsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.StreamLogRecord.FromString, ) self.ListOperations = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/ListOperations', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterOperationsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterOperationsResponse.FromString, ) self.ListBackups = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/ListBackups', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterBackupsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterBackupsResponse.FromString, ) self.ListHosts = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/ListHosts', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterHostsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterHostsResponse.FromString, ) self.AddHosts = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/AddHosts', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.AddClusterHostsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.DeleteHosts = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/DeleteHosts', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.DeleteClusterHostsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.GetShard = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/GetShard', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.GetClusterShardRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__pb2.Shard.FromString, ) self.ListShards = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/ListShards', 
request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterShardsRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.ListClusterShardsResponse.FromString, ) self.AddShard = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/AddShard', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.AddClusterShardRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.DeleteShard = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/DeleteShard', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.DeleteClusterShardRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) self.Rebalance = channel.unary_unary( '/yandex.cloud.mdb.redis.v1.ClusterService/Rebalance', request_serializer=yandex_dot_cloud_dot_mdb_dot_redis_dot_v1_dot_cluster__service__pb2.RebalanceClusterRequest.SerializeToString, response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString, ) class ClusterServiceServicer(object): def Get(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def List(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Create(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Update(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
MIT License
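A hedged usage sketch for the generated ClusterServiceStub above. The endpoint, channel and cluster ID are hypothetical placeholders, and the `cluster_id` field name is assumed from the public Yandex Cloud API; a real call would go through an authenticated TLS channel (typically built via the Yandex Cloud SDK helpers), not an insecure local one.

import grpc

from yandex.cloud.mdb.redis.v1 import cluster_service_pb2
from yandex.cloud.mdb.redis.v1 import cluster_service_pb2_grpc

# Hypothetical endpoint; production calls need TLS plus IAM credentials.
channel = grpc.insecure_channel("localhost:50051")
stub = cluster_service_pb2_grpc.ClusterServiceStub(channel)

# cluster_id value is a placeholder.
request = cluster_service_pb2.DeleteClusterRequest(cluster_id="my-cluster-id")
operation = stub.Delete(request)  # returns a long-running Operation message
print(operation.id, operation.done)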
gmum/huggingmolecules
src/huggingmolecules/models/models_grover.py
ReadoutSelfAttention.forward
python
def forward(self, X):
    x = torch.tanh(torch.matmul(self.w1, X.transpose(1, 0)))
    x = torch.matmul(self.w2, x)
    attn = torch.nn.functional.softmax(x, dim=-1)
    x = torch.matmul(attn, X)
    return x, attn
The forward function.

:param X: The input feature map, $X \in \mathbb{R}^{n \times \text{in\_feature}}$.
:return: The final embeddings and attention matrix.
https://github.com/gmum/huggingmolecules/blob/adc581c97fbc21d9967dd9334afa94b22fb77651/src/huggingmolecules/models/models_grover.py#L945-L955
import math from typing import Union, Dict, Type import numpy as np import scipy.stats as stats import torch import torch.nn as nn import torch.nn.functional as F from .models_api import PretrainedModelBase from .models_common_utils import MultiHeadedAttention, PositionwiseFeedForward, SublayerConnection, get_activation_function from .models_grover_utils import select_neighbor_and_aggregate from ..configuration import GroverConfig from ..featurization.featurization_grover import GroverBatchEncoding, GroverFeaturizer GROVER_MODEL_ARCH = { 'grover_base': 'https://drive.google.com/uc?id=1FbBQFRbEIItfCjumjvY_zepGq56dSClU', 'grover_large': 'https://drive.google.com/uc?id=1eQfPPWghmaq-dvWNO-bhom1UsovKn2h5' } class GroverModel(PretrainedModelBase[GroverBatchEncoding, GroverConfig]): @classmethod def _get_archive_dict(cls) -> dict: return GROVER_MODEL_ARCH @classmethod def get_config_cls(cls) -> Type[GroverConfig]: return GroverConfig @classmethod def get_featurizer_cls(cls) -> Type[GroverFeaturizer]: return GroverFeaturizer def __init__(self, config: GroverConfig): super(GroverModel, self).__init__(config) self.hidden_size = config.d_model self.grover = GroverEncoderWrapper(config) if config.readout_self_attention: self.readout = Readout(rtype="self_attention", hidden_size=self.hidden_size, attn_hidden=config.readout_attn_hidden, attn_out=config.readout_attn_out) else: self.readout = Readout(rtype="mean", hidden_size=self.hidden_size) self.mol_atom_from_atom_ffn = self.create_ffn(config) self.mol_atom_from_bond_ffn = self.create_ffn(config) self.init_weights(config.init_type) def create_ffn(self, config: GroverConfig): if config.ffn_features_only: first_linear_dim = config.features_size + config.ffn_d_features else: if config.readout_self_attention: first_linear_dim = config.d_model * config.readout_attn_out first_linear_dim += config.ffn_d_features else: first_linear_dim = config.d_model + config.ffn_d_features dropout = nn.Dropout(config.dropout) activation = get_activation_function(config.activation) if config.ffn_n_layers == 1: ffn = [ dropout, nn.Linear(first_linear_dim, config.readout_n_outputs) ] else: ffn = [ dropout, nn.Linear(first_linear_dim, config.ffn_d_hidden) ] for _ in range(config.ffn_n_layers - 2): ffn.extend([ activation, dropout, nn.Linear(config.ffn_d_hidden, config.ffn_d_hidden), ]) ffn.extend([ activation, dropout, nn.Linear(config.ffn_d_hidden, config.readout_n_outputs), ]) return nn.Sequential(*ffn) def forward(self, batch: GroverBatchEncoding): _, _, _, _, _, a_scope, _, _ = batch.get_components() output = self.grover(batch) mol_atom_from_bond_output = self.readout(output["atom_from_bond"], a_scope) mol_atom_from_atom_output = self.readout(output["atom_from_atom"], a_scope) atom_ffn_output = self.mol_atom_from_atom_ffn(mol_atom_from_atom_output) bond_ffn_output = self.mol_atom_from_bond_ffn(mol_atom_from_bond_output) return atom_ffn_output, bond_ffn_output class GroverEncoderWrapper(nn.Module): def __init__(self, config: GroverConfig): super(GroverEncoderWrapper, self).__init__() self.embedding_output_type = config.encoder_output_type edge_dim = config.d_bond + config.d_atom node_dim = config.d_atom if not hasattr(config, "backbone"): print("No backbone specified in args, use gtrans backbone.") config.backbone = "gtrans" if config.backbone == "gtrans" or config.backbone == "dualtrans": self.encoders = GroverEncoder(config, hidden_size=config.d_model, edge_fdim=edge_dim, node_fdim=node_dim, dropout=config.dropout, activation=config.activation, 
num_mt_block=config.encoder_n_blocks, num_attn_head=config.encoder_n_attn_heads, atom_emb_output=self.embedding_output_type, bias=config.encoder_attn_output_bias) def forward(self, batch: GroverBatchEncoding) -> Dict: output = self.encoders(batch) if self.embedding_output_type == 'atom': return {"atom_from_atom": output[0], "atom_from_bond": output[1], "bond_from_atom": None, "bond_from_bond": None} elif self.embedding_output_type == 'bond': return {"atom_from_atom": None, "atom_from_bond": None, "bond_from_atom": output[0], "bond_from_bond": output[1]} elif self.embedding_output_type == "both": return {"atom_from_atom": output[0][0], "bond_from_atom": output[0][1], "atom_from_bond": output[1][0], "bond_from_bond": output[1][1]} class GroverEncoder(nn.Module): def __init__(self, config: GroverConfig, hidden_size, edge_fdim, node_fdim, dropout=0.0, activation="ReLU", num_mt_block=1, num_attn_head=4, atom_emb_output: Union[bool, str] = False, bias=False, res_connection=False): super(GroverEncoder, self).__init__() if atom_emb_output is False: atom_emb_output = None if atom_emb_output is True: atom_emb_output = 'atom' self.hidden_size = hidden_size self.dropout = dropout self.activation = activation self.bias = bias self.res_connection = res_connection self.edge_blocks = nn.ModuleList() self.node_blocks = nn.ModuleList() edge_input_dim = edge_fdim node_input_dim = node_fdim edge_input_dim_i = edge_input_dim node_input_dim_i = node_input_dim for i in range(num_mt_block): if i != 0: edge_input_dim_i = self.hidden_size node_input_dim_i = self.hidden_size self.edge_blocks.append(MTBlock(config=config, num_attn_head=num_attn_head, input_dim=edge_input_dim_i, hidden_size=self.hidden_size, activation=activation, dropout=dropout, bias=self.bias, atom_messages=False)) self.node_blocks.append(MTBlock(config=config, num_attn_head=num_attn_head, input_dim=node_input_dim_i, hidden_size=self.hidden_size, activation=activation, dropout=dropout, bias=self.bias, atom_messages=True)) self.atom_emb_output = atom_emb_output self.ffn_atom_from_atom = PositionwiseFeedForward(d_input=self.hidden_size + node_fdim, d_hidden=self.hidden_size * 4, activation=self.activation, dropout=self.dropout, d_output=self.hidden_size, n_layers=2) self.ffn_atom_from_bond = PositionwiseFeedForward(d_input=self.hidden_size + node_fdim, d_hidden=self.hidden_size * 4, activation=self.activation, dropout=self.dropout, d_output=self.hidden_size, n_layers=2) self.ffn_bond_from_atom = PositionwiseFeedForward(d_input=self.hidden_size + edge_fdim, d_hidden=self.hidden_size * 4, activation=self.activation, dropout=self.dropout, d_output=self.hidden_size, n_layers=2) self.ffn_bond_from_bond = PositionwiseFeedForward(d_input=self.hidden_size + edge_fdim, d_hidden=self.hidden_size * 4, activation=self.activation, dropout=self.dropout, d_output=self.hidden_size, n_layers=2) self.atom_from_atom_sublayer = SublayerConnection(size=self.hidden_size, dropout=self.dropout) self.atom_from_bond_sublayer = SublayerConnection(size=self.hidden_size, dropout=self.dropout) self.bond_from_atom_sublayer = SublayerConnection(size=self.hidden_size, dropout=self.dropout) self.bond_from_bond_sublayer = SublayerConnection(size=self.hidden_size, dropout=self.dropout) self.act_func_node = get_activation_function(self.activation) self.act_func_edge = get_activation_function(self.activation) self.dropout_layer = nn.Dropout(p=config.dropout) def pointwise_feed_forward_to_atom_embedding(self, emb_output, atom_fea, index, ffn_layer): aggr_output = 
select_neighbor_and_aggregate(emb_output, index) aggr_outputx = torch.cat([atom_fea, aggr_output], dim=1) return ffn_layer(aggr_outputx), aggr_output def pointwise_feed_forward_to_bond_embedding(self, emb_output, bond_fea, a2nei, b2revb, ffn_layer): aggr_output = select_neighbor_and_aggregate(emb_output, a2nei) aggr_output = self.remove_rev_bond_message(emb_output, aggr_output, b2revb) aggr_outputx = torch.cat([bond_fea, aggr_output], dim=1) return ffn_layer(aggr_outputx), aggr_output @staticmethod def remove_rev_bond_message(orginal_message, aggr_message, b2revb): rev_message = orginal_message[b2revb] return aggr_message - rev_message def atom_bond_transform(self, to_atom=True, atomwise_input=None, bondwise_input=None, original_f_atoms=None, original_f_bonds=None, a2a=None, a2b=None, b2a=None, b2revb=None ): if to_atom: atomwise_input, _ = self.pointwise_feed_forward_to_atom_embedding(atomwise_input, original_f_atoms, a2a, self.ffn_atom_from_atom) atom_in_atom_out = self.atom_from_atom_sublayer(None, atomwise_input) bondwise_input, _ = self.pointwise_feed_forward_to_atom_embedding(bondwise_input, original_f_atoms, a2b, self.ffn_atom_from_bond) bond_in_atom_out = self.atom_from_bond_sublayer(None, bondwise_input) return atom_in_atom_out, bond_in_atom_out else: atom_list_for_bond = torch.cat([b2a.unsqueeze(dim=1), a2a[b2a]], dim=1) atomwise_input, _ = self.pointwise_feed_forward_to_bond_embedding(atomwise_input, original_f_bonds, atom_list_for_bond, b2a[b2revb], self.ffn_bond_from_atom) atom_in_bond_out = self.bond_from_atom_sublayer(None, atomwise_input) bond_list_for_bond = a2b[b2a] bondwise_input, _ = self.pointwise_feed_forward_to_bond_embedding(bondwise_input, original_f_bonds, bond_list_for_bond, b2revb, self.ffn_bond_from_bond) bond_in_bond_out = self.bond_from_bond_sublayer(None, bondwise_input) return atom_in_bond_out, bond_in_bond_out def forward(self, batch: GroverBatchEncoding, features_batch=None): f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a = batch.get_components() node_batch = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a edge_batch = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a original_f_atoms, original_f_bonds = f_atoms, f_bonds for nb in self.node_blocks: node_batch, features_batch = nb(node_batch, features_batch) for eb in self.edge_blocks: edge_batch, features_batch = eb(edge_batch, features_batch) atom_output, _, _, _, _, _, _, _ = node_batch _, bond_output, _, _, _, _, _, _ = edge_batch if self.atom_emb_output is None: return atom_output, bond_output if self.atom_emb_output == 'atom': return self.atom_bond_transform(to_atom=True, atomwise_input=atom_output, bondwise_input=bond_output, original_f_atoms=original_f_atoms, original_f_bonds=original_f_bonds, a2a=a2a, a2b=a2b, b2a=b2a, b2revb=b2revb) elif self.atom_emb_output == 'bond': return self.atom_bond_transform(to_atom=False, atomwise_input=atom_output, bondwise_input=bond_output, original_f_atoms=original_f_atoms, original_f_bonds=original_f_bonds, a2a=a2a, a2b=a2b, b2a=b2a, b2revb=b2revb) else: atom_embeddings = self.atom_bond_transform(to_atom=True, atomwise_input=atom_output, bondwise_input=bond_output, original_f_atoms=original_f_atoms, original_f_bonds=original_f_bonds, a2a=a2a, a2b=a2b, b2a=b2a, b2revb=b2revb) bond_embeddings = self.atom_bond_transform(to_atom=False, atomwise_input=atom_output, bondwise_input=bond_output, original_f_atoms=original_f_atoms, original_f_bonds=original_f_bonds, a2a=a2a, a2b=a2b, b2a=b2a, b2revb=b2revb) return ((atom_embeddings[0], 
bond_embeddings[0]), (atom_embeddings[1], bond_embeddings[1])) class MPNEncoder(nn.Module): def __init__(self, config: GroverConfig, atom_messages: bool, init_message_dim: int, attached_fea_fdim: int, hidden_size: int, bias: bool, depth: int, dropout: float, undirected: bool, dense: bool, aggregate_to_atom: bool, attach_fea: bool, input_layer="fc", dynamic_depth='none' ): super(MPNEncoder, self).__init__() self.init_message_dim = init_message_dim self.attached_fea_fdim = attached_fea_fdim self.hidden_size = hidden_size self.bias = bias self.depth = depth self.dropout = dropout self.input_layer = input_layer self.layers_per_message = 1 self.undirected = undirected self.atom_messages = atom_messages self.dense = dense self.aggreate_to_atom = aggregate_to_atom self.attached_fea = attach_fea self.dynamic_depth = dynamic_depth self.dropout_layer = nn.Dropout(p=self.dropout) self.act_func = get_activation_function(config.activation) if self.input_layer == "fc": input_dim = self.init_message_dim self.W_i = nn.Linear(input_dim, self.hidden_size, bias=self.bias) if self.attached_fea: w_h_input_size = self.hidden_size + self.attached_fea_fdim else: w_h_input_size = self.hidden_size self.W_h = nn.Linear(w_h_input_size, self.hidden_size, bias=self.bias) def forward(self, init_messages, init_attached_features, a2nei, a2attached, b2a=None, b2revb=None, adjs=None ) -> torch.FloatTensor: if self.input_layer == 'fc': input = self.W_i(init_messages) message = self.act_func(input) elif self.input_layer == 'none': input = init_messages message = input attached_fea = init_attached_features if self.training and self.dynamic_depth != "none": if self.dynamic_depth == "uniform": ndepth = np.random.randint(self.depth - 3, self.depth + 3) else: mu = self.depth sigma = 1 lower = mu - 3 * sigma upper = mu + 3 * sigma X = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma) ndepth = int(X.rvs(1)) else: ndepth = self.depth for _ in range(ndepth - 1): if self.undirected: message = (message + message[b2revb]) / 2 nei_message = select_neighbor_and_aggregate(message, a2nei) a_message = nei_message if self.attached_fea: attached_nei_fea = select_neighbor_and_aggregate(attached_fea, a2attached) a_message = torch.cat((nei_message, attached_nei_fea), dim=1) if not self.atom_messages: rev_message = message[b2revb] if self.attached_fea: atom_rev_message = attached_fea[b2a[b2revb]] rev_message = torch.cat((rev_message, atom_rev_message), dim=1) message = a_message[b2a] - rev_message else: message = a_message message = self.W_h(message) if self.dense: message = self.act_func(message) else: message = self.act_func(input + message) message = self.dropout_layer(message) output = message return output class MTBlock(nn.Module): def __init__(self, config: GroverConfig, num_attn_head, input_dim, hidden_size, activation="ReLU", dropout=0.0, bias=True, atom_messages=False, res_connection=False): super(MTBlock, self).__init__() self.atom_messages = atom_messages self.hidden_size = hidden_size self.heads = nn.ModuleList() self.input_dim = input_dim self.res_connection = res_connection self.act_func = get_activation_function(activation) self.dropout_layer = nn.Dropout(p=dropout) self.layernorm = nn.LayerNorm(self.hidden_size, elementwise_affine=True) self.W_i = nn.Linear(self.input_dim, self.hidden_size, bias=bias) self.attn = MultiHeadedAttention(h=num_attn_head, d_model=self.hidden_size, output_bias=bias, dropout=dropout, attention=GroverAttention()) self.W_o = nn.Linear(self.hidden_size * num_attn_head, 
self.hidden_size, bias=bias) self.sublayer = SublayerConnection(size=self.hidden_size, dropout=dropout) for _ in range(num_attn_head): self.heads.append(Head(config, hidden_size=hidden_size, atom_messages=atom_messages)) def forward(self, batch, features_batch=None): f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a = batch if self.atom_messages: if f_atoms.shape[1] != self.hidden_size: f_atoms = self.W_i(f_atoms) f_atoms = self.dropout_layer(self.layernorm(self.act_func(f_atoms))) else: if f_bonds.shape[1] != self.hidden_size: f_bonds = self.W_i(f_bonds) f_bonds = self.dropout_layer(self.layernorm(self.act_func(f_bonds))) queries = [] keys = [] values = [] for head in self.heads: q, k, v = head(f_atoms, f_bonds, a2b, a2a, b2a, b2revb) queries.append(q.unsqueeze(1)) keys.append(k.unsqueeze(1)) values.append(v.unsqueeze(1)) queries = torch.cat(queries, dim=1) keys = torch.cat(keys, dim=1) values = torch.cat(values, dim=1) x_out = self.attn(queries, keys, values) x_out = x_out.view(x_out.shape[0], -1) x_out = self.W_o(x_out) x_in = None if self.res_connection: if self.atom_messages: x_in = f_atoms else: x_in = f_bonds if self.atom_messages: f_atoms = self.sublayer(x_in, x_out) else: f_bonds = self.sublayer(x_in, x_out) batch = f_atoms, f_bonds, a2b, b2a, b2revb, a_scope, b_scope, a2a features_batch = features_batch return batch, features_batch class GroverAttention(nn.Module): def forward(self, query, key, value, mask=None, dropout=None): scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(query.size(-1)) if mask is not None: scores = scores.masked_fill(mask == 0, -1e9) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn class Head(nn.Module): def __init__(self, config: GroverConfig, hidden_size, atom_messages=False): super(Head, self).__init__() atom_fdim = hidden_size bond_fdim = hidden_size hidden_size = hidden_size self.atom_messages = atom_messages if self.atom_messages: init_message_dim = atom_fdim attached_fea_dim = bond_fdim else: init_message_dim = bond_fdim attached_fea_dim = atom_fdim self.mpn_q = MPNEncoder(config=config, atom_messages=atom_messages, init_message_dim=init_message_dim, attached_fea_fdim=attached_fea_dim, hidden_size=hidden_size, bias=config.encoder_attn_output_bias, depth=config.mpn_depth, dropout=config.dropout, undirected=config.mpn_undirected, dense=config.mpn_dense, aggregate_to_atom=False, attach_fea=False, input_layer="none", dynamic_depth="truncnorm") self.mpn_k = MPNEncoder(config=config, atom_messages=atom_messages, init_message_dim=init_message_dim, attached_fea_fdim=attached_fea_dim, hidden_size=hidden_size, bias=config.encoder_attn_output_bias, depth=config.mpn_depth, dropout=config.dropout, undirected=config.mpn_undirected, dense=config.mpn_dense, aggregate_to_atom=False, attach_fea=False, input_layer="none", dynamic_depth="truncnorm") self.mpn_v = MPNEncoder(config=config, atom_messages=atom_messages, init_message_dim=init_message_dim, attached_fea_fdim=attached_fea_dim, hidden_size=hidden_size, bias=config.encoder_attn_output_bias, depth=config.mpn_depth, dropout=config.dropout, undirected=config.mpn_undirected, dense=config.mpn_dense, aggregate_to_atom=False, attach_fea=False, input_layer="none", dynamic_depth="truncnorm") def forward(self, f_atoms, f_bonds, a2b, a2a, b2a, b2revb): if self.atom_messages: init_messages = f_atoms init_attached_features = f_bonds a2nei = a2a a2attached = a2b b2a = b2a b2revb = b2revb else: init_messages = f_bonds 
init_attached_features = f_atoms a2nei = a2b a2attached = a2a b2a = b2a b2revb = b2revb q = self.mpn_q(init_messages=init_messages, init_attached_features=init_attached_features, a2nei=a2nei, a2attached=a2attached, b2a=b2a, b2revb=b2revb) k = self.mpn_k(init_messages=init_messages, init_attached_features=init_attached_features, a2nei=a2nei, a2attached=a2attached, b2a=b2a, b2revb=b2revb) v = self.mpn_v(init_messages=init_messages, init_attached_features=init_attached_features, a2nei=a2nei, a2attached=a2attached, b2a=b2a, b2revb=b2revb) return q, k, v class Readout(nn.Module): def __init__(self, rtype: str = "none", hidden_size: int = 0, attn_hidden: int = None, attn_out: int = None, ): super(Readout, self).__init__() self.cached_zero_vector = nn.Parameter(torch.zeros(hidden_size), requires_grad=False) self.rtype = "mean" if rtype == "self_attention": self.attn = ReadoutSelfAttention(hidden=attn_hidden, in_feature=hidden_size, out_feature=attn_out) self.rtype = "self_attention" def forward(self, embeddings, scope): mol_vecs = [] self.attns = [] for _, (a_start, a_size) in enumerate(scope): if a_size == 0: mol_vecs.append(self.cached_zero_vector) else: cur_hiddens = embeddings.narrow(0, a_start, a_size) if self.rtype == "self_attention": cur_hiddens, attn = self.attn(cur_hiddens) cur_hiddens = cur_hiddens.flatten() else: cur_hiddens = cur_hiddens.sum(dim=0) / a_size mol_vecs.append(cur_hiddens) mol_vecs = torch.stack(mol_vecs, dim=0) return mol_vecs class ReadoutSelfAttention(nn.Module): def __init__(self, *, hidden, in_feature, out_feature): super(ReadoutSelfAttention, self).__init__() self.w1 = torch.nn.Parameter(torch.FloatTensor(hidden, in_feature)) self.w2 = torch.nn.Parameter(torch.FloatTensor(out_feature, hidden)) self.reset_parameters() def reset_parameters(self): nn.init.xavier_normal_(self.w1) nn.init.xavier_normal_(self.w2)
Apache License 2.0
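The readout above computes softmax(w2 · tanh(w1 · Xᵀ)) and uses it to pool the per-atom embeddings. A standalone sketch of that math with explicit shapes, using plain tensors instead of the module (the sizes are illustrative, not taken from the repository config):

import torch

n, in_feature, hidden, out_feature = 7, 16, 8, 4  # illustrative sizes

X = torch.randn(n, in_feature)  # per-atom embeddings for one molecule
w1 = torch.nn.init.xavier_normal_(torch.empty(hidden, in_feature))
w2 = torch.nn.init.xavier_normal_(torch.empty(out_feature, hidden))

scores = torch.matmul(w2, torch.tanh(torch.matmul(w1, X.t())))  # (out_feature, n)
attn = torch.softmax(scores, dim=-1)                            # rows sum to 1 over the n atoms
pooled = torch.matmul(attn, X)                                  # (out_feature, in_feature)

print(pooled.shape, attn.shape)  # torch.Size([4, 16]) torch.Size([4, 7])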
corakwue/ftrace
ftrace/common.py
filter_by_task
python
def filter_by_task(iterable, attr, value, how='first'):
    def filter_func(event):
        try:
            return getattr(event.task, attr, None) == value
        except AttributeError:
            return getattr(event.event.task, attr, None) == value
        except:
            return False

    filtered = ifilter(filter_func, iterable)
    rv = None
    try:
        if how in ('any', 'all'):
            rv = iter(filtered)
        elif how == 'first':
            rv = filtered.next()
        elif how == 'last':
            for rv in filtered:
                pass
    except:
        rv = None
    finally:
        return rv
Filter iterable to objects whose `attr` has `value`.

Parameters
----------
iterable : iterable
    Iterable <list, set> object
attr : string
    Name of attribute to compare.
value : object
    Value to filter by.
how : string
    Which events to return. Valid args: 'any'/'all', 'first', 'last'
https://github.com/corakwue/ftrace/blob/d46139a562418146087f11f25528e0ab29c1c2c2/ftrace/common.py#L100-L137
import sys import types import math import functools from .third_party.enum.enum import Enum, unique from itertools import ifilter from .utils.decorators import memoize def is_list_like(arg): return (hasattr(arg, '__iter__') and not isinstance(arg, str)) @unique class ConstantBase(Enum): def __new__(cls): value = len(cls.__members__) + 1 obj = object.__new__(cls) obj._value_ = value return obj def __str__(self): return self.__repr__() def __repr__(self): return self.name def describe(self): return self.name, self.value @classmethod def universe(cls): return cls.__members__.values() @classmethod def exclude(cls, items): if is_list_like(items): return cls.universe() - items universe_list = cls.universe() universe_list.remove(items) return universe_list @classmethod @memoize def map(cls, name): for item in cls.universe(): if item.name == name: return item return def bind_method(cls, name, func): if not sys.version_info[0] >= 3: setattr(cls, name, types.MethodType(func, None, cls)) else: setattr(cls, name, func)
Apache License 2.0
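To illustrate the semantics of the helper above, here is a hedged Python 3 stand-in with made-up event objects: it keeps the `attr`/`value`/`how` behaviour but uses the built-in `filter()`/`next()` instead of the Python 2 `ifilter()`/`.next()`, and omits the nested `event.event.task` fallback for brevity.

from collections import namedtuple

Task = namedtuple("Task", ["name", "pid"])
Event = namedtuple("Event", ["task", "timestamp"])

events = [
    Event(Task("surfaceflinger", 120), 1.0),
    Event(Task("kworker/0:1", 35), 2.0),
    Event(Task("surfaceflinger", 120), 3.0),
]

def filter_by_task_py3(iterable, attr, value, how="first"):
    # Same intent as the library helper, simplified for Python 3.
    filtered = filter(lambda e: getattr(e.task, attr, None) == value, iterable)
    if how in ("any", "all"):
        return iter(filtered)
    if how == "first":
        return next(filtered, None)
    if how == "last":
        rv = None
        for rv in filtered:
            pass
        return rv
    return None

print(filter_by_task_py3(events, "name", "surfaceflinger", how="last").timestamp)  # 3.0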
amzn/differential-privacy-bayesian-optimization
experiments/output_perturbation/scikit-learn/sklearn/_build_utils/openmp_helpers.py
check_openmp_support
python
def check_openmp_support():
    ccompiler = new_compiler()
    customize_compiler(ccompiler)

    if os.getenv('SKLEARN_NO_OPENMP'):
        return False

    start_dir = os.path.abspath('.')

    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            os.chdir(tmp_dir)

            with open('test_openmp.c', 'w') as f:
                f.write(CCODE)

            os.mkdir('objects')

            openmp_flags = get_openmp_flag(ccompiler)

            ccompiler.compile(['test_openmp.c'], output_dir='objects',
                              extra_postargs=openmp_flags)

            extra_preargs = os.getenv('LDFLAGS', None)
            if extra_preargs is not None:
                extra_preargs = extra_preargs.split(" ")
            else:
                extra_preargs = []

            objects = glob.glob(
                os.path.join('objects', '*' + ccompiler.obj_extension))

            ccompiler.link_executable(objects, 'test_openmp',
                                      extra_preargs=extra_preargs,
                                      extra_postargs=openmp_flags)

            output = subprocess.check_output('./test_openmp')
            output = output.decode(sys.stdout.encoding or 'utf-8').splitlines()

            if 'nthreads=' in output[0]:
                nthreads = int(output[0].strip().split('=')[1])
                openmp_supported = (len(output) == nthreads)
            else:
                openmp_supported = False

        except (CompileError, LinkError, subprocess.CalledProcessError):
            openmp_supported = False

        finally:
            os.chdir(start_dir)

    err_message = textwrap.dedent(
        """
        ***

        It seems that scikit-learn cannot be built with OpenMP support.

        - Make sure you have followed the installation instructions:
            https://scikit-learn.org/dev/developers/advanced_installation.html

        - If your compiler supports OpenMP but the build still fails, please
          submit a bug report at:
            https://github.com/scikit-learn/scikit-learn/issues

        - If you want to build scikit-learn without OpenMP support, you can set
          the environment variable SKLEARN_NO_OPENMP and rerun the build
          command. Note however that some estimators will run in sequential
          mode and their `n_jobs` parameter will have no effect anymore.

        ***
        """)

    if not openmp_supported:
        raise CompileError(err_message)

    return True
Check whether OpenMP test code can be compiled and run
https://github.com/amzn/differential-privacy-bayesian-optimization/blob/4f3c98d4b747e22ac4890089f46fd79137235492/experiments/output_perturbation/scikit-learn/sklearn/_build_utils/openmp_helpers.py#L60-L142
import os import sys import glob import tempfile import textwrap import subprocess from numpy.distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler from distutils.errors import CompileError, LinkError CCODE = textwrap.dedent( """\ #include <omp.h> #include <stdio.h> int main(void) { #pragma omp parallel printf("nthreads=%d\\n", omp_get_num_threads()); return 0; } """) def get_openmp_flag(compiler): if hasattr(compiler, 'compiler'): compiler = compiler.compiler[0] else: compiler = compiler.__class__.__name__ if sys.platform == "win32" and ('icc' in compiler or 'icl' in compiler): return ['/Qopenmp'] elif sys.platform == "win32": return ['/openmp'] elif sys.platform == "darwin" and ('icc' in compiler or 'icl' in compiler): return ['-openmp'] elif sys.platform == "darwin" and 'openmp' in os.getenv('CPPFLAGS', ''): return [] return ['-fopenmp']
Apache License 2.0
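The core of the check above is that the compiled OpenMP test program prints "nthreads=N" once per thread, so support is confirmed when the number of printed lines equals N. A minimal sketch of just that parsing step, using canned program output instead of actually compiling and running test_openmp:

# Canned output standing in for subprocess.check_output('./test_openmp').
sample_output = "nthreads=4\nnthreads=4\nnthreads=4\nnthreads=4"

lines = sample_output.splitlines()
if lines and 'nthreads=' in lines[0]:
    nthreads = int(lines[0].strip().split('=')[1])
    openmp_supported = (len(lines) == nthreads)
else:
    openmp_supported = False

print(openmp_supported)  # True: 4 lines printed by 4 OpenMP threads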
autonomousvision/convolutional_occupancy_networks
src/checkpoints.py
CheckpointIO.load
python
def load(self, filename):
    if is_url(filename):
        return self.load_url(filename)
    else:
        return self.load_file(filename)
Loads a module dictionary from local file or url.

Args:
    filename (str): name of saved module dictionary
https://github.com/autonomousvision/convolutional_occupancy_networks/blob/f44d413f8d455657a44c24d06163934c69141a09/src/checkpoints.py#L40-L49
import os import urllib import torch from torch.utils import model_zoo class CheckpointIO(object): def __init__(self, checkpoint_dir='./chkpts', **kwargs): self.module_dict = kwargs self.checkpoint_dir = checkpoint_dir if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) def register_modules(self, **kwargs): self.module_dict.update(kwargs) def save(self, filename, **kwargs): if not os.path.isabs(filename): filename = os.path.join(self.checkpoint_dir, filename) outdict = kwargs for k, v in self.module_dict.items(): outdict[k] = v.state_dict() torch.save(outdict, filename)
MIT License
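A hedged usage sketch for CheckpointIO above: register a model and optimizer, save a checkpoint, then load it. Note that `load()` dispatches to `load_file()` or `load_url()`, which (like `is_url`) are defined elsewhere in the class and not shown here; restoring the registered modules' state is assumed from the full class.

import torch
import torch.nn as nn

from src.checkpoints import CheckpointIO  # path per the repository layout above

model = nn.Linear(8, 2)
optimizer = torch.optim.Adam(model.parameters())

ckpt = CheckpointIO(checkpoint_dir="./chkpts", model=model, optimizer=optimizer)
ckpt.save("model.pt", epoch=3)  # stores each module's state_dict plus extra kwargs

# Later: dispatches to load_file() for a local name, load_url() for an http(s) URL.
ckpt.load("model.pt")  # assumed (per the full class, not shown) to restore module state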
tlc-pack/tenset
tests/python/contrib/test_arm_compute_lib/test_pooling.py
_get_pooling_model
python
def _get_pooling_model(
    shape, dtype, typef, sizes, strides, padding, ceil_mode, count_include_pad, var_names
):
    if len(padding) == 2:
        padding = (padding[0], padding[1], padding[0], padding[1])
    out = relay.var(next(var_names), shape=shape, dtype=dtype)

    if typef == "nn.max_pool2d":
        out = relay.nn.max_pool2d(
            out,
            pool_size=sizes,
            strides=strides,
            padding=padding,
            ceil_mode=ceil_mode,
            layout="NHWC",
        )
    elif typef == "nn.avg_pool2d":
        if dtype == "uint8":
            out = relay.cast(out, "int32")
        out = relay.nn.avg_pool2d(
            out,
            pool_size=sizes,
            strides=strides,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            layout="NHWC",
        )
        if dtype == "uint8":
            out = relay.cast(out, "uint8")
    elif typef == "nn.l2_pool2d":
        out = relay.power(out, relay.const(2.0))
        out = relay.nn.avg_pool2d(
            out,
            pool_size=sizes,
            strides=strides,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            layout="NHWC",
        )
        out = relay.sqrt(out)
    else:
        raise ValueError("Function not supported")

    return out
Return a model and any parameters it may have.
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/tests/python/contrib/test_arm_compute_lib/test_pooling.py#L42-L88
import numpy as np import tvm from tvm import relay from tvm import testing from test_arm_compute_lib.infrastructure import ( skip_runtime_test, skip_codegen_test, build_and_run, verify, verify_codegen, ) from test_arm_compute_lib.infrastructure import Device def _calculate_output_shape(shape, sizes, padding, strides): output_height = ((shape[1] - sizes[0] + padding[0] + padding[2]) / strides[0]) + 1 output_width = ((shape[2] - sizes[1] + padding[1] + padding[3]) / strides[1]) + 1 return 1, int(output_height), int(output_width), shape[3]
Apache License 2.0
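The helper above only builds a Relay expression. A minimal standalone sketch, assuming a recent TVM install, that constructs an NHWC max-pool the same way the "nn.max_pool2d" branch does and prints the inferred output type (shape and pooling parameters are illustrative):

import tvm
from tvm import relay

data = relay.var("a", shape=(1, 27, 27, 512), dtype="float32")
out = relay.nn.max_pool2d(
    data,
    pool_size=(3, 3),
    strides=(2, 2),
    padding=(0, 0, 0, 0),
    ceil_mode=True,
    layout="NHWC",
)

func = relay.Function(relay.analysis.free_vars(out), out)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
print(mod["main"].ret_type)  # roughly: Tensor[(1, 13, 13, 512), float32]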
thomasgerstenberg/blatann
blatann/gatt/gatts_attribute.py
GattsAttribute.parent
python
def parent(self) -> GattsCharacteristic:
    return self._parent
**Read Only** Gets the parent characteristic which owns this attribute
https://github.com/thomasgerstenberg/blatann/blob/86366bc38d84f3ddb22314aa1ca50ee28be8b916/blatann/gatt/gatts_attribute.py#L66-L72
from __future__ import annotations import typing import binascii import logging from collections import namedtuple from blatann.exceptions import InvalidOperationException from blatann.gatt import Attribute from blatann.services.ble_data_types import BleDataStream from blatann.event_args import WriteEventArgs from blatann.nrf import nrf_events, nrf_types from blatann.event_type import EventSource, Event from blatann import gatt from blatann.uuid import Uuid if typing.TYPE_CHECKING: from blatann.peer import Peer from blatann.device import BleDevice from blatann.gatt.gatts import GattsCharacteristic logger = logging.getLogger(__name__) class GattsAttributeProperties(object): def __init__(self, read=True, write=False, security_level=gatt.SecurityLevel.OPEN, max_length=20, variable_length=True, read_auth=False, write_auth=False): self.read = read self.write = write self.security_level = security_level self.max_len = max_length self.variable_length = variable_length self.read_auth = read_auth self.write_auth = write_auth class GattsAttribute(Attribute): _QueuedChunk = namedtuple("QueuedChunk", ["offset", "data"]) def __init__(self, ble_device: BleDevice, peer: Peer, parent: GattsCharacteristic, uuid: Uuid, handle: int, properties: GattsAttributeProperties, initial_value=b"", string_encoding="utf8"): super(GattsAttribute, self).__init__(uuid, handle, initial_value, string_encoding) self._ble_device = ble_device self._peer = peer self._parent = parent self._properties = properties self._on_write = EventSource("Write Event", logger) self._on_read = EventSource("Read Event", logger) if properties.write: self._ble_device.ble_driver.event_subscribe(self._on_gatts_write, nrf_events.GattsEvtWrite) if properties.read_auth or properties.write_auth: self._ble_device.ble_driver.event_subscribe(self._on_rw_auth_request, nrf_events.GattsEvtReadWriteAuthorizeRequest) self._write_queued = False self._read_in_process = False self._queued_write_chunks = [] @property
BSD 3-Clause New or Revised License
hewlettpackard/dlcookbook-dlbs
python/mxnet_benchmarks/models/deep_speech2.py
DeepSpeech2.shape
python
def shape(self, v):
    return str(v.infer_shape(data=self.__data_shape)[1])
Return shape of v's output tensor.
https://github.com/hewlettpackard/dlcookbook-dlbs/blob/42b9ce648c2bbfa0f306e95022b80f1fe9230dce/python/mxnet_benchmarks/models/deep_speech2.py#L329-L331
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import math import logging import itertools import numpy as np import mxnet as mx from mxnet_benchmarks.models.model import Model from mxnet_benchmarks.contrib.ctc_metrics import CtcMetrics class DeepSpeech2(Model): implements = 'deep_speech2' CONV_ARCHS = [ '1-layer-1D', '2-layer-1D', '3-layer-1D', '1-layer-2D', '2-layer-2D', '3-layer-2D', '2-layer-2D-v2' ] RNN_TYPES = ['rnn_relu', 'rnn_tanh', 'lstm', 'gru'] BRNN_OUTPUT = ['concat', 'sum'] CTC_LOSSES = ['mxnet_ctc_loss', 'warp_ctc_loss'] @property def output(self): return self.__output @property def batch_size(self): return self.__batch_size @property def output_length(self): return self.__output_length def __init__(self, params): Model.check_parameters( params, { 'name': 'DeepSpeech2', 'input_shape': (1, 200, 161), 'num_classes': 29*29 + 1, 'phase': 'training', 'dtype': 'float32', 'model_opts': {} } ) Model.check_parameters( params['model_opts'], { 'conv_batch_norm': True, 'conv_arch': '2-layer-2D-v2', 'num_rnn_layers': 3, 'rnn_layer_size': 2048, 'bidirectional': True, 'rnn_type': 'rnn_relu', 'rnn_batch_norm': False, 'brnn_share_i2h': False, 'brnn_output': 'concat', 'rnn_dropout': 0.0, 'ctc_loss': 'mxnet_ctc_loss' } ) Model.__init__(self, params) self.__batch_size = params['batch_size'] self.__output_length = 0 self.__data_shape = (self.batch_size,) + self.input_shape self.__debug = logging.getLogger().isEnabledFor(logging.DEBUG) or os.environ.get('DLBS_DEBUG', '0') == '1' if self.model_opts['conv_arch'] not in DeepSpeech2.CONV_ARCHS: raise "Invalid conv arch ('%s'), must be one of '%s'" % (self.model_opts['conv_arch'], str(DeepSpeech2.CONV_ARCHS)) if self.model_opts['rnn_type'] not in DeepSpeech2.RNN_TYPES: raise "Invalid RNN type ('%s'), must be one of '%s'" % (self.model_opts['rnn_type'], str(DeepSpeech2.RNN_TYPES)) if self.model_opts['brnn_output'] not in DeepSpeech2.BRNN_OUTPUT: raise "Invalid BRNN output function ('%s'), must be one of '%s'" % (self.model_opts['brnn_output'], str(DeepSpeech2.BRNN_OUTPUT)) if self.model_opts['ctc_loss'] not in DeepSpeech2.CTC_LOSSES: raise "Invalid ctc loss ('%s'), must be one of '%s'" % (self.model_opts['ctc_loss'], str(DeepSpeech2.CTC_LOSSES)) if self.model_opts['rnn_batch_norm'] is True: self.model_opts['rnn_batch_norm'] = False print("[WARNING] Batch norm is not supported in RNNs.") if self.model_opts['brnn_share_i2h'] is True: self.model_opts['brnn_share_i2h'] = False print("[WARNING] Sharing input2hidden weights in BRNNs is not supported.") print("Model options: " + str(self.model_opts)) logging.debug("Batch size: %d", self.batch_size) logging.debug("Input length: %d", self.input_shape[1]) logging.debug("Num input features: %d", self.input_shape[2]) v = self.add_data_node() self.log_shape("Input shape: %s", v) v, length = self.add_conv_layers(v) v, nrnn_features = self.add_rnn_layers(v, length) v = mx.sym.Reshape(data=v, shape=(-1, nrnn_features)) self.log_shape("FC input shape: %s", v) v = mx.sym.FullyConnected(data=v, num_hidden=self.num_classes) self.log_shape("FC output shape: %s", v) if self.dtype == 'float16': print("Casting logits to np.float32") v = mx.sym.cast(data=v, dtype=np.float32) if self.phase == 'training': v_ctc = mx.sym.Reshape(data=v, shape=(length, self.batch_size, self.num_classes)) labels = mx.sym.Variable(name="softmax_label", shape=(self.batch_size, length), init=mx.init.Zero()) self.log_shape("CTC input shape: %s", v_ctc) if self.model_opts['ctc_loss'] 
== 'warp_ctc_loss': print("Using Baidu's Warp CTC Loss.") print("[WARNING] WarpCTC was not tested and may not work.") try: v = mx.symbol.WarpCTC(data=v_ctc, label=labels) except AttributeError: print("[ERROR] WarpCTC symbol is not available. Recompile MXNET with WarpCTC support.") raise else: print("Using CTCLoss from mx.symbol.contrib.") ctc_loss = mx.sym.MakeLoss(mx.symbol.contrib.CTCLoss(data=v_ctc, label=labels, name='ctc')) predictions = mx.sym.MakeLoss(mx.sym.SoftmaxActivation(data=v, name='softmax')) v = mx.sym.Group([mx.sym.BlockGrad(predictions), ctc_loss]) else: v = mx.symbol.softmax(data=v, name='softmax') self.log_shape("Output shape: %s", v) self.__output = v self.__output_length = length self._labels_shape = (self.__output_length, ) self._labels_range = (1, self.num_classes) self.__ctc_metrics = CtcMetrics(seq_len=self.__output_length) self._eval_metric = mx.metric.CustomMetric(feval=self.__ctc_metrics.accuracy, name='ctc_metric', allow_extra_outputs=True) def add_conv_layers(self, v): length = self.input_shape[1] nfeatures = self.input_shape[2] defs = { '1-layer-1D': {'channels': [1280], 'filters': [(11, nfeatures)], 'strides': [(2, 1)], 'pads': [(0, 0)]}, '2-layer-1D': {'channels': [640, 640], 'filters': [(5, nfeatures), (5, 1)], 'strides': [(1, 1), (2, 1)], 'pads': [(0, 0), (0, 0)]}, '3-layer-1D': {'channels': [512, 512, 512], 'filters': [(5, nfeatures), (5, 1), (5, 1)], 'strides': [(1, 1), (1, 1), (2, 1)], 'pads': [(0, 0), (0, 0), (0, 0)]}, '1-layer-2D': {'channels': [32], 'filters': [(11, 41)], 'strides': [(2, 2)], 'pads': [(0, 0)]}, '2-layer-2D': {'channels': [32, 32], 'filters': [(11, 41), (11, 21)], 'strides': [(2, 2), (1, 2)], 'pads': [(0, 0), (0, 0)]}, '3-layer-2D': {'channels': [32, 32, 96], 'filters': [(11, 41), (11, 21), (11, 21)], 'strides': [(2, 2), (1, 2), (1, 2)], 'pads': [(0, 0), (0, 0), (0, 0)]}, '2-layer-2D-v2': {'channels': [32, 32], 'filters': [(11, 41), (11, 21)], 'strides': [(3, 2), (1, 2)], 'pads': [(5, 20), (5, 10)]} } arch = defs[self.model_opts['conv_arch']] for i in range(len(arch['filters'])): name = 'conv%d' % i v = DeepSpeech2.conv_bn( name, v, kernel=arch['filters'][i], stride=arch['strides'][i], num_channels_out=arch['channels'][i], pad=arch['pads'][i], batch_norm=self.model_opts['conv_batch_norm']) length = int(math.floor((length + 2*arch['pads'][i][0] - arch['filters'][i][0])/arch['strides'][i][0])) + 1 self.log_shape("Conv '" + name + "' output shape: %s", v) logging.debug("Utterance length after conv layers is %d", length) return v, length def add_rnn_layers(self, v, length): def _begin_state(rnn_cell, func=mx.sym.zeros, **kwargs): if self.dtype == 'float32': return None assert not rnn_cell._modified, "After applying modifier cells (e.g. DropoutCell) the base " "cell cannot be called directly. Call the modifier cell instead." 
states = [] for info in rnn_cell.state_info: rnn_cell._init_counter += 1 if info is None: state = func(name='%sbegin_state_%d' % (rnn_cell._prefix, rnn_cell._init_counter), dtype=np.float16, **kwargs) else: kwargs.update(info) state = func(name='%sbegin_state_%d' % (rnn_cell._prefix, rnn_cell._init_counter), dtype=np.float16, **kwargs) states.append(state) return states rnn_cell = mx.rnn.FusedRNNCell( num_hidden=self.model_opts['rnn_layer_size'], num_layers=self.model_opts['num_rnn_layers'], bidirectional=self.model_opts['bidirectional'], mode=self.model_opts['rnn_type'], prefix='rnn', dropout=self.model_opts['rnn_dropout'] ) v = mx.sym.Reshape(data=v, shape=(length, self.batch_size, -1)) self.log_shape("RNN input shape: %s", v) v, _ = rnn_cell.unroll( length=length, inputs=v, begin_state=_begin_state(rnn_cell), layout='TNC', merge_outputs=True ) self.log_shape("RNN output shape: %s", v) nfeatures = self.model_opts['rnn_layer_size'] if self.model_opts['bidirectional']: if self.model_opts['brnn_output'] == 'sum': outputs = mx.sym.split(data=v, num_outputs=2, axis=2) v = outputs[0] + outputs[1] else: nfeatures = nfeatures * 2 return v, nfeatures @staticmethod def conv_bn(name, input, kernel, stride, num_channels_out, pad=(0, 0), batch_norm=False, activation='relu'): logging.debug("Adding convolution layer kernel=%s, stride=%s, num_filters=%d, padding=%s", str(kernel), str(stride), num_channels_out, str(pad)) v = mx.symbol.Convolution(name=name+'_conv', data=input, kernel=kernel, stride=stride, num_filter=num_channels_out, no_bias=batch_norm is True, pad=pad) if batch_norm: logging.debug("Adding batch norm layer") v = mx.sym.BatchNorm(name=name+"_bn", data=v, fix_gamma=False, eps=2e-5, momentum=0.9) if activation: logging.debug("Adding activation layer '%s'", activation) v = mx.symbol.Activation(name=name+'_act', data=v, act_type=activation) return v
Apache License 2.0
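The shape helper above relies on MXNet's Symbol.infer_shape, which returns a tuple (arg_shapes, out_shapes, aux_shapes); the model stringifies element [1], i.e. the output shapes. A small standalone sketch of that call on a toy symbol (sizes are illustrative):

import mxnet as mx

data = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=data, num_hidden=29, name='fc')

# infer_shape returns (arg_shapes, out_shapes, aux_shapes).
arg_shapes, out_shapes, aux_shapes = net.infer_shape(data=(16, 161))
print(str(out_shapes))  # [(16, 29)]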
rlenglet/openfaucet
src/openfaucet/ofcontroller.py
OpenflowControllerStub.get_stats_flow
python
def get_stats_flow(self, match, table_id, out_port, callback,
                   timeout_callback=None, timeout=None):
    xid = self.initiate_operation(
        callback, timeout_callback, timeout=timeout,
        cookie=(ofproto.OFPT_STATS_REQUEST, ofproto.OFPST_FLOW))
    self.send_stats_request_flow(xid, match, table_id, out_port)
Request individual flow stats.

This operation is asynchronous: the flow stats are passed back by calling
the given callback as:
    callback(flow_stats, reply_more)
with arguments:
    flow_stats: A tuple of FlowStats objects containing each the stats for
        an individual flow.
    reply_more: If True, more callbacks will be made to the callable after
        this one to completely terminate this operation. If False, this is
        the last callback in this operation.

Args:
    match: A Match object describing the fields of the flows to match.
    table_id: The ID of the table to read, as an 8-bit unsigned integer.
        0xff for all tables or 0xfe for emergency.
    out_port: Require matching flows to include this as an output port.
        A value of OFPP_NONE indicates no restriction.
    callback: The callable to be called with the replied data.
    timeout_callback: The callable to be called in case the operation times
        out.
    timeout: The period, in seconds, before the operation times out. If
        None, defaults to the default timeout.
https://github.com/rlenglet/openfaucet/blob/4ef1783fc74320e66ee7a71576dc91511f238a81/src/openfaucet/ofcontroller.py#L677-L712
import collections import logging import threading import weakref import twisted.internet.reactor from zope import interface from openfaucet import oferror from openfaucet import ofproto from openfaucet import ofprotoops class IOpenflowController(interface.Interface): def connection_made(): def connection_lost(reason): def handle_packet_in(buffer_id, total_len, in_port, reason, data): def handle_flow_removed( match, cookie, priority, reason, duration_sec, duration_nsec, idle_timeout, packet_count, byte_count): def handle_port_status(reason, desc): class IOpenflowControllerStub(interface.Interface): def get_features(callback, timeout_callback=None, timeout=None): features = interface.Attribute( """The SwitchFeatures object describing the switch features. None if the handshake with the datapath has not yet been completed. This SwitchFeatures object is automatically updated by this stub when any port status changes. """) def get_config(callback, timeout_callback=None, timeout=None): def send_set_config(switch_config): def send_packet_out(buffer_id, in_port, actions, data): def send_flow_mod_add( match, cookie, idle_timeout, hard_timeout, priority, buffer_id, send_flow_rem, check_overlap, emerg, actions): def send_flow_mod_modify( strict, match, cookie, idle_timeout, hard_timeout, priority, buffer_id, send_flow_rem, check_overlap, emerg, actions): def send_flow_mod_delete(strict, match, priority, out_port): def send_port_mod(port_no, hw_addr, config, mask, advertise): def get_stats_desc(callback, timeout_callback=None, timeout=None): def get_stats_flow(match, table_id, out_port, callback, timeout_callback=None, timeout=None): def get_stats_aggregate(match, table_id, out_port, callback, timeout_callback=None, timeout=None): def get_stats_table(callback, timeout_callback=None, timeout=None): def get_stats_port(port_no, callback, timeout_callback=None, timeout=None): def get_stats_queue(port_no, queue_id, callback, timeout_callback=None, timeout=None): def barrier(callback, timeout_callback=None, timeout=None): def get_queue_config(port_no, callback, timeout_callback=None, timeout=None): def raise_error_with_request(error_type, error_code): class OpenflowControllerStub(ofprotoops.OpenflowProtocolOperations): interface.implements(IOpenflowControllerStub) def __init__(self): ofprotoops.OpenflowProtocolOperations.__init__(self) self._features = None self._features_lock = threading.Lock() def connectionMade(self): ofprotoops.OpenflowProtocolOperations.connectionMade(self) self._features = None self.logger.info('controller stub configuration: controller=%r', self.controller, extra=self.log_extra) self.get_features(None, timeout_callback=self.transport.loseConnection) def connectionLost(self, reason): ofprotoops.OpenflowProtocolOperations.connectionLost(self, reason) self.controller.connection_lost(reason) @property def features(self): with self._features_lock: return self._features def get_features(self, callback, timeout_callback=None, timeout=None): xid = self.initiate_operation( callback, timeout_callback, timeout=timeout, cookie=ofproto.OFPT_FEATURES_REQUEST) self.send_features_request(xid) def get_config(self, callback, timeout_callback=None, timeout=None): xid = self.initiate_operation( callback, timeout_callback, timeout=timeout, cookie=ofproto.OFPT_GET_CONFIG_REQUEST) self.send_get_config_request(xid) def get_stats_desc(self, callback, timeout_callback=None, timeout=None): xid = self.initiate_operation( callback, timeout_callback, timeout=timeout, cookie=(ofproto.OFPT_STATS_REQUEST, 
ofproto.OFPST_DESC)) self.send_stats_request_desc(xid)
Apache License 2.0
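Because get_stats_flow is callback-driven, a caller mainly supplies callables matching the documented (flow_stats, reply_more) contract. A hedged sketch of such callbacks; the actual call is left commented out since it needs a connected OpenflowControllerStub and a Match object (both hypothetical here), and it assumes ofproto exposes the OpenFlow 1.0 constant OFPP_NONE.

def on_flow_stats(flow_stats, reply_more):
    # flow_stats is a tuple of FlowStats objects for individual flows.
    for fs in flow_stats:
        print(fs)
    if not reply_more:
        print('flow stats operation complete')

def on_timeout():
    print('flow stats request timed out')

# With `proto` a connected OpenflowControllerStub and `match` a Match object
# (hypothetical placeholders), the request would look like:
# proto.get_stats_flow(match, table_id=0xff, out_port=ofproto.OFPP_NONE,
#                      callback=on_flow_stats, timeout_callback=on_timeout,
#                      timeout=5.0)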
artyompal/tpu_models
models/official/retinanet/retinanet_model.py
_predict_postprocess
python
def _predict_postprocess(cls_outputs, box_outputs, labels, params):
    predict_anchors = anchors.Anchors(
        params['min_level'], params['max_level'], params['num_scales'],
        params['aspect_ratios'], params['anchor_scale'], params['image_size'])
    cls_outputs, box_outputs, anchor_boxes = postprocess.reshape_outputs(
        cls_outputs, box_outputs, predict_anchors.boxes, params['min_level'],
        params['max_level'], params['num_classes'])
    boxes, scores, classes, num_detections = postprocess.generate_detections(
        cls_outputs, box_outputs, anchor_boxes)

    predictions = {
        'detection_boxes': boxes,
        'detection_classes': classes,
        'detection_scores': scores,
        'num_detections': num_detections,
    }

    if labels is not None:
        predictions.update({
            'image_info': labels['image_info'],
            'source_id': labels['source_ids'],
            'groundtruth_data': labels['groundtruth_data'],
        })
    return predictions
Post processes prediction outputs.
https://github.com/artyompal/tpu_models/blob/639306f30e085bb1cdb5b1118a4c96a2dbe14e3e/models/official/retinanet/retinanet_model.py#L331-L356
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import anchors import coco_metric import postprocess import retinanet_architecture _DEFAULT_BATCH_SIZE = 64 _WEIGHT_DECAY = 1e-4 def update_learning_rate_schedule_parameters(params): batch_size = ( params['batch_size'] * params['num_shards'] if params['use_tpu'] else params['batch_size']) params['adjusted_learning_rate'] = ( params['learning_rate'] * batch_size / _DEFAULT_BATCH_SIZE) steps_per_epoch = params['num_examples_per_epoch'] / batch_size params['lr_warmup_step'] = int(params['lr_warmup_epoch'] * steps_per_epoch) params['first_lr_drop_step'] = int( params['first_lr_drop_epoch'] * steps_per_epoch) params['second_lr_drop_step'] = int( params['second_lr_drop_epoch'] * steps_per_epoch) def learning_rate_schedule(adjusted_learning_rate, lr_warmup_init, lr_warmup_step, first_lr_drop_step, second_lr_drop_step, global_step): linear_warmup = ( lr_warmup_init + (tf.cast(global_step, dtype=tf.float32) / lr_warmup_step * (adjusted_learning_rate - lr_warmup_init))) learning_rate = tf.where(global_step < lr_warmup_step, linear_warmup, adjusted_learning_rate) lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step], [0.01, second_lr_drop_step]] for mult, start_global_step in lr_schedule: learning_rate = tf.where(global_step < start_global_step, learning_rate, adjusted_learning_rate * mult) return learning_rate def focal_loss(logits, targets, alpha, gamma, normalizer): with tf.name_scope('focal_loss'): positive_label_mask = tf.equal(targets, 1.0) cross_entropy = ( tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)) probs = tf.sigmoid(logits) probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs) modulator = tf.pow(1.0 - probs_gt, gamma) loss = modulator * cross_entropy weighted_loss = tf.where(positive_label_mask, alpha * loss, (1.0 - alpha) * loss) total_loss = tf.reduce_sum(weighted_loss) total_loss /= normalizer return total_loss def _classification_loss(cls_outputs, cls_targets, num_positives, alpha=0.25, gamma=2.0): normalizer = num_positives classification_loss = focal_loss(cls_outputs, cls_targets, alpha, gamma, normalizer) return classification_loss def _box_loss(box_outputs, box_targets, num_positives, delta=0.1): normalizer = num_positives * 4.0 mask = tf.not_equal(box_targets, 0.0) box_loss = tf.losses.huber_loss( box_targets, box_outputs, weights=mask, delta=delta, reduction=tf.losses.Reduction.SUM) box_loss /= normalizer return box_loss def detection_loss(cls_outputs, box_outputs, labels, params): num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0 levels = cls_outputs.keys() cls_losses = [] box_losses = [] for level in levels: cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % level], params['num_classes']) bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list() cls_targets_at_level = tf.reshape(cls_targets_at_level, [bs, width, height, -1]) box_targets_at_level = labels['box_targets_%d' % level] cls_losses.append( _classification_loss( cls_outputs[level], cls_targets_at_level, num_positives_sum, alpha=params['alpha'], gamma=params['gamma'])) box_losses.append( _box_loss( box_outputs[level], box_targets_at_level, num_positives_sum, delta=params['delta'])) cls_loss = tf.add_n(cls_losses) box_loss = tf.add_n(box_losses) total_loss = cls_loss + params['box_loss_weight'] * box_loss return total_loss, cls_loss, box_loss def add_metric_fn_inputs(params, cls_outputs, box_outputs, 
metric_fn_inputs): cls_outputs_all = [] box_outputs_all = [] batch_size = tf.shape(cls_outputs[params['min_level']])[0] for level in range(params['min_level'], params['max_level'] + 1): cls_outputs_all.append( tf.reshape(cls_outputs[level], [batch_size, -1, params['num_classes']])) box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4])) cls_outputs_all = tf.concat(cls_outputs_all, 1) box_outputs_all = tf.concat(box_outputs_all, 1) cls_outputs_all_after_topk = [] box_outputs_all_after_topk = [] indices_all = [] classes_all = [] def _compute_top_k(x): cls_outputs_per_sample, box_outputs_per_sample = x cls_outputs_per_sample_reshape = tf.reshape(cls_outputs_per_sample, [-1]) _, cls_topk_indices = tf.nn.top_k(cls_outputs_per_sample_reshape, k=anchors.MAX_DETECTION_POINTS) indices = tf.div(cls_topk_indices, params['num_classes']) classes = tf.mod(cls_topk_indices, params['num_classes']) cls_indices = tf.stack([indices, classes], axis=1) cls_outputs_after_topk = tf.gather_nd(cls_outputs_per_sample, cls_indices) box_outputs_after_topk = tf.gather_nd(box_outputs_per_sample, tf.expand_dims(indices, 1)) return [indices, classes, cls_outputs_after_topk, box_outputs_after_topk] (indices_all, classes_all, cls_outputs_all_after_topk, box_outputs_all_after_topk) = tf.map_fn( _compute_top_k, [cls_outputs_all, box_outputs_all], back_prop=False, dtype=[tf.int32, tf.int32, tf.float32, tf.float32]) metric_fn_inputs['cls_outputs_all'] = cls_outputs_all_after_topk metric_fn_inputs['box_outputs_all'] = box_outputs_all_after_topk metric_fn_inputs['indices_all'] = indices_all metric_fn_inputs['classes_all'] = classes_all def coco_metric_fn(batch_size, anchor_labeler, filename=None, **kwargs): detections_bs = [] for index in range(batch_size): cls_outputs_per_sample = kwargs['cls_outputs_all'][index] box_outputs_per_sample = kwargs['box_outputs_all'][index] indices_per_sample = kwargs['indices_all'][index] classes_per_sample = kwargs['classes_all'][index] detections = anchor_labeler.generate_detections( cls_outputs_per_sample, box_outputs_per_sample, indices_per_sample, classes_per_sample, tf.slice(kwargs['source_ids'], [index], [1]), tf.slice(kwargs['image_scales'], [index], [1])) detections_bs.append(detections) eval_metric = coco_metric.EvaluationMetric(filename=filename) coco_metrics = eval_metric.estimator_metric_fn(detections_bs, kwargs['groundtruth_data']) return coco_metrics
Apache License 2.0
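To make the focal-loss arithmetic in the retinanet context above concrete, here is a minimal NumPy sketch of the same computation; the logits, targets, alpha and gamma values are made up for illustration and are not taken from the record, and the normalizer step is omitted.

import numpy as np

# Illustrative values only; alpha and gamma match the defaults in _classification_loss.
logits = np.array([2.0, -1.0, 0.5])
targets = np.array([1.0, 0.0, 1.0])
alpha, gamma = 0.25, 2.0

probs = 1.0 / (1.0 + np.exp(-logits))                        # sigmoid(logits)
cross_entropy = -(targets * np.log(probs) + (1.0 - targets) * np.log(1.0 - probs))
probs_gt = np.where(targets == 1.0, probs, 1.0 - probs)      # p_t of the ground-truth class
modulator = (1.0 - probs_gt) ** gamma                        # focusing term
weights = np.where(targets == 1.0, alpha, 1.0 - alpha)
focal = np.sum(weights * modulator * cross_entropy)          # unnormalized focal loss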
timbrel/gitsavvy
core/interfaces/status.py
StatusInterface.render
python
def render(self, nuke_cursors=False):
    self.refresh_view_state()
    self.just_render(nuke_cursors)

    if nuke_cursors:
        self.reset_cursor()
Refresh view state and render.
https://github.com/timbrel/gitsavvy/blob/10bd373e39462f7b404c28446dba428b32877b38/core/interfaces/status.py#L243-L249
from functools import partial, wraps from itertools import chain import os import threading import sublime from sublime_plugin import WindowCommand, TextCommand from ..git_mixins.status import FileStatus from ..commands import GsNavigate from ...common import ui from ..git_command import GitCommand from ...common import util from GitSavvy.core import store flatten = chain.from_iterable MYPY = False if MYPY: from typing import Iterable, Iterator, List, Optional, Tuple EXTRACT_FILENAME_RE = ( r"^(?: .+ -> | [ -] (?!\(\d+\) ))" r"(?!Your working directory is clean\.)" r"(\S.*)$" ) def distinct_until_state_changed(just_render_fn): previous_state = {} @wraps(just_render_fn) def wrapper(self, *args, **kwargs): nonlocal previous_state current_state = self.state if current_state != previous_state: just_render_fn(self, *args, **kwargs) previous_state = current_state.copy() return wrapper class GsShowStatusCommand(WindowCommand, GitCommand): def run(self): StatusInterface(repo_path=self.repo_path) class StatusInterface(ui.Interface, GitCommand): interface_type = "status" syntax_file = "Packages/GitSavvy/syntax/status.sublime-syntax" template = """\ BRANCH: {branch_status} ROOT: {git_root} HEAD: {head} {< unstaged_files} {< untracked_files} {< staged_files} {< merge_conflicts} {< no_status_message} {< stashes} {< help} """ template_help = """ ################### ############### ## SELECTED FILE ## ## ALL FILES ## ################### ############### [o] open file [a] stage all unstaged files [s] stage file [A] stage all unstaged and untracked files [u] unstage file [U] unstage all staged files [d] discard changes to file [D] discard all unstaged changes [h] open file on remote [M] launch external merge tool [l] diff file inline [f] diff all files [e] diff file [F] diff all cached files ############# ############# ## ACTIONS ## ## STASHES ## ############# ############# [c] commit [t][a] apply stash [C] commit, including unstaged [t][p] pop stash [m] amend previous commit [t][s] show stash [p] push current branch [t][c] create stash [t][u] create stash including untracked files [i] ignore file [t][g] create stash of staged changes only [I] ignore pattern [t][d] drop stash [B] abort merge ########### ## OTHER ## ########### [g] show graph repop history [?] toggle this help menu [tab] transition to next dashboard [SHIFT-tab] transition to previous dashboard [.] 
move cursor to next file [,] move cursor to previous file {conflicts_bindings} - """ conflicts_keybindings = """ ############### ## CONFLICTS ## ############### [y] use version from your commit [b] use version from the base """ template_staged = """ STAGED: {} """ template_unstaged = """ UNSTAGED: {} """ template_untracked = """ UNTRACKED: {} """ template_merge_conflicts = """ MERGE CONFLICTS: {} """ template_stashes = """ STASHES: {} """ def __init__(self, *args, **kwargs): if self._initialized: return self.conflicts_keybindings = "\n".join(line[2:] for line in self.conflicts_keybindings.split("\n")) self._lock = threading.Lock() self.state = { 'staged_files': [], 'unstaged_files': [], 'untracked_files': [], 'merge_conflicts': [], 'clean': True, 'long_status': '', 'git_root': '', 'show_help': True, 'head': '', 'stashes': [] } super().__init__(*args, **kwargs) def title(self): return "STATUS: {}".format(os.path.basename(self.repo_path)) def refresh_view_state(self): for thunk in ( lambda: {'head': self.get_latest_commit_msg_for_head()}, lambda: {'stashes': self.get_stashes()}, ): sublime.set_timeout_async( partial(self.update_state, thunk, then=self.just_render) ) self.view.run_command("gs_update_status") status = store.current_state(self.repo_path).get("status") if status: self.update_state(status._asdict()) self.update_state({ 'git_root': self.short_repo_path, 'show_help': not self.view.settings().get("git_savvy.help_hidden") }) def update_state(self, data, then=None): if callable(data): data = data() with self._lock: self.state.update(data) if callable(then): then()
MIT License
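A hedged usage sketch for the render method above; it assumes an already-constructed StatusInterface bound to an open dashboard view (creating one requires a running Sublime Text window, which is not shown here).

# ``interface`` is assumed to be a live StatusInterface instance.
interface.render()                     # refresh view state and redraw, keeping cursor positions
interface.render(nuke_cursors=True)    # redraw and reset the cursor as well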
lordmauve/adventurelib
test_adventurelib.py
active_context
python
def active_context(ctx):
    prev_ctx = adventurelib.current_context
    adventurelib.set_context(ctx)
    try:
        yield
    finally:
        adventurelib.set_context(prev_ctx)
Context manager to set the current command context.
https://github.com/lordmauve/adventurelib/blob/9e182659186e8721e7daa91ba92cc6d82fd594c3/test_adventurelib.py#L14-L21
from unittest.mock import patch
from contextlib import redirect_stdout, contextmanager
from io import StringIO

import pytest

import adventurelib
from adventurelib import Pattern, when, _handle_command, say, Room, Item, Bag

orig_commands = adventurelib.commands[:]


@contextmanager
MIT License
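A hedged sketch of how the test helper above is typically used; the context name and command string are illustrative and not taken from the test suite.

with active_context('tavern'):
    _handle_command('look around')   # handled with the "tavern" context active
# on exit the previously active context is restored, even if the block raised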
google/eclipse2017
image-processor/daemon/app/pipeline.py
get_file_from_gcs
python
def get_file_from_gcs(storage_client, fname):
    try:
        blob = storage_client.get_bucket(config.GCS_BUCKET).get_blob(fname)
    except Exception, e:
        msg = 'Failed to download {0} from Cloud Storage.'
        logging.exception(msg.format(fname))
        return False

    if blob:
        fpath = '{0}/{1}'.format(constants.IMAGE_PROCESSOR_DATA_DIR, fname)
        if not os.path.isfile(fpath):
            with open(fpath, 'w+') as file_obj:
                try:
                    blob.download_to_file(file_obj)
                    msg = 'Successfully downloaded {0} from GCS'
                    logging.info(msg.format(fname))
                except Exception, e:
                    msg = 'Failed to download {0} from Cloud Storage.'
                    logging.exception(msg.format(fname))
                    return None
        return fname
    else:
        return None
Download a file from the GCS bucket to the destination folder. Must be outside of the pipeline class for use as a multiprocessing map worker
https://github.com/google/eclipse2017/blob/a328c6d437638b1fe89aa8a24579406f01fe970d/image-processor/daemon/app/pipeline.py#L33-L63
from datetime import datetime
import logging
import subprocess
from functools import partial
from multiprocessing import Pool
from itertools import compress
import os
import random

import cv2
from google.cloud import datastore, storage

from common import config, constants
from common import datastore_schema as ds
from common.find_circles import findCircles
import numpy as np
from eclipse_gis import eclipse_gis
from shapely.geometry import Point
Apache License 2.0
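A hedged usage sketch for get_file_from_gcs; it assumes the function is importable from the pipeline module above, that the client uses application-default credentials, and the object name is purely illustrative.

import logging
from google.cloud import storage

storage_client = storage.Client()
fname = get_file_from_gcs(storage_client, 'photo_0001.jpg')  # illustrative object name
if not fname:
    logging.error('Could not fetch photo_0001.jpg from GCS')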
facebookresearch/compilergym
compiler_gym/bin/manual_env.py
ActionHistoryElement.has_no_effect
python
def has_no_effect(self):
    return self.info.get("action_had_no_effect")
Determine if the service thinks this action had no effect
https://github.com/facebookresearch/compilergym/blob/00ae8c0d080da4d429f95398be1df01b5d6e7b71/compiler_gym/bin/manual_env.py#L259-L261
import cmd
import random
import readline
import sys
from itertools import islice

from absl import app, flags

from compiler_gym.envs import CompilerEnv
from compiler_gym.util.flags.benchmark_from_flags import benchmark_from_flags
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.shell_format import emph
from compiler_gym.util.tabulate import tabulate
from compiler_gym.util.timer import Timer

FLAGS = flags.FLAGS

tutorial = "**************************".join(
    __doc__.split("**************************")[1:]
)


class ActionHistoryElement:

    def __init__(self, action_name, action_index, observation, reward, done, info):
        self.action_name = action_name
        self.action_index = action_index
        self.observation = observation
        self.reward = reward
        self.done = done
        self.info = info
MIT License
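A hedged sketch showing how has_no_effect might be consulted after an environment step; ``env`` is assumed to be a CompilerEnv created elsewhere, and the action name string is illustrative.

action = env.action_space.sample()
observation, reward, done, info = env.step(action)
entry = ActionHistoryElement("sampled action", action, observation, reward, done, info)
if entry.has_no_effect():
    print("The service reported that this action changed nothing.")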
docusign/docusign-python-client
docusign_esign/models/list.py
List.conditional_parent_label
python
def conditional_parent_label(self, conditional_parent_label):
    self._conditional_parent_label = conditional_parent_label
Sets the conditional_parent_label of this List. For conditional fields this is the TabLabel of the parent tab that controls this tab's visibility. # noqa: E501 :param conditional_parent_label: The conditional_parent_label of this List. # noqa: E501 :type: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/list.py#L958-L967
import pprint import re import six from docusign_esign.client.configuration import Configuration class List(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'anchor_allow_white_space_in_characters': 'str', 'anchor_allow_white_space_in_characters_metadata': 'PropertyMetadata', 'anchor_case_sensitive': 'str', 'anchor_case_sensitive_metadata': 'PropertyMetadata', 'anchor_horizontal_alignment': 'str', 'anchor_horizontal_alignment_metadata': 'PropertyMetadata', 'anchor_ignore_if_not_present': 'str', 'anchor_ignore_if_not_present_metadata': 'PropertyMetadata', 'anchor_match_whole_word': 'str', 'anchor_match_whole_word_metadata': 'PropertyMetadata', 'anchor_string': 'str', 'anchor_string_metadata': 'PropertyMetadata', 'anchor_tab_processor_version': 'str', 'anchor_tab_processor_version_metadata': 'PropertyMetadata', 'anchor_units': 'str', 'anchor_units_metadata': 'PropertyMetadata', 'anchor_x_offset': 'str', 'anchor_x_offset_metadata': 'PropertyMetadata', 'anchor_y_offset': 'str', 'anchor_y_offset_metadata': 'PropertyMetadata', 'bold': 'str', 'bold_metadata': 'PropertyMetadata', 'conditional_parent_label': 'str', 'conditional_parent_label_metadata': 'PropertyMetadata', 'conditional_parent_value': 'str', 'conditional_parent_value_metadata': 'PropertyMetadata', 'custom_tab_id': 'str', 'custom_tab_id_metadata': 'PropertyMetadata', 'document_id': 'str', 'document_id_metadata': 'PropertyMetadata', 'error_details': 'ErrorDetails', 'font': 'str', 'font_color': 'str', 'font_color_metadata': 'PropertyMetadata', 'font_metadata': 'PropertyMetadata', 'font_size': 'str', 'font_size_metadata': 'PropertyMetadata', 'form_order': 'str', 'form_order_metadata': 'PropertyMetadata', 'form_page_label': 'str', 'form_page_label_metadata': 'PropertyMetadata', 'form_page_number': 'str', 'form_page_number_metadata': 'PropertyMetadata', 'height': 'str', 'height_metadata': 'PropertyMetadata', 'italic': 'str', 'italic_metadata': 'PropertyMetadata', 'list_items': 'list[ListItem]', 'list_selected_value': 'str', 'list_selected_value_metadata': 'PropertyMetadata', 'locale_policy': 'LocalePolicyTab', 'locked': 'str', 'locked_metadata': 'PropertyMetadata', 'merge_field': 'MergeField', 'merge_field_xml': 'str', 'page_number': 'str', 'page_number_metadata': 'PropertyMetadata', 'recipient_id': 'str', 'recipient_id_guid': 'str', 'recipient_id_guid_metadata': 'PropertyMetadata', 'recipient_id_metadata': 'PropertyMetadata', 'require_all': 'str', 'require_all_metadata': 'PropertyMetadata', 'required': 'str', 'required_metadata': 'PropertyMetadata', 'require_initial_on_shared_change': 'str', 'require_initial_on_shared_change_metadata': 'PropertyMetadata', 'sender_required': 'str', 'sender_required_metadata': 'PropertyMetadata', 'shared': 'str', 'shared_metadata': 'PropertyMetadata', 'share_to_recipients': 'str', 'share_to_recipients_metadata': 'PropertyMetadata', 'smart_contract_information': 'SmartContractInformation', 'source': 'str', 'status': 'str', 'status_metadata': 'PropertyMetadata', 'tab_group_labels': 'list[str]', 'tab_group_labels_metadata': 'PropertyMetadata', 'tab_id': 'str', 'tab_id_metadata': 'PropertyMetadata', 'tab_label': 'str', 'tab_label_metadata': 'PropertyMetadata', 'tab_order': 'str', 'tab_order_metadata': 'PropertyMetadata', 'tab_type': 'str', 'tab_type_metadata': 'PropertyMetadata', 'template_locked': 'str', 'template_locked_metadata': 
'PropertyMetadata', 'template_required': 'str', 'template_required_metadata': 'PropertyMetadata', 'tooltip': 'str', 'tool_tip_metadata': 'PropertyMetadata', 'underline': 'str', 'underline_metadata': 'PropertyMetadata', 'value': 'str', 'value_metadata': 'PropertyMetadata', 'width': 'str', 'width_metadata': 'PropertyMetadata', 'x_position': 'str', 'x_position_metadata': 'PropertyMetadata', 'y_position': 'str', 'y_position_metadata': 'PropertyMetadata' } attribute_map = { 'anchor_allow_white_space_in_characters': 'anchorAllowWhiteSpaceInCharacters', 'anchor_allow_white_space_in_characters_metadata': 'anchorAllowWhiteSpaceInCharactersMetadata', 'anchor_case_sensitive': 'anchorCaseSensitive', 'anchor_case_sensitive_metadata': 'anchorCaseSensitiveMetadata', 'anchor_horizontal_alignment': 'anchorHorizontalAlignment', 'anchor_horizontal_alignment_metadata': 'anchorHorizontalAlignmentMetadata', 'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent', 'anchor_ignore_if_not_present_metadata': 'anchorIgnoreIfNotPresentMetadata', 'anchor_match_whole_word': 'anchorMatchWholeWord', 'anchor_match_whole_word_metadata': 'anchorMatchWholeWordMetadata', 'anchor_string': 'anchorString', 'anchor_string_metadata': 'anchorStringMetadata', 'anchor_tab_processor_version': 'anchorTabProcessorVersion', 'anchor_tab_processor_version_metadata': 'anchorTabProcessorVersionMetadata', 'anchor_units': 'anchorUnits', 'anchor_units_metadata': 'anchorUnitsMetadata', 'anchor_x_offset': 'anchorXOffset', 'anchor_x_offset_metadata': 'anchorXOffsetMetadata', 'anchor_y_offset': 'anchorYOffset', 'anchor_y_offset_metadata': 'anchorYOffsetMetadata', 'bold': 'bold', 'bold_metadata': 'boldMetadata', 'conditional_parent_label': 'conditionalParentLabel', 'conditional_parent_label_metadata': 'conditionalParentLabelMetadata', 'conditional_parent_value': 'conditionalParentValue', 'conditional_parent_value_metadata': 'conditionalParentValueMetadata', 'custom_tab_id': 'customTabId', 'custom_tab_id_metadata': 'customTabIdMetadata', 'document_id': 'documentId', 'document_id_metadata': 'documentIdMetadata', 'error_details': 'errorDetails', 'font': 'font', 'font_color': 'fontColor', 'font_color_metadata': 'fontColorMetadata', 'font_metadata': 'fontMetadata', 'font_size': 'fontSize', 'font_size_metadata': 'fontSizeMetadata', 'form_order': 'formOrder', 'form_order_metadata': 'formOrderMetadata', 'form_page_label': 'formPageLabel', 'form_page_label_metadata': 'formPageLabelMetadata', 'form_page_number': 'formPageNumber', 'form_page_number_metadata': 'formPageNumberMetadata', 'height': 'height', 'height_metadata': 'heightMetadata', 'italic': 'italic', 'italic_metadata': 'italicMetadata', 'list_items': 'listItems', 'list_selected_value': 'listSelectedValue', 'list_selected_value_metadata': 'listSelectedValueMetadata', 'locale_policy': 'localePolicy', 'locked': 'locked', 'locked_metadata': 'lockedMetadata', 'merge_field': 'mergeField', 'merge_field_xml': 'mergeFieldXml', 'page_number': 'pageNumber', 'page_number_metadata': 'pageNumberMetadata', 'recipient_id': 'recipientId', 'recipient_id_guid': 'recipientIdGuid', 'recipient_id_guid_metadata': 'recipientIdGuidMetadata', 'recipient_id_metadata': 'recipientIdMetadata', 'require_all': 'requireAll', 'require_all_metadata': 'requireAllMetadata', 'required': 'required', 'required_metadata': 'requiredMetadata', 'require_initial_on_shared_change': 'requireInitialOnSharedChange', 'require_initial_on_shared_change_metadata': 'requireInitialOnSharedChangeMetadata', 'sender_required': 'senderRequired', 
'sender_required_metadata': 'senderRequiredMetadata', 'shared': 'shared', 'shared_metadata': 'sharedMetadata', 'share_to_recipients': 'shareToRecipients', 'share_to_recipients_metadata': 'shareToRecipientsMetadata', 'smart_contract_information': 'smartContractInformation', 'source': 'source', 'status': 'status', 'status_metadata': 'statusMetadata', 'tab_group_labels': 'tabGroupLabels', 'tab_group_labels_metadata': 'tabGroupLabelsMetadata', 'tab_id': 'tabId', 'tab_id_metadata': 'tabIdMetadata', 'tab_label': 'tabLabel', 'tab_label_metadata': 'tabLabelMetadata', 'tab_order': 'tabOrder', 'tab_order_metadata': 'tabOrderMetadata', 'tab_type': 'tabType', 'tab_type_metadata': 'tabTypeMetadata', 'template_locked': 'templateLocked', 'template_locked_metadata': 'templateLockedMetadata', 'template_required': 'templateRequired', 'template_required_metadata': 'templateRequiredMetadata', 'tooltip': 'tooltip', 'tool_tip_metadata': 'toolTipMetadata', 'underline': 'underline', 'underline_metadata': 'underlineMetadata', 'value': 'value', 'value_metadata': 'valueMetadata', 'width': 'width', 'width_metadata': 'widthMetadata', 'x_position': 'xPosition', 'x_position_metadata': 'xPositionMetadata', 'y_position': 'yPosition', 'y_position_metadata': 'yPositionMetadata' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._anchor_allow_white_space_in_characters = None self._anchor_allow_white_space_in_characters_metadata = None self._anchor_case_sensitive = None self._anchor_case_sensitive_metadata = None self._anchor_horizontal_alignment = None self._anchor_horizontal_alignment_metadata = None self._anchor_ignore_if_not_present = None self._anchor_ignore_if_not_present_metadata = None self._anchor_match_whole_word = None self._anchor_match_whole_word_metadata = None self._anchor_string = None self._anchor_string_metadata = None self._anchor_tab_processor_version = None self._anchor_tab_processor_version_metadata = None self._anchor_units = None self._anchor_units_metadata = None self._anchor_x_offset = None self._anchor_x_offset_metadata = None self._anchor_y_offset = None self._anchor_y_offset_metadata = None self._bold = None self._bold_metadata = None self._conditional_parent_label = None self._conditional_parent_label_metadata = None self._conditional_parent_value = None self._conditional_parent_value_metadata = None self._custom_tab_id = None self._custom_tab_id_metadata = None self._document_id = None self._document_id_metadata = None self._error_details = None self._font = None self._font_color = None self._font_color_metadata = None self._font_metadata = None self._font_size = None self._font_size_metadata = None self._form_order = None self._form_order_metadata = None self._form_page_label = None self._form_page_label_metadata = None self._form_page_number = None self._form_page_number_metadata = None self._height = None self._height_metadata = None self._italic = None self._italic_metadata = None self._list_items = None self._list_selected_value = None self._list_selected_value_metadata = None self._locale_policy = None self._locked = None self._locked_metadata = None self._merge_field = None self._merge_field_xml = None self._page_number = None self._page_number_metadata = None self._recipient_id = None self._recipient_id_guid = None self._recipient_id_guid_metadata = None self._recipient_id_metadata = None self._require_all = None self._require_all_metadata = None self._required = None 
self._required_metadata = None self._require_initial_on_shared_change = None self._require_initial_on_shared_change_metadata = None self._sender_required = None self._sender_required_metadata = None self._shared = None self._shared_metadata = None self._share_to_recipients = None self._share_to_recipients_metadata = None self._smart_contract_information = None self._source = None self._status = None self._status_metadata = None self._tab_group_labels = None self._tab_group_labels_metadata = None self._tab_id = None self._tab_id_metadata = None self._tab_label = None self._tab_label_metadata = None self._tab_order = None self._tab_order_metadata = None self._tab_type = None self._tab_type_metadata = None self._template_locked = None self._template_locked_metadata = None self._template_required = None self._template_required_metadata = None self._tooltip = None self._tool_tip_metadata = None self._underline = None self._underline_metadata = None self._value = None self._value_metadata = None self._width = None self._width_metadata = None self._x_position = None self._x_position_metadata = None self._y_position = None self._y_position_metadata = None self.discriminator = None setattr(self, "_{}".format('anchor_allow_white_space_in_characters'), kwargs.get('anchor_allow_white_space_in_characters', None)) setattr(self, "_{}".format('anchor_allow_white_space_in_characters_metadata'), kwargs.get('anchor_allow_white_space_in_characters_metadata', None)) setattr(self, "_{}".format('anchor_case_sensitive'), kwargs.get('anchor_case_sensitive', None)) setattr(self, "_{}".format('anchor_case_sensitive_metadata'), kwargs.get('anchor_case_sensitive_metadata', None)) setattr(self, "_{}".format('anchor_horizontal_alignment'), kwargs.get('anchor_horizontal_alignment', None)) setattr(self, "_{}".format('anchor_horizontal_alignment_metadata'), kwargs.get('anchor_horizontal_alignment_metadata', None)) setattr(self, "_{}".format('anchor_ignore_if_not_present'), kwargs.get('anchor_ignore_if_not_present', None)) setattr(self, "_{}".format('anchor_ignore_if_not_present_metadata'), kwargs.get('anchor_ignore_if_not_present_metadata', None)) setattr(self, "_{}".format('anchor_match_whole_word'), kwargs.get('anchor_match_whole_word', None)) setattr(self, "_{}".format('anchor_match_whole_word_metadata'), kwargs.get('anchor_match_whole_word_metadata', None)) setattr(self, "_{}".format('anchor_string'), kwargs.get('anchor_string', None)) setattr(self, "_{}".format('anchor_string_metadata'), kwargs.get('anchor_string_metadata', None)) setattr(self, "_{}".format('anchor_tab_processor_version'), kwargs.get('anchor_tab_processor_version', None)) setattr(self, "_{}".format('anchor_tab_processor_version_metadata'), kwargs.get('anchor_tab_processor_version_metadata', None)) setattr(self, "_{}".format('anchor_units'), kwargs.get('anchor_units', None)) setattr(self, "_{}".format('anchor_units_metadata'), kwargs.get('anchor_units_metadata', None)) setattr(self, "_{}".format('anchor_x_offset'), kwargs.get('anchor_x_offset', None)) setattr(self, "_{}".format('anchor_x_offset_metadata'), kwargs.get('anchor_x_offset_metadata', None)) setattr(self, "_{}".format('anchor_y_offset'), kwargs.get('anchor_y_offset', None)) setattr(self, "_{}".format('anchor_y_offset_metadata'), kwargs.get('anchor_y_offset_metadata', None)) setattr(self, "_{}".format('bold'), kwargs.get('bold', None)) setattr(self, "_{}".format('bold_metadata'), kwargs.get('bold_metadata', None)) setattr(self, "_{}".format('conditional_parent_label'), 
kwargs.get('conditional_parent_label', None)) setattr(self, "_{}".format('conditional_parent_label_metadata'), kwargs.get('conditional_parent_label_metadata', None)) setattr(self, "_{}".format('conditional_parent_value'), kwargs.get('conditional_parent_value', None)) setattr(self, "_{}".format('conditional_parent_value_metadata'), kwargs.get('conditional_parent_value_metadata', None)) setattr(self, "_{}".format('custom_tab_id'), kwargs.get('custom_tab_id', None)) setattr(self, "_{}".format('custom_tab_id_metadata'), kwargs.get('custom_tab_id_metadata', None)) setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None)) setattr(self, "_{}".format('document_id_metadata'), kwargs.get('document_id_metadata', None)) setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None)) setattr(self, "_{}".format('font'), kwargs.get('font', None)) setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None)) setattr(self, "_{}".format('font_color_metadata'), kwargs.get('font_color_metadata', None)) setattr(self, "_{}".format('font_metadata'), kwargs.get('font_metadata', None)) setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None)) setattr(self, "_{}".format('font_size_metadata'), kwargs.get('font_size_metadata', None)) setattr(self, "_{}".format('form_order'), kwargs.get('form_order', None)) setattr(self, "_{}".format('form_order_metadata'), kwargs.get('form_order_metadata', None)) setattr(self, "_{}".format('form_page_label'), kwargs.get('form_page_label', None)) setattr(self, "_{}".format('form_page_label_metadata'), kwargs.get('form_page_label_metadata', None)) setattr(self, "_{}".format('form_page_number'), kwargs.get('form_page_number', None)) setattr(self, "_{}".format('form_page_number_metadata'), kwargs.get('form_page_number_metadata', None)) setattr(self, "_{}".format('height'), kwargs.get('height', None)) setattr(self, "_{}".format('height_metadata'), kwargs.get('height_metadata', None)) setattr(self, "_{}".format('italic'), kwargs.get('italic', None)) setattr(self, "_{}".format('italic_metadata'), kwargs.get('italic_metadata', None)) setattr(self, "_{}".format('list_items'), kwargs.get('list_items', None)) setattr(self, "_{}".format('list_selected_value'), kwargs.get('list_selected_value', None)) setattr(self, "_{}".format('list_selected_value_metadata'), kwargs.get('list_selected_value_metadata', None)) setattr(self, "_{}".format('locale_policy'), kwargs.get('locale_policy', None)) setattr(self, "_{}".format('locked'), kwargs.get('locked', None)) setattr(self, "_{}".format('locked_metadata'), kwargs.get('locked_metadata', None)) setattr(self, "_{}".format('merge_field'), kwargs.get('merge_field', None)) setattr(self, "_{}".format('merge_field_xml'), kwargs.get('merge_field_xml', None)) setattr(self, "_{}".format('page_number'), kwargs.get('page_number', None)) setattr(self, "_{}".format('page_number_metadata'), kwargs.get('page_number_metadata', None)) setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None)) setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None)) setattr(self, "_{}".format('recipient_id_guid_metadata'), kwargs.get('recipient_id_guid_metadata', None)) setattr(self, "_{}".format('recipient_id_metadata'), kwargs.get('recipient_id_metadata', None)) setattr(self, "_{}".format('require_all'), kwargs.get('require_all', None)) setattr(self, "_{}".format('require_all_metadata'), kwargs.get('require_all_metadata', None)) setattr(self, "_{}".format('required'), 
kwargs.get('required', None)) setattr(self, "_{}".format('required_metadata'), kwargs.get('required_metadata', None)) setattr(self, "_{}".format('require_initial_on_shared_change'), kwargs.get('require_initial_on_shared_change', None)) setattr(self, "_{}".format('require_initial_on_shared_change_metadata'), kwargs.get('require_initial_on_shared_change_metadata', None)) setattr(self, "_{}".format('sender_required'), kwargs.get('sender_required', None)) setattr(self, "_{}".format('sender_required_metadata'), kwargs.get('sender_required_metadata', None)) setattr(self, "_{}".format('shared'), kwargs.get('shared', None)) setattr(self, "_{}".format('shared_metadata'), kwargs.get('shared_metadata', None)) setattr(self, "_{}".format('share_to_recipients'), kwargs.get('share_to_recipients', None)) setattr(self, "_{}".format('share_to_recipients_metadata'), kwargs.get('share_to_recipients_metadata', None)) setattr(self, "_{}".format('smart_contract_information'), kwargs.get('smart_contract_information', None)) setattr(self, "_{}".format('source'), kwargs.get('source', None)) setattr(self, "_{}".format('status'), kwargs.get('status', None)) setattr(self, "_{}".format('status_metadata'), kwargs.get('status_metadata', None)) setattr(self, "_{}".format('tab_group_labels'), kwargs.get('tab_group_labels', None)) setattr(self, "_{}".format('tab_group_labels_metadata'), kwargs.get('tab_group_labels_metadata', None)) setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None)) setattr(self, "_{}".format('tab_id_metadata'), kwargs.get('tab_id_metadata', None)) setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None)) setattr(self, "_{}".format('tab_label_metadata'), kwargs.get('tab_label_metadata', None)) setattr(self, "_{}".format('tab_order'), kwargs.get('tab_order', None)) setattr(self, "_{}".format('tab_order_metadata'), kwargs.get('tab_order_metadata', None)) setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None)) setattr(self, "_{}".format('tab_type_metadata'), kwargs.get('tab_type_metadata', None)) setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None)) setattr(self, "_{}".format('template_locked_metadata'), kwargs.get('template_locked_metadata', None)) setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None)) setattr(self, "_{}".format('template_required_metadata'), kwargs.get('template_required_metadata', None)) setattr(self, "_{}".format('tooltip'), kwargs.get('tooltip', None)) setattr(self, "_{}".format('tool_tip_metadata'), kwargs.get('tool_tip_metadata', None)) setattr(self, "_{}".format('underline'), kwargs.get('underline', None)) setattr(self, "_{}".format('underline_metadata'), kwargs.get('underline_metadata', None)) setattr(self, "_{}".format('value'), kwargs.get('value', None)) setattr(self, "_{}".format('value_metadata'), kwargs.get('value_metadata', None)) setattr(self, "_{}".format('width'), kwargs.get('width', None)) setattr(self, "_{}".format('width_metadata'), kwargs.get('width_metadata', None)) setattr(self, "_{}".format('x_position'), kwargs.get('x_position', None)) setattr(self, "_{}".format('x_position_metadata'), kwargs.get('x_position_metadata', None)) setattr(self, "_{}".format('y_position'), kwargs.get('y_position', None)) setattr(self, "_{}".format('y_position_metadata'), kwargs.get('y_position_metadata', None)) @property def anchor_allow_white_space_in_characters(self): return self._anchor_allow_white_space_in_characters @anchor_allow_white_space_in_characters.setter def 
anchor_allow_white_space_in_characters(self, anchor_allow_white_space_in_characters): self._anchor_allow_white_space_in_characters = anchor_allow_white_space_in_characters @property def anchor_allow_white_space_in_characters_metadata(self): return self._anchor_allow_white_space_in_characters_metadata @anchor_allow_white_space_in_characters_metadata.setter def anchor_allow_white_space_in_characters_metadata(self, anchor_allow_white_space_in_characters_metadata): self._anchor_allow_white_space_in_characters_metadata = anchor_allow_white_space_in_characters_metadata @property def anchor_case_sensitive(self): return self._anchor_case_sensitive @anchor_case_sensitive.setter def anchor_case_sensitive(self, anchor_case_sensitive): self._anchor_case_sensitive = anchor_case_sensitive @property def anchor_case_sensitive_metadata(self): return self._anchor_case_sensitive_metadata @anchor_case_sensitive_metadata.setter def anchor_case_sensitive_metadata(self, anchor_case_sensitive_metadata): self._anchor_case_sensitive_metadata = anchor_case_sensitive_metadata @property def anchor_horizontal_alignment(self): return self._anchor_horizontal_alignment @anchor_horizontal_alignment.setter def anchor_horizontal_alignment(self, anchor_horizontal_alignment): self._anchor_horizontal_alignment = anchor_horizontal_alignment @property def anchor_horizontal_alignment_metadata(self): return self._anchor_horizontal_alignment_metadata @anchor_horizontal_alignment_metadata.setter def anchor_horizontal_alignment_metadata(self, anchor_horizontal_alignment_metadata): self._anchor_horizontal_alignment_metadata = anchor_horizontal_alignment_metadata @property def anchor_ignore_if_not_present(self): return self._anchor_ignore_if_not_present @anchor_ignore_if_not_present.setter def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present): self._anchor_ignore_if_not_present = anchor_ignore_if_not_present @property def anchor_ignore_if_not_present_metadata(self): return self._anchor_ignore_if_not_present_metadata @anchor_ignore_if_not_present_metadata.setter def anchor_ignore_if_not_present_metadata(self, anchor_ignore_if_not_present_metadata): self._anchor_ignore_if_not_present_metadata = anchor_ignore_if_not_present_metadata @property def anchor_match_whole_word(self): return self._anchor_match_whole_word @anchor_match_whole_word.setter def anchor_match_whole_word(self, anchor_match_whole_word): self._anchor_match_whole_word = anchor_match_whole_word @property def anchor_match_whole_word_metadata(self): return self._anchor_match_whole_word_metadata @anchor_match_whole_word_metadata.setter def anchor_match_whole_word_metadata(self, anchor_match_whole_word_metadata): self._anchor_match_whole_word_metadata = anchor_match_whole_word_metadata @property def anchor_string(self): return self._anchor_string @anchor_string.setter def anchor_string(self, anchor_string): self._anchor_string = anchor_string @property def anchor_string_metadata(self): return self._anchor_string_metadata @anchor_string_metadata.setter def anchor_string_metadata(self, anchor_string_metadata): self._anchor_string_metadata = anchor_string_metadata @property def anchor_tab_processor_version(self): return self._anchor_tab_processor_version @anchor_tab_processor_version.setter def anchor_tab_processor_version(self, anchor_tab_processor_version): self._anchor_tab_processor_version = anchor_tab_processor_version @property def anchor_tab_processor_version_metadata(self): return self._anchor_tab_processor_version_metadata 
@anchor_tab_processor_version_metadata.setter def anchor_tab_processor_version_metadata(self, anchor_tab_processor_version_metadata): self._anchor_tab_processor_version_metadata = anchor_tab_processor_version_metadata @property def anchor_units(self): return self._anchor_units @anchor_units.setter def anchor_units(self, anchor_units): self._anchor_units = anchor_units @property def anchor_units_metadata(self): return self._anchor_units_metadata @anchor_units_metadata.setter def anchor_units_metadata(self, anchor_units_metadata): self._anchor_units_metadata = anchor_units_metadata @property def anchor_x_offset(self): return self._anchor_x_offset @anchor_x_offset.setter def anchor_x_offset(self, anchor_x_offset): self._anchor_x_offset = anchor_x_offset @property def anchor_x_offset_metadata(self): return self._anchor_x_offset_metadata @anchor_x_offset_metadata.setter def anchor_x_offset_metadata(self, anchor_x_offset_metadata): self._anchor_x_offset_metadata = anchor_x_offset_metadata @property def anchor_y_offset(self): return self._anchor_y_offset @anchor_y_offset.setter def anchor_y_offset(self, anchor_y_offset): self._anchor_y_offset = anchor_y_offset @property def anchor_y_offset_metadata(self): return self._anchor_y_offset_metadata @anchor_y_offset_metadata.setter def anchor_y_offset_metadata(self, anchor_y_offset_metadata): self._anchor_y_offset_metadata = anchor_y_offset_metadata @property def bold(self): return self._bold @bold.setter def bold(self, bold): self._bold = bold @property def bold_metadata(self): return self._bold_metadata @bold_metadata.setter def bold_metadata(self, bold_metadata): self._bold_metadata = bold_metadata @property def conditional_parent_label(self): return self._conditional_parent_label @conditional_parent_label.setter
MIT License
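A hedged sketch of setting the property above on a List tab; the import is an assumption about how the generated package exposes the model, and the label and value strings are illustrative rather than names required by DocuSign.

from docusign_esign import List as ListTab   # assumed re-export; the model also lives in docusign_esign.models

tab = ListTab()
tab.conditional_parent_label = 'approval_checkbox'   # TabLabel of the controlling parent tab
tab.conditional_parent_value = 'on'                  # show this list only when the parent matches this value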
google/cloud-forensics-utils
libcloudforensics/providers/aws/internal/ec2.py
EC2._GetBootVolumeConfigByAmi
python
def _GetBootVolumeConfigByAmi(self,
                              ami: str,
                              boot_volume_size: int,
                              boot_volume_type: str) -> Dict[str, Any]:
    client = self.aws_account.ClientApi(common.EC2_SERVICE)
    try:
        image = client.describe_images(ImageIds=[ami])
    except client.exceptions.ClientError as exception:
        raise errors.ResourceNotFoundError(
            'Could not find image information for AMI {0:s}: {1!s}'.format(
                ami, exception), __name__) from exception

    block_device_mapping = image['Images'][0]['BlockDeviceMappings'][0]
    block_device_mapping['Ebs']['VolumeSize'] = boot_volume_size
    block_device_mapping['Ebs']['VolumeType'] = boot_volume_type
    if boot_volume_type == 'io1':
        block_device_mapping['Ebs']['Iops'] = boot_volume_size * 50
    return block_device_mapping
Return a boot volume configuration for a given AMI and boot volume size. Args: ami (str): The Amazon Machine Image ID. boot_volume_size (int): Size of the boot volume, in GB. boot_volume_type (str): Type of the boot volume. Returns: Dict[str, str|Dict]]: A BlockDeviceMappings configuration for the specified AMI. Raises: ResourceNotFoundError: If AMI details cannot be found.
https://github.com/google/cloud-forensics-utils/blob/38142cf3e00f70d976aa42aa2f9a1981c0240b19/libcloudforensics/providers/aws/internal/ec2.py#L503-L544
import binascii import ipaddress import os import random from typing import TYPE_CHECKING, Dict, Optional, List, Any, Tuple import botocore from libcloudforensics import errors from libcloudforensics.providers.aws.internal import common if TYPE_CHECKING: from libcloudforensics.providers.aws.internal import account from libcloudforensics.providers.aws.internal import ebs class AWSInstance: def __init__(self, aws_account: 'account.AWSAccount', instance_id: str, region: str, availability_zone: str, vpc: str, name: Optional[str] = None) -> None: self.aws_account = aws_account self.instance_id = instance_id self.region = region self.availability_zone = availability_zone self.vpc = vpc self.name = name def GetBootVolume(self) -> 'ebs.AWSVolume': boot_device = self.aws_account.ResourceApi( common.EC2_SERVICE).Instance(self.instance_id).root_device_name volumes = self.ListVolumes() for volume_id in volumes: if volumes[volume_id].device_name == boot_device: return volumes[volume_id] raise errors.ResourceNotFoundError( 'Boot volume not found for instance: {0:s}'.format(self.instance_id), __name__) def GetVolume(self, volume_id: str) -> 'ebs.AWSVolume': volume = self.ListVolumes().get(volume_id) if not volume: raise errors.ResourceNotFoundError( 'Volume {0:s} is not attached to instance {1:s}'.format( volume_id, self.instance_id), __name__) return volume def ListVolumes(self) -> Dict[str, 'ebs.AWSVolume']: return self.aws_account.ebs.ListVolumes( filters=[{ 'Name': 'attachment.instance-id', 'Values': [self.instance_id]}]) def AttachVolume(self, volume: 'ebs.AWSVolume', device_name: str) -> None: client = self.aws_account.ClientApi(common.EC2_SERVICE) try: client.attach_volume(Device=device_name, InstanceId=self.instance_id, VolumeId=volume.volume_id) except client.exceptions.ClientError as exception: raise RuntimeError('Could not attach volume {0:s}: {1:s}'.format( volume.volume_id, str(exception))) from exception volume.device_name = device_name def Delete(self, force_delete: bool = False) -> None: client = self.aws_account.ClientApi(common.EC2_SERVICE) if force_delete: try: common.ExecuteRequest( client, 'modify_instance_attribute', { 'InstanceId': self.instance_id, 'Attribute': 'disableApiTermination', 'Value': 'False' }) except client.exceptions.ClientError as exception: raise errors.ResourceDeletionError( 'Could not toggle instance attribute disableApiTermination: ' '{0!s}'.format(exception), __name__) from exception resource_client = self.aws_account.ResourceApi(common.EC2_SERVICE) try: resource_client.Instance(self.instance_id).terminate() client.get_waiter('instance_terminated').wait( InstanceIds=[self.instance_id]) except client.exceptions.ClientError as exception: raise errors.ResourceDeletionError( 'Could not delete instance: {0!s}'.format( exception), __name__) from exception class EC2: def __init__(self, aws_account: 'account.AWSAccount') -> None: self.aws_account = aws_account def ListInstances( self, region: Optional[str] = None, filters: Optional[List[Dict[str, Any]]] = None, show_terminated: bool = False) -> Dict[str, AWSInstance]: if not filters: filters = [] instances = {} client = self.aws_account.ClientApi(common.EC2_SERVICE, region=region) responses = common.ExecuteRequest( client, 'describe_instances', {'Filters': filters}) for response in responses: for reservation in response['Reservations']: for instance in reservation['Instances']: if instance['State']['Name'] == 'terminated' and not show_terminated: continue zone = instance['Placement']['AvailabilityZone'] instance_id = 
instance['InstanceId'] vpc = instance['VpcId'] aws_instance = AWSInstance( self.aws_account, instance_id, zone[:-1], zone, vpc) for tag in instance.get('Tags', []): if tag.get('Key') == 'Name': aws_instance.name = tag.get('Value') break instances[instance_id] = aws_instance return instances def GetInstancesByNameOrId( self, instance_name: Optional[str] = None, instance_id: Optional[str] = None, region: Optional[str] = None) -> List[AWSInstance]: if (not instance_name and not instance_id) or (instance_name and instance_id): raise ValueError('You must specify exactly one of [instance_name, ' 'instance_id]. Got instance_name: {0:s}, instance_id: ' '{1:s}'.format(str(instance_name), str(instance_id))) if instance_name: return self.GetInstancesByName(instance_name, region=region) return [self.GetInstanceById(instance_id, region=region)] def GetInstancesByName(self, instance_name: str, region: Optional[str] = None) -> List[AWSInstance]: instances = self.ListInstances(region=region) return [instance for instance in instances.values() if instance.name == instance_name] def GetInstanceById(self, instance_id: str, region: Optional[str] = None) -> AWSInstance: instances = self.ListInstances(region=region) instance = instances.get(instance_id) if not instance: raise errors.ResourceNotFoundError( 'Instance {0:s} was not found in AWS account'.format(instance_id), __name__) return instance def ListImages( self, qfilter: Optional[List[Dict[str, Any]]] = None) -> List[Dict[str, Any]]: if not qfilter: qfilter = [] client = self.aws_account.ClientApi(common.EC2_SERVICE) try: images = client.describe_images( Filters=qfilter) except client.exceptions.ClientError as exception: raise RuntimeError(str(exception)) from exception return images['Images'] def GetOrCreateVm( self, vm_name: str, boot_volume_size: int, ami: str, cpu_cores: int, boot_volume_type: str = 'gp2', ssh_key_name: Optional[str] = None, tags: Optional[Dict[str, str]] = None, subnet_id: Optional[str] = None, security_group_id: Optional[str] = None, userdata: Optional[str] = None, instance_profile: Optional[str] = None, terminate_on_shutdown: bool = False, wait_for_health_checks: bool = True ) -> Tuple[AWSInstance, bool]: instances = self.GetInstancesByName(vm_name) if instances: created = False return instances[0], created instance_type = common.GetInstanceTypeByCPU(cpu_cores) if not tags: tags = {} tags['Name'] = vm_name client = self.aws_account.ClientApi(common.EC2_SERVICE) vm_args = { 'BlockDeviceMappings': [self._GetBootVolumeConfigByAmi( ami, boot_volume_size, boot_volume_type)], 'ImageId': ami, 'MinCount': 1, 'MaxCount': 1, 'InstanceType': instance_type, 'TagSpecifications': [common.CreateTags(common.INSTANCE, tags)], 'Placement': { 'AvailabilityZone': self.aws_account.default_availability_zone} } if ssh_key_name: vm_args['KeyName'] = ssh_key_name if subnet_id: interface = { 'AssociatePublicIpAddress': True, 'DeleteOnTermination': True, 'DeviceIndex': 0, 'SubnetId': subnet_id} if security_group_id: interface['Groups'] = [security_group_id] vm_args['NetworkInterfaces']=[interface] elif security_group_id: vm_args['SecurityGroupIds'] = [security_group_id] if userdata: vm_args['UserData'] = userdata if instance_profile: vm_args['IamInstanceProfile'] = {'Arn': instance_profile} if terminate_on_shutdown: vm_args['InstanceInitiatedShutdownBehavior'] = 'terminate' try: instance = client.run_instances(**vm_args) instance_id = instance['Instances'][0]['InstanceId'] if wait_for_health_checks: 
client.get_waiter('instance_running').wait(InstanceIds=[instance_id]) client.get_waiter('instance_status_ok').wait(InstanceIds=[instance_id]) vpc = instance['Instances'][0]['VpcId'] except (client.exceptions.ClientError, botocore.exceptions.WaiterError) as exception: raise errors.ResourceCreationError( 'Could not create instance {0:s}: {1!s}'.format(vm_name, exception), __name__) from exception instance = AWSInstance(self.aws_account, instance_id, self.aws_account.default_region, self.aws_account.default_availability_zone, vpc, name=vm_name) created = True return instance, created
Apache License 2.0
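For orientation, a sketch of the kind of BlockDeviceMappings entry the helper above returns; the device name and snapshot ID are illustrative stand-ins for values that come from the AMI's describe_images response.

block_device_mapping = {
    'DeviceName': '/dev/xvda',                   # taken from the AMI description
    'Ebs': {
        'SnapshotId': 'snap-0123456789abcdef0',  # illustrative
        'VolumeSize': 50,                        # boot_volume_size argument
        'VolumeType': 'io1',                     # boot_volume_type argument
        'Iops': 2500,                            # 50 GB * 50, added only for io1 volumes
        'DeleteOnTermination': True,
    },
}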
akleroy/phangs_imaging_scripts
phangsPipeline/handlerTemplate.py
HandlerTemplate.get_targets
python
def get_targets(self):
    if self._targets_list is None:
        return([])
    else:
        return(self._targets_list)
Return the list of targets to consider.
https://github.com/akleroy/phangs_imaging_scripts/blob/11217d813382d7a0d9cae36d1dd3966b3c07c0d9/phangsPipeline/handlerTemplate.py#L345-L354
import os import glob import numpy as np import logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) class HandlerTemplate: def __init__( self, key_handler = None, dry_run = False, ): self._targets_first = None self._targets_last = None self._targets_skip = None self._targets_only = None self._cont_skip = None self._cont_only = None self._lines_skip = None self._lines_only = None self._interf_configs_skip = None self._interf_configs_only = None self._feather_configs_skip = None self._feather_configs_only = None self._targets_list = [] self._line_products_list = [] self._cont_products_list = [] self._interf_configs_list = [] self._feather_configs_list = [] self._no_interf = False self._no_feather = False self._no_cont = False self._no_line = False if key_handler is not None: self.set_key_handler(key_handler, nobuild=True) self.set_targets(nobuild=True) self.set_line_products(nobuild=True) self.set_cont_products(nobuild=True) self.set_interf_configs(nobuild=True) self.set_feather_configs(nobuild=True) self._build_lists() self.set_dry_run(dry_run) def set_key_handler( self, key_handler = None, nobuild = False): self._kh = key_handler if not nobuild: self._build_lists() return(None) def set_dry_run( self, dry_run = False): self._dry_run = dry_run return(None) def set_targets( self, first=None, last=None, skip=[], only=[], nobuild=False): self._targets_first = first self._targets_last = last if np.isscalar(skip): self._targets_skip = [skip] else: self._targets_skip = skip if np.isscalar(only): self._targets_only = [only] else: self._targets_only = only if not nobuild: self._build_lists() return(None) def set_line_products( self, skip=[], only=[], nobuild=False, ): if np.isscalar(skip): self._lines_skip = [skip] else: self._lines_skip = skip if np.isscalar(only): self._lines_only = [only] else: self._lines_only = only if not nobuild: self._build_lists() return(None) def set_cont_products( self, skip=[], only=[], nobuild=False, ): if np.isscalar(skip): self._cont_skip = [skip] else: self._cont_skip = skip if np.isscalar(only): self._cont_only = [only] else: self._cont_only = only if not nobuild: self._build_lists() return(None) def set_interf_configs( self, skip=[], only=[], nobuild=False, ): if np.isscalar(skip): self._interf_configs_skip = [skip] else: self._interf_configs_skip = skip if np.isscalar(only): self._interf_configs_only = [only] else: self._interf_configs_only = only if not nobuild: self._build_lists() return(None) def set_feather_configs( self, skip=[], only=[], nobuild=False, ): if np.isscalar(skip): self._feather_configs_skip = [skip] else: self._feather_configs_skip = skip if np.isscalar(only): self._feather_configs_only = [only] else: self._feather_configs_only = only if not nobuild: self._build_lists() return(None) def set_no_line_products( self, no_line = False): self._no_line = no_line self._build_lists() def set_no_cont_products( self, no_cont = False): self._no_cont = no_cont self._build_lists() def set_no_interf_configs( self, no_interf = False): self._no_interf = no_interf self._build_lists() def set_no_feather_configs( self, no_feather = False): self._no_feather = no_feather self._build_lists() def _build_lists( self ): if self._kh is None: logger.error("Cannot build lists without a handlerKeys.") raise Exception("Cannot build lists without a handlerKeys.") return(None) self._targets_list = self._kh.get_targets( only = self._targets_only, skip = self._targets_skip, first = self._targets_first, last = self._targets_last, ) if self._no_line: 
self._line_products_list = [] else: self._line_products_list = self._kh.get_line_products( only = self._lines_only, skip = self._lines_skip, ) if self._no_cont: self._cont_products_list = [] else: self._cont_products_list = self._kh.get_continuum_products( only = self._cont_only, skip = self._cont_skip, ) if self._no_interf: self._interf_configs_list = [] else: self._interf_configs_list = self._kh.get_interf_configs( only = self._interf_configs_only, skip = self._interf_configs_skip, ) if self._no_feather: self._feather_configs_list = [] else: self._feather_configs_list = self._kh.get_feather_configs( only = self._feather_configs_only, skip = self._feather_configs_skip, ) return()
MIT License
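A hedged sketch of get_targets in use; ``my_key_handler`` stands in for a configured key handler built elsewhere, and the galaxy names are illustrative.

handler = HandlerTemplate(key_handler=my_key_handler)
handler.set_targets(only=['ngc0628', 'ngc3621'])
for target in handler.get_targets():   # returns [] if no target list has been built
    print(target)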
osmr/imgclsmob
gluon/gluoncv2/models/resnet_cifar.py
resnet164bn_svhn
python
def resnet164bn_svhn(classes=10, **kwargs):
    return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_svhn", **kwargs)
ResNet-164(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385. Parameters: ---------- classes : int, default 10 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters.
https://github.com/osmr/imgclsmob/blob/ea5f784eea865ce830f3f97c5c1d1f6491d9cbb2/gluon/gluoncv2/models/resnet_cifar.py#L360-L376
__all__ = ['CIFARResNet', 'resnet20_cifar10', 'resnet20_cifar100', 'resnet20_svhn', 'resnet56_cifar10', 'resnet56_cifar100', 'resnet56_svhn', 'resnet110_cifar10', 'resnet110_cifar100', 'resnet110_svhn', 'resnet164bn_cifar10', 'resnet164bn_cifar100', 'resnet164bn_svhn', 'resnet272bn_cifar10', 'resnet272bn_cifar100', 'resnet272bn_svhn', 'resnet542bn_cifar10', 'resnet542bn_cifar100', 'resnet542bn_svhn', 'resnet1001_cifar10', 'resnet1001_cifar100', 'resnet1001_svhn', 'resnet1202_cifar10', 'resnet1202_cifar100', 'resnet1202_svhn'] import os from mxnet import cpu from mxnet.gluon import nn, HybridBlock from .common import conv3x3_block from .resnet import ResUnit class CIFARResNet(HybridBlock): def __init__(self, channels, init_block_channels, bottleneck, bn_use_global_stats=False, in_channels=3, in_size=(32, 32), classes=10, **kwargs): super(CIFARResNet, self).__init__(**kwargs) self.in_size = in_size self.classes = classes with self.name_scope(): self.features = nn.HybridSequential(prefix="") self.features.add(conv3x3_block( in_channels=in_channels, out_channels=init_block_channels, bn_use_global_stats=bn_use_global_stats)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.HybridSequential(prefix="stage{}_".format(i + 1)) with stage.name_scope(): for j, out_channels in enumerate(channels_per_stage): strides = 2 if (j == 0) and (i != 0) else 1 stage.add(ResUnit( in_channels=in_channels, out_channels=out_channels, strides=strides, bn_use_global_stats=bn_use_global_stats, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels self.features.add(stage) self.features.add(nn.AvgPool2D( pool_size=8, strides=1)) self.output = nn.HybridSequential(prefix="") self.output.add(nn.Flatten()) self.output.add(nn.Dense( units=classes, in_units=in_channels)) def hybrid_forward(self, F, x): x = self.features(x) x = self.output(x) return x def get_resnet_cifar(classes, blocks, bottleneck, model_name=None, pretrained=False, ctx=cpu(), root=os.path.join("~", ".mxnet", "models"), **kwargs): assert (classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, classes=classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import get_model_file net.load_parameters( filename=get_model_file( model_name=model_name, local_model_store_dir_path=root), ctx=ctx) return net def resnet20_cifar10(classes=10, **kwargs): return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_cifar10", **kwargs) def resnet20_cifar100(classes=100, **kwargs): return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_cifar100", **kwargs) def resnet20_svhn(classes=10, **kwargs): return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resnet20_svhn", **kwargs) def resnet56_cifar10(classes=10, **kwargs): return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_cifar10", **kwargs) def resnet56_cifar100(classes=100, 
**kwargs): return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_cifar100", **kwargs) def resnet56_svhn(classes=10, **kwargs): return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="resnet56_svhn", **kwargs) def resnet110_cifar10(classes=10, **kwargs): return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_cifar10", **kwargs) def resnet110_cifar100(classes=100, **kwargs): return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_cifar100", **kwargs) def resnet110_svhn(classes=10, **kwargs): return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="resnet110_svhn", **kwargs) def resnet164bn_cifar10(classes=10, **kwargs): return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar10", **kwargs) def resnet164bn_cifar100(classes=100, **kwargs): return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="resnet164bn_cifar100", **kwargs)
MIT License
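A hedged sketch instantiating the model above and pushing a dummy SVHN-sized batch through it; the import path mirrors the file path in the record and assumes the ``gluon`` directory is on the Python path.

import mxnet as mx
from gluoncv2.models.resnet_cifar import resnet164bn_svhn

net = resnet164bn_svhn(pretrained=False)
net.initialize(ctx=mx.cpu())
x = mx.nd.zeros((1, 3, 32, 32))   # one dummy 32x32 RGB image
y = net(x)                        # (1, 10) class scores for the ten SVHN digits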
riga/law
law/contrib/arc/util.py
get_arc_proxy_vo
python
def get_arc_proxy_vo(proxy_file=None):
    return _arc_proxy_info(args=["--infoitem=vomsVO"], proxy_file=proxy_file)[1].strip()
Returns the virtual organization name of the arc proxy. When *proxy_file* is *None*, it defaults to the result of :py:func:`get_arc_proxy_file`. Otherwise, when it evaluates to *False*, ``arcproxy`` is queried without a custom proxy file.
https://github.com/riga/law/blob/be56987dd1ca7e2e54b08fa8aac2ce63b6236e74/law/contrib/arc/util.py#L92-L98
__all__ = [ "get_arc_proxy_file", "get_arc_proxy_user", "get_arc_proxy_lifetime", "get_arc_proxy_vo", "check_arc_proxy_validity", "renew_arc_proxy", ] import os import re import subprocess from law.util import interruptable_popen, tmp_file, parse_duration, quote_cmd from law.logger import get_logger logger = get_logger(__name__) def get_arc_proxy_file(): if "X509_USER_PROXY" in os.environ: return os.environ["X509_USER_PROXY"] else: tmp = "/tmp" for v in ["TMPDIR", "TMP", "TEMP"]: if os.getenv(v): tmp = os.environ[v] break return os.path.join(tmp, "x509up_u{}".format(os.getuid())) def _arc_proxy_info(args=None, proxy_file=None, silent=False): if args is None: args = ["--info"] cmd = ["arcproxy"] + (args or []) if proxy_file is None: proxy_file = get_arc_proxy_file() if proxy_file: proxy_file = os.path.expandvars(os.path.expanduser(proxy_file)) cmd.extend(["--proxy", proxy_file]) code, out, err = interruptable_popen(quote_cmd(cmd), shell=True, executable="/bin/bash", stdout=subprocess.PIPE, stderr=subprocess.PIPE) if err: code = 1 if not silent and code != 0: raise Exception("arcproxy failed: {}".format(err)) return code, out, err def get_arc_proxy_user(proxy_file=None): out = _arc_proxy_info(args=["--infoitem=identity"], proxy_file=proxy_file)[1].strip() try: return re.match(r".*\/CN\=([^\/]+).*", out.strip()).group(1) except: raise Exception("no valid identity found in arc proxy: {}".format(out)) def get_arc_proxy_lifetime(proxy_file=None): out = _arc_proxy_info(args=["--infoitem=validityLeft"], proxy_file=proxy_file)[1].strip() try: return int(out) except: raise Exception("no valid lifetime found in arc proxy: {}".format(out))
BSD 3-Clause New or Revised License
azure/autorest.python
test/vanilla/legacy/Expected/AcceptanceTests/Xml/xmlservice/models/_models_py3.py
Container.__init__
python
def __init__(
    self,
    *,
    name: str,
    properties: "ContainerProperties",
    metadata: Optional[Dict[str, str]] = None,
    **kwargs
):
    super(Container, self).__init__(**kwargs)
    self.name = name
    self.properties = properties
    self.metadata = metadata
:keyword name: Required.
:paramtype name: str
:keyword properties: Required. Properties of a container.
:paramtype properties: ~xmlservice.models.ContainerProperties
:keyword metadata: Dictionary of :code:`<string>`.
:paramtype metadata: dict[str, str]
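A construction sketch; ContainerProperties has its own required fields that are not listed here, so `props` below is only a placeholder for an already-built instance:

from xmlservice.models import Container

props = ...  # placeholder: an xmlservice.models.ContainerProperties built elsewhere
container = Container(
    name="mycontainer",
    properties=props,
    metadata={"owner": "tests", "env": "dev"},
)
print(container.name, container.metadata)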
https://github.com/azure/autorest.python/blob/90d60a965788e3b4c0809e6686bdc3525acac89c/test/vanilla/legacy/Expected/AcceptanceTests/Xml/xmlservice/models/_models_py3.py#L533-L547
import datetime from typing import Dict, List, Optional, Union from azure.core.exceptions import HttpResponseError import msrest.serialization from ._auto_rest_swagger_batxml_service_enums import * class AccessPolicy(msrest.serialization.Model): _validation = { "start": {"required": True}, "expiry": {"required": True}, "permission": {"required": True}, } _attribute_map = { "start": {"key": "Start", "type": "iso-8601"}, "expiry": {"key": "Expiry", "type": "iso-8601"}, "permission": {"key": "Permission", "type": "str"}, } def __init__(self, *, start: datetime.datetime, expiry: datetime.datetime, permission: str, **kwargs): super(AccessPolicy, self).__init__(**kwargs) self.start = start self.expiry = expiry self.permission = permission class AppleBarrel(msrest.serialization.Model): _attribute_map = { "good_apples": {"key": "GoodApples", "type": "[str]", "xml": {"wrapped": True, "itemsName": "Apple"}}, "bad_apples": {"key": "BadApples", "type": "[str]", "xml": {"wrapped": True, "itemsName": "Apple"}}, } def __init__(self, *, good_apples: Optional[List[str]] = None, bad_apples: Optional[List[str]] = None, **kwargs): super(AppleBarrel, self).__init__(**kwargs) self.good_apples = good_apples self.bad_apples = bad_apples class Banana(msrest.serialization.Model): _attribute_map = { "name": {"key": "name", "type": "str", "xml": {"name": "name"}}, "flavor": {"key": "flavor", "type": "str", "xml": {"name": "flavor"}}, "expiration": {"key": "expiration", "type": "iso-8601", "xml": {"name": "expiration"}}, } _xml_map = {"name": "banana"} def __init__( self, *, name: Optional[str] = None, flavor: Optional[str] = None, expiration: Optional[datetime.datetime] = None, **kwargs ): super(Banana, self).__init__(**kwargs) self.name = name self.flavor = flavor self.expiration = expiration class Blob(msrest.serialization.Model): _validation = { "name": {"required": True}, "deleted": {"required": True}, "snapshot": {"required": True}, "properties": {"required": True}, } _attribute_map = { "name": {"key": "Name", "type": "str"}, "deleted": {"key": "Deleted", "type": "bool"}, "snapshot": {"key": "Snapshot", "type": "str"}, "properties": {"key": "Properties", "type": "BlobProperties"}, "metadata": {"key": "Metadata", "type": "{str}"}, } _xml_map = {"name": "Blob"} def __init__( self, *, name: str, deleted: bool, snapshot: str, properties: "BlobProperties", metadata: Optional[Dict[str, str]] = None, **kwargs ): super(Blob, self).__init__(**kwargs) self.name = name self.deleted = deleted self.snapshot = snapshot self.properties = properties self.metadata = metadata class BlobPrefix(msrest.serialization.Model): _validation = { "name": {"required": True}, } _attribute_map = { "name": {"key": "Name", "type": "str"}, } def __init__(self, *, name: str, **kwargs): super(BlobPrefix, self).__init__(**kwargs) self.name = name class BlobProperties(msrest.serialization.Model): _validation = { "last_modified": {"required": True}, "etag": {"required": True}, } _attribute_map = { "last_modified": {"key": "Last-Modified", "type": "rfc-1123"}, "etag": {"key": "Etag", "type": "str"}, "content_length": {"key": "Content-Length", "type": "long"}, "content_type": {"key": "Content-Type", "type": "str"}, "content_encoding": {"key": "Content-Encoding", "type": "str"}, "content_language": {"key": "Content-Language", "type": "str"}, "content_md5": {"key": "Content-MD5", "type": "str"}, "content_disposition": {"key": "Content-Disposition", "type": "str"}, "cache_control": {"key": "Cache-Control", "type": "str"}, "blob_sequence_number": {"key": 
"x-ms-blob-sequence-number", "type": "int"}, "blob_type": {"key": "BlobType", "type": "str"}, "lease_status": {"key": "LeaseStatus", "type": "str"}, "lease_state": {"key": "LeaseState", "type": "str"}, "lease_duration": {"key": "LeaseDuration", "type": "str"}, "copy_id": {"key": "CopyId", "type": "str"}, "copy_status": {"key": "CopyStatus", "type": "str"}, "copy_source": {"key": "CopySource", "type": "str"}, "copy_progress": {"key": "CopyProgress", "type": "str"}, "copy_completion_time": {"key": "CopyCompletionTime", "type": "rfc-1123"}, "copy_status_description": {"key": "CopyStatusDescription", "type": "str"}, "server_encrypted": {"key": "ServerEncrypted", "type": "bool"}, "incremental_copy": {"key": "IncrementalCopy", "type": "bool"}, "destination_snapshot": {"key": "DestinationSnapshot", "type": "str"}, "deleted_time": {"key": "DeletedTime", "type": "rfc-1123"}, "remaining_retention_days": {"key": "RemainingRetentionDays", "type": "int"}, "access_tier": {"key": "AccessTier", "type": "str"}, "access_tier_inferred": {"key": "AccessTierInferred", "type": "bool"}, "archive_status": {"key": "ArchiveStatus", "type": "str"}, } def __init__( self, *, last_modified: datetime.datetime, etag: str, content_length: Optional[int] = None, content_type: Optional[str] = None, content_encoding: Optional[str] = None, content_language: Optional[str] = None, content_md5: Optional[str] = None, content_disposition: Optional[str] = None, cache_control: Optional[str] = None, blob_sequence_number: Optional[int] = None, blob_type: Optional[Union[str, "BlobType"]] = None, lease_status: Optional[Union[str, "LeaseStatusType"]] = None, lease_state: Optional[Union[str, "LeaseStateType"]] = None, lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, copy_id: Optional[str] = None, copy_status: Optional[Union[str, "CopyStatusType"]] = None, copy_source: Optional[str] = None, copy_progress: Optional[str] = None, copy_completion_time: Optional[datetime.datetime] = None, copy_status_description: Optional[str] = None, server_encrypted: Optional[bool] = None, incremental_copy: Optional[bool] = None, destination_snapshot: Optional[str] = None, deleted_time: Optional[datetime.datetime] = None, remaining_retention_days: Optional[int] = None, access_tier: Optional[Union[str, "AccessTier"]] = None, access_tier_inferred: Optional[bool] = None, archive_status: Optional[Union[str, "ArchiveStatus"]] = None, **kwargs ): super(BlobProperties, self).__init__(**kwargs) self.last_modified = last_modified self.etag = etag self.content_length = content_length self.content_type = content_type self.content_encoding = content_encoding self.content_language = content_language self.content_md5 = content_md5 self.content_disposition = content_disposition self.cache_control = cache_control self.blob_sequence_number = blob_sequence_number self.blob_type = blob_type self.lease_status = lease_status self.lease_state = lease_state self.lease_duration = lease_duration self.copy_id = copy_id self.copy_status = copy_status self.copy_source = copy_source self.copy_progress = copy_progress self.copy_completion_time = copy_completion_time self.copy_status_description = copy_status_description self.server_encrypted = server_encrypted self.incremental_copy = incremental_copy self.destination_snapshot = destination_snapshot self.deleted_time = deleted_time self.remaining_retention_days = remaining_retention_days self.access_tier = access_tier self.access_tier_inferred = access_tier_inferred self.archive_status = archive_status class 
Blobs(msrest.serialization.Model): _attribute_map = { "blob_prefix": {"key": "BlobPrefix", "type": "[BlobPrefix]"}, "blob": {"key": "Blob", "type": "[Blob]"}, } def __init__( self, *, blob_prefix: Optional[List["BlobPrefix"]] = None, blob: Optional[List["Blob"]] = None, **kwargs ): super(Blobs, self).__init__(**kwargs) self.blob_prefix = blob_prefix self.blob = blob class ComplexTypeNoMeta(msrest.serialization.Model): _attribute_map = { "id": {"key": "ID", "type": "str"}, } def __init__(self, *, id: Optional[str] = None, **kwargs): super(ComplexTypeNoMeta, self).__init__(**kwargs) self.id = id class ComplexTypeWithMeta(msrest.serialization.Model): _attribute_map = { "id": {"key": "ID", "type": "str"}, } _xml_map = {"name": "XMLComplexTypeWithMeta"} def __init__(self, *, id: Optional[str] = None, **kwargs): super(ComplexTypeWithMeta, self).__init__(**kwargs) self.id = id class Container(msrest.serialization.Model): _validation = { "name": {"required": True}, "properties": {"required": True}, } _attribute_map = { "name": {"key": "Name", "type": "str"}, "properties": {"key": "Properties", "type": "ContainerProperties"}, "metadata": {"key": "Metadata", "type": "{str}"}, }
MIT License
dragonfly/dragonfly
dragonfly/utils/euclidean_synthetic_functions.py
get_mf_borehole_function
python
def get_mf_borehole_function():
    opt_val = 309.523221
    opt_pt = None
    mf_borehole_function = lambda z, x: borehole_function(x, z, opt_val)
    domain_bounds = [[0.05, 0.15], [100, 50000], [63070, 115600], [990, 1110],
                     [63.1, 116], [700, 820], [1120, 1680], [9855, 12045]]
    fidel_bounds = [[0, 1]]
    fidel_to_opt = np.array([1])
    def sf_borehole_obj(x):
        return borehole_function(x, fidel_to_opt, opt_val)
    return mf_borehole_function, sf_borehole_obj, opt_pt, opt_val, fidel_to_opt, \
           fidel_bounds, domain_bounds
Gets the MF BoreHole function.
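A sketch of consuming the returned tuple, assuming the module import path matches the file path above:

import numpy as np
from dragonfly.utils.euclidean_synthetic_functions import get_mf_borehole_function

mf_f, sf_f, opt_pt, opt_val, fidel_to_opt, fidel_bounds, domain_bounds = get_mf_borehole_function()
x = np.array([(lo + hi) / 2.0 for (lo, hi) in domain_bounds])   # midpoint of each domain dimension
print(mf_f(1.0, x))   # at fidelity z = 1 this matches the single-fidelity objective sf_f
print(mf_f(0.5, x))   # a cheaper, lower-fidelity evaluation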
https://github.com/dragonfly/dragonfly/blob/a579b5eadf452e23b07d4caf27b402703b0012b7/dragonfly/utils/euclidean_synthetic_functions.py#L171-L192
from __future__ import division import numpy as np from .general_utils import map_to_cube from ..exd.experiment_caller import EuclideanFunctionCaller def hartmann(x, alpha, A, P, max_val=np.inf): log_sum_terms = (A * (P - x)**2).sum(axis=1) return min(max_val, alpha.dot(np.exp(-log_sum_terms))) def _get_hartmann_data(domain_dim): if domain_dim == 3: A = np.array([[3.0, 10, 30], [0.1, 10, 35], [3.0, 10, 30], [0.1, 10, 35]], dtype=np.float64) P = 1e-4 * np.array([[3689, 1170, 2673], [4699, 4387, 7470], [1091, 8732, 5547], [ 381, 5743, 8828]], dtype=np.float64) alpha = np.array([1.0, 1.2, 3.0, 3.2]) domain = [[0, 1]] * 3 opt_pt = np.array([0.114614, 0.555649, 0.852547]) max_val = 3.86278 elif domain_dim == 6: A = np.array([[ 10, 3, 17, 3.5, 1.7, 8], [0.05, 10, 17, 0.1, 8, 14], [ 3, 3.5, 1.7, 10, 17, 8], [ 17, 8, 0.05, 10, 0.1, 14]], dtype=np.float64) P = 1e-4 * np.array([[1312, 1696, 5569, 124, 8283, 5886], [2329, 4135, 8307, 3736, 1004, 9991], [2348, 1451, 3522, 2883, 3047, 6650], [4047, 8828, 8732, 5743, 1091, 381]], dtype=np.float64) alpha = np.array([1.0, 1.2, 3.0, 3.2]) domain = [[0, 1]] * 6 opt_pt = np.array([0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573]) max_val = 3.322368 else: raise NotImplementedError('Only implemented in 3 and 6 dimensions.') return A, P, alpha, opt_pt, domain, max_val def get_mf_hartmann_function_data(fidel_dim, domain_dim): A, P, alpha, opt_pt, domain_bounds, max_val = _get_hartmann_data(domain_dim) delta = np.array([0.1] * fidel_dim + [0] * (4-fidel_dim)) def mf_hart_obj(z, x): assert len(z) == fidel_dim z_extended = np.append(z, [0] * (4-fidel_dim)) alpha_z = alpha - (1 - z_extended) * delta return hartmann(x, alpha_z, A, P, max_val) def hart_obj(x): return hartmann(x, alpha, A, P, max_val) fidel_to_opt = np.ones(fidel_dim) fidel_bounds = [[0, 1]] * fidel_dim opt_val = hart_obj(opt_pt) return mf_hart_obj, hart_obj, opt_pt, opt_val, fidel_to_opt, fidel_bounds, domain_bounds def currin_exp(x, alpha): x1 = x[0] x2 = x[1] val_1 = 1 - alpha * np.exp(-1/(2 * x2)) val_2 = (2300*x1**3 + 1900*x1**2 + 2092*x1 + 60) / (100*x1**3 + 500*x1**2 + 4*x1 + 20) return val_1 * val_2 def get_mf_currin_exp_function_data(): opt_val = 13.7986850 def mf_currin_exp_obj(z, x): alpha_z = 1 - 0.1 * z return min(opt_val, currin_exp(x, alpha_z)) fidel_to_opt = np.array([1]) opt_pt = None def sf_currin_exp_obj(x): return min(opt_val, currin_exp(x, fidel_to_opt)) fidel_bounds = np.array([[0, 1]]) domain_bounds = np.array([[0, 1], [0, 1]]) return mf_currin_exp_obj, sf_currin_exp_obj, opt_pt, opt_val, fidel_to_opt, fidel_bounds, domain_bounds def branin_function(x, a, b, c, r, s, t): x1 = x[0] x2 = x[1] neg_ret = float(a * (x2 - b*x1**2 + c*x1 - r)**2 + s*(1-t)*np.cos(x1) + s) return -neg_ret def branin_function_alpha(x, alpha, a, r, s): return branin_function(x, a, alpha[0], alpha[1], r, s, alpha[2]) def get_mf_branin_function(fidel_dim): a0 = 1 b0 = 5.1/(4*np.pi**2) c0 = 5/np.pi r0 = 6 s0 = 10 t0 = 1/(8*np.pi) alpha = np.array([b0, c0, t0]) delta = [0.01, 0.1, -0.005] delta = np.array(delta[0:fidel_dim] + [0] * (3 - fidel_dim)) def mf_branin_obj(z, x): assert len(z) == fidel_dim z_extended = np.append(z, [0] * (3-fidel_dim)) alpha_z = alpha - (1 - z_extended) * delta return branin_function_alpha(x, alpha_z, a0, r0, s0) def sf_branin_obj(x): return branin_function(x, a0, b0, c0, r0, s0, t0) fidel_to_opt = np.ones((fidel_dim)) fidel_bounds = [[0, 1]] * fidel_dim opt_pt = np.array([np.pi, 2.275]) opt_val = sf_branin_obj(opt_pt) domain_bounds = [[-5, 10], [0, 15]] return mf_branin_obj, 
sf_branin_obj, opt_pt, opt_val, fidel_to_opt, fidel_bounds, domain_bounds def borehole_function(x, z, max_val): rw = x[0] r = x[1] Tu = x[2] Hu = x[3] Tl = x[4] Hl = x[5] L = x[6] Kw = x[7] frac2 = 2*L*Tu/(np.log(r/rw) * rw**2 * Kw) f2 = min(max_val, 2 * np.pi * Tu * (Hu - Hl)/(np.log(r/rw) * (1 + frac2 + Tu/Tl))) f1 = 5 * Tu * (Hu - Hl)/(np.log(r/rw) * (1.5 + frac2 + Tu/Tl)) return float(f2*z + f1*(1-z))
MIT License
python-openxml/python-docx
docx/image/jpeg.py
_Marker.segment_length
python
def segment_length(self):
    return self._segment_length
The length in bytes of this marker's segment
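A usage sketch; these are underscore-prefixed internals of python-docx, and the file name is only illustrative:

from docx.image.jpeg import _JfifMarkers

with open("picture.jpg", "rb") as f:      # illustrative path; any JFIF/Exif JPEG works
    markers = _JfifMarkers.from_stream(f)

print(markers)                            # one row per marker: offset, segment length, code, name
print(markers.sof.segment_length)         # segment length of the start-of-frame marker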
https://github.com/python-openxml/python-docx/blob/36cac78de080d412e9e50d56c2784e33655cad59/docx/image/jpeg.py#L314-L318
from __future__ import absolute_import, division, print_function from ..compat import BytesIO from .constants import JPEG_MARKER_CODE, MIME_TYPE from .helpers import BIG_ENDIAN, StreamReader from .image import BaseImageHeader from .tiff import Tiff class Jpeg(BaseImageHeader): @property def content_type(self): return MIME_TYPE.JPEG @property def default_ext(self): return 'jpg' class Exif(Jpeg): @classmethod def from_stream(cls, stream): markers = _JfifMarkers.from_stream(stream) px_width = markers.sof.px_width px_height = markers.sof.px_height horz_dpi = markers.app1.horz_dpi vert_dpi = markers.app1.vert_dpi return cls(px_width, px_height, horz_dpi, vert_dpi) class Jfif(Jpeg): @classmethod def from_stream(cls, stream): markers = _JfifMarkers.from_stream(stream) px_width = markers.sof.px_width px_height = markers.sof.px_height horz_dpi = markers.app0.horz_dpi vert_dpi = markers.app0.vert_dpi return cls(px_width, px_height, horz_dpi, vert_dpi) class _JfifMarkers(object): def __init__(self, markers): super(_JfifMarkers, self).__init__() self._markers = list(markers) def __str__(self): header = ' offset seglen mc name\n======= ====== == =====' tmpl = '%7d %6d %02X %s' rows = [] for marker in self._markers: rows.append(tmpl % ( marker.offset, marker.segment_length, ord(marker.marker_code), marker.name )) lines = [header] + rows return '\n'.join(lines) @classmethod def from_stream(cls, stream): marker_parser = _MarkerParser.from_stream(stream) markers = [] for marker in marker_parser.iter_markers(): markers.append(marker) if marker.marker_code == JPEG_MARKER_CODE.SOS: break return cls(markers) @property def app0(self): for m in self._markers: if m.marker_code == JPEG_MARKER_CODE.APP0: return m raise KeyError('no APP0 marker in image') @property def app1(self): for m in self._markers: if m.marker_code == JPEG_MARKER_CODE.APP1: return m raise KeyError('no APP1 marker in image') @property def sof(self): for m in self._markers: if m.marker_code in JPEG_MARKER_CODE.SOF_MARKER_CODES: return m raise KeyError('no start of frame (SOFn) marker in image') class _MarkerParser(object): def __init__(self, stream_reader): super(_MarkerParser, self).__init__() self._stream = stream_reader @classmethod def from_stream(cls, stream): stream_reader = StreamReader(stream, BIG_ENDIAN) return cls(stream_reader) def iter_markers(self): marker_finder = _MarkerFinder.from_stream(self._stream) start = 0 marker_code = None while marker_code != JPEG_MARKER_CODE.EOI: marker_code, segment_offset = marker_finder.next(start) marker = _MarkerFactory( marker_code, self._stream, segment_offset ) yield marker start = segment_offset + marker.segment_length class _MarkerFinder(object): def __init__(self, stream): super(_MarkerFinder, self).__init__() self._stream = stream @classmethod def from_stream(cls, stream): return cls(stream) def next(self, start): position = start while True: position = self._offset_of_next_ff_byte(start=position) position, byte_ = self._next_non_ff_byte(start=position+1) if byte_ == b'\x00': continue marker_code, segment_offset = byte_, position+1 break return marker_code, segment_offset def _next_non_ff_byte(self, start): self._stream.seek(start) byte_ = self._read_byte() while byte_ == b'\xFF': byte_ = self._read_byte() offset_of_non_ff_byte = self._stream.tell() - 1 return offset_of_non_ff_byte, byte_ def _offset_of_next_ff_byte(self, start): self._stream.seek(start) byte_ = self._read_byte() while byte_ != b'\xFF': byte_ = self._read_byte() offset_of_ff_byte = self._stream.tell() - 1 return 
offset_of_ff_byte def _read_byte(self): byte_ = self._stream.read(1) if not byte_: raise Exception('unexpected end of file') return byte_ def _MarkerFactory(marker_code, stream, offset): if marker_code == JPEG_MARKER_CODE.APP0: marker_cls = _App0Marker elif marker_code == JPEG_MARKER_CODE.APP1: marker_cls = _App1Marker elif marker_code in JPEG_MARKER_CODE.SOF_MARKER_CODES: marker_cls = _SofMarker else: marker_cls = _Marker return marker_cls.from_stream(stream, marker_code, offset) class _Marker(object): def __init__(self, marker_code, offset, segment_length): super(_Marker, self).__init__() self._marker_code = marker_code self._offset = offset self._segment_length = segment_length @classmethod def from_stream(cls, stream, marker_code, offset): if JPEG_MARKER_CODE.is_standalone(marker_code): segment_length = 0 else: segment_length = stream.read_short(offset) return cls(marker_code, offset, segment_length) @property def marker_code(self): return self._marker_code @property def name(self): return JPEG_MARKER_CODE.marker_names[self._marker_code] @property def offset(self): return self._offset @property
MIT License
svanoort/pyresttest
pyresttest/validators.py
ExtractTestValidator.get_readable_config
python
def get_readable_config(self, context=None):
    return "Extractor: " + self.extractor.get_readable_config(context=context)
Get a human-readable config string
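A sketch of rendering the readable config, assuming the validators module is importable as shown:

from pyresttest import validators

extractor = validators.MiniJsonExtractor.parse("person.name")
validator = validators.ExtractTestValidator()
validator.extractor = extractor
print(validator.get_readable_config())
# Extractor: Extractor Type: jsonpath_mini, Query: "person.name", Templated?: False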
https://github.com/svanoort/pyresttest/blob/f92acf8e838c4623ddd8e12e880f31046ff9317f/pyresttest/validators.py#L460-L462
import logging import json import operator import traceback import string import os import re import sys from . import parsing from . import six from .six import binary_type from .six import text_type PYTHON_MAJOR_VERSION = sys.version_info[0] if PYTHON_MAJOR_VERSION > 2: from past.builtins import basestring from past.builtins import long logger = logging.getLogger('pyresttest.validators') COMPARATORS = { 'count_eq': lambda x, y: safe_length(x) == y, 'lt': operator.lt, 'less_than': operator.lt, 'le': operator.lt, 'less_than_or_equal': operator.lt, 'eq': operator.eq, 'equals': operator.eq, 'str_eq': lambda x, y: operator.eq(str(x), str(y)), 'ne': operator.ne, 'not_equals': operator.ne, 'ge': operator.ge, 'greater_than_or_equal': operator.ge, 'gt': operator.gt, 'greater_than': operator.gt, 'contains': lambda x, y: x and operator.contains(x, y), 'contained_by': lambda x, y: y and operator.contains(y, x), 'regex': lambda x, y: regex_compare(str(x), str(y)), 'type': lambda x, y: test_type(x, y) } COMPARATORS['length_eq'] = COMPARATORS['count_eq'] TYPES = { 'null': type(None), 'none': type(None), 'number': (int, long, float), 'int': (int, long), 'float': float, 'boolean': bool, 'string': basestring, 'array': list, 'list': list, 'dict': dict, 'map': dict, 'scalar': (bool, int, long, float, basestring, type(None)), 'collection': (list, dict, set) } def test_type(val, mytype): typelist = TYPES.get(mytype.lower()) if typelist is None: raise TypeError( "Type {0} is not a valid type to test against!".format(mytype.lower())) try: for testtype in typelist: if isinstance(val, testtype): return True return False except TypeError: return isinstance(val, typelist) VALIDATOR_TESTS = { 'exists': lambda x: x is not None, 'not_exists': lambda x: x is None } EXTRACTORS = dict() VALIDATORS = dict() def safe_length(var): output = -1 try: output = len(var) except: pass return output def regex_compare(input, regex): return bool(re.search(regex, input)) FAILURE_INVALID_RESPONSE = 'Invalid HTTP Response Code' FAILURE_CURL_EXCEPTION = 'Curl Exception' FAILURE_TEST_EXCEPTION = 'Test Execution Exception' FAILURE_VALIDATOR_FAILED = 'Validator Failed' FAILURE_VALIDATOR_EXCEPTION = 'Validator Exception' FAILURE_EXTRACTOR_EXCEPTION = 'Extractor Exception' class Failure(object): message = None failure_type = None details = None validator = None def __nonzero__(self): return False def __bool__(self): return False def __str__(self): return self.message def __init__(self, message="", details="", failure_type=None, validator=None): self.message = message self.details = details self.validator = validator self.failure_type = failure_type class AbstractExtractor(object): extractor_type = None query = None is_templated = False is_body_extractor = False is_header_extractor = False args = None def __str__(self): return "Extractor type: {0}, query: {1}, is_templated: {2}, args: {3}".format(self.extractor_type, self.query, self.is_templated, self.args) def extract_internal(self, query=None, body=None, headers=None, args=None): pass def extract(self, body=None, headers=None, context=None): query = self.templated_query(context=context) args = self.args return self.extract_internal(query=query, body=body, headers=headers, args=self.args) def templated_query(self, context=None): query = self.query if context and self.is_templated: query = string.Template(query).safe_substitute( context.get_values()) return query def get_readable_config(self, context=None): query = self.templated_query(context=context) output = 'Extractor Type: {0}, Query: 
"{1}", Templated?: {2}'.format( self.extractor_type, query, self.is_templated) args_string = None if self.args: args_string = ", Args: " + str(self.args) output = output + args_string return output @classmethod def configure_base(cls, config, extractor_base): if isinstance(config, dict): try: config = config['template'] extractor_base.is_templated = True extractor_base.query = config except KeyError: raise ValueError( "Cannot define a dictionary config for abstract extractor without it having template key") elif isinstance(config, basestring): extractor_base.query = config extractor_base.is_templated = False else: raise TypeError( "Base extractor must have a string or {template: querystring} configuration node!") return extractor_base class MiniJsonExtractor(AbstractExtractor): extractor_type = 'jsonpath_mini' is_body_extractor = True def extract_internal(self, query=None, args=None, body=None, headers=None): if PYTHON_MAJOR_VERSION > 2 and isinstance(body, binary_type): body = text_type(body, 'utf-8') try: body = json.loads(body) return self.query_dictionary(query, body) except ValueError: raise ValueError("Not legal JSON!") @staticmethod def query_dictionary(query, dictionary, delimiter='.'): try: stripped_query = query.strip(delimiter) if stripped_query: for x in stripped_query.split(delimiter): try: x = int(x) dictionary = dictionary[x] except ValueError: dictionary = dictionary[x] except: return None return dictionary @classmethod def parse(cls, config): base = MiniJsonExtractor() return cls.configure_base(config, base) return base class HeaderExtractor(AbstractExtractor): extractor_type = 'header' is_header_extractor = True def extract_internal(self, query=None, args=None, body=None, headers=None): low = query.lower() extracted = [y[1] for y in filter(lambda x: x[0] == low, headers)] if len(extracted) == 0: raise ValueError("Invalid header name {0}".format(query)) elif len(extracted) == 1: return extracted[0] else: return extracted @classmethod def parse(cls, config, extractor_base=None): base = HeaderExtractor() return cls.configure_base(config, base) class RawBodyExtractor(AbstractExtractor): extractor_type = 'raw_body' is_header_extractor = False is_body_extractor = True def extract_internal(self, query=None, args=None, body=None, headers=None): return body @classmethod def parse(cls, config, extractor_base=None): base = RawBodyExtractor() return base def _get_extractor(config_dict): extractor = None extract_config = None for key, value in config_dict.items(): if key in EXTRACTORS: return parse_extractor(key, value) else: raise Exception( 'No valid extractor name to use in input: {0}'.format(config_dict)) class AbstractValidator(object): name = None config = None def validate(self, body=None, headers=None, context=None): pass class ComparatorValidator(AbstractValidator): name = 'ComparatorValidator' config = None extractor = None comparator = None comparator_name = "" expected = None isTemplateExpected = False def get_readable_config(self, context=None): string_frags = list() string_frags.append( "Extractor: " + self.extractor.get_readable_config(context=context)) if isinstance(self.expected, AbstractExtractor): string_frags.append("Expected value extractor: " + self.expected.get_readable_config(context=context)) elif self.isTemplateExpected: string_frags.append( 'Expected is templated, raw value: {0}'.format(self.expected)) return os.linesep.join(string_frags) def validate(self, body=None, headers=None, context=None): try: extracted_val = self.extractor.extract( body=body, 
headers=headers, context=context) except Exception as e: trace = traceback.format_exc() return Failure(message="Extractor threw exception", details=trace, validator=self, failure_type=FAILURE_EXTRACTOR_EXCEPTION) expected_val = None if isinstance(self.expected, AbstractExtractor): try: expected_val = self.expected.extract( body=body, headers=headers, context=context) except Exception as e: trace = traceback.format_exc() return Failure(message="Expected value extractor threw exception", details=trace, validator=self, failure_type=FAILURE_EXTRACTOR_EXCEPTION) elif self.isTemplateExpected and context: expected_val = string.Template( self.expected).safe_substitute(context.get_values()) else: expected_val = self.expected if isinstance(extracted_val, binary_type) and isinstance(expected_val, text_type): expected_val = expected_val.encode('utf-8') comparison = self.comparator(extracted_val, expected_val) if not comparison: failure = Failure(validator=self) failure.message = "Comparison failed, evaluating {0}({1}, {2}) returned False".format( self.comparator_name, extracted_val, expected_val) failure.details = self.get_readable_config(context=context) failure.failure_type = FAILURE_VALIDATOR_FAILED return failure else: return True @staticmethod def parse(config): output = ComparatorValidator() config = parsing.lowercase_keys(parsing.flatten_dictionaries(config)) output.config = config output.extractor = _get_extractor(config) if output.extractor is None: raise ValueError( "Extract function for comparison is not valid or not found!") if 'comparator' not in config: output.comparator_name = 'eq' else: output.comparator_name = config['comparator'].lower() output.comparator = COMPARATORS[output.comparator_name] if not output.comparator: raise ValueError("Invalid comparator given!") try: expected = config['expected'] except KeyError: raise ValueError( "No expected value found in comparator validator config, one must be!") if isinstance(expected, basestring) or isinstance(expected, (int, long, float, complex)): output.expected = expected elif isinstance(expected, dict): expected = parsing.lowercase_keys(expected) template = expected.get('template') if template: if not isinstance(template, basestring): raise ValueError( "Can't template a comparator-validator unless template value is a string") output.isTemplateExpected = True output.expected = template else: output.expected = _get_extractor(expected) if not output.expected: raise ValueError( "Can't supply a non-template, non-extract dictionary to comparator-validator") return output class ExtractTestValidator(AbstractValidator): name = 'ExtractTestValidator' extractor = None test_fn = None test_name = None config = None
Apache License 2.0
owasp/python-honeypot
api/server.py
error_401
python
def error_401(error):
    return jsonify(
        msg_structure(status="error", msg=error.description)
    ), 401
handle the 401 HTTP error

Args:
    error: the flask error

Returns:
    401 JSON error
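A standalone sketch that mirrors the same pattern with a throwaway Flask app (it is not the project's actual app or routes):

from flask import Flask, abort, jsonify

demo = Flask(__name__)

@demo.errorhandler(401)
def handle_401(error):
    # same shape as the handler above: wrap abort()'s description in a JSON envelope
    return jsonify({"status": "error", "msg": error.description}), 401

@demo.route("/protected")
def protected():
    abort(401, "invalid API key")

with demo.test_client() as client:
    resp = client.get("/protected")
    print(resp.status_code, resp.get_json())   # 401 {'msg': 'invalid API key', 'status': 'error'}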
https://github.com/owasp/python-honeypot/blob/a082d1de72e22aea135eab85c5245514adf93c2b/api/server.py#L142-L154
import os import binascii import io from flask import (Flask, Response, abort, jsonify, render_template, send_file) from flask import request as flask_request from api.database_queries import ( filter_by_date, filter_by_fields, filter_by_module_name, filter_by_regex, event_types, group_by_elements, filter_by_element) from api.utility import ( all_mime_types, fix_limit, fix_skip, fix_filter_query, msg_structure, root_dir) from config import ( api_configuration, user_configuration) from core.alert import write_to_api_console from core.get_modules import load_all_modules from database.connector import elasticsearch_events from flask_swagger import swagger from flask_swagger_ui import get_swaggerui_blueprint from core.messages import load_messages DOCS_URL = '/api/docs' API_URL = 'http://localhost:5000/docs-configuration' documentation_settings = get_swaggerui_blueprint( DOCS_URL, API_URL, config={ 'app_name': "Python Honeypot Api" }, ) template_dir = os.path.join( os.path.join( os.path.dirname( os.path.dirname(__file__) ), "web" ), "static" ) app = Flask( __name__, template_folder=template_dir ) app.config.from_object( __name__ ) def get_file(filename): try: src = os.path.join(root_dir(), filename) return open(src, 'rb').read() except IOError: abort(404) def get_value_from_request(_key): try: value = flask_request.args[_key] except Exception: try: value = flask_request.form[_key] except Exception: try: value = flask_request.cookies[_key] except Exception: value = None return value def is_authorized(): api_access_key = app.config["OWASP_HONEYPOT_CONFIG"]["api_access_key"] key_from_request = get_value_from_request("key") if api_access_key is not None and api_access_key != key_from_request: abort(401, "invalid API key") return True @app.errorhandler(400) def error_400(error): return jsonify( msg_structure(status="error", msg=error.description) ), 400 @app.errorhandler(401)
Apache License 2.0
wdm0006/git-pandas
gitpandas/repository.py
Repository.commit_history
python
def commit_history(self, branch='master', limit=None, days=None, ignore_globs=None, include_globs=None):
    if limit is None:
        if days is None:
            ds = [[
                x.author.name,
                x.committer.name,
                x.committed_date,
                x.message,
                self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs)
            ] for x in self.repo.iter_commits(branch, max_count=sys.maxsize)]
        else:
            ds = []
            c_date = time.time()
            commits = self.repo.iter_commits(branch, max_count=sys.maxsize)
            dlim = time.time() - days * 24 * 3600
            while c_date > dlim:
                try:
                    if sys.version_info.major == 2:
                        x = commits.next()
                    else:
                        x = commits.__next__()
                except StopIteration:
                    break

                c_date = x.committed_date
                if c_date > dlim:
                    ds.append([
                        x.author.name,
                        x.committer.name,
                        x.committed_date,
                        x.message,
                        self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs)
                    ])
    else:
        ds = [[
            x.author.name,
            x.committer.name,
            x.committed_date,
            x.message,
            self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs)
        ] for x in self.repo.iter_commits(branch, max_count=limit)]

    ds = [x[:-1] + [sum([x[-1][key]['lines'] for key in x[-1].keys()]),
                    sum([x[-1][key]['insertions'] for key in x[-1].keys()]),
                    sum([x[-1][key]['deletions'] for key in x[-1].keys()]),
                    sum([x[-1][key]['insertions'] for key in x[-1].keys()]) -
                    sum([x[-1][key]['deletions'] for key in x[-1].keys()])
                    ] for x in ds if len(x[-1].keys()) > 0]

    df = DataFrame(ds, columns=['author', 'committer', 'date', 'message', 'lines', 'insertions', 'deletions', 'net'])
    df['date'] = to_datetime(df['date'].map(datetime.datetime.fromtimestamp))
    df.set_index(keys=['date'], drop=True, inplace=True)

    return df
Returns a pandas DataFrame containing all of the commits for a given branch. Included in that DataFrame will be the columns:

 * date (index)
 * author
 * committer
 * message
 * lines
 * insertions
 * deletions
 * net

:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param days: (optional, default=None) number of days to return, if limit is None
:param ignore_globs: (optional, default=None) a list of globs to ignore, default of None excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
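A usage sketch, assuming Repository is exported at package level and the working directory is a git checkout with a master branch:

from gitpandas import Repository

repo = Repository(working_dir=".")
ch = repo.commit_history(branch="master", limit=100, include_globs=["*.py"])
print(ch[["insertions", "deletions", "net"]].resample("W").sum().head())          # weekly churn
print(ch.groupby("committer")["net"].sum().sort_values(ascending=False).head())   # top committers by net lines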
https://github.com/wdm0006/git-pandas/blob/0afe5c97cd34a463ea998128370d5da7a5ef0365/gitpandas/repository.py#L235-L316
import os import sys import datetime import time import json import logging import tempfile import fnmatch import shutil import warnings import numpy as np from git import Repo, GitCommandError from gitpandas.cache import multicache, EphemeralCache, RedisDFCache from pandas import DataFrame, to_datetime try: from joblib import delayed, Parallel _has_joblib = True except ImportError as e: _has_joblib = False __author__ = 'willmcginnis' def _parallel_cumulative_blame_func(self_, x, committer, ignore_globs, include_globs): blm = self_.blame( rev=x['rev'], committer=committer, ignore_globs=ignore_globs, include_globs=include_globs ) x.update(json.loads(blm.to_json())['loc']) return x class Repository(object): def __init__(self, working_dir=None, verbose=False, tmp_dir=None, cache_backend=None): self.verbose = verbose self.log = logging.getLogger('gitpandas') self.__delete_hook = False self._git_repo_name = None self.cache_backend = cache_backend if working_dir is not None: if working_dir[:3] == 'git': if tmp_dir is None: if self.verbose: print('cloning repository: %s into a temporary location' % (working_dir,)) dir_path = tempfile.mkdtemp() else: dir_path = tmp_dir self.repo = Repo.clone_from(working_dir, dir_path) self._git_repo_name = working_dir.split(os.sep)[-1].split('.')[0] self.git_dir = dir_path self.__delete_hook = True else: self.git_dir = working_dir self.repo = Repo(self.git_dir) else: self.git_dir = os.getcwd() self.repo = Repo(self.git_dir) if self.verbose: print('Repository [%s] instantiated at directory: %s' % (self._repo_name(), self.git_dir)) def __del__(self): if self.__delete_hook: if os.path.exists(self.git_dir): shutil.rmtree(self.git_dir) def is_bare(self): return self.repo.bare def has_coverage(self): if os.path.exists(self.git_dir + os.sep + '.coverage'): try: with open(self.git_dir + os.sep + '.coverage', 'r') as f: blob = f.read() blob = blob.split('!')[2] json.loads(blob) return True except Exception: return False else: return False def coverage(self): if not self.has_coverage(): return DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage']) with open(self.git_dir + os.sep + '.coverage', 'r') as f: blob = f.read() blob = blob.split('!')[2] cov = json.loads(blob) ds = [] for filename in cov['lines'].keys(): idx = 0 try: with open(filename, 'r') as f: for idx, _ in enumerate(f): pass except FileNotFoundError as e: if self.verbose: warnings.warn('Could not find file %s for coverage' % (filename, )) num_lines = idx + 1 try: short_filename = filename.split(self.git_dir + os.sep)[1] ds.append([short_filename, len(cov['lines'][filename]), num_lines]) except IndexError as e: if self.verbose: warnings.warn('Could not find file %s for coverage' % (filename, )) df = DataFrame(ds, columns=['filename', 'lines_covered', 'total_lines']) df['coverage'] = df['lines_covered'] / df['total_lines'] return df def hours_estimate(self, branch='master', grouping_window=0.5, single_commit_hours=0.5, limit=None, days=None, committer=True, ignore_globs=None, include_globs=None): max_diff_in_minutes = grouping_window * 60.0 first_commit_addition_in_minutes = single_commit_hours * 60.0 ch = self.commit_history(branch=branch, limit=limit, days=days, ignore_globs=ignore_globs, include_globs=include_globs) if committer: by = 'committer' else: by = 'author' people = set(ch[by].values) ds = [] for person in people: commits = ch[ch[by] == person] commits_ts = [x * 10e-10 for x in sorted(commits.index.values.tolist())] if len(commits_ts) < 2: ds.append([person, 0]) continue def 
estimate(index, date): next_ts = commits_ts[index + 1] diff_in_minutes = next_ts - date diff_in_minutes /= 60.0 if diff_in_minutes < max_diff_in_minutes: return diff_in_minutes / 60.0 return first_commit_addition_in_minutes / 60.0 hours = [estimate(a, b) for a, b in enumerate(commits_ts[:-1])] hours = sum(hours) ds.append([person, hours]) df = DataFrame(ds, columns=[by, 'hours']) return df
BSD 3-Clause New or Revised License
pipermerriam/eth-testrpc
testrpc/client/client.py
EthTesterClient._send_transaction
python
def _send_transaction(self, _from=None, to=None, gas=None, gas_price=None, value=0, data=b''):
    if _from is None:
        _from = self.get_coinbase()

    _from = normalize_address(_from)

    try:
        sender = t.keys[t.accounts.index(_from)]
    except ValueError:
        if _from in self.unlocked_accounts:
            unlock_expiration = self.unlocked_accounts[_from]
            if unlock_expiration is None or unlock_expiration > time.time():
                sender = self.passphrase_account_keys[_from]
            else:
                raise ValueError("Account locked. Unlock before sending tx")
        else:
            raise

    if to is None:
        to = b''
    to = normalize_address(to, allow_blank=True)

    if data is None:
        data = b''
    data = decode_hex(data)

    try:
        if gas is not None:
            t.gas_limit = gas
        if gas_price is not None:
            t.gas_price = gas_price
        output = self.evm.send(sender=sender, to=to, value=value, evmdata=data)
    finally:
        t.gas_limit = t.GAS_LIMIT
        t.gas_price = t.GAS_PRICE

    return output
The tester doesn't care about gas so we discard it.
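A rough sketch of exercising the helper directly (it is normally reached through the JSON-RPC layer); the return value is the raw output of evm.send, not a transaction hash:

from testrpc.client.client import EthTesterClient

client = EthTesterClient()
coinbase = client.get_coinbase()
# gas/gas_price are applied to the tester chain for this call only and reset in the finally block
output = client._send_transaction(_from=coinbase, to=coinbase, value=1000, gas=3000000)
print(output)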
https://github.com/pipermerriam/eth-testrpc/blob/328e1ba3dfce6273527773505701522febef79a0/testrpc/client/client.py#L188-L230
import os import time import itertools import functools from testrpc.compat import threading import rlp from ethereum import transactions from ethereum import tester as t from ethereum import config from ethereum.utils import ( privtoaddr, ) from .utils import ( is_string, coerce_args_to_bytes, strip_0x, encode_32bytes, encode_address, encode_data, normalize_number, normalize_address, decode_hex, mk_random_privkey, is_array, normalize_block_identifier) from .serializers import ( serialize_txn, serialize_txn_receipt, serialize_block, ) from .filters import ( check_filter_topics_validity, process_block, get_filter_bounds, ) DEFAULT_GAS_LIMIT = t.gas_limit = t.GAS_LIMIT = int(os.environ.get('TESTRPC_GAS_LIMIT', 4000000)) config.default_config['GENESIS_GAS_LIMIT'] = DEFAULT_GAS_LIMIT def with_lock(client_method): @functools.wraps(client_method) def inner(self, *args, **kwargs): self._evm_lock.acquire() try: return client_method(self, *args, **kwargs) finally: self._evm_lock.release() return inner class EthTesterClient(object): locked_accounts = None homestead_block_number = 0 dao_fork_block_number = 0 anti_dos_fork_block_number = 0 clearing_fork_block_number = 0 dao_fork_support = True def __init__(self): self._evm_lock = threading.Lock() self.snapshots = [] self.reset_evm() self.evm.block.config['HOMESTEAD_FORK_BLKNUM'] = self.homestead_block_number self.evm.block.config['DAO_FORK_BLKNUM'] = self.dao_fork_block_number self.evm.block.config['ANTI_DOS_FORK_BLKNUM'] = self.anti_dos_fork_block_number self.evm.block.config['CLEARING_FORK_BLKNUM'] = self.clearing_fork_block_number self.passphrase_accounts = {} self.passphrase_account_keys = {} self.unlocked_accounts = {} self.log_filters = {} self.log_filters_id_generator = itertools.count() @with_lock def reset_evm(self, snapshot_idx=None): if snapshot_idx is not None: self.revert_evm(snapshot_idx) else: self.evm = t.state() self.evm.block.gas_limit = DEFAULT_GAS_LIMIT @with_lock def snapshot_evm(self): self.snapshots.append((self.evm.block.number, self.evm.snapshot())) return len(self.snapshots) - 1 @with_lock def revert_evm(self, snapshot_idx=None, reset_logs=False): if len(self.snapshots) == 0: raise ValueError("No snapshots to revert to") if snapshot_idx is not None: block_number, snapshot = self.snapshots.pop(snapshot_idx) else: block_number, snapshot = self.snapshots.pop() del self.evm.blocks[block_number:] self.evm.revert(snapshot) self.evm.blocks.append(self.evm.block) @with_lock def mine_block(self): self.evm.mine() def wait_for_block(self, block_number, max_wait=0): while self.get_block_number() < block_number: self.mine_block() return self.get_block_by_number(self.get_block_number()) def wait_for_transaction(self, txn_hash, max_wait=0): return self.get_transaction_receipt(txn_hash) def get_max_gas(self): return t.gas_limit def _get_transaction_by_hash(self, txn_hash): txn_hash = strip_0x(txn_hash) if len(txn_hash) == 64: txn_hash = decode_hex(txn_hash) for block in reversed(self.evm.blocks): txn_hashes = block.get_transaction_hashes() if txn_hash in txn_hashes: txn_index = txn_hashes.index(txn_hash) txn = block.transaction_list[txn_index] break else: raise ValueError("Transaction not found") return block, txn, txn_index def _get_block_by_number(self, block_number="latest"): if block_number == "latest": if not self.evm.blocks: raise ValueError("No blocks") elif len(self.evm.blocks) > 1: return self.evm.blocks[-2] else: return self.evm.blocks[-1] elif block_number == "earliest": return self.evm.blocks[0] elif block_number == "pending": 
return self.evm.block else: block_number = normalize_number(block_number) if block_number >= len(self.evm.blocks): raise ValueError("Invalid block number") return self.evm.blocks[block_number] def _get_block_by_hash(self, block_hash): if len(block_hash) > 32: block_hash = decode_hex(strip_0x(block_hash)) for block in self.evm.blocks: if block.hash == block_hash: return block else: raise ValueError("Could not find block for provided hash") @with_lock @coerce_args_to_bytes
MIT License
vlsida/openram
compiler/base/verilog.py
verilog.register_inputs
python
def register_inputs(self, port):
    self.add_regs(port)
    self.add_flops(port)
Register the control signal, address and data inputs.
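A sketch of where this mixin runs in a normal OpenRAM flow; it needs a technology/config setup, and the exact constructor signatures vary between OpenRAM versions, so treat the calls below as placeholders:

import globals as g

g.init_openram("myconfig.py")            # technology and configuration setup (path is illustrative)
from sram_config import sram_config
from sram import sram

c = sram_config(word_size=16, num_words=64)
s = sram(c, name="sram_16x64")
s.verilog_write("sram_16x64.v")          # register_inputs() is called once per port while emitting the module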
https://github.com/vlsida/openram/blob/f66aac3264598eeae31225c62b6a4af52412d407/compiler/base/verilog.py#L113-L118
import math from tech import spice class verilog: def __init__(self): pass def verilog_write(self, verilog_name): self.vf = open(verilog_name, "w") self.vf.write("// OpenRAM SRAM model\n") self.vf.write("// Words: {0}\n".format(self.num_words)) self.vf.write("// Word size: {0}\n".format(self.word_size)) if self.write_size: self.vf.write("// Write size: {0}\n\n".format(self.write_size)) else: self.vf.write("\n") try: self.vdd_name = spice["power"] except KeyError: self.vdd_name = "vdd" try: self.gnd_name = spice["ground"] except KeyError: self.gnd_name = "gnd" self.vf.write("module {0}(\n".format(self.name)) self.vf.write("`ifdef USE_POWER_PINS\n") self.vf.write(" {},\n".format(self.vdd_name)) self.vf.write(" {},\n".format(self.gnd_name)) self.vf.write("`endif\n") for port in self.all_ports: if port in self.readwrite_ports: self.vf.write("// Port {0}: RW\n".format(port)) elif port in self.read_ports: self.vf.write("// Port {0}: R\n".format(port)) elif port in self.write_ports: self.vf.write("// Port {0}: W\n".format(port)) if port in self.readwrite_ports: self.vf.write(" clk{0},csb{0},web{0},".format(port)) if self.write_size: self.vf.write("wmask{},".format(port)) if self.num_spare_cols > 0: self.vf.write("spare_wen{0},".format(port)) self.vf.write("addr{0},din{0},dout{0}".format(port)) elif port in self.write_ports: self.vf.write(" clk{0},csb{0},".format(port)) if self.write_size: self.vf.write("wmask{},".format(port)) if self.num_spare_cols > 0: self.vf.write("spare_wen{0},".format(port)) self.vf.write("addr{0},din{0}".format(port)) elif port in self.read_ports: self.vf.write(" clk{0},csb{0},addr{0},dout{0}".format(port)) if port != self.all_ports[-1]: self.vf.write(",\n") self.vf.write("\n );\n\n") if self.write_size: self.num_wmasks = int(math.ceil(self.word_size / self.write_size)) self.vf.write(" parameter NUM_WMASKS = {0} ;\n".format(self.num_wmasks)) self.vf.write(" parameter DATA_WIDTH = {0} ;\n".format(self.word_size + self.num_spare_cols)) self.vf.write(" parameter ADDR_WIDTH = {0} ;\n".format(self.addr_size)) self.vf.write(" parameter RAM_DEPTH = 1 << ADDR_WIDTH;\n") self.vf.write(" // FIXME: This delay is arbitrary.\n") self.vf.write(" parameter DELAY = 3 ;\n") self.vf.write(" parameter VERBOSE = 1 ; //Set to 0 to only display warnings\n") self.vf.write(" parameter T_HOLD = 1 ; //Delay to hold dout value after posedge. Value is arbitrary\n") self.vf.write("\n") self.vf.write("`ifdef USE_POWER_PINS\n") self.vf.write(" inout {};\n".format(self.vdd_name)) self.vf.write(" inout {};\n".format(self.gnd_name)) self.vf.write("`endif\n") for port in self.all_ports: self.add_inputs_outputs(port) self.vf.write("\n") for port in self.all_ports: self.register_inputs(port) self.vf.write("reg [DATA_WIDTH-1:0] mem [0:RAM_DEPTH-1];\n") for port in self.all_ports: if port in self.write_ports: self.add_write_block(port) if port in self.read_ports: self.add_read_block(port) self.vf.write("\n") self.vf.write("endmodule\n") self.vf.close()
BSD 3-Clause New or Revised License
irmen/pyro5
Pyro5/nameserver.py
BroadcastServer.runInThread
python
def runInThread(self):
    thread = threading.Thread(target=self.__requestLoop)
    thread.setDaemon(True)
    thread.start()
    log.debug("broadcast server loop running in own thread")
    return thread
Run the broadcast server loop in its own thread.
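A usage sketch; the name server location is illustrative and must point at a running name server:

import Pyro5.core
import Pyro5.nameserver

ns_uri = Pyro5.core.URI("PYRO:Pyro.NameServer@localhost:9090")
bc = Pyro5.nameserver.BroadcastServer(ns_uri)
print("answering name server broadcasts on", bc.locationStr)
thread = bc.runInThread()     # daemon thread; returns immediately
# ... keep the hosting application alive for as long as lookups should be answered, then:
bc.close()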
https://github.com/irmen/pyro5/blob/3310d49aebcdb57b5e153b59b0b3f0d92cbd90a8/Pyro5/nameserver.py#L613-L619
import warnings import re import sys import logging import socket import time import contextlib import threading from collections.abc import MutableMapping try: import sqlite3 except ImportError: pass from . import config, core, socketutil, server, errors from .errors import NamingError, PyroError, ProtocolError __all__ = ["start_ns_loop", "start_ns"] log = logging.getLogger("Pyro5.naming") class MemoryStorage(dict): def __init__(self, **kwargs): super(MemoryStorage, self).__init__(**kwargs) def __setitem__(self, key, value): uri, metadata = value super(MemoryStorage, self).__setitem__(key, (uri, metadata or frozenset())) def optimized_prefix_list(self, prefix, return_metadata=False): return None def optimized_regex_list(self, regex, return_metadata=False): return None def optimized_metadata_search(self, metadata_all=None, metadata_any=None, return_metadata=False): return None def everything(self, return_metadata=False): if return_metadata: return self.copy() return {name: uri for name, (uri, metadata) in self.items()} def remove_items(self, items): for item in items: if item in self: del self[item] def close(self): pass class SqlStorage(MutableMapping): def __init__(self, dbfile): if dbfile == ":memory:": raise ValueError("We don't support the sqlite :memory: database type. Just use the default volatile in-memory store.") self.dbfile = dbfile with sqlite3.connect(dbfile) as db: db.execute("PRAGMA foreign_keys=ON") try: db.execute("SELECT COUNT(*) FROM pyro_names").fetchone() except sqlite3.OperationalError: self._create_schema(db) else: try: db.execute("SELECT COUNT(*) FROM pyro_metadata").fetchone() except sqlite3.OperationalError: db.execute("ALTER TABLE pyro_names RENAME TO pyro_names_old") self._create_schema(db) db.execute("INSERT INTO pyro_names(name, uri) SELECT name, uri FROM pyro_names_old") db.execute("DROP TABLE pyro_names_old") db.commit() def _create_schema(self, db): db.execute("""CREATE TABLE pyro_names ( id integer PRIMARY KEY, name nvarchar NOT NULL UNIQUE, uri nvarchar NOT NULL );""") db.execute("""CREATE TABLE pyro_metadata ( object integer NOT NULL, metadata nvarchar NOT NULL, FOREIGN KEY(object) REFERENCES pyro_names(id) );""") def __getattr__(self, item): raise NotImplementedError("SqlStorage doesn't implement method/attribute '" + item + "'") def __getitem__(self, item): try: with sqlite3.connect(self.dbfile) as db: result = db.execute("SELECT id, uri FROM pyro_names WHERE name=?", (item,)).fetchone() if result: dbid, uri = result metadata = {m[0] for m in db.execute("SELECT metadata FROM pyro_metadata WHERE object=?", (dbid,)).fetchall()} return uri, metadata else: raise KeyError(item) except sqlite3.DatabaseError as e: raise NamingError("sqlite error in getitem: " + str(e)) def __setitem__(self, key, value): uri, metadata = value try: with sqlite3.connect(self.dbfile) as db: cursor = db.cursor() cursor.execute("PRAGMA foreign_keys=ON") dbid = cursor.execute("SELECT id FROM pyro_names WHERE name=?", (key,)).fetchone() if dbid: dbid = dbid[0] cursor.execute("DELETE FROM pyro_metadata WHERE object=?", (dbid,)) cursor.execute("DELETE FROM pyro_names WHERE id=?", (dbid,)) cursor.execute("INSERT INTO pyro_names(name, uri) VALUES(?,?)", (key, uri)) if metadata: object_id = cursor.lastrowid for m in metadata: cursor.execute("INSERT INTO pyro_metadata(object, metadata) VALUES (?,?)", (object_id, m)) cursor.close() db.commit() except sqlite3.DatabaseError as e: raise NamingError("sqlite error in setitem: " + str(e)) def __len__(self): try: with sqlite3.connect(self.dbfile) 
as db: return db.execute("SELECT count(*) FROM pyro_names").fetchone()[0] except sqlite3.DatabaseError as e: raise NamingError("sqlite error in len: " + str(e)) def __contains__(self, item): try: with sqlite3.connect(self.dbfile) as db: return db.execute("SELECT EXISTS(SELECT 1 FROM pyro_names WHERE name=? LIMIT 1)", (item,)).fetchone()[0] except sqlite3.DatabaseError as e: raise NamingError("sqlite error in contains: " + str(e)) def __delitem__(self, key): try: with sqlite3.connect(self.dbfile) as db: db.execute("PRAGMA foreign_keys=ON") dbid = db.execute("SELECT id FROM pyro_names WHERE name=?", (key,)).fetchone() if dbid: dbid = dbid[0] db.execute("DELETE FROM pyro_metadata WHERE object=?", (dbid,)) db.execute("DELETE FROM pyro_names WHERE id=?", (dbid,)) db.commit() except sqlite3.DatabaseError as e: raise NamingError("sqlite error in delitem: " + str(e)) def __iter__(self): try: with sqlite3.connect(self.dbfile) as db: result = db.execute("SELECT name FROM pyro_names") return iter([n[0] for n in result.fetchall()]) except sqlite3.DatabaseError as e: raise NamingError("sqlite error in iter: " + str(e)) def clear(self): try: with sqlite3.connect(self.dbfile) as db: db.execute("PRAGMA foreign_keys=ON") db.execute("DELETE FROM pyro_metadata") db.execute("DELETE FROM pyro_names") db.commit() with sqlite3.connect(self.dbfile, isolation_level=None) as db: db.execute("VACUUM") except sqlite3.DatabaseError as e: raise NamingError("sqlite error in clear: " + str(e)) def optimized_prefix_list(self, prefix, return_metadata=False): try: with sqlite3.connect(self.dbfile) as db: names = {} if return_metadata: for dbid, name, uri in db.execute("SELECT id, name, uri FROM pyro_names WHERE name LIKE ?", (prefix + '%',)).fetchall(): metadata = {m[0] for m in db.execute("SELECT metadata FROM pyro_metadata WHERE object=?", (dbid,)).fetchall()} names[name] = uri, metadata else: for name, uri in db.execute("SELECT name, uri FROM pyro_names WHERE name LIKE ?", (prefix + '%',)).fetchall(): names[name] = uri return names except sqlite3.DatabaseError as e: raise NamingError("sqlite error in optimized_prefix_list: " + str(e)) def optimized_regex_list(self, regex, return_metadata=False): return None def optimized_metadata_search(self, metadata_all=None, metadata_any=None, return_metadata=False): try: with sqlite3.connect(self.dbfile) as db: if metadata_any: params = list(metadata_any) sql = "SELECT id, name, uri FROM pyro_names WHERE id IN (SELECT object FROM pyro_metadata WHERE metadata IN ({seq}))" .format(seq=",".join(['?'] * len(metadata_any))) else: params = list(metadata_all) params.append(len(metadata_all)) sql = "SELECT id, name, uri FROM pyro_names WHERE id IN (SELECT object FROM pyro_metadata WHERE metadata IN ({seq}) " "GROUP BY object HAVING COUNT(metadata)=?)".format(seq=",".join(['?'] * len(metadata_all))) result = db.execute(sql, params).fetchall() if return_metadata: names = {} for dbid, name, uri in result: metadata = {m[0] for m in db.execute("SELECT metadata FROM pyro_metadata WHERE object=?", (dbid,)).fetchall()} names[name] = uri, metadata else: names = {name: uri for (dbid, name, uri) in result} return names except sqlite3.DatabaseError as e: raise NamingError("sqlite error in optimized_metadata_search: " + str(e)) def remove_items(self, items): try: with sqlite3.connect(self.dbfile) as db: db.execute("PRAGMA foreign_keys=ON") for item in items: dbid = db.execute("SELECT id FROM pyro_names WHERE name=?", (item,)).fetchone() if dbid: dbid = dbid[0] db.execute("DELETE FROM pyro_metadata WHERE 
object=?", (dbid,)) db.execute("DELETE FROM pyro_names WHERE id=?", (dbid,)) db.commit() except sqlite3.DatabaseError as e: raise NamingError("sqlite error in remove_items: " + str(e)) def everything(self, return_metadata=False): try: with sqlite3.connect(self.dbfile) as db: names = {} if return_metadata: for dbid, name, uri in db.execute("SELECT id, name, uri FROM pyro_names").fetchall(): metadata = {m[0] for m in db.execute("SELECT metadata FROM pyro_metadata WHERE object=?", (dbid,)).fetchall()} names[name] = uri, metadata else: for name, uri in db.execute("SELECT name, uri FROM pyro_names").fetchall(): names[name] = uri return names except sqlite3.DatabaseError as e: raise NamingError("sqlite error in everything: " + str(e)) def close(self): pass @server.expose class NameServer(object): def __init__(self, storageProvider=None): self.storage = storageProvider if storageProvider is None: self.storage = MemoryStorage() log.debug("using volatile in-memory dict storage") self.lock = threading.RLock() def count(self): return len(self.storage) def lookup(self, name, return_metadata=False): try: uri, metadata = self.storage[name] uri = core.URI(uri) if return_metadata: return uri, set(metadata or []) return uri except KeyError: raise NamingError("unknown name: " + name) def register(self, name, uri, safe=False, metadata=None): if isinstance(uri, core.URI): uri = str(uri) elif not isinstance(uri, str): raise TypeError("only URIs or strings can be registered") else: core.URI(uri) if not isinstance(name, str): raise TypeError("name must be a str") if isinstance(metadata, str): raise TypeError("metadata should not be a str, but another iterable (set, list, etc)") metadata and iter(metadata) with self.lock: if safe and name in self.storage: raise NamingError("name already registered: " + name) self.storage[name] = uri, set(metadata) if metadata else None def set_metadata(self, name, metadata): if not isinstance(name, str): raise TypeError("name must be a str") if isinstance(metadata, str): raise TypeError("metadata should not be a str, but another iterable (set, list, etc)") metadata and iter(metadata) with self.lock: try: uri, old_meta = self.storage[name] self.storage[name] = uri, set(metadata) if metadata else None except KeyError: raise NamingError("unknown name: " + name) def remove(self, name=None, prefix=None, regex=None): if name and name in self.storage and name != core.NAMESERVER_NAME: with self.lock: del self.storage[name] return 1 if prefix: items = list(self.list(prefix=prefix).keys()) if core.NAMESERVER_NAME in items: items.remove(core.NAMESERVER_NAME) self.storage.remove_items(items) return len(items) if regex: items = list(self.list(regex=regex).keys()) if core.NAMESERVER_NAME in items: items.remove(core.NAMESERVER_NAME) self.storage.remove_items(items) return len(items) return 0 def list(self, prefix=None, regex=None, return_metadata=False): if prefix and regex: raise ValueError("you can only filter on one thing at a time") with self.lock: if prefix: result = self.storage.optimized_prefix_list(prefix, return_metadata) if result is not None: return result result = {} for name in self.storage: if name.startswith(prefix): result[name] = self.storage[name] if return_metadata else self.storage[name][0] return result elif regex: result = self.storage.optimized_regex_list(regex, return_metadata) if result is not None: return result result = {} try: regex = re.compile(regex) except re.error as x: raise errors.NamingError("invalid regex: " + str(x)) else: for name in self.storage: if 
regex.match(name): result[name] = self.storage[name] if return_metadata else self.storage[name][0] return result else: return self.storage.everything(return_metadata) def yplookup(self, meta_all=None, meta_any=None, return_metadata=True): if meta_all and meta_any: raise ValueError("you can't use meta_all or meta_any at the same time") with self.lock: if meta_all: if isinstance(meta_all, str): raise TypeError("metadata_all should not be a str, but another iterable (set, list, etc)") meta_all and iter(meta_all) result = self.storage.optimized_metadata_search(metadata_all=meta_all, return_metadata=return_metadata) if result is not None: return result meta_all = frozenset(meta_all) result = {} for name, (uri, meta) in self.storage.everything(return_metadata=True).items(): if meta_all.issubset(meta): result[name] = (uri, meta) if return_metadata else uri return result elif meta_any: if isinstance(meta_any, str): raise TypeError("metadata_any should not be a str, but another iterable (set, list, etc)") meta_any and iter(meta_any) result = self.storage.optimized_metadata_search(metadata_any=meta_any, return_metadata=return_metadata) if result is not None: return result meta_any = frozenset(meta_any) result = {} for name, (uri, meta) in self.storage.everything(return_metadata=True).items(): if meta_any & meta: result[name] = (uri, meta) if return_metadata else uri return result else: return {} def ping(self): pass class NameServerDaemon(server.Daemon): def __init__(self, host=None, port=None, unixsocket=None, nathost=None, natport=None, storage=None): if host is None: host = config.HOST elif not isinstance(host, str): host = str(host) if port is None: port = config.NS_PORT if nathost is None: nathost = config.NATHOST elif not isinstance(nathost, str): nathost = str(nathost) if natport is None: natport = config.NATPORT or None storage = storage or "memory" if storage == "memory": log.debug("using volatile in-memory dict storage") self.nameserver = NameServer(MemoryStorage()) elif storage.startswith("sql:") and len(storage) > 4: sqlfile = storage[4:] log.debug("using persistent sql storage in file %s", sqlfile) self.nameserver = NameServer(SqlStorage(sqlfile)) else: raise ValueError("invalid storage type '%s'" % storage) existing_count = self.nameserver.count() if existing_count > 0: log.debug("number of existing entries in storage: %d", existing_count) super(NameServerDaemon, self).__init__(host, port, unixsocket, nathost=nathost, natport=natport) self.register(self.nameserver, core.NAMESERVER_NAME) metadata = {"class:Pyro5.nameserver.NameServer"} self.nameserver.register(core.NAMESERVER_NAME, self.uriFor(self.nameserver), metadata=metadata) if config.NS_AUTOCLEAN > 0: if not AutoCleaner.override_autoclean_min and config.NS_AUTOCLEAN < AutoCleaner.min_autoclean_value: raise ValueError("NS_AUTOCLEAN cannot be smaller than " + str(AutoCleaner.min_autoclean_value)) log.debug("autoclean enabled") self.cleaner_thread = AutoCleaner(self.nameserver) self.cleaner_thread.start() else: log.debug("autoclean not enabled") self.cleaner_thread = None log.info("nameserver daemon created") def close(self): super(NameServerDaemon, self).close() if self.nameserver is not None: self.nameserver.storage.close() self.nameserver = None if self.cleaner_thread: self.cleaner_thread.stop = True self.cleaner_thread.join() self.cleaner_thread = None def __enter__(self): if not self.nameserver: raise PyroError("cannot reuse this object") return self def __exit__(self, exc_type, exc_value, traceback): if self.nameserver is not 
None: self.nameserver.storage.close() self.nameserver = None if self.cleaner_thread: self.cleaner_thread.stop = True self.cleaner_thread.join() self.cleaner_thread = None return super(NameServerDaemon, self).__exit__(exc_type, exc_value, traceback) def handleRequest(self, conn): try: return super(NameServerDaemon, self).handleRequest(conn) except ProtocolError as x: warnings.warn("Pyro protocol error occurred: " + str(x)) raise class AutoCleaner(threading.Thread): min_autoclean_value = 3 max_unreachable_time = 20.0 loop_delay = 2.0 override_autoclean_min = False def __init__(self, nameserver): assert config.NS_AUTOCLEAN > 0 if not self.override_autoclean_min and config.NS_AUTOCLEAN < self.min_autoclean_value: raise ValueError("NS_AUTOCLEAN cannot be smaller than " + str(self.min_autoclean_value)) super(AutoCleaner, self).__init__() self.nameserver = nameserver self.stop = False self.daemon = True self.last_cleaned = time.time() self.unreachable = {} def run(self): while not self.stop: time.sleep(self.loop_delay) time_since_last_autoclean = time.time() - self.last_cleaned if time_since_last_autoclean < config.NS_AUTOCLEAN: continue for name, uri in self.nameserver.list().items(): if name in (core.DAEMON_NAME, core.NAMESERVER_NAME): continue try: uri_obj = core.URI(uri) timeout = config.COMMTIMEOUT or 5 sock = socketutil.create_socket(connect=(uri_obj.host, uri_obj.port), timeout=timeout) sock.close() if name in self.unreachable: del self.unreachable[name] except socket.error: if name not in self.unreachable: self.unreachable[name] = time.time() if time.time() - self.unreachable[name] >= self.max_unreachable_time: log.info("autoclean: unregistering %s; cannot connect uri %s for %d sec", name, uri, self.max_unreachable_time) self.nameserver.remove(name) del self.unreachable[name] continue self.last_cleaned = time.time() if self.unreachable: log.debug("autoclean: %d/%d names currently unreachable", len(self.unreachable), self.nameserver.count()) class BroadcastServer(object): class TransportServerAdapter(object): def __init__(self, bcserver): self.sockets = [bcserver] def events(self, eventobjects): for bc in eventobjects: bc.processRequest() def __init__(self, nsUri, bchost=None, bcport=None, ipv6=False): self.transportServer = self.TransportServerAdapter(self) self.nsUri = nsUri if bcport is None: bcport = config.NS_BCPORT if bchost is None: bchost = config.NS_BCHOST elif not isinstance(bchost, str): bchost = str(bchost) if ":" in nsUri.host or ipv6: bchost = bchost or "::" self.sock = socketutil.create_bc_socket((bchost, bcport, 0, 0), reuseaddr=config.SOCK_REUSE, timeout=2.0) else: self.sock = socketutil.create_bc_socket((bchost, bcport), reuseaddr=config.SOCK_REUSE, timeout=2.0) self._sockaddr = self.sock.getsockname() bchost = bchost or self._sockaddr[0] bcport = bcport or self._sockaddr[1] if ":" in bchost: self.locationStr = "[%s]:%d" % (bchost, bcport) else: self.locationStr = "%s:%d" % (bchost, bcport) log.info("ns broadcast server created on %s - %s", self.locationStr, socketutil.family_str(self.sock)) self.running = True def close(self): log.debug("ns broadcast server closing") self.running = False with contextlib.suppress(OSError, socket.error): self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() def getPort(self): return self.sock.getsockname()[1] def fileno(self): return self.sock.fileno()
MIT License
shannonturner/python-lessons
section_05_(loops)/loops.py
loop_example
python
def loop_example(list_to_loop_through): print "I'm going to begin to loop through this list: ", list_to_loop_through, "\n" list_items_squared = [] for each_item in list_to_loop_through: print "Now I'm on: ", each_item print "{0} squared is {1}\n".format(each_item, each_item**2) list_items_squared.append(each_item**2) print "Now I'm done looping through the list, and I'm going to return the new list, where each list item has been squared." return list_items_squared
Assuming each item in list_to_loop_through is a number, return a list of each item in that list squared.
https://github.com/shannonturner/python-lessons/blob/38409c318e7a62d30b2ffd68f8a7a5a5ec00778d/section_05_(loops)/loops.py#L2-L19
MIT License
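A rough usage sketch for the loop_example record above. The original function targets Python 2 (bare print statements); an equivalent in Python 3 looks like this — the sample list is illustrative, not taken from the source.

def loop_example_py3(list_to_loop_through):
    print("Looping through:", list_to_loop_through)
    list_items_squared = []
    for each_item in list_to_loop_through:
        # square each number and collect it
        print("{0} squared is {1}".format(each_item, each_item ** 2))
        list_items_squared.append(each_item ** 2)
    return list_items_squared

print(loop_example_py3([2, 3, 4]))   # -> [4, 9, 16]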
facebookresearch/pytorchvideo
pytorchvideo/data/encoded_video.py
select_video_class
python
def select_video_class(decoder: str) -> Video: if DecoderType(decoder) == DecoderType.PYAV: from .encoded_video_pyav import EncodedVideoPyAV video_cls = EncodedVideoPyAV elif DecoderType(decoder) == DecoderType.TORCHVISION: from .encoded_video_torchvision import EncodedVideoTorchVision video_cls = EncodedVideoTorchVision else: raise NotImplementedError(f"Unknown decoder type {decoder}") return video_cls
Select the class for accessing clips based on the provided decoder string. Args: decoder (str): Defines what type of decoder is used to decode a video.
https://github.com/facebookresearch/pytorchvideo/blob/832a6bc683257f07e74c95a1f9441ebaa64d95d8/pytorchvideo/data/encoded_video.py#L16-L34
import io import logging import pathlib from iopath.common.file_io import g_pathmgr from pytorchvideo.data.decoder import DecoderType from .video import Video logger = logging.getLogger(__name__)
Apache License 2.0
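A minimal, hypothetical call of select_video_class from the record above; it assumes pytorchvideo and the chosen decoder backend (PyAV here) are installed, and that the DecoderType enum accepts the string "pyav".

from pytorchvideo.data.encoded_video import select_video_class

video_cls = select_video_class("pyav")   # resolves to EncodedVideoPyAV per the record's dispatch
print(video_cls.__name__)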
bbn-q/qgl
QGL/PulseSequencePlotter.py
plot_pulse_files
python
def plot_pulse_files(metafile, time=True, backend='bqplot'): with open(metafile, 'r') as FID: meta_info = json.load(FID) fileNames = [] for el in meta_info["instruments"].values(): if isinstance(el, str): fileNames.append(el) elif isinstance(el, dict): for file in el.values(): fileNames.append(file) line_names, num_seqs, data_dicts = extract_waveforms(fileNames, time=time) localname = os.path.split(fileNames[0])[1] sequencename = localname.split('-')[0] if backend=='matplotlib': import matplotlib.pyplot as plt from ipywidgets import interact, IntSlider def update_plot(seq_ind): for line_name in line_names: dat = data_dicts[f"{line_name}_{seq_ind}"] plt.plot(dat['x'], dat['y'], label=line_name, linewidth=1.0) interact(update_plot, seq_ind=IntSlider(min=1,max=num_seqs,step=1,value=1,description="Sequence Number")) elif backend=='bqplot': from bqplot import DateScale, LinearScale, Axis, Lines, Figure, Tooltip from bqplot.colorschemes import CATEGORY10, CATEGORY20 from ipywidgets import interact, IntSlider, VBox sx = LinearScale() sy = LinearScale(min=-1.0, max=2*len(line_names)-1.0) if time: ax = Axis(label='Time (ns)', scale=sx) else: ax = Axis(label="Samples", scale=sx) ay = Axis(label='Amplitude', scale=sy, orientation='vertical') colors = CATEGORY10 if len(line_names)<10 else CATEGORY20 lines = [] tt = Tooltip(fields=['name'], labels=['Channel']) x_mult = 1.0e9 if time else 1 for i, line_name in enumerate(line_names): dat = data_dicts[f"{line_name}_1"] lines.append(Lines(labels=[line_name], x=x_mult*dat['x'], y=dat['y'], scales={'x': sx, 'y': sy}, tooltip=tt, animate=False, colors=[colors[i]])) slider = IntSlider(min=1, max=num_seqs, step=1, description='Segment', value=1) def segment_changed(change): for line, line_name in zip(lines, line_names): dat = data_dicts[f"{line_name}_{slider.value}"] line.x = x_mult*dat['x'] line.y = dat['y'] slider.observe(segment_changed, 'value') fig = Figure(marks=lines, axes=[ax, ay], title='Waveform Plotter',animation_duration=50) return VBox([slider, fig])
plot_pulse_files(metafile) Helper function to plot a list of AWG files. A jupyter slider widget allows choice of sequence number.
https://github.com/bbn-q/qgl/blob/8df9ce5d68dca61c8c057d34a1c08b98eb910a43/QGL/PulseSequencePlotter.py#L68-L129
import os.path import json from importlib import import_module import numpy as np from . import config from . import drivers import pkgutil def all_zero_seqs(seqs): return all([np.allclose([_[1] for _ in seq], 0) for seq in seqs]) def build_awg_translator_map(): translators_map = {} translators = [_[1] for _ in pkgutil.walk_packages(drivers.__path__)] for translator in translators: module = import_module('QGL.drivers.' + translator) ext = module.get_seq_file_extension() if ext in translators_map: translators_map[ext].append(module) else: translators_map[ext] = [module] return translators_map translators = build_awg_translator_map() def resolve_translator(filename, translators): ext = os.path.splitext(filename)[1] if ext not in translators: raise NameError("No translator found to open the given file %s", filename) if len(translators[ext]) == 1: return translators[ext][0] for t in translators[ext]: if t.is_compatible_file(filename): return t raise NameError("No translator found to open the given file %s", filename)
Apache License 2.0
abhisharma404/vault
src/lib/attacks/ddos/ddos.py
DDoS.generatePacket
python
def generatePacket(self, ip, source_port): IP_PACKET = IP(src=ip, dst=self.target_ip) TCP_PACKET = TCP(sport=source_port, dport=self.dport) PKT = IP_PACKET/TCP_PACKET return PKT
Generates a scapy IP/TCP packet.
https://github.com/abhisharma404/vault/blob/0303cf425f028ce38cfaf40640d625861b7c805a/src/lib/attacks/ddos/ddos.py#L100-L107
import random from scapy.all import * import threading import socket import sys from urllib.parse import urlparse import colors import time class DDoS(object): def __init__(self, url, ip, start_port, end_port, dport, threads, interval): if url is not None and ip is not None: colors.error('Please provide either the URL or the IP address...') sys.exit(1) if ip is not None: self.target_ip = ip elif url is not None: self.target_ip = self.getIP(url) else: colors.error('Please provide URL or the IP address to attack...') if start_port is not None: if start_port > 0 and start_port < 65355: self.start_port = int(start_port) else: self.start_port = random.randint(1, 100) if end_port is not None: if end_port > 1 and end_port < 65356: self.end_port = int(end_port) else: self.end_port = random.randint(1000, 65355) if dport is None: self.dport = 80 else: if dport < 65356 and dport > 0: self.dport = int(dport) else: colors.error('Please provide a valid destination port') sys.exit(1) if interval is not None: self.INTER = int(interval) else: self.INTER = 0.0001 if threads is not None: threads = int(threads) self.threadValidator(threads) else: self.threads = 1 self.number_of_packets = 0 def threadValidator(self, threads): if threads > 100: choice = input('Are you sure you want to use {} threads...?' 'This can slow down your system.(Y/N)' .format(threads)) if choice == 'N' or choice == 'n': threads = int(input('>> Please enter the number of threads' ' you want to use...')) self.threadValidator(threads) else: self.threads = threads else: self.threads = threads @staticmethod def getIP(url): url = urlparse(url) return socket.gethostbyname(url.netloc) @staticmethod def generateIP(): ip = str(random.randint(1, 254)) + '.' + str(random.randint(0, 255)) + '.' + str(random.randint(0, 255)) + '.' + str(random.randint(0, 255)) return ip
MIT License
zzzeek/sqlalchemy
lib/sqlalchemy/future/engine.py
Connection.rollback
python
def rollback(self): if self._transaction: self._transaction.rollback()
Roll back the transaction that is currently in progress. This method rolls back the current transaction if one has been started. If no transaction was started, the method has no effect. If a transaction was started and the connection is in an invalidated state, the transaction is cleared using this method. A transaction is begun on a :class:`_future.Connection` automatically whenever a statement is first executed, or when the :meth:`_future.Connection.begin` method is called. .. note:: The :meth:`_future.Connection.rollback` method only acts upon the primary database transaction that is linked to the :class:`_future.Connection` object. It does not operate upon a SAVEPOINT that would have been invoked from the :meth:`_future.Connection.begin_nested` method; for control of a SAVEPOINT, call :meth:`_engine.NestedTransaction.rollback` on the :class:`_engine.NestedTransaction` that is returned by the :meth:`_future.Connection.begin_nested` method itself.
https://github.com/zzzeek/sqlalchemy/blob/979ea6b21f71605314dc0ac1231dd385eced98c4/lib/sqlalchemy/future/engine.py#L213-L237
from .. import util from ..engine import Connection as _LegacyConnection from ..engine import create_engine as _create_engine from ..engine import Engine as _LegacyEngine from ..engine.base import OptionEngineMixin NO_OPTIONS = util.immutabledict() def create_engine(*arg, **kw): kw["_future_engine_class"] = Engine return _create_engine(*arg, **kw) class Connection(_LegacyConnection): _is_future = True def _branch(self): raise NotImplementedError( "sqlalchemy.future.Connection does not support " "'branching' of new connections." ) def begin(self): return super(Connection, self).begin() def begin_nested(self): return super(Connection, self).begin_nested() def commit(self): if self._transaction: self._transaction.commit()
MIT License
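A small sketch of how rollback is typically used on the future-style Connection shown above; this assumes SQLAlchemy 1.4+ (create_engine for the future engine is imported in the record's context) and uses an in-memory SQLite database as a stand-in.

from sqlalchemy import text
from sqlalchemy.future import create_engine

engine = create_engine("sqlite://")
with engine.connect() as conn:
    conn.execute(text("CREATE TABLE t (x INTEGER)"))
    conn.commit()                                      # persist the table
    conn.execute(text("INSERT INTO t (x) VALUES (1)")) # implicitly begins a new transaction
    conn.rollback()                                    # the INSERT is discarded
    count = conn.execute(text("SELECT COUNT(*) FROM t")).scalar()   # -> 0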
oubiwann-unsupported/django-riak-engine
django_riak_engine/util/dist.py
has_docutils
python
def has_docutils(): try: import docutils return True except ImportError: return False
Check to see if docutils is installed.
https://github.com/oubiwann-unsupported/django-riak-engine/blob/cd4adc6c1e7dfa3d4e94346624578a1c3990efd5/django_riak_engine/util/dist.py#L48-L56
import os rest_error_help = """ ReST validation error See the following: http://docutils.sourceforge.net/docs/user/rst/cheatsheet.txt http://docutils.sourceforge.net/docs/user/rst/quickstart.html """ legalReSTFiles = [ "README", "TODO", "DEPENDENCIES", ] def setup(*args, **kwds): try: from setuptools import setup except ImportError: from distutils.core import setup return setup(*args, **kwds) def find_packages(library_name): try: from setuptools import find_packages return find_packages() except ImportError: pass packages = [] for directory, subdirectories, files in os.walk(library_name): if "__init__.py" in files: packages.append(directory.replace(os.sep, ".")) return packages
BSD 2-Clause Simplified License
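A trivial usage sketch for has_docutils from the record above — the branch bodies are placeholders, not part of the source.

if has_docutils():
    # safe to import docutils and validate the ReST files listed in legalReSTFiles
    import docutils  # noqa: F401
    print("docutils available; ReST validation can run")
else:
    print("docutils not installed; skipping ReST validation")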
voxel51/eta
eta/core/events.py
VideoEvent.is_empty
python
def is_empty(self): return not ( self.has_label or self.has_event_attributes or self.has_video_objects or self.has_detections )
Whether the event has no labels of any kind.
https://github.com/voxel51/eta/blob/e51510fda0722ac7cadb17b109bad413a6602ed3/eta/core/events.py#L638-L645
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import * from future.utils import iteritems, itervalues from collections import defaultdict from copy import deepcopy import logging import eta.core.data as etad import eta.core.frameutils as etaf import eta.core.geometry as etag import eta.core.labels as etal import eta.core.objects as etao import eta.core.serial as etas import eta.core.utils as etau logger = logging.getLogger(__name__) class DetectedEvent(etal.Labels, etag.HasBoundingBox): def __init__( self, label=None, bounding_box=None, mask=None, confidence=None, name=None, top_k_probs=None, index=None, frame_number=None, attrs=None, objects=None, ): self.type = etau.get_class_name(self) self.label = label self.bounding_box = bounding_box self.mask = mask self.confidence = confidence self.name = name self.top_k_probs = top_k_probs self.index = index self.frame_number = frame_number self.attrs = attrs or etad.AttributeContainer() self.objects = objects or etao.DetectedObjectContainer() @property def is_empty(self): return not ( self.has_label or self.has_bounding_box or self.has_mask or self.has_attributes or self.has_objects ) @property def has_label(self): return self.label is not None @property def has_bounding_box(self): return self.bounding_box is not None @property def has_mask(self): return self.mask is not None @property def has_confidence(self): return self.confidence is not None @property def has_name(self): return self.name is not None @property def has_top_k_probs(self): return self.top_k_probs is not None @property def has_index(self): return self.index is not None @property def has_frame_number(self): return self.frame_number is not None @property def has_attributes(self): return bool(self.attrs) @property def has_objects(self): return bool(self.objects) @classmethod def get_schema_cls(cls): return EventSchema def iter_attributes(self): return iter(self.attrs) def iter_objects(self): return iter(self.objects) def get_bounding_box(self): return self.bounding_box def get_index(self): return self.index def offset_index(self, offset): if self.has_index: self.index += offset def clear_index(self): self.index = None def get_object_indexes(self): return self.objects.get_indexes() def offset_object_indexes(self, offset): self.objects.offset_indexes(offset) def clear_object_indexes(self): self.objects.clear_indexes() def add_attribute(self, attr): self.attrs.add(attr) def add_attributes(self, attrs): self.attrs.add_container(attrs) def add_object(self, obj): self.objects.add(obj) def add_objects(self, objs): self.objects.add_container(objs) def pop_attributes(self): attrs = self.attrs self.clear_attributes() return attrs def pop_objects(self): objects = self.objects self.clear_objects() return objects def clear_attributes(self): self.attrs = etad.AttributeContainer() def clear_objects(self): self.objects = etao.DetectedObjectContainer() def clear_object_attributes(self): for obj in self.objects: obj.clear_attributes() def filter_by_schema(self, schema, allow_none_label=False): if self.label is None: if not allow_none_label: raise EventSchemaError( "None event label is not allowed by the schema" ) elif self.label != schema.get_label(): raise EventSchemaError( "Label '%s' does not match event schema" % self.label ) self.attrs.filter_by_schema( schema.frames, constant_schema=schema.attrs ) self.objects.filter_by_schema(schema.objects) def remove_objects_without_attrs(self, 
labels=None): self.objects.remove_objects_without_attrs(labels=labels) def attributes(self): _attrs = ["type"] _noneable_attrs = [ "label", "bounding_box", "mask", "confidence", "name", "top_k_probs", "index", "frame_number", ] _attrs.extend( [a for a in _noneable_attrs if getattr(self, a) is not None] ) if self.attrs: _attrs.append("attrs") if self.objects: _attrs.append("objects") return _attrs @classmethod def from_dict(cls, d): bounding_box = d.get("bounding_box", None) if bounding_box is not None: bounding_box = etag.BoundingBox.from_dict(bounding_box) mask = d.get("mask", None) if mask is not None: mask = etas.deserialize_numpy_array(mask) attrs = d.get("attrs", None) if attrs is not None: attrs = etad.AttributeContainer.from_dict(attrs) objects = d.get("objects", None) if objects is not None: objects = etao.DetectedObjectContainer.from_dict(objects) return cls( label=d.get("label", None), bounding_box=bounding_box, mask=mask, confidence=d.get("confidence", None), name=d.get("name", None), top_k_probs=d.get("top_k_probs", None), index=d.get("index", None), frame_number=d.get("frame_number", None), attrs=attrs, objects=objects, ) class DetectedEventContainer(etal.LabelsContainer): _ELE_CLS = DetectedEvent _ELE_CLS_FIELD = "_EVENT_CLS" _ELE_ATTR = "events" def get_labels(self): return set(devent.label for devent in self) def get_indexes(self): return set(devent.index for devent in self if devent.has_index) def offset_indexes(self, offset): for devent in self: devent.offset_index(offset) def clear_indexes(self): for devent in self: devent.clear_index() def get_object_indexes(self): obj_indexes = set() for devent in self: obj_indexes.update(devent.get_object_indexes()) return obj_indexes def offset_object_indexes(self, offset): for devent in self: devent.offset_object_indexes(offset) def clear_object_indexes(self): for devent in self: devent.clear_object_indexes() def sort_by_confidence(self, reverse=False): self.sort_by("confidence", reverse=reverse) def sort_by_index(self, reverse=False): self.sort_by("index", reverse=reverse) def sort_by_frame_number(self, reverse=False): self.sort_by("frame_number", reverse=reverse) def filter_by_schema(self, schema): filter_func = lambda event: schema.has_event_label(event.label) self.filter_elements([filter_func]) for event in self: event_schema = schema.get_event_schema(event.label) event.filter_by_schema(event_schema) def remove_objects_without_attrs(self, labels=None): for event in self: event.remove_objects_without_attrs(labels=labels) class VideoEvent( etal.Labels, etal.HasLabelsSupport, etal.HasFramewiseView, etal.HasSpatiotemporalView, ): def __init__( self, label=None, confidence=None, name=None, index=None, support=None, attrs=None, objects=None, frames=None, ): self.type = etau.get_class_name(self) self.label = label self.confidence = confidence self.name = name self.index = index self.attrs = attrs or etad.AttributeContainer() self.objects = objects or etao.VideoObjectContainer() self.frames = frames or {} etal.HasLabelsSupport.__init__(self, support=support) @property
Apache License 2.0
elife-asu/pyinform
pyinform/utils/encoding.py
encode
python
def encode(state, b=None): xs = np.ascontiguousarray(state, dtype=np.int32) data = xs.ctypes.data_as(POINTER(c_int)) if xs.size == 0: raise ValueError("cannot encode an empty array") if b is None: b = max(2, np.amax(xs) + 1) e = ErrorCode(0) encoding = _inform_encode(data, c_ulong(xs.size), c_int(b), byref(e)) error_guard(e) return encoding
Encode a base-*b* array of integers into a single integer. This function uses a `big-endian`__ encoding scheme. That is, the most significant bits of the encoded integer are determined by the left-most end of the unencoded state. .. doctest:: utils >>> utils.encode([0,0,1], b=2) 1 >>> utils.encode([0,1,0], b=3) 3 >>> utils.encode([1,0,0], b=4) 16 >>> utils.encode([1,0,4], b=5) 29 If *b* is not provided (or is None), the base is inferred from the state with a minimum value of 2. .. doctest:: utils >>> utils.encode([0,0,2]) 2 >>> utils.encode([0,2,0]) 6 >>> utils.encode([1,2,1]) 16 See also :py:func:`.decode`. .. __: https://en.wikipedia.org/wiki/Endianness#Examples :param sequence state: the state to encode :param int b: the base in which to encode :return: the encoded state :rtype: int :raises ValueError: if the state is empty :raises InformError: if an error occurs in the ``inform`` C call
https://github.com/elife-asu/pyinform/blob/832d8f28d3ac214f631753bab4907b39dee25442/pyinform/utils/encoding.py#L34-L89
import numpy as np from ctypes import byref, c_int, c_ulong, POINTER from pyinform import _inform from pyinform.error import ErrorCode, error_guard
MIT License
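The docstring above already carries doctests; the sketch below is only a pure-Python reference for the same big-endian, base-b encoding rule, to make the arithmetic explicit (it does not call the inform C library that the real encode wraps).

from functools import reduce

def encode_reference(state, b):
    # big-endian fold: the left-most digit is the most significant
    return reduce(lambda acc, digit: acc * b + digit, state, 0)

assert encode_reference([0, 0, 1], 2) == 1
assert encode_reference([1, 0, 0], 4) == 16
assert encode_reference([1, 0, 4], 5) == 1 * 25 + 0 * 5 + 4 == 29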
packtpublishing/conversational-ai-with-rasa
Chapter10/service/normalization.py
text_to_coordinate
python
def text_to_coordinate(text_city: str) -> Tuple[float, float]: list_of_locations = reg.locations_for(text_city) city = list_of_locations[0] return city.lat, city.lon
Parse a city name to a coordinate; returns (latitude, longitude).
https://github.com/packtpublishing/conversational-ai-with-rasa/blob/850dc56c3b724a5fc6e2b66e7582a7f62b302b74/Chapter10/service/normalization.py#L9-L20
import datetime from typing import Optional, Tuple from pyowm.owm import OWM reg = OWM("not-actually-used-key").city_id_registry()
MIT License
mirantis/ceph-lcm
decapodlib/decapodlib/client.py
json_response
python
def json_response(func): @six.wraps(func) def decorator(*args, **kwargs): raw_response = kwargs.pop("raw_response", False) response = func(*args, **kwargs) if raw_response: return response if isinstance(response, dict): return response if response.ok: content_type = response.headers.get("Content-Type") content_type = content_type or "application/json" if content_type == "application/json": return response.json() return response.text raise exceptions.DecapodAPIError(response) return decorator
Decorator which parses :py:class:`requests.Response` and returns unpacked JSON. If ``Content-Type`` of response is not ``application/json``, then it returns text. :return: Data of :py:class:`requests.Response` from decorated function. :raises decapodlib.exceptions.DecapodAPIError: if decoding is not possible or response status code is not ``200``.
https://github.com/mirantis/ceph-lcm/blob/fad9bad0b94f2ef608362953583b10a54a841d24/decapodlib/decapodlib/client.py#L135-L165
from __future__ import absolute_import from __future__ import unicode_literals import abc import inspect import logging import socket import warnings import pkg_resources import requests import requests.adapters import six from decapodlib import auth from decapodlib import exceptions try: import simplejson as json except ImportError: import json LOG = logging.getLogger(__name__) try: VERSION = pkg_resources.get_distribution("decapodlib").version except pkg_resources.DistributionNotFound as exc: warnings.warn("Module is imported outside of distribution.", ImportWarning) VERSION = "unknown" __all__ = "VERSION", "Client", "V1Client" def json_dumps(data): return json.dumps(data, separators=(",", ":")) def make_query_params(**request_params): params = {} for key, value in six.iteritems(request_params): if value is not None: params[key] = value return params
Apache License 2.0
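A hypothetical illustration of applying the json_response decorator from the record above to a plain requests call; the base URL and endpoint are made up for the sketch and are not part of decapodlib.

import requests

@json_response
def fetch_status(base_url):
    # returns a requests.Response; the decorator unpacks JSON (or raises DecapodAPIError)
    return requests.get(base_url + "/v1/info")

# parsed = fetch_status("http://decapod.example")                      # dict on success
# raw = fetch_status("http://decapod.example", raw_response=True)      # untouched Response object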
microsoft/deepspeedexamples
Megatron-LM-v1.1.5-3D_parallelism/megatron/mpu/initialize.py
set_model_parallel_world_size
python
def set_model_parallel_world_size(world_size): global _MPU_WORLD_SIZE _MPU_WORLD_SIZE = world_size
Set the model parallel size
https://github.com/microsoft/deepspeedexamples/blob/174ae3bc8dbb688cfaccb4afa15d6e2cdbe19ce5/Megatron-LM-v1.1.5-3D_parallelism/megatron/mpu/initialize.py#L184-L187
import torch from .utils import ensure_divisibility _MODEL_PARALLEL_GROUP = None _DATA_PARALLEL_GROUP = None _PIPE_PARALLEL_GROUP = None _IO_PARALLEL_GROUP = None _MPU_WORLD_SIZE = None _MPU_RANK = None _MPU_TOPOLOGY = None def is_unitialized(): return _DATA_PARALLEL_GROUP is None def initialize_model_parallel(model_parallel_size_, topology=None): if torch.distributed.get_rank() == 0: print('> initializing model parallel with size {}'.format( model_parallel_size_)) assert torch.distributed.is_initialized() world_size = torch.distributed.get_world_size() model_parallel_size = min(model_parallel_size_, world_size) ensure_divisibility(world_size, model_parallel_size) rank = torch.distributed.get_rank() global _MPU_TOPOLOGY if topology: _MPU_TOPOLOGY = topology global _DATA_PARALLEL_GROUP assert _DATA_PARALLEL_GROUP is None, 'data parallel group is already initialized' if topology: for dp_group in topology.get_axis_comm_lists('data'): group = torch.distributed.new_group(ranks=dp_group) if rank == 0: print(f'MPU DP:', dp_group) if rank in dp_group: _DATA_PARALLEL_GROUP = group else: for i in range(model_parallel_size): ranks = range(i, world_size, model_parallel_size) group = torch.distributed.new_group(ranks) if i == (rank % model_parallel_size): _DATA_PARALLEL_GROUP = group if topology is not None: global _PIPE_PARALLEL_GROUP for pp_group in topology.get_axis_comm_lists('pipe'): group = torch.distributed.new_group(ranks=pp_group) if rank == 0: print(f'MPU PP:', pp_group) if rank in pp_group: _PIPE_PARALLEL_GROUP = group global _IO_PARALLEL_GROUP if topology and topology.get_dim('pipe') > 1: io_stages = [0, topology.get_dim('pipe') - 1] io_group = [] for stage in io_stages: io_group.extend(topology.filter_match(pipe=stage, model=0)) if rank == 0: print(f'MPU IO:', io_group) group = torch.distributed.new_group(ranks=io_group) if rank in io_group: _IO_PARALLEL_GROUP = group else: _IO_PARALLEL_GROUP = get_data_parallel_group() global _MODEL_PARALLEL_GROUP assert _MODEL_PARALLEL_GROUP is None, 'model parallel group is already initialized' if topology: if model_parallel_size == 1: for group_rank in range(world_size): group = torch.distributed.new_group(ranks=[group_rank]) if rank == 0: print(f'MPU MP:', [group_rank]) if rank == group_rank: _MODEL_PARALLEL_GROUP = group return for mp_group in topology.get_axis_comm_lists('model'): group = torch.distributed.new_group(ranks=mp_group) if rank == 0: print(f'MPU MP:', mp_group) if rank in mp_group: _MODEL_PARALLEL_GROUP = group else: for i in range(world_size // model_parallel_size): ranks = range(i * model_parallel_size, (i + 1) * model_parallel_size) group = torch.distributed.new_group(ranks) if i == (rank // model_parallel_size): _MODEL_PARALLEL_GROUP = group def model_parallel_is_initialized(): if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None: return False return True def get_model_parallel_group(): assert _MODEL_PARALLEL_GROUP is not None, 'model parallel group is not initialized' return _MODEL_PARALLEL_GROUP def get_data_parallel_group(): assert _DATA_PARALLEL_GROUP is not None, 'data parallel group is not initialized' return _DATA_PARALLEL_GROUP def get_io_parallel_group(): assert _IO_PARALLEL_GROUP is not None, 'IO parallel group is not initialized' return _IO_PARALLEL_GROUP
MIT License
ganeti/ganeti
lib/storage/bdev.py
LogicalVolume.Snapshot
python
def Snapshot(self, snap_name=None, snap_size=None): if not snap_name: snap_name = self._lv_name + ".snap" if not snap_size: snap_size = self.size snap = LogicalVolume((self._vg_name, snap_name), None, snap_size, self.params, self.dyn_params) base.IgnoreError(snap.Remove) vg_info = self.GetVGInfo([self._vg_name], False) if not vg_info: base.ThrowError("Can't compute VG info for vg %s", self._vg_name) free_size, _, _ = vg_info[0] if free_size < snap_size: base.ThrowError("Not enough free space: required %s," " available %s", snap_size, free_size) _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % snap_size, "-s", "-n%s" % snap_name, self.dev_path])) return (self._vg_name, snap_name)
Create a snapshot copy of an lvm block device. @returns: tuple (vg, lv)
https://github.com/ganeti/ganeti/blob/4d21019c72cba4d746f5d17ca22098f4c7682e9c/lib/storage/bdev.py#L616-L646
import re import stat import os import logging import math from ganeti import utils from ganeti import errors from ganeti import constants from ganeti import objects from ganeti import compat from ganeti import serializer from ganeti.storage import base from ganeti.storage import drbd from ganeti.storage.filestorage import FileStorage from ganeti.storage.gluster import GlusterStorage from ganeti.storage.extstorage import ExtStorageDevice class RbdShowmappedJsonError(Exception): pass def _CheckResult(result): if result.failed: base.ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason, result.output) class LogicalVolume(base.BlockDev): _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$") _PARSE_PV_DEV_RE = re.compile(r"^([^ ()]+)\([0-9]+\)$") _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"]) _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"]) def __init__(self, unique_id, children, size, params, dyn_params, **kwargs): super(LogicalVolume, self).__init__(unique_id, children, size, params, dyn_params, **kwargs) if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: raise ValueError("Invalid configuration data %s" % str(unique_id)) self._vg_name, self._lv_name = unique_id self._ValidateName(self._vg_name) self._ValidateName(self._lv_name) self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name) self._degraded = True self.major = self.minor = self.pe_size = self.stripe_count = None self.pv_names = None lvs_cache = kwargs.get("lvs_cache") if lvs_cache: lv_info = lvs_cache.get(self.dev_path) self.Attach(lv_info=lv_info) else: self.Attach() @staticmethod def _GetStdPvSize(pvs_info): assert len(pvs_info) > 0 smallest = min([pv.size for pv in pvs_info]) return smallest // (1 + constants.PART_MARGIN + constants.PART_RESERVED) @staticmethod def _ComputeNumPvs(size, pvs_info): assert len(pvs_info) > 0 pv_size = float(LogicalVolume._GetStdPvSize(pvs_info)) return int(math.ceil(float(size) / pv_size)) @staticmethod def _GetEmptyPvNames(pvs_info, max_pvs=None): empty_pvs = [pv for pv in pvs_info if objects.LvmPvInfo.IsEmpty(pv)] if max_pvs is not None: empty_pvs = empty_pvs[:max_pvs] return [pv.name for pv in empty_pvs] @classmethod def Create(cls, unique_id, children, size, spindles, params, excl_stor, dyn_params, **kwargs): if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: raise errors.ProgrammerError("Invalid configuration data %s" % str(unique_id)) vg_name, lv_name = unique_id cls._ValidateName(vg_name) cls._ValidateName(lv_name) pvs_info = cls.GetPVInfo([vg_name]) if not pvs_info: if excl_stor: msg = "No (empty) PVs found" else: msg = "Can't compute PV info for vg %s" % vg_name base.ThrowError(msg) pvs_info.sort(key=(lambda pv: pv.free), reverse=True) pvlist = [pv.name for pv in pvs_info] if compat.any(":" in v for v in pvlist): base.ThrowError("Some of your PVs have the invalid character ':' in their" " name, this is not supported - please filter them out" " in lvm.conf using either 'filter' or 'preferred_names'") current_pvs = len(pvlist) desired_stripes = params[constants.LDP_STRIPES] stripes = min(current_pvs, desired_stripes) if excl_stor: if spindles is None: base.ThrowError("Unspecified number of spindles: this is required" "when exclusive storage is enabled, try running" " gnt-cluster repair-disk-sizes") (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info) if err_msgs: for m in err_msgs: logging.warning(m) req_pvs = cls._ComputeNumPvs(size, pvs_info) if spindles < req_pvs: 
base.ThrowError("Requested number of spindles (%s) is not enough for" " a disk of %d MB (at least %d spindles needed)", spindles, size, req_pvs) else: req_pvs = spindles pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs) current_pvs = len(pvlist) if current_pvs < req_pvs: base.ThrowError("Not enough empty PVs (spindles) to create a disk of %d" " MB: %d available, %d needed", size, current_pvs, req_pvs) assert current_pvs == len(pvlist) stripes = current_pvs if stripes > desired_stripes: logging.warning("Using %s stripes instead of %s, to be able to use" " %s spindles", stripes, desired_stripes, current_pvs) else: if stripes < desired_stripes: logging.warning("Could not use %d stripes for VG %s, as only %d PVs are" " available.", desired_stripes, vg_name, current_pvs) free_size = sum([pv.free for pv in pvs_info]) if free_size < size: base.ThrowError("Not enough free space: required %s," " available %s", size, free_size) cmd = ["lvcreate", "-Wn", "-L%dm" % size, "-n%s" % lv_name] for stripes_arg in range(stripes, 0, -1): result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist) if not result.failed: break if result.failed: base.ThrowError("LV create failed (%s): %s", result.fail_reason, result.output) return LogicalVolume(unique_id, children, size, params, dyn_params, **kwargs) @staticmethod def _GetVolumeInfo(lvm_cmd, fields): if not fields: raise errors.ProgrammerError("No fields specified") sep = "|" cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered", "--separator=%s" % sep, "-o%s" % ",".join(fields)] result = utils.RunCmd(cmd) if result.failed: raise errors.CommandError("Can't get the volume information: %s - %s" % (result.fail_reason, result.output)) data = [] for line in result.stdout.splitlines(): splitted_fields = line.strip().split(sep) if len(fields) != len(splitted_fields): raise errors.CommandError("Can't parse %s output: line '%s'" % (lvm_cmd, line)) data.append(splitted_fields) return data @classmethod def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False): if include_lvs: lvfield = "lv_name" else: lvfield = "pv_name" try: info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free", "pv_attr", "pv_size", lvfield]) except errors.GenericError as err: logging.error("Can't get PV information: %s", err) return None if include_lvs: info.sort(key=(lambda i: (i[0], i[5]))) data = [] lastpvi = None for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info: if filter_allocatable and pv_attr[0] != "a": continue if vg_names and vg_name not in vg_names: continue if lastpvi and lastpvi.name == pv_name: if include_lvs and lv_name: if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name: lastpvi.lv_list.append(lv_name) else: if include_lvs and lv_name: lvl = [lv_name] else: lvl = [] lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name, size=float(pv_size), free=float(pv_free), attributes=pv_attr, lv_list=lvl) data.append(lastpvi) return data @classmethod def _GetRawFreePvInfo(cls, vg_name): pvs_info = cls.GetPVInfo([vg_name]) if not pvs_info: pv_size = 0.0 free_pvs = 0 num_pvs = 0 else: pv_size = cls._GetStdPvSize(pvs_info) free_pvs = len(cls._GetEmptyPvNames(pvs_info)) num_pvs = len(pvs_info) return (pv_size, free_pvs, num_pvs) @classmethod def _GetExclusiveStorageVgFree(cls, vg_name): (pv_size, free_pvs, _) = cls._GetRawFreePvInfo(vg_name) return pv_size * free_pvs @classmethod def GetVgSpindlesInfo(cls, vg_name): (_, free_pvs, num_pvs) = cls._GetRawFreePvInfo(vg_name) return (free_pvs, num_pvs) @classmethod def 
GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True): try: info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr", "vg_size"]) except errors.GenericError as err: logging.error("Can't get VG information: %s", err) return None data = [] for vg_name, vg_free, vg_attr, vg_size in info: if filter_readonly and vg_attr[0] == "r": continue if vg_names and vg_name not in vg_names: continue if excl_stor: es_free = cls._GetExclusiveStorageVgFree(vg_name) assert es_free <= vg_free vg_free = es_free data.append((float(vg_free), float(vg_size), vg_name)) return data @classmethod def _ValidateName(cls, name): if (not cls._VALID_NAME_RE.match(name) or name in cls._INVALID_NAMES or compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)): base.ThrowError("Invalid LVM name '%s'", name) def Remove(self): if not self.minor and not self.Attach(): return result = utils.RunCmd(["lvremove", "-f", "%s/%s" % (self._vg_name, self._lv_name)]) if result.failed: base.ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output) def Rename(self, new_id): if not isinstance(new_id, (tuple, list)) or len(new_id) != 2: raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id) new_vg, new_name = new_id if new_vg != self._vg_name: raise errors.ProgrammerError("Can't move a logical volume across" " volume groups (from %s to to %s)" % (self._vg_name, new_vg)) result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name]) if result.failed: base.ThrowError("Failed to rename the logical volume: %s", result.output) self._lv_name = new_name self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name) @staticmethod def _ParseLvInfoLine(line, sep): elems = line.strip().split(sep) if len(elems) == 9 and elems[-1] == "": elems.pop() if len(elems) != 8: base.ThrowError("Can't parse LVS output, len(%s) != 8", str(elems)) (vg_name, lv_name, status, major, minor, pe_size, stripes, pvs) = elems path = os.path.join(os.environ.get('DM_DEV_DIR', '/dev'), vg_name, lv_name) if len(status) < 6: base.ThrowError("lvs lv_attr is not at least 6 characters (%s)", status) try: major = int(major) minor = int(minor) except (TypeError, ValueError) as err: base.ThrowError("lvs major/minor cannot be parsed: %s", str(err)) try: pe_size = int(float(pe_size)) except (TypeError, ValueError) as err: base.ThrowError("Can't parse vg extent size: %s", err) try: stripes = int(stripes) except (TypeError, ValueError) as err: base.ThrowError("Can't parse the number of stripes: %s", err) pv_names = [] if pvs != "": for pv in pvs.split(","): m = re.match(LogicalVolume._PARSE_PV_DEV_RE, pv) if not m: base.ThrowError("Can't parse this device list: %s", pvs) pv_names.append(m.group(1)) return (path, (status, major, minor, pe_size, stripes, pv_names)) @staticmethod def GetLvGlobalInfo(_run_cmd=utils.RunCmd): sep = "|" result = _run_cmd(["lvs", "--noheadings", "--separator=%s" % sep, "--units=k", "--nosuffix", "-ovg_name,lv_name,lv_attr,lv_kernel_major," "lv_kernel_minor,vg_extent_size,stripes,devices"]) if result.failed: logging.warning("lvs command failed, the LV cache will be empty!") logging.info("lvs failure: %r", result.stderr) return {} out = result.stdout.splitlines() if not out: logging.warning("lvs command returned an empty output, the LV cache will" "be empty!") return {} return dict([LogicalVolume._ParseLvInfoLine(line, sep) for line in out]) def Attach(self, lv_info=None, **kwargs): self.attached = False if not lv_info: lv_info = LogicalVolume.GetLvGlobalInfo().get(self.dev_path) if not lv_info: 
return False (status, major, minor, pe_size, stripes, pv_names) = lv_info self.major = major self.minor = minor self.pe_size = pe_size self.stripe_count = stripes self._degraded = status[0] == "v" self.pv_names = pv_names self.attached = True return True def Assemble(self): result = utils.RunCmd(["lvchange", "-ay", self.dev_path]) if result.failed: base.ThrowError("Can't activate lv %s: %s", self.dev_path, result.output) def Shutdown(self): pass def GetSyncStatus(self): if self._degraded: ldisk_status = constants.LDS_FAULTY else: ldisk_status = constants.LDS_OKAY return objects.BlockDevStatus(dev_path=self.dev_path, major=self.major, minor=self.minor, sync_percent=None, estimated_time=None, is_degraded=self._degraded, ldisk_status=ldisk_status) def Open(self, force=False, exclusive=True): pass def Close(self): pass
BSD 2-Clause Simplified License
distributedsystemsgroup/zoe
zoe_api/tests/mock_master_api.py
MockAPIManager.execution_delete
python
def execution_delete(self, exec_id: int) -> APIReturnType: assert isinstance(exec_id, int) return self._request_reply()
Delete an execution.
https://github.com/distributedsystemsgroup/zoe/blob/c8e0c908af1954a8b41d0f6de23d08589564f0ab/zoe_api/tests/mock_master_api.py#L51-L54
import logging from typing import Tuple, Union log = logging.getLogger(__name__) APIReturnType = Tuple[bool, Union[str, dict]] class MockAPIManager: def __init__(self): self.fails = False def _request_reply(self) -> APIReturnType: if not self.fails: return True, 'No error message' else: return False, 'Fake error message' def execution_start(self, exec_id: int) -> APIReturnType: assert isinstance(exec_id, int) return self._request_reply() def execution_terminate(self, exec_id: int) -> APIReturnType: assert isinstance(exec_id, int) return self._request_reply()
Apache License 2.0
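Usage sketch for the mock above, following the context in the record (the fails flag flips the canned reply):

api = MockAPIManager()
assert api.execution_delete(42) == (True, 'No error message')

api.fails = True
assert api.execution_delete(42) == (False, 'Fake error message')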
red-hat-storage/ocs-ci
tests/fixtures.py
create_pods
python
def create_pods(request): class_instance = request.node.cls def finalizer(): if hasattr(class_instance, "pod_objs"): for pod in class_instance.pod_objs: pod.delete() request.addfinalizer(finalizer) class_instance.pod_objs = list() for pvc_obj in class_instance.pvc_objs: class_instance.pod_objs.append( helpers.create_pod( interface_type=class_instance.interface, pvc_name=pvc_obj.name, do_reload=False, namespace=class_instance.namespace, ) ) for pod in class_instance.pod_objs: helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING)
Create multiple pods
https://github.com/red-hat-storage/ocs-ci/blob/81bc3dd3c2bccbf875ffa8fa5fa2eb0ac9d52b7e/tests/fixtures.py#L250-L278
import pytest from ocs_ci.ocs.resources.pod import delete_deploymentconfig_pods from ocs_ci.helpers import helpers from ocs_ci.ocs import constants, ocp @pytest.fixture() def create_rbd_secret(request): class_instance = request.node.cls def finalizer(): if hasattr(class_instance, "rbd_secret_obj"): class_instance.rbd_secret_obj.delete() class_instance.rbd_secret_obj.ocp.wait_for_delete( class_instance.rbd_secret_obj.name ) request.addfinalizer(finalizer) class_instance.rbd_secret_obj = helpers.create_secret( interface_type=constants.CEPHBLOCKPOOL ) assert class_instance.rbd_secret_obj, "Failed to create secret" @pytest.fixture() def create_cephfs_secret(request): class_instance = request.node.cls def finalizer(): if hasattr(class_instance, "cephfs_secret_obj"): class_instance.cephfs_secret_obj.delete() class_instance.cephfs_secret_obj.ocp.wait_for_delete( class_instance.cephfs_secret_obj.name ) request.addfinalizer(finalizer) class_instance.cephfs_secret_obj = helpers.create_secret( interface_type=constants.CEPHFILESYSTEM ) assert class_instance.cephfs_secret_obj, "Failed to create secret" @pytest.fixture() def create_ceph_block_pool(request): class_instance = request.node.cls def finalizer(): if hasattr(class_instance, "cbp_obj"): class_instance.cbp_obj.delete() request.addfinalizer(finalizer) class_instance.cbp_obj = helpers.create_ceph_block_pool() assert class_instance.cbp_obj, "Failed to create block pool" @pytest.fixture() def create_rbd_storageclass(request): class_instance = request.node.cls def finalizer(): if class_instance.sc_obj.get(): class_instance.sc_obj.delete() class_instance.sc_obj.ocp.wait_for_delete(class_instance.sc_obj.name) request.addfinalizer(finalizer) if not hasattr(class_instance, "reclaim_policy"): class_instance.reclaim_policy = constants.RECLAIM_POLICY_DELETE class_instance.sc_obj = helpers.create_storage_class( interface_type=constants.CEPHBLOCKPOOL, interface_name=class_instance.cbp_obj.name, secret_name=class_instance.rbd_secret_obj.name, reclaim_policy=class_instance.reclaim_policy, ) assert class_instance.sc_obj, "Failed to create storage class" @pytest.fixture() def create_cephfs_storageclass(request): class_instance = request.node.cls def finalizer(): if class_instance.sc_obj.get(): class_instance.sc_obj.delete() class_instance.sc_obj.ocp.wait_for_delete(class_instance.sc_obj.name) request.addfinalizer(finalizer) class_instance.sc_obj = helpers.create_storage_class( interface_type=constants.CEPHFILESYSTEM, interface_name=helpers.get_cephfs_data_pool_name(), secret_name=class_instance.cephfs_secret_obj.name, ) assert class_instance.sc_obj, "Failed to create storage class" @pytest.fixture() def create_project(request): class_instance = request.node.cls def finalizer(): ocp.switch_to_default_rook_cluster_project() class_instance.project_obj.delete(resource_name=class_instance.namespace) class_instance.project_obj.wait_for_delete(class_instance.namespace) request.addfinalizer(finalizer) class_instance.project_obj = helpers.create_project() class_instance.namespace = class_instance.project_obj.namespace @pytest.fixture() def create_pvc(request): class_instance = request.node.cls class_instance.pvc_obj = helpers.create_pvc( sc_name=class_instance.sc_obj.name, namespace=class_instance.namespace ) helpers.wait_for_resource_state(class_instance.pvc_obj, constants.STATUS_BOUND) class_instance.pvc_obj.reload() @pytest.fixture() def delete_pvc(request): class_instance = request.node.cls def finalizer(): if hasattr(class_instance, "pvc_obj"): 
class_instance.pvc_obj.delete() request.addfinalizer(finalizer) @pytest.fixture() def create_rbd_pod(request): class_instance = request.node.cls class_instance.pod_obj = helpers.create_pod( interface_type=constants.CEPHBLOCKPOOL, pvc_name=class_instance.pvc_obj.name, namespace=class_instance.namespace, ) helpers.wait_for_resource_state(class_instance.pod_obj, constants.STATUS_RUNNING) class_instance.pod_obj.reload() @pytest.fixture() def delete_pod(request): class_instance = request.node.cls def finalizer(): if hasattr(class_instance, "pod_obj"): class_instance.pod_obj.delete() request.addfinalizer(finalizer) @pytest.fixture() def create_pvcs(request): class_instance = request.node.cls def finalizer(): if hasattr(class_instance, "pvc_objs"): for pvc_obj in class_instance.pvc_objs: pvc_obj.reload() backed_pv_name = pvc_obj.backed_pv pvc_obj.delete() for pvc_obj in class_instance.pvc_objs: pvc_obj.ocp.wait_for_delete(pvc_obj.name) helpers.validate_pv_delete(backed_pv_name) request.addfinalizer(finalizer) class_instance.pvc_objs, _ = helpers.create_multiple_pvcs( sc_name=class_instance.sc_obj.name, number_of_pvc=class_instance.num_of_pvcs, size=class_instance.pvc_size, namespace=class_instance.namespace, ) for pvc_obj in class_instance.pvc_objs: helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND) pvc_obj.reload() @pytest.fixture()
MIT License
resonantgeodata/resonantgeodata
django-rgd/rgd/filters.py
SpatialEntryFilter.filter_distance
python
def filter_distance(self, queryset, name, value): if value and self._has_geom: geom = self._geometry if value.start is not None: queryset = queryset.filter(footprint__distance_gte=(geom, D(m=value.start))) if value.stop is not None: queryset = queryset.filter(footprint__distance_lte=(geom, D(m=value.stop))) return queryset
Filter the queryset by distance to the queried geometry. We may wish to use the distance in degrees later on. This is very taxing on the DBMS right now. The distance in degrees can be provided by the initial geometry query.
https://github.com/resonantgeodata/resonantgeodata/blob/0b2577aaa2138c3365885033ba9206e4dadf94f0/django-rgd/rgd/filters.py#L111-L124
from django.contrib.gis import forms from django.contrib.gis.db.models.functions import GeometryDistance from django.contrib.gis.measure import D from django.core.validators import RegexValidator from django.db.models import F from django_filters import rest_framework as filters from rgd.models import SpatialEntry class GeometryFilter(filters.Filter): field_class = forms.GeometryField field_class.widget.map_srid = 4326 class SpatialEntryFilter(filters.FilterSet): q = GeometryFilter( help_text='A Well-known text (WKT) representation of a geometry or a GeoJSON.', label='WKT/GeoJSON', method='filter_q', ) predicate = filters.ChoiceFilter( choices=( ('contains', 'contains'), ('crosses', 'crosses'), ('disjoint', 'disjoint'), ('equals', 'equals'), ('intersects', 'intersects'), ('overlaps', 'overlaps'), ('touches', 'touches'), ('within', 'within'), ), help_text=( 'A named spatial predicate based on the DE-9IM. This spatial predicate will be used ' 'to filter data such that `predicate(a, b)` where `b` is the queried geometry.' ), label='Spatial predicate', method='filter_predicate', ) relates = filters.CharFilter( help_text=( 'Specify exactly how the queried geometry should relate to the data using a DE-9IM ' 'string code.' ), label='DE-9IM string code', max_length=9, method='filter_relates', min_length=9, validators=( RegexValidator(regex=r'^[\*012TF]{9}$', message='Enter a valid DE-9IM string code.'), ), ) distance = filters.RangeFilter( help_text='The minimum/maximum distance around the queried geometry in meters.', label='Distance', method='filter_distance', ) acquired = filters.IsoDateTimeFromToRangeFilter( field_name='acquisition_date', help_text='The ISO 8601 formatted date and time when data was acquired.', label='Acquired', ) instrumentation = filters.CharFilter( field_name='instrumentation', help_text='The instrumentation used to acquire at least one of these data.', label='Instrumentation', lookup_expr='icontains', ) time_of_day = filters.TimeRangeFilter( help_text='The minimum/maximum times during the day the records were acquired.', label='Time of Day', method='filter_time_of_day', ) @property def _geometry(self): return self.form.cleaned_data['q'] @property def _has_geom(self): return self._geometry is not None def filter_q(self, queryset, name, value): if value: queryset = queryset.annotate(distance=GeometryDistance('footprint', value)).order_by( 'distance' ) return queryset def filter_predicate(self, queryset, name, value): if value and self._has_geom: queryset = queryset.filter(**{f'footprint__{value}': self._geometry}) return queryset def filter_relates(self, queryset, name, value): if value and self._has_geom: queryset = queryset.filter(footprint__relates=(self._geometry, value)) return queryset
Apache License 2.0
hobson/aima
aima/logic.py
variables
python
def variables(s): result = set([]) def walk(s): if is_variable(s): result.add(s) else: for arg in s.args: walk(arg) walk(s) return result
Return a set of the variables in expression s. >>> ppset(variables(F(x, A, y))) set([x, y]) >>> ppset(variables(F(G(x), z))) set([x, z]) >>> ppset(variables(expr('F(x, x) & G(x, y) & H(y, z) & R(A, z, z)'))) set([x, y, z])
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/logic.py#L258-L275
import itertools, re import agents from utils import * class KB: def __init__(self, sentence=None): abstract def tell(self, sentence): abstract def ask(self, query): for result in self.ask_generator(query): return result return False def ask_generator(self, query): abstract def retract(self, sentence): abstract class PropKB(KB): def __init__(self, sentence=None): self.clauses = [] if sentence: self.tell(sentence) def tell(self, sentence): self.clauses.extend(conjuncts(to_cnf(sentence))) def ask_generator(self, query): if tt_entails(Expr('&', *self.clauses), query): yield {} def retract(self, sentence): for c in conjuncts(to_cnf(sentence)): if c in self.clauses: self.clauses.remove(c) def KB_AgentProgram(KB): steps = itertools.count() def program(percept): t = steps.next() KB.tell(make_percept_sentence(percept, t)) action = KB.ask(make_action_query(t)) KB.tell(make_action_sentence(action, t)) return action def make_percept_sentence(self, percept, t): return Expr("Percept")(percept, t) def make_action_query(self, t): return expr("ShouldDo(action, %d)" % t) def make_action_sentence(self, action, t): return Expr("Did")(action[expr('action')], t) return program class Expr: def __init__(self, op, *args): assert isinstance(op, str) or (isnumber(op) and not args) self.op = num_or_str(op) self.args = map(expr, args) def __call__(self, *args): assert is_symbol(self.op) and not self.args return Expr(self.op, *args) def __repr__(self): if not self.args: return str(self.op) elif is_symbol(self.op): return '%s(%s)' % (self.op, ', '.join(map(repr, self.args))) elif len(self.args) == 1: return self.op + repr(self.args[0]) else: return '(%s)' % (' '+self.op+' ').join(map(repr, self.args)) def __eq__(self, other): return (other is self) or (isinstance(other, Expr) and self.op == other.op and self.args == other.args) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.op) ^ hash(tuple(self.args)) def __lt__(self, other): return Expr('<', self, other) def __le__(self, other): return Expr('<=', self, other) def __ge__(self, other): return Expr('>=', self, other) def __gt__(self, other): return Expr('>', self, other) def __add__(self, other): return Expr('+', self, other) def __sub__(self, other): return Expr('-', self, other) def __and__(self, other): return Expr('&', self, other) def __div__(self, other): return Expr('/', self, other) def __truediv__(self, other):return Expr('/', self, other) def __invert__(self): return Expr('~', self) def __lshift__(self, other): return Expr('<<', self, other) def __rshift__(self, other): return Expr('>>', self, other) def __mul__(self, other): return Expr('*', self, other) def __neg__(self): return Expr('-', self) def __or__(self, other): return Expr('|', self, other) def __pow__(self, other): return Expr('**', self, other) def __xor__(self, other): return Expr('^', self, other) def __mod__(self, other): return Expr('<=>', self, other) def expr(s): if isinstance(s, Expr): return s if isnumber(s): return Expr(s) s = s.replace('==>', '>>').replace('<==', '<<') s = s.replace('<=>', '%').replace('=/=', '^') s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr("\1")', s) return eval(s, {'Expr':Expr}) def is_symbol(s): return isinstance(s, str) and s[:1].isalpha() def is_var_symbol(s): return is_symbol(s) and s[0].islower() def is_prop_symbol(s): return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE'
MIT License
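The body of variables() itself sits above this excerpt and is not reproduced in the record; the following recursive sketch, built only on the Expr class and the is_var_symbol helper visible in the context, is illustrative rather than the library's actual implementation.

    from aima.logic import expr, is_var_symbol

    def variables_sketch(s):
        """Collect every variable (lowercase-initial, argument-less Expr) in expression s."""
        result = set()
        def walk(e):
            if is_var_symbol(e.op) and not e.args:
                result.add(e)
            for arg in e.args:
                walk(arg)
        walk(s)
        return result

    # variables_sketch(expr('F(x, A, y)')) -> {x, y}, matching the doctest above.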
backtick-se/cowait
cowait/engine/docker/docker.py
DockerProvider.destroy_all
python
def destroy_all(self) -> None: try: containers = self.docker.containers.list( all=True, filters={ 'label': LABEL_TASK_ID, }, ) for container in containers: container.remove(force=True) except requests.exceptions.ConnectionError: raise ProviderError('Docker engine unavailable')
Destroys all running tasks
https://github.com/backtick-se/cowait/blob/ca1efb9857b5e0dda3ed0b4237c1ad4b7bb62454/cowait/engine/docker/docker.py#L86-L100
import docker import requests.exceptions from cowait.network import get_remote_url from cowait.tasks import TaskDefinition from cowait.utils import json_stream from cowait.engine.const import LABEL_TASK_ID, LABEL_PARENT_ID from cowait.engine.cluster import ClusterProvider from cowait.engine.errors import ProviderError from cowait.engine.routers import create_router from .task import DockerTask from .volumes import create_volumes from .utils import create_env, create_ports, extract_container_taskdef DEFAULT_NETWORK = 'cowait' class DockerProvider(ClusterProvider): def __init__(self, args={}): super().__init__('docker', args) self.docker = docker.from_env() self.router = create_router(self, self.args.get('router', 'local')) @property def network(self): return self.args.get('network', DEFAULT_NETWORK) def spawn(self, taskdef: TaskDefinition, deploy: bool = False) -> DockerTask: try: self.ensure_network() self.emit_sync('prepare', taskdef=taskdef) cpu_period = 100000 cpu_quota = float(taskdef.cpu_limit or 0) * cpu_period container = self.docker.containers.run( detach=True, image=taskdef.image, name=taskdef.id, hostname=taskdef.id, network=self.network, ports=create_ports(taskdef), environment=create_env(self, taskdef), mounts=create_volumes(taskdef.volumes), cpu_quota=int(cpu_quota), cpu_period=int(cpu_period), mem_reservation=str(taskdef.memory or 0), mem_limit=str(taskdef.memory_limit or 0), restart_policy=None if not deploy else {'Name': 'always'}, labels={ LABEL_TASK_ID: taskdef.id, LABEL_PARENT_ID: taskdef.parent, **taskdef.meta, }, ) task = DockerTask(self, taskdef, container) self.emit_sync('spawn', task=task) return task except docker.errors.APIError as e: raise ProviderError(e.explanation) except requests.exceptions.ConnectionError: raise ProviderError('Docker engine unavailable') def wait(self, task: DockerTask) -> bool: result = task.container.wait() return result['StatusCode'] == 0 def list_all(self) -> list: try: containers = self.docker.containers.list( filters={'label': LABEL_TASK_ID}, ) return [extract_container_taskdef(c) for c in containers] except requests.exceptions.ConnectionError: raise ProviderError('Docker engine unavailable')
Apache License 2.0
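A minimal usage sketch for the teardown method above, assuming a reachable local Docker daemon; the import path is inferred from the file path in this record.

    from cowait.engine.docker.docker import DockerProvider
    from cowait.engine.errors import ProviderError

    provider = DockerProvider()
    try:
        # Removes every container carrying the cowait task label, running or not.
        provider.destroy_all()
    except ProviderError as err:
        print("Docker engine unavailable:", err)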
lark-parser/lark
lark/parsers/earley_forest.py
ForestTransformer.transform_intermediate_node
python
def transform_intermediate_node(self, node, data): return node
Transform an intermediate node.
https://github.com/lark-parser/lark/blob/ee664b57201fb6e31bdb106d509ceab22ef1dd66/lark/parsers/earley_forest.py#L404-L406
from random import randint from collections import deque from operator import attrgetter from importlib import import_module from functools import partial from ..parse_tree_builder import AmbiguousIntermediateExpander from ..visitors import Discard from ..lexer import Token from ..utils import logger from ..tree import Tree class ForestNode: pass class SymbolNode(ForestNode): __slots__ = ('s', 'start', 'end', '_children', 'paths', 'paths_loaded', 'priority', 'is_intermediate', '_hash') def __init__(self, s, start, end): self.s = s self.start = start self.end = end self._children = set() self.paths = set() self.paths_loaded = False self.priority = float('-inf') self.is_intermediate = isinstance(s, tuple) self._hash = hash((self.s, self.start, self.end)) def add_family(self, lr0, rule, start, left, right): self._children.add(PackedNode(self, lr0, rule, start, left, right)) def add_path(self, transitive, node): self.paths.add((transitive, node)) def load_paths(self): for transitive, node in self.paths: if transitive.next_titem is not None: vn = SymbolNode(transitive.next_titem.s, transitive.next_titem.start, self.end) vn.add_path(transitive.next_titem, node) self.add_family(transitive.reduction.rule.origin, transitive.reduction.rule, transitive.reduction.start, transitive.reduction.node, vn) else: self.add_family(transitive.reduction.rule.origin, transitive.reduction.rule, transitive.reduction.start, transitive.reduction.node, node) self.paths_loaded = True @property def is_ambiguous(self): return len(self.children) > 1 @property def children(self): if not self.paths_loaded: self.load_paths() return sorted(self._children, key=attrgetter('sort_key')) def __iter__(self): return iter(self._children) def __eq__(self, other): if not isinstance(other, SymbolNode): return False return self is other or (type(self.s) == type(other.s) and self.s == other.s and self.start == other.start and self.end is other.end) def __hash__(self): return self._hash def __repr__(self): if self.is_intermediate: rule = self.s[0] ptr = self.s[1] before = ( expansion.name for expansion in rule.expansion[:ptr] ) after = ( expansion.name for expansion in rule.expansion[ptr:] ) symbol = "{} ::= {}* {}".format(rule.origin.name, ' '.join(before), ' '.join(after)) else: symbol = self.s.name return "({}, {}, {}, {})".format(symbol, self.start, self.end, self.priority) class PackedNode(ForestNode): __slots__ = ('parent', 's', 'rule', 'start', 'left', 'right', 'priority', '_hash') def __init__(self, parent, s, rule, start, left, right): self.parent = parent self.s = s self.start = start self.rule = rule self.left = left self.right = right self.priority = float('-inf') self._hash = hash((self.left, self.right)) @property def is_empty(self): return self.left is None and self.right is None @property def sort_key(self): return self.is_empty, -self.priority, self.rule.order @property def children(self): return [x for x in [self.left, self.right] if x is not None] def __iter__(self): yield self.left yield self.right def __eq__(self, other): if not isinstance(other, PackedNode): return False return self is other or (self.left == other.left and self.right == other.right) def __hash__(self): return self._hash def __repr__(self): if isinstance(self.s, tuple): rule = self.s[0] ptr = self.s[1] before = ( expansion.name for expansion in rule.expansion[:ptr] ) after = ( expansion.name for expansion in rule.expansion[ptr:] ) symbol = "{} ::= {}* {}".format(rule.origin.name, ' '.join(before), ' '.join(after)) else: symbol = self.s.name return 
"({}, {}, {}, {})".format(symbol, self.start, self.priority, self.rule.order) class TokenNode(ForestNode): __slots__ = ('token', 'term', 'priority', '_hash') def __init__(self, token, term, priority=None): self.token = token self.term = term if priority is not None: self.priority = priority else: self.priority = term.priority if term is not None else 0 self._hash = hash(token) def __eq__(self, other): if not isinstance(other, TokenNode): return False return self is other or (self.token == other.token) def __hash__(self): return self._hash def __repr__(self): return repr(self.token) class ForestVisitor: def __init__(self, single_visit=False): self.single_visit = single_visit def visit_token_node(self, node): pass def visit_symbol_node_in(self, node): pass def visit_symbol_node_out(self, node): pass def visit_packed_node_in(self, node): pass def visit_packed_node_out(self, node): pass def on_cycle(self, node, path): pass def get_cycle_in_path(self, node, path): index = len(path) - 1 while id(path[index]) != id(node): index -= 1 return path[index:] def visit(self, root): visiting = set() visited = set() path = [] input_stack = deque([root]) vpno = getattr(self, 'visit_packed_node_out') vpni = getattr(self, 'visit_packed_node_in') vsno = getattr(self, 'visit_symbol_node_out') vsni = getattr(self, 'visit_symbol_node_in') vino = getattr(self, 'visit_intermediate_node_out', vsno) vini = getattr(self, 'visit_intermediate_node_in', vsni) vtn = getattr(self, 'visit_token_node') oc = getattr(self, 'on_cycle') while input_stack: current = next(reversed(input_stack)) try: next_node = next(current) except StopIteration: input_stack.pop() continue except TypeError: pass else: if next_node is None: continue if id(next_node) in visiting: oc(next_node, path) continue input_stack.append(next_node) continue if isinstance(current, TokenNode): vtn(current.token) input_stack.pop() continue current_id = id(current) if current_id in visiting: if isinstance(current, PackedNode): vpno(current) elif current.is_intermediate: vino(current) else: vsno(current) input_stack.pop() path.pop() visiting.remove(current_id) visited.add(current_id) elif self.single_visit and current_id in visited: input_stack.pop() else: visiting.add(current_id) path.append(current) if isinstance(current, PackedNode): next_node = vpni(current) elif current.is_intermediate: next_node = vini(current) else: next_node = vsni(current) if next_node is None: continue if not isinstance(next_node, ForestNode): next_node = iter(next_node) elif id(next_node) in visiting: oc(next_node, path) continue input_stack.append(next_node) class ForestTransformer(ForestVisitor): def __init__(self): super(ForestTransformer, self).__init__() self.data = dict() self.node_stack = deque() def transform(self, root): self.node_stack.append('result') self.data['result'] = [] self.visit(root) assert len(self.data['result']) <= 1 if self.data['result']: return self.data['result'][0] def transform_symbol_node(self, node, data): return node
MIT License
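The hook above is a no-op by default; a subclass that overrides it might look like the sketch below (the subclass is hypothetical, not part of lark).

    from lark.parsers.earley_forest import ForestTransformer

    class CountingTransformer(ForestTransformer):
        """Hypothetical subclass: tallies intermediate nodes while leaving them unchanged."""

        def __init__(self):
            super().__init__()
            self.intermediate_nodes = 0

        def transform_intermediate_node(self, node, data):
            # Keep the default behaviour (return the node as-is) but count the visit.
            self.intermediate_nodes += 1
            return node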
veeso/attila
attila/virtual/virtualserial.py
VirtualSerial.read_line
python
def read_line(self) -> bytes: return self.__readCB(0).encode("utf-8")
Read a line. :returns: bytes
https://github.com/veeso/attila/blob/b354251433e03fc7531cd2c7556881327c74079b/attila/virtual/virtualserial.py#L142-L148
from attila.virtual.exceptions import VirtualSerialException from typing import Callable, Optional class VirtualSerial(object): def __init__(self, serial_port: str, baudrate: int, timeout: int = 0, read_callback: Optional[Callable[[], str]] = None, write_callback: Optional[Callable[[str], None]] = None, in_waiting_callback: Optional[Callable[[], int]] = None): self.serial_port = serial_port self.baudrate = baudrate self.timeout = timeout self.__writeCB = write_callback self.__readCB = read_callback self.__in_waiting_callback = in_waiting_callback @property def serial_port(self): return self._serial_port @serial_port.setter def serial_port(self, serial_port: str): self._serial_port = serial_port @property def baudrate(self): return self._baudrate @baudrate.setter def baudrate(self, baud_rate: int): self._baudrate = baud_rate @property def timeout(self): return self._timeout @timeout.setter def timeout(self, timeout: int): if timeout: if timeout > 0: self._timeout = timeout else: self._timeout = 10 else: self._timeout = 10 @property def in_waiting(self): if self.__in_waiting_callback: return self.__in_waiting_callback() else: return False def open(self): if not self.serial_port or not self.baudrate: raise VirtualSerialException( "Could not open %s" % self.serial_port) return True def close(self): if not self.serial_port or not self.baudrate: raise VirtualSerialException( "Could not close %s" % self.serial_port) return True def write(self, data: bytes) -> bool: if self.__writeCB: self.__writeCB(data) return True def read(self, nbytes: int = 1) -> bytes: if self.__readCB: response = self.__readCB(nbytes) return response.encode("utf-8") def read_lines(self) -> bytearray: response = [] str_tokens = self.__readCB(-1).splitlines() for token in str_tokens: response.append(token.encode("utf-8")) return response
MIT License
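A small usage sketch based on the constructor signature visible in the context; the port name and the fake read callback are placeholders.

    from attila.virtual.virtualserial import VirtualSerial

    def fake_read(nbytes):
        # read_line() invokes the read callback and encodes the returned string as UTF-8.
        return "OK\r\n"

    port = VirtualSerial("/dev/ttyV0", 115200, timeout=5, read_callback=fake_read)
    print(port.read_line())   # -> b'OK\r\n'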
devopshq/teamcity
dohq_teamcity/models/agent.py
Agent.ip
python
def ip(self, ip): self._ip = ip
Sets the ip of this Agent. :param ip: The ip of this Agent. # noqa: E501 :type: str
https://github.com/devopshq/teamcity/blob/84f1757ec1fddef27d39246a75739d047be0e831/dohq_teamcity/models/agent.py#L321-L329
from dohq_teamcity.custom.base_model import TeamCityObject class Agent(TeamCityObject): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'int', 'name': 'str', 'type_id': 'int', 'connected': 'bool', 'enabled': 'bool', 'authorized': 'bool', 'uptodate': 'bool', 'ip': 'str', 'protocol': 'str', 'version': 'str', 'last_activity_time': 'str', 'disconnection_comment': 'str', 'href': 'str', 'web_url': 'str', 'build': 'Build', 'links': 'Links', 'enabled_info': 'EnabledInfo', 'authorized_info': 'AuthorizedInfo', 'properties': 'Properties', 'environment': 'Environment', 'pool': 'AgentPool', 'compatible_build_types': 'BuildTypes', 'incompatible_build_types': 'Compatibilities', 'locator': 'str' } attribute_map = { 'id': 'id', 'name': 'name', 'type_id': 'typeId', 'connected': 'connected', 'enabled': 'enabled', 'authorized': 'authorized', 'uptodate': 'uptodate', 'ip': 'ip', 'protocol': 'protocol', 'version': 'version', 'last_activity_time': 'lastActivityTime', 'disconnection_comment': 'disconnectionComment', 'href': 'href', 'web_url': 'webUrl', 'build': 'build', 'links': 'links', 'enabled_info': 'enabledInfo', 'authorized_info': 'authorizedInfo', 'properties': 'properties', 'environment': 'environment', 'pool': 'pool', 'compatible_build_types': 'compatibleBuildTypes', 'incompatible_build_types': 'incompatibleBuildTypes', 'locator': 'locator' } def __init__(self, id=None, name=None, type_id=None, connected=False, enabled=False, authorized=False, uptodate=False, ip=None, protocol=None, version=None, last_activity_time=None, disconnection_comment=None, href=None, web_url=None, build=None, links=None, enabled_info=None, authorized_info=None, properties=None, environment=None, pool=None, compatible_build_types=None, incompatible_build_types=None, locator=None, teamcity=None): self._id = None self._name = None self._type_id = None self._connected = None self._enabled = None self._authorized = None self._uptodate = None self._ip = None self._protocol = None self._version = None self._last_activity_time = None self._disconnection_comment = None self._href = None self._web_url = None self._build = None self._links = None self._enabled_info = None self._authorized_info = None self._properties = None self._environment = None self._pool = None self._compatible_build_types = None self._incompatible_build_types = None self._locator = None self.discriminator = None if id is not None: self.id = id if name is not None: self.name = name if type_id is not None: self.type_id = type_id if connected is not None: self.connected = connected if enabled is not None: self.enabled = enabled if authorized is not None: self.authorized = authorized if uptodate is not None: self.uptodate = uptodate if ip is not None: self.ip = ip if protocol is not None: self.protocol = protocol if version is not None: self.version = version if last_activity_time is not None: self.last_activity_time = last_activity_time if disconnection_comment is not None: self.disconnection_comment = disconnection_comment if href is not None: self.href = href if web_url is not None: self.web_url = web_url if build is not None: self.build = build if links is not None: self.links = links if enabled_info is not None: self.enabled_info = enabled_info if authorized_info is not None: self.authorized_info = authorized_info if properties is not None: self.properties = properties if environment is not None: 
self.environment = environment if pool is not None: self.pool = pool if compatible_build_types is not None: self.compatible_build_types = compatible_build_types if incompatible_build_types is not None: self.incompatible_build_types = incompatible_build_types if locator is not None: self.locator = locator super(Agent, self).__init__(teamcity=teamcity) @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def type_id(self): return self._type_id @type_id.setter def type_id(self, type_id): self._type_id = type_id @property def connected(self): return self._connected @connected.setter def connected(self, connected): self._connected = connected @property def enabled(self): return self._enabled @enabled.setter def enabled(self, enabled): self._enabled = enabled @property def authorized(self): return self._authorized @authorized.setter def authorized(self, authorized): self._authorized = authorized @property def uptodate(self): return self._uptodate @uptodate.setter def uptodate(self, uptodate): self._uptodate = uptodate @property def ip(self): return self._ip @ip.setter
MIT License
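A brief sketch of the property pair shown above; the field values are placeholders, and constructing an Agent outside a live TeamCity session may require a teamcity argument in practice.

    from dohq_teamcity.models.agent import Agent

    agent = Agent(id=42, name="build-agent-01")
    agent.ip = "10.0.0.15"   # the setter simply stores the value on the private _ip slot
    print(agent.ip)          # -> 10.0.0.15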
kriaga/health-checker
HealthChecker/venv/Lib/site-packages/nltk/chunk/api.py
ChunkParserI.parse
python
def parse(self, tokens): raise NotImplementedError()
Return the best chunk structure for the given tokens, as a tree. :param tokens: The list of (word, tag) tokens to be chunked. :type tokens: list(tuple) :rtype: Tree
https://github.com/kriaga/health-checker/blob/3d9ce933f131bcbb897103b0f509cc45393cae4a/HealthChecker/venv/Lib/site-packages/nltk/chunk/api.py#L25-L34
from nltk.parse import ParserI from nltk.chunk.util import ChunkScore class ChunkParserI(ParserI):
MIT License
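parse() is abstract, so a concrete subclass has to supply it; the toy chunker below is purely illustrative and is not part of NLTK.

    from nltk.chunk.api import ChunkParserI
    from nltk.tree import Tree

    class NounChunker(ChunkParserI):
        """Toy chunker: wraps every noun-tagged token in its own NP chunk."""

        def parse(self, tokens):
            chunks = []
            for word, tag in tokens:
                if tag.startswith("NN"):
                    chunks.append(Tree("NP", [(word, tag)]))
                else:
                    chunks.append((word, tag))
            return Tree("S", chunks)

    # NounChunker().parse([("dogs", "NNS"), ("bark", "VBP")])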
yelp/paasta
paasta_tools/mesos_maintenance.py
get_hosts_forgotten_draining
python
def get_hosts_forgotten_draining(grace=0): draining_hosts = get_draining_hosts() log.debug("draining_hosts: %s" % draining_hosts) hosts_past_maintenance_start = get_hosts_past_maintenance_start(grace=grace) log.debug("hosts_past_maintenance_start: %s" % hosts_past_maintenance_start) forgotten_draining = list( set(draining_hosts).intersection(hosts_past_maintenance_start) ) log.debug("forgotten_draining: %s" % forgotten_draining) return forgotten_draining
Find hosts that are still marked as draining (rather than down) after the start of their maintenance window. :param grace: integer number of nanoseconds to allow a host to be left in the draining state after the start of its maintenance window before we consider it forgotten. :returns: a list of hostnames of hosts forgotten draining
https://github.com/yelp/paasta/blob/bc1716253bbe003cec01bd02016010910c2b039c/paasta_tools/mesos_maintenance.py#L258-L276
import argparse import datetime import json import logging from socket import gaierror from socket import getfqdn from socket import gethostbyname from typing import List from typing import NamedTuple from typing import Optional import a_sync from dateutil import parser from pytimeparse import timeparse from requests import Request from requests import Session from requests.exceptions import HTTPError from paasta_tools.mesos_tools import get_count_running_tasks_on_slave from paasta_tools.mesos_tools import get_mesos_config_path from paasta_tools.mesos_tools import get_mesos_leader from paasta_tools.mesos_tools import get_mesos_master from paasta_tools.mesos_tools import MESOS_MASTER_PORT from paasta_tools.utils import SystemPaastaConfig from paasta_tools.utils import time_cache from paasta_tools.utils import to_bytes log = logging.getLogger(__name__) class Hostname(NamedTuple): host: str ip: str class Credentials(NamedTuple): file: str principal: str secret: str class Resource(NamedTuple): name: str amount: int MAINTENANCE_ROLE = "maintenance" def base_api(mesos_config_path: Optional[str] = None): leader = get_mesos_leader(mesos_config_path) def execute_request(method, endpoint, timeout=(3, 2), **kwargs): url = "http://%s:%d%s" % (leader, MESOS_MASTER_PORT, endpoint) s = Session() s.auth = (get_principal(), get_secret()) req = Request(method, url, **kwargs) prepared = s.prepare_request(req) try: resp = s.send(prepared, timeout=timeout) resp.raise_for_status() return resp except HTTPError: raise HTTPError("Error executing API request calling %s." % url) return execute_request def master_api(mesos_config_path: Optional[str] = None): def execute_master_api_request(method, endpoint, **kwargs): base_api_client = base_api(mesos_config_path=mesos_config_path) return base_api_client(method, "/master%s" % endpoint, **kwargs) return execute_master_api_request def operator_api(mesos_config_path: Optional[str] = None): def execute_operator_api_request(**kwargs): base_api_client = base_api(mesos_config_path=mesos_config_path) if "headers" in kwargs: kwargs["headers"]["Content-Type"] = "application/json" else: kwargs["headers"] = {"Content-Type": "application/json"} data = kwargs.pop("data") return base_api_client("POST", "/api/v1", data=json.dumps(data), **kwargs) return execute_operator_api_request def reserve_api(): def execute_reserve_api_request(method, endpoint, **kwargs): master_api_client = master_api() return master_api_client(method, "/reserve%s" % endpoint, **kwargs) return execute_reserve_api_request def unreserve_api(): def execute_unreserve_api_request(method, endpoint, **kwargs): master_api_client = master_api() return master_api_client(method, "/unreserve%s" % endpoint, **kwargs) return execute_unreserve_api_request def maintenance_api(): def execute_schedule_api_request(method, endpoint, **kwargs): master_api_client = master_api() return master_api_client( method, "/maintenance%s" % endpoint, timeout=(3, 10), **kwargs ) return execute_schedule_api_request def get_schedule_client(): def execute_schedule_api_request(method, endpoint, **kwargs): maintenance_api_client = maintenance_api() return maintenance_api_client(method, "/schedule%s" % endpoint, **kwargs) return execute_schedule_api_request def get_maintenance_schedule(): client_fn = operator_api() return client_fn(data={"type": "GET_MAINTENANCE_SCHEDULE"}) @time_cache(ttl=10) def get_maintenance_status(mesos_config_path: Optional[str] = None): client_fn = operator_api(mesos_config_path=mesos_config_path) return 
client_fn(data={"type": "GET_MAINTENANCE_STATUS"}) def schedule(): try: schedule = get_maintenance_schedule() except HTTPError: raise HTTPError("Error getting maintenance schedule.") return schedule.text def get_hosts_with_state( state, system_paasta_config: Optional[SystemPaastaConfig] = None ) -> List[str]: mesos_config_path = get_mesos_config_path(system_paasta_config) try: status = get_maintenance_status(mesos_config_path).json() status = status["get_maintenance_status"]["status"] except HTTPError: raise HTTPError("Error getting maintenance status.") if not status or state not in status: return [] if "id" in status[state][0]: return [machine["id"]["hostname"] for machine in status[state]] else: return [machine["hostname"] for machine in status[state]] def get_draining_hosts(system_paasta_config: Optional[SystemPaastaConfig] = None): return get_hosts_with_state( state="draining_machines", system_paasta_config=system_paasta_config ) def get_down_hosts(): return get_hosts_with_state(state="down_machines") def is_host_draining(hostname=getfqdn()): return hostname in get_draining_hosts() def is_host_down(hostname=getfqdn()): return hostname in get_down_hosts()
Apache License 2.0
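A usage sketch for the helper above; it needs a reachable Mesos master, and the grace value here is only an example (the parameter is expressed in nanoseconds).

    from paasta_tools.mesos_maintenance import get_hosts_forgotten_draining

    # Tolerate hosts that are up to 10 minutes past their maintenance start.
    ten_minutes_ns = 10 * 60 * 1000 * 1000 * 1000
    for hostname in get_hosts_forgotten_draining(grace=ten_minutes_ns):
        print("forgotten draining host:", hostname)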
humanbrainproject/neuroglancer-scripts
src/neuroglancer_scripts/scripts/scale_stats.py
show_scale_file_info
python
def show_scale_file_info(url, options={}): accessor = neuroglancer_scripts.accessor.get_accessor_for_url(url, options) io = precomputed_io.get_IO_for_existing_dataset(accessor) info = io.info show_scales_info(info)
Show information about the scales of the precomputed dataset at the given URL.
https://github.com/humanbrainproject/neuroglancer-scripts/blob/5f301df5fc3e487f31d48244fb18ffbfce91e9ae/src/neuroglancer_scripts/scripts/scale_stats.py#L46-L51
import sys import numpy as np import neuroglancer_scripts.accessor from neuroglancer_scripts import precomputed_io from neuroglancer_scripts.utils import readable_count def show_scales_info(info): total_size = 0 total_chunks = 0 total_directories = 0 dtype = np.dtype(info["data_type"]).newbyteorder("<") num_channels = info["num_channels"] for scale in info["scales"]: scale_name = scale["key"] size = scale["size"] for chunk_size in scale["chunk_sizes"]: size_in_chunks = [(s - 1) // cs + 1 for s, cs in zip(size, chunk_size)] num_chunks = np.prod(size_in_chunks) num_directories = size_in_chunks[0] * (1 + size_in_chunks[1]) size_bytes = np.prod(size) * dtype.itemsize * num_channels print("Scale {}, chunk size {}:" " {:,d} chunks, {:,d} directories, raw uncompressed size {}B" .format(scale_name, chunk_size, num_chunks, num_directories, readable_count(size_bytes))) total_size += size_bytes total_chunks += num_chunks total_directories += num_directories print("---") print("Total: {:,d} chunks, {:,d} directories, raw uncompressed size {}B" .format(total_chunks, total_directories, readable_count(total_size)))
MIT License
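A usage sketch for the function above; the URL is a placeholder for any dataset location that get_accessor_for_url understands and that already carries an info file.

    from neuroglancer_scripts.scripts.scale_stats import show_scale_file_info

    # Prints per-scale chunk counts, directory counts, and raw uncompressed sizes.
    show_scale_file_info("file:///data/my_precomputed_volume")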
schemaorg/sdopythonapp
lib/pyRdfa/rdfs/cache.py
CachedVocabIndex._give_preference_path
python
def _give_preference_path(self) : from pyRdfa import CACHE_DIR_VAR if CACHE_DIR_VAR in os.environ : return os.environ[CACHE_DIR_VAR] else : platform = sys.platform if platform in self.architectures : system = self.architectures[platform] else : system = "unix" if system == "win" : app_data = os.path.expandvars("%APPDATA%") return os.path.join(app_data,self.preference_path[system]) else : return os.path.join(os.path.expanduser('~'),self.preference_path[system])
Find the vocab cache directory.
https://github.com/schemaorg/sdopythonapp/blob/128be97d359178b26e5211a3e758933ff3a7b3df/lib/pyRdfa/rdfs/cache.py#L203-L223
import os, sys, datetime, re PY3 = (sys.version_info[0] >= 3) import rdflib from rdflib import URIRef from rdflib import Literal from rdflib import BNode from rdflib import Namespace if rdflib.__version__ >= "3.0.0" : from rdflib import RDF as ns_rdf from rdflib import RDFS as ns_rdfs from rdflib import Graph else : from rdflib.RDFS import RDFSNS as ns_rdfs from rdflib.RDF import RDFNS as ns_rdf from rdflib.Graph import Graph from .. import HTTPError, RDFaError from ..host import MediaTypes, HostLanguage from ..utils import create_file_name, URIOpener, quote_URI from ..options import Options from .. import ns_rdfa from . import err_outdated_cache from . import err_unreachable_vocab from . import err_unparsable_Turtle_vocab from . import err_unparsable_xml_vocab from . import err_unparsable_ntriples_vocab from . import err_unparsable_rdfa_vocab from . import err_unrecognised_vocab_type from . import VocabCachingInfo xml_application_media_type = re.compile("application/[a-zA-Z0-9]+\+xml") from ..utils import URIOpener if PY3 : import pickle else : import cPickle as pickle _Pickle_Protocol = 1 def _load(fname) : try : f = open(fname) return pickle.load(f) finally : f.close() def _dump(obj, fname) : try : f = open(fname, "w") pickle.dump(obj, f, _Pickle_Protocol) f.flush() finally : f.close() class CachedVocabIndex : vocabs = "cache_index" preference_path = { "mac" : "Library/Application Support/pyRdfa-cache", "win" : "pyRdfa-cache", "unix" : ".pyRdfa-cache" } architectures = { "darwin" : "mac", "nt" : "win", "win32" : "win", "cygwin" : "win" } def __init__(self, options = None) : self.options = options self.report = (options != None) and options.vocab_cache_report self.app_data_dir = self._give_preference_path() self.index_fname = os.path.join(self.app_data_dir, self.vocabs) self.indeces = {} if not os.path.isdir(self.app_data_dir) : try : os.mkdir(self.app_data_dir) except Exception : (type,value,traceback) = sys.exc_info() if self.report: options.add_info("Could not create the vocab cache area %s" % value, VocabCachingInfo) return else : if not os.access(self.app_data_dir, os.R_OK) : if self.report: options.add_info("Vocab cache directory is not readable", VocabCachingInfo) return if not os.access(self.app_data_dir, os.W_OK) : if self.report: options.add_info("Vocab cache directory is not writeable, but readable", VocabCachingInfo) return if os.path.exists(self.index_fname) : if os.access(self.index_fname, os.R_OK) : self.indeces = _load(self.index_fname) else : if self.report: options.add_info("Vocab cache index not readable", VocabCachingInfo) else : if os.access(self.app_data_dir, os.W_OK) : try : _dump(self.indeces, self.index_fname) except Exception : (type,value,traceback) = sys.exc_info() if self.report: options.add_info("Could not create the vocabulary index %s" % value, VocabCachingInfo) else : if self.report: options.add_info("Vocabulary cache directory is not writeable", VocabCachingInfo) self.cache_writeable = False def add_ref(self, uri, vocab_reference) : self.indeces[uri] = vocab_reference try : _dump(self.indeces, self.index_fname) except Exception : (type,value,traceback) = sys.exc_info() if self.report: self.options.add_info("Could not store the cache index %s" % value, VocabCachingInfo) def get_ref(self, uri) : if uri in self.indeces : return tuple(self.indeces[uri]) else : return None
Apache License 2.0
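The method above is private, but its first branch shows how the cache location can be steered from outside; the directory below is a placeholder.

    import os
    from pyRdfa import CACHE_DIR_VAR

    # When the environment variable named by CACHE_DIR_VAR is set, that path wins;
    # otherwise a per-platform default under the user's home (or %APPDATA%) is used.
    os.environ[CACHE_DIR_VAR] = "/tmp/pyrdfa-cache"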
ciwpython/ciw
ciw/network.py
Network.__init__
python
def __init__(self, service_centres, customer_classes): self.service_centres = service_centres self.customer_classes = customer_classes self.number_of_nodes = len(service_centres) self.number_of_classes = len(customer_classes) self.number_of_priority_classes = len(set([clss.priority_class for clss in customer_classes])) self.priority_class_mapping = {i: clss.priority_class for i, clss in enumerate(customer_classes)}
Initialises the Network object
https://github.com/ciwpython/ciw/blob/8d5978108f797a6c3e42d8f70f31510f889f2dd0/ciw/network.py#L63-L72
class ServiceCentre(object): def __init__(self, number_of_servers, queueing_capacity, class_change_matrix=None, schedule=None, preempt=False, ps_threshold=1): self.number_of_servers = number_of_servers self.queueing_capacity = queueing_capacity self.class_change_matrix = class_change_matrix self.schedule = schedule self.preempt = preempt self.ps_threshold = ps_threshold class CustomerClass(object): def __init__(self, arrival_distributions, service_distributions, routing, priority_class, baulking_functions, batching_distributions): self.arrival_distributions = arrival_distributions self.service_distributions = service_distributions self.batching_distributions = batching_distributions self.routing = routing self.priority_class = priority_class self.baulking_functions = baulking_functions class Network(object):
MIT License
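A direct-construction sketch using the three classes in this record; real ciw models are normally assembled through higher-level helpers, so the distribution and routing arguments below are placeholders.

    from ciw.network import ServiceCentre, CustomerClass, Network

    centre = ServiceCentre(number_of_servers=2, queueing_capacity=float("inf"))
    clss = CustomerClass(
        arrival_distributions=["Exponential(3.0)"],   # placeholder distribution objects
        service_distributions=["Exponential(5.0)"],
        routing=[[0.0]],
        priority_class=0,
        baulking_functions=[None],
        batching_distributions=[None],
    )
    net = Network([centre], [clss])
    print(net.number_of_nodes, net.number_of_classes)   # -> 1 1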
aliyun/aliyun-log-python-sdk
aliyun/log/etl_core/trans_comp/trans_json.py
json_transformer.__init__
python
def __init__(self, jmes=None, jmes_ignore_none=None, output=None, expand=None, depth=None, include_node=None, exclude_node=None, include_path=None, exclude_path=None, fmt=None, sep=None, prefix=None, suffix=None, expand_array=None, fmt_array=None, mode=None ): super(json_transformer, self).__init__(mode=mode) self.expand = expand if expand is None: self.expand = not jmes or not output self.jmes = self._u(jmes or "") self.prefix = self._u("" if prefix is None else prefix) self.suffix = self._u("" if suffix is None else suffix) self.sep = self._u(self.DEFAULT_SEP if sep is None else sep) self.output = self._u(output or "") self.jmes_filter = None self.jmes_ignore_none = True if jmes_ignore_none is None else jmes_ignore_none if jmes: try: self.jmes_filter = jmespath.compile(jmes) except jmespath.exceptions.ParseError as ex: raise SettingError(ex=ex, msg="Invalid JMES filter setting", settings=jmes) elif self.output: logger.warning(u"json_transformer: parameter output '{0}' will be ignored as there's no filter is selected." .format(output)) self.depth = min((depth or self.DEFAULT_DEPTH), self.DEFAULT_DEPTH) self.include_node = self._u(include_node or self.DEFAULT_INCLUDE_NODE) self.exclude_node = self._u(exclude_node or self.DEFAULT_EXCLUDE_NODE) self.include_path = self._u(include_path or self.DEFAULT_INCLUDE_PATH) self.exclude_path = self._u(exclude_path or self.DEFAULT_EXCLUDE_PATH) self.fmt = self._u(fmt or self.DEFAULT_FMT) try: self.include_node_match = get_re_full_match(self.include_node) self.exclude_node_match = get_re_full_match(self.exclude_node) self.include_path_match = re.compile(self.include_path).match self.exclude_path_match = re.compile(self.exclude_path).match except Exception as ex: raise SettingError(ex=ex, msg="Invalid regex string for include/exclude") self.expand_array = True if expand_array is None else expand_array self.format_array = self._u(fmt_array or self.DEFAULT_FMT_ARRAY)
:param jmes: JMES filter used to select or generate a new field :param jmes_ignore_none: if the jmes filter result is None, ignore it (default is True); otherwise treat it as an empty string "" :param output: put the value parsed from the jmes filter into this field :param expand: if a jmes filter is configured, whether to expand the result (default is False in this case); if jmes is not configured, whether to expand the field passed directly (default is True in this case). :param depth: depth to scan, 1 means the first layer only; default is 100. :param include_node: keys to expand and include, as a regex string; use '|' for multiple ones. Default is all. :param exclude_node: keys to skip, as a regex string; use '|' for multiple ones. Default is nothing. :param include_path: paths to expand and include, as a regex string matched from the beginning; use '|' for multiple ones. Default is all. E.g. r"data\.k1" includes all sub-keys under data.k1. :param exclude_path: paths to skip, as a regex string matched from the beginning; use '|' for multiple ones. Default is nothing. E.g. r"data\.k2" excludes all sub-keys under data.k2. :param fmt: how to format key names during expansion; there are several built-in types, or a custom format, as described in FMT_MAP :param sep: separator used in formatting during expansion :param prefix: prefix used in formatting during expansion :param suffix: suffix used in formatting during expansion :param expand_array: whether to expand arrays or just render them; default is True. Each array item is keyed by its index, e.g. [1,2,3] is treated as {"0": 1, "1": 2, "2": 3} :param fmt_array: format string for the key name of each array element; default is "{parent_rlist[0]}_{index}". A custom formatting string may use the placeholders: parent_list, parent_rlist, current, index
https://github.com/aliyun/aliyun-log-python-sdk/blob/49b7b92798729d962268252dbbae9d7c098e60f8/aliyun/log/etl_core/trans_comp/trans_json.py#L62-L125
import inspect import json import logging try: from collections.abc import Iterable except ImportError: from collections import Iterable import jmespath import re import six from jmespath.exceptions import ParseError from .trans_base import trans_comp_check_mdoe_base from ..etl_util import get_re_full_match from ..exceptions import SettingError __all__ = ['trans_comp_json'] logger = logging.getLogger(__name__) def trans_comp_json(*args, **kwargs): if (args and isinstance(args[0], dict)) or ('event' in kwargs and isinstance(kwargs['event'], dict)): trans = json_transformer() return trans(*args, **kwargs) else: return json_transformer(*args, **kwargs) class json_transformer(trans_comp_check_mdoe_base): DEFAULT_SEP = u'.' DEFAULT_FMT = "simple" DEFAULT_DEPTH = 100 DEFAULT_INCLUDE_NODE = trans_comp_check_mdoe_base.DEFAULT_KEYWORD_PTN DEFAULT_EXCLUDE_NODE = u'' DEFAULT_INCLUDE_PATH = u'' DEFAULT_EXCLUDE_PATH = u'' DEFAULT_FMT_ARRAY = u"{parent_rlist[0]}_{index}" FMT_MAP = { "simple": lambda prefix, current, suffix, *args, **kwargs: u"{prefix}{current}{suffix}".format(prefix=prefix, current=current, suffix=suffix), "full": lambda parent_list, sep, prefix, current, suffix, *args, **kwargs: u"{parent_list_str}{sep}{prefix}{current}{suffix}".format( parent_list_str=sep.join(parent_list), current=current, sep=sep, prefix=prefix, suffix=suffix), "parent": lambda parent_list, sep, prefix, current, suffix, *args, **kwargs: u"{parent}{sep}{prefix}{current}{suffix}".format(parent=parent_list[-1], current=current, sep=sep, prefix=prefix, suffix=suffix), "root": lambda parent_list, sep, prefix, current, suffix, *args, **kwargs: u"{parent_list[0]}{sep}{prefix}{current}{suffix}".format(parent_list=parent_list, current=current, sep=sep, prefix=prefix, suffix=suffix) }
MIT License
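A construction-only sketch of the transformer above; how an instance is applied to an event is defined in the base class, which this record does not show, and the field names used here are hypothetical.

    from aliyun.log.etl_core.trans_comp.trans_json import json_transformer

    # Expand a JSON-valued field two levels deep, keeping only keys under "data.k1"
    # and prefixing every expanded key with "js_".
    trans = json_transformer(expand=True, depth=2,
                             include_path=r"data\.k1", prefix="js_")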
ybrs/pydisque
pydisque/client.py
Client.qscan
python
def qscan(self, cursor=0, count=None, busyloop=None, minlen=None, maxlen=None, importrate=None): command = ["QSCAN", cursor] if count: command += ["COUNT", count] if busyloop: command += ["BUSYLOOP"] if minlen: command += ["MINLEN", minlen] if maxlen: command += ["MAXLEN", maxlen] if importrate: command += ["IMPORTRATE", importrate] return self.execute_command(*command)
Iterate over all the existing queues in the local node. :param count: A hint about how much work to do per iteration. :param busyloop: Block and return all the elements in a busy loop. :param minlen: Don't return queues with fewer than minlen jobs queued. :param maxlen: Don't return queues with more than maxlen jobs queued. :param importrate: Only return queues with a job import rate (from other nodes) >= importrate.
https://github.com/ybrs/pydisque/blob/ea5ce1576b66398c1cce32cad0f15709b1ea8df8/pydisque/client.py#L453-L477
import redis from redis.exceptions import ConnectionError, RedisError from functools import wraps try: from itertools import zip_longest except ImportError: from itertools import izip_longest as zip_longest import logging logger = logging.getLogger(__name__) class Job(object): def __init__(self, id, queue_name, payload): self.id = id self.queue_name = queue_name self.payload = payload def __repr__(self): return '<Job id:%s queue_name:%s>' % (self.id, self.queue_name) class Node(object): def __init__(self, node_id, host, port, connection): self.node_id = node_id self.host = host self.port = port self.connection = connection def __repr__(self): return '<Node %s:%s>' % (self.host, self.port) class retry(object): def __init__(self, retry_count=2): self.retry_count = retry_count def __call__(self, fn): @wraps(fn) def wrapped_f(*args, **kwargs): c = 0 while c <= self.retry_count: try: return fn(*args, **kwargs) except RedisError: logging.critical("retrying because of this exception - %s", c) logging.exception("exception to retry ") if c == self.retry_count: raise c += 1 return wrapped_f class Client(object): def __init__(self, nodes=None, **kwargs): if nodes is None: nodes = ['localhost:7711'] self.nodes = {} for n in nodes: self.nodes[n] = None self.client_kw_args = kwargs self.connected_node = None def connect(self): self.connected_node = None for i, node in self.nodes.items(): host, port = i.split(':') port = int(port) redis_client = redis.Redis(host, port, **self.client_kw_args) try: ret = redis_client.execute_command('HELLO') format_version, node_id = ret[0], ret[1] others = ret[2:] self.nodes[i] = Node(node_id, host, port, redis_client) self.connected_node = self.nodes[i] except redis.exceptions.ConnectionError: pass if not self.connected_node: raise ConnectionError('couldnt connect to any nodes') logger.info("connected to node %s" % self.connected_node) def get_connection(self): if self.connected_node: return self.connected_node.connection else: raise ConnectionError("not connected") @retry() def execute_command(self, *args, **kwargs): try: return self.get_connection().execute_command(*args, **kwargs) except ConnectionError as e: logger.warn('trying to reconnect') self.connect() logger.warn('connected') raise def _grouper(self, iterable, n, fillvalue=None): args = [iter(iterable)] * n return zip_longest(fillvalue=fillvalue, *args) def info(self): return self.execute_command("INFO") def add_job(self, queue_name, job, timeout=200, replicate=None, delay=None, retry=None, ttl=None, maxlen=None, asynchronous=None): command = ['ADDJOB', queue_name, job, timeout] if replicate: command += ['REPLICATE', replicate] if delay: command += ['DELAY', delay] if retry is not None: command += ['RETRY', retry] if ttl: command += ['TTL', ttl] if maxlen: command += ['MAXLEN', maxlen] if asynchronous: command += ['ASYNC'] logger.debug("sending job - %s", command) job_id = self.execute_command(*command) logger.debug("sent job - %s", command) logger.debug("job_id: %s " % job_id) return job_id def get_job(self, queues, timeout=None, count=None, nohang=False, withcounters=False): assert queues command = ['GETJOB'] if nohang: command += ['NOHANG'] if timeout: command += ['TIMEOUT', timeout] if count: command += ['COUNT', count] if withcounters: command += ['WITHCOUNTERS'] command += ['FROM'] + queues results = self.execute_command(*command) if not results: return [] if withcounters: return [(job_id, queue_name, job, nacks, additional_deliveries) for job_id, queue_name, job, _, nacks, _, additional_deliveries in 
results] else: return [(job_id, queue_name, job) for job_id, queue_name, job in results] def ack_job(self, *job_ids): self.execute_command('ACKJOB', *job_ids) def nack_job(self, *job_ids): self.execute_command('NACK', *job_ids) def fast_ack(self, *job_ids): self.execute_command('FASTACK', *job_ids) def working(self, job_id): return self.execute_command('WORKING', job_id) def qlen(self, queue_name): return self.execute_command('QLEN', queue_name) def qstat(self, queue_name, return_dict=False): rtn = self.execute_command('QSTAT', queue_name) if return_dict: grouped = self._grouper(rtn, 2) rtn = dict((a, b) for a, b in grouped) return rtn def qpeek(self, queue_name, count): return self.execute_command("QPEEK", queue_name, count) def enqueue(self, *job_ids): return self.execute_command("ENQUEUE", *job_ids) def dequeue(self, *job_ids): return self.execute_command("DEQUEUE", *job_ids) def del_job(self, *job_ids): return self.execute_command("DELJOB", *job_ids) def show(self, job_id, return_dict=False): rtn = self.execute_command('SHOW', job_id) if return_dict: grouped = self._grouper(rtn, 2) rtn = dict((a, b) for a, b in grouped) return rtn def pause(self, queue_name, kw_in=None, kw_out=None, kw_all=None, kw_none=None, kw_state=None, kw_bcast=None): command = ["PAUSE", queue_name] if kw_in: command += ["in"] if kw_out: command += ["out"] if kw_all: command += ["all"] if kw_none: command += ["none"] if kw_state: command += ["state"] if kw_bcast: command += ["bcast"] return self.execute_command(*command)
MIT License
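A usage sketch assuming a Disque node listening on localhost:7711; the shape of the reply (a cursor followed by a list of queue names) follows the raw QSCAN reply and is an assumption here.

    from pydisque.client import Client

    client = Client(["localhost:7711"])
    client.connect()
    # Scan queues in pages of 100, skipping queues holding fewer than 5 jobs.
    cursor, queue_names = client.qscan(cursor=0, count=100, minlen=5)
    print(queue_names)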
chargepoint/pydnp3
examples/master_cmd.py
MasterCmd.do_o1
python
def do_o1(self, line): self.application.send_direct_operate_command(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON), 5, command_callback)
Send a DirectOperate BinaryOutput (group 12) index 5 LATCH_ON to the Outstation. Command syntax is: o1
https://github.com/chargepoint/pydnp3/blob/3699581dc80409b82e0d64a71b26e0db1863525a/examples/master_cmd.py#L93-L97
import cmd import logging import sys from datetime import datetime from pydnp3 import opendnp3, openpal from master import MyMaster, MyLogger, AppChannelListener, SOEHandler, MasterApplication from master import command_callback, restart_callback stdout_stream = logging.StreamHandler(sys.stdout) stdout_stream.setFormatter(logging.Formatter('%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s')) _log = logging.getLogger(__name__) _log.addHandler(stdout_stream) _log.setLevel(logging.DEBUG) class MasterCmd(cmd.Cmd): def __init__(self): cmd.Cmd.__init__(self) self.prompt = 'master> ' self.application = MyMaster(log_handler=MyLogger(), listener=AppChannelListener(), soe_handler=SOEHandler(), master_application=MasterApplication()) def startup(self): print('Welcome to the DNP3 master request command line. Supported commands include:') self.do_menu('') self.cmdloop('Please enter a command.') exit() def do_menu(self, line): print('\tchan_log_all\tSet the channel log level to ALL_COMMS.') print('\tchan_log_normal\tSet the channel log level to NORMAL.') print('\tdisable_unsol\tPerform the function DISABLE_UNSOLICITED.') print('\thelp\t\tDisplay command-line help.') print('\tmast_log_all\tSet the master log level to ALL_COMMS.') print('\tmast_log_normal\tSet the master log level to NORMAL.') print('\tmenu\t\tDisplay this menu.') print('\to1\t\tSend a DirectOperate LATCH_ON command.') print('\to2\t\tSend a DirectOperate analog value.') print('\to3\t\tSend a DirectOperate CommandSet.') print('\tquit') print('\trestart\t\tRequest an outstation cold restart.') print('\ts1\t\tSend a SelectAndOperate LATCH_ON command.') print('\ts2\t\tSend a SelectAndOperate CommandSet.') print('\tscan_all\tRead data from the outstation (ScanAllObjects).') print('\tscan_fast\tDemand immediate execution of the fast (every 1 mins) Class 1 scan.') print('\tscan_range\tPerform an ad-hoc scan (ScanRange) of GroupVariation 1.2, range 0..3.') print('\tscan_slow\tDemand immediate execution of the slow (every 30 mins) All-Classes scan.') print('\twrite_time\tWrite a TimeAndInterval to the outstation.') def do_chan_log_all(self, line): self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS)) print('Channel log filtering level is now: {0}'.format(opendnp3.levels.ALL_COMMS)) def do_chan_log_normal(self, line): self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.NORMAL)) print('Channel log filtering level is now: {0}'.format(opendnp3.levels.NORMAL)) def do_disable_unsol(self, line): headers = [opendnp3.Header().AllObjects(60, 2), opendnp3.Header().AllObjects(60, 3), opendnp3.Header().AllObjects(60, 4)] self.application.master.PerformFunction("disable unsolicited", opendnp3.FunctionCode.DISABLE_UNSOLICITED, headers, opendnp3.TaskConfig().Default()) def do_mast_log_all(self, line): self.application.master.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS)) _log.debug('Master log filtering level is now: {0}'.format(opendnp3.levels.ALL_COMMS)) def do_mast_log_normal(self, line): self.application.master.SetLogFilters(openpal.LogFilters(opendnp3.levels.NORMAL)) _log.debug('Master log filtering level is now: {0}'.format(opendnp3.levels.NORMAL))
Apache License 2.0
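A usage sketch for the command handler above; it assumes the script is run next to the examples' master.py and that an outstation is reachable with the configuration used there.

    from master_cmd import MasterCmd   # module name taken from examples/master_cmd.py

    cmd_app = MasterCmd()
    # Interactive use goes through cmd_app.startup(); the same request can also
    # be fired directly, which sends a DirectOperate LATCH_ON to index 5:
    cmd_app.do_o1("")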
tensorflow/graphics
tensorflow_graphics/math/sampling.py
stratified_1d
python
def stratified_1d(near: TensorLike, far: TensorLike, num_samples: int, name="stratified_1d") -> tf.Tensor: with tf.name_scope(name): near = tf.convert_to_tensor(near) far = tf.convert_to_tensor(far) shape.compare_batch_dimensions( tensors=(tf.expand_dims(near, axis=-1), tf.expand_dims(far, axis=-1)), tensor_names=("near", "far"), last_axes=-1, broadcast_compatible=True) bin_borders = tf.linspace(0.0, 1.0, num_samples + 1, axis=-1) bin_below = bin_borders[..., :-1] bin_above = bin_borders[..., 1:] target_shape = tf.concat([tf.shape(near), [num_samples]], axis=-1) random_point_in_bin = tf.random.uniform(target_shape) z_values = bin_below + (bin_above - bin_below) * random_point_in_bin z_values = (tf.expand_dims(near, -1) * (1. - z_values) + tf.expand_dims(far, -1) * z_values) return z_values
Stratified sampling based on evenly-spaced bin size of an interval. Args: near: A tensor of shape `[A1, ... An]` containing the starting points of the sampling interval. far: A tensor of shape `[A1, ... An]` containing the ending points of the sampling interval. num_samples: The number M of points to be sampled. name: A name for this op that defaults to "stratified_1d". Returns: A tensor of shape `[A1, ..., An, M]` indicating the M points on the ray
https://github.com/tensorflow/graphics/blob/d0817aec7dee35635814e925a59d83955459d93c/tensorflow_graphics/math/sampling.py#L120-L156
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_graphics.util import export_api from tensorflow_graphics.util import safe_ops from tensorflow_graphics.util import shape from tensorflow_graphics.util.type_alias import TensorLike def regular_1d(near: TensorLike, far: TensorLike, num_samples: int, name="regular_1d") -> tf.Tensor: with tf.name_scope(name): near = tf.convert_to_tensor(near) far = tf.convert_to_tensor(far) shape.compare_batch_dimensions( tensors=(tf.expand_dims(near, axis=-1), tf.expand_dims(far, axis=-1)), tensor_names=("near", "far"), last_axes=-1, broadcast_compatible=True) return tf.linspace(near, far, num_samples, axis=-1) def regular_inverse_1d(near: TensorLike, far: TensorLike, num_samples: int, name="regular_inverse_1d") -> tf.Tensor: with tf.name_scope(name): near = tf.convert_to_tensor(near) far = tf.convert_to_tensor(far) shape.compare_batch_dimensions( tensors=(tf.expand_dims(near, axis=-1), tf.expand_dims(far, axis=-1)), tensor_names=("near", "far"), last_axes=-1, broadcast_compatible=True) return 1. / tf.linspace(1. / near, 1. / far, num_samples, axis=-1) def uniform_1d(near: TensorLike, far: TensorLike, num_samples: int, name="uniform_1d") -> tf.Tensor: with tf.name_scope(name): near = tf.convert_to_tensor(near) far = tf.convert_to_tensor(far) shape.compare_batch_dimensions( tensors=(tf.expand_dims(near, axis=-1), tf.expand_dims(far, axis=-1)), tensor_names=("near", "far"), last_axes=-1, broadcast_compatible=True) target_shape = tf.concat([tf.shape(near), [num_samples]], axis=-1) random_samples = tf.random.uniform(target_shape, minval=tf.expand_dims(near, -1), maxval=tf.expand_dims(far, -1)) return tf.sort(random_samples, axis=-1)
Apache License 2.0
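A small usage sketch for the sampler above; shapes follow the docstring, and the near/far values are arbitrary.

    import tensorflow as tf
    from tensorflow_graphics.math.sampling import stratified_1d

    # Two rays, each sampled at 8 stratified depths between its near and far plane.
    near = tf.constant([0.1, 0.5])
    far = tf.constant([4.0, 6.0])
    z_values = stratified_1d(near, far, num_samples=8)
    print(z_values.shape)   # -> (2, 8)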
cisco-en-programmability/dnacentersdk
dnacentersdk/api/v1_3_0/pnp.py
Pnp.update_pnp_server_profile
python
def update_pnp_server_profile(self, autoSyncPeriod=None, ccoUser=None, expiry=None, lastSync=None, profile=None, smartAccountId=None, syncResult=None, syncResultStr=None, syncStartTime=None, syncStatus=None, tenantId=None, token=None, virtualAccountId=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } _payload = { 'autoSyncPeriod': autoSyncPeriod, 'ccoUser': ccoUser, 'expiry': expiry, 'lastSync': lastSync, 'profile': profile, 'smartAccountId': smartAccountId, 'syncResult': syncResult, 'syncResultStr': syncResultStr, 'syncStartTime': syncStartTime, 'syncStatus': syncStatus, 'tenantId': tenantId, 'token': token, 'virtualAccountId': virtualAccountId, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_6f9819e84178870c_v1_3_0') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-settings/savacct') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_6f9819e84178870c_v1_3_0', json_data)
Updates the PnP Server profile in a registered Virtual Account in the PnP database. The response payload returns the updated smart & virtual account info. Args: autoSyncPeriod(number): SAVAMapping's autoSyncPeriod. ccoUser(string): SAVAMapping's ccoUser. expiry(number): SAVAMapping's expiry. lastSync(number): SAVAMapping's lastSync. profile(object): SAVAMapping's profile. smartAccountId(string): SAVAMapping's smartAccountId. syncResult(object): SAVAMapping's syncResult. syncResultStr(string): SAVAMapping's syncResultStr. syncStartTime(number): SAVAMapping's syncStartTime. syncStatus(string): SAVAMapping's syncStatus. Available values are 'NOT_SYNCED', 'SYNCING', 'SUCCESS' and 'FAILURE'. tenantId(string): SAVAMapping's tenantId. token(string): SAVAMapping's token. virtualAccountId(string): SAVAMapping's virtualAccountId. headers(dict): Dictionary of HTTP Headers to send with the Request . payload(dict): A JSON serializable Python object to send in the body of the Request. active_validation(bool): Enable/Disable payload validation. Defaults to True. **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: MyDict: JSON response. Access the object's properties by using the dot notation or the bracket notation. Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the DNA Center cloud returns an error.
https://github.com/cisco-en-programmability/dnacentersdk/blob/ef2adde6113e7a6acd28a287007eb470fa39d31f/dnacentersdk/api/v1_3_0/pnp.py#L871-L994
from __future__ import ( absolute_import, division, print_function, unicode_literals, ) from builtins import * from past.builtins import basestring from ...restsession import RestSession from ...utils import ( check_type, dict_from_items_with_values, apply_path_params, dict_of_str, ) class Pnp(object): def __init__(self, session, object_factory, request_validator): check_type(session, RestSession) super(Pnp, self).__init__() self._session = session self._object_factory = object_factory self._request_validator = request_validator def un_claim_device(self, deviceIdList=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } _payload = { 'deviceIdList': deviceIdList, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_0b836b7b4b6a9fd5_v1_3_0') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-device/unclaim') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_0b836b7b4b6a9fd5_v1_3_0', json_data) def get_sync_result_for_virtual_account(self, domain, name, headers=None, **request_parameters): check_type(headers, dict) check_type(domain, basestring, may_be_none=False) check_type(name, basestring, may_be_none=False) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'domain': domain, 'name': name, } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-' + 'device/sacct/${domain}/vacct/${name}/sync-result') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_0a9c988445cb91c8_v1_3_0', json_data) def update_device(self, id, _id=None, deviceInfo=None, runSummaryList=None, systemResetWorkflow=None, systemWorkflow=None, tenantId=None, version=None, workflow=None, workflowParameters=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) check_type(id, basestring, may_be_none=False) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = 
{ 'id': id, } _payload = { '_id': _id, 'deviceInfo': deviceInfo, 'runSummaryList': runSummaryList, 'systemResetWorkflow': systemResetWorkflow, 'systemWorkflow': systemWorkflow, 'tenantId': tenantId, 'version': version, 'workflow': workflow, 'workflowParameters': workflowParameters, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_09b0f9ce4239ae10_v1_3_0') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-device/${id}') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_09b0f9ce4239ae10_v1_3_0', json_data) def deregister_virtual_account(self, domain, name, headers=None, **request_parameters): check_type(headers, dict) check_type(domain, basestring, may_be_none=False) check_type(name, basestring, may_be_none=False) if headers is not None: if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { 'domain': domain, 'name': name, } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-settings/vacct') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.delete(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.delete(endpoint_full_url, params=_params) return self._object_factory('bpm_2499e9ad42e8ae5b_v1_3_0', json_data) def add_virtual_account(self, autoSyncPeriod=None, ccoUser=None, expiry=None, lastSync=None, profile=None, smartAccountId=None, syncResult=None, syncResultStr=None, syncStartTime=None, syncStatus=None, tenantId=None, token=None, virtualAccountId=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } _payload = { 'autoSyncPeriod': autoSyncPeriod, 'ccoUser': ccoUser, 'expiry': expiry, 'lastSync': lastSync, 'profile': profile, 'smartAccountId': smartAccountId, 'syncResult': syncResult, 'syncResultStr': syncResultStr, 'syncStartTime': syncStartTime, 'syncStatus': syncStatus, 'tenantId': tenantId, 'token': token, 'virtualAccountId': virtualAccountId, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_1e962af345b8b59f_v1_3_0') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-settings/savacct') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = 
self._session.post(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_1e962af345b8b59f_v1_3_0', json_data) def import_devices_in_bulk(self, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, list) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } _payload = payload or [] if active_validation: self._request_validator('jsd_21a6db2540298f55_v1_3_0') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-device/import') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_21a6db2540298f55_v1_3_0', json_data) def update_workflow(self, id, _id=None, addToInventory=None, addedOn=None, configId=None, currTaskIdx=None, description=None, endTime=None, execTime=None, imageId=None, instanceType=None, lastupdateOn=None, name=None, startTime=None, state=None, tasks=None, tenantId=None, type=None, useState=None, version=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) check_type(id, basestring, may_be_none=False) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { 'id': id, } _payload = { '_id': _id, 'addToInventory': addToInventory, 'addedOn': addedOn, 'configId': configId, 'currTaskIdx': currTaskIdx, 'description': description, 'endTime': endTime, 'execTime': execTime, 'imageId': imageId, 'instanceType': instanceType, 'lastupdateOn': lastupdateOn, 'name': name, 'startTime': startTime, 'state': state, 'tasks': tasks, 'tenantId': tenantId, 'type': type, 'useState': useState, 'version': version, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_3086c9624f498b85_v1_3_0') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-workflow/${id}') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.put(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_3086c9624f498b85_v1_3_0', json_data) def get_smart_account_list(self, headers=None, **request_parameters): check_type(headers, dict) if headers is not None: if 'X-Auth-Token' in headers: 
check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-settings/sacct') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_3cb24acb486b89d2_v1_3_0', json_data) def claim_a_device_to_a_site(self, deviceId=None, siteId=None, type=None, headers=None, payload=None, active_validation=True, **request_parameters): check_type(headers, dict) check_type(payload, dict) if headers is not None: if 'Content-Type' in headers: check_type(headers.get('Content-Type'), basestring, may_be_none=False) if 'X-Auth-Token' in headers: check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = { } _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = { } _payload = { 'deviceId': deviceId, 'siteId': siteId, 'type': type, } _payload.update(payload or {}) _payload = dict_from_items_with_values(_payload) if active_validation: self._request_validator('jsd_5889fb844939a13b_v1_3_0') .validate(_payload) with_custom_headers = False _headers = self._session.headers or {} if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = ('/dna/intent/api/v1/onboarding/pnp-device/site-claim') endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload, headers=_headers) else: json_data = self._session.post(endpoint_full_url, params=_params, json=_payload) return self._object_factory('bpm_5889fb844939a13b_v1_3_0', json_data)
MIT License
pyro-ppl/pyro
pyro/contrib/tracking/dynamic_models.py
DynamicModel.mean2pv
python
def mean2pv(self, x):
    raise NotImplementedError
Compute and return PV state from native state. Useful for combining
state estimates of different types in IMM (Interacting Multiple Model)
filtering.

:param x: native state estimate mean.
:return: PV state estimate mean.
https://github.com/pyro-ppl/pyro/blob/751843a16ffca0fec0ec722aa4d57cad246db648/pyro/contrib/tracking/dynamic_models.py#L79-L88
from abc import ABCMeta, abstractmethod import torch from torch import nn from torch.nn import Parameter import pyro.distributions as dist from pyro.distributions.util import eye_like class DynamicModel(nn.Module, metaclass=ABCMeta): def __init__(self, dimension, dimension_pv, num_process_noise_parameters=None): self._dimension = dimension self._dimension_pv = dimension_pv self._num_process_noise_parameters = num_process_noise_parameters super().__init__() @property def dimension(self): return self._dimension @property def dimension_pv(self): return self._dimension_pv @property def num_process_noise_parameters(self): return self._num_process_noise_parameters @abstractmethod def forward(self, x, dt, do_normalization=True): raise NotImplementedError def geodesic_difference(self, x1, x0): return x1 - x0 @abstractmethod
Apache License 2.0
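To make the abstract mean2pv contract above concrete, here is a minimal standalone sketch (not taken from pyro) of the kind of conversion a subclass would implement: a constant-position model whose native state mean holds positions only, padded with zero velocity to form the PV mean.

# Illustrative only: a plausible mean2pv for a constant-position model,
# mapping a native mean [px, py] to a PV mean [px, py, vx, vy].
import torch

def mean2pv_constant_position(x):
    # The model carries no velocity, so the PV mean gets zero velocity entries.
    velocity = torch.zeros_like(x)
    return torch.cat([x, velocity])

print(mean2pv_constant_position(torch.tensor([1.0, 2.0])))  # tensor([1., 2., 0., 0.])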
gabstopper/smc-python
smc/elements/other.py
ContactAddress.addresses
python
def addresses(self):
    return self.data['addresses']
List of addresses set as contact address

:rtype: list
https://github.com/gabstopper/smc-python/blob/54386c8a710727cc1acf69334a57b155d2f5408c/smc/elements/other.py#L302-L308
from smc.base.model import Element, ElementCreator, ElementList from smc.api.exceptions import ModificationFailed from smc.base.util import element_resolver from smc.base.structs import NestedDict from smc.base.decorators import cached_property class Category(Element): typeof = 'category_tag' categories = ElementList('category_parent_ref') @classmethod def create(cls, name, comment=None): json = {'name': name, 'comment': comment} return ElementCreator(cls, json) def search_elements(self): return [Element.from_meta(**tag) for tag in self.make_request(resource='search_elements_from_category_tag')] def add_element(self, element): element = element_resolver(element) self.make_request( ModificationFailed, method='create', resource='category_add_element', json={'value': element}) def remove_element(self, element): element = element_resolver(element) self.make_request( ModificationFailed, method='create', resource='category_remove_element', json={'value': element}) def add_category_tag(self, tags, append_lists=True): tags = element_resolver(tags) self.update( category_parent_ref=tags, append_lists=append_lists) def add_category(self, tags): pass class CategoryTag(Element): typeof = 'category_group_tag' child_categories = ElementList('category_child_ref') parent_categories = ElementList('parent_categories') @classmethod def create(cls, name, comment=None): json = {'name': name, 'comment': comment} return ElementCreator(cls, json) def remove_category(self, categories): categories = element_resolver(categories) diff = [category for category in self.data['category_child_ref'] if category not in categories] self.update(category_child_ref=diff) class SituationTag(Category): typeof = 'situation_tag' class FilterExpression(Element): typeof = 'filter_expression' class Location(Element): typeof = 'location' @classmethod def create(cls, name, comment=None): json = {'name': name, 'comment': comment} return ElementCreator(cls, json) @property def used_on(self): return [Element.from_meta(**element) for element in self.make_request(resource='search_nated_elements_from_location')] class LogicalInterface(Element): typeof = 'logical_interface' @classmethod def create(cls, name, comment=None): json = {'name': name, 'comment': comment} return ElementCreator(cls, json) class MacAddress(Element): typeof = 'mac_address' @classmethod def create(cls, name, mac_address, comment=None): json = {'name': name, 'address': mac_address, 'comment': comment} return ElementCreator(cls, json) class ContactAddress(NestedDict): @property
Apache License 2.0
laszukdawid/pyemd
PyEMD/EMD.py
EMD._not_duplicate
python
def _not_duplicate(S: np.ndarray) -> np.ndarray:
    dup = np.r_[S[1:-1]==S[0:-2]] & np.r_[S[1:-1]==S[2:]]
    not_dup_idx = np.arange(1, len(S)-1)[~dup]

    idx = np.empty(len(not_dup_idx)+2, dtype=np.int)
    idx[0] = 0
    idx[-1] = len(S)-1
    idx[1:-1] = not_dup_idx

    return idx
Returns indices for not repeating values, where there is no extremum.

Example
-------
>>> S = [0, 1, 1, 1, 2, 3]
>>> idx = self._not_duplicate(S)
[0, 1, 3, 4, 5]
https://github.com/laszukdawid/pyemd/blob/3d8ec292cd2ba8cba327d3e0ad576366a8ead6ff/PyEMD/EMD.py#L479-L497
from __future__ import division, print_function import logging import numpy as np from typing import Optional, Tuple from scipy.interpolate import interp1d from PyEMD.splines import akima, cubic_spline_3pts from PyEMD.utils import get_timeline FindExtremaOutput = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] class EMD: logger = logging.getLogger(__name__) def __init__(self, spline_kind: str = 'cubic', nbsym: int = 2, **kwargs): self.energy_ratio_thr = float(kwargs.get('energy_ratio_thr', 0.2)) self.std_thr = float(kwargs.get('std_thr', 0.2)) self.svar_thr = float(kwargs.get('svar_thr', 0.001)) self.total_power_thr = float(kwargs.get('total_power_thr', 0.005)) self.range_thr = float(kwargs.get('range_thr', 0.001)) self.nbsym = int(kwargs.get('nbsym', nbsym)) self.scale_factor = float(kwargs.get('scale_factor', 1.)) self.spline_kind = spline_kind self.extrema_detection = kwargs.get('extrema_detection', 'simple') assert self.extrema_detection in ('simple', 'parabol'), "Only 'simple' and 'parabol' values supported" self.DTYPE = kwargs.get('DTYPE', np.float64) self.FIXE = int(kwargs.get('FIXE', 0)) self.FIXE_H = int(kwargs.get('FIXE_H', 0)) self.MAX_ITERATION = int(kwargs.get('MAX_ITERATION', 1000)) self.imfs = None self.residue = None def __call__(self, S: np.ndarray, T: Optional[np.ndarray] = None, max_imf: int = -1) -> np.ndarray: return self.emd(S, T=T, max_imf=max_imf) def extract_max_min_spline(self, T: np.ndarray, S: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: ext_res = self.find_extrema(T, S) max_pos, max_val = ext_res[0], ext_res[1] min_pos, min_val = ext_res[2], ext_res[3] if len(max_pos) + len(min_pos) < 3: return [-1]*4 max_extrema, min_extrema = self.prepare_points(T, S, max_pos, max_val, min_pos, min_val) _, max_spline = self.spline_points(T, max_extrema) _, min_spline = self.spline_points(T, min_extrema) return max_spline, min_spline, max_extrema, min_extrema def prepare_points( self, T: np.ndarray, S: np.ndarray, max_pos: np.ndarray, max_val: np.ndarray, min_pos: np.ndarray, min_val: np.ndarray ): if self.extrema_detection == "parabol": return self._prepare_points_parabol(T, S, max_pos, max_val, min_pos, min_val) elif self.extrema_detection == "simple": return self._prepare_points_simple(T, S, max_pos, max_val, min_pos, min_val) else: msg = "Incorrect extrema detection type. Please try: 'simple' or 'parabol'." 
raise ValueError(msg) def _prepare_points_parabol(self, T, S, max_pos, max_val, min_pos, min_val) -> Tuple[np.ndarray, np.ndarray]: max_extrema = np.zeros((2,len(max_pos)), dtype=self.DTYPE) min_extrema = np.zeros((2,len(min_pos)), dtype=self.DTYPE) max_extrema[0], min_extrema[0] = max_pos, min_pos max_extrema[1], min_extrema[1] = max_val, min_val nbsym = self.nbsym end_min, end_max = len(min_pos), len(max_pos) d_pos = max_pos[0] - min_pos[0] left_ext_max_type = d_pos<0 if left_ext_max_type: if (S[0]>min_val[0]) and (np.abs(d_pos)>(max_pos[0]-T[0])): expand_left_max_pos = 2*max_pos[0] - max_pos[1:nbsym+1] expand_left_min_pos = 2*max_pos[0] - min_pos[0:nbsym] expand_left_max_val = max_val[1:nbsym+1] expand_left_min_val = min_val[0:nbsym] else: expand_left_max_pos = 2*T[0] - max_pos[0:nbsym] expand_left_min_pos = 2*T[0] - np.append(T[0], min_pos[0:nbsym-1]) expand_left_max_val = max_val[0:nbsym] expand_left_min_val = np.append(S[0], min_val[0:nbsym-1]) else: if (S[0] < max_val[0]) and (np.abs(d_pos)>(min_pos[0]-T[0])): expand_left_max_pos = 2*min_pos[0] - max_pos[0:nbsym] expand_left_min_pos = 2*min_pos[0] - min_pos[1:nbsym+1] expand_left_max_val = max_val[0:nbsym] expand_left_min_val = min_val[1:nbsym+1] else: expand_left_max_pos = 2*T[0] - np.append(T[0], max_pos[0:nbsym-1]) expand_left_min_pos = 2*T[0] - min_pos[0:nbsym] expand_left_max_val = np.append(S[0], max_val[0:nbsym-1]) expand_left_min_val = min_val[0:nbsym] if not expand_left_min_pos.shape: expand_left_min_pos, expand_left_min_val = min_pos, min_val if not expand_left_max_pos.shape: expand_left_max_pos, expand_left_max_val = max_pos, max_val expand_left_min = np.vstack((expand_left_min_pos[::-1], expand_left_min_val[::-1])) expand_left_max = np.vstack((expand_left_max_pos[::-1], expand_left_max_val[::-1])) d_pos = max_pos[-1] - min_pos[-1] right_ext_max_type = d_pos > 0 if not right_ext_max_type: if (S[-1] < max_val[-1]) and (np.abs(d_pos)>(T[-1]-min_pos[-1])): idx_max = max(0, end_max-nbsym) idx_min = max(0, end_min-nbsym-1) expand_right_max_pos = 2*min_pos[-1] - max_pos[idx_max:] expand_right_min_pos = 2*min_pos[-1] - min_pos[idx_min:-1] expand_right_max_val = max_val[idx_max:] expand_right_min_val = min_val[idx_min:-1] else: idx_max = max(0, end_max-nbsym+1) idx_min = max(0, end_min-nbsym) expand_right_max_pos = 2*T[-1] - np.append(max_pos[idx_max:], T[-1]) expand_right_min_pos = 2*T[-1] - min_pos[idx_min:] expand_right_max_val = np.append(max_val[idx_max:],S[-1]) expand_right_min_val = min_val[idx_min:] else: if (S[-1] > min_val[-1]) and len(max_pos)>1 and (np.abs(d_pos)>(T[-1]-max_pos[-1])): idx_max = max(0, end_max-nbsym-1) idx_min = max(0, end_min-nbsym) expand_right_max_pos = 2*max_pos[-1] - max_pos[idx_max:-1] expand_right_min_pos = 2*max_pos[-1] - min_pos[idx_min:] expand_right_max_val = max_val[idx_max:-1] expand_right_min_val = min_val[idx_min:] else: idx_max = max(0, end_max-nbsym) idx_min = max(0, end_min-nbsym+1) expand_right_max_pos = 2*T[-1] - max_pos[idx_max:] expand_right_min_pos = 2*T[-1] - np.append(min_pos[idx_min:], T[-1]) expand_right_max_val = max_val[idx_max:] expand_right_min_val = np.append(min_val[idx_min:], S[-1]) if not expand_right_min_pos.shape: expand_right_min_pos, expand_right_min_val = min_pos, min_val if not expand_right_max_pos.shape: expand_right_max_pos, expand_right_max_val = max_pos, max_val expand_right_min = np.vstack((expand_right_min_pos[::-1], expand_right_min_val[::-1])) expand_right_max = np.vstack((expand_right_max_pos[::-1], expand_right_max_val[::-1])) max_extrema = 
np.hstack((expand_left_max, max_extrema, expand_right_max)) min_extrema = np.hstack((expand_left_min, min_extrema, expand_right_min)) return max_extrema, min_extrema def _prepare_points_simple( self, T: np.ndarray, S: np.ndarray, max_pos: np.ndarray, max_val: Optional[np.ndarray], min_pos: np.ndarray, min_val: Optional[np.ndarray] ) -> Tuple[np.ndarray, np.ndarray]: ind_min = min_pos.astype(int) ind_max = max_pos.astype(int) nbsym = self.nbsym end_min, end_max = len(min_pos), len(max_pos) if ind_max[0] < ind_min[0]: if S[0] > S[ind_min[0]]: lmax = ind_max[1:min(end_max,nbsym+1)][::-1] lmin = ind_min[0:min(end_min,nbsym+0)][::-1] lsym = ind_max[0] else: lmax = ind_max[0:min(end_max,nbsym)][::-1] lmin = np.append(ind_min[0:min(end_min,nbsym-1)][::-1],0) lsym = 0 else: if S[0] < S[ind_max[0]]: lmax = ind_max[0:min(end_max,nbsym+0)][::-1] lmin = ind_min[1:min(end_min,nbsym+1)][::-1] lsym = ind_min[0] else: lmax = np.append(ind_max[0:min(end_max,nbsym-1)][::-1],0) lmin = ind_min[0:min(end_min,nbsym)][::-1] lsym = 0 if ind_max[-1] < ind_min[-1]: if S[-1] < S[ind_max[-1]]: rmax = ind_max[max(end_max-nbsym,0):][::-1] rmin = ind_min[max(end_min-nbsym-1,0):-1][::-1] rsym = ind_min[-1] else: rmax = np.append(ind_max[max(end_max-nbsym+1,0):], len(S)-1)[::-1] rmin = ind_min[max(end_min-nbsym,0):][::-1] rsym = len(S)-1 else: if S[-1] > S[ind_min[-1]]: rmax = ind_max[max(end_max-nbsym-1,0):-1][::-1] rmin = ind_min[max(end_min-nbsym,0):][::-1] rsym = ind_max[-1] else: rmax = ind_max[max(end_max-nbsym,0):][::-1] rmin = np.append(ind_min[max(end_min-nbsym+1,0):], len(S)-1)[::-1] rsym = len(S)-1 if not lmin.size: lmin = ind_min if not rmin.size: rmin = ind_min if not lmax.size: lmax = ind_max if not rmax.size: rmax = ind_max tlmin = 2*T[lsym]-T[lmin] tlmax = 2*T[lsym]-T[lmax] trmin = 2*T[rsym]-T[rmin] trmax = 2*T[rsym]-T[rmax] if tlmin[0] > T[0] or tlmax[0] > T[0]: if lsym == ind_max[0]: lmax = ind_max[0:min(end_max,nbsym)][::-1] else: lmin = ind_min[0:min(end_min,nbsym)][::-1] if lsym == 0: raise Exception('Left edge BUG') lsym = 0 tlmin = 2*T[lsym]-T[lmin] tlmax = 2*T[lsym]-T[lmax] if trmin[-1] < T[-1] or trmax[-1] < T[-1]: if rsym == ind_max[-1]: rmax = ind_max[max(end_max-nbsym,0):][::-1] else: rmin = ind_min[max(end_min-nbsym,0):][::-1] if rsym == len(S)-1: raise Exception('Right edge BUG') rsym = len(S)-1 trmin = 2*T[rsym]-T[rmin] trmax = 2*T[rsym]-T[rmax] zlmax = S[lmax] zlmin = S[lmin] zrmax = S[rmax] zrmin = S[rmin] tmin = np.append(tlmin, np.append(T[ind_min], trmin)) tmax = np.append(tlmax, np.append(T[ind_max], trmax)) zmin = np.append(zlmin, np.append(S[ind_min], zrmin)) zmax = np.append(zlmax, np.append(S[ind_max], zrmax)) max_extrema = np.array([tmax, zmax]) min_extrema = np.array([tmin, zmin]) max_dup_idx = np.where(max_extrema[0, 1:] == max_extrema[0,:-1]) max_extrema = np.delete(max_extrema, max_dup_idx, axis=1) min_dup_idx = np.where(min_extrema[0, 1:] == min_extrema[0,:-1]) min_extrema = np.delete(min_extrema, min_dup_idx, axis=1) return max_extrema, min_extrema def spline_points(self, T: np.ndarray, extrema: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: kind = self.spline_kind.lower() t = T[np.r_[T>=extrema[0,0]] & np.r_[T<=extrema[0,-1]]] if kind == "akima": return t, akima(extrema[0], extrema[1], t) elif kind == 'cubic': if extrema.shape[1] > 3: return t, interp1d(extrema[0], extrema[1], kind=kind)(t) else: return cubic_spline_3pts(extrema[0], extrema[1], t) elif kind in ['slinear', 'quadratic', 'linear']: return T, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE) 
else: raise ValueError("No such interpolation method!") @staticmethod
Apache License 2.0
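A short, self-contained usage sketch of the logic in _not_duplicate above, reproducing the docstring example with plain numpy (note that np.int in the original is deprecated on recent numpy releases and would need int or np.int64 there).

import numpy as np

S = np.array([0, 1, 1, 1, 2, 3])
# An interior point is a duplicate if it equals both of its neighbours.
dup = np.r_[S[1:-1] == S[0:-2]] & np.r_[S[1:-1] == S[2:]]
not_dup_idx = np.arange(1, len(S) - 1)[~dup]
idx = np.concatenate(([0], not_dup_idx, [len(S) - 1]))
print(idx)  # [0 1 3 4 5]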
aws/sagemaker-containers
src/sagemaker_containers/_mapping.py
to_cmd_args
python
def to_cmd_args(mapping):
    sorted_keys = sorted(mapping.keys())

    def arg_name(obj):
        string = _decode(obj)
        if string:
            return u"--%s" % string if len(string) > 1 else u"-%s" % string
        else:
            return u""

    arg_names = [arg_name(argument) for argument in sorted_keys]

    def arg_value(value):
        if hasattr(value, "items"):
            map_items = ["%s=%s" % (k, v) for k, v in sorted(value.items())]
            return ",".join(map_items)
        return _decode(value)

    arg_values = [arg_value(mapping[key]) for key in sorted_keys]

    items = zip(arg_names, arg_values)

    return [item for item in itertools.chain.from_iterable(items)]
Transform a dictionary into a list of cmd arguments.

Example:
    >>>args = mapping.to_cmd_args({'model_dir': '/opt/ml/model', 'batch_size': 25})
    >>>
    >>>print(args)
    ['--model_dir', '/opt/ml/model', '--batch_size', 25]

Args:
    mapping (dict[str, object]): A Python mapping.

Returns:
    (list): List of cmd arguments
https://github.com/aws/sagemaker-containers/blob/526dda90d636c7fb0c25791e6c57d077bd972000/src/sagemaker_containers/_mapping.py#L60-L94
from __future__ import absolute_import import collections import itertools import json import six SplitResultSpec = collections.namedtuple("SplitResultSpec", "included excluded") def to_env_vars(mapping): def format_key(key): if key: decoded_name = "SM_%s" % str(key).upper() return decoded_name else: return "" def format_value(_mapping): if six.PY3 and isinstance(_mapping, six.binary_type): return _mapping.decode("latin1") elif _mapping is None: return "" elif isinstance(_mapping, six.string_types): return str(_mapping) else: return json.dumps(_mapping, sort_keys=True, separators=(",", ":"), ensure_ascii=True) return {format_key(k): format_value(v) for k, v in mapping.items()}
Apache License 2.0
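As a sketch of the same flattening behaviour, the standalone function below mirrors to_cmd_args without the module-private _decode helper; the hyperparameter names are made up for illustration.

def to_cmd_args_sketch(mapping):
    args = []
    for key in sorted(mapping):
        # long names become --name, single-character names become -n
        args.append("--%s" % key if len(str(key)) > 1 else "-%s" % key)
        value = mapping[key]
        if hasattr(value, "items"):
            # dict values collapse to a comma-separated "k=v" list
            value = ",".join("%s=%s" % (k, v) for k, v in sorted(value.items()))
        args.append(str(value))
    return args

print(to_cmd_args_sketch({"model_dir": "/opt/ml/model", "hp": {"epochs": 3, "lr": 0.1}}))
# ['--hp', 'epochs=3,lr=0.1', '--model_dir', '/opt/ml/model']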
academysoftwarefoundation/opencue
pycue/opencue/wrappers/job.py
NestedJob.markAsWaiting
python
def markAsWaiting(self, **request):
    self.asJob().markAsWaiting(**request)
Changes the matching frames from the depend state to the waiting state.

:type request: Dict
:param request: FrameSearch parameters
https://github.com/academysoftwarefoundation/opencue/blob/da28ae905b81e7d1125db2073a369fdc0ae9acd4/pycue/opencue/wrappers/job.py#L833-L839
import enum import os import time from opencue import Cuebot from opencue.compiled_proto import comment_pb2 from opencue.compiled_proto import job_pb2 import opencue.search import opencue.wrappers.comment import opencue.wrappers.depend import opencue.wrappers.frame import opencue.wrappers.layer class Job(object): class JobState(enum.IntEnum): PENDING = job_pb2.PENDING FINISHED = job_pb2.FINISHED STARTUP = job_pb2.STARTUP SHUTDOWN = job_pb2.SHUTDOWN POSTED = job_pb2.POSTED def __init__(self, job=None): self.data = job self.stub = Cuebot.getStub('job') self.__frameStateTotals = {} def kill(self): self.stub.Kill(job_pb2.JobKillRequest(job=self.data), timeout=Cuebot.Timeout) def pause(self): self.stub.Pause(job_pb2.JobPauseRequest(job=self.data), timeout=Cuebot.Timeout) def resume(self): self.stub.Resume(job_pb2.JobResumeRequest(job=self.data), timeout=Cuebot.Timeout) def killFrames(self, **request): criteria = opencue.search.FrameSearch.criteriaFromOptions(**request) self.stub.KillFrames(job_pb2.JobKillFramesRequest(job=self.data, req=criteria), timeout=Cuebot.Timeout) def eatFrames(self, **request): criteria = opencue.search.FrameSearch.criteriaFromOptions(**request) return self.stub.EatFrames(job_pb2.JobEatFramesRequest(job=self.data, req=criteria), timeout=Cuebot.Timeout) def retryFrames(self, **request): criteria = opencue.search.FrameSearch.criteriaFromOptions(**request) return self.stub.RetryFrames(job_pb2.JobRetryFramesRequest(job=self.data, req=criteria), timeout=Cuebot.Timeout) def markdoneFrames(self, **request): criteria = opencue.search.FrameSearch.criteriaFromOptions(**request) return self.stub.MarkDoneFrames( job_pb2.JobMarkDoneFramesRequest(job=self.data, req=criteria), timeout=Cuebot.Timeout) def markAsWaiting(self, **request): criteria = opencue.search.FrameSearch.criteriaFromOptions(**request) return self.stub.MarkAsWaiting( job_pb2.JobMarkAsWaitingRequest(job=self.data, req=criteria), timeout=Cuebot.Timeout) def setMinCores(self, minCores): self.stub.SetMinCores(job_pb2.JobSetMinCoresRequest(job=self.data, val=minCores), timeout=Cuebot.Timeout) def setMaxCores(self, maxCores): self.stub.SetMaxCores(job_pb2.JobSetMaxCoresRequest(job=self.data, val=maxCores), timeout=Cuebot.Timeout) def setMinGpus(self, minGpus): self.stub.SetMinGpus(job_pb2.JobSetMinGpusRequest(job=self.data, val=minGpus), timeout=Cuebot.Timeout) def setMaxGpus(self, maxGpus): self.stub.SetMaxGpus(job_pb2.JobSetMaxGpusRequest(job=self.data, val=maxGpus), timeout=Cuebot.Timeout) def setPriority(self, priority): self.stub.SetPriority(job_pb2.JobSetPriorityRequest(job=self.data, val=priority), timeout=Cuebot.Timeout) def setMaxRetries(self, maxRetries): self.stub.SetMaxRetries( job_pb2.JobSetMaxRetriesRequest(job=self.data, max_retries=maxRetries), timeout=Cuebot.Timeout) def getLayers(self): response = self.stub.GetLayers(job_pb2.JobGetLayersRequest(job=self.data), timeout=Cuebot.Timeout) layerSeq = response.layers return [opencue.wrappers.layer.Layer(lyr) for lyr in layerSeq.layers] def getFrames(self, **options): criteria = opencue.search.FrameSearch.criteriaFromOptions(**options) response = self.stub.GetFrames(job_pb2.JobGetFramesRequest(job=self.data, req=criteria), timeout=Cuebot.Timeout) frameSeq = response.frames return [opencue.wrappers.frame.Frame(frm) for frm in frameSeq.frames] def getUpdatedFrames(self, lastCheck, layers=None): if layers is not None: layerSeq = job_pb2.LayerSeq() layerSeq.layers.extend(layers) else: layerSeq = None return self.stub.GetUpdatedFrames( 
job_pb2.JobGetUpdatedFramesRequest(job=self.data, last_check=lastCheck, layer_filter=layerSeq), timeout=Cuebot.Timeout) def setAutoEating(self, value): self.stub.SetAutoEat(job_pb2.JobSetAutoEatRequest(job=self.data, value=value), timeout=Cuebot.Timeout) def addRenderPartition(self, hostname, threads, max_cores, num_mem, max_gpus, max_gpu_memory): self.stub.AddRenderPartition( job_pb2.JobAddRenderPartRequest(job=self.data, host=hostname, threads=threads, max_cores=max_cores, max_memory=num_mem, max_gpus=max_gpus, max_gpu_memory=max_gpu_memory, username=os.getenv("USER", "unknown"))) def getWhatDependsOnThis(self): response = self.stub.GetWhatDependsOnThis( job_pb2.JobGetWhatDependsOnThisRequest(job=self.data), timeout=Cuebot.Timeout) dependSeq = response.depends return [opencue.wrappers.depend.Depend(dep) for dep in dependSeq.depends] def getWhatThisDependsOn(self): response = self.stub.GetWhatThisDependsOn( job_pb2.JobGetWhatThisDependsOnRequest(job=self.data), timeout=Cuebot.Timeout) dependSeq = response.depends return [opencue.wrappers.depend.Depend(dep) for dep in dependSeq.depends] def getDepends(self): response = self.stub.GetDepends( job_pb2.JobGetDependsRequest(job=self.data), timeout=Cuebot.Timeout) dependSeq = response.depends return [opencue.wrappers.depend.Depend(dep) for dep in dependSeq.depends] def dropDepends(self, target): return self.stub.DropDepends(job_pb2.JobDropDependsRequest(job=self.data, target=target), timeout=Cuebot.Timeout) def createDependencyOnJob(self, job): response = self.stub.CreateDependencyOnJob( job_pb2.JobCreateDependencyOnJobRequest(job=self.data, on_job=job.data), timeout=Cuebot.Timeout) return opencue.wrappers.depend.Depend(response.depend) def createDependencyOnLayer(self, layer): response = self.stub.CreateDependencyOnLayer( job_pb2.JobCreateDependencyOnLayerRequest(job=self.data, layer=layer.data), timeout=Cuebot.Timeout) return opencue.wrappers.depend.Depend(response.depend) def createDependencyOnFrame(self, frame): response = self.stub.CreateDependencyOnFrame( job_pb2.JobCreateDependencyOnFrameRequest(job=self.data, frame=frame.data), timeout=Cuebot.Timeout) return opencue.wrappers.depend.Depend(response.depend) def addComment(self, subject, message): comment = comment_pb2.Comment( user=os.getenv("USER", "unknown"), subject=subject, message=message or " ", timestamp=0) self.stub.AddComment(job_pb2.JobAddCommentRequest(job=self.data, new_comment=comment), timeout=Cuebot.Timeout) def getComments(self): response = self.stub.GetComments(job_pb2.JobGetCommentsRequest(job=self.data), timeout=Cuebot.Timeout) commentSeq = response.comments return [opencue.wrappers.comment.Comment(cmt) for cmt in commentSeq.comments] def setGroup(self, group): self.stub.SetGroup(job_pb2.JobSetGroupRequest(job=self.data, group_id=group.id()), timeout=Cuebot.Timeout) def reorderFrames(self, frame_range, order): self.stub.ReorderFrames( job_pb2.JobReorderFramesRequest(job=self.data, range=frame_range, order=order), timeout=Cuebot.Timeout) def staggerFrames(self, frame_range, stagger): self.stub.StaggerFrames( job_pb2.JobStaggerFramesRequest(job=self.data, range=frame_range, stagger=stagger), timeout=Cuebot.Timeout) def facility(self): return self.data.facility def id(self): return self.data.id def name(self): return self.data.name def show(self): return self.data.show def shot(self): return self.data.shot def logDir(self): return self.data.log_dir def uid(self): return self.data.uid if self.data.HasField("uid") else None def user(self): return self.data.user def 
username(self): return self.user() def state(self): return self.data.state def priority(self): return self.data.priority def minCores(self): return self.data.min_cores def maxCores(self): return self.data.max_cores def minGpus(self): return self.data.min_gpus def maxGpus(self): return self.data.max_gpus def os(self): return self.data.os def startTime(self, format=None): if not format: return self.data.start_time return time.strftime(format, time.localtime(self.data.start_time)) def stopTime(self, format=None): if not format: return self.data.stop_time return time.strftime(format, time.localtime(self.data.stop_time)) def runTime(self): if self.data.stop_time == 0: return int(time.time() - self.data.start_time) return self.data.stop_time - self.data.start_time def coreSecondsRemaining(self): return self.data.job_stats.remaining_core_sec def age(self): return int(time.time() - self.data.start_time) def isPaused(self): return self.data.is_paused def isAutoEating(self): return self.data.auto_eat def isCommented(self): return self.data.has_comment def setAutoEat(self, value): self.setAutoEating(value) self.data.auto_eat = value def coresReserved(self): return self.data.job_stats.reserved_cores def totalFrames(self): return self.data.job_stats.total_frames def totalLayers(self): return self.data.job_stats.total_layers def dependFrames(self): return self.data.job_stats.depend_frames def succeededFrames(self): return self.data.job_stats.succeeded_frames def runningFrames(self): return self.data.job_stats.running_frames def deadFrames(self): return self.data.job_stats.dead_frames def waitingFrames(self): return self.data.job_stats.waiting_frames def eatenFrames(self): return self.data.job_stats.eaten_frames def pendingFrames(self): return self.data.job_stats.pending_frames def frameStateTotals(self): if not self.__frameStateTotals: self.__frameStateTotals.clear() for state in job_pb2.FrameState.keys(): frameCount = getattr(self.data.job_stats, '{}_frames'.format(state.lower()), 0) self.__frameStateTotals[getattr(job_pb2, state)] = frameCount return self.__frameStateTotals def percentCompleted(self): try: return self.data.job_stats.succeeded_frames / float(self.data.job_stats.total_frames) * 100.0 except ZeroDivisionError: return 0 def group(self): return self.data.group def avgFrameTime(self): return self.data.job_stats.avg_frame_sec def averageCoreTime(self): return self.data.job_stats.avg_core_sec def maxRss(self): return self.data.job_stats.max_rss class NestedJob(Job): def __init__(self, nestedJob=None): super(NestedJob, self).__init__(nestedJob) self.__children = [] def children(self): return self.__children def kill(self): self.asJob().kill() def pause(self): self.asJob().pause() def resume(self): self.asJob().resume() def killFrames(self, **request): self.asJob().killFrames(**request) def eatFrames(self, **request): self.asJob().eatFrames(**request) def retryFrames(self, **request): self.asJob().retryFrames(**request) def markdoneFrames(self, **request): self.asJob().markdoneFrames(**request)
Apache License 2.0
dvska/gdata-python3
src/gdata/gauth.py
generate_client_login_request_body
python
def generate_client_login_request_body(email, password, service, source,
                                        account_type='HOSTED_OR_GOOGLE',
                                        captcha_token=None,
                                        captcha_response=None):
    request_fields = {'Email': email,
                      'Passwd': password,
                      'accountType': account_type,
                      'service': service,
                      'source': source}
    if captcha_token and captcha_response:
        request_fields['logintoken'] = captcha_token
        request_fields['logincaptcha'] = captcha_response
    return urllib.parse.urlencode(request_fields)
Creates the body of the authentication request

See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request
for more details.

Args:
    email: str
    password: str
    service: str
    source: str
    account_type: str (optional) Default is 'HOSTED_OR_GOOGLE', other valid
        values are 'GOOGLE' and 'HOSTED'
    captcha_token: str (optional)
    captcha_response: str (optional)

Returns:
    The HTTP body to send in a request for a client login token.
https://github.com/dvska/gdata-python3/blob/a34c35901473e4ba7223ea4607136141301fbe88/src/gdata/gauth.py#L177-L209
import datetime import random import time import urllib.error import urllib.parse import urllib.parse import urllib.request import atom.http_core try: import simplejson from simplejson.decoder import JSONDecodeError except ImportError: JSONDecodeError = None try: from django.utils import simplejson except ImportError: import json as simplejson try: from urllib.parse import parse_qsl except ImportError: from cgi import parse_qsl PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth=' AUTHSUB_AUTH_LABEL = 'AuthSub token=' OAUTH2_AUTH_LABEL = 'Bearer ' AUTH_SCOPES = { 'cl': ( 'https://www.google.com/calendar/feeds/', 'http://www.google.com/calendar/feeds/'), 'gbase': ( 'http://base.google.com/base/feeds/', 'http://www.google.com/base/feeds/'), 'blogger': ( 'http://www.blogger.com/feeds/',), 'codesearch': ( 'http://www.google.com/codesearch/feeds/',), 'cp': ( 'https://www.google.com/m8/feeds/', 'http://www.google.com/m8/feeds/'), 'finance': ( 'http://finance.google.com/finance/feeds/',), 'health': ( 'https://www.google.com/health/feeds/',), 'writely': ( 'https://docs.google.com/feeds/', 'https://spreadsheets.google.com/feeds/', 'https://docs.googleusercontent.com/'), 'lh2': ( 'http://picasaweb.google.com/data/',), 'apps': ( 'https://apps-apis.google.com/a/feeds/user/', 'https://apps-apis.google.com/a/feeds/policies/', 'https://apps-apis.google.com/a/feeds/alias/', 'https://apps-apis.google.com/a/feeds/groups/', 'https://apps-apis.google.com/a/feeds/compliance/audit/', 'https://apps-apis.google.com/a/feeds/migration/', 'https://apps-apis.google.com/a/feeds/emailsettings/2.0/'), 'weaver': ( 'https://www.google.com/h9/feeds/',), 'wise': ( 'https://spreadsheets.google.com/feeds/',), 'sitemaps': ( 'https://www.google.com/webmasters/tools/feeds/',), 'youtube': ( 'http://gdata.youtube.com/feeds/api/', 'http://uploads.gdata.youtube.com/feeds/api', 'http://gdata.youtube.com/action/GetUploadToken'), 'books': ( 'http://www.google.com/books/feeds/',), 'analytics': ( 'https://www.google.com/analytics/feeds/',), 'jotspot': ( 'http://sites.google.com/feeds/', 'https://sites.google.com/feeds/'), 'local': ( 'http://maps.google.com/maps/feeds/',), 'code': ( 'http://code.google.com/feeds/issues',)} class Error(Exception): pass class UnsupportedTokenType(Error): pass class OAuth2AccessTokenError(Error): def __init__(self, error_message): self.error_message = error_message class OAuth2RevokeError(Error): def __init__(self, http_response, response_body=None): body = response_body or http_response.read() self.status = http_response.status self.reason = http_response.reason self.body = body self.headers = atom.http_core.get_headers(http_response) self.error_msg = 'Invalid response %s.' % self.status try: json_from_body = simplejson.loads(body.decode('utf-8')) if isinstance(json_from_body, dict): self.error_msg = json_from_body.get('error', self.error_msg) except (ValueError, JSONDecodeError): pass def __str__(self): return 'OAuth2RevokeError(status=%i, error=%s)' % (self.status, self.error_msg)
Apache License 2.0
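A usage sketch for generate_client_login_request_body, assuming the gdata package above is importable; the service and source values are placeholders.

from gdata.gauth import generate_client_login_request_body

body = generate_client_login_request_body(
    email='user@example.com', password='secret',
    service='cp', source='example-client-1.0')
print(body)
# e.g. Email=user%40example.com&Passwd=secret&accountType=HOSTED_OR_GOOGLE&service=cp&source=example-client-1.0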
ronreiter/interactive-tutorials
suds/mx/appender.py
Appender.resume
python
def resume(self, content):
    self.marshaller.resume(content)
Notify I{marshaller} that appending this content has resumed.

@param content: The content for which processing has been resumed.
@type content: L{Object}
https://github.com/ronreiter/interactive-tutorials/blob/aee01e7198f454fc8ca37b2f9575f7d12b94eced/suds/mx/appender.py#L168-L174
from logging import getLogger from suds import null, tostr from suds.mx import Content from suds.sudsobject import footprint from suds.sudsobject import Object, Property from suds.sax.element import Element from suds.sax.text import Text log = getLogger(__name__) class Matcher: def __init__(self, cls): self.cls = cls def __eq__(self, x): if self.cls is None: return x is None else: return isinstance(x, self.cls) class ContentAppender: def __init__(self, marshaller): self.default = PrimativeAppender(marshaller) self.appenders = ( (Matcher(None), NoneAppender(marshaller)), (Matcher(null), NoneAppender(marshaller)), (Matcher(Property), PropertyAppender(marshaller)), (Matcher(Object), ObjectAppender(marshaller)), (Matcher(Element), ElementAppender(marshaller)), (Matcher(Text), TextAppender(marshaller)), (Matcher(list), ListAppender(marshaller)), (Matcher(tuple), ListAppender(marshaller)), (Matcher(dict), DictAppender(marshaller)), ) def append(self, parent, content): appender = self.default for a in self.appenders: if a[0] == content.value: appender = a[1] break appender.append(parent, content) class Appender: def __init__(self, marshaller): self.marshaller = marshaller def node(self, content): return self.marshaller.node(content) def setnil(self, node, content): self.marshaller.setnil(node, content) def setdefault(self, node, content): return self.marshaller.setdefault(node, content) def optional(self, content): return self.marshaller.optional(content) def suspend(self, content): self.marshaller.suspend(content)
Apache License 2.0
cigna/ibis
ibis/utilities/sqoop_helper.py
SqoopHelper.get_ddl_query
python
def get_ddl_query(self, jdbc, database, tbl, schema=None):
    db_type = self.get_jdbc_source(jdbc)
    if db_type == SQLSERVER:
        params = {'db': database, 'tbl': tbl, 'schema': schema}
    elif db_type == MYSQL:
        params = {'db': database, 'tbl': tbl}
    elif db_type == POSTGRESQL:
        params = {'db': database, 'tbl': tbl, 'schema': schema}
    else:
        params = {'db': database.upper(), 'tbl': tbl.upper()}

    columns_and_types = {
        'db2': ("SELECT NAME, COLTYPE FROM SYSIBM.SYSCOLUMNS"
                " WHERE TBCREATOR='{db}' AND TBNAME='{tbl}'"),
        'sqlserver': ("SELECT COLUMN_NAME, DATA_TYPE FROM "
                      "INFORMATION_SCHEMA.COLUMNS WHERE "
                      "TABLE_CATALOG='{db}' AND TABLE_NAME='{tbl}' "
                      "AND TABLE_SCHEMA='{schema}'"),
        'oracle': ("SELECT COLUMN_NAME, DATA_TYPE FROM all_tab_columns"
                   " WHERE OWNER='{db}' AND TABLE_NAME='{tbl}'"),
        'teradata': "HELP COLUMN {db}.{tbl}.*;",
        'mysql': ("SELECT COLUMN_NAME, DATA_TYPE FROM "
                  "INFORMATION_SCHEMA.COLUMNS WHERE "
                  "TABLE_SCHEMA = '{db}' and TABLE_NAME = '{tbl}'"),
        'postgresql': ("SELECT COLUMN_NAME, DATA_TYPE FROM "
                       "INFORMATION_SCHEMA.COLUMNS WHERE "
                       "TABLE_CATALOG='{db}' AND TABLE_NAME='{tbl}' "
                       "AND TABLE_SCHEMA='{schema}'")}

    sql_stmt = columns_and_types[db_type]
    sql_stmt = sql_stmt.format(**params)
    return sql_stmt
Get ddl query
https://github.com/cigna/ibis/blob/f99e3b7a677652a8a1c00a069e645d97682e839c/ibis/utilities/sqoop_helper.py#L180-L210
import subprocess import re import sys from ibis.custom_logging import get_logger ORACLE = 'oracle' DB2 = 'db2' TERADATA = 'teradata' SQLSERVER = 'sqlserver' MYSQL = 'mysql' POSTGRESQL = 'postgresql' VALID_SOURCES = [ORACLE, DB2, SQLSERVER, TERADATA, MYSQL, POSTGRESQL] DRIVERS = { "teradata": "com.teradata.jdbc.TeraDriver", "oracle": "com.quest.oraoop.OraOopManagerFactory", "db2": "com.cloudera.sqoop.manager.DefaultManagerFactory", "sqlserver": ['com.cloudera.sqoop.manager.DefaultManagerFactory', 'net.sourceforge.jtds.jdbc.Driver'], "mysql": "com.cloudera.sqoop.manager.DefaultManagerFactory", "postgresql": "com.cloudera.sqoop.manager.DefaultManagerFactory"} SQOOP_CACHE = {} SQOOP_CACHE_VIEW = {} class SqoopHelper(object): def __init__(self, cfg_mgr): self.cfg_mgr = cfg_mgr self.pattern_non_alphanumeric = re.compile(r'[^A-Za-z0-9_]') self.pattern_numeric_at_start = r'^\d' self.pattern_underscore_at_start = r'^_' self.logger = get_logger(self.cfg_mgr) def get_sqoop_eval(self): sqoop_eval = True try: subprocess.Popen('sqoop-eval', stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as oe: sqoop_eval = False msg = "sqoop-eval command not available" self.logger.warning(msg) err_msg = "Error: sqoop-eval command not available - reason %s" err_msg = err_msg % oe.message sys.exit(err_msg) return sqoop_eval def eval(self, jdbc, sql_stmt, db_username, password_file): src = self.get_jdbc_source(jdbc) driver = self.get_driver(jdbc) results = [] if self.get_sqoop_eval(): if 'jceks' in password_file: try: jceks, password_alias = password_file.split('#') except ValueError as err_exp: msg = ('Error unpacking jceks path and password alias ' 'from {password}. ' 'Expecting jceks:path#password_alias. Example: ' 'jceks://hdfs/user/dev/fake.passwords.jceks#' 'fake.password.alias') msg = msg.format(password=password_file) self.logger.error(msg) err_msg = ('Error found in sqoop_helper, exit from process' ' with errors - reason {0}') self.logger.error(err_msg.format(err_exp.message)) sys.exit(1) cmd_list = [ 'sqoop-eval', '-D hadoop.security.credential.provider.path=' + jceks, '--driver', driver, '--verbose', '--connect', jdbc, '--query', sql_stmt, '--username', db_username, '--password-alias', password_alias] else: cmd_list = [ 'sqoop-eval', '--driver', driver, '--verbose', '--connect', jdbc, '--query', sql_stmt, '--username', db_username, '--password-file', password_file] self.logger.info('Sqoop eval: {stmt}'.format(stmt=sql_stmt)) proc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, err = proc.communicate() if err: if 'updateCount=-1' not in err: self.logger.error(output) self.logger.error(err) raise ValueError('Failed on sqoop eval!') re_expr = re.compile(r"\|.*\|") cleaned_lines = re_expr.findall(output) cleaned_lines = [line.split('|') for line in cleaned_lines] results = [[y.strip() for y in c] for c in cleaned_lines] results = [x[1:-1] for x in results[1:]] else: msg = 'Warning sqoop-eval not available' self.logger.warning(msg) sys.exit("Error: Command not available - reason %s" % msg) return results def _eval(self, jdbc, sql_stmt, db_username, password_file): src = self.get_jdbc_source(jdbc) driver = self.get_driver(jdbc) if 'jceks' in password_file: jceks, password_alias = password_file.split('#') cmd_list = ['sqoop-eval', '-D hadoop.security.credential.provider.path=' + jceks, '--driver', driver, '--verbose', '--connect', jdbc, '--query', sql_stmt, '--username', db_username, '--password-alias', password_alias] else: cmd_list = ['sqoop-eval', '--driver', 
driver, '--verbose', '--connect', jdbc, '--query', sql_stmt, '--username', db_username, '--password-file', password_file] proc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, err = proc.communicate() return proc.returncode, output, err def get_driver(self, jdbc): driver = '' windows_auth = 'domain' for db_src in VALID_SOURCES: if db_src == SQLSERVER: if db_src in jdbc and windows_auth in jdbc: driver = DRIVERS[db_src][1] else: driver = DRIVERS[db_src][0] elif db_src in jdbc: driver = DRIVERS[db_src] return driver def get_jdbc_source(self, jdbc): src = '' for db_src in VALID_SOURCES: if db_src in jdbc: src = db_src return src
Apache License 2.0
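For illustration, this is what the Oracle branch of get_ddl_query above produces once the template is formatted; the database and table names are hypothetical and the snippet only re-runs the format step, not the class.

template = ("SELECT COLUMN_NAME, DATA_TYPE FROM all_tab_columns"
            " WHERE OWNER='{db}' AND TABLE_NAME='{tbl}'")
print(template.format(db='SALES_DB', tbl='ORDERS'))
# SELECT COLUMN_NAME, DATA_TYPE FROM all_tab_columns WHERE OWNER='SALES_DB' AND TABLE_NAME='ORDERS'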
oboynitro/django-frontier
venv/Lib/site-packages/setuptools/pep425tags.py
get_darwin_arches
python
def get_darwin_arches(major, minor, machine):
    arches = []

    def _supports_arch(major, minor, arch):
        if arch == 'ppc':
            return (major, minor) <= (10, 5)
        if arch == 'ppc64':
            return (major, minor) == (10, 5)
        if arch == 'i386':
            return (major, minor) >= (10, 4)
        if arch == 'x86_64':
            return (major, minor) >= (10, 5)
        if arch in groups:
            for garch in groups[arch]:
                if _supports_arch(major, minor, garch):
                    return True
        return False

    groups = OrderedDict([
        ("fat", ("i386", "ppc")),
        ("intel", ("x86_64", "i386")),
        ("fat64", ("x86_64", "ppc64")),
        ("fat32", ("x86_64", "i386", "ppc")),
    ])

    if _supports_arch(major, minor, machine):
        arches.append(machine)

    for garch in groups:
        if machine in groups[garch] and _supports_arch(major, minor, garch):
            arches.append(garch)

    arches.append('universal')

    return arches
Return a list of supported arches (including group arches) for the given major, minor and machine architecture of a macOS machine.
https://github.com/oboynitro/django-frontier/blob/89bec0199aadcc5e976a1cc42ad9284603f6439a/venv/Lib/site-packages/setuptools/pep425tags.py#L162-L220
from __future__ import absolute_import import distutils.util from distutils import log import platform import re import sys import sysconfig import warnings from collections import OrderedDict from .extern import six from . import glibc _osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)') def get_config_var(var): try: return sysconfig.get_config_var(var) except IOError as e: warnings.warn("{}".format(e), RuntimeWarning) return None def get_abbr_impl(): if hasattr(sys, 'pypy_version_info'): pyimpl = 'pp' elif sys.platform.startswith('java'): pyimpl = 'jy' elif sys.platform == 'cli': pyimpl = 'ip' else: pyimpl = 'cp' return pyimpl def get_impl_ver(): impl_ver = get_config_var("py_version_nodot") if not impl_ver or get_abbr_impl() == 'pp': impl_ver = ''.join(map(str, get_impl_version_info())) return impl_ver def get_impl_version_info(): if get_abbr_impl() == 'pp': return (sys.version_info[0], sys.pypy_version_info.major, sys.pypy_version_info.minor) else: return sys.version_info[0], sys.version_info[1] def get_impl_tag(): return "{}{}".format(get_abbr_impl(), get_impl_ver()) def get_flag(var, fallback, expected=True, warn=True): val = get_config_var(var) if val is None: if warn: log.debug("Config variable '%s' is unset, Python ABI tag may " "be incorrect", var) return fallback() return val == expected def get_abi_tag(): soabi = get_config_var('SOABI') impl = get_abbr_impl() if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'): d = '' m = '' u = '' if get_flag('Py_DEBUG', lambda: hasattr(sys, 'gettotalrefcount'), warn=(impl == 'cp')): d = 'd' if get_flag('WITH_PYMALLOC', lambda: impl == 'cp', warn=(impl == 'cp')): m = 'm' if get_flag('Py_UNICODE_SIZE', lambda: sys.maxunicode == 0x10ffff, expected=4, warn=(impl == 'cp' and six.PY2)) and six.PY2: u = 'u' abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) elif soabi and soabi.startswith('cpython-'): abi = 'cp' + soabi.split('-')[1] elif soabi: abi = soabi.replace('.', '_').replace('-', '_') else: abi = None return abi def _is_running_32bit(): return sys.maxsize == 2147483647 def get_platform(): if sys.platform == 'darwin': release, _, machine = platform.mac_ver() split_ver = release.split('.') if machine == "x86_64" and _is_running_32bit(): machine = "i386" elif machine == "ppc64" and _is_running_32bit(): machine = "ppc" return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine) result = distutils.util.get_platform().replace('.', '_').replace('-', '_') if result == "linux_x86_64" and _is_running_32bit(): result = "linux_i686" return result def is_manylinux1_compatible(): if get_platform() not in {"linux_x86_64", "linux_i686"}: return False try: import _manylinux return bool(_manylinux.manylinux1_compatible) except (ImportError, AttributeError): pass return glibc.have_compatible_glibc(2, 5)
MIT License
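A quick usage sketch of get_darwin_arches, assuming the vendored setuptools.pep425tags module shown above is importable (newer setuptools releases have dropped it).

from setuptools.pep425tags import get_darwin_arches

# Arches (including group arches) a macOS 10.9 x86_64 machine can run.
print(get_darwin_arches(10, 9, 'x86_64'))
# ['x86_64', 'intel', 'fat64', 'fat32', 'universal']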
vertexproject/synapse
synapse/lib/jupyter.py
getDocData
python
def getDocData(fp, root=None):
    fpath = getDocPath(fp, root)

    if fpath.endswith('.yaml'):
        return s_common.yamlload(fpath)

    if fpath.endswith('.json'):
        return s_common.jsload(fpath)

    with s_common.genfile(fpath) as fd:
        if fpath.endswith('.mpk'):
            return s_msgpack.un(fd.read())
        if fpath.endswith('.jsonl'):
            recs = []
            for line in fd.readlines():
                recs.append(json.loads(line.decode()))
            return recs
        return fd.read()
Args:
    fn (str): Name of the file to retrieve the data of.
    root (str): Optional root path to look for a docdata directory in.

Notes:
    Will detect json/jsonl/yaml/mpk extensions and automatically
    decode that data if found; otherwise it returns bytes.

    Defaults to looking for the ``docdata`` directory in the current
    working directory. This behavior works fine for notebooks nested
    in the docs directory of synapse; but this root directory that is
    looked for may be overridden by providing an alternative root.

Returns:
    data: May be deserialized data or bytes.

Raises:
    ValueError if the file does not exist or directory traversal is
    attempted.
https://github.com/vertexproject/synapse/blob/a9d62ffacd9cc236ac52f92a734deef55c66ecf3/synapse/lib/jupyter.py#L63-L98
import os import copy import json import logging import pathlib import contextlib import synapse.common as s_common import synapse.cortex as s_cortex import synapse.lib.base as s_base import synapse.lib.cmdr as s_cmdr import synapse.lib.msgpack as s_msgpack loggers_to_supress = ( 'synapse.lib.view', ) def getDocPath(fn, root=None): cwd = pathlib.Path(os.getcwd()) if root: cwd = pathlib.Path(root) while True: dpath = cwd.joinpath('docdata') if dpath.is_dir(): break parent = cwd.parent if parent == cwd: raise ValueError(f'Unable to find data directory from {os.getcwd()}.') cwd = parent fpath = os.path.abspath(os.path.join(dpath.as_posix(), fn)) if not fpath.startswith(dpath.as_posix()): raise ValueError(f'Path escaping detected: {fn}') if not os.path.isfile(fpath): raise ValueError(f'File does not exist: {fn}') return fpath
Apache License 2.0
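A runnable sketch of getDocData, assuming the synapse package is installed; it builds a throwaway docdata/ layout in a temporary directory and passes that directory as the search root.

import os
import tempfile

from synapse.lib.jupyter import getDocData

with tempfile.TemporaryDirectory() as dirn:
    os.makedirs(os.path.join(dirn, 'docdata'))
    with open(os.path.join(dirn, 'docdata', 'sample.yaml'), 'w') as fd:
        fd.write('foo: [1, 2, 3]\n')
    # .yaml extension -> decoded into Python objects
    print(getDocData('sample.yaml', root=dirn))  # {'foo': [1, 2, 3]}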
muxinc/mux-python
mux_python/models/asset_response.py
AssetResponse.__init__
python
def __init__(self, data=None, local_vars_configuration=None):
    if local_vars_configuration is None:
        local_vars_configuration = Configuration.get_default_copy()
    self.local_vars_configuration = local_vars_configuration

    self._data = None
    self.discriminator = None

    if data is not None:
        self.data = data
AssetResponse - a model defined in OpenAPI
https://github.com/muxinc/mux-python/blob/57c10a3002a0bc65a0dc8938f08176bd5b030a93/mux_python/models/asset_response.py#L44-L54
import inspect
import pprint
import re

import six

from mux_python.configuration import Configuration


class AssetResponse(object):
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'data': 'Asset'
    }

    attribute_map = {
        'data': 'data'
    }
MIT License
google/transitfeed
merge.py
ApproximateDistanceBetweenPoints
python
def ApproximateDistanceBetweenPoints(pa, pb):
    alat, alon = pa
    blat, blon = pb
    sa = transitfeed.Stop(lat=alat, lng=alon)
    sb = transitfeed.Stop(lat=blat, lng=blon)
    return transitfeed.ApproximateDistanceBetweenStops(sa, sb)
Finds the distance between two points on the Earth's surface.

This is an approximate distance based on assuming that the Earth is a
sphere. The points are specified by their latitude and longitude.

Args:
    pa: the first (lat, lon) point tuple
    pb: the second (lat, lon) point tuple

Returns:
    The distance as a float in metres.
https://github.com/google/transitfeed/blob/d727e97cb66ac2ca2d699a382ea1d449ee26c2a1/merge.py#L63-L80
from __future__ import print_function

__author__ = 'timothy.stranex@gmail.com (Timothy Stranex)'

import datetime
import optparse
import os
import re
import sys
import time

import transitfeed
from transitfeed import util
import webbrowser
Apache License 2.0
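To make the sphere approximation concrete without depending on the transitfeed package, here is a standalone haversine sketch that gives comparable distances; the Earth radius value is an assumption, not taken from the transitfeed source.

import math

def approx_distance_m(pa, pb, radius_m=6371000.0):
    # Haversine great-circle distance on a spherical Earth, in metres.
    (lat1, lon1), (lat2, lon2) = pa, pb
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    return 2 * radius_m * math.asin(math.sqrt(a))

# Berlin to Paris, roughly 878 km.
print(round(approx_distance_m((52.5200, 13.4050), (48.8566, 2.3522)) / 1000))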
laurencium/causalinference
causalinference/causal.py
CausalModel.est_via_blocking
python
def est_via_blocking(self, adj=1):
    self.estimates['blocking'] = Blocking(self.strata, adj)
Estimates average treatment effects using regression within blocks.

This method should only be executed after the sample has been
stratified.

Parameters
----------
adj: int (0, 1, or 2)
    Indicates how covariate adjustments are to be performed for each
    within-bin regression.
    Set adj = 0 to not include any covariates.
    Set adj = 1 to include treatment indicator D and covariates X
    separately.
    Set adj = 2 to additionally include interaction terms between D
    and X.
    Defaults to 1.
https://github.com/laurencium/causalinference/blob/630e8fb195754a720da41791b725d3dadabfb257/causalinference/causal.py#L251-L272
from __future__ import division import numpy as np from itertools import combinations_with_replacement from .core import Data, Summary, Propensity, PropensitySelect, Strata from .estimators import OLS, Blocking, Weighting, Matching, Estimators class CausalModel(object): def __init__(self, Y, D, X): self.old_data = Data(Y, D, X) self.reset() def reset(self): Y, D, X = self.old_data['Y'], self.old_data['D'], self.old_data['X'] self.raw_data = Data(Y, D, X) self.summary_stats = Summary(self.raw_data) self.propensity = None self.cutoff = None self.blocks = None self.strata = None self.estimates = Estimators() def est_propensity(self, lin='all', qua=None): lin_terms = parse_lin_terms(self.raw_data['K'], lin) qua_terms = parse_qua_terms(self.raw_data['K'], qua) self.propensity = Propensity(self.raw_data, lin_terms, qua_terms) self.raw_data._dict['pscore'] = self.propensity['fitted'] self._post_pscore_init() def est_propensity_s(self, lin_B=None, C_lin=1, C_qua=2.71): lin_basic = parse_lin_terms(self.raw_data['K'], lin_B) self.propensity = PropensitySelect(self.raw_data, lin_basic, C_lin, C_qua) self.raw_data._dict['pscore'] = self.propensity['fitted'] self._post_pscore_init() def trim(self): if 0 < self.cutoff <= 0.5: pscore = self.raw_data['pscore'] keep = (pscore >= self.cutoff) & (pscore <= 1-self.cutoff) Y_trimmed = self.raw_data['Y'][keep] D_trimmed = self.raw_data['D'][keep] X_trimmed = self.raw_data['X'][keep] self.raw_data = Data(Y_trimmed, D_trimmed, X_trimmed) self.raw_data._dict['pscore'] = pscore[keep] self.summary_stats = Summary(self.raw_data) self.strata = None self.estimates = Estimators() elif self.cutoff == 0: pass else: raise ValueError('Invalid cutoff.') def trim_s(self): pscore = self.raw_data['pscore'] g = 1.0/(pscore*(1-pscore)) self.cutoff = select_cutoff(g) self.trim() def stratify(self): Y, D, X = self.raw_data['Y'], self.raw_data['D'], self.raw_data['X'] pscore = self.raw_data['pscore'] if isinstance(self.blocks, int): blocks = split_equal_bins(pscore, self.blocks) else: blocks = self.blocks[:] blocks[0] = 0 def subset(p_low, p_high): return (p_low < pscore) & (pscore <= p_high) subsets = [subset(*ps) for ps in zip(blocks, blocks[1:])] strata = [CausalModel(Y[s], D[s], X[s]) for s in subsets] self.strata = Strata(strata, subsets, pscore) def stratify_s(self): pscore_order = self.raw_data['pscore'].argsort() pscore = self.raw_data['pscore'][pscore_order] D = self.raw_data['D'][pscore_order] logodds = np.log(pscore / (1-pscore)) K = self.raw_data['K'] blocks_uniq = set(select_blocks(pscore, logodds, D, K, 0, 1)) self.blocks = sorted(blocks_uniq) self.stratify() def est_via_ols(self, adj=2): self.estimates['ols'] = OLS(self.raw_data, adj)
BSD 3-Clause New or Revised License
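A usage sketch on synthetic data, assuming the causalinference package is installed; the data-generating process below is illustrative and results vary with the random draw. As the docstring notes, propensity estimation and stratification must precede est_via_blocking.

import numpy as np
from causalinference import CausalModel

rng = np.random.RandomState(0)
X = rng.normal(size=(2000, 3))
pscore = 1 / (1 + np.exp(-X[:, 0]))   # treatment depends on the first covariate
D = rng.binomial(1, pscore)
Y = 2.0 * D + X.sum(axis=1) + rng.normal(size=2000)

cm = CausalModel(Y, D, X)
cm.est_propensity_s()   # fit the propensity score
cm.stratify_s()         # data-driven blocking on the estimated score
cm.est_via_blocking(adj=1)
print(cm.estimates)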
icb-dcm/pyabc
pyabc/sampler/redis_eps/sampler.py
post_check_acceptance
python
def post_check_acceptance(
        sample_with_id, ana_id, t, redis, ana_vars,
        logger: RedisSamplerLogger,
) -> Tuple:
    sample: Sample = sample_with_id[1]

    if not any(particle.preliminary for particle in sample.all_particles):
        n_accepted = len(sample.accepted_particles)
        if n_accepted != 1:
            raise AssertionError(
                "Expected exactly one accepted particle in sample.")
        logger.n_accepted += 1
        if sample.is_look_ahead:
            logger.n_lookahead_accepted += 1
        return sample_with_id, True

    if len(sample.all_particles) != 1:
        raise AssertionError(
            "Expected number of particles in sample: 1. "
            f"Got: {len(sample.all_particles)}")

    logger.n_preliminary += 1

    for particle in sample.all_particles:
        particle = evaluate_preliminary_particle(particle, t, ana_vars)

        if particle.accepted:
            sample.accepted_particles = [particle]
            sample.rejected_particles = []
            redis.incr(idfy(N_ACC, ana_id, t), 1)
            logger.n_accepted += 1
            logger.n_lookahead_accepted += 1
        else:
            sample.accepted_particles = []
            if sample.record_rejected:
                sample.rejected_particles = [particle]
            else:
                sample.rejected_particles = []

    return sample_with_id, len(sample.accepted_particles) > 0
Check whether the sample is really acceptable.

This is where evaluation of preliminary samples happens, using the
analysis variables from the actual generation `t` and the previously
simulated data. The sample is modified in-place.

Returns
-------
sample_with_id, any_accepted:
    The (maybe post-evaluated) id-sample tuple, and an indicator
    whether any particle in the sample was accepted, s.t. the sample
    should be kept.
https://github.com/icb-dcm/pyabc/blob/3cef3237a819caba40efe6eb4f775822b4d66955/pyabc/sampler/redis_eps/sampler.py#L604-L673
import numpy as np
from time import sleep
from datetime import datetime
import cloudpickle as pickle
import copy
import logging
from redis import StrictRedis
from typing import Callable, Dict, List, Tuple
from jabbar import jabbar

from ...inference_util import (
    AnalysisVars,
    create_simulate_function,
    evaluate_preliminary_particle,
    termination_criteria_fulfilled,
)
from ...distance import Distance
from ...epsilon import Epsilon
from ...acceptor import Acceptor
from ...sampler import Sampler
from ...weighted_statistics import effective_sample_size
from ...population import Sample
from .cmd import (
    SSA, N_EVAL, N_ACC, N_REQ, N_FAIL, N_LOOKAHEAD_EVAL, ALL_ACCEPTED,
    N_WORKER, QUEUE, MSG, START, MODE, DYNAMIC, SLEEP_TIME, BATCH_SIZE,
    IS_LOOK_AHEAD, ANALYSIS_ID, GENERATION, MAX_N_EVAL_LOOK_AHEAD, DONE_IXS,
    idfy,
)
from .redis_logging import RedisSamplerLogger

logger = logging.getLogger("ABC.Sampler")


class RedisSamplerBase(Sampler):

    def __init__(
        self,
        host: str = "localhost",
        port: int = 6379,
        password: str = None,
        log_file: str = None,
    ):
        super().__init__()
        logger.debug(
            f"Redis sampler: host={host} port={port}")
        self.redis: StrictRedis = StrictRedis(
            host=host, port=port, password=password)
        self.logger = RedisSamplerLogger(log_file)

    def n_worker(self) -> int:
        return self.redis.pubsub_numsub(MSG)[0][-1]

    def set_analysis_id(self, analysis_id: str):
        super().set_analysis_id(analysis_id)
        if self.redis.get(ANALYSIS_ID):
            raise AssertionError(
                "The server seems busy with an analysis already")
        self.redis.set(ANALYSIS_ID, analysis_id)

    def sample_until_n_accepted(
        self,
        n: int,
        simulate_one: Callable,
        t: int,
        *,
        max_eval: int = np.inf,
        all_accepted: bool = False,
        ana_vars: AnalysisVars = None,
    ) -> Sample:
        raise NotImplementedError()

    def stop(self):
        self.redis.delete(ANALYSIS_ID)
        self.redis.delete(idfy(GENERATION, self.analysis_id))


class RedisEvalParallelSampler(RedisSamplerBase):

    def __init__(
        self,
        host: str = "localhost",
        port: int = 6379,
        password: str = None,
        batch_size: int = 1,
        look_ahead: bool = False,
        look_ahead_delay_evaluation: bool = True,
        max_n_eval_look_ahead_factor: float = 10.,
        wait_for_all_samples: bool = False,
        log_file: str = None,
    ):
        super().__init__(
            host=host, port=port, password=password, log_file=log_file)
        self.batch_size: int = batch_size
        self.look_ahead: bool = look_ahead
        self.look_ahead_delay_evaluation: bool = look_ahead_delay_evaluation
        self.max_n_eval_look_ahead_factor: float = max_n_eval_look_ahead_factor
        self.wait_for_all_samples: bool = wait_for_all_samples

    def sample_until_n_accepted(
            self, n, simulate_one, t, *,
            max_eval=np.inf, all_accepted=False, ana_vars=None) -> Sample:
        ana_id = self.analysis_id

        def get_int(var: str):
            return int(self.redis.get(idfy(var, ana_id, t)).decode())

        if self.generation_t_was_started(t):
            self.redis.set(
                idfy(SSA, ana_id, t),
                pickle.dumps((simulate_one, self.sample_factory)))
            self.redis.set(idfy(N_REQ, ana_id, t), n)
            self.redis.set(idfy(IS_LOOK_AHEAD, ana_id, t), int(False))
            if get_int(N_WORKER) == 0 and get_int(N_ACC) < get_int(N_REQ):
                self.redis.publish(MSG, START)
        else:
            self.start_generation_t(
                n=n, t=t, simulate_one=simulate_one,
                all_accepted=all_accepted, is_look_ahead=False)

        id_results = []
        self.logger.reset_counters()

        with jabbar(total=n, enable=self.show_progress, keep=False) as bar:
            while len(id_results) < n:
                dump = self.redis.blpop(idfy(QUEUE, ana_id, t))[1]
                sample_with_id = pickle.loads(dump)

                sample_with_id, any_particle_accepted = post_check_acceptance(
                    sample_with_id, ana_id=ana_id, t=t, redis=self.redis,
                    ana_vars=ana_vars, logger=self.logger)

                if any_particle_accepted:
                    id_results.append(sample_with_id)
                    bar.update(len(id_results))

        self.maybe_start_next_generation(
            t=t, n=n, id_results=id_results, all_accepted=all_accepted,
            ana_vars=ana_vars)

        if self.wait_for_all_samples:
            while get_int(N_WORKER) > 0:
                sleep(SLEEP_TIME)
        else:
            max_ix = sorted(id_result[0] for id_result in id_results)[n-1]
            missing_ixs = set(range(1, max_ix+1))
            while (
                missing_ixs
                and get_int(N_WORKER) > 0
            ):
                _var = idfy(DONE_IXS, ana_id, t)
                with self.redis.pipeline(transaction=True) as p:
                    p.lrange(_var, 0, -1).delete(_var)
                    vals = p.execute()[0]
                for val in vals:
                    done_ix = int(val.decode())
                    if done_ix in missing_ixs:
                        missing_ixs.discard(done_ix)
                sleep(SLEEP_TIME)

        while self.redis.llen(idfy(QUEUE, ana_id, t)) > 0:
            dump = self.redis.blpop(idfy(QUEUE, ana_id, t))[1]
            sample_with_id = pickle.loads(dump)

            sample_with_id, any_particle_accepted = post_check_acceptance(
                sample_with_id, ana_id=ana_id, t=t, redis=self.redis,
                ana_vars=ana_vars, logger=self.logger)

            if any_particle_accepted:
                id_results.append(sample_with_id)

        self.nr_evaluations_ = get_int(N_EVAL)
        n_lookahead_eval = get_int(N_LOOKAHEAD_EVAL)

        if self.wait_for_all_samples:
            self.clear_generation_t(t=t)
        else:
            for _t in range(-1, t+1):
                n_worker_b = self.redis.get(idfy(N_WORKER, ana_id, _t))
                if n_worker_b is not None and int(n_worker_b.decode()) == 0:
                    pass

        sample = self.create_sample(id_results, n)

        self.logger.add_row(
            t=t, n_evaluated=self.nr_evaluations_,
            n_lookahead=n_lookahead_eval)
        self.logger.write()

        sample = self_normalize_within_subpopulations(sample, n)

        return sample

    def start_generation_t(
        self,
        n: int,
        t: int,
        simulate_one: Callable,
        all_accepted: bool,
        is_look_ahead: bool,
        max_n_eval_look_ahead: float = np.inf,
    ) -> None:
        ana_id = self.analysis_id

        (self.redis.pipeline()
         .set(idfy(SSA, ana_id, t),
              pickle.dumps((simulate_one, self.sample_factory)))
         .set(idfy(N_EVAL, ana_id, t), 0)
         .set(idfy(N_ACC, ana_id, t), 0)
         .set(idfy(N_REQ, ana_id, t), n)
         .set(idfy(N_FAIL, ana_id, t), 0)
         .set(idfy(N_LOOKAHEAD_EVAL, ana_id, t), 0)
         .set(idfy(ALL_ACCEPTED, ana_id, t), int(all_accepted))
         .set(idfy(N_WORKER, ana_id, t), 0)
         .set(idfy(BATCH_SIZE, ana_id, t), self.batch_size)
         .set(idfy(IS_LOOK_AHEAD, ana_id, t), int(is_look_ahead))
         .set(idfy(MAX_N_EVAL_LOOK_AHEAD, ana_id, t), max_n_eval_look_ahead)
         .set(idfy(MODE, ana_id, t), DYNAMIC)
         .set(idfy(GENERATION, ana_id), t)
         .execute())

        self.redis.publish(MSG, START)

    def generation_t_was_started(self, t: int) -> bool:
        return self.redis.exists(idfy(N_REQ, self.analysis_id, t))

    def clear_generation_t(self, t: int) -> None:
        ana_id = self.analysis_id
        (self.redis.pipeline()
         .delete(idfy(SSA, ana_id, t))
         .delete(idfy(N_EVAL, ana_id, t))
         .delete(idfy(N_ACC, ana_id, t))
         .delete(idfy(N_REQ, ana_id, t))
         .delete(idfy(N_FAIL, ana_id, t))
         .delete(idfy(N_LOOKAHEAD_EVAL, ana_id, t))
         .delete(idfy(ALL_ACCEPTED, ana_id, t))
         .delete(idfy(N_WORKER, ana_id, t))
         .delete(idfy(BATCH_SIZE, ana_id, t))
         .delete(idfy(IS_LOOK_AHEAD, ana_id, t))
         .delete(idfy(MAX_N_EVAL_LOOK_AHEAD, ana_id, t))
         .delete(idfy(MODE, ana_id, t))
         .delete(idfy(DONE_IXS, ana_id, t))
         .delete(idfy(QUEUE, ana_id, t))
         .execute())

    def maybe_start_next_generation(
        self,
        t: int,
        n: int,
        id_results: List,
        all_accepted: bool,
        ana_vars: AnalysisVars,
    ) -> None:
        if not self.look_ahead:
            return

        if all_accepted:
            return

        sample = self.create_sample(id_results, n)
        sample = copy.deepcopy(sample)

        sample = self_normalize_within_subpopulations(sample, n)
        sample.normalize_weights()

        population = sample.get_accepted_population()

        nr_evaluations = int(
            self.redis.get(idfy(N_EVAL, self.analysis_id, t)).decode())
        acceptance_rate = len(population) / nr_evaluations
        total_nr_simulations = \
            ana_vars.prev_total_nr_simulations + nr_evaluations
        walltime = datetime.now() - ana_vars.init_walltime

        if termination_criteria_fulfilled(
            current_eps=ana_vars.eps(t), min_eps=ana_vars.min_eps,
            stop_if_single_model_alive=ana_vars.stop_if_single_model_alive,
            nr_of_models_alive=population.nr_of_models_alive(),
            acceptance_rate=acceptance_rate,
            min_acceptance_rate=ana_vars.min_acceptance_rate,
            total_nr_simulations=total_nr_simulations,
            max_total_nr_simulations=ana_vars.max_total_nr_simulations,
            walltime=walltime, max_walltime=ana_vars.max_walltime,
            t=t, max_t=ana_vars.max_t,
        ):
            return

        simulate_one_prel = create_preliminary_simulate_one(
            t=t + 1, population=population,
            delay_evaluation=self.look_ahead_delay_evaluation,
            ana_vars=ana_vars)

        if self.look_ahead_delay_evaluation:
            nr_evaluations_ = int(
                self.redis.get(idfy(N_EVAL, self.analysis_id, t)).decode())
            max_n_eval_look_ahead = \
                nr_evaluations_ * self.max_n_eval_look_ahead_factor
        else:
            max_n_eval_look_ahead = np.inf

        self.start_generation_t(
            n=n, t=t + 1, simulate_one=simulate_one_prel,
            all_accepted=False, is_look_ahead=True,
            max_n_eval_look_ahead=max_n_eval_look_ahead)

    def create_sample(self, id_results: List[Tuple], n: int) -> Sample:
        id_results.sort(key=lambda x: x[0])
        id_results = id_results[:n]

        results = [res[1] for res in id_results]

        sample = self._create_empty_sample()
        for j in range(n):
            sample += results[j]

        return sample

    def check_analysis_variables(
        self,
        distance_function: Distance,
        eps: Epsilon,
        acceptor: Acceptor,
    ) -> None:
        if self.look_ahead_delay_evaluation:
            return

        def check_bad(var):
            if var.is_adaptive():
                raise AssertionError(
                    f"{var.__class__.__name__} cannot be used in look-ahead "
                    "mode without delayed acceptance. Consider setting the "
                    "sampler's `look_ahead_delay_evaluation` flag.")

        check_bad(acceptor)
        check_bad(distance_function)
        check_bad(eps)


def create_preliminary_simulate_one(
    t, population, delay_evaluation: bool, ana_vars: AnalysisVars,
) -> Callable:
    model_probabilities = population.get_model_probabilities()

    transitions = copy.deepcopy(ana_vars.transitions)
    for m in population.get_alive_models():
        parameters, w = population.get_distribution(m)
        transitions[m].fit(parameters, w)

    return create_simulate_function(
        t=t, model_probabilities=model_probabilities,
        model_perturbation_kernel=ana_vars.model_perturbation_kernel,
        transitions=transitions, model_prior=ana_vars.model_prior,
        parameter_priors=ana_vars.parameter_priors,
        models=ana_vars.models,
        summary_statistics=ana_vars.summary_statistics,
        x_0=ana_vars.x_0, distance_function=ana_vars.distance_function,
        eps=ana_vars.eps, acceptor=ana_vars.acceptor,
        evaluate=not delay_evaluation, proposal_id=-1,
    )
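For orientation, a minimal sketch of constructing the look-ahead sampler defined above, using only the constructor arguments shown in its __init__; the import path, host/port values, and the commented ABCSMC hand-off are assumptions for illustration rather than content of this record:

from pyabc.sampler import RedisEvalParallelSampler  # assumed import path

sampler = RedisEvalParallelSampler(
    host="localhost", port=6379,
    look_ahead=True,                   # proactively start generation t+1
    look_ahead_delay_evaluation=True,  # re-check preliminary particles later
)
# abc = pyabc.ABCSMC(model, prior, distance, sampler=sampler)  # assumed usage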
BSD 3-Clause New or Revised License