Dataset columns (string length ranges and distinct-value counts as reported by the viewer):

repository_name       string, lengths 7 to 107
function_path         string, lengths 4 to 190
function_identifier   string, lengths 1 to 236
language              string, 1 distinct value
function              string, lengths 9 to 647k
docstring             string, lengths 5 to 488k
function_url          string, lengths 71 to 285
context               string, lengths 0 to 2.51M
license               string, 5 distinct values
gujingxiao/baidustar2020-traffic-sign-detection-and-pair-competition-solution
star2020/process_match/matchsEval_F1.py
cal_iou
python
def cal_iou(box1, box2):
    xmin1, ymin1, xmax1, ymax1 = box1
    xmin2, ymin2, xmax2, ymax2 = box2
    s1 = (xmax1 - xmin1) * (ymax1 - ymin1)
    s2 = (xmax2 - xmin2) * (ymax2 - ymin2)
    xmin = max(xmin1, xmin2)
    ymin = max(ymin1, ymin2)
    xmax = min(xmax1, xmax2)
    ymax = min(ymax1, ymax2)
    w = max(0, xmax - xmin)
    h = max(0, ymax - ymin)
    area = w * h
    iou = area / (s1 + s2 - area)
    return iou
:param box1: [xmin1, ymin1, xmax1, ymax1]
:param box2: [xmin2, ymin2, xmax2, ymax2]
:return: intersection-over-union of the two boxes, in [0, 1]
https://github.com/gujingxiao/baidustar2020-traffic-sign-detection-and-pair-competition-solution/blob/8c67e7b3cb524db8489ac54f63711fe6090b74d9/star2020/process_match/matchsEval_F1.py#L56-L78
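A quick sanity check of cal_iou (editor's illustration; the box values are made up, and floats avoid any integer-division surprises):

    # Boxes are [xmin, ymin, xmax, ymax].
    box_a = [0.0, 0.0, 10.0, 10.0]      # area 100
    box_b = [5.0, 5.0, 15.0, 15.0]      # area 100, overlap is a 5x5 square
    print(cal_iou(box_a, box_b))        # 25 / (100 + 100 - 25) = 0.142857...
    print(cal_iou(box_a, box_a))        # identical boxes -> 1.0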
import os
import numpy as np
import json
import matplotlib.pyplot as plt

labels = {
    '102': 0, '103': 1, '104': 2, '105': 3, '106': 4, '107': 5,
    '108': 6, '109': 7, '110': 8, '111': 9, '112': 10, '201': 11,
    '202': 12, '203': 13, '204': 14, '205': 15, '206': 16, '207': 17,
    '301': 18,
}

def idgettype(signid, signs_list):
    for sign_info in signs_list:
        if sign_info['sign_id'] == signid:
            return sign_info['type']

def deleteDuplicatedElementFromList(list):
    resultList = []
    for item in list:
        if len(resultList) == 0:
            resultList.append(item)
        else:
            flag = 1
            for item1 in resultList:
                if item == item1:
                    flag = 0
                else:
                    continue
            if flag == 1:
                resultList.append(item)
    return resultList
Apache License 2.0
mozilla/elasticutils
elasticutils/__init__.py
S.filter_raw
python
def filter_raw(self, filter_):
    return self._clone(next_step=('filter_raw', filter_))
Return a new S instance with a filter_raw.

:arg filter_: Python dict specifying the complete filter to send to
    Elasticsearch

Example::

    S().filter_raw({'term': {'title': 'example'}})

.. Note::

   If there's a filter_raw in your S, then that's your filter. All
   ``.filter()`` calls and anything else that affects the filter
   clause are ignored.
https://github.com/mozilla/elasticutils/blob/b880cc5d51fb1079b0581255ec664c1ec934656e/elasticutils/__init__.py#L819-L837
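A minimal sketch of the precedence described in the note above (the field name is hypothetical):

    # filter_raw wins: the raw dict becomes the entire filter clause,
    # so the .filter() call below has no effect on the final search.
    s = S().filter(title='ignored').filter_raw({'term': {'title': 'example'}})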
import copy import logging from datetime import datetime import six from six import string_types from elasticsearch import Elasticsearch from elasticsearch.helpers import bulk_index from elasticutils._version import __version__ from elasticutils import monkeypatch monkeypatch.monkeypatch_es() log = logging.getLogger('elasticutils') DEFAULT_URLS = ['localhost'] DEFAULT_DOCTYPES = None DEFAULT_INDEXES = None DEFAULT_TIMEOUT = 5 FACET_TYPES = [ 'date_histogram', 'filter', 'histogram', 'query', 'range', 'statistical', 'terms', 'terms_stats', ] QUERY_ACTION_MAP = { None: 'term', 'in': 'in', 'term': 'term', 'terms': 'terms', 'prefix': 'prefix', 'match': 'match', 'match_phrase': 'match_phrase', 'wildcard': 'wildcard', 'fuzzy': 'fuzzy' } MATCH_ACTIONS = ['match', 'match_phrase'] RANGE_ACTIONS = ['gt', 'gte', 'lt', 'lte'] class ElasticUtilsError(Exception): pass class InvalidFieldActionError(ElasticUtilsError): pass class InvalidFlagsError(ElasticUtilsError): pass class InvalidFacetType(ElasticUtilsError): pass class BadSearch(ElasticUtilsError): pass def _build_key(urls, timeout, **settings): settings = sorted(settings.items(), key=lambda item: item[0]) settings = repr([(k, v) for k, v in settings]) if isinstance(urls, string_types): urls = (urls,) else: urls = tuple(urls) key = (urls, timeout, settings) return key _cached_elasticsearch = {} def get_es(urls=None, timeout=DEFAULT_TIMEOUT, force_new=False, **settings): urls = urls or DEFAULT_URLS if 'hosts' in settings: raise DeprecationWarning('"hosts" is deprecated in favor of "urls".') if not force_new: key = _build_key(urls, timeout, **settings) if key in _cached_elasticsearch: return _cached_elasticsearch[key] es = Elasticsearch(urls, timeout=timeout, **settings) if not force_new: _cached_elasticsearch[key] = es return es def split_field_action(s): if '__' in s: return s.rsplit('__', 1) return s, None def _process_facets(facets, flags): rv = {} for fieldname in facets: facet_type = {'terms': {'field': fieldname}} if flags.get('size'): facet_type['terms']['size'] = flags['size'] if flags.get('global_'): facet_type['global'] = flags['global_'] elif flags.get('filtered'): facet_type['facet_filter'] = None rv[fieldname] = facet_type return rv def _facet_counts(items): facets = {} for name, data in items: facets[name] = FacetResult(name, data) return facets class FacetResult(object): def __init__(self, name, data, *args, **kwargs): if data['_type'] not in FACET_TYPES: raise InvalidFacetType( 'Facet _type "{0}". 
key "{1}" val "{2}"'.format( data['_type'], name, data)) self._data = data self.__dict__.update(data) for attr in ('entries', 'ranges', 'terms'): if attr in data: self.data = getattr(self, attr)[:] break else: self.data = [] def __repr__(self): return repr(self._data) def __iter__(self): return iter(self.data) def __getitem__(self, key): try: return getattr(self, key) except AttributeError as exc: raise KeyError(exc.message) class F(object): def __init__(self, **filters): filters = filters.items() if six.PY3: filters = list(filters) if len(filters) > 1: self.filters = [{'and': filters}] else: self.filters = filters def __repr__(self): return '<F {0}>'.format(self.filters) def _combine(self, other, conn='and'): f = F() self_filters = copy.deepcopy(self.filters) other_filters = copy.deepcopy(other.filters) if not self.filters: f.filters = other_filters elif not other.filters: f.filters = self_filters elif conn in self.filters[0]: f.filters = self_filters f.filters[0][conn].extend(other_filters) elif conn in other.filters[0]: f.filters = other_filters f.filters[0][conn].extend(self_filters) else: f.filters = [{conn: self_filters + other_filters}] return f def __or__(self, other): return self._combine(other, 'or') def __and__(self, other): return self._combine(other, 'and') def __invert__(self): f = F() self_filters = copy.deepcopy(self.filters) if len(self_filters) == 0: f.filters = [] elif (len(self_filters) == 1 and isinstance(self_filters[0], dict) and self_filters[0].get('not', {}).get('filter', {})): f.filters = self_filters[0]['not']['filter'] else: f.filters = [{'not': {'filter': self_filters}}] return f class Q(object): def __init__(self, **queries): self.should_q = [] self.must_q = [] self.must_not_q = [] should_flag = queries.pop('should', False) must_flag = queries.pop('must', False) must_not_flag = queries.pop('must_not', False) if should_flag + must_flag + must_not_flag > 1: raise InvalidFlagsError( 'Either should, must or must_not can be True, but not ' 'more than one.') if should_flag: self.should_q.extend(queries.items()) elif must_not_flag: self.must_not_q.extend(queries.items()) else: self.must_q.extend(queries.items()) def __repr__(self): return '<Q should={0} must={1} must_not={2}>'.format( self.should_q, self.must_q, self.must_not_q) def __add__(self, other): q = Q() q.should_q = list(self.should_q) q.must_q = list(self.must_q) q.must_not_q = list(self.must_not_q) q.should_q.extend(other.should_q) q.must_q.extend(other.must_q) q.must_not_q.extend(other.must_not_q) return q def __eq__(self, other): return (sorted(self.should_q) == sorted(other.should_q) and sorted(self.must_q) == sorted(other.must_q) and sorted(self.must_not_q) == sorted(other.must_not_q)) def _boosted_value(name, action, key, value, boost): if boost is not None: value_key = 'query' if action in MATCH_ACTIONS else 'value' return {name: {'boost': boost, value_key: value}} return {name: value} class PythonMixin(object): def to_python(self, obj): if isinstance(obj, string_types): if len(obj) == 26: try: return datetime.strptime(obj, '%Y-%m-%dT%H:%M:%S.%f') except (TypeError, ValueError): pass elif len(obj) == 19: try: return datetime.strptime(obj, '%Y-%m-%dT%H:%M:%S') except (TypeError, ValueError): pass elif len(obj) == 10: try: return datetime.strptime(obj, '%Y-%m-%d') except (TypeError, ValueError): pass elif isinstance(obj, dict): for key, val in obj.items(): obj[key] = self.to_python(val) elif isinstance(obj, list): return [self.to_python(item) for item in obj] return obj class S(PythonMixin): def 
__init__(self, type_=None): self.type = type_ self.steps = [] self.start = 0 self.stop = None self.as_list = self.as_dict = False self.field_boosts = {} self._results_cache = None def __repr__(self): try: return '<S {0}>'.format(repr(self.build_search())) except RuntimeError: return repr(self.steps) def _clone(self, next_step=None): new = self.__class__(self.type) new.steps = list(self.steps) if next_step: new.steps.append(next_step) new.start = self.start new.stop = self.stop new.field_boosts = self.field_boosts.copy() return new def es(self, **settings): return self._clone(next_step=('es', settings)) def indexes(self, *indexes): return self._clone(next_step=('indexes', indexes)) def doctypes(self, *doctypes): return self._clone(next_step=('doctypes', doctypes)) def explain(self, value=True): return self._clone(next_step=('explain', value)) def values_list(self, *fields): return self._clone(next_step=('values_list', fields)) def values_dict(self, *fields): return self._clone(next_step=('values_dict', fields)) def order_by(self, *fields): return self._clone(next_step=('order_by', fields)) def query(self, *queries, **kw): q = Q() for query in queries: q += query if 'or_' in kw: or_query = kw.pop('or_') or_query['should'] = True q += Q(**or_query) q += Q(**kw) return self._clone(next_step=('query', q)) def query_raw(self, query): return self._clone(next_step=('query_raw', query)) def filter(self, *filters, **kw): items = kw.items() if six.PY3: items = list(items) return self._clone( next_step=('filter', list(filters) + items))
BSD 3-Clause New or Revised License
cfedermann/appraise
appraise/wmt16/views.py
_compute_language_pair_stats
python
def _compute_language_pair_stats():
    language_pair_stats = []
    for choice in LANGUAGE_PAIR_CHOICES:
        _code = choice[0]
        _name = choice[1]
        _remaining_hits = HIT.compute_remaining_hits(language_pair=_code)
        _completed_hits = HIT.objects.filter(completed=True, mturk_only=False,
          language_pair=_code)
        _unique_systems_for_language_pair = set()
        for _hit in _completed_hits:
            for _result in RankingResult.objects.filter(item__hit=_hit):
                for _translation in _result.item.translations:
                    for _system in set(_translation[1]['system'].split(',')):
                        _unique_systems_for_language_pair.add(_system)
        LOGGER.info(_unique_systems_for_language_pair)
        _completed_hits = _completed_hits.count()
        _total_hits = _remaining_hits + _completed_hits
        _data = (
            _name,
            len(_unique_systems_for_language_pair),
            (_remaining_hits, 100 * _remaining_hits/float(_total_hits or 1)),
            (_completed_hits, 100 * _completed_hits/float(_total_hits or 1))
        )
        language_pair_stats.append(_data)
    return language_pair_stats
Computes HIT statistics per language pair.
https://github.com/cfedermann/appraise/blob/2cce477efd5594699d6e0fa58f6312df60e05394/appraise/wmt16/views.py#L664-L700
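The rows it returns unpack as below (a sketch inferred from the tuple built in the function; the printing loop is the editor's addition):

    stats = _compute_language_pair_stats()
    for name, n_systems, (remaining, pct_rem), (completed, pct_done) in stats:
        print('%s: %d systems, %d remaining (%.1f%%), %d completed (%.1f%%)'
              % (name, n_systems, remaining, pct_rem, completed, pct_done))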
 import logging from datetime import datetime, timedelta from hashlib import md5 from os.path import join from random import seed, shuffle from subprocess import check_output from tempfile import gettempdir from urllib import unquote from django.contrib.auth import authenticate, login from django.contrib.auth.decorators import login_required from django.contrib.auth.models import Group, User from django.core.urlresolvers import reverse from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned from django.http import HttpResponse, HttpResponseForbidden from django.shortcuts import get_object_or_404, redirect, render from django.template import Context from django.template.loader import get_template from appraise.wmt16.models import LANGUAGE_PAIR_CHOICES, UserHITMapping, HIT, RankingTask, RankingResult, UserHITMapping, UserInviteToken, Project, GROUP_HIT_REQUIREMENTS, MAX_USERS_PER_HIT, initialize_database, TimedKeyValueData from appraise.settings import LOG_LEVEL, LOG_HANDLER, COMMIT_TAG, ROOT_PATH, STATIC_URL from appraise.utils import datetime_to_seconds, seconds_to_timedelta logging.basicConfig(level=LOG_LEVEL) LOGGER = logging.getLogger('appraise.wmt16.views') LOGGER.addHandler(LOG_HANDLER) BASE_CONTEXT = { 'commit_tag': COMMIT_TAG, 'title': 'Appraise evaluation system', 'installed_apps': ['wmt16'], 'static_url': STATIC_URL, } STATUS_CACHE = {} RANKINGS_CACHE = {} initialize_database() def _identify_groups_for_user(user): groups = [] for group in user.groups.all(): if group.name == 'WMT16' or group.name.lower().startswith('wmt') or group.name.startswith('eng2') or group.name.endswith('2eng'): continue if not group in groups: groups.append(group) return groups def _get_active_users_for_group(group_to_check): ninetydaysago = datetime.now() - timedelta(days=90) active_users = [] if group_to_check.exists(): active_users = group_to_check[0].user_set.filter(last_login__gt=ninetydaysago) return active_users def _compute_next_task_for_user(user, project, language_pair): if not project in user.project_set.all(): LOGGER.debug('User {0} does not work on project {1}.'.format( user, project )) return None if not user.groups.filter(name=language_pair): LOGGER.debug('User {0} does not know language pair {1}.'.format( user, language_pair)) return None current_hitmap = UserHITMapping.objects.filter(user=user, project=project, hit__language_pair=language_pair) if not current_hitmap: LOGGER.debug('No current HIT for user {0}, fetching HIT.'.format( user)) hits = HIT.objects.filter(active=True, mturk_only=False, completed=False, project=project, language_pair=language_pair) LOGGER.debug("HITs = {0}".format(hits)) hit_ids = list(set(hits.values_list('hit_id', flat=True))) shuffle(hit_ids) LOGGER.debug("HIT IDs = {0}".format(hit_ids)) random_hit = None for hit_id in hit_ids: for hit in hits.filter(hit_id=hit_id): hit_users = list(hit.users.all()) for hitmap in UserHITMapping.objects.filter(hit=hit): if not hitmap.user in hit_users: hit_users.append(hitmap.user) if not user in hit_users: if len(hit_users) < MAX_USERS_PER_HIT: random_hit = hit break if random_hit: break if not random_hit: return None current_hitmap = UserHITMapping.objects.create(user=user, project=project, hit=random_hit) else: current_hitmap = current_hitmap[0] hit_users = list(current_hitmap.hit.users.all()) if user in hit_users or len(hit_users) >= 1 or not current_hitmap.hit.active: LOGGER.debug('Detected stale User/HIT mapping {0}->{1}'.format( user, current_hitmap.hit)) current_hitmap.delete() return 
_compute_next_task_for_user(user, project, language_pair) LOGGER.debug('User {0} currently working on HIT {1}'.format(user, current_hitmap.hit)) return current_hitmap.hit def _save_results(item, user, duration, raw_result): LOGGER.debug('item: {}, user: {}, duration: {}, raw_result: {}'.format( item, user, duration, raw_result.encode('utf-8'))) _existing_result = RankingResult.objects.filter(item=item, user=user) if _existing_result: _result = _existing_result[0] else: _result = RankingResult(item=item, user=user) LOGGER.debug(u'\n\nResults data for user "{0}":\n\n{1}\n'.format( user.username or "Anonymous", u'\n'.join([str(x) for x in [_result, duration, raw_result]]))) _result.duration = str(duration) _result.raw_result = raw_result _result.save() def _find_next_item_to_process(items, user, random_order=False): user_results = RankingResult.objects.filter(user=user) processed_items = user_results.values_list('item__pk', flat=True) unprocessed_items = list(items.exclude(pk__in=processed_items)) if random_order: shuffle(unprocessed_items) if unprocessed_items: return unprocessed_items[0] return None def _compute_context_for_item(item): source_text = [None, None, None] reference_text = [None, None, None] left_context = RankingTask.objects.filter(hit=item.hit, pk=item.id-1) right_context = RankingTask.objects.filter(hit=item.hit, pk=item.id+1) _item_doc_id = getattr(item.attributes, 'doc-id', None) source_text[1] = item.source[0] if item.reference: reference_text[1] = item.reference[0] if left_context: _left = left_context[0] _left_doc_id = getattr(_left.attributes, 'doc-id', None) if _left_doc_id == _item_doc_id: source_text[0] = _left.source[0] if _left.reference: reference_text[0] = _left.reference[0] if right_context: _right = right_context[0] _right_doc_id = getattr(_right.attributes, 'doc-id', None) if _right_doc_id == _item_doc_id: source_text[2] = _right.source[0] if _right.reference: reference_text[2] = _right.reference[0] return (source_text, reference_text) @login_required def _handle_ranking(request, task, items): form_valid = False if request.method == "POST": item_id = request.POST.get('item_id', None) end_timestamp = request.POST.get('end_timestamp', None) order_random = request.POST.get('order', None) start_timestamp = request.POST.get('start_timestamp', None) submit_button = request.POST.get('submit_button', None) form_valid = all((item_id, end_timestamp, order_random, start_timestamp, submit_button)) if form_valid: current_item = get_object_or_404(RankingTask, pk=int(item_id)) start_datetime = datetime.fromtimestamp(float(start_timestamp)) end_datetime = datetime.fromtimestamp(float(end_timestamp)) duration = end_datetime - start_datetime order = [int(x) for x in order_random.split(',')] ranks = {} for index in range(len(current_item.translations)): rank = request.POST.get('rank_{0}'.format(index), -1) ranks[order[index]] = int(rank) if submit_button == 'FLAG_ERROR': _raw_result = 'SKIPPED' elif submit_button == 'SUBMIT': _raw_result = range(len(current_item.translations)) _raw_result = ','.join([str(ranks[x]) for x in _raw_result]) _results_data = [current_item, type(current_item), request.user, type(request.user), duration, type(duration), _raw_result, type(_raw_result)] LOGGER.debug(u'\n\nResults data for user "{0}":\n\n{1}\n'.format( request.user.username or "Anonymous", u'\n'.join([str(x) for x in _results_data]))) _save_results(current_item, request.user, duration, _raw_result) item = _find_next_item_to_process(items, request.user, False) if not item: return 
redirect('appraise.wmt16.views.overview') source_text, reference_text = _compute_context_for_item(item) finished_items = 1 + RankingResult.objects.filter(user=request.user, item__hit=item.hit).count() translations = [] order = range(len(item.translations)) shuffle(order) for index in order: translations.append(item.translations[index]) dictionary = { 'action_url': request.path, 'item_id': item.id, 'sentence_id': item.source[1]['id'], 'language_pair': item.hit.get_language_pair_display(), 'order': ','.join([str(x) for x in order]), 'reference_text': reference_text, 'source_text': source_text, 'task_progress': '{0}/3'.format(finished_items), 'title': 'Ranking', 'translations': translations, } dictionary.update(BASE_CONTEXT) return render(request, 'wmt16/ranking.html', dictionary) @login_required def hit_handler(request, hit_id): LOGGER.info('Rendering task handler view for user "{0}".'.format( request.user.username or "Anonymous")) hit = get_object_or_404(HIT, hit_id=hit_id) if not hit.active: LOGGER.debug('Detected inactive User/HIT mapping {0}->{1}'.format( request.user, hit)) if hit.project_set.count() > 0: annotation_project = list(hit.project_set.all())[0] new_hit = _compute_next_task_for_user(request.user, annotation_project, hit.language_pair) if new_hit: return redirect('appraise.wmt16.views.hit_handler', hit_id=new_hit.hit_id) return redirect('appraise.wmt16.views.overview') items = RankingTask.objects.filter(hit=hit) if not items: return redirect('appraise.wmt16.views.overview') return _handle_ranking(request, hit, items) @login_required def overview(request): LOGGER.info('Rendering WMT16 HIT overview for user "{0}".'.format( request.user.username or "Anonymous")) seed(None) language_codes = set([x[0] for x in LANGUAGE_PAIR_CHOICES]) language_pairs = request.user.groups.filter(name__in=language_codes) annotation_projects = request.user.project_set.all() hit_data = [] total = [0, 0, 0] for language_pair in language_pairs: for annotation_project in annotation_projects: hit = _compute_next_task_for_user(request.user, annotation_project, language_pair) user_status = HIT.compute_status_for_user(request.user, annotation_project, language_pair) for i in range(3): total[i] = total[i] + user_status[i] if hit: for i in range(2): user_status[i+1] = seconds_to_timedelta(int(user_status[i+1])) hit_data.append( (hit.get_language_pair_display(), hit.get_absolute_url(), hit.hit_id, user_status, annotation_project) ) total[1] = seconds_to_timedelta(int(total[2]) / float(int(total[0]) or 1)) total[1] = total[1] - timedelta(microseconds=total[1].microseconds) total[2] = seconds_to_timedelta(int(total[2])) groups = _identify_groups_for_user(request.user) group = None if len(groups) > 1: LOGGER.debug(u'User "{0}" assigned to multiple annotation groups: {1}'.format( request.user.username or u'Anonymous', u', '.join([x.name for x in groups])) ) group = groups[0] if group is not None: group_name = group.name group_status = HIT.compute_status_for_group(group) for i in range(2): group_status[i+1] = seconds_to_timedelta(int(group_status[i+1])) else: group_status = None group_name = None LOGGER.debug(u'\n\nHIT data for user "{0}":\n\n{1}\n'.format( request.user.username or "Anonymous", u'\n'.join([u'{0}\t{1}\t{2}\t{3}'.format(*x) for x in hit_data]))) admin_url = None if request.user.is_superuser: admin_url = reverse('admin:index') dictionary = { 'active_page': "OVERVIEW", 'hit_data': hit_data, 'total': total, 'group_name': group_name, 'group_status': group_status, 'admin_url': admin_url, 'title': 'WMT16 
Dashboard', 'annotation_groups': [x.name for x in groups], } dictionary.update(BASE_CONTEXT) LOGGER.info(dictionary.values()) return render(request, 'wmt16/overview.html', dictionary) @login_required def status(request): LOGGER.info('Rendering WMT16 HIT status for user "{0}".'.format( request.user.username or "Anonymous")) if not STATUS_CACHE.has_key('global_stats'): update_status(key='global_stats') if not STATUS_CACHE.has_key('language_pair_stats'): update_status(key='language_pair_stats') if not STATUS_CACHE.has_key('group_stats'): update_status(key='group_stats') if not STATUS_CACHE.has_key('user_stats'): update_status(key='user_stats') admin_url = None if request.user.is_superuser: admin_url = reverse('admin:index') dictionary = { 'active_page': "STATUS", 'global_stats': STATUS_CACHE['global_stats'], 'language_pair_stats': STATUS_CACHE['language_pair_stats'], 'group_stats': STATUS_CACHE['group_stats'], 'user_stats': STATUS_CACHE['user_stats'], 'clusters': RANKINGS_CACHE.get('clusters', []), 'admin_url': admin_url, 'title': 'WMT16 Status', } dictionary.update(BASE_CONTEXT) return render(request, 'wmt16/status.html', dictionary) def update_ranking(request=None): if request is not None: RANKINGS_CACHE['clusters'] = _compute_ranking_clusters(load_file=True) return HttpResponse('Ranking updated successfully') else: RANKINGS_CACHE['clusters'] = _compute_ranking_clusters() def update_status(request=None, key=None): status_keys = ('global_stats', 'language_pair_stats', 'group_stats', 'user_stats', 'clusters') if key: status_keys = (key,) for status_key in status_keys: if status_key == 'global_stats': STATUS_CACHE[status_key] = _compute_global_stats() elif status_key == 'language_pair_stats': STATUS_CACHE[status_key] = _compute_language_pair_stats() elif status_key == 'group_stats': STATUS_CACHE[status_key] = _compute_group_stats() elif status_key == 'user_stats': user_stats = _compute_user_stats() STATUS_CACHE[status_key] = user_stats[:25] if request is not None: return HttpResponse('Status updated successfully') def _compute_global_stats(): global_stats = [] wmt16_group = Group.objects.filter(name='WMT16') wmt16_users = _get_active_users_for_group(wmt16_group) hits_completed = HIT.objects.filter(mturk_only=False, completed=True).count() for hit in HIT.objects.filter(active=True, mturk_only=False, completed=False): if hit.users.count() >= 1: hits_completed = hits_completed + 1 hit.completed = True hit.save() hits_remaining = HIT.compute_remaining_hits() ranking_results = RankingResult.objects.filter( item__hit__completed=True, item__hit__mturk_only=False) from math import factorial system_comparisons = 0 for result in ranking_results: result.reload_dynamic_fields() combinations = factorial(result.systems)/(factorial(result.systems-2) * 2) if result.systems > 2 else 0 system_comparisons = system_comparisons + combinations groups = set() for user in wmt16_users: for group in _identify_groups_for_user(user): groups.add(group) durations = RankingResult.objects.all().values_list('duration', flat=True) total_time = sum([datetime_to_seconds(x) for x in durations]) avg_time = total_time / float(hits_completed or 1) avg_user_time = total_time / float(3 * hits_completed or 1) global_stats.append(('Users', len(wmt16_users))) global_stats.append(('Groups', len(groups))) global_stats.append(('HITs completed', '{0:,}'.format(hits_completed))) global_stats.append(('HITs remaining', '{0:,}'.format(hits_remaining))) global_stats.append(('Ranking results', '{0:,}'.format(ranking_results.count()))) 
global_stats.append(('System comparisons', '{0:,}'.format(system_comparisons))) global_stats.append(('Average duration (per HIT)', seconds_to_timedelta(avg_time))) global_stats.append(('Average duration (per task)', seconds_to_timedelta(avg_user_time))) global_stats.append(('Total duration', seconds_to_timedelta(total_time))) TimedKeyValueData.update_status_if_changed('users', str(len(wmt16_users))) TimedKeyValueData.update_status_if_changed('groups', str(len(groups))) TimedKeyValueData.update_status_if_changed('hits_completed', str(hits_completed)) TimedKeyValueData.update_status_if_changed('hits_remaining', str(hits_remaining)) TimedKeyValueData.update_status_if_changed('ranking_results', str(ranking_results.count())) TimedKeyValueData.update_status_if_changed('system_comparisons', str(system_comparisons)) TimedKeyValueData.update_status_if_changed('duration_per_hit', str(seconds_to_timedelta(avg_time))) TimedKeyValueData.update_status_if_changed('duration_per_task', str(seconds_to_timedelta(avg_user_time))) TimedKeyValueData.update_status_if_changed('duration_total', str(seconds_to_timedelta(total_time))) return global_stats
BSD 3-Clause New or Revised License
trevor/calendarserver
txdav/caldav/datastore/scheduling/implicit.py
ImplicitScheduler.findRemovedAttendeesOnRecurrenceChange
python
def findRemovedAttendeesOnRecurrenceChange(self):
    self.cancelledAttendees = set()
    new_master_attendees = set([
        attendee for attendee, _ignore in
        self.calendar.masterComponent().getAttendeesByInstance(
            onlyScheduleAgentServer=True)
    ])
    for attendee, rid in self.oldAttendeesByInstance:
        if attendee not in new_master_attendees:
            self.cancelledAttendees.add((attendee, rid,))
Look for attendees that have been removed during a change to the
overall recurrence. This is a special case to try and minimize the
number of cancels sent to just those attendees actually removed.

The basic policy is this:

1) If an attendee is present in the master component of the new event,
   they never receive a CANCEL as they will always receive a REQUEST
   with the entire new event data, i.e., they will see an event
   "replacement" rather than a cancel + new request.

2) For all attendees in the old event, not in the new master, send a
   cancel of the master or each override they appear in. That happens
   even if they appear in an override in the new calendar, since in
   all likelihood there is no guaranteed exact mapping between old and
   new instances.
https://github.com/trevor/calendarserver/blob/c9970b06a70445ca75b62e3d170c26bc897a035e/txdav/caldav/datastore/scheduling/implicit.py#L981-L1002
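A worked example of that policy (hypothetical attendees; rid is a recurrence-id, or None for the master):

    # old: A and B on the master, C only in an override at rid1
    # new: master lists only A
    # oldAttendeesByInstance = [('A', None), ('B', None), ('C', rid1)]
    # new_master_attendees   = {'A'}
    # cancelledAttendees     = {('B', None), ('C', rid1)}
    # A receives a REQUEST with the replacement event and no CANCEL.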
from twext.python.log import Logger from txweb2.dav.http import ErrorResponse from twisted.internet.defer import inlineCallbacks, returnValue from txweb2 import responsecode from txweb2.http import HTTPError from twistedcaldav.caldavxml import caldav_namespace from twistedcaldav.config import config from twistedcaldav.ical import Property from txdav.caldav.datastore.scheduling.caldav.scheduler import CalDAVScheduler from txdav.caldav.datastore.scheduling.cuaddress import InvalidCalendarUser, LocalCalendarUser, OtherServerCalendarUser, calendarUserFromCalendarUserAddress, calendarUserFromCalendarUserUID from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr, uidFromCalendarUserAddress from txdav.caldav.datastore.scheduling.icaldiff import iCalDiff from txdav.caldav.datastore.scheduling.itip import iTipGenerator, iTIPRequestStatus from txdav.caldav.datastore.scheduling.utils import getCalendarObjectForRecord from txdav.caldav.datastore.scheduling.work import ScheduleReplyWork, ScheduleReplyCancelWork, ScheduleOrganizerWork, ScheduleOrganizerSendWork import collections __all__ = [ "ImplicitScheduler", ] log = Logger() class ImplicitSchedulingWorkError(Exception): pass class ImplicitScheduler(object): STATUS_OK = 0 STATUS_ORPHANED_CANCELLED_EVENT = 1 STATUS_ORPHANED_EVENT = 2 def __init__(self, logItems=None): self.return_status = ImplicitScheduler.STATUS_OK self.logItems = logItems self.allowed_to_schedule = True self.suppress_refresh = False self.split_details = None NotAllowedExceptionDetails = collections.namedtuple("NotAllowedExceptionDetails", ("type", "args", "kwargs",)) def setSchedulingNotAllowed(self, ex, *ex_args, **ex_kwargs): self.not_allowed = ImplicitScheduler.NotAllowedExceptionDetails(ex, ex_args, ex_kwargs) self.allowed_to_schedule = False def testSchedulingAllowed(self): if not self.allowed_to_schedule: raise self.not_allowed.type(*self.not_allowed.args, **self.not_allowed.kwargs) @inlineCallbacks def testImplicitSchedulingPUT(self, parent, resource, calendar, internal_request=False): self.txn = parent._txn self.parent = parent self.resource = resource self.calendar = calendar self.internal_request = internal_request self.calendar_home = self.parent.ownerHome() existing_resource = resource is not None is_scheduling_object = (yield self.checkSchedulingObjectResource(resource)) existing_type = "schedule" if is_scheduling_object else "calendar" new_type = "schedule" if (yield self.checkImplicitState()) else "calendar" if existing_type != new_type and existing_resource: resource.isScheduleObject = None is_scheduling_object = (yield self.checkSchedulingObjectResource(resource)) existing_type = "schedule" if is_scheduling_object else "calendar" if existing_type == "calendar": self.action = "create" if new_type == "schedule" else "none" else: self.action = "modify" if new_type == "schedule" else "remove" if not existing_resource or self.action == "create": yield self.hasCalendarResourceUIDSomewhereElse(resource, new_type) if self.action == "remove": if self.organizer is None and self.attendees: log.error("organizer-allowed: Organizer removal also requires attendees to be removed for UID: {uid}", uid=self.uid) raise HTTPError(ErrorResponse( responsecode.FORBIDDEN, (caldav_namespace, "organizer-allowed"), "Organizer removal also requires attendees to be removed.", )) self.return_calendar = calendar self.calendar = (yield resource.componentForUser()) yield self.checkImplicitState() self.checkValidOrganizer() if ( not self.internal_request and self.state == "attendee" and 
(existing_type != new_type) and existing_resource ): log.error( "valid-attendee-change: Cannot change scheduling object mode from {old} to {new} for UID: {uid}", old=existing_type, new=new_type, uid=self.uid, ) raise HTTPError(ErrorResponse( responsecode.FORBIDDEN, (caldav_namespace, "valid-attendee-change"), "Cannot change scheduling object mode", )) if self.state == "organizer" and self.calendar.masterComponent() is None: log.error("organizer-allowed: Organizer cannot schedule without a master component for UID: {uid}", uid=self.uid) raise HTTPError(ErrorResponse( responsecode.FORBIDDEN, (caldav_namespace, "organizer-allowed"), "Organizer cannot schedule without a master component.", )) returnValue((self.action != "none", new_type == "schedule",)) @inlineCallbacks def testImplicitSchedulingDELETE(self, parent, resource, calendar, internal_request=False): self.txn = parent._txn self.parent = parent self.resource = resource self.calendar = calendar self.internal_request = internal_request self.calendar_home = self.parent.ownerHome() yield self.checkImplicitState() is_scheduling_object = (yield self.checkSchedulingObjectResource(resource)) resource_type = "schedule" if is_scheduling_object else "calendar" self.action = "remove" if resource_type == "schedule" else "none" returnValue((self.action != "none", False,)) @inlineCallbacks def testAttendeeEvent(self, parent, resource, calendar): self.txn = parent._txn self.parent = parent self.resource = resource self.calendar = calendar self.internal_request = False self.action = "modify" self.calendar_home = self.parent.ownerHome() is_scheduling_object = (yield self.checkSchedulingObjectResource(resource)) if not is_scheduling_object: returnValue(False) yield self.checkImplicitState() returnValue(self.state in ("attendee", "attendee-missing",)) def checkValidOrganizer(self): if self.action == "create": if self.organizerAddress.hosted() and not self.organizerAddress.record.enabledAsOrganizer(): log.error("organizer-allowed: ORGANIZER not allowed to be an Organizer: {organizer}", organizer=self.organizer) raise HTTPError(ErrorResponse( responsecode.FORBIDDEN, (caldav_namespace, "organizer-allowed"), "Organizer cannot schedule", )) @inlineCallbacks def checkSchedulingObjectResource(self, resource): if resource is not None: implicit = resource.isScheduleObject if implicit is not None: returnValue(implicit) else: calendar = (yield resource.componentForUser()) try: organizer = calendar.validOrganizerForScheduling() except ValueError: returnValue(False) returnValue(organizer is not None) returnValue(False) @inlineCallbacks def checkImplicitState(self): yield self.extractCalendarData() organizer_scheduling = (yield self.isOrganizerScheduling()) if organizer_scheduling: self.state = "organizer" elif (yield self.isAttendeeScheduling()): self.state = "attendee" elif self.organizer: self.state = "attendee-missing" else: self.state = None returnValue(self.state is not None) @inlineCallbacks def doImplicitScheduling(self, do_smart_merge=False, split_details=None): self.do_smart_merge = do_smart_merge self.except_attendees = () self.only_refresh_attendees = None self.split_details = split_details if self.state == "organizer": yield self.doImplicitOrganizer() elif self.state == "attendee": yield self.doImplicitAttendee() elif self.state == "attendee-missing": yield self.doImplicitMissingAttendee() else: returnValue(None) if self.return_status: returnValue(self.return_status) else: returnValue(self.return_calendar if hasattr(self, "return_calendar") else 
self.calendar) @inlineCallbacks def refreshAllAttendeesExceptSome(self, txn, resource, except_attendees=(), only_attendees=None): self.txn = resource._txn self.resource = resource self.calendar_home = self.resource.parentCollection().ownerHome() self.calendar = (yield self.resource.componentForUser()) self.state = "organizer" self.action = "modify" self.internal_request = True self.except_attendees = except_attendees self.only_refresh_attendees = only_attendees self.changed_rids = None self.reinvites = None yield self.extractCalendarData() self.organizerAddress = (yield calendarUserFromCalendarUserAddress(self.organizer, self.txn)) self.originator = self.organizer self.suppress_refresh = False for attendee in self.calendar.getAllAttendeeProperties(): if attendee.parameterValue("PARTSTAT", "NEEDS-ACTION").upper() == "NEEDS-ACTION": self.suppress_refresh = True if hasattr(self.txn, "doing_attendee_refresh"): self.txn.doing_attendee_refresh += 1 else: self.txn.doing_attendee_refresh = 1 try: refreshCount = (yield self.processRequests()) finally: self.txn.doing_attendee_refresh -= 1 if self.txn.doing_attendee_refresh == 0: delattr(self.txn, "doing_attendee_refresh") if refreshCount and self.logItems is not None: self.logItems["itip.refreshes"] = refreshCount @inlineCallbacks def queuedOrganizerProcessing(self, txn, action, home, resource, uid, calendar_old, calendar_new, smart_merge): self.txn = txn self.action = action self.state = "organizer" self.calendar_home = home self.resource = resource self.do_smart_merge = smart_merge self.queuedResponses = [] cal_uid = calendar_old.resourceUID() if calendar_old is not None else (calendar_new.resourceUID() if calendar_new is not None else "unknown") if action == "create": resources = (yield self.calendar_home.objectResourcesWithUID(uid, ignore_children=["inbox"], allowShared=False)) if len(resources) != 1: log.debug("ImplicitScheduler - queuedOrganizerProcessing 'create' cannot find organizer resource for UID: {uid}", uid=cal_uid) returnValue(None) self.resource = resources[0] self.calendar = calendar_new elif action in ("modify", "modify-cancelled"): if self.resource is None: log.debug("ImplicitScheduler - queuedOrganizerProcessing 'modify' cannot find organizer resource for UID: {uid}", uid=cal_uid) returnValue(None) self.calendar = calendar_new self.oldcalendar = calendar_old elif action == "remove": self.calendar = calendar_old yield self.extractCalendarData() self.organizerAddress = (yield calendarUserFromCalendarUserAddress(self.organizer, self.txn)) self.originator = self.organizer self.except_attendees = () self.only_refresh_attendees = None self.split_details = None yield self.doImplicitOrganizer(queued=True) @inlineCallbacks def queuedOrganizerSending(self, txn, action, home, resource, uid, organizer, attendee, itipmsg, no_refresh): self.txn = txn self.action = action self.state = "organizer" self.calendar_home = home self.resource = resource self.queuedResponses = [] self.suppress_refresh = no_refresh self.uid = uid self.calendar = None self.oldcalendar = None self.organizer = organizer self.attendees = None self.organizerAddress = None self.originator = self.organizer self.except_attendees = () self.only_refresh_attendees = None self.split_details = None yield self.processSend(attendee, itipmsg, jobqueue=False) @inlineCallbacks def sendAttendeeReply(self, txn, resource): self.txn = txn self.resource = resource self.calendar_home = self.resource.parentCollection().ownerHome() self.calendar = (yield self.resource.componentForUser()) 
self.action = "modify" self.state = "attendee" self.internal_request = True self.changed_rids = None yield self.extractCalendarData() self.attendeeAddress = (yield calendarUserFromCalendarUserUID(self.calendar_home.uid(), self.txn)) self.originator = self.attendee = self.attendeeAddress.record.canonicalCalendarUserAddress() result = (yield self.scheduleWithOrganizer()) returnValue(result) @inlineCallbacks def extractCalendarData(self): originatorAddress = yield calendarUserFromCalendarUserUID(self.calendar_home.uid(), self.txn) self.originator = originatorAddress.record.canonicalCalendarUserAddress() try: self.organizer = self.calendar.validOrganizerForScheduling() except ValueError: log.error("single-organizer: Only one ORGANIZER is allowed in an iCalendar object:\n{calendar}", calendar=self.calendar) raise HTTPError(ErrorResponse( responsecode.FORBIDDEN, (caldav_namespace, "single-organizer"), "Only one organizer allowed in scheduling object resource", )) yield self.extractAttendees() self.uid = self.calendar.resourceUID() self.instances = set(self.calendar.getComponentInstances()) @inlineCallbacks def extractAttendees(self): yield self.coerceAttendeeScheduleAgent() self.attendeesByInstance = self.calendar.getAttendeesByInstance(True, onlyScheduleAgentServer=True) self.attendees = set() for attendee, _ignore in self.attendeesByInstance: self.attendees.add(attendee) @inlineCallbacks def hasCalendarResourceUIDSomewhereElse(self, check_resource, mode): if self.internal_request or self.action == "remove": returnValue(None) foundElsewhere = (yield self.calendar_home.hasCalendarResourceUIDSomewhereElse(self.uid, check_resource, mode)) if foundElsewhere is not None: log.debug("unique-scheduling-object-resource: Found component with same UID in a different collection: {resource}", resource=check_resource) raise HTTPError(ErrorResponse( responsecode.FORBIDDEN, (caldav_namespace, "unique-scheduling-object-resource"), "Cannot duplicate scheduling object resource", )) @inlineCallbacks def isOrganizerScheduling(self): if not self.organizer: returnValue(False) self.organizerAddress = (yield calendarUserFromCalendarUserAddress(self.organizer, self.txn)) if not self.organizerAddress.hosted(): returnValue(False) if self.calendar_home.uid() != self.organizerAddress.record.uid: returnValue(False) returnValue(True) @inlineCallbacks def isAttendeeScheduling(self): if not self.organizer: returnValue(False) for attendee in self.attendees: uid = uidFromCalendarUserAddress(attendee) if uid is not None and uid == self.calendar_home.uid(): attendeeAddress = yield calendarUserFromCalendarUserAddress(attendee, self.txn) if attendeeAddress.hosted() and attendeeAddress.record.uid == self.calendar_home.uid(): self.attendee = attendee self.attendeeAddress = attendeeAddress returnValue(True) for attendee in self.attendees: attendeeAddress = yield calendarUserFromCalendarUserAddress(attendee, self.txn) if attendeeAddress.hosted() and attendeeAddress.record.uid == self.calendar_home.uid(): self.attendee = attendee self.attendeeAddress = attendeeAddress returnValue(True) returnValue(False) def makeScheduler(self): return CalDAVScheduler(self.txn, self.calendar_home.uid(), logItems=self.logItems) @inlineCallbacks def doImplicitOrganizer(self, queued=False): if not queued or not config.Scheduling.Options.WorkQueues.Enabled: self.oldcalendar = None self.changed_rids = None self.cancelledAttendees = () self.reinvites = None self.needs_action_rids = None self.needs_sequence_change = False self.coerceOrganizerScheduleAgent() if 
self.action == "remove": log.debug("Implicit - organizer '{organizer}' is removing UID: '{uid}'", organizer=self.organizer, uid=self.uid) self.oldcalendar = self.calendar self.cancelledAttendees = [(attendee, None) for attendee in self.attendees] self.needs_sequence_change = True elif self.action in ("modify", "modify-cancelled"): if not queued or not config.Scheduling.Options.WorkQueues.Enabled: self.oldcalendar = (yield self.resource.componentForUser()) self.oldAttendeesByInstance = self.oldcalendar.getAttendeesByInstance(True, onlyScheduleAgentServer=True) self.oldInstances = set(self.oldcalendar.getComponentInstances()) self.coerceAttendeesPartstatOnModify() if self.oldcalendar and (not queued or not config.Scheduling.Options.WorkQueues.Enabled): self.calendar.sequenceInSync(self.oldcalendar) no_change, self.changed_rids, self.needs_action_rids, reinvites, recurrence_reschedule, status_cancelled, only_status = self.isOrganizerChangeInsignificant() if no_change: if reinvites: log.debug("Implicit - organizer '{organizer}' is re-inviting UID: '{uid}', attendees: {attendees}", organizer=self.organizer, uid=self.uid, attendees=", ".join(reinvites)) self.reinvites = reinvites else: log.debug("Implicit - organizer '{organizer}' is modifying UID: '{uid}' but change is not significant", organizer=self.organizer, uid=self.uid) returnValue(None) else: if self.split_details is None: log.debug("Implicit - organizer '{organizer}' is modifying UID: '{uid}'", organizer=self.organizer, uid=self.uid) for rid in self.needs_action_rids: comp = self.calendar.overriddenComponent(rid) if comp is None: comp = self.calendar.deriveInstance(rid) if comp is not None: self.calendar.addComponent(comp) for attendee in comp.getAllAttendeeProperties(): if attendee.hasParameter("PARTSTAT"): cuaddr = attendee.value() if cuaddr in self.organizerAddress.record.calendarUserAddresses: continue attendee.setParameter("PARTSTAT", "NEEDS-ACTION") else: log.debug("Implicit - organizer '{organizer}' is splitting UID: '{uid}'", organizer=self.organizer, uid=self.uid) if not recurrence_reschedule: self.findRemovedAttendees() else: self.findRemovedAttendeesOnRecurrenceChange() self.checkStatusCancelled(status_cancelled, only_status) self.needs_sequence_change = self.calendar.needsiTIPSequenceChange(self.oldcalendar) elif self.action == "create": if self.split_details is None: log.debug("Implicit - organizer '{organizer}' is creating UID: '{uid}'", organizer=self.organizer, uid=self.uid) self.coerceAttendeesPartstatOnCreate() else: log.debug("Implicit - organizer '{organizer}' is creating a split UID: '{uid}'", organizer=self.organizer, uid=self.uid) self.needs_sequence_change = False for attendee in self.calendar.getAllAttendeeProperties(): if attendee.parameterValue("PARTSTAT", "NEEDS-ACTION").upper() == "NEEDS-ACTION": attendee.setParameter("RSVP", "TRUE") if queued or not config.Scheduling.Options.WorkQueues.Enabled or self.split_details is not None: if self.action == "create": if self.split_details is None: seqs = map(lambda x: x.value(), self.calendar.getAllPropertiesInAnyComponent("SEQUENCE", depth=1)) maxseq = max(seqs) if seqs else 0 if maxseq != 0: self.calendar.replacePropertyInAllComponents(Property("SEQUENCE", maxseq + 1000)) elif self.needs_sequence_change: self.calendar.bumpiTIPInfo(oldcalendar=self.oldcalendar, doSequence=True) yield self.scheduleWithAttendees() else: yield self.queuedScheduleWithAttendees() for attendee in self.calendar.getAllAttendeeProperties(): try: attendee.removeParameter("SCHEDULE-FORCE-SEND") 
except KeyError: pass def isOrganizerChangeInsignificant(self): rids = None date_changed_rids = None reinvites = None recurrence_reschedule = False status_cancelled = set() only_status = None differ = iCalDiff(self.oldcalendar, self.calendar, self.do_smart_merge) no_change = differ.organizerDiff() if not no_change: diffs = differ.whatIsDifferent() rids = set() date_changed_rids = set() checkOrganizerValue = False for rid, props in diffs.iteritems(): if "ORGANIZER" in props: checkOrganizerValue = True rids.add(rid) if any([testprop in props for testprop in ( "DTSTART", "DTEND", "DURATION", "DUE", "RECURRENCE-ID", )]): date_changed_rids.add(rid) if rid is None: if "DTSTART" in props and self.calendar.masterComponent().hasProperty("RRULE"): recurrence_reschedule = True elif "RRULE" in props: recurrence_reschedule = True oldrrule = tuple(self.oldcalendar.masterComponent().properties("RRULE")) oldrrule = oldrrule[0].value() if len(oldrrule) else None newrrule = tuple(self.calendar.masterComponent().properties("RRULE")) newrrule = newrrule[0].value() if len(newrrule) else None if newrrule is not None and oldrrule is not None: oldrrule = oldrrule.duplicate() newrrule = newrrule.duplicate() oldrrule.setUseUntil(False) oldrrule.setUntil(None) oldrrule.setUseCount(False) oldrrule.setCount(0) newrrule.setUseUntil(False) newrrule.setUntil(None) newrrule.setUseCount(False) newrrule.setCount(0) if newrrule == oldrrule: recurrence_reschedule = False elif newrrule is not None: date_changed_rids.update(self.calendar.getComponentInstances()) elif oldrrule is not None: date_changed_rids.add("") if "STATUS" in props: if only_status is None and len(props) == 1: only_status = True instance = self.calendar.overriddenComponent(rid) if instance and instance.propertyValue("STATUS") == "CANCELLED": status_cancelled.add(rid) else: only_status = False if checkOrganizerValue: oldOrganizer = self.oldcalendar.getOrganizer() newOrganizer = self.calendar.getOrganizer() if oldOrganizer != newOrganizer: log.error("valid-organizer-change: Cannot change ORGANIZER: UID:{uid}", uid=self.uid) raise HTTPError(ErrorResponse( responsecode.FORBIDDEN, (caldav_namespace, "valid-organizer-change"), "Organizer cannot be changed", )) else: reinvites = set() for attendee in self.calendar.getAllAttendeeProperties(): try: if attendee.parameterValue("SCHEDULE-FORCE-SEND", "").upper() == "REQUEST": reinvites.add(attendee.value()) except KeyError: pass return ( no_change, rids, date_changed_rids, reinvites, recurrence_reschedule, status_cancelled, only_status ) def findRemovedAttendees(self): mappedOld = set(self.oldAttendeesByInstance) mappedNew = set(self.attendeesByInstance) removedInstances = self.oldInstances - self.instances addedInstances = self.instances - self.oldInstances oldexdates = set() for property in self.oldcalendar.masterComponent().properties("EXDATE"): oldexdates.update([value.getValue() for value in property.value()]) newexdates = set() for property in self.calendar.masterComponent().properties("EXDATE"): newexdates.update([value.getValue() for value in property.value()]) addedexdates = newexdates - oldexdates self.cancelledAttendees = set() for item in mappedOld: if item not in mappedNew: new_attendee, rid = item if rid is None or rid not in removedInstances: self.cancelledAttendees.add(item) else: if (new_attendee, None) not in mappedNew or rid in addedexdates: self.cancelledAttendees.add(item) master_attendees = self.oldcalendar.masterComponent().getAttendeesByInstance(onlyScheduleAgentServer=True) for attendee, _ignore 
in master_attendees: for exdate in addedexdates: if exdate not in removedInstances: self.cancelledAttendees.add((attendee, exdate,)) for attendee, _ignore in master_attendees: for rid in addedInstances: if (attendee, rid) not in mappedNew and rid not in oldexdates: self.cancelledAttendees.add((attendee, rid,))
Apache License 2.0
boxeehacks/boxeehack
hack/boxee/scripts/OpenSubtitles/resources/lib/plugins/SubtitleDatabase.py
SubtitleDB.getLG
python
def getLG(self, language):
    try:
        return self.revertlangs[language]
    except KeyError, e:
        log.warn("Ooops, you found a missing language in the config file "
                 "of %s: %s. Send a bug report to have it added."
                 % (self.__class__.__name__, language))
Returns the short (two-character) representation of the long language name
https://github.com/boxeehacks/boxeehack/blob/bdfa63187f662b542261dce64aff68548f63ecaf/hack/boxee/scripts/OpenSubtitles/resources/lib/plugins/SubtitleDatabase.py#L118-L123
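Illustrative usage (editor's sketch; the langs mapping here is invented, and the code base is Python 2):

    db = SubtitleDB(langs={'en': 'English', 'fr': 'French'})
    db.getLG('French')    # -> 'fr', via the inverted revertlangs dict
    db.getLG('Klingon')   # logs a warning and implicitly returns None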
import os, shutil, urllib2, sys, logging, traceback, zipfile
import xbmc
import struct
import socket
import re

log = logging.getLogger(__name__)

USER_AGENT = 'BoxeeSubs/1.0'

class SubtitleDB(object):
    def __init__(self, langs, revertlangs=None):
        if langs:
            self.langs = langs
            self.revertlangs = dict(
                map(lambda item: (item[1], item[0]), self.langs.items()))
        if revertlangs:
            self.revertlangs = revertlangs
            self.langs = dict(
                map(lambda item: (item[1], item[0]), self.revertlangs.items()))
        self.tvshowRegex = re.compile(
            '(?P<show>.*)S(?P<season>[0-9]{2})E(?P<episode>[0-9]{2}).(?P<teams>.*)',
            re.IGNORECASE)
        self.tvshowRegex2 = re.compile(
            '(?P<show>.*).(?P<season>[0-9]{1,2})x(?P<episode>[0-9]{1,2}).(?P<teams>.*)',
            re.IGNORECASE)
        self.movieRegex = re.compile(
            '(?P<movie>.*)[\.|\[|\(| ]{1}(?P<year>(?:(?:19|20)[0-9]{2}))(?P<teams>.*)',
            re.IGNORECASE)

    def searchInThread(self, queue, filename, langs):
        try:
            subs = self.process(filename, langs)
            map(lambda item: item.setdefault("plugin", self), subs)
            map(lambda item: item.setdefault("filename", filename), subs)
            log.info("%s writing %s items to queue"
                     % (self.__class__.__name__, len(subs)))
        except:
            log.exception("Error occured")
            subs = []
        queue.put(subs, True)

    def process(self, filepath, langs):
        fname = self.getFileName(filepath)
        try:
            return self.query(fname, langs)
        except Exception, e:
            log.exception("Error occured")
            return []

    def createFile(self, subtitle):
        suburl = subtitle["link"]
        videofilename = subtitle["filename"]
        srtbasefilename = videofilename.rsplit(".", 1)[0]
        zipfilename = srtbasefilename + ".zip"
        self.downloadFile(suburl, zipfilename)
        if zipfile.is_zipfile(zipfilename):
            log.debug("Unzipping file " + zipfilename)
            zf = zipfile.ZipFile(zipfilename, "r")
            for el in zf.infolist():
                if el.orig_filename.rsplit(".", 1)[1] in ("srt", "sub", "txt"):
                    outfile = open(
                        srtbasefilename + "." + el.orig_filename.rsplit(".", 1)[1],
                        "wb")
                    outfile.write(zf.read(el.orig_filename))
                    outfile.flush()
                    outfile.close()
                else:
                    log.info("File %s does not seem to be valid " % el.orig_filename)
            zf.close()
            os.remove(zipfilename)
            return srtbasefilename + ".srt"
        else:
            log.info("Unexpected file type (not zip)")
            os.remove(zipfilename)
            return None

    def downloadContent(self, url, timeout=None):
        try:
            log.debug("Downloading %s" % url)
            req = urllib2.Request(url, headers={'Referer': url,
                                                'User-Agent': USER_AGENT})
            if timeout:
                socket.setdefaulttimeout(timeout)
            f = urllib2.urlopen(req)
            content = f.read()
            f.close()
            return content
        except urllib2.HTTPError, e:
            log.warning("HTTP Error: %s - %s" % (e.code, url))
        except urllib2.URLError, e:
            log.warning("URL Error: %s - %s" % (e.reason, url))

    def downloadFile(self, url, filename):
        content = self.downloadContent(url)
        dump = open(filename, "wb")
        dump.write(content)
        dump.close()
        log.debug("Download finished to file %s. Size : %s"
                  % (filename, os.path.getsize(filename)))
MIT License
kyclark/biofx_python
06_hamm/solution6_filter.py
hamming
python
def hamming(seq1: str, seq2: str) -> int:
    distance = filter(lambda t: t[0] != t[1], zip_longest(seq1, seq2))
    return len(list(distance))
Calculate Hamming distance
https://github.com/kyclark/biofx_python/blob/65888923d35e6cf63bf0b43ecd7b51e42d261cd4/06_hamm/solution6_filter.py#L41-L46
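The classic Rosalind HAMM sample makes a quick check; because of zip_longest, unequal lengths count the overhang as mismatches:

    >>> hamming('GAGCCTACTAACGGGAT', 'CATCGTAATGACGGCCT')
    7
    >>> hamming('AC', 'ACGT')   # trailing bases pair with None and count
    2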
import argparse
from itertools import zip_longest
from typing import NamedTuple

class Args(NamedTuple):
    seq1: str
    seq2: str

def get_args():
    parser = argparse.ArgumentParser(
        description='Hamming distance',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('seq1', metavar='str', help='Sequence 1')
    parser.add_argument('seq2', metavar='str', help='Sequence 2')
    args = parser.parse_args()
    return Args(args.seq1, args.seq2)

def main():
    args = get_args()
    print(hamming(args.seq1, args.seq2))
MIT License
apple/ccs-twistedextensions
twext/enterprise/dal/parseschema.py
_ColumnParser.nextColumn
python
def nextColumn(self):
    maybeIdent = self.next()
    if maybeIdent.ttype == Name:
        return self.parseColumn(maybeIdent.value)
    elif isinstance(maybeIdent, Identifier):
        return self.parseColumn(maybeIdent.get_name())
    else:
        return self.parseConstraint(maybeIdent)
Parse the next column or constraint, depending on the next token.
https://github.com/apple/ccs-twistedextensions/blob/2c4046df88873dcf33fba7840ed90e4238dcbec7/twext/enterprise/dal/parseschema.py#L340-L350
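For orientation, a note on what each branch sees (based on general sqlparse behavior; exact tokenization can vary by version):

    # In 'CREATE TABLE FOO (BAR integer, ...)' sqlparse may yield the
    # column name BAR as a bare Name token or wrapped in an Identifier,
    # so both branches funnel into parseColumn; anything else (e.g. a
    # 'PRIMARY KEY (...)' clause) falls through to parseConstraint.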
from __future__ import print_function __all__ = [ "tableFromCreateStatement", "schemaFromPath", "schemaFromString", "addSQLToSchema", "ViolatedExpectation", "nameOrIdentifier", "expectSingle", "expect", "significant", "iterSignificant", ] from itertools import chain from re import compile from sqlparse import parse, keywords from sqlparse.tokens import ( Keyword, Punctuation, Number, String, Name, Comparison as CompTok ) from sqlparse.sql import (Comment, Identifier, Parenthesis, IdentifierList, Function, Comparison) from twext.enterprise.dal.model import ( Schema, Table, SQLType, ProcedureCall, Constraint, Sequence, Index, Function as FunctionModel) from twext.enterprise.dal.syntax import ( ColumnSyntax, CompoundComparison, Constant, Function as FunctionSyntax ) def _fixKeywords(): keywords.KEYWORDS["SEQUENCE"] = Keyword for columnNameKeyword in ["ACCESS", "SIZE", "UID"]: try: del keywords.KEYWORDS[columnNameKeyword] except: pass for keyword in ["GROUPS", "TIME"]: try: del keywords.KEYWORDS_ORACLE[keyword] except: pass _fixKeywords() def tableFromCreateStatement(schema, stmt): i = iterSignificant(stmt) expect(i, ttype=Keyword.DDL, value="CREATE") expect(i, ttype=Keyword, value="TABLE") function = expect(i, cls=Function) i = iterSignificant(function) name = expect(i, cls=Identifier).get_name().encode("utf-8") self = Table(schema, name) parens = expect(i, cls=Parenthesis) cp = _ColumnParser(self, iterSignificant(parens), parens) cp.parse() return self def schemaFromPath(path): schema = Schema(path.basename()) schemaData = path.getContent() addSQLToSchema(schema, schemaData) return schema def schemaFromString(data): schema = Schema() addSQLToSchema(schema, data) return schema def addSQLToSchema(schema, schemaData): schemaData = "\n".join(filter(lambda x: not x == "/", schemaData.splitlines())) parsed = parse(schemaData) for stmt in parsed: preface = "" while stmt.tokens and not significant(stmt.tokens[0]): preface += str(stmt.tokens.pop(0)) if not stmt.tokens: continue if stmt.get_type() == "CREATE": createType = stmt.token_next(1, True)[1].value.upper() if createType == u"TABLE": t = tableFromCreateStatement(schema, stmt) t.addComment(preface) elif createType == u"SEQUENCE": Sequence( schema, stmt.token_next(2, True)[1].get_name().encode("utf-8") ) elif createType in (u"INDEX", u"UNIQUE"): signifindex = iterSignificant(stmt) expect(signifindex, ttype=Keyword.DDL, value="CREATE") token = signifindex.next() unique = False if token.match(Keyword, "UNIQUE"): unique = True token = signifindex.next() if not token.match(Keyword, "INDEX"): raise ViolatedExpectation("INDEX or UNQIUE", token.value) indexName = nameOrIdentifier(signifindex.next()) expect(signifindex, ttype=Keyword, value="ON") token = signifindex.next() if isinstance(token, Function): [tableName, columnArgs] = iterSignificant(token) else: tableName = token token = signifindex.next() if token.match(Keyword, "USING"): [_ignore, columnArgs] = iterSignificant( expect(signifindex, cls=Function) ) else: raise ViolatedExpectation("USING", token) tableName = nameOrIdentifier(tableName) arggetter = iterSignificant(columnArgs) expect(arggetter, ttype=Punctuation, value=u"(") valueOrValues = arggetter.next() if isinstance(valueOrValues, IdentifierList): valuelist = valueOrValues.get_identifiers() else: valuelist = [valueOrValues] expect(arggetter, ttype=Punctuation, value=u")") idx = Index( schema, indexName, schema.tableNamed(tableName), unique ) for token in valuelist: columnName = nameOrIdentifier(token) 
idx.addColumn(idx.table.columnNamed(columnName)) elif createType == u"FUNCTION": parseFunction(schema, stmt) elif stmt.get_type() == "INSERT": insertTokens = iterSignificant(stmt) expect(insertTokens, ttype=Keyword.DML, value="INSERT") expect(insertTokens, ttype=Keyword, value="INTO") token = insertTokens.next() if isinstance(token, Function): [tableName, columnArgs] = iterSignificant(token) tableName = tableName.get_name() columns = namesInParens(columnArgs) else: tableName = token.get_name() columns = None expect(insertTokens, ttype=Keyword, value="VALUES") values = expect(insertTokens, cls=Parenthesis) vals = iterSignificant(values) expect(vals, ttype=Punctuation, value="(") valuelist = expect(vals, cls=IdentifierList) expect(vals, ttype=Punctuation, value=")") rowData = [] for ident in valuelist.get_identifiers(): rowData.append( {Number.Integer: int, String.Single: _destringify} [ident.ttype](ident.value) ) schema.tableNamed(tableName).insertSchemaRow(rowData, columns=columns) elif stmt.get_type() == "CREATE OR REPLACE": createType = stmt.token_next(1, True)[1].value.upper() if createType == u"FUNCTION": parseFunction(schema, stmt) elif stmt.get_type() != "UNKNOWN": print("unknown type:", stmt.get_type()) return schema def parseFunction(schema, stmt): fn_name = stmt.token_next(2, True)[1] if isinstance(fn_name, Function): [fn_name, _ignore_args] = iterSignificant(fn_name) fn_name = fn_name.get_name() else: fn_name = fn_name.get_name() FunctionModel( schema, fn_name.encode("utf-8"), ) class _ColumnParser(object): def __init__(self, table, parenIter, parens): self.parens = parens self.iter = parenIter self.table = table def __iter__(self): return self def next(self): result = self.iter.next() if isinstance(result, IdentifierList): while result.tokens: it = result.tokens.pop() if significant(it): self.pushback(it) return self.next() return result def pushback(self, value): self.iter = chain(iter((value,)), self.iter) def parse(self): expect(self.iter, ttype=Punctuation, value=u"(") while self.nextColumn(): pass
Apache License 2.0
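A minimal usage sketch for the parsing helpers shown in the context above, assuming the twext package is importable; the table and column names are made up for illustration.

from twext.enterprise.dal.parseschema import schemaFromString

# Parse a single CREATE TABLE statement into a Schema model object;
# tableFromCreateStatement does the per-statement work internally.
schema = schemaFromString("CREATE TABLE FOO (BAR integer, BAZ varchar(255));")
table = schema.tableNamed("FOO")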
maniacallabs/bibliopixel
bibliopixel/animation/animation.py
Animation.__init__
python
def __init__(self, layout, *, preclear=True, fail_on_exception=None, **kwds): self.palette = legacy_palette.pop_legacy_palette( kwds, *self.COLOR_DEFAULTS) self.palette.length = layout.numLEDs attributes.set_reserved(self, 'animation', **kwds) self.layout = layout assert layout self.internal_delay = None self.on_completion = None self.state = runner.STATE.ready self.preclear = preclear self.runner = None self.time = time.time self.sleep_time = 0 self.preframe_callbacks = [] self.fail_on_exception = self.FAIL_ON_EXCEPTION if fail_on_exception is None else fail_on_exception
Arguments: preclear: If True, clear the layout before rendering the frame; otherwise, the results of the previous frame are preserved fail_on_exception: If False, exceptions thrown in the animation frame are caught and reported; if True, exceptions are raised, potentially ending the animation cycle and the program; if None or not set, the value of Animation.FAIL_ON_EXCEPTION is used
https://github.com/maniacallabs/bibliopixel/blob/afb993fbbe56e75e7c98f252df402b0f3e83bb6e/bibliopixel/animation/animation.py#L43-L70
import contextlib, threading, time from . import adaptor, animation_threading, runner from .. util import deprecated, log from .. colors import palettes, legacy_palette from .. project import attributes, fields class Animation(object): free_run = False pre_recursion = fields.default_converter subframes = 1 top_level = True if deprecated.allowed(): _step = 0 FAIL_ON_EXCEPTION = False COLOR_DEFAULTS = () @classmethod def construct(cls, project, *, run=None, name=None, data=None, **desc): from . failed import Failed exception = desc.pop('_exception', None) if exception: a = Failed(project.layout, desc, exception) else: try: a = cls(project.layout, **desc) a._set_runner(run or {}) except Exception as e: if cls.FAIL_ON_EXCEPTION: raise a = Failed(project.layout, desc, e) a.name = name a.data = data return a
MIT License
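The three-valued fail_on_exception flag described in the docstring can be illustrated with a small self-contained sketch (this mock class is hypothetical, not bibliopixel's API):

class MockAnimation:
    FAIL_ON_EXCEPTION = False  # class-wide default, as in Animation

    def __init__(self, fail_on_exception=None):
        # None defers to the class attribute; True/False override it per instance.
        self.fail_on_exception = (
            self.FAIL_ON_EXCEPTION if fail_on_exception is None
            else fail_on_exception)

assert MockAnimation().fail_on_exception is False
assert MockAnimation(fail_on_exception=True).fail_on_exception is True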
fasterspeeding/tanjun
tanjun/injecting.py
AbstractInjectionContext.cache_result
python
def cache_result(self, callback: CallbackSig[_T], value: _T, /) -> None:
    # Abstract method (decorated with @abc.abstractmethod in the class body);
    # concrete injection contexts override this to store the value.
    raise NotImplementedError
Cache the result of a callback within the scope of this context. Parameters ---------- callback : CallbackSig[_T] The callback to cache the result of. value : _T The value to cache.
https://github.com/fasterspeeding/tanjun/blob/9ca8c9412e7f938b01576c958392f38ff761392b/tanjun/injecting.py#L118-L127
from __future__ import annotations __all__: list[str] = [ "AbstractInjectionContext", "BasicInjectionContext", "CallbackDescriptor", "Descriptor", "CallbackSig", "Undefined", "UNDEFINED", "UndefinedOr", "inject", "injected", "Injected", "InjectorClient", "TypeDescriptor", ] import abc import collections.abc as collections import copy import inspect import typing from . import abc as tanjun_abc from . import errors if typing.TYPE_CHECKING: _BaseInjectableCallbackT = typing.TypeVar("_BaseInjectableCallbackT", bound="BaseInjectableCallback[typing.Any]") _CallbackDescriptorT = typing.TypeVar("_CallbackDescriptorT", bound="CallbackDescriptor[typing.Any]") _InjectorClientT = typing.TypeVar("_InjectorClientT", bound="InjectorClient") _T = typing.TypeVar("_T") CallbackSig = collections.Callable[..., tanjun_abc.MaybeAwaitableT[_T]] class Undefined: __instance: Undefined def __bool__(self) -> typing.Literal[False]: return False def __new__(cls) -> Undefined: try: return cls.__instance except AttributeError: new = super().__new__(cls) assert isinstance(new, Undefined) cls.__instance = new return cls.__instance UNDEFINED: typing.Final[Undefined] = Undefined() UndefinedOr = typing.Union[Undefined, _T] class AbstractInjectionContext(abc.ABC): __slots__ = () @property @abc.abstractmethod def injection_client(self) -> InjectorClient: @abc.abstractmethod
BSD 3-Clause New or Revised License
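As a sketch of how a concrete context could satisfy this abstract contract, a dict keyed by the callback works (this toy class and its get_cached_result accessor are hypothetical, not tanjun's own implementation):

class ToyInjectionContext:
    def __init__(self):
        self._results = {}

    def cache_result(self, callback, value, /):
        # Store the computed value so later lookups for the same callback reuse it.
        self._results[callback] = value

    def get_cached_result(self, callback, /, default=None):
        # Hypothetical companion accessor for the cached value.
        return self._results.get(callback, default)

ctx = ToyInjectionContext()
ctx.cache_result(len, 42)
assert ctx.get_cached_result(len) == 42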
tonybeltramelli/graphics-and-vision
Stereo-Vision-System/bergar/com.simonsen.stereovision/Cameras/CamerasParameters.py
CamerasParameters.P1
python
def P1(self): return self.__p1
Get a 3x4 projection matrix in the new (rectified) coordinate systems for the first camera.
https://github.com/tonybeltramelli/graphics-and-vision/blob/a1dbeada8e907b119ecce1fe421ae91e64ff3371/Stereo-Vision-System/bergar/com.simonsen.stereovision/Cameras/CamerasParameters.py#L89-L91
__version__ = '$Revision: 2015040701 $' from Settings.ClassProperty import ClassProperty class CamerasParameters(object): @property def CameraMatrix1(self): return self.__cameraMatrix1 @CameraMatrix1.setter def CameraMatrix1(self, value): self.__cameraMatrix1 = value @property def CameraMatrix2(self): return self.__cameraMatrix2 @CameraMatrix2.setter def CameraMatrix2(self, value): self.__cameraMatrix2 = value @property def DistCoeffs1(self): return self.__distCoeffs1 @DistCoeffs1.setter def DistCoeffs1(self, value): self.__distCoeffs1 = value @property def DistCoeffs2(self): return self.__distCoeffs2 @DistCoeffs2.setter def DistCoeffs2(self, value): self.__distCoeffs2 = value @property def R1(self): return self.__r1 @R1.setter def R1(self, value): self.__r1 = value @property def R2(self): return self.__r2 @R2.setter def R2(self, value): self.__r2 = value @property
Apache License 2.0
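The 3x4 matrix this property stores is the P1 output of OpenCV's stereo rectification; a minimal sketch with toy calibration values (assuming cv2 and numpy are installed; the intrinsics and baseline are made up):

import numpy as np
import cv2

K = np.array([[800.0, 0.0, 320.0], [0.0, 800.0, 240.0], [0.0, 0.0, 1.0]])
dist = np.zeros(5)
R = np.eye(3)                   # rotation between the two cameras
T = np.array([0.1, 0.0, 0.0])   # 10 cm horizontal baseline
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K, dist, K, dist, (640, 480), R, T)
print(P1.shape)  # (3, 4) -- the value a CamerasParameters instance would hold in P1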
facelessuser/wcmatch
wcmatch/_wcparse.py
is_negative
python
def is_negative(pattern, flags): if flags & MINUSNEGATE: return flags & NEGATE and pattern[0:1] in MINUS_NEGATIVE_SYM elif flags & EXTMATCH: return flags & NEGATE and pattern[0:1] in NEGATIVE_SYM and pattern[1:2] not in ROUND_BRACKET else: return flags & NEGATE and pattern[0:1] in NEGATIVE_SYM
Check if negative pattern.
https://github.com/facelessuser/wcmatch/blob/3bf73867de62e1dfbecca4532de292de43f91b28/wcmatch/_wcparse.py#L459-L467
import re import functools import bracex import os from . import util from . import posix from . _wcmatch import WcRegexp UNICODE_RANGE = '\u0000-\U0010ffff' ASCII_RANGE = '\x00-\xff' PATTERN_LIMIT = 1000 RE_WIN_DRIVE_START = re.compile(r'((?:\\\\|/){2}((?:\\[^\\/]|[^\\/])+)|([\\]?[a-z][\\]?:))((?:\\\\|/)|$)', re.I) RE_WIN_DRIVE_LETTER = re.compile(r'([a-z]:)((?:\\|/)|$)', re.I) RE_WIN_DRIVE_PART = re.compile(r'((?:\\[^\\/]|[^\\/])+)((?:\\\\|/)|$)', re.I) RE_WIN_DRIVE_UNESCAPE = re.compile(r'\\(.)', re.I) RE_WIN_DRIVE = ( re.compile( r'''(?x) ( (?:\\\\|/){2}[?.](?:\\\\|/)(?: [a-z]:| unc(?:(?:\\\\|/)[^\\/]+){2} | (?:global(?:\\\\|/))+(?:[a-z]:|unc(?:(?:\\\\|/)[^\\/]+){2}|[^\\/]+) ) | (?:\\\\|/){2}[^\\/]+(?:\\\\|/)[^\\/]+| [a-z]: )((?:\\\\|/){1}|$) ''', re.I ), re.compile( br'''(?x) ( (?:\\\\|/){2}[?.](?:\\\\|/)(?: [a-z]:| unc(?:(?:\\\\|/)[^\\/]+){2} | (?:global(?:\\\\|/))+(?:[a-z]:|unc(?:(?:\\\\|/)[^\\/]+){2}|[^\\/]+) ) | (?:\\\\|/){2}[^\\/]+(?:\\\\|/)[^\\/]+| [a-z]: )((?:\\\\|/){1}|$) ''', re.I ) ) RE_MAGIC_ESCAPE = ( re.compile(r'([-!~*?()\[\]|{}]|(?<!\\)(?:(?:[\\]{2})*)\\(?!\\))'), re.compile(br'([-!~*?()\[\]|{}]|(?<!\\)(?:(?:[\\]{2})*)\\(?!\\))') ) MAGIC_DEF = ( frozenset("*?[]\\"), frozenset(b"*?[]\\") ) MAGIC_SPLIT = ( frozenset("|"), frozenset(b"|") ) MAGIC_NEGATE = ( frozenset('!'), frozenset(b'!') ) MAGIC_MINUS_NEGATE = ( frozenset('-'), frozenset(b'-') ) MAGIC_TILDE = ( frozenset('~'), frozenset(b'~') ) MAGIC_EXTMATCH = ( frozenset('()'), frozenset(b'()') ) MAGIC_BRACE = ( frozenset("{}"), frozenset(b"{}") ) RE_MAGIC = ( re.compile(r'([-!~*?(\[|{\\])'), re.compile(br'([-!~*?(\[|{\\])') ) RE_WIN_DRIVE_MAGIC = ( re.compile(r'([{}|]|(?<!\\)(?:(?:[\\]{2})*)\\(?!\\))'), re.compile(br'([{}|]|(?<!\\)(?:(?:[\\]{2})*)\\(?!\\))') ) RE_NO_DIR = ( re.compile(r'^(?:.*?(?:/\.{1,2}/*|/)|\.{1,2}/*)$'), re.compile(br'^(?:.*?(?:/\.{1,2}/*|/)|\.{1,2}/*)$') ) RE_WIN_NO_DIR = ( re.compile(r'^(?:.*?(?:[\\/]\.{1,2}[\\/]*|[\\/])|\.{1,2}[\\/]*)$'), re.compile(br'^(?:.*?(?:[\\/]\.{1,2}[\\/]*|[\\/])|\.{1,2}[\\/]*)$') ) RE_TILDE = ( re.compile(r'~[^/]*(?=/|$)'), re.compile(br'~[^/]*(?=/|$)') ) RE_WIN_TILDE = ( re.compile(r'~(?:\\(?![\\/])|[^\\/])*(?=\\\\|/|$)'), re.compile(br'~(?:\\(?![\\/])|[^\\/])*(?=\\\\|/|$)') ) TILDE_SYM = ( '~', b'~' ) RE_ANCHOR = re.compile(r'^/+') RE_WIN_ANCHOR = re.compile(r'^(?:\\\\|/)+') RE_POSIX = re.compile(r':(alnum|alpha|ascii|blank|cntrl|digit|graph|lower|print|punct|space|upper|word|xdigit):\]') SET_OPERATORS = frozenset(('&', '~', '|')) NEGATIVE_SYM = frozenset((b'!', '!')) MINUS_NEGATIVE_SYM = frozenset((b'-', '-')) ROUND_BRACKET = frozenset((b'(', '(')) EXT_TYPES = frozenset(('*', '?', '+', '@', '!')) CASE = 0x0001 IGNORECASE = 0x0002 RAWCHARS = 0x0004 NEGATE = 0x0008 MINUSNEGATE = 0x0010 PATHNAME = 0x0020 DOTMATCH = 0x0040 EXTMATCH = 0x0080 GLOBSTAR = 0x0100 BRACE = 0x0200 REALPATH = 0x0400 FOLLOW = 0x0800 SPLIT = 0x1000 MATCHBASE = 0x2000 NODIR = 0x4000 NEGATEALL = 0x8000 FORCEWIN = 0x10000 FORCEUNIX = 0x20000 GLOBTILDE = 0x40000 NOUNIQUE = 0x80000 NODOTDIR = 0x100000 _TRANSLATE = 0x100000000 _ANCHOR = 0x200000000 _EXTMATCHBASE = 0x400000000 _NOABSOLUTE = 0x800000000 _RTL = 0x1000000000 FLAG_MASK = ( CASE | IGNORECASE | RAWCHARS | NEGATE | MINUSNEGATE | PATHNAME | DOTMATCH | EXTMATCH | GLOBSTAR | BRACE | REALPATH | FOLLOW | MATCHBASE | NODIR | NEGATEALL | FORCEWIN | FORCEUNIX | GLOBTILDE | SPLIT | NOUNIQUE | NODOTDIR | _TRANSLATE | _ANCHOR | _EXTMATCHBASE | _RTL | _NOABSOLUTE ) CASE_FLAGS = IGNORECASE | CASE _QMARK = r'.' _STAR = r'.*?' _PATH_TRAIL = r'{}*?' 
_NO_DIR = r'(?!(?:\.{{1,2}})(?:$|[{sep}]))' _PATH_STAR = r'[^{sep}]*?' _PATH_STAR_DOTMATCH = _NO_DIR + _PATH_STAR _PATH_STAR_NO_DOTMATCH = _NO_DIR + r'(?:(?!\.){})?'.format(_PATH_STAR) _PATH_GSTAR_DOTMATCH = r'(?:(?!(?:[{sep}]|^)(?:\.{{1,2}})($|[{sep}])).)*?' _PATH_GSTAR_NO_DOTMATCH = r'(?:(?!(?:[{sep}]|^)\.).)*?' _PATH_GSTAR_RTL_MATCH = r'.*?' _NO_DOT = r'(?![.])' _PATH_NO_SLASH_DOT = r'(?![{sep}.])' _PATH_NO_SLASH = r'(?![{sep}])' _ONE_OR_MORE = r'+' _EOP = r'$' _PATH_EOP = r'(?:$|[{sep}])' _GLOBSTAR_DIV = r'(?:^|$|{})+' _NEED_CHAR_PATH = r'(?=[^{sep}])' _NEED_CHAR = r'(?=.)' _NEED_SEP = r'(?={})' _QMARK_GROUP = r'(?:{})?' _QMARK_CAPTURE_GROUP = r'((?#)(?:{})?)' _STAR_GROUP = r'(?:{})*' _STAR_CAPTURE_GROUP = r'((?#)(?:{})*)' _PLUS_GROUP = r'(?:{})+' _PLUS_CAPTURE_GROUP = r'((?#)(?:{})+)' _GROUP = r'(?:{})' _CAPTURE_GROUP = r'((?#){})' _EXCLA_GROUP = r'(?:(?!(?:{})' _EXCLA_CAPTURE_GROUP = r'((?#)(?!(?:{})' _EXCLA_GROUP_CLOSE = r'){})' _NO_ROOT = r'(?!/)' _NO_WIN_ROOT = r'(?!(?:[\\/]|[a-zA-Z]:))' _NO_NIX_DIR = ( r'^(?:.*?(?:/\.{1,2}/*|/)|\.{1,2}/*)$', rb'^(?:.*?(?:/\.{1,2}/*|/)|\.{1,2}/*)$' ) _NO_WIN_DIR = ( r'^(?:.*?(?:[\\/]\.{1,2}[\\/]*|[\\/])|\.{1,2}[\\/]*)$', rb'^(?:.*?(?:[\\/]\.{1,2}[\\/]*|[\\/])|\.{1,2}[\\/]*)$' ) class InvPlaceholder(str): class PathNameException(Exception): class DotException(Exception): class PatternLimitException(Exception): def escape(pattern, unix=None, pathname=True, raw=False): if isinstance(pattern, bytes): drive_pat = RE_WIN_DRIVE[util.BYTES] magic = RE_MAGIC_ESCAPE[util.BYTES] drive_magic = RE_WIN_DRIVE_MAGIC[util.BYTES] replace = br'\\\1' slash = b'\\' double_slash = b'\\\\' drive = b'' else: drive_pat = RE_WIN_DRIVE[util.UNICODE] magic = RE_MAGIC_ESCAPE[util.UNICODE] drive_magic = RE_WIN_DRIVE_MAGIC[util.UNICODE] replace = r'\\\1' slash = '\\' double_slash = '\\\\' drive = '' if not raw: pattern = pattern.replace(slash, double_slash) length = 0 if pathname and ((unix is None and util.platform() == "windows") or unix is False): m = drive_pat.match(pattern) if m: drive = m.group(0) length = len(drive) drive = drive_magic.sub(replace, m.group(0)) pattern = pattern[length:] return drive + magic.sub(replace, pattern) def _get_win_drive(pattern, regex=False, case_sensitive=False): drive = None slash = False end = 0 root_specified = False m = RE_WIN_DRIVE_START.match(pattern) if m: end = m.end(0) if m.group(3) and RE_WIN_DRIVE_LETTER.match(m.group(0)): if regex: drive = escape_drive(RE_WIN_DRIVE_UNESCAPE.sub(r'\1', m.group(3)), case_sensitive) else: drive = RE_WIN_DRIVE_UNESCAPE.sub(r'\1', m.group(0)) slash = bool(m.group(4)) root_specified = True elif m.group(2): root_specified = True part = [RE_WIN_DRIVE_UNESCAPE.sub(r'\1', m.group(2))] is_special = part[-1].lower() in ('.', '?') complete = 1 first = 1 count = 0 for count, m in enumerate(RE_WIN_DRIVE_PART.finditer(pattern, m.end(0)), 1): end = m.end(0) part.append(RE_WIN_DRIVE_UNESCAPE.sub(r'\1', m.group(1))) slash = bool(m.group(2)) if is_special: if count == first and part[-1].lower() == 'unc': complete += 2 elif count == first and part[-1].lower() == 'global': first += 1 complete += 1 if count == complete: break if count == complete: if not regex: drive = '\\\\{}{}'.format('\\'.join(part), '\\' if slash else '') else: drive = r'[\\/]{2}' + r'[\\/]'.join([escape_drive(p, case_sensitive) for p in part]) elif pattern.startswith(('\\\\', '/')): root_specified = True return root_specified, drive, slash, end def _get_magic_symbols(ptype, unix, flags): if ptype == util.BYTES: slash = b'\\' else: slash = '\\' 
magic = set() magic_drive = set() if unix else set(slash) magic |= MAGIC_DEF[ptype] if flags & BRACE: magic |= MAGIC_BRACE[ptype] magic_drive |= MAGIC_BRACE[ptype] if flags & SPLIT: magic |= MAGIC_SPLIT[ptype] magic_drive |= MAGIC_SPLIT[ptype] if flags & GLOBTILDE: magic |= MAGIC_TILDE[ptype] if flags & EXTMATCH: magic |= MAGIC_EXTMATCH[ptype] if flags & NEGATE: if flags & MINUSNEGATE: magic |= MAGIC_MINUS_NEGATE[ptype] else: magic |= MAGIC_NEGATE[ptype] return magic, magic_drive def is_magic(pattern, flags=0): magical = False unix = is_unix_style(flags) ptype = util.BYTES if isinstance(pattern, bytes) else util.UNICODE drive_pat = RE_WIN_DRIVE[ptype] magic, magic_drive = _get_magic_symbols(ptype, unix, flags) is_path = flags & PATHNAME length = 0 if is_path and ((unix is None and util.platform() == "windows") or unix is False): m = drive_pat.match(pattern) if m: drive = m.group(0) length = len(drive) for c in magic_drive: if c in drive: magical = True break if not magical: pattern = pattern[length:] for c in magic: if c in pattern: magical = True break return magical
MIT License
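A quick check of the three branches, using the flag constants defined in the module above (_wcparse is an internal module, so this is illustrative only, assuming wcmatch is installed):

from wcmatch import _wcparse as p

assert p.is_negative('!foo', p.NEGATE)                      # '!' negates by default
assert not p.is_negative('-foo', p.NEGATE)                  # '-' needs MINUSNEGATE
assert p.is_negative('-foo', p.NEGATE | p.MINUSNEGATE)
assert not p.is_negative('!(foo)', p.NEGATE | p.EXTMATCH)   # '!(' opens an extmatch group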
pauliacomi/pygaps
src/pygaps/modelling/base_model.py
IsothermBaseModel.fit
python
def fit( self, pressure, loading, param_guess, optimization_params=None, verbose=False ): if verbose: logger.info(f"Attempting to model using {self.name}.") param_names = [param for param in self.params] guess = numpy.array([param_guess[param] for param in param_names]) bounds = [[self.param_bounds[param][0] for param in param_names], [self.param_bounds[param][1] for param in param_names]] def fit_func(x, p, L): for i, _ in enumerate(param_names): self.params[param_names[i]] = x[i] return self.loading(p) - L kwargs = dict( bounds=bounds, ) if optimization_params: kwargs.update(optimization_params) try: opt_res = scipy.optimize.least_squares( fit_func, guess, args=(pressure, loading ), **kwargs ) except ValueError as e: raise CalculationError( f"Fitting routine for {self.name} failed with error:" f"\n\t{e}" ) if not opt_res.success: raise CalculationError( f"Fitting routine for {self.name} failed with error:" f"\n\t{opt_res.message}" f"\nTry a different starting point in the nonlinear optimization" f"\nby passing a dictionary of parameter guesses, param_guess, to the constructor." f"\nDefault starting guess for parameters:" f"\n{param_guess}\n" ) for index, _ in enumerate(param_names): self.params[param_names[index]] = opt_res.x[index] self.rmse = numpy.sqrt(numpy.sum((opt_res.fun)**2) / len(loading)) if verbose: logger.info(f"Model {self.name} success, RMSE is {self.rmse:.3f}")
Fit model to data using nonlinear optimization with least squares loss function. Resulting parameters are assigned to self. Parameters ---------- pressure : ndarray The pressures of each point. loading : ndarray The loading for each point. param_guess : ndarray The initial guess for the fitting function. optimization_params : dict Custom parameters to pass to SciPy.optimize.least_squares. verbose : bool, optional Prints out extra information about steps taken.
https://github.com/pauliacomi/pygaps/blob/c4d45b710e171c937471686437e382e05aec4ed5/src/pygaps/modelling/base_model.py#L176-L254
import abc import logging logger = logging.getLogger('pygaps') import numpy from .. import scipy from ..utilities.exceptions import CalculationError class IsothermBaseModel(): __metaclass__ = abc.ABCMeta name = None calculates = None param_names = [] param_bounds = None rmse = numpy.nan pressure_range = [numpy.nan, numpy.nan] loading_range = [numpy.nan, numpy.nan] def __init__(self, params=None): if params: self.rmse = params.pop('rmse', numpy.nan) self.pressure_range = params.pop( 'pressure_range', [numpy.nan, numpy.nan] ) self.loading_range = params.pop( 'loading_range', [numpy.nan, numpy.nan] ) self.params = {} for param in self.param_names: try: self.params[param] = params['parameters'][param] except KeyError as err: raise KeyError( f"""The isotherm model is missing parameter '{param}'.""" ) from err else: self.params = {param: numpy.nan for param in self.param_names} def __init_parameters__(self, parameters): def __repr__(self): return f"pyGAPS Isotherm Model, '{self.name}' type" def __str__(self): ret_string = ( f"{self.name} isotherm model.\n" f"RMSE = {self.rmse:.4f}\n" "Model parameters:\n" ) for param, val in self.params.items(): ret_string += f"\t{param} = {val:.2f}\n" ret_string += ( "Model applicable range:\n" + f"\tPressure range: {self.pressure_range[0]:.2f} - {self.pressure_range[1]:.2f}\n" f"\tLoading range: {self.loading_range[0]:.2f} - {self.loading_range[1]:.2f}\n" ) return ret_string def to_dict(self): return { 'name': self.name, 'rmse': self.rmse, 'parameters': self.params, 'pressure_range': tuple(map(float, self.pressure_range)), 'loading_range': tuple(map(float, self.loading_range)), } @abc.abstractmethod def loading(self, pressure): return @abc.abstractmethod def pressure(self, loading): return @abc.abstractmethod def spreading_pressure(self, pressure): return def initial_guess(self, pressure, loading): loading = numpy.atleast_1d(loading) pressure = numpy.atleast_1d(pressure) zero_values = ~numpy.logical_and(pressure > 0, loading > 0) if any(zero_values): pressure = pressure[~zero_values] loading = loading[~zero_values] saturation_loading = 1.1 * max(loading) langmuir_k = loading[0] / pressure[0] / ( saturation_loading - loading[0] ) return saturation_loading, langmuir_k
MIT License
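The residual-plus-RMSE pattern the fit method uses can be reproduced standalone; here is a sketch fitting a synthetic Langmuir isotherm with the same SciPy call (the model and data are made up for illustration):

import numpy as np
from scipy.optimize import least_squares

pressure = np.linspace(0.01, 1.0, 20)
loading = 2.0 * pressure / (1.0 + 2.0 * pressure)   # Langmuir with n_m=1, K=2

def fit_func(x, p, L):
    n_m, K = x
    return n_m * K * p / (1.0 + K * p) - L          # residuals, as in fit()

opt_res = least_squares(fit_func, [0.5, 0.5], args=(pressure, loading),
                        bounds=([0.0, 0.0], [np.inf, np.inf]))
rmse = np.sqrt(np.sum(opt_res.fun ** 2) / len(loading))
print(opt_res.x, rmse)   # recovers ~[1.0, 2.0] with rmse ~0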
codeinn/vcs
vcs/backends/base.py
BaseChangeset.id
python
def id(self): raise NotImplementedError
Returns string identifying this changeset.
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/base.py#L395-L399
import datetime import itertools from vcs.utils import author_name, author_email from vcs.utils.lazy import LazyProperty from vcs.utils.helpers import get_dict_for_attrs from vcs.conf import settings from vcs.exceptions import ( ChangesetError, EmptyRepositoryError, NodeAlreadyAddedError, NodeAlreadyChangedError, NodeAlreadyExistsError, NodeAlreadyRemovedError, NodeDoesNotExistError, NodeNotChangedError, RepositoryError ) class BaseRepository(object): scm = None DEFAULT_BRANCH_NAME = None EMPTY_CHANGESET = '0' * 40 def __init__(self, repo_path, create=False, **kwargs): raise NotImplementedError def __str__(self): return '<%s at %s>' % (self.__class__.__name__, self.path) def __repr__(self): return self.__str__() def __len__(self): return self.count() def __eq__(self, other): same_instance = isinstance(other, self.__class__) return same_instance and getattr(other, 'path', None) == self.path def __ne__(self, other): return not self.__eq__(other) @LazyProperty def alias(self): for k, v in settings.BACKENDS.items(): if v.split('.')[-1] == str(self.__class__.__name__): return k @LazyProperty def name(self): raise NotImplementedError @LazyProperty def owner(self): raise NotImplementedError @LazyProperty def description(self): raise NotImplementedError @LazyProperty def size(self): size = 0 try: tip = self.get_changeset() for topnode, dirs, files in tip.walk('/'): for f in files: size += tip.get_file_size(f.path) for dir in dirs: for f in files: size += tip.get_file_size(f.path) except RepositoryError: pass return size def is_valid(self): raise NotImplementedError def get_last_change(self): self.get_changesets() def get_changeset(self, revision=None): raise NotImplementedError def __iter__(self): for revision in self.revisions: yield self.get_changeset(revision) def get_changesets(self, start=None, end=None, start_date=None, end_date=None, branch_name=None, reverse=False): raise NotImplementedError def __getslice__(self, i, j): for rev in self.revisions[i:j]: yield self.get_changeset(rev) def __getitem__(self, key): return self.get_changeset(key) def count(self): return len(self.revisions) def tag(self, name, user, revision=None, message=None, date=None, **opts): raise NotImplementedError def remove_tag(self, name, user, message=None, date=None): raise NotImplementedError def get_diff(self, rev1, rev2, path=None, ignore_whitespace=False, context=3): raise NotImplementedError @LazyProperty def in_memory_changeset(self): raise NotImplementedError def add(self, filenode, **kwargs): raise NotImplementedError def remove(self, filenode, **kwargs): raise NotImplementedError def commit(self, message, **kwargs): raise NotImplementedError def get_state(self): raise NotImplementedError def get_config_value(self, section, name, config_file=None): raise NotImplementedError def get_user_name(self, config_file=None): raise NotImplementedError def get_user_email(self, config_file=None): raise NotImplementedError @LazyProperty def workdir(self): raise NotImplementedError class BaseChangeset(object): def __str__(self): return '<%s at %s:%s>' % (self.__class__.__name__, self.revision, self.short_id) def __repr__(self): return self.__str__() def __unicode__(self): return u'%s:%s' % (self.revision, self.short_id) def __eq__(self, other): return self.raw_id == other.raw_id @LazyProperty def last(self): if self.repository is None: raise ChangesetError("Cannot check if it's most recent revision") return self.raw_id == self.repository.revisions[-1] @LazyProperty def parents(self): raise NotImplementedError @LazyProperty 
def children(self): raise NotImplementedError @LazyProperty
MIT License
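A backend would override this with a real identifier; a minimal sketch of the contract (this in-memory class is hypothetical, not one of vcs's backends):

class InMemoryChangeset:
    def __init__(self, raw_id):
        self.raw_id = raw_id

    @property
    def id(self):
        # Typically the full commit hash; a backend may instead return a
        # short form or symbolic name such as 'tip'.
        return self.raw_id

cs = InMemoryChangeset('a' * 40)
assert cs.id == 'a' * 40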
rockyzhengwu/transformer-en-zh
transformer/utils/tokenizer.py
Subtokenizer.encode
python
def encode(self, raw_string, add_eos=False): ret = [] tokens = _split_string_to_tokens(_native_to_unicode(raw_string)) for token in tokens: ret.extend(self._token_to_subtoken_ids(token)) if add_eos: ret.append(EOS_ID) return ret
Encodes a string into a list of int subtoken ids.
https://github.com/rockyzhengwu/transformer-en-zh/blob/4179c12f22842893931567877901758f6e064381/transformer/utils/tokenizer.py#L123-L131
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import sys import unicodedata import numpy as np import six from six.moves import xrange import tensorflow as tf PAD = "<pad>" PAD_ID = 0 EOS = "<EOS>" EOS_ID = 1 RESERVED_TOKENS = [PAD, EOS] _ESCAPE_CHARS = set(u"\\_u;0123456789") _UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);") _UNDEFINED_UNICODE = u"\u3013" _ALPHANUMERIC_CHAR_SET = set( six.unichr(i) for i in xrange(sys.maxunicode) if (unicodedata.category(six.unichr(i)).startswith("L") or unicodedata.category(six.unichr(i)).startswith("N"))) _MIN_MIN_COUNT = 1 _MAX_MIN_COUNT = 1000 class Subtokenizer(object): def __init__(self, vocab_file, reserved_tokens=None): tf.logging.info("Initializing Subtokenizer from file %s." % vocab_file) if reserved_tokens is None: reserved_tokens = RESERVED_TOKENS self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens) self.alphabet = _generate_alphabet_dict(self.subtoken_list) self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list) self.max_subtoken_length = 0 for subtoken in self.subtoken_list: self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken)) self._cache_size = 2 ** 20 self._cache = [(None, None)] * self._cache_size @staticmethod def init_from_files( vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1e6, reserved_tokens=None): if reserved_tokens is None: reserved_tokens = RESERVED_TOKENS if tf.gfile.Exists(vocab_file): tf.logging.info("Vocab file already exists (%s)" % vocab_file) else: tf.logging.info("Begin steps to create subtoken vocabulary...") token_counts = _count_tokens(files, file_byte_limit) alphabet = _generate_alphabet_dict(token_counts) subtoken_list = _generate_subtokens_with_target_vocab_size( token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens) tf.logging.info("Generated vocabulary with %d subtokens." % len(subtoken_list)) _save_vocab_file(vocab_file, subtoken_list) return Subtokenizer(vocab_file)
Apache License 2.0
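Typical usage, assuming TensorFlow and the package are importable and a vocabulary file already exists (the path is hypothetical; Subtokenizer expects one subtoken per line):

from transformer.utils.tokenizer import Subtokenizer, EOS_ID

subtokenizer = Subtokenizer("vocab.subtokens")
ids = subtokenizer.encode("Hello world", add_eos=True)
assert ids[-1] == EOS_ID   # add_eos appends EOS_ID (1) to the id list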
geopython/owslib
owslib/util.py
bind_url
python
def bind_url(url): if url.find('?') == -1: binder = '?' if url.find('=') != -1: if url.find('&', -1) != -1: binder = '' else: binder = '&' if url.find('?') != -1: if url.find('?', -1) != -1: binder = '' elif url.find('&', -1) == -1: binder = '&' return '%s%s' % (url, binder)
binds an HTTP GET query string endpoint
https://github.com/geopython/owslib/blob/8db1fbd38b61381e70e9cd2ce81da61c5ab69d61/owslib/util.py#L722-L740
import os import sys from collections import OrderedDict from dateutil import parser from datetime import datetime, timedelta import pytz from owslib.etree import etree, ParseError from owslib.namespaces import Namespaces from urllib.parse import urlsplit, urlencode, urlparse, parse_qs, urlunparse, parse_qsl import copy from io import StringIO, BytesIO import re from copy import deepcopy import warnings import requests from requests.auth import AuthBase import codecs class ServiceException(Exception): pass def dict_union(d1, d2): return dict((x, (dict_union(d1.get(x, {}), d2[x]) if isinstance(d2.get(x), dict) else d2.get(x, d1.get(x)))) for x in set(list(d1.keys()) + list(d2.keys()))) class InfiniteDateTime(object): def __lt__(self, other): return False def __gt__(self, other): return True def timetuple(self): return tuple() class NegativeInfiniteDateTime(object): def __lt__(self, other): return True def __gt__(self, other): return False def timetuple(self): return tuple() first_cap_re = re.compile('(.)([A-Z][a-z]+)') all_cap_re = re.compile('([a-z0-9])([A-Z])') def format_string(prop_string): if prop_string is None: return '' st_r = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', prop_string) st_r = st_r.replace(' ', '') return re.sub('([a-z0-9])([A-Z])', r'\1_\2', st_r).lower() def xml_to_dict(root, prefix=None, depth=1, diction=None): ret = diction if diction is not None else dict() for child in root: val = testXMLValue(child) if val is None or val == '': if depth > 1: ret = xml_to_dict(child, prefix=prefix, depth=(depth - 1), diction=ret) continue key = format_string(child.tag.split('}')[-1]) if prefix is not None: key = prefix + key ret[key] = val if depth > 1: ret = xml_to_dict(child, prefix=prefix, depth=(depth - 1), diction=ret) return ret class ResponseWrapper(object): def __init__(self, response): self._response = response def info(self): return self._response.headers def read(self): return self._response.content def geturl(self): return self._response.url.replace('&&', '&') def openURL(url_base, data=None, method='Get', cookies=None, username=None, password=None, timeout=30, headers=None, verify=True, cert=None, auth=None): headers = headers if headers is not None else {} rkwargs = {} rkwargs['timeout'] = timeout if auth: if username: auth.username = username if password: auth.password = password if cert: auth.cert = cert verify = verify and auth.verify else: auth = Authentication(username, password, cert, verify) if auth.username and auth.password: rkwargs['auth'] = (auth.username, auth.password) elif auth.auth_delegate is not None: rkwargs['auth'] = auth.auth_delegate rkwargs['cert'] = auth.cert rkwargs['verify'] = verify method = method.split("}")[-1] if method.lower() == 'post': try: etree.fromstring(data) headers['Content-Type'] = 'text/xml' except (ParseError, UnicodeEncodeError): pass rkwargs['data'] = data elif method.lower() == 'get': rkwargs['params'] = data else: raise ValueError("Unknown method ('%s'), expected 'get' or 'post'" % method) if cookies is not None: rkwargs['cookies'] = cookies req = requests.request(method.upper(), url_base, headers=headers, **rkwargs) if req.status_code in [400, 401]: raise ServiceException(req.text) if req.status_code in [404, 500, 502, 503, 504]: req.raise_for_status() if 'Content-Type' in req.headers and req.headers['Content-Type'] in ['text/xml', 'application/xml', 'application/vnd.ogc.se_xml']: se_tree = etree.fromstring(req.content) possible_errors = [ '{http://www.opengis.net/ows}Exception', '{http://www.opengis.net/ows/1.1}Exception', 
'{http://www.opengis.net/ogc}ServiceException', 'ServiceException' ] for possible_error in possible_errors: serviceException = se_tree.find(possible_error) if serviceException is not None: raise ServiceException('\n'.join([t.strip() for t in serviceException.itertext() if t.strip()])) return ResponseWrapper(req) OWS_NAMESPACE = 'http://www.opengis.net/ows/1.1' def nspath(path, ns=OWS_NAMESPACE): if ns is None or path is None: return -1 components = [] for component in path.split('/'): if component != '*': component = '{%s}%s' % (ns, component) components.append(component) return '/'.join(components) def nspath_eval(xpath, namespaces): out = [] for chunks in xpath.split('/'): namespace, element = chunks.split(':') out.append('{%s}%s' % (namespaces[namespace], element)) return '/'.join(out) def cleanup_namespaces(element): if etree.__name__ == 'lxml.etree': etree.cleanup_namespaces(element) return element else: return etree.fromstring(etree.tostring(element)) def add_namespaces(root, ns_keys): if isinstance(ns_keys, str): ns_keys = [ns_keys] namespaces = Namespaces() ns_keys = [(x, namespaces.get_namespace(x)) for x in ns_keys] if etree.__name__ != 'lxml.etree': existing_namespaces = set() for elem in root.iter(): if elem.tag[0] == "{": uri, tag = elem.tag[1:].split("}") existing_namespaces.add(namespaces.get_namespace_from_url(uri)) for key, link in ns_keys: if link is not None and key not in existing_namespaces: root.set("xmlns:%s" % key, link) return root else: new_map = root.nsmap for key, link in ns_keys: if link is not None: new_map[key] = link new_root = etree.Element(root.tag, nsmap=new_map) for a, v in list(root.items()): new_root.set(a, v) for child in root: new_root.append(deepcopy(child)) return new_root def getXMLInteger(elem, tag): e = elem.find(tag) if e is None: raise ValueError('Missing %s in %s' % (tag, elem)) return int(e.text.strip()) def testXMLValue(val, attrib=False): if val is not None: if attrib: return val.strip() elif val.text: return val.text.strip() else: return None else: return None def testXMLAttribute(element, attribute): if element is not None: return element.get(attribute) return None def http_post(url=None, request=None, lang='en-US', timeout=10, username=None, password=None, auth=None): if url is None: raise ValueError("URL required") u = urlsplit(url) headers = { 'User-Agent': 'OWSLib (https://geopython.github.io/OWSLib)', 'Content-type': 'text/xml', 'Accept': 'text/xml,application/xml', 'Accept-Language': lang, 'Accept-Encoding': 'gzip,deflate', 'Host': u.netloc, } if isinstance(request, dict): headers['Content-type'] = 'application/json' headers.pop('Accept') rkwargs = {} if auth: if username: auth.username = username if password: auth.password = password else: auth = Authentication(username, password) if auth.username is not None and auth.password is not None: rkwargs['auth'] = (auth.username, auth.password) elif auth.auth_delegate is not None: rkwargs['auth'] = auth.auth_delegate rkwargs['verify'] = auth.verify rkwargs['cert'] = auth.cert if not isinstance(request, dict): return requests.post(url, request, headers=headers, **rkwargs) else: return requests.post(url, json=request, headers=headers, **rkwargs) def http_get(*args, **kwargs): rkwargs = copy.deepcopy(kwargs) auth = rkwargs.pop('auth', None) if auth is not None: if isinstance(auth, (tuple, list)): auth = Authentication(*auth) else: auth = Authentication() if 'username' in rkwargs: auth.username = rkwargs.pop('username') if 'password' in rkwargs: auth.password = rkwargs.pop('password') if 
'cert' in rkwargs: auth.cert = rkwargs.pop('cert') if 'verify' in rkwargs: auth.verify = rkwargs.pop('verify') if auth.username and auth.password: rkwargs.setdefault('auth', (auth.username, auth.password)) elif auth.auth_delegate is not None: rkwargs['auth'] = auth.auth_delegate else: rkwargs.setdefault('auth', None) rkwargs.setdefault('cert', rkwargs.get('cert')) rkwargs.setdefault('verify', rkwargs.get('verify', True)) return requests.get(*args, **rkwargs) def element_to_string(element, encoding=None, xml_declaration=False): output = None if encoding is None: encoding = "ISO-8859-1" if etree.__name__ == 'lxml.etree': if xml_declaration: if encoding in ['unicode', 'utf-8']: output = '<?xml version="1.0" encoding="utf-8" standalone="no"?>\n{}'.format( etree.tostring(element, encoding='unicode')) else: output = etree.tostring(element, encoding=encoding, xml_declaration=True) else: output = etree.tostring(element) else: if xml_declaration: output = '<?xml version="1.0" encoding="{}" standalone="no"?>\n{}'.format( encoding, etree.tostring(element, encoding=encoding)) else: output = etree.tostring(element) return output def xml2string(xml): warnings.warn("DEPRECIATION WARNING! You should now use the 'element_to_string' method \ The 'xml2string' method will be removed in a future version of OWSLib.") return '<?xml version="1.0" encoding="ISO-8859-1" standalone="no"?>\n' + xml def xmlvalid(xml, xsd): xsd1 = etree.parse(xsd) xsd2 = etree.XMLSchema(xsd1) doc = etree.parse(StringIO(xml)) return xsd2.validate(doc) def xmltag_split(tag): try: return tag.split('}')[1] except Exception: return tag def getNamespace(element): if element.tag[0] == '{': return element.tag[1:].split("}")[0] else: return "" def build_get_url(base_url, params, overwrite=False): qs_base = [] if base_url.find('?') != -1: qs_base = parse_qsl(base_url.split('?')[1]) qs_params = [] for key, value in list(params.items()): qs_params.append((key, value)) qs = qs_add = [] if overwrite is True: qs = qs_params qs_add = qs_base else: qs = qs_base qs_add = qs_params pars = [x[0] for x in qs] for key, value in qs_add: if key not in pars: qs.append((key, value)) urlqs = urlencode(tuple(qs)) return base_url.split('?')[0] + '?' 
+ urlqs def dump(obj, prefix=''): print(("{} {}.{} : {}".format(prefix, obj.__module__, obj.__class__.__name__, obj.__dict__))) def getTypedValue(data_type, value): if value is None: return if data_type == 'boolean': return True if value.lower() == 'true' else False elif data_type == 'integer': return int(value) elif data_type == 'float': return float(value) elif data_type == 'string': return str(value) else: return value def extract_time(element): if element is None: return None try: dt = parser.parse(element.text) except Exception: att = testXMLValue(element.attrib.get('indeterminatePosition'), True) if att and att == 'now': dt = datetime.utcnow() dt.replace(tzinfo=pytz.utc) else: dt = None return dt def extract_xml_list(elements): keywords = (re.split(r'[\n\r]+', f.text) for f in elements if f.text) flattened = (item.strip() for sublist in keywords for item in sublist) remove_blank = [_f for _f in flattened if _f] return remove_blank def strip_bom(raw_text): boms = [ codecs.BOM_UTF8, codecs.BOM, codecs.BOM_BE, codecs.BOM_LE, codecs.BOM_UTF16, codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE, codecs.BOM_UTF32, codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE ] if isinstance(raw_text, bytes): for bom in boms: if raw_text.startswith(bom): return raw_text[len(bom):] return raw_text def clean_ows_url(url): if url is None or not url.startswith('http'): return url filtered_kvp = {} basic_service_elements = ('service', 'version', 'request') parsed = urlparse(url) qd = parse_qs(parsed.query, keep_blank_values=True) for key, value in list(qd.items()): if key.lower() not in basic_service_elements: filtered_kvp[key] = value newurl = urlunparse([ parsed.scheme, parsed.netloc, parsed.path, parsed.params, urlencode(filtered_kvp, doseq=True), parsed.fragment ]) return newurl
BSD 3-Clause New or Revised License
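The returned binder depends on how the URL already ends; a few concrete cases (assuming owslib is installed):

from owslib.util import bind_url

bind_url('http://example.com/ows')               # -> 'http://example.com/ows?'
bind_url('http://example.com/ows?service=WMS')   # -> 'http://example.com/ows?service=WMS&'
bind_url('http://example.com/ows?service=WMS&')  # unchanged: already ends with a binder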
opennode/waldur-core
waldur_core/logging/views.py
EmailHookViewSet.create
python
def create(self, request, *args, **kwargs): return super(EmailHookViewSet, self).create(request, *args, **kwargs)
To create a new email hook, issue **POST** against */api/hooks-email/* as an
authenticated user. You should specify a list of event_types or event_groups.

Example of a request:

.. code-block:: http

    POST /api/hooks-email/ HTTP/1.1
    Content-Type: application/json
    Accept: application/json
    Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
    Host: example.com

    {
        "event_types": ["openstack_instance_start_succeeded"],
        "event_groups": ["users"],
        "email": "test@example.com"
    }

You may temporarily disable a hook without deleting it by issuing the
following **PATCH** request against the hook URL:

.. code-block:: javascript

    {
        "is_active": "false"
    }
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/logging/views.py#L409-L438
from __future__ import unicode_literals from django.core.exceptions import PermissionDenied from django.db.models import Count from django.utils.translation import ugettext_lazy as _ from django_filters.rest_framework import DjangoFilterBackend from rest_framework import response, viewsets, permissions, status, decorators, mixins from waldur_core.core import serializers as core_serializers, filters as core_filters, permissions as core_permissions from waldur_core.core.managers import SummaryQuerySet from waldur_core.logging import elasticsearch_client, models, serializers, filters, utils from waldur_core.logging.loggers import get_event_groups, get_alert_groups, event_logger class EventViewSet(mixins.CreateModelMixin, viewsets.GenericViewSet): permission_classes = (permissions.IsAuthenticated, core_permissions.IsAdminOrReadOnly) filter_backends = (filters.EventFilterBackend,) serializer_class = serializers.EventSerializer def get_queryset(self): return elasticsearch_client.ElasticsearchResultList() def list(self, request, *args, **kwargs): self.queryset = self.filter_queryset(self.get_queryset()) page = self.paginate_queryset(self.queryset) if page is not None: return self.get_paginated_response(page) return response.Response(self.queryset) def get(self, request, *args, **kwargs): return self.list(request, *args, **kwargs) def perform_create(self, serializer): scope = serializer.validated_data.get('scope') context = {'scope': scope} if scope is not None else {} event_logger.custom.process( level=serializer.validated_data.get('level'), message_template=serializer.validated_data.get('message'), event_type='custom_notification', event_context=context, fail_silently=False ) @decorators.list_route() def count(self, request, *args, **kwargs): self.queryset = self.filter_queryset(self.get_queryset()) return response.Response({'count': self.queryset.count()}, status=status.HTTP_200_OK) @decorators.list_route() def count_history(self, request, *args, **kwargs): queryset = self.filter_queryset(self.get_queryset()) mapped = { 'start': request.query_params.get('start'), 'end': request.query_params.get('end'), 'points_count': request.query_params.get('points_count'), 'point_list': request.query_params.getlist('point'), } serializer = core_serializers.HistorySerializer(data={k: v for k, v in mapped.items() if v}) serializer.is_valid(raise_exception=True) timestamp_ranges = [{'end': point_date} for point_date in serializer.get_filter_data()] aggregated_count = queryset.aggregated_count(timestamp_ranges) return response.Response( [{'point': int(ac['end']), 'object': {'count': ac['count']}} for ac in aggregated_count], status=status.HTTP_200_OK) @decorators.list_route() def scope_types(self, request, *args, **kwargs): return response.Response(utils.get_scope_types_mapping().keys()) @decorators.list_route() def event_groups(self, request, *args, **kwargs): return response.Response(get_event_groups()) class AlertViewSet(mixins.CreateModelMixin, viewsets.ReadOnlyModelViewSet): queryset = models.Alert.objects.all() serializer_class = serializers.AlertSerializer lookup_field = 'uuid' filter_backends = ( DjangoFilterBackend, filters.AdditionalAlertFilterBackend, filters.ExternalAlertFilterBackend, filters.AlertScopeFilterBackend, ) filter_class = filters.AlertFilter def get_queryset(self): return models.Alert.objects.filtered_for_user(self.request.user).order_by('-created') def list(self, request, *args, **kwargs): return super(AlertViewSet, self).list(request, *args, **kwargs) def create(self, request, *args, 
**kwargs): return super(AlertViewSet, self).create(request, *args, **kwargs) @decorators.detail_route(methods=['post']) def close(self, request, *args, **kwargs): if not request.user.is_staff: raise PermissionDenied() alert = self.get_object() alert.close() return response.Response(status=status.HTTP_204_NO_CONTENT) @decorators.detail_route(methods=['post']) def acknowledge(self, request, *args, **kwargs): alert = self.get_object() if not alert.acknowledged: alert.acknowledge() return response.Response(status=status.HTTP_200_OK) else: return response.Response({'detail': _('Alert is already acknowledged.')}, status=status.HTTP_409_CONFLICT) @decorators.detail_route(methods=['post']) def cancel_acknowledgment(self, request, *args, **kwargs): alert = self.get_object() if alert.acknowledged: alert.cancel_acknowledgment() return response.Response(status=status.HTTP_200_OK) else: return response.Response({'detail': _('Alert is not acknowledged.')}, status=status.HTTP_409_CONFLICT) @decorators.list_route() def stats(self, request, *args, **kwargs): queryset = self.filter_queryset(self.get_queryset()) alerts_severities_count = queryset.values('severity').annotate(count=Count('severity')) severity_names = dict(models.Alert.SeverityChoices.CHOICES) alerts_severities_count = { severity_names[asc['severity']].lower(): asc['count'] for asc in alerts_severities_count} for severity_name in severity_names.values(): if severity_name.lower() not in alerts_severities_count: alerts_severities_count[severity_name.lower()] = 0 return response.Response(alerts_severities_count, status=status.HTTP_200_OK) def perform_create(self, serializer): if not self.request.user.is_staff: raise PermissionDenied() super(AlertViewSet, self).perform_create(serializer) @decorators.list_route() def alert_groups(self, request, *args, **kwargs): return response.Response(get_alert_groups()) class BaseHookViewSet(viewsets.ModelViewSet): filter_backends = (core_filters.StaffOrUserFilter, DjangoFilterBackend) lookup_field = 'uuid' class WebHookViewSet(BaseHookViewSet): queryset = models.WebHook.objects.all() filter_class = filters.WebHookFilter serializer_class = serializers.WebHookSerializer def create(self, request, *args, **kwargs): return super(WebHookViewSet, self).create(request, *args, **kwargs) class EmailHookViewSet(BaseHookViewSet): queryset = models.EmailHook.objects.all() filter_class = filters.EmailHookFilter serializer_class = serializers.EmailHookSerializer
MIT License
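The documented POST can be reproduced client-side with requests; the host and token are the placeholders from the docstring:

import requests

resp = requests.post(
    "https://example.com/api/hooks-email/",
    headers={"Authorization": "Token c84d653b9ec92c6cbac41c706593e66f567a7fa4"},
    json={
        "event_types": ["openstack_instance_start_succeeded"],
        "event_groups": ["users"],
        "email": "test@example.com",
    },
)
resp.raise_for_status()   # a DRF create view returns 201 Created on success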
tao12345666333/tornado-zh
tornado/httputil.py
_parse_request_range
python
def _parse_request_range(range_header): unit, _, value = range_header.partition("=") unit, value = unit.strip(), value.strip() if unit != "bytes": return None start_b, _, end_b = value.partition("-") try: start = _int_or_none(start_b) end = _int_or_none(end_b) except ValueError: return None if end is not None: if start is None: if end != 0: start = -end end = None else: end += 1 return (start, end)
Parses a Range header. Returns either ``None`` or tuple ``(start, end)``. Note that while the HTTP headers use inclusive byte positions, this method returns indexes suitable for use in slices. >>> start, end = _parse_request_range("bytes=1-2") >>> start, end (1, 3) >>> [0, 1, 2, 3, 4][start:end] [1, 2] >>> _parse_request_range("bytes=6-") (6, None) >>> _parse_request_range("bytes=-6") (-6, None) >>> _parse_request_range("bytes=-0") (None, 0) >>> _parse_request_range("bytes=") (None, None) >>> _parse_request_range("foo=42") >>> _parse_request_range("bytes=1-2,6-10") Note: only supports one range (ex, ``bytes=1-2,6-10`` is not allowed). See [0] for the details of the range header. [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
https://github.com/tao12345666333/tornado-zh/blob/e9e8519beb147d9e1290f6a4fa7d61123d1ecb1c/tornado/httputil.py#L600-L646
from __future__ import absolute_import, division, print_function, with_statement import calendar import collections import copy import datetime import email.utils import numbers import re import time from tornado.escape import native_str, parse_qs_bytes, utf8 from tornado.log import gen_log from tornado.util import ObjectDict try: import Cookie except ImportError: import http.cookies as Cookie try: from httplib import responses except ImportError: from http.client import responses responses try: from urllib import urlencode except ImportError: from urllib.parse import urlencode try: from ssl import SSLError except ImportError: class SSLError(Exception): pass _CRLF_RE = re.compile(r'\r?\n') class _NormalizedHeaderCache(dict): def __init__(self, size): super(_NormalizedHeaderCache, self).__init__() self.size = size self.queue = collections.deque() def __missing__(self, key): normalized = "-".join([w.capitalize() for w in key.split("-")]) self[key] = normalized self.queue.append(key) if len(self.queue) > self.size: old_key = self.queue.popleft() del self[old_key] return normalized _normalized_headers = _NormalizedHeaderCache(1000) class HTTPHeaders(collections.MutableMapping): def __init__(self, *args, **kwargs): self._dict = {} self._as_list = {} self._last_key = None if (len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], HTTPHeaders)): for k, v in args[0].get_all(): self.add(k, v) else: self.update(*args, **kwargs) def add(self, name, value): norm_name = _normalized_headers[name] self._last_key = norm_name if norm_name in self: self._dict[norm_name] = (native_str(self[norm_name]) + ',' + native_str(value)) self._as_list[norm_name].append(value) else: self[norm_name] = value def get_list(self, name): norm_name = _normalized_headers[name] return self._as_list.get(norm_name, []) def get_all(self): for name, values in self._as_list.items(): for value in values: yield (name, value) def parse_line(self, line): if line[0].isspace(): new_part = ' ' + line.lstrip() self._as_list[self._last_key][-1] += new_part self._dict[self._last_key] += new_part else: name, value = line.split(":", 1) self.add(name, value.strip()) @classmethod def parse(cls, headers): h = cls() for line in _CRLF_RE.split(headers): if line: h.parse_line(line) return h def __setitem__(self, name, value): norm_name = _normalized_headers[name] self._dict[norm_name] = value self._as_list[norm_name] = [value] def __getitem__(self, name): return self._dict[_normalized_headers[name]] def __delitem__(self, name): norm_name = _normalized_headers[name] del self._dict[norm_name] del self._as_list[norm_name] def __len__(self): return len(self._dict) def __iter__(self): return iter(self._dict) def copy(self): return HTTPHeaders(self) __copy__ = copy class HTTPServerRequest(object): def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None, body=None, host=None, files=None, connection=None, start_line=None): if start_line is not None: method, uri, version = start_line self.method = method self.uri = uri self.version = version self.headers = headers or HTTPHeaders() self.body = body or b"" context = getattr(connection, 'context', None) self.remote_ip = getattr(context, 'remote_ip', None) self.protocol = getattr(context, 'protocol', "http") self.host = host or self.headers.get("Host") or "127.0.0.1" self.files = files or {} self.connection = connection self._start_time = time.time() self._finish_time = None self.path, sep, self.query = uri.partition('?') self.arguments = parse_qs_bytes(self.query, keep_blank_values=True) 
self.query_arguments = copy.deepcopy(self.arguments) self.body_arguments = {} def supports_http_1_1(self): return self.version == "HTTP/1.1" @property def cookies(self): if not hasattr(self, "_cookies"): self._cookies = Cookie.SimpleCookie() if "Cookie" in self.headers: try: self._cookies.load( native_str(self.headers["Cookie"])) except Exception: self._cookies = {} return self._cookies def write(self, chunk, callback=None): assert isinstance(chunk, bytes) assert self.version.startswith("HTTP/1."), "deprecated interface only supported in HTTP/1.x" self.connection.write(chunk, callback=callback) def finish(self): self.connection.finish() self._finish_time = time.time() def full_url(self): return self.protocol + "://" + self.host + self.uri def request_time(self): if self._finish_time is None: return time.time() - self._start_time else: return self._finish_time - self._start_time def get_ssl_certificate(self, binary_form=False): try: return self.connection.stream.socket.getpeercert( binary_form=binary_form) except SSLError: return None def _parse_body(self): parse_body_arguments( self.headers.get("Content-Type", ""), self.body, self.body_arguments, self.files, self.headers) for k, v in self.body_arguments.items(): self.arguments.setdefault(k, []).extend(v) def __repr__(self): attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) return "%s(%s, headers=%s)" % ( self.__class__.__name__, args, dict(self.headers)) class HTTPInputError(Exception): pass class HTTPOutputError(Exception): pass class HTTPServerConnectionDelegate(object): def start_request(self, server_conn, request_conn): raise NotImplementedError() def on_close(self, server_conn): pass class HTTPMessageDelegate(object): def headers_received(self, start_line, headers): pass def data_received(self, chunk): pass def finish(self): pass def on_connection_close(self): pass class HTTPConnection(object): def write_headers(self, start_line, headers, chunk=None, callback=None): raise NotImplementedError() def write(self, chunk, callback=None): raise NotImplementedError() def finish(self): raise NotImplementedError() def url_concat(url, args): if not args: return url if url[-1] not in ('?', '&'): url += '&' if ('?' in url) else '?' return url + urlencode(args) class HTTPFile(ObjectDict): pass
MIT License
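Because the returned indexes are already slice-ready (end is exclusive), serving a partial body is a one-liner; a sketch using the private helper shown above:

from tornado.httputil import _parse_request_range

content = b"Hello, world"
rng = _parse_request_range("bytes=0-4")
if rng is not None:
    start, end = rng          # (0, 5): header positions are inclusive, slice end is not
    partial = content[start:end]
    assert partial == b"Hello"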
mozilla/badges.mozilla.org
badger/printing.py
fit_text
python
def fit_text(c, text, x, y, max_w, max_h, font_name='Helvetica', padding_w=4.5, padding_h=4.5, font_decrement=0.0625): max_w -= (padding_w * 2.0) max_h -= (padding_h * 2.0) x += padding_w y += padding_h font_size = max_h while font_size > 1.0: ps = ParagraphStyle(name='text', alignment=TA_CENTER, fontName=font_name, fontSize=font_size, leading=font_size) p = Paragraph(text, ps) actual_w, actual_h = p.wrapOn(c, max_w, max_h) if actual_h > max_h or actual_w > max_w: font_size -= font_decrement else: y_pad = (max_h - actual_h) / 2 p.drawOn(c, x, y + y_pad) return
Draw text, reducing font size until it fits within a given max width and height.
https://github.com/mozilla/badges.mozilla.org/blob/283e1a02a558c47b4bd3c2f316e47e6149784d33/badger/printing.py#L202-L226
import logging import math import urllib import urllib2 try: from cStringIO import cStringIO as StringIO except ImportError: from StringIO import StringIO from reportlab.pdfgen import canvas from reportlab.lib import pagesizes from reportlab.lib.units import inch from reportlab.platypus import ( SimpleDocTemplate, BaseDocTemplate, Paragraph, Preformatted, Spacer, PageBreak, Frame, FrameBreak, PageTemplate, Image, Table) from reportlab.platypus.doctemplate import LayoutError from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle from reportlab.rl_config import defaultPageSize from reportlab.lib.units import inch from reportlab.lib.enums import TA_LEFT, TA_CENTER from reportlab.lib import colors from reportlab.lib import textsplit from reportlab.lib.utils import ImageReader from django.conf import settings from django.http import (HttpResponseRedirect, HttpResponse, HttpResponseForbidden, HttpResponseNotFound) from django.utils.html import conditional_escape def render_claims_to_pdf(request, slug, claim_group, deferred_awards): metrics = dict( page_width=(8.5 * inch), page_height=(11.0 * inch), top_margin=(0.5 * inch), left_margin=((25.0 / 32.0) * inch), qr_overlap=((1.0 / 32.0) * inch), padding=((1.0 / 16.0) * inch), horizontal_spacing=((5.0 / 16.0) * inch), vertical_spacing=((13.0 / 64.0) * inch), width=(1.5 * inch), height=(1.5 * inch), ) debug = (request.GET.get('debug', False) is not False) pagesize = (metrics['page_width'], metrics['page_height']) cols = int((metrics['page_width'] - metrics['left_margin']) / (metrics['width'] + metrics['horizontal_spacing'])) rows = int((metrics['page_height'] - metrics['top_margin']) / (metrics['height'] + metrics['vertical_spacing'])) per_page = (cols * rows) label_ct = len(deferred_awards) page_ct = math.ceil(label_ct / per_page) pages = [deferred_awards[x:x + (per_page)] for x in range(0, label_ct, per_page)] response = HttpResponse(content_type='application/pdf; charset=utf-8') if not debug: response['Content-Disposition'] = ('attachment; filename="%s-%s.pdf"' % (slug.encode('utf-8', 'replace'), claim_group)) badge_img = None fout = StringIO() c = canvas.Canvas(fout, pagesize=pagesize) for page in pages: c.translate(metrics['left_margin'], metrics['page_height'] - metrics['top_margin']) for row in range(0, rows, 1): c.translate(0.0, 0 - metrics['height']) c.saveState() for col in range(0, cols, 1): try: da = page.pop(0) except IndexError: continue if not badge_img: image_fin = da.badge.image.file image_fin.open() badge_img = ImageReader(StringIO(image_fin.read())) c.saveState() render_label(request, c, metrics, da, badge_img, debug) c.restoreState() dx = (metrics['width'] + metrics['horizontal_spacing']) c.translate(dx, 0.0) c.restoreState() c.translate(0.0, 0 - metrics['vertical_spacing']) c.showPage() c.save() response.write(fout.getvalue()) return response def render_label(request, c, metrics, da, badge_img, debug): badge = da.badge badge_image_width = (1.0 + (1.0 / 64.0)) * inch badge_image_height = (1.0 + (1.0 / 64.0)) * inch qr_left = badge_image_width - metrics['qr_overlap'] qr_bottom = badge_image_height - metrics['qr_overlap'] qr_width = metrics['width'] - qr_left qr_height = metrics['height'] - qr_bottom if False and debug: c.setLineWidth(0.3) c.rect(0, 0, metrics['width'], metrics['height']) c.rect(qr_left, qr_bottom, qr_width, qr_height) c.rect(0, 0, badge_image_width, badge_image_height) fit_text(c, da.badge.title, 0.0, badge_image_height, badge_image_width, qr_height) c.saveState() c.rotate(-90) code_height = qr_height 
* (0.45) claim_height = qr_height - code_height c.setFont("Courier", code_height) c.drawCentredString(0 - (badge_image_width / 2.0), metrics['height'] - code_height, da.claim_code) text = """ <font name="Helvetica">Claim at</font> <font name="Courier">%s</font> """ % (settings.SITE_TITLE) fit_text(c, text, 0 - badge_image_height, badge_image_width, badge_image_width, claim_height) c.restoreState() claim_url = request.build_absolute_uri(da.get_claim_url()) qr_img = None try: from PyQRNative import QRCode, QRErrorCorrectLevel if len(claim_url) < 20: qr = QRCode(3, QRErrorCorrectLevel.L) elif len(claim_url) < 50: qr = QRCode(4, QRErrorCorrectLevel.L) else: qr = QRCode(10, QRErrorCorrectLevel.L) qr.addData(claim_url) qr.make() qr_img = ImageReader(qr.makeImage()) except ImportError: try: qr_url = ("http://api.qrserver.com/v1/create-qr-code/?%s" % urllib.urlencode({'size': '%sx%s' % (500, 500), 'data': claim_url})) qr_img = ImageReader(StringIO(urllib2.urlopen(qr_url).read())) except Exception: pass if qr_img: c.drawImage(qr_img, qr_left, qr_bottom, qr_width, qr_height) c.drawImage(badge_img, 0.0 * inch, 0.0 * inch, badge_image_width, badge_image_height)
BSD 3-Clause New or Revised License
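The pagination in render_claims_to_pdf above is plain grid arithmetic: divide the usable page area into label-sized cells, then chunk the award list into page-sized slices. A minimal standalone sketch of that math (metric values copied from the metrics dict; the labels list is an invented stand-in for deferred_awards):

```python
import math

inch = 72.0  # points per inch, as in reportlab.lib.units
page_width, page_height = 8.5 * inch, 11.0 * inch
left_margin, top_margin = (25.0 / 32.0) * inch, 0.5 * inch
width = height = 1.5 * inch
horizontal_spacing = (5.0 / 16.0) * inch
vertical_spacing = (13.0 / 64.0) * inch

# How many label cells fit across and down the page.
cols = int((page_width - left_margin) / (width + horizontal_spacing))
rows = int((page_height - top_margin) / (height + vertical_spacing))
per_page = cols * rows

labels = list(range(23))  # stand-in for deferred_awards
pages = [labels[i:i + per_page] for i in range(0, len(labels), per_page)]
assert len(pages) == math.ceil(len(labels) / per_page)
print(cols, rows, per_page, len(pages))  # 4 6 24 1
```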
eifinger/homeassistant-config
custom_components/here_weather/weather.py
get_temperature_from_here_data
python
def get_temperature_from_here_data( here_data: list, mode: str, offset: int = 0 ) -> float | None: if mode == MODE_DAILY_SIMPLE: temperature = get_attribute_from_here_data(here_data, "highTemperature", offset) else: temperature = get_attribute_from_here_data(here_data, "temperature", offset) if temperature is not None: return float(temperature) return None
Return the temperature from here_data.
https://github.com/eifinger/homeassistant-config/blob/fb26cb795ea407710e30fb679d2ca51cfad8cecf/custom_components/here_weather/weather.py#L277-L287
from __future__ import annotations from homeassistant.components.weather import ( ATTR_FORECAST_CONDITION, ATTR_FORECAST_PRECIPITATION, ATTR_FORECAST_PRECIPITATION_PROBABILITY, ATTR_FORECAST_PRESSURE, ATTR_FORECAST_TEMP, ATTR_FORECAST_TEMP_LOW, ATTR_FORECAST_TIME, ATTR_FORECAST_WIND_BEARING, ATTR_FORECAST_WIND_SPEED, Forecast, WeatherEntity, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS from homeassistant.core import HomeAssistant from homeassistant.helpers.entity import DeviceInfo from homeassistant.helpers.update_coordinator import ( CoordinatorEntity, DataUpdateCoordinator, ) from .const import ( CONDITION_CLASSES, DEFAULT_MODE, DOMAIN, MODE_ASTRONOMY, MODE_DAILY_SIMPLE, SENSOR_TYPES, ) from .utils import ( convert_temperature_unit_of_measurement_if_needed, get_attribute_from_here_data, ) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities ): here_weather_coordinators = hass.data[DOMAIN][entry.entry_id] entities_to_add = [] for sensor_type in SENSOR_TYPES: if sensor_type != MODE_ASTRONOMY: entities_to_add.append( HEREDestinationWeather( entry, here_weather_coordinators[sensor_type], sensor_type, ) ) async_add_entities(entities_to_add) class HEREDestinationWeather(CoordinatorEntity, WeatherEntity): def __init__( self, entry: ConfigEntry, coordinator: DataUpdateCoordinator, mode: str ) -> None: super().__init__(coordinator) self._name = entry.data[CONF_NAME] self._mode = mode self._unique_id = "".join( f"{entry.data[CONF_LATITUDE]}_{entry.data[CONF_LONGITUDE]}_{self._mode}".lower().split() ) @property def name(self): return f"{self._name} {self._mode}" @property def unique_id(self): return self._unique_id @property def condition(self) -> str | None: return get_condition_from_here_data(self.coordinator.data) @property def temperature(self) -> float | None: return get_temperature_from_here_data(self.coordinator.data, self._mode) @property def temperature_unit(self) -> str: return convert_temperature_unit_of_measurement_if_needed( self.coordinator.hass.config.units.name, TEMP_CELSIUS ) @property def pressure(self) -> float | None: return get_pressure_from_here_data(self.coordinator.data, self._mode) @property def humidity(self) -> float | None: if ( humidity := get_attribute_from_here_data(self.coordinator.data, "humidity") ) is not None: return float(humidity) return None @property def wind_speed(self) -> float | None: return get_wind_speed_from_here_data(self.coordinator.data) @property def wind_bearing(self) -> float | str | None: return get_wind_bearing_from_here_data(self.coordinator.data) @property def attribution(self) -> str | None: return None @property def visibility(self) -> float | None: if "visibility" in SENSOR_TYPES[self._mode]: if ( visibility := get_attribute_from_here_data( self.coordinator.data, "visibility" ) ) is not None: return float(visibility) return None @property def forecast(self) -> list[Forecast] | None: data: list[Forecast] = [] for offset in range(len(self.coordinator.data)): data.append( { ATTR_FORECAST_CONDITION: get_condition_from_here_data( self.coordinator.data, offset ), ATTR_FORECAST_TIME: get_time_from_here_data( self.coordinator.data, offset ), ATTR_FORECAST_PRECIPITATION_PROBABILITY: get_precipitation_probability( self.coordinator.data, self._mode, offset ), ATTR_FORECAST_PRECIPITATION: calc_precipitation( self.coordinator.data, offset ), ATTR_FORECAST_PRESSURE: get_pressure_from_here_data( self.coordinator.data, 
self._mode, offset ), ATTR_FORECAST_TEMP: get_high_or_default_temperature_from_here_data( self.coordinator.data, self._mode, offset ), ATTR_FORECAST_TEMP_LOW: get_low_or_default_temperature_from_here_data( self.coordinator.data, self._mode, offset ), ATTR_FORECAST_WIND_BEARING: get_wind_bearing_from_here_data( self.coordinator.data, offset ), ATTR_FORECAST_WIND_SPEED: get_wind_speed_from_here_data( self.coordinator.data, offset ), } ) return data @property def entity_registry_enabled_default(self): return self._mode == DEFAULT_MODE @property def device_info(self) -> DeviceInfo: return { "identifiers": {(DOMAIN, self._unique_id)}, "name": self.name, "manufacturer": "here.com", "entry_type": "service", } def get_wind_speed_from_here_data(here_data: list, offset: int = 0) -> float: wind_speed = get_attribute_from_here_data(here_data, "windSpeed", offset) assert wind_speed is not None return float(wind_speed) def get_wind_bearing_from_here_data(here_data: list, offset: int = 0) -> int: wind_bearing = get_attribute_from_here_data(here_data, "windDirection", offset) assert wind_bearing is not None return int(wind_bearing) def get_time_from_here_data(here_data: list, offset: int = 0) -> str: time = get_attribute_from_here_data(here_data, "utcTime", offset) assert time is not None return time def get_pressure_from_here_data( here_data: list, mode: str, offset: int = 0 ) -> float | None: if "barometerPressure" in SENSOR_TYPES[mode]: if ( pressure := get_attribute_from_here_data( here_data, "barometerPressure", offset ) ) is not None: return float(pressure) return None def get_precipitation_probability( here_data: list, mode: str, offset: int = 0 ) -> int | None: if "precipitationProbability" in SENSOR_TYPES[mode]: if ( precipitation_probability := get_attribute_from_here_data( here_data, "precipitationProbability", offset ) ) is not None: return int(precipitation_probability) return None def get_condition_from_here_data(here_data: list, offset: int = 0) -> str | None: return next( ( k for k, v in CONDITION_CLASSES.items() if get_attribute_from_here_data(here_data, "iconName", offset) in v ), None, ) def get_high_or_default_temperature_from_here_data( here_data: list, mode: str, offset: int = 0 ) -> float | None: temperature = get_attribute_from_here_data(here_data, "highTemperature", offset) if temperature is not None: return float(temperature) return get_temperature_from_here_data(here_data, mode, offset) def get_low_or_default_temperature_from_here_data( here_data: list, mode: str, offset: int = 0 ) -> float | None: temperature = get_attribute_from_here_data(here_data, "lowTemperature", offset) if temperature is not None: return float(temperature) return get_temperature_from_here_data(here_data, mode, offset)
MIT License
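The mode switch in get_temperature_from_here_data just selects which attribute to read from the HERE payload. A standalone sketch with a stubbed-out helper (the MODE_DAILY_SIMPLE value and the helper body are assumptions; the real ones live in const.py and utils.py):

```python
from typing import Optional

MODE_DAILY_SIMPLE = "forecast_7days_simple"  # assumed constant value

def get_attribute_from_here_data(here_data: list, attr: str,
                                 offset: int = 0) -> Optional[str]:
    # Simplified stand-in for the helper imported from .utils.
    try:
        return here_data[offset].get(attr)
    except IndexError:
        return None

def get_temperature(here_data: list, mode: str, offset: int = 0) -> Optional[float]:
    # Daily-simple forecasts expose highTemperature; everything else
    # carries a plain temperature attribute.
    key = "highTemperature" if mode == MODE_DAILY_SIMPLE else "temperature"
    value = get_attribute_from_here_data(here_data, key, offset)
    return float(value) if value is not None else None

data = [{"temperature": "21.5", "highTemperature": "24.0"}]
print(get_temperature(data, "observation"))      # 21.5
print(get_temperature(data, MODE_DAILY_SIMPLE))  # 24.0
```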
allegro/django-powerdns-dnssec
powerdns/tests/utils.py
user_client
python
def user_client(user): client = APIClient() client.force_authenticate(user=user) return client
Returns an authenticated API client for the given user
https://github.com/allegro/django-powerdns-dnssec/blob/333bdc668b6cda1a2ff240efd814a1896a1a8e07/powerdns/tests/utils.py#L141-L145
import functools as ft import factory from django.contrib.auth import get_user_model from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ValidationError from django.test import TestCase from factory.django import DjangoModelFactory from rest_framework.test import APIClient from powerdns.models import ( DeleteRequest, Domain, DomainOwner, DomainTemplate, Record, RecordRequest, RecordTemplate, Service, ServiceOwner, ) from powerdns.utils import AutoPtrOptions class UserFactory(factory.django.DjangoModelFactory): class Meta: model = get_user_model() django_get_or_create = ('username',) username = factory.Sequence(lambda n: "user_%d" % n) @classmethod def _generate(cls, create, attrs): user = super(UserFactory, cls)._generate(create, attrs) user.set_password('password') user.save() return user class DomainTemplateFactory(DjangoModelFactory): class Meta: model = DomainTemplate name = factory.Sequence(lambda n: 'name%d' % n) class RecordTemplateFactory(DjangoModelFactory): class Meta: model = RecordTemplate class ServiceFactory(DjangoModelFactory): class Meta: model = Service name = factory.Sequence(lambda n: 'service%d' % n) uid = factory.Sequence(lambda n: 'uid%d' % n) class ServiceOwnerFactory(DjangoModelFactory): class Meta: model = ServiceOwner service = factory.SubFactory(ServiceFactory) owner = factory.SubFactory(UserFactory) class DomainFactory(DjangoModelFactory): class Meta: model = Domain name = factory.Sequence(lambda n: 'name%d.com' % n) service = factory.SubFactory(ServiceFactory) auto_ptr = AutoPtrOptions.NEVER class DomainOwnerFactory(DjangoModelFactory): class Meta: model = DomainOwner domain = factory.SubFactory(DomainFactory) owner = factory.SubFactory(UserFactory) class RecordFactory(DjangoModelFactory): class Meta: model = Record domain = factory.SubFactory(DomainFactory) owner = factory.SubFactory(UserFactory) service = factory.SubFactory(ServiceFactory) class RecordRequestFactory(DjangoModelFactory): class Meta: model = RecordRequest record = factory.SubFactory(RecordFactory) domain = factory.SubFactory(DomainFactory) owner = factory.SubFactory(UserFactory) target_service = factory.SubFactory(ServiceFactory) class RecordDeleteRequestFactory(factory.django.DjangoModelFactory): class Meta: model = DeleteRequest target = factory.SubFactory(RecordFactory) owner = factory.SubFactory(UserFactory) @property def content_type(self): return ContentType.objects.get_for_model(Record) class RecordTestCase(TestCase): def setUp(self): self.domain = DomainFactory( name='example.com', template=None, reverse_template=DomainTemplateFactory(name='reverse'), auto_ptr=AutoPtrOptions.NEVER, ) def validate(self, **values): values.setdefault('domain', self.domain) values.setdefault('change_date', '20150305') Record(**values).full_clean() def check_invalid(self, **values): with self.assertRaises(ValidationError): self.validate(**values)
BSD 2-Clause Simplified License
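Typical use of the user_client helper in a test: build a user, wrap it in an authenticated client, then hit the API as that user. A sketch, assuming a pytest-style test; the endpoint URL is invented for illustration:

```python
from powerdns.tests.utils import UserFactory, user_client

def test_authenticated_request():
    user = UserFactory(username="alice")
    client = user_client(user)
    # '/api/domains/' is a hypothetical endpoint for illustration.
    response = client.get("/api/domains/")
    # An authenticated client should not be rejected outright.
    assert response.status_code != 403
```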
huffpostdata/python-pollster
pollster/api_client.py
ApiClient.select_header_content_type
python
def select_header_content_type(self, content_types): if not content_types: return 'application/json' content_types = list(map(lambda x: x.lower(), content_types)) if 'application/json' in content_types or '*/*' in content_types: return 'application/json' else: return content_types[0]
Returns `Content-Type` based on the provided list of content types. :param content_types: List of content-types. :return: Content-Type (e.g. application/json).
https://github.com/huffpostdata/python-pollster/blob/276de8d66a92577b1143fd92a70cff9c35a1dfcf/pollster/api_client.py#L491-L506
from __future__ import absolute_import from . import models from .rest import RESTClientObject from .rest import ApiException import io import os import re import json import mimetypes import sys import tempfile import threading from datetime import datetime from datetime import date import pandas from six import PY3, integer_types, iteritems, text_type, StringIO from six.moves.urllib.parse import quote from .configuration import Configuration class ApiClient(object): def __init__(self, host=None, header_name=None, header_value=None, cookie=None): self.rest_client = RESTClientObject() self.default_headers = {} if header_name is not None: self.default_headers[header_name] = header_value if host is None: self.host = Configuration().host else: self.host = host self.cookie = cookie self.user_agent = 'Swagger-Codegen/2.0.0/python' @property def user_agent(self): return self.default_headers['User-Agent'] @user_agent.setter def user_agent(self, value): self.default_headers['User-Agent'] = value def set_default_header(self, header_name, header_value): self.default_headers[header_name] = header_value def __call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, callback=None, pandas_read_table_kwargs=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None): header_params = header_params or {} header_params.update(self.default_headers) if self.cookie: header_params['Cookie'] = self.cookie if header_params: header_params = self.sanitize_for_serialization(header_params) header_params = dict(self.parameters_to_tuples(header_params, collection_formats)) if path_params: path_params = self.sanitize_for_serialization(path_params) path_params = self.parameters_to_tuples(path_params, collection_formats) for k, v in path_params: resource_path = resource_path.replace( '{%s}' % k, quote(str(v))) if query_params: query_params = self.sanitize_for_serialization(query_params) query_params = self.parameters_to_tuples(query_params, collection_formats) if post_params or files: post_params = self.prepare_post_parameters(post_params, files) post_params = self.sanitize_for_serialization(post_params) post_params = self.parameters_to_tuples(post_params, collection_formats) self.update_params_for_auth(header_params, query_params, auth_settings) if body: body = self.sanitize_for_serialization(body) url = self.host + resource_path response_data = self.request(method, url, query_params=query_params, headers=header_params, post_params=post_params, body=body, _preload_content=_preload_content, _request_timeout=_request_timeout) self.last_response = response_data return_data = response_data if _preload_content: if header_params['Accept'] == 'text/tab-separated-values': kwargs = pandas_read_table_kwargs if kwargs is None: kwargs = {} return_data = pandas.read_table(StringIO(response_data.data), **kwargs) elif response_type: return_data = self.deserialize(response_data, response_type) else: return_data = None if callback: callback(return_data) if _return_http_data_only else callback((return_data, response_data.status, response_data.getheaders())) elif _return_http_data_only: return (return_data) else: return (return_data, response_data.status, response_data.getheaders()) def sanitize_for_serialization(self, obj): types = (str, float, bool, bytes) + tuple(integer_types) + (text_type,) if isinstance(obj, type(None)): return None elif isinstance(obj, types): return obj 
elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) elif isinstance(obj, (datetime, date)): return obj.isoformat() else: if isinstance(obj, dict): obj_dict = obj else: obj_dict = {obj.attribute_map[attr]: getattr(obj, attr) for attr, _ in iteritems(obj.swagger_types) if getattr(obj, attr) is not None} return {key: self.sanitize_for_serialization(val) for key, val in iteritems(obj_dict)} def deserialize(self, response, response_type): if "file" == response_type: return self.__deserialize_file(response) try: data = json.loads(response.data) except ValueError: data = response.data return self.__deserialize(data, response_type) def __deserialize(self, data, klass): if data is None: return None if type(klass) == str: if klass.startswith('list['): sub_kls = re.match('list\[(.*)\]', klass).group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] if klass.startswith('dict('): sub_kls = re.match('dict\(([^,]*), (.*)\)', klass).group(2) return {k: self.__deserialize(v, sub_kls) for k, v in iteritems(data)} if klass in ['int', 'float', 'bool', "date", 'datetime', "object"]: klass = eval(klass) elif klass == 'str': klass = text_type elif klass == 'long': klass = int if PY3 else long else: klass = eval('models.' + klass) if klass in integer_types or klass in (float, text_type, bool): return self.__deserialize_primitive(data, klass) elif klass == object: return self.__deserialize_object(data) elif klass == date: return self.__deserialize_date(data) elif klass == datetime: return self.__deserialize_datatime(data) else: return self.__deserialize_model(data, klass) def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, callback=None, pandas_read_table_kwargs=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None): if callback is None: return self.__call_api(resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, callback, pandas_read_table_kwargs, _return_http_data_only, collection_formats, _preload_content, _request_timeout) else: thread = threading.Thread(target=self.__call_api, args=(resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, callback, pandas_read_table_kwargs, _return_http_data_only, collection_formats, _preload_content, _request_timeout)) thread.start() return thread def request(self, method, url, query_params=None, headers=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): if method == "GET": return self.rest_client.GET(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "HEAD": return self.rest_client.HEAD(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "OPTIONS": return self.rest_client.OPTIONS(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "POST": return self.rest_client.POST(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, 
_request_timeout=_request_timeout, body=body) elif method == "PUT": return self.rest_client.PUT(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "PATCH": return self.rest_client.PATCH(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "DELETE": return self.rest_client.DELETE(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) else: raise ValueError( "http method must be `GET`, `HEAD`, `OPTIONS`," " `POST`, `PATCH`, `PUT` or `DELETE`." ) def parameters_to_tuples(self, params, collection_formats): new_params = [] if collection_formats is None: collection_formats = {} for k, v in iteritems(params) if isinstance(params, dict) else params: if k in collection_formats: collection_format = collection_formats[k] if collection_format == 'multi': new_params.extend((k, value) for value in v) else: if collection_format == 'ssv': delimiter = ' ' elif collection_format == 'tsv': delimiter = '\t' elif collection_format == 'pipes': delimiter = '|' else: delimiter = ',' new_params.append( (k, delimiter.join(str(value) for value in v))) else: new_params.append((k, v)) return new_params def prepare_post_parameters(self, post_params=None, files=None): params = [] if post_params: params = post_params if files: for k, v in iteritems(files): if not v: continue file_names = v if type(v) is list else [v] for n in file_names: with open(n, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = mimetypes. guess_type(filename)[0] or 'application/octet-stream' params.append(tuple([k, tuple([filename, filedata, mimetype])])) return params def select_header_accept(self, accepts): if not accepts: return accepts = list(map(lambda x: x.lower(), accepts)) if 'application/json' in accepts: return 'application/json' else: return ', '.join(accepts)
BSD 2-Clause Simplified License
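The selection rule in select_header_content_type is easier to see restated on its own: default to application/json when the list is empty or when JSON/wildcard is accepted, otherwise take the first declared type (lower-cased). A standalone restatement with a few spot checks:

```python
def select_content_type(content_types):
    # Same logic as ApiClient.select_header_content_type, free-standing.
    if not content_types:
        return 'application/json'
    lowered = [ct.lower() for ct in content_types]
    if 'application/json' in lowered or '*/*' in lowered:
        return 'application/json'
    return lowered[0]

assert select_content_type([]) == 'application/json'
assert select_content_type(['*/*', 'text/html']) == 'application/json'
assert select_content_type(['Text/Plain', 'application/xml']) == 'text/plain'
print("ok")
```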
lisa-lab/pylearn2
pylearn2/models/maxout.py
MaxoutConvC01B.set_input_space
python
def set_input_space(self, space): rng = self.mlp.rng setup_detector_layer_c01b(layer=self, input_space=space, rng=rng) detector_shape = self.detector_space.shape def handle_pool_shape(idx): if self.pool_shape[idx] < 1: raise ValueError("bad pool shape: " + str(self.pool_shape)) if self.pool_shape[idx] > detector_shape[idx]: if self.fix_pool_shape: assert detector_shape[idx] > 0 self.pool_shape[idx] = detector_shape[idx] else: raise ValueError("Pool shape exceeds detector layer shape " "on axis %d" % idx) map(handle_pool_shape, [0, 1]) assert self.pool_shape[0] == self.pool_shape[1] assert self.pool_stride[0] == self.pool_stride[1] assert all(isinstance(elem, py_integer_types) for elem in self.pool_stride) if self.pool_stride[0] > self.pool_shape[0]: if self.fix_pool_stride: warnings.warn("Fixing the pool stride") ps = self.pool_shape[0] assert isinstance(ps, py_integer_types) self.pool_stride = [ps, ps] else: raise ValueError("Stride too big.") assert all(isinstance(elem, py_integer_types) for elem in self.pool_stride) dummy_detector = sharedX(self.detector_space.get_origin_batch(2)[0:16, :, :, :]) dummy_p = max_pool_c01b(c01b=dummy_detector, pool_shape=self.pool_shape, pool_stride=self.pool_stride) dummy_p = dummy_p.eval() self.output_space = Conv2DSpace(shape=[dummy_p.shape[1], dummy_p.shape[2]], num_channels=self.num_channels, axes=('c', 0, 1, 'b')) logger.info('Output space: {0}'.format(self.output_space.shape))
Tells the layer to use the specified input space. This resets parameters! The kernel tensor is initialized with the size needed to receive input from this space. Parameters ---------- space : Space The Space that the input will lie in.
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/models/maxout.py#L675-L738
__authors__ = "Ian Goodfellow" __copyright__ = "Copyright 2012-2013, Universite de Montreal" __credits__ = ["Ian Goodfellow"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" import functools import logging import numpy as np import warnings from theano.compat.six.moves import xrange from theano.compat.six.moves import zip as izip from theano.sandbox import cuda from theano import tensor as T from pylearn2.compat import OrderedDict from pylearn2.linear.matrixmul import MatrixMul from pylearn2.model_extensions.norm_constraint import MaxL2FilterNorm from pylearn2.models.mlp import Layer from pylearn2.models.model import Model from pylearn2.space import Conv2DSpace from pylearn2.space import VectorSpace from pylearn2.utils import py_integer_types from pylearn2.utils import sharedX from pylearn2.linear.conv2d_c01b import setup_detector_layer_c01b from pylearn2.linear import local_c01b if cuda.cuda_available: from pylearn2.sandbox.cuda_convnet.pool import max_pool_c01b else: max_pool_c01b = None from pylearn2.sandbox.cuda_convnet import check_cuda logger = logging.getLogger(__name__) class Maxout(Layer): def __str__(self): return "Maxout" def __init__(self, layer_name, num_units, num_pieces, pool_stride=None, randomize_pools=False, irange=None, sparse_init=None, sparse_stdev=1., include_prob=1.0, init_bias=0., W_lr_scale=None, b_lr_scale=None, max_col_norm=None, max_row_norm=None, mask_weights=None, min_zero=False, **kwargs): super(Maxout, self).__init__(**kwargs) detector_layer_dim = num_units * num_pieces pool_size = num_pieces if pool_stride is None: pool_stride = pool_size self.__dict__.update(locals()) del self.self self.b = sharedX(np.zeros((self.detector_layer_dim,)) + init_bias, name=(layer_name + '_b')) if max_col_norm is not None: self.extensions.append(MaxL2FilterNorm(max_col_norm, axis=0)) if max_row_norm is not None: self.extensions.append(MaxL2FilterNorm(max_row_norm, axis=1)) @functools.wraps(Model.get_lr_scalers) def get_lr_scalers(self): if not hasattr(self, 'W_lr_scale'): self.W_lr_scale = None if not hasattr(self, 'b_lr_scale'): self.b_lr_scale = None rval = OrderedDict() if self.W_lr_scale is not None: W, = self.transformer.get_params() rval[W] = self.W_lr_scale if self.b_lr_scale is not None: rval[self.b] = self.b_lr_scale return rval def set_input_space(self, space): self.input_space = space if isinstance(space, VectorSpace): self.requires_reformat = False self.input_dim = space.dim else: self.requires_reformat = True self.input_dim = space.get_total_dimension() self.desired_space = VectorSpace(self.input_dim) if not (0 == ((self.detector_layer_dim - self.pool_size) % self.pool_stride)): if self.pool_stride == self.pool_size: raise ValueError("detector_layer_dim = %d, pool_size = %d. 
" "Should be divisible but remainder is %d" % (self.detector_layer_dim, self.pool_size, self.detector_layer_dim % self.pool_size)) raise ValueError() self.h_space = VectorSpace(self.detector_layer_dim) self.pool_layer_dim = ((self.detector_layer_dim - self.pool_size) / self.pool_stride + 1) self.output_space = VectorSpace(self.pool_layer_dim) rng = self.mlp.rng if self.irange is not None: assert self.sparse_init is None W = rng.uniform(-self.irange, self.irange, (self.input_dim, self.detector_layer_dim)) * (rng.uniform(0., 1., (self.input_dim, self.detector_layer_dim)) < self.include_prob) else: assert self.sparse_init is not None W = np.zeros((self.input_dim, self.detector_layer_dim)) def mask_rejects(idx, i): if self.mask_weights is None: return False return self.mask_weights[idx, i] == 0. for i in xrange(self.detector_layer_dim): assert self.sparse_init <= self.input_dim for j in xrange(self.sparse_init): idx = rng.randint(0, self.input_dim) while W[idx, i] != 0 or mask_rejects(idx, i): idx = rng.randint(0, self.input_dim) W[idx, i] = rng.randn() W *= self.sparse_stdev W = sharedX(W) W.name = self.layer_name + '_W' self.transformer = MatrixMul(W) W, = self.transformer.get_params() assert W.name is not None if not hasattr(self, 'randomize_pools'): self.randomize_pools = False if self.randomize_pools: permute = np.zeros((self.detector_layer_dim, self.detector_layer_dim)) for j in xrange(self.detector_layer_dim): i = rng.randint(self.detector_layer_dim) permute[i, j] = 1 self.permute = sharedX(permute) if self.mask_weights is not None: expected_shape = (self.input_dim, self.detector_layer_dim) if expected_shape != self.mask_weights.shape: raise ValueError("Expected mask with shape " + str(expected_shape) + " but got " + str(self.mask_weights.shape)) self.mask = sharedX(self.mask_weights) def _modify_updates(self, updates): if not hasattr(self, 'mask_weights'): self.mask_weights = None if self.mask_weights is not None: W, = self.transformer.get_params() if W in updates: updates[W] = updates[W] * self.mask @functools.wraps(Model.get_params) def get_params(self): assert self.b.name is not None W, = self.transformer.get_params() assert W.name is not None rval = self.transformer.get_params() assert not isinstance(rval, set) rval = list(rval) assert self.b not in rval rval.append(self.b) return rval @functools.wraps(Layer.get_weight_decay) def get_weight_decay(self, coeff): if isinstance(coeff, str): coeff = float(coeff) assert isinstance(coeff, float) or hasattr(coeff, 'dtype') W, = self.transformer.get_params() return coeff * T.sqr(W).sum() @functools.wraps(Layer.get_l1_weight_decay) def get_l1_weight_decay(self, coeff): if isinstance(coeff, str): coeff = float(coeff) assert isinstance(coeff, float) or hasattr(coeff, 'dtype') W, = self.transformer.get_params() return coeff * T.abs_(W).sum() @functools.wraps(Model.get_weights) def get_weights(self): if self.requires_reformat: raise NotImplementedError() W, = self.transformer.get_params() W = W.get_value() if not hasattr(self, 'randomize_pools'): self.randomize_pools = False if self.randomize_pools: warnings.warn("randomize_pools makes get_weights multiply by the " "permutation matrix. 
If you call set_weights(W) and " "then call get_weights(), the return value will " "WP not W.") P = self.permute.get_value() return np.dot(W, P) return W @functools.wraps(Layer.set_weights) def set_weights(self, weights): W, = self.transformer.get_params() W.set_value(weights) @functools.wraps(Layer.set_biases) def set_biases(self, biases): self.b.set_value(biases) @functools.wraps(Layer.get_biases) def get_biases(self): return self.b.get_value() @functools.wraps(Model.get_weights_format) def get_weights_format(self): return ('v', 'h') @functools.wraps(Model.get_weights_view_shape) def get_weights_view_shape(self): total = self.detector_layer_dim cols = self.pool_size if cols == 1: raise NotImplementedError() rows = total // cols if rows * cols < total: rows = rows + 1 return rows, cols @functools.wraps(Model.get_weights_topo) def get_weights_topo(self): if not isinstance(self.input_space, Conv2DSpace): raise NotImplementedError() W, = self.transformer.get_params() assert self.input_space.num_channels in [1, 3] viewer_space = Conv2DSpace(shape=self.input_space.shape, num_channels=self.input_space.num_channels, axes=('b', 0, 1, 'c')) W = self.desired_space.format_as(W.T, viewer_space) rval = W.eval() return rval @functools.wraps(Layer.get_layer_monitoring_channels) def get_layer_monitoring_channels(self, state_below=None, state=None, targets=None): W, = self.transformer.get_params() assert W.ndim == 2 sq_W = T.sqr(W) row_norms = T.sqrt(sq_W.sum(axis=1)) col_norms = T.sqrt(sq_W.sum(axis=0)) row_norms_min = row_norms.min() row_norms_min.__doc__ = ("The smallest norm of any row of the " "weight matrix W. This is a measure of the " "least influence any visible unit has.") rval = OrderedDict([('row_norms_min', row_norms_min), ('row_norms_mean', row_norms.mean()), ('row_norms_max', row_norms.max()), ('col_norms_min', col_norms.min()), ('col_norms_mean', col_norms.mean()), ('col_norms_max', col_norms.max()), ]) if (state is not None) or (state_below is not None): if state is None: state = self.fprop(state_below) P = state if self.pool_size == 1: vars_and_prefixes = [(P, '')] else: vars_and_prefixes = [(P, 'p_')] for var, prefix in vars_and_prefixes: v_max = var.max(axis=0) v_min = var.min(axis=0) v_mean = var.mean(axis=0) v_range = v_max - v_min for key, val in [('max_x.max_u', v_max.max()), ('max_x.mean_u', v_max.mean()), ('max_x.min_u', v_max.min()), ('min_x.max_u', v_min.max()), ('min_x.mean_u', v_min.mean()), ('min_x.min_u', v_min.min()), ('range_x.max_u', v_range.max()), ('range_x.mean_u', v_range.mean()), ('range_x.min_u', v_range.min()), ('mean_x.max_u', v_mean.max()), ('mean_x.mean_u', v_mean.mean()), ('mean_x.min_u', v_mean.min())]: rval[prefix + key] = val return rval @functools.wraps(Layer.fprop) def fprop(self, state_below): self.input_space.validate(state_below) if self.requires_reformat: state_below = self.input_space.format_as(state_below, self.desired_space) z = self.transformer.lmul(state_below) + self.b if not hasattr(self, 'randomize_pools'): self.randomize_pools = False if not hasattr(self, 'pool_stride'): self.pool_stride = self.pool_size if self.randomize_pools: z = T.dot(z, self.permute) if not hasattr(self, 'min_zero'): self.min_zero = False if self.min_zero: p = 0. 
else: p = None last_start = self.detector_layer_dim - self.pool_size for i in xrange(self.pool_size): cur = z[:, i:last_start + i + 1:self.pool_stride] if p is None: p = cur else: p = T.maximum(cur, p) p.name = self.layer_name + '_p_' return p class MaxoutConvC01B(Layer): def __init__(self, num_channels, num_pieces, kernel_shape, pool_shape, pool_stride, layer_name, irange=None, init_bias=0., W_lr_scale=None, b_lr_scale=None, pad=0, fix_pool_shape=False, fix_pool_stride=False, fix_kernel_shape=False, partial_sum=1, tied_b=False, max_kernel_norm=None, input_normalization=None, detector_normalization=None, min_zero=False, output_normalization=None, kernel_stride=(1, 1)): check_cuda(str(type(self))) super(MaxoutConvC01B, self).__init__() detector_channels = num_channels * num_pieces self.__dict__.update(locals()) del self.self if max_kernel_norm is not None: self.extensions.append( MaxL2FilterNorm(max_kernel_norm, axis=(0, 1, 2)) ) @functools.wraps(Model.get_lr_scalers) def get_lr_scalers(self): if not hasattr(self, 'W_lr_scale'): self.W_lr_scale = None if not hasattr(self, 'b_lr_scale'): self.b_lr_scale = None rval = OrderedDict() if self.W_lr_scale is not None: W, = self.transformer.get_params() rval[W] = self.W_lr_scale if self.b_lr_scale is not None: rval[self.b] = self.b_lr_scale return rval
BSD 3-Clause New or Revised License
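While MaxoutConvC01B discovers its output shape by evaluating a dummy max_pool_c01b graph, the dense Maxout layer in the same file computes the pooled dimension analytically. That arithmetic in isolation:

```python
def pool_output_dim(detector_dim: int, pool_size: int, pool_stride: int) -> int:
    # Same formula as Maxout.set_input_space:
    # (detector_layer_dim - pool_size) / pool_stride + 1
    assert (detector_dim - pool_size) % pool_stride == 0
    return (detector_dim - pool_size) // pool_stride + 1

# 240 detector units, pools of 5 with stride 5 -> 48 maxout units
print(pool_output_dim(240, 5, 5))  # 48
```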
vemel/handsdown
examples/main_example.py
MyClass.__bool__
python
def __bool__(self): return bool(self.STUFF_COUNT)
Magic methods are added to docs only if they have docstrings. :returns: True if `STUFF_COUNT` is not zero
https://github.com/vemel/handsdown/blob/68bd0ca6b3ed738db888f18c7a0ccbc62c2d0e42/examples/main_example.py#L92-L99
from typing import TYPE_CHECKING from my_project.other_module import BaseClass if TYPE_CHECKING: from my_project.type_defs import StuffCallable MODULE_NAME = "My Module" _PRIVATE_ATTR = "Private attr" def hello(name: str) -> str: if not name: return "Hello!" return f"Hello, {name}!" class MyClass(BaseClass): STUFF_COUNT = 3 @classmethod def do_something(cls, stuff): return stuff(cls.STUFF_COUNT)
MIT License
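In Python 3, `__bool__` must return an actual bool; returning the raw integer raises `TypeError: __bool__ should return bool` the moment the object is used in a truth test, hence the `bool(...)` wrapper in the corrected method above. A self-contained sketch (BaseClass and the my_project imports are not available here, so a bare class stands in):

```python
class MyClassSketch:
    STUFF_COUNT = 3

    def __bool__(self):
        # Coerce the count to a real bool, as __bool__ requires.
        return bool(self.STUFF_COUNT)

print(bool(MyClassSketch()))   # True
MyClassSketch.STUFF_COUNT = 0
print(bool(MyClassSketch()))   # False
```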
google/clusterfuzz
src/appengine/libs/issue_management/jira/__init__.py
Issue.id
python
def id(self): return self.jira_issue.key
The issue identifier.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/appengine/libs/issue_management/jira/__init__.py#L41-L43
from dateutil import parser from clusterfuzz._internal.config import db_config from libs.issue_management import issue_tracker from libs.issue_management.jira.issue_tracker_manager import IssueTrackerManager class Issue(issue_tracker.Issue): def __init__(self, itm, jira_issue): self.itm = itm self.jira_issue = jira_issue self._ccs = issue_tracker.LabelStore(self.itm.get_watchers(self.jira_issue)) self._components = issue_tracker.LabelStore( self.jira_issue.fields.components) self._labels = issue_tracker.LabelStore(self.jira_issue.fields.labels) @property def issue_tracker(self): return IssueTracker(self.itm) @property
Apache License 2.0
bloomreach/s4cmd
s4cmd.py
clear_progress
python
def clear_progress(): progress('')
Clear previous progress message, if any.
https://github.com/bloomreach/s4cmd/blob/e74e0e7cc666d39af054d231d0a84f817dbab2fa/s4cmd.py#L146-L148
import sys, os, re, optparse, multiprocessing, fnmatch, time, hashlib, errno, pytz import logging, traceback, types, threading, random, socket, shlex, datetime, json IS_PYTHON2 = sys.version_info[0] == 2 if IS_PYTHON2: from cStringIO import StringIO import Queue import ConfigParser else: from io import BytesIO as StringIO import queue as Queue import configparser as ConfigParser def cmp(a, b): return (a > b) - (a < b) if sys.version_info < (2, 7): from utils import cmp_to_key else: from functools import cmp_to_key S4CMD_VERSION = "2.1.0" PATH_SEP = '/' DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S UTC' TIMESTAMP_FORMAT = '%04d-%02d-%02d %02d:%02d' SOCKET_TIMEOUT = 5 * 60 socket.setdefaulttimeout(SOCKET_TIMEOUT) TEMP_FILES = set() S3_ACCESS_KEY_NAME = "AWS_ACCESS_KEY_ID" S3_SECRET_KEY_NAME = "AWS_SECRET_ACCESS_KEY" S4CMD_ENV_KEY = "S4CMD_OPTS" class Failure(RuntimeError): pass class InvalidArgument(RuntimeError): pass class RetryFailure(Exception): pass class S4cmdLoggingClass: def __init__(self): self.log = logging.Logger("s4cmd") self.log.stream = sys.stderr self.log_handler = logging.StreamHandler(self.log.stream) self.log.addHandler(self.log_handler) def configure(self, opt): '' self.log_handler.setFormatter(logging.Formatter('%(message)s', DATETIME_FORMAT)) if opt.debug: self.log.verbosity = 3 self.log_handler.setFormatter(logging.Formatter( ' (%(levelname).1s)%(filename)s:%(lineno)-4d %(message)s', DATETIME_FORMAT)) self.log.setLevel(logging.DEBUG) elif opt.verbose: self.log.verbosity = 2 self.log.setLevel(logging.INFO) else: self.log.verbosity = 1 self.log.setLevel(logging.ERROR) def get_loggers(self): return self.log.debug, self.log.info, self.log.warn, self.log.error s4cmd_logging = S4cmdLoggingClass() debug, info, warn, error = s4cmd_logging.get_loggers() def get_default_thread_count(): return int(os.getenv('S4CMD_NUM_THREADS', multiprocessing.cpu_count() * 4)) def log_calls(func): def wrapper(*args, **kargs): callStr = "%s(%s)" % (func.__name__, ", ".join([repr(p) for p in args] + ["%s=%s" % (k, repr(v)) for (k, v) in list(kargs.items())])) debug(">> %s", callStr) ret = func(*args, **kargs) debug("<< %s: %s", callStr, repr(ret)) return ret return wrapper def synchronized(func): func.__lock__ = threading.Lock() def synced_func(*args, **kargs): with func.__lock__: return func(*args, **kargs) return synced_func
Apache License 2.0
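Beyond clear_progress itself, the surrounding s4cmd module defines a reusable `synchronized` decorator that attaches a lock to the wrapped function. A quick demonstration that it serialises a read-modify-write from many threads (the counter and thread count are invented, and the sketch assumes the decorator above is in scope):

```python
import threading

counter = {'n': 0}

@synchronized  # the decorator defined in the s4cmd module above
def bump():
    # Without the lock this read-modify-write could interleave.
    counter['n'] = counter['n'] + 1

threads = [threading.Thread(target=bump) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter['n'])  # 100
```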
havardgulldahl/mopidy_plex
mopidy_plex/playlists.py
PlexPlaylistsProvider.refresh
python
def refresh(self): logger.debug('Refresh')
Refresh the playlists held by this provider.
https://github.com/havardgulldahl/mopidy_plex/blob/30cb4a74974122fc6e96a6363f3ce991e1cd9a98/mopidy_plex/playlists.py#L97-L99
from __future__ import unicode_literals import re from mopidy import backend from mopidy.models import Ref, Playlist from plexapi import audio as plexaudio, playlist as plexplaylist from plexapi.utils import listItems from mopidy_plex import logger from .library import wrap_track from .mwt import MWT class PlexPlaylistsProvider(backend.PlaylistsProvider): def __init__(self, *args, **kwargs): super(PlexPlaylistsProvider, self).__init__(*args, **kwargs) self.plex = self.backend.plex @MWT(timeout=3600) def as_list(self): logger.debug('Playlist: as_list') audiolists = [l for l in self.plex.playlists() if l.playlistType == 'audio'] return [Ref(uri='plex:playlist:{}'.format(playlist.ratingKey), name=playlist.title) for playlist in audiolists] def create(self, name): logger.debug('Playlist: create %r', name) def delete(self, uri): logger.debug('Playlist: delete %r', uri) @MWT(timeout=3600) def get_items(self, uri): logger.debug('Playlist: get_items %r', uri) _rx = re.compile(r'plex:playlist:(?P<plid>\d+)').match(uri) if _rx is None: return None def wrap_ref(item): return Ref.track(uri='plex:track:{}'.format(item.ratingKey), name=item.title) return [wrap_ref(item) for item in listItems(self.plex, '/playlists/{}/items'.format(_rx.group('plid')))] @MWT(timeout=3600) def lookup(self, uri): logger.debug('Playlist: lookup %r', uri) _rx = re.compile(r'plex:playlist:(?P<plid>\d+)').match(uri) if _rx is None: return None plexlist = listItems(self.plex, '/playlists/{:s}'.format(_rx.group('plid')))[0] PL = Playlist(uri=uri, name=plexlist.title, tracks=[wrap_track(_t, self.backend.plex_uri) for _t in plexlist.items()], last_modified=None, ) return PL
Apache License 2.0
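Both get_items and lookup parse the playlist id out of the URI with the same named-group regex; checking that pattern in isolation:

```python
import re

pattern = re.compile(r'plex:playlist:(?P<plid>\d+)')
match = pattern.match('plex:playlist:12345')
print(match.group('plid') if match else None)  # '12345'
print(pattern.match('plex:track:1'))           # None -> the provider returns None
```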
mahenzon/aioalice
aioalice/dispatcher/handler.py
Handler.notify
python
async def notify(self, *args): for filters, handler in self.handlers: if await check_filters(filters, args): try: return await handler(*args) except SkipHandler: continue
Notify handlers. :param args: positional arguments passed to each handler. :return: instance of AliceResponse. You *have* to return something to answer the API; consider returning AliceResponse or prepared JSON.
https://github.com/mahenzon/aioalice/blob/f87b2e24c42444b5cb274c95eff20555314ec4f6/aioalice/dispatcher/handler.py#L45-L60
from aioalice.dispatcher.filters import check_filters class SkipHandler(BaseException): class Handler: def __init__(self): self.handlers = [] def register(self, handler, filters=None, index=None): if filters and not isinstance(filters, (list, tuple, set)): filters = [filters] record = (filters, handler) if index is None: self.handlers.append(record) else: self.handlers.insert(index, record) def unregister(self, handler): for handler_with_filters in self.handlers: _, registered = handler_with_filters if handler is registered: self.handlers.remove(handler_with_filters) return True raise ValueError('This handler is not registered!')
MIT License
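End-to-end, a handler with no filters is registered and then dispatched through notify. A sketch, assuming check_filters treats the absence of filters as a pass (register stores None when no filters are given):

```python
import asyncio

handler = Handler()

async def on_event(event):
    return {'text': 'got %s' % event}

handler.register(on_event)  # no filters -> stored as (None, on_event)
result = asyncio.run(handler.notify('hello'))
print(result)  # {'text': 'got hello'}
```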
schemathesis/schemathesis
src/schemathesis/runner/serialization.py
deduplicate_failures
python
def deduplicate_failures(checks: List[SerializedCheck]) -> List[SerializedCheck]: seen: Set[Tuple[str, Optional[str]]] = set() unique_checks = [] for check in reversed(checks): if check.value == Status.failure: key = get_failure_key(check) if (check.name, key) not in seen: unique_checks.append(check) seen.add((check.name, key)) return unique_checks
Return only unique checks that should be displayed in the output.
https://github.com/schemathesis/schemathesis/blob/b3d0faaaf484574006624c2f23759a612d1ffb5e/src/schemathesis/runner/serialization.py#L161-L172
import logging from typing import Any, Dict, List, Optional, Set, Tuple import attr import requests from ..exceptions import FailureContext, InternalError from ..failures import ValidationErrorContext from ..models import Case, Check, Interaction, Request, Response, Status, TestResult from ..utils import WSGIResponse, format_exception @attr.s(slots=True) class SerializedCase: text_lines: List[str] = attr.ib() requests_code: str = attr.ib() curl_code: str = attr.ib() path_template: str = attr.ib() path_parameters: Optional[Dict[str, Any]] = attr.ib() query: Optional[Dict[str, Any]] = attr.ib() cookies: Optional[Dict[str, Any]] = attr.ib() verbose_name: str = attr.ib() media_type: Optional[str] = attr.ib() @classmethod def from_case(cls, case: Case, headers: Optional[Dict[str, Any]]) -> "SerializedCase": return cls( text_lines=case.as_text_lines(headers), requests_code=case.get_code_to_reproduce(headers), curl_code=case.as_curl_command(headers), path_template=case.path, path_parameters=case.path_parameters, query=case.query, cookies=case.cookies, verbose_name=case.operation.verbose_name, media_type=case.media_type, ) @attr.s(slots=True) class SerializedCheck: name: str = attr.ib() value: Status = attr.ib() request: Request = attr.ib() response: Optional[Response] = attr.ib() example: SerializedCase = attr.ib() message: Optional[str] = attr.ib(default=None) context: Optional[FailureContext] = attr.ib(default=None) @classmethod def from_check(cls, check: Check) -> "SerializedCheck": if check.response is not None: request = Request.from_prepared_request(check.response.request) elif check.request is not None: request = Request.from_prepared_request(check.request) else: raise InternalError("Can not find request data") response: Optional[Response] if isinstance(check.response, requests.Response): response = Response.from_requests(check.response) elif isinstance(check.response, WSGIResponse): response = Response.from_wsgi(check.response, check.elapsed) else: response = None headers = {key: value[0] for key, value in request.headers.items()} return cls( name=check.name, value=check.value, example=SerializedCase.from_case(check.example, headers), message=check.message, request=request, response=response, context=check.context, ) @attr.s(slots=True) class SerializedError: exception: str = attr.ib() exception_with_traceback: str = attr.ib() example: Optional[SerializedCase] = attr.ib() title: Optional[str] = attr.ib() @classmethod def from_error( cls, exception: Exception, case: Optional[Case], headers: Optional[Dict[str, Any]], title: Optional[str] = None ) -> "SerializedError": return cls( exception=format_exception(exception), exception_with_traceback=format_exception(exception, True), example=SerializedCase.from_case(case, headers) if case else None, title=title, ) @attr.s(slots=True) class SerializedInteraction: request: Request = attr.ib() response: Response = attr.ib() checks: List[SerializedCheck] = attr.ib() status: Status = attr.ib() recorded_at: str = attr.ib() @classmethod def from_interaction(cls, interaction: Interaction) -> "SerializedInteraction": return cls( request=interaction.request, response=interaction.response, checks=[SerializedCheck.from_check(check) for check in interaction.checks], status=interaction.status, recorded_at=interaction.recorded_at, ) @attr.s(slots=True) class SerializedTestResult: method: str = attr.ib() path: str = attr.ib() verbose_name: str = attr.ib() has_failures: bool = attr.ib() has_errors: bool = attr.ib() has_logs: bool = attr.ib() is_errored: bool = 
attr.ib() seed: Optional[int] = attr.ib() data_generation_method: str = attr.ib() checks: List[SerializedCheck] = attr.ib() logs: List[str] = attr.ib() errors: List[SerializedError] = attr.ib() interactions: List[SerializedInteraction] = attr.ib() @classmethod def from_test_result(cls, result: TestResult) -> "SerializedTestResult": formatter = logging.Formatter("[%(asctime)s] %(levelname)s in %(module)s: %(message)s") return cls( method=result.method, path=result.path, verbose_name=result.verbose_name, has_failures=result.has_failures, has_errors=result.has_errors, has_logs=result.has_logs, is_errored=result.is_errored, seed=result.seed, data_generation_method=result.data_generation_method.as_short_name(), checks=[SerializedCheck.from_check(check) for check in result.checks], logs=[formatter.format(record) for record in result.logs], errors=[SerializedError.from_error(*error, headers=result.overridden_headers) for error in result.errors], interactions=[SerializedInteraction.from_interaction(interaction) for interaction in result.interactions], )
MIT License
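The deduplication walks the checks newest-first and keeps the first occurrence of each (name, key) pair, so the most recent instance of a failure wins. A simplified standalone restatement (plain dicts instead of SerializedCheck objects, and without the Status.failure filter the real function applies):

```python
def dedupe(checks):
    seen = set()
    unique = []
    for check in reversed(checks):  # newest first
        key = (check['name'], check['key'])
        if key not in seen:
            unique.append(check)
            seen.add(key)
    return unique

checks = [
    {'name': 'status_code_conformance', 'key': '500'},
    {'name': 'status_code_conformance', 'key': '500'},
    {'name': 'response_schema_conformance', 'key': 'missing-field'},
]
print(dedupe(checks))  # the duplicate 500 failure collapses to one entry
```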
smira/fmspy
fmspy/application/room.py
Room.__init__
python
def __init__(self, application, name='_'): self.name = name self.application = application self.clients = set()
Construct new room. @param application: application owning this room @type application: L{Application} @param name: room name @type name: C{str}
https://github.com/smira/fmspy/blob/85260f4ebe8ccb17b0c755f8631f15af848b1707/fmspy/application/room.py#L24-L35
class Room(object):
MIT License
douban/pymesos
pymesos/interface.py
Scheduler.offerRescinded
python
def offerRescinded(self, driver, offerId):
Invoked when an offer is no longer valid (e.g., the slave was lost or another framework used resources in the offer). If for whatever reason an offer is never rescinded (e.g., dropped message, failing over framework, etc.), a framework that attempts to launch tasks using an invalid offer will receive TASK_LOST status updates for those tasks (see Scheduler.resourceOffers).
https://github.com/douban/pymesos/blob/047c7bac8ca98772f63192aed063148fdf399b55/pymesos/interface.py#L94-L102
from __future__ import print_function import sys __all__ = ( 'Executor', 'ExecutorDriver', 'Scheduler', 'SchedulerDriver', 'OperatorMasterDriver', 'OperatorMaster', 'OperatorAgentDriver', ) class Scheduler(object): def registered(self, driver, frameworkId, masterInfo): def reregistered(self, driver, masterInfo): def disconnected(self, driver): def processHeartBeat(self, driver): def resourceOffers(self, driver, offers): def inverseOffers(self, driver, offers):
BSD 3-Clause New or Revised License
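A framework usually reacts to offerRescinded by forgetting the offer so it never launches tasks against it. A minimal subclass sketch (the pending_offers bookkeeping is invented, and attribute access follows the protobuf-style interface shown here; adjust if offers arrive as dicts):

```python
class MyScheduler(Scheduler):
    def __init__(self):
        self.pending_offers = {}

    def resourceOffers(self, driver, offers):
        # Remember offers until we launch against them or they are rescinded.
        for offer in offers:
            self.pending_offers[offer.id.value] = offer

    def offerRescinded(self, driver, offerId):
        # Drop the offer; launching against it would yield TASK_LOST anyway.
        self.pending_offers.pop(offerId.value, None)
```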
synbiodex/pysbol2
sbol2/componentdefinition.py
ComponentDefinition.getLastComponent
python
def getLastComponent(self): if len(self.components) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'This ComponentDefinition has no ' 'components') arbitrary_component = self.components[0] next_component = arbitrary_component while self.hasDownstreamComponent(next_component): next_component = self.getDownstreamComponent(next_component) return next_component
Gets the last Component in a linear sequence. :return: The last component in sequential order.
https://github.com/synbiodex/pysbol2/blob/127b92d60ecf6f9b6cb8fbf9657bb578bc983090/sbol2/componentdefinition.py#L711-L729
import os import posixpath from typing import Union from rdflib import URIRef from .component import Component from .config import Config, ConfigOptions from .constants import * from .toplevel import TopLevel from .property import OwnedObject, ReferencedObject, URIProperty from .sbolerror import SBOLError, SBOLErrorCode from .sequence import Sequence from .sequenceannotation import SequenceAnnotation from .sequenceconstraint import SequenceConstraint class ComponentDefinition(TopLevel): _types = None _roles = None components = None sequences = None sequenceAnnotations = None sequenceConstraints = None def __init__(self, uri=URIRef("example"), component_type=URIRef(BIOPAX_DNA), version=VERSION_STRING, type_uri=SBOL_COMPONENT_DEFINITION): super().__init__(type_uri, uri, version) self.types = URIProperty(self, SBOL_TYPES, '1', '*', None, component_type) self.roles = URIProperty(self, SBOL_ROLES, '0', '*', None) self.sequences = ReferencedObject(self, SBOL_SEQUENCE_PROPERTY, SBOL_SEQUENCE, '0', '*', None) self.sequenceAnnotations = OwnedObject(self, SBOL_SEQUENCE_ANNOTATIONS, SequenceAnnotation, '0', '*', None) self.components = OwnedObject(self, SBOL_COMPONENTS, Component, '0', '*', None) self.sequenceConstraints = OwnedObject(self, SBOL_SEQUENCE_CONSTRAINTS, SequenceConstraint, '0', '*', None) self._sequence_cache: Union[Sequence, None] = None @property def sequence(self): seqs = self.sequences if not seqs: return None if self.doc: seq_uri = seqs[0] try: return self.doc.sequences[seq_uri] except SBOLError as e: if e.error_code() != SBOLErrorCode.NOT_FOUND_ERROR: raise return None else: if self._sequence_cache and self._sequence_cache.identity in seqs: return self._sequence_cache return None @sequence.setter def sequence(self, sequence: Union[Sequence, None]): if not sequence: self.sequences = None self._sequence_cache = None return if self.doc: try: self.doc.add(sequence) except SBOLError as e: if e.error_code() != SBOLErrorCode.DUPLICATE_URI_ERROR: raise else: self._sequence_cache = sequence self.sequences = [sequence.identity] def _added_to_document(self, doc): super()._added_to_document(doc) if self._sequence_cache: try: doc.add(self._sequence_cache) except SBOLError as e: if e.error_code() == SBOLErrorCode.SBOL_ERROR_URI_NOT_UNIQUE: pass else: raise def addType(self, new_type): val = self.types val.append(new_type) self.types = val def removeType(self, index=0): val = self.types del val[index] self.types = val def addRole(self, new_role): val = self.roles val.append(new_role) self.roles = val def removeRole(self, index=0): val = self.roles del val[index] self.roles = val def assemble(self, component_list, assembly_method=None, doc=None): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise EnvironmentError('Assemble method requires SBOL-compliance enabled') if not self.doc and not doc: raise ValueError('Missing doc argument. If the ComponentDefinition does ' 'not belong to a Document, a target Document must be ' 'specified using the doc keyword argument.') if doc and self.doc != doc: raise ValueError('Invalid doc argument. Do not use the doc keyword ' 'argument if the ComponentDefinition already belongs ' 'to a Document') doc = doc if doc else self.doc if isinstance(component_list, list) and all(isinstance(c, ComponentDefinition) for c in component_list): for cdef in component_list: if cdef.doc and cdef.doc is not doc: raise ValueError('Invalid component_list specified. 
Assembly ' 'subcomponents must belong to the same Document ' 'as self.') elif isinstance(component_list, list) and all(isinstance(c, str) for c in component_list): component_identities = component_list[:] component_list = [] for c_id in component_identities: if c_id not in doc.componentDefinitions: raise ValueError('Invalid component_list specified. ' 'ComponentDefinition <%s> not found.' % c_id) cdef = doc.componentDefinitions[c_id] component_list.append(cdef) else: raise TypeError('Invalid component_list specified. Please provide a list ' 'of ComponentDefinitions or, alternatively, a list of ' 'ComponentDefinition displayIds') if not self.doc: doc.addComponentDefinition(self) for cdef in component_list: if not cdef.doc: self.doc.addComponentDefinition(cdef) if assembly_method: component_list = assembly_method(component_list) if not all(type(c) is ComponentDefinition for c in component_list): raise TypeError('Invalid callback specified for assembly_method. The ' 'callback must return a list of ComponentDefinitions') instance_list = [] for cdef in component_list: instance_count = 0 component_id = self.persistentIdentity + "/" + cdef.displayId + "_" + str(instance_count) + "/" + self.version while self.find(component_id): instance_count += 1 component_id = self.persistentIdentity + "/" + cdef.displayId + "_" + str(instance_count) + "/" + self.version c = self.components.create(cdef.displayId + "_" + str(instance_count)) c.definition = cdef.identity instance_list.append(c) return component_list def assemblePrimaryStructure(self, primary_structure, assembly_method=None, doc=None): primary_structure = self.assemble(primary_structure, assembly_method, doc) doc = doc if doc else self.doc if all(isinstance(c, str) for c in primary_structure): component_identities = primary_structure[:] primary_structure = [] for c_id in component_identities: cdef = doc.componentDefinitions[c_id] primary_structure.append(cdef) self.types += [SO_LINEAR] component_map = {} for c in self.components: if c.definition not in component_map: component_map[c.definition] = [c] else: component_map[c.definition].append(c) primary_structure_components = [] for cd in primary_structure: primary_structure_components.append(component_map[cd.identity].pop()) if len(self.sequenceConstraints): self.sequenceConstraints.clear() for upstream, downstream in zip(primary_structure_components[:-1], primary_structure_components[1:]): instance_count = 0 constraint_id = 'constraint_%d' % instance_count while constraint_id in self.sequenceConstraints: instance_count += 1 constraint_id = 'constraint_%d' % instance_count sc = self.sequenceConstraints.create(constraint_id) sc.subject = upstream sc.object = downstream sc.restriction = SBOL_RESTRICTION_PRECEDES def compile(self, assembly_method=None): if not self.doc: raise ValueError('Cannot compile <%s>. The ComponentDefinition must belong ' 'to a Document in order to compile.' 
% self.identity) if self.sequence is None: sequence_id = self.displayId + '_seq' compliant_uris = Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS) typed_uris = Config.getOption(ConfigOptions.SBOL_TYPED_URIS) if compliant_uris and typed_uris: sequence_id = self.displayId self.sequence = Sequence(sequence_id) return self.sequence.compile(assembly_method=assembly_method) def updateSequence(self, composite_sequence=""): raise NotImplementedError("Not yet implemented") def getInSequentialOrder(self): return self.getPrimaryStructureComponents() def hasUpstreamComponent(self, component): if len(self.sequenceConstraints) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'Cannot determine upstream Component. ' 'Self has no SequenceConstraints') else: for sc in self.sequenceConstraints: if sc.object == component.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES: return True return False def getUpstreamComponent(self, component): if len(self.sequenceConstraints) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'Cannot get upstream Component. Self ' 'has no SequenceConstraints') else: upstream_component_id = None for sc in self.sequenceConstraints: if sc.object == component.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES: upstream_component = self.components[sc.subject] return upstream_component raise SBOLError(SBOLErrorCode.SBOL_ERROR_END_OF_LIST, 'This component has no upstream ' 'component. Use hasUpstreamComponent to catch this error') def hasDownstreamComponent(self, component): if len(self.sequenceConstraints) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'Cannot determine upstream Component. ' 'Self has no SequenceConstraints') else: for sc in self.sequenceConstraints: if sc.subject == component.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES: return True return False def getDownstreamComponent(self, component): if len(self.sequenceConstraints) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'Cannot get downstream Component. ' 'Self has no SequenceConstraints') else: upstream_component_id = None for sc in self.sequenceConstraints: if sc.subject == component.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES: upstream_component = self.components[sc.object] return upstream_component raise SBOLError(SBOLErrorCode.SBOL_ERROR_END_OF_LIST, 'This component has no downstream ' 'component. Use hasDownstreamComponent to catch this error') def deleteDownstreamComponent(self, upstream_component): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise ValueError('SBOL-compliant URIs must be enabled to use this method') if upstream_component.identity not in self.components: msg = 'Deletion failed. ComponentDefinition %s has no child component %s' msg = msg % (self.identity, upstream_component.identity) raise ValueError(msg) primary_structure = self.getPrimaryStructureComponents() if upstream_component.identity == primary_structure[-1].identity: msg = 'Deletion failed. 
No Components were found downstream of %s' msg = msg % upstream_component.identity raise ValueError( msg) downstream_component = None upstream_sequence_constraint = None downstream_sequence_constraint = None for c_upstream, c_downstream in zip(primary_structure[:-1], primary_structure[1:]): for sc in self.sequenceConstraints: if (sc.subject == c_upstream.identity and sc.object == c_downstream.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES): upstream_sequence_constraint = downstream_sequence_constraint downstream_sequence_constraint = sc if downstream_component: break if c_upstream.identity == upstream_component.identity: downstream_component = c_downstream if downstream_component: self.components.remove(downstream_component.identity) self.sequenceConstraints.remove(downstream_sequence_constraint.identity) if downstream_sequence_constraint.subject == downstream_component.identity: upstream_sequence_constraint.object = downstream_sequence_constraint.object def deleteUpstreamComponent(self, downstream_component): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise ValueError('SBOL-compliant URIs must be enabled to use this method') if downstream_component.identity not in self.components: msg = 'Deletion failed. No Components were found upstream of %s' msg = msg % downstream_component.identity raise ValueError(msg) primary_structure = self.getPrimaryStructureComponents() if downstream_component.identity == primary_structure[0].identity: msg = 'Deletion failed. Component %s does not have an upstream component' msg = msg % downstream_component.identity raise ValueError(msg) upstream_component = None upstream_sequence_constraint = None downstream_sequence_constraint = None for c_upstream, c_downstream in zip(primary_structure[:-1], primary_structure[1:]): for sc in self.sequenceConstraints: if (sc.subject == c_upstream.identity and sc.object == c_downstream.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES): upstream_sequence_constraint = downstream_sequence_constraint downstream_sequence_constraint = sc if c_downstream.identity == downstream_component.identity: upstream_component = c_upstream break if upstream_component: self.components.remove(upstream_component.identity) self.sequenceConstraints.remove(downstream_sequence_constraint.identity) if upstream_sequence_constraint: upstream_sequence_constraint.object = downstream_sequence_constraint.object def insertUpstreamComponent(self, downstream, insert): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise ValueError('SBOL-compliant URIs must be enabled to use this method') if not self.doc: msg = f'ComponentDefinition {self.identity} does not belong to a Document' msg += ' Add this ComponentDefinition to a Document before calling' msg += ' insertUpstreamComponent' raise ValueError(msg) if self.doc != insert.doc: msg = f'Invalid Document for ComponentDefinition {insert.identity}.' msg += ' Add the insert to the same Document as the calling object.' raise ValueError(msg) if not insert.doc: insert.doc = self.doc target_constraint = None for sc in self.sequenceConstraints: if (sc.object == downstream.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES): if target_constraint is not None: msg = 'SequenceConstraints are ambiguous. 
The target component' msg += ' may have more than one downstream component specified' raise ValueError(msg) target_constraint = sc instance_count = 0 component_id = posixpath.join(self.persistentIdentity, f'{insert.displayId}_{instance_count}', self.version) while self.find(component_id): instance_count += 1 component_id = posixpath.join(self.persistentIdentity, f'{insert.displayId}_{instance_count}', self.version) c_insert = self.components.create(f'{insert.displayId}_{instance_count}') c_insert.definition = insert.identity instance_count = 0 sc_id = posixpath.join(self.persistentIdentity, f'constraint_{instance_count}', self.version) while self.find(sc_id): instance_count += 1 sc_id = posixpath.join(self.persistentIdentity, f'constraint_{instance_count}', self.version) sc_new = self.sequenceConstraints.create(f'constraint_{instance_count}') sc_new.subject = component_id sc_new.object = downstream.identity sc_new.restriction = SBOL_RESTRICTION_PRECEDES if target_constraint: target_constraint.object = c_insert.identity def insertDownstreamComponent(self, upstream, insert): if not Config.getOption(ConfigOptions.SBOL_COMPLIANT_URIS): raise ValueError('SBOL-compliant URIs must be enabled to use this method') if not self.doc: msg = f'ComponentDefinition {self.identity} does not belong to a Document' msg += ' Add this ComponentDefinition to a Document before calling' msg += ' insertUpstreamComponent' raise ValueError(msg) if self.doc != insert.doc: msg = f'Invalid Document for ComponentDefinition {insert.identity}.' msg += ' Add the insert to the same Document as the calling object.' raise ValueError(msg) if not insert.doc: insert.doc = self.doc target_constraint = None for sc in self.sequenceConstraints: if (sc.subject == upstream.identity and sc.restriction == SBOL_RESTRICTION_PRECEDES): if target_constraint is not None: msg = 'SequenceConstraints are ambiguous. The target component' msg += ' may have more than one downstream component specified' raise ValueError(msg) target_constraint = sc instance_count = 0 component_id = posixpath.join(self.persistentIdentity, f'{insert.displayId}_{instance_count}', self.version) while self.find(component_id): instance_count += 1 component_id = posixpath.join(self.persistentIdentity, f'{insert.displayId}_{instance_count}', self.version) c_insert = self.components.create(f'{insert.displayId}_{instance_count}') c_insert.definition = insert.identity instance_count = 0 sc_id = posixpath.join(self.persistentIdentity, f'constraint_{instance_count}', self.version) while self.find(sc_id): instance_count += 1 sc_id = posixpath.join(self.persistentIdentity, f'constraint_{instance_count}', self.version) sc_new = self.sequenceConstraints.create(f'constraint_{instance_count}') sc_new.subject = upstream.identity sc_new.object = component_id sc_new.restriction = SBOL_RESTRICTION_PRECEDES if target_constraint: target_constraint.object = c_insert.identity def getFirstComponent(self): if len(self.components) < 1: raise SBOLError(SBOLErrorCode.SBOL_ERROR_NOT_FOUND, 'This ComponentDefinition has no ' 'components') arbitrary_component = self.components[0] next_component = arbitrary_component while self.hasUpstreamComponent(next_component): next_component = self.getUpstreamComponent(next_component) return next_component
Apache License 2.0
olitheolix/aiokubernetes
aiokubernetes/models/v1_container.py
V1Container.readiness_probe
python
def readiness_probe(self):
    return self._readiness_probe
Gets the readiness_probe of this V1Container.  # noqa: E501

Periodic probe of container service readiness. Container will be removed
from service endpoints if the probe fails. Cannot be updated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes  # noqa: E501

:return: The readiness_probe of this V1Container.  # noqa: E501
:rtype: V1Probe
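For illustration, a minimal usage sketch of this property (assuming the aiokubernetes package is importable; the container name is hypothetical):

from aiokubernetes.models.v1_container import V1Container

# name is the only required constructor argument; readiness_probe
# stays None until a V1Probe is assigned.
container = V1Container(name="web")
print(container.readiness_probe)  # None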
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1_container.py#L391-L399
import pprint import re from aiokubernetes.models.v1_container_port import V1ContainerPort from aiokubernetes.models.v1_env_from_source import V1EnvFromSource from aiokubernetes.models.v1_env_var import V1EnvVar from aiokubernetes.models.v1_lifecycle import V1Lifecycle from aiokubernetes.models.v1_probe import V1Probe from aiokubernetes.models.v1_resource_requirements import V1ResourceRequirements from aiokubernetes.models.v1_security_context import V1SecurityContext from aiokubernetes.models.v1_volume_device import V1VolumeDevice from aiokubernetes.models.v1_volume_mount import V1VolumeMount class V1Container(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'args': 'list[str]', 'command': 'list[str]', 'env': 'list[V1EnvVar]', 'env_from': 'list[V1EnvFromSource]', 'image': 'str', 'image_pull_policy': 'str', 'lifecycle': 'V1Lifecycle', 'liveness_probe': 'V1Probe', 'name': 'str', 'ports': 'list[V1ContainerPort]', 'readiness_probe': 'V1Probe', 'resources': 'V1ResourceRequirements', 'security_context': 'V1SecurityContext', 'stdin': 'bool', 'stdin_once': 'bool', 'termination_message_path': 'str', 'termination_message_policy': 'str', 'tty': 'bool', 'volume_devices': 'list[V1VolumeDevice]', 'volume_mounts': 'list[V1VolumeMount]', 'working_dir': 'str' } attribute_map = { 'args': 'args', 'command': 'command', 'env': 'env', 'env_from': 'envFrom', 'image': 'image', 'image_pull_policy': 'imagePullPolicy', 'lifecycle': 'lifecycle', 'liveness_probe': 'livenessProbe', 'name': 'name', 'ports': 'ports', 'readiness_probe': 'readinessProbe', 'resources': 'resources', 'security_context': 'securityContext', 'stdin': 'stdin', 'stdin_once': 'stdinOnce', 'termination_message_path': 'terminationMessagePath', 'termination_message_policy': 'terminationMessagePolicy', 'tty': 'tty', 'volume_devices': 'volumeDevices', 'volume_mounts': 'volumeMounts', 'working_dir': 'workingDir' } def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, readiness_probe=None, resources=None, security_context=None, stdin=None, stdin_once=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None): self._args = None self._command = None self._env = None self._env_from = None self._image = None self._image_pull_policy = None self._lifecycle = None self._liveness_probe = None self._name = None self._ports = None self._readiness_probe = None self._resources = None self._security_context = None self._stdin = None self._stdin_once = None self._termination_message_path = None self._termination_message_policy = None self._tty = None self._volume_devices = None self._volume_mounts = None self._working_dir = None self.discriminator = None if args is not None: self.args = args if command is not None: self.command = command if env is not None: self.env = env if env_from is not None: self.env_from = env_from if image is not None: self.image = image if image_pull_policy is not None: self.image_pull_policy = image_pull_policy if lifecycle is not None: self.lifecycle = lifecycle if liveness_probe is not None: self.liveness_probe = liveness_probe self.name = name if ports is not None: self.ports = ports if readiness_probe is not None: self.readiness_probe = readiness_probe if resources is not None: 
self.resources = resources if security_context is not None: self.security_context = security_context if stdin is not None: self.stdin = stdin if stdin_once is not None: self.stdin_once = stdin_once if termination_message_path is not None: self.termination_message_path = termination_message_path if termination_message_policy is not None: self.termination_message_policy = termination_message_policy if tty is not None: self.tty = tty if volume_devices is not None: self.volume_devices = volume_devices if volume_mounts is not None: self.volume_mounts = volume_mounts if working_dir is not None: self.working_dir = working_dir @property def args(self): return self._args @args.setter def args(self, args): self._args = args @property def command(self): return self._command @command.setter def command(self, command): self._command = command @property def env(self): return self._env @env.setter def env(self, env): self._env = env @property def env_from(self): return self._env_from @env_from.setter def env_from(self, env_from): self._env_from = env_from @property def image(self): return self._image @image.setter def image(self, image): self._image = image @property def image_pull_policy(self): return self._image_pull_policy @image_pull_policy.setter def image_pull_policy(self, image_pull_policy): self._image_pull_policy = image_pull_policy @property def lifecycle(self): return self._lifecycle @lifecycle.setter def lifecycle(self, lifecycle): self._lifecycle = lifecycle @property def liveness_probe(self): return self._liveness_probe @liveness_probe.setter def liveness_probe(self, liveness_probe): self._liveness_probe = liveness_probe @property def name(self): return self._name @name.setter def name(self, name): if name is None: raise ValueError("Invalid value for `name`, must not be `None`") self._name = name @property def ports(self): return self._ports @ports.setter def ports(self, ports): self._ports = ports @property
Apache License 2.0
unofficial-memsource/memsource-cli-client
memsource_cli/models/machine_translate_settings_dto.py
MachineTranslateSettingsDto.to_dict
python
def to_dict(self):
    result = {}

    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    if issubclass(MachineTranslateSettingsDto, dict):
        for key, value in self.items():
            result[key] = value

    return result
Returns the model properties as a dict
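A minimal usage sketch with hypothetical field values (assuming the memsource_cli package is importable):

from memsource_cli.models.machine_translate_settings_dto import \
    MachineTranslateSettingsDto

dto = MachineTranslateSettingsDto(id="mt-1", name="Example MT",
                                  args={"glossary": "tech"})
# Unset attributes come back as None; plain dict values pass through as-is.
print(dto.to_dict()["args"])  # {'glossary': 'tech'}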
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/machine_translate_settings_dto.py#L253-L278
import pprint import re import six class MachineTranslateSettingsDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'str', 'base_name': 'str', 'name': 'str', 'type': 'str', 'default_': 'bool', 'include_tags': 'bool', 'mt_quality_estimation': 'bool', 'args': 'dict(str, str)' } attribute_map = { 'id': 'id', 'base_name': 'baseName', 'name': 'name', 'type': 'type', 'default_': 'default_', 'include_tags': 'includeTags', 'mt_quality_estimation': 'mtQualityEstimation', 'args': 'args' } def __init__(self, id=None, base_name=None, name=None, type=None, default_=None, include_tags=None, mt_quality_estimation=None, args=None): self._id = None self._base_name = None self._name = None self._type = None self._default_ = None self._include_tags = None self._mt_quality_estimation = None self._args = None self.discriminator = None if id is not None: self.id = id if base_name is not None: self.base_name = base_name if name is not None: self.name = name if type is not None: self.type = type if default_ is not None: self.default_ = default_ if include_tags is not None: self.include_tags = include_tags if mt_quality_estimation is not None: self.mt_quality_estimation = mt_quality_estimation if args is not None: self.args = args @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def base_name(self): return self._base_name @base_name.setter def base_name(self, base_name): self._base_name = base_name @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def type(self): return self._type @type.setter def type(self, type): self._type = type @property def default_(self): return self._default_ @default_.setter def default_(self, default_): self._default_ = default_ @property def include_tags(self): return self._include_tags @include_tags.setter def include_tags(self, include_tags): self._include_tags = include_tags @property def mt_quality_estimation(self): return self._mt_quality_estimation @mt_quality_estimation.setter def mt_quality_estimation(self, mt_quality_estimation): self._mt_quality_estimation = mt_quality_estimation @property def args(self): return self._args @args.setter def args(self, args): self._args = args
Apache License 2.0
koalixswitzerland/koalixcrm
koalixcrm/crm/reporting/project.py
Project.planned_duration
python
def planned_duration(self):
    if (not self.planned_start()) or (not self.planned_end()):
        duration_in_days = "n/a"
    elif self.planned_start() > self.planned_end():
        duration_in_days = "n/a"
    else:
        duration_in_days = str((self.planned_end() - self.planned_start()).days)
    return duration_in_days
Returns the planned overall duration of a project as a string in days.

Args:
    no arguments

Returns:
    duration_in_days (String)

Raises:
    No exceptions planned
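The date arithmetic is easy to check outside Django; a standalone sketch with hypothetical dates standing in for planned_start()/planned_end():

from datetime import date

planned_start = date(2024, 1, 1)   # hypothetical stand-ins
planned_end = date(2024, 1, 15)

if (not planned_start) or (not planned_end) or planned_start > planned_end:
    duration_in_days = "n/a"
else:
    duration_in_days = str((planned_end - planned_start).days)

print(duration_in_days)  # "14"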
https://github.com/koalixswitzerland/koalixcrm/blob/87d125379845d6ab990c19500d63cbed4051040a/koalixcrm/crm/reporting/project.py#L328-L345
from decimal import * from datetime import * from django.db import models from django.contrib import admin from django.utils.translation import ugettext as _ from django.utils.html import format_html from koalixcrm.crm.reporting.generic_project_link import GenericLinkInlineAdminView from koalixcrm.crm.reporting.reporting_period import ReportingPeriodInlineAdminView, ReportingPeriod from koalixcrm.crm.reporting.task import TaskInlineAdminView from koalixcrm.crm.documents.pdf_export import PDFExport from koalixcrm.crm.exceptions import TemplateSetMissingInContract from koalixcrm.crm.models import Task from rest_framework import serializers class Project(models.Model): project_manager = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, verbose_name=_("Staff"), related_name="db_rel_project_staff", blank=True, null=True) project_name = models.CharField(verbose_name=_("Project name"), max_length=100, null=True, blank=True) description = models.TextField(verbose_name=_("Description"), null=True, blank=True) project_status = models.ForeignKey("ProjectStatus", verbose_name=_('Project Status'), blank=True, null=True) default_template_set = models.ForeignKey("djangoUserExtension.TemplateSet", verbose_name=_("Default Template Set"), null=True, blank=True) default_currency = models.ForeignKey("Currency", verbose_name=_("Default Currency"), null=False, blank=False) date_of_creation = models.DateTimeField(verbose_name=_("Created at"), auto_now_add=True) last_modification = models.DateTimeField(verbose_name=_("Last modified"), auto_now=True) last_modified_by = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, verbose_name=_("Last modified by"), related_name="db_project_last_modified") def link_to_project(self): if self.id: return format_html("<a href='/admin/crm/project/%s' >%s</a>" % (str(self.id), str(self.project_name))) else: return "Not present" link_to_project.short_description = _("Project") def create_pdf(self, template_set, printed_by): self.last_print_date = datetime.now() self.save() return PDFExport.create_pdf(self, template_set, printed_by) def get_template_set(self): if self.default_template_set.monthly_project_summary_template: return self.default_template_set.monthly_project_summary_template else: raise TemplateSetMissingInContract((_("Template Set missing in Project" + str(self)))) def get_fop_config_file(self, template_set): template_set = self.get_template_set() return template_set.get_fop_config_file() def get_xsl_file(self, template_set): template_set = self.get_template_set() return template_set.get_xsl_file() def get_reporting_period(self, search_date): from koalixcrm.crm.reporting.reporting_period import ReportingPeriod return ReportingPeriod.get_reporting_period(self, search_date) def serialize_to_xml(self, **kwargs): reporting_period = kwargs.get('reporting_period', None) from koalixcrm.djangoUserExtension.models import UserExtension objects = [self, ] objects += UserExtension.objects_to_serialize(self, self.project_manager) main_xml = PDFExport.write_xml(objects) for task in Task.objects.filter(project=self.id): task_xml = task.serialize_to_xml(reporting_period=reporting_period) main_xml = PDFExport.merge_xml(main_xml, task_xml) main_xml = PDFExport.append_element_to_pattern(main_xml, "object/[@model='crm.project']", "Effective_Effort_Overall", self.effective_costs(reporting_period=None)) if reporting_period: main_xml = PDFExport.append_element_to_pattern(main_xml, "object/[@model='crm.project']", "Effective_Effort_InPeriod", 
self.effective_costs(reporting_period=reporting_period)) main_xml = PDFExport.append_element_to_pattern(main_xml, "object/[@model='crm.project']", "Planned_Effort", self.planned_costs()) main_xml = PDFExport.append_element_to_pattern(main_xml, "object/[@model='crm.project']", "Effective_Duration", self.effective_duration()) main_xml = PDFExport.append_element_to_pattern(main_xml, "object/[@model='crm.project']", "Planned_Duration", self.planned_duration()) return main_xml def effective_accumulated_costs(self, reporting_period=None): if reporting_period: reporting_periods = ReportingPeriod.get_all_predecessors(target_reporting_period=reporting_period, project=self) else: reporting_periods = ReportingPeriod.objects.filter(project=self.id) effective_accumulated_costs = 0 for single_reporting_period in reporting_periods: all_project_tasks = Task.objects.filter(project=self.id) for task in all_project_tasks: effective_accumulated_costs += float(task.effective_costs(reporting_period=single_reporting_period)) getcontext().prec = 5 effective_accumulated_costs = Decimal(effective_accumulated_costs) self.default_currency.round(effective_accumulated_costs) return effective_accumulated_costs effective_accumulated_costs.short_description = _("Effective Accumulated costs") effective_accumulated_costs.tags = True def effective_costs(self, reporting_period): effective_cost = 0 for task in Task.objects.filter(project=self.id): effective_cost += task.effective_costs(reporting_period=reporting_period) self.default_currency.round(effective_cost) return effective_cost def planned_costs(self, reporting_period=None): planned_effort_accumulated = 0 all_project_tasks = Task.objects.filter(project=self.id) if all_project_tasks: for task in all_project_tasks: planned_effort_accumulated += task.planned_costs(reporting_period) getcontext().prec = 5 planned_effort_accumulated = Decimal(planned_effort_accumulated) self.default_currency.round(planned_effort_accumulated) return planned_effort_accumulated planned_costs.short_description = _("Planned Costs") planned_costs.tags = True def effective_start(self): no_tasks_started = True all_project_tasks = Task.objects.filter(project=self.id) effective_project_start = None if len(all_project_tasks) == 0: effective_project_start = None else: for task in all_project_tasks: if not effective_project_start: if task.effective_start(): effective_project_start = task.effective_start() no_tasks_started = False effective_task_start = task.effective_start() if effective_task_start: if effective_task_start < effective_project_start: effective_project_start = effective_task_start if no_tasks_started: effective_project_start = None return effective_project_start effective_start.short_description = _("Effective Start") effective_start.tags = True def effective_end(self): all_tasks_done = True all_project_tasks = Task.objects.filter(project=self.id) effective_project_end = None if len(all_project_tasks) == 0: effective_project_end = None else: i = 0 for task in all_project_tasks: if not effective_project_end: if not task.effective_start(): all_tasks_done = False break else: effective_project_end = task.effective_start() effective_task_end = task.effective_end() if not effective_task_end: all_tasks_done = False break elif effective_task_end > effective_project_end: effective_project_end = effective_task_end i = i+1 if not all_tasks_done: effective_project_end = None return effective_project_end effective_end.short_description = _("Effective End") effective_end.tags = True def 
effective_duration(self): effective_end = self.effective_end() effective_start = self.effective_start() if not effective_start: duration_as_string = "Project has not yet started" elif not effective_end: duration_as_string = "Project has not yet ended" else: duration_as_date = self.effective_end()-self.effective_start() duration_as_string = duration_as_date.days.__str__() return duration_as_string effective_duration.short_description = _("Effective Duration [dys]") effective_duration.tags = True def planned_start(self): tasks = Task.objects.filter(project=self.id) if tasks: i = 0 project_start = None for task in tasks: if task.planned_start(): if i == 0: project_start = task.planned_start() elif task.planned_start() < project_start: project_start = task.planned_start() i += 1 return project_start else: return None def planned_end(self): tasks = Task.objects.filter(project=self.id) if tasks: i = 0 project_end = None for task in tasks: if task.planned_end(): if i == 0: project_end = task.planned_end() elif task.planned_end() > project_end: project_end = task.planned_end() i += 1 return project_end else: return None
BSD 3-Clause New or Revised License
rapidpro/casepro
casepro/contacts/models.py
Contact.prepare_for_case
python
def prepare_for_case(self):
    if self.is_stub:
        raise ValueError("Can't create a case for a stub contact")

    self.suspend_groups()
    self.expire_flows()
    self.archive_messages()
Prepares this contact to be put in a case
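The stub guard can be exercised without the Django model; FakeContact below is a hypothetical stand-in, not part of casepro:

class FakeContact:
    # Toy stand-in for Contact, showing only the stub guard.
    def __init__(self, is_stub):
        self.is_stub = is_stub

    def prepare_for_case(self):
        if self.is_stub:
            raise ValueError("Can't create a case for a stub contact")
        # the real method then suspends groups, expires flows and
        # archives messages

try:
    FakeContact(is_stub=True).prepare_for_case()
except ValueError as exc:
    print(exc)  # Can't create a case for a stub contact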
https://github.com/rapidpro/casepro/blob/e88c64fbd224e4b04cacfce1f6f1165bc788e87a/casepro/contacts/models.py#L366-L380
import phonenumbers import regex from dash.orgs.models import Org from django_redis import get_redis_connection from django.conf import settings from django.contrib.postgres.fields import ArrayField, HStoreField from django.db import models from django.utils.translation import ugettext_lazy as _ from casepro.utils import get_language_name FIELD_LOCK_KEY = "lock:field:%d:%s" GROUP_LOCK_KEY = "lock:group:%d:%s" CONTACT_LOCK_KEY = "lock:contact:%d:%s" class InvalidURN(Exception): class URN(object): SCHEME_TEL = "tel" SCHEME_TWITTER = "twitter" SCHEME_EMAIL = "mailto" VALID_SCHEMES = (SCHEME_TEL, SCHEME_TWITTER, SCHEME_EMAIL) def __init__(self): raise ValueError("Class shouldn't be instantiated") @classmethod def from_parts(cls, scheme, path): if not scheme or scheme not in cls.VALID_SCHEMES: raise ValueError("Invalid scheme component: '%s'" % scheme) if not path: raise ValueError("Invalid path component: '%s'" % path) return "%s:%s" % (scheme, path) @classmethod def to_parts(cls, urn): try: scheme, path = urn.split(":", 1) except ValueError: raise ValueError("URN strings must contain scheme and path components") if not scheme or scheme not in cls.VALID_SCHEMES: raise ValueError("URN contains an invalid scheme component: '%s'" % scheme) if not path: raise ValueError("URN contains an invalid path component: '%s'" % path) return scheme, path @classmethod def normalize(cls, urn): scheme, path = cls.to_parts(urn) norm_path = str(path).strip() if scheme == cls.SCHEME_TEL: norm_path = cls.normalize_phone(norm_path) elif scheme == cls.SCHEME_TWITTER: norm_path = norm_path.lower() if norm_path[0:1] == "@": norm_path = norm_path[1:] norm_path = norm_path.lower() elif scheme == cls.SCHEME_EMAIL: norm_path = norm_path.lower() return cls.from_parts(scheme, norm_path) @classmethod def validate(cls, urn): scheme, path = urn.split(":", 1) if scheme == cls.SCHEME_TEL: return cls.validate_phone(path) return True @classmethod def normalize_phone(cls, number): number = regex.sub(r"[^0-9a-z\+]", "", number.lower(), regex.V0) if len(number) >= 11 and number[0] not in ["+", "0"]: number = "+" + number try: normalized = phonenumbers.parse(number) if phonenumbers.is_possible_number(normalized): return phonenumbers.format_number(normalized, phonenumbers.PhoneNumberFormat.E164) except Exception: pass return number @classmethod def validate_phone(cls, number): try: parsed = phonenumbers.parse(number) except phonenumbers.NumberParseException as e: raise InvalidURN(str(e)) if number != phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164): raise InvalidURN("Phone numbers must be in E164 format") if not phonenumbers.is_possible_number(parsed) or not phonenumbers.is_valid_number(parsed): raise InvalidURN("Phone numbers must be in E164 format") return True class Group(models.Model): org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name="groups", on_delete=models.PROTECT) uuid = models.CharField(max_length=36, unique=True) name = models.CharField(max_length=64) count = models.IntegerField(null=True) is_dynamic = models.BooleanField(default=False, help_text=_("Whether this group is dynamic")) created_on = models.DateTimeField(auto_now_add=True, help_text=_("When this group was created")) is_active = models.BooleanField(default=True, help_text=_("Whether this group is active")) is_visible = models.BooleanField(default=False, help_text=_("Whether this group is visible to partner users")) suspend_from = models.BooleanField( default=False, help_text=_("Whether contacts should be suspended from 
this group during a case") ) @classmethod def get_all(cls, org, visible=None, dynamic=None): qs = cls.objects.filter(org=org, is_active=True) if visible is not None: qs = qs.filter(is_visible=visible) if dynamic is not None: qs = qs.filter(is_dynamic=dynamic) return qs @classmethod def get_suspend_from(cls, org): return cls.get_all(org, dynamic=False).filter(suspend_from=True) @classmethod def lock(cls, org, uuid): return get_redis_connection().lock(GROUP_LOCK_KEY % (org.pk, uuid), timeout=60) def as_json(self, full=True): if full: return {"id": self.pk, "name": self.name, "count": self.count, "is_dynamic": self.is_dynamic} else: return {"id": self.pk, "name": self.name} def __str__(self): return self.name class Field(models.Model): TYPE_TEXT = "T" TYPE_DECIMAL = "N" TYPE_DATETIME = "D" TYPE_STATE = "S" TYPE_DISTRICT = "I" TEMBA_TYPES = { "text": TYPE_TEXT, "numeric": TYPE_DECIMAL, "datetime": TYPE_DATETIME, "state": TYPE_STATE, "district": TYPE_DISTRICT, } org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name="fields", on_delete=models.PROTECT) key = models.CharField(verbose_name=_("Key"), max_length=36) label = models.CharField(verbose_name=_("Label"), max_length=36, null=True) value_type = models.CharField(verbose_name=_("Value data type"), max_length=1, default=TYPE_TEXT) is_active = models.BooleanField(default=True, help_text="Whether this field is active") is_visible = models.BooleanField(default=False, help_text=_("Whether this field is visible to partner users")) @classmethod def get_all(cls, org, visible=None): qs = cls.objects.filter(org=org, is_active=True) if visible is not None: qs = qs.filter(is_visible=visible) return qs @classmethod def lock(cls, org, key): return get_redis_connection().lock(FIELD_LOCK_KEY % (org.pk, key), timeout=60) def __str__(self): return self.key def as_json(self): return {"key": self.key, "label": self.label, "value_type": self.value_type} class Meta: unique_together = ("org", "key") class Contact(models.Model): DISPLAY_NAME = "name" DISPLAY_URNS = "urns" DISPLAY_ANON = "uuid" SAVE_GROUPS_ATTR = "__data__groups" org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name="contacts", on_delete=models.PROTECT) uuid = models.CharField(max_length=36, unique=True, null=True) name = models.CharField( verbose_name=_("Full name"), max_length=128, null=True, blank=True, help_text=_("The name of this contact") ) groups = models.ManyToManyField(Group, related_name="contacts") fields = HStoreField(null=True) language = models.CharField( max_length=3, verbose_name=_("Language"), null=True, blank=True, help_text=_("Language for this contact") ) is_active = models.BooleanField(default=True, help_text="Whether this contact is active") is_blocked = models.BooleanField(default=False, help_text="Whether this contact is blocked") is_stopped = models.BooleanField(default=False, help_text="Whether this contact opted out of receiving messages") is_stub = models.BooleanField(default=False, help_text="Whether this contact is just a stub") suspended_groups = models.ManyToManyField(Group, help_text=_("Groups this contact has been suspended from")) created_on = models.DateTimeField(auto_now_add=True, help_text=_("When this contact was created")) urns = ArrayField( models.CharField(max_length=255), default=list, help_text=_("List of URNs of the format 'scheme:path'") ) def __init__(self, *args, **kwargs): if self.SAVE_GROUPS_ATTR in kwargs: setattr(self, self.SAVE_GROUPS_ATTR, kwargs.pop(self.SAVE_GROUPS_ATTR)) super(Contact, 
self).__init__(*args, **kwargs) @classmethod def get_or_create(cls, org, uuid, name=None): with cls.lock(org, uuid): contact = cls.objects.filter(org=org, uuid=uuid).first() if not contact: contact = cls.objects.create(org=org, uuid=uuid, name=name, is_stub=True) return contact @classmethod def get_or_create_from_urn(cls, org, urn, name=None): normalized_urn = URN.normalize(urn) contact = cls.objects.filter(urns__contains=[normalized_urn]).first() if not contact: URN.validate(normalized_urn) contact = cls.objects.create(org=org, name=name, urns=[normalized_urn], is_stub=False) org.get_backend().push_contact(org, contact) return contact @classmethod def lock(cls, org, uuid): return get_redis_connection().lock(CONTACT_LOCK_KEY % (org.pk, uuid), timeout=60) def get_display(self): display_format = getattr(settings, "SITE_CONTACT_DISPLAY", self.DISPLAY_NAME) if display_format == self.DISPLAY_ANON and self.uuid: return self.uuid[:6].upper() elif display_format == self.DISPLAY_URNS and self.urns: _scheme, path = URN.to_parts(self.urns[0]) return path elif display_format == self.DISPLAY_NAME and self.name: return self.name return "---" def get_fields(self, visible=None): fields = self.fields if self.fields else {} if visible: keys = Field.get_all(self.org, visible=True).values_list("key", flat=True) return {k: fields.get(k) for k in keys} else: return fields def get_language(self): if self.language: return {"code": self.language, "name": get_language_name(self.language)} else: return None
BSD 3-Clause New or Revised License
google/gazoo-device
gazoo_device/switchboard/switchboard.py
SwitchboardDefault.get_line_identifier
python
def get_line_identifier(self):
    return self._identifier
Returns the line identifier currently used by Switchboard.
https://github.com/google/gazoo-device/blob/f333b386f5993c8d4c9e12c89ebb620a0c4f5506/gazoo_device/switchboard/switchboard.py#L829-L831
import io import multiprocessing import os import queue import re import signal import subprocess import time import types from typing import Any, Dict, List, Optional, Tuple import xmodem from gazoo_device import config from gazoo_device import decorators from gazoo_device import errors from gazoo_device import gdm_logger from gazoo_device.capabilities.interfaces import switchboard_base from gazoo_device.switchboard import line_identifier from gazoo_device.switchboard import log_process from gazoo_device.switchboard import switchboard_process from gazoo_device.switchboard import transport_process from gazoo_device.switchboard import transport_properties from gazoo_device.switchboard.expect_response import ExpectResponse from gazoo_device.switchboard.transports import jlink_transport from gazoo_device.switchboard.transports import serial_transport from gazoo_device.switchboard.transports import tcp_transport from gazoo_device.switchboard.transports import transport_base from gazoo_device.utility import common_utils from gazoo_device.utility import usb_utils logger = gdm_logger.get_logger("core") MODE_TYPE_ALL = switchboard_base.MODE_TYPE_ALL MODE_TYPE_ANY = switchboard_base.MODE_TYPE_ANY MODE_TYPE_SEQUENTIAL = switchboard_base.MODE_TYPE_SEQUENTIAL VERIFY_METHOD_MD5SUM = switchboard_base.VERIFY_METHOD_MD5SUM _VALID_EXPECT_TYPES = [ line_identifier.LINE_TYPE_ALL, line_identifier.LINE_TYPE_LOG, line_identifier.LINE_TYPE_RESPONSE ] _VALID_EXPECT_MODES = [MODE_TYPE_ALL, MODE_TYPE_ANY, MODE_TYPE_SEQUENTIAL] _VERIFY_METHODS = [VERIFY_METHOD_MD5SUM] def _ensure_has_newline(cmd, add_newline=True, newline="\n"): if add_newline and not cmd.endswith(("\n", "\x00", r"\0", newline)): cmd += newline return cmd def _get_pattern_index(compiled_list, match_list, mode): if match_list: if mode == MODE_TYPE_SEQUENTIAL: return len(match_list) - 1 else: for index, pattern in enumerate(compiled_list): if pattern == match_list[-1].re: return index return None def _get_pattern_list(compiled_list, match_list, mode): if mode == MODE_TYPE_ANY: return compiled_list missing_patterns = _get_missing_patterns(compiled_list, match_list, mode) if mode == MODE_TYPE_ALL: return missing_patterns return [missing_patterns[0]] def _get_missing_patterns(compiled_list, match_list, mode): if mode == MODE_TYPE_SEQUENTIAL: return compiled_list[len(match_list):] else: matched_patterns = [match.re for match in match_list] return [ pattern for pattern in compiled_list if pattern not in matched_patterns ] def _get_pattern_strings(compiled_list): return [pattern.pattern for pattern in compiled_list] class SwitchboardDefault(switchboard_base.SwitchboardBase): def __init__( self, device_name, exception_queue, transport_list, log_path, framer_list=None, identifier=None, button_list=None, parser=None, partial_line_timeout_list=None, force_slow=False, max_log_size=0, ): super().__init__(device_name=device_name) if framer_list is None: framer_list = [] if partial_line_timeout_list is None: partial_line_timeout_list = [] self.log_path = log_path self.button_list = button_list self._force_slow = force_slow self._identifier = identifier or line_identifier.AllUnknownIdentifier() time.sleep(.1) common_utils.run_before_fork() self._mp_manager = multiprocessing.Manager() common_utils.run_after_fork_in_parent() self._transport_processes = [] self._log_queue = self._mp_manager.Queue() self._call_result_queue = self._mp_manager.Queue() self._raw_data_queue = self._mp_manager.Queue() self._raw_data_queue_users = 0 self._transport_process_id = 0 
self._exception_queue = exception_queue self._add_transport_processes(transport_list, framer_list, partial_line_timeout_list) self._add_log_writer_process(log_path, max_log_size) self._add_log_filter_process(parser, log_path) self._start_processes() def __del__(self): self.close() @decorators.CapabilityLogDecorator(logger, level=None) def add_log_note(self, note): if note[-1] == "\n": log_message = "Note: {}".format(note) else: log_message = "Note: {}\n".format(note) try: log_process.log_message(self._log_queue, log_message, "M") except (AttributeError, IOError): pass @decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG) def add_new_filter(self, filter_path): if not os.path.exists(filter_path): raise ValueError("Filter path {} doesn't exist.".format(filter_path)) if not hasattr( self, "_log_filter_process") or not self._log_filter_process.is_running(): raise RuntimeError("Log filter process is not currently running.") self._log_filter_process.send_command(log_process.CMD_ADD_NEW_FILTER, filter_path) while (self._log_filter_process.is_running() and not self._log_filter_process.is_command_done()): time.sleep(0.001) def call(self, method: types.MethodType, method_args: Tuple[Any, ...] = (), method_kwargs: Optional[Dict[str, Any]] = None, port: int = 0) -> Any: method_kwargs = method_kwargs or {} self._validate_port(port, self.call.__name__) class_name = method.__qualname__.split(".")[-2] method_name = method.__qualname__.split(".")[-1] transport_class_name = type( self._transport_processes[port].transport).__name__ if (class_name != transport_base.TransportBase.__name__ and class_name != transport_class_name): raise errors.DeviceError( f"{self._device_name} Switchboard.call failed. " f"Requested method {method.__qualname__!r}, but transport {port} " f"is of type {transport_class_name!r}.") self.add_log_note("Executing {!r} in transport {}" .format(method.__qualname__, port)) self._transport_processes[port].send_command( transport_process.CMD_TRANSPORT_CALL, (method_name, method_args, method_kwargs)) success, response = self._call_result_queue.get() if success: return response raise errors.DeviceError( f"{self._device_name} switchboard.call of method {method.__qualname__} " f"in transport {port} failed. {response}") def call_and_expect(self, method: types.MethodType, pattern_list: List[str], timeout: float = 30.0, searchwindowsize: int = config.SEARCHWINDOWSIZE, expect_type: str = line_identifier.LINE_TYPE_ALL, mode: str = MODE_TYPE_ANY, method_args: Tuple[Any, ...] 
= (), method_kwargs: Optional[Dict[str, Any]] = None, port: int = 0, raise_for_timeout: bool = False): expect_ret, func_ret = self.do_and_expect( self.call, [method], { "method_args": method_args, "method_kwargs": method_kwargs, "port": port }, pattern_list, timeout=timeout, searchwindowsize=searchwindowsize, expect_type=expect_type, mode=mode, include_func_response=True) if expect_ret and expect_ret.timedout and raise_for_timeout: raise errors.DeviceError( "Device {} call_and_expect timed out for method {} in {}s".format( self._device_name, method.__name__, timeout)) return expect_ret, func_ret @decorators.CapabilityLogDecorator(logger) def click(self, button, duration=0.5, port=0): self._check_button_args("click", button, port, duration=duration) log_message = "click button {} on port {} for duration {} - begin".format( button, port, duration) self.add_log_note(log_message) self.button_list[port].click(button, duration) log_message = "click button {} on port {} for duration {} - end".format( button, port, duration) self.add_log_note(log_message) def click_and_expect(self, button, pattern_list, duration=0.5, timeout=30.0, searchwindowsize=config.SEARCHWINDOWSIZE, expect_type=line_identifier.LINE_TYPE_ALL, port=0, mode="any", raise_for_timeout=False): return self.do_and_expect( self.click, [button], { "duration": duration, "port": port }, pattern_list, timeout=timeout, searchwindowsize=searchwindowsize, expect_type=expect_type, mode=mode, raise_for_timeout=raise_for_timeout) @decorators.CapabilityLogDecorator(logger, level=None) def close(self): comms_addresses = [ proc.transport.comms_address for proc in self._transport_processes ] self._stop_processes() if hasattr(self, "button_list") and self.button_list: for button in self.button_list: button.close() self.button_list = [] if hasattr(self, "_call_result_queue") and self._call_result_queue: delattr(self, "_call_result_queue") if hasattr(self, "_raw_data_queue") and self._raw_data_queue: delattr(self, "_raw_data_queue") if hasattr(self, "_log_queue") and self._log_queue: delattr(self, "_log_queue") if hasattr(self, "_exception_queue") and self._exception_queue: delattr(self, "_exception_queue") if hasattr(self, "_mp_manager") and self._mp_manager: self._mp_manager.shutdown() delattr(self, "_mp_manager") self.ensure_serial_paths_unlocked(comms_addresses) super().close() @decorators.CapabilityLogDecorator(logger) def close_all_transports(self): for port in range(self.number_transports): self.close_transport(port=port) @decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG) def close_transport(self, port=0): self._validate_port(port, self.close_transport.__name__) log_message = "closing transport for port {}".format(port) self.add_log_note(log_message) try: if self.button_list: button = self.button_list[port] button.close() except IndexError: pass start_time = time.time() transport_proc = self._transport_processes[port] transport_proc.send_command(transport_process.CMD_TRANSPORT_CLOSE) while transport_proc.is_open(): time.sleep(0.01) log_message = "closed transport for port {} in {}s".format( port, time.time() - start_time) self.add_log_note(log_message) def do_and_expect(self, func, func_args, func_kwargs, pattern_list, timeout=30.0, searchwindowsize=config.SEARCHWINDOWSIZE, expect_type=line_identifier.LINE_TYPE_ALL, mode=MODE_TYPE_ANY, raise_for_timeout=False, include_func_response=False): if not callable(func): raise errors.DeviceError("Device {} do_and_expect failed. 
" "Function: {} is not callable.".format( self._device_name, func)) self._check_expect_args(pattern_list, timeout, searchwindowsize, expect_type, mode) compiled_list = self._get_compiled_pattern_list(pattern_list) try: self._enable_raw_data_queue() func_ret = func(*func_args, **func_kwargs) expect_ret = self._expect( compiled_list, timeout, searchwindowsize, expect_type, mode, raise_for_timeout=raise_for_timeout) if include_func_response: return expect_ret, func_ret else: return expect_ret finally: self._disable_raw_data_queue() @decorators.CapabilityLogDecorator(logger) def echo_file_to_transport(self, source_file, destination_path, port=0, bytes_per_echo=50): self._validate_port(port, self.echo_file_to_transport.__name__) if not os.path.exists(source_file): raise errors.DeviceError("Device {} echo file to transport failed. " "Source file {} doesn't exist.".format( self._device_name, source_file)) elif not isinstance(bytes_per_echo, int): raise errors.DeviceError( "Device {} echo file to transport failed. " "Expecting int for bytes_per_echo found {} instead.".format( self._device_name, type(bytes_per_echo))) elif bytes_per_echo <= 0: raise errors.DeviceError( "Device {} echo file to transport failed. " "Invalid bytes_per_echo value {} expected >0".format( self._device_name, bytes_per_echo)) start_time = time.time() log_message = ("starting echo transfer of {} for port {} to {}".format( source_file, port, destination_path)) self.add_log_note(log_message) try: with io.open(source_file, "rb") as in_file: data = in_file.read() except IOError as err: raise errors.DeviceError("Device {} echo file to transport failed. " "Unable to read {}. " "Error: {!r}".format(self._device_name, source_file, err)) cmd_string = "echo -ne > {}\n".format(destination_path) echo_pattern1 = r"echo -ne > [/\w.]+" result = self.send_and_expect( cmd_string, [echo_pattern1], port=port, expect_type="response") if result.timedout: raise errors.DeviceError("Device {} echo file to transport failed. " "Unable to create/erase file {} on device. " "Output: {!r}".format(self._device_name, destination_path, result.before)) echo_pattern2 = r'echo -ne ".*"\s.*\s[/\w.]+' data_indexes = list(range(0, len(data), bytes_per_echo)) + [len(data)] for data_index in range(1, len(data_indexes)): start_index = data_indexes[data_index - 1] end_index = data_indexes[data_index] data_chunk = u"".join( [u"\\x%02x" % byte for byte in data[start_index:end_index]]) cmd_string = "echo -ne \"{}\" >> {}\n".format(data_chunk, destination_path) result = self.send_and_expect( cmd_string, [echo_pattern2], port=port, expect_type="response") if result.timedout: raise errors.DeviceError("Device {} echo file to transport failed. 
" "Unable to echo bytes {!r} to file {} " "Output: {!r}".format(self._device_name, data_chunk, destination_path, result.before)) log_message = ("finished echo transfer of {} for port {} in {}s".format( source_file, port, time.time() - start_time)) self.add_log_note(log_message) @decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG) def ensure_serial_paths_unlocked(self, communication_addresses: List[str]): for comms_address in communication_addresses: if comms_address not in usb_utils.get_all_serial_connections(): continue try: output = subprocess.check_output(["lsof", comms_address], stderr=subprocess.STDOUT) output = output.decode("utf-8", "replace").splitlines() except (subprocess.CalledProcessError, ImportError): continue process_number = output[-1].split()[1] try: number = int(process_number) logger.info( "{}'s communication process still exists. Killing process {}" .format(self._device_name, number)) os.kill(int(process_number), signal.SIGTERM) except ValueError: logger.error( "{}'s communication process still exists " "but could not parse process number from lsof output correctly. " "Output: {}. Expected process number: {}".format( self._device_name, output, process_number)) def expect(self, pattern_list, timeout=30.0, searchwindowsize=config.SEARCHWINDOWSIZE, expect_type=line_identifier.LINE_TYPE_ALL, mode=MODE_TYPE_ANY, raise_for_timeout=False): self._check_expect_args(pattern_list, timeout, searchwindowsize, expect_type, mode) compiled_list = self._get_compiled_pattern_list(pattern_list) try: self._enable_raw_data_queue() return self._expect( compiled_list, timeout, searchwindowsize, expect_type, mode, raise_for_timeout=raise_for_timeout) finally: self._disable_raw_data_queue()
Apache License 2.0
mila-iqia/myia
myia/compile/transform.py
CompileGraph._reset
python
def _reset(self):
    self._height = 0
    self.max_height = 0
    self.slots = {}
    self.instrs = []
    self.env_keys = []
Set/clear shared values.
https://github.com/mila-iqia/myia/blob/56774a39579b4ec4123f44843ad4ca688acc859b/myia/compile/transform.py#L167-L173
from .. import xtype from ..abstract import AbstractHandle, AbstractTuple, to_abstract from ..ir import Apply, Constant, Graph, toposort from ..operations import Primitive, primitives as P from ..utils import SymbolicKeyInstance from .vm import FinalVM i64 = xtype.Int[64] def convert_grad(graph): mng = graph.manager counter = 0 key_map = {} for node in mng.all_nodes: if node.is_constant(SymbolicKeyInstance): if node.value not in key_map: key_map[node.value] = counter counter += 1 node.value = key_map[node.value] node.abstract = to_abstract(node.value) if node.is_constant(Primitive): if node.value is P.env_setitem: node.abstract = None if node.value is P.env_getitem: node.abstract = None return graph def get_prim_graph(cache, prim, typ): if (prim, typ) not in cache: g = Graph() args = [] for t in typ.args: p = g.add_parameter() p.abstract = t args.append(p) primct = Constant(prim) primct.abstract = typ out = g.apply(primct, *args) out.abstract = typ.output g.output = out cache[(prim, typ)] = g return cache[(prim, typ)] def wrap_primitives(graph): mng = graph.manager prim_graphs = {} with mng.transact() as tr: cts = {ct for cts in mng.constants.values() for ct in cts} for ct in cts: if ct.is_constant(Primitive): for node, key in mng.uses[ct]: if key != 0: if ( key == 1 and node.inputs[0].is_constant() and node.inputs[0].value in (P.array_map, P.array_reduce) ): continue g = get_prim_graph(prim_graphs, ct.value, ct.abstract) tr.set_edge(node, key, Constant(g)) return graph def return_handles(graph): mng = graph.manager handle_nodes = [] handle_idx = [] for i, p in enumerate(graph.parameters): if isinstance(p.abstract, AbstractHandle): handle_nodes.append(p) handle_idx.append(i) if len(handle_nodes) != 0: ct0 = Constant(0) ct1 = Constant(1) ct0.abstract = to_abstract(0) ct1.abstract = to_abstract(1) old_a = graph.output.abstract with mng.transact() as tr: if graph.output.is_apply(P.make_tuple): universe_out = graph.output.inputs[1] normal_out = graph.output.inputs[2] else: assert isinstance(graph.output.abstract, AbstractTuple) assert len(graph.output.abstract.elements) == 2 universe_out = graph.apply(P.tuple_getitem, graph.output, ct0) universe_out.abstract = graph.output.abstract.elements[0] normal_out = graph.apply(P.tuple_getitem, graph.output, ct1) normal_out.abstract = graph.output.abstract.elements[1] vals = [ graph.apply(P.universe_getitem, universe_out, n) for n in handle_nodes ] types = [n.abstract.element for n in handle_nodes] for v, a in zip(vals, types): v.abstract = a handles = graph.apply(P.make_tuple, *vals) handles.abstract = AbstractTuple(types) new_out_node = graph.apply(P.make_tuple, handles, normal_out) tr.replace(graph.output, new_out_node) graph.output.abstract = AbstractTuple( [handles.abstract] + old_a.elements[1:] ) return graph, handle_idx nonlinear_ops = ( P.return_, P.partial, P.switch, P.make_tuple, P.bool_and, P.tuple_getitem, P.tuple_setitem, P.env_getitem, P.env_setitem, P.env_add, P.tagged, P.hastag, P.casttag, P.unsafe_static_cast, ) class CompileGraph: def __init__(self, lin_convert, cut_list, backend): self.lin_convert = lin_convert self.cut_list = cut_list self.backend = backend
MIT License
quantstart/qstrader
qstrader/price_handler/iq_feed_intraday_csv_bar.py
IQFeedIntradayCsvBarPriceHandler._merge_sort_ticker_data
python
def _merge_sort_ticker_data(self):
    df = pd.concat(self.tickers_data.values()).sort_index()
    start = None
    end = None
    if self.start_date is not None:
        start = df.index.searchsorted(self.start_date)
    if self.end_date is not None:
        end = df.index.searchsorted(self.end_date)
    # searchsorted returns integer positions, so slice positionally.
    # (.iloc replaces the long-deprecated .ix indexer used originally.)
    if start is None and end is None:
        return df.iterrows()
    elif start is not None and end is None:
        return df.iloc[start:].iterrows()
    elif start is None and end is not None:
        return df.iloc[:end].iterrows()
    else:
        return df.iloc[start:end].iterrows()
Concatenates all of the separate equities DataFrames into a single DataFrame that is time ordered, allowing tick data events to be added to the queue in a chronological fashion. Note that this is an idealised situation, utilised solely for backtesting. In live trading ticks may arrive "out of order".
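The merge-and-window behaviour can be seen with two tiny synthetic DataFrames (ticker names and dates are hypothetical; no CSV files involved):

import pandas as pd

spy = pd.DataFrame({"Close": [1.0, 2.0]},
                   index=pd.to_datetime(["2020-01-01", "2020-01-03"]))
qqq = pd.DataFrame({"Close": [3.0, 4.0]},
                   index=pd.to_datetime(["2020-01-02", "2020-01-04"]))

df = pd.concat([spy, qqq]).sort_index()
start = df.index.searchsorted(pd.Timestamp("2020-01-02"))
end = df.index.searchsorted(pd.Timestamp("2020-01-04"))
for timestamp, row in df.iloc[start:end].iterrows():
    print(timestamp.date(), row["Close"])
# 2020-01-02 3.0
# 2020-01-03 2.0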
https://github.com/quantstart/qstrader/blob/e6d86a3ac3dc507b26e27b1f20c2949a69438ef7/qstrader/price_handler/iq_feed_intraday_csv_bar.py#L58-L82
import os

import pandas as pd

from ..price_parser import PriceParser
from .base import AbstractBarPriceHandler
from ..event import BarEvent


class IQFeedIntradayCsvBarPriceHandler(AbstractBarPriceHandler):
    def __init__(
        self, csv_dir, events_queue,
        init_tickers=None,
        start_date=None, end_date=None
    ):
        self.csv_dir = csv_dir
        self.events_queue = events_queue
        self.continue_backtest = True
        self.tickers = {}
        self.tickers_data = {}
        if init_tickers is not None:
            for ticker in init_tickers:
                self.subscribe_ticker(ticker)
        self.start_date = start_date
        self.end_date = end_date
        self.bar_stream = self._merge_sort_ticker_data()

    def _open_ticker_price_csv(self, ticker):
        ticker_path = os.path.join(self.csv_dir, "%s.csv" % ticker)
        self.tickers_data[ticker] = pd.read_csv(
            ticker_path,
            names=[
                "Date", "Open", "Low", "High",
                "Close", "Volume", "OpenInterest"
            ],
            index_col="Date", parse_dates=True
        )
        self.tickers_data[ticker]["Ticker"] = ticker
MIT License
climdyn/qgs
qgs/inner_products/analytic.py
GroundAnalyticInnerProducts.C
python
def C(self, i, j, k):
    return 0
:math:`C_{i,j,k} = (\phi_i, J(\phi_j, \nabla^2 \phi_k))`

Warnings
--------
Not defined and not used.
https://github.com/climdyn/qgs/blob/33d79b1fa360de22b7ae595c142dbe9b6a8fb53a/qgs/inner_products/analytic.py#L838-L845
import numpy as np import sparse as sp from qgs.params.params import QgParams from qgs.basis.fourier import channel_wavenumbers, basin_wavenumbers from qgs.inner_products.base import AtmosphericInnerProducts, OceanicInnerProducts, GroundInnerProducts class AtmosphericAnalyticInnerProducts(AtmosphericInnerProducts): def __init__(self, params=None, stored=True): AtmosphericInnerProducts.__init__(self) if params is not None: if isinstance(params, QgParams): self.n = params.scale_params.n self._natm = params.nmod[0] ams = params.ablocks else: self.n = params[0] self._natm = params[2] ams = params[1] else: self.n = None stored = False ams = None self.ocean_inner_products = None self.connected_to_ocean = False self.ground_inner_products = None self.connected_to_ground = False if ams is not None: self.atmospheric_wavenumbers = channel_wavenumbers(ams) else: self.atmospheric_wavenumbers = None self.stored = stored if stored: self.compute_inner_products() def connect_to_ocean(self, ocean_inner_products): self.ground_inner_products = None self.connected_to_ground = False self.ocean_inner_products = ocean_inner_products self.connected_to_ocean = True if self.stored: noc = ocean_inner_products.noc self._s = sp.zeros((self.natm, noc), dtype=float, format='dok') self._d = sp.zeros((self.natm, noc), dtype=float, format='dok') args_list = [(i, j) for i in range(self.natm) for j in range(noc)] for arg in args_list: self._s[arg] = self._s_comp(*arg) self._d[arg] = self._d_comp(*arg) self._s = self._s.to_coo() self._d = self._d.to_coo() if not ocean_inner_products.connected_to_atmosphere: ocean_inner_products.connect_to_atmosphere(self) if self.stored: self.ocean_inner_products = None def connect_to_ground(self, ground_inner_products): self.ocean_inner_products = None self.connected_to_ocean = False self.ground_inner_products = ground_inner_products self.connected_to_ground = True if self.stored: ngr = ground_inner_products.ngr self._s = sp.zeros((self.natm, ngr), dtype=float, format='dok') args_list = [(i, j) for i in range(self.natm) for j in range(ngr)] for arg in args_list: self._s[arg] = self._s_comp(*arg) self._s = self._s.to_coo() if not ground_inner_products.connected_to_atmosphere: ground_inner_products.connect_to_atmosphere(self) if self.stored: self.ground_inner_products = None def compute_inner_products(self): self._a = sp.zeros((self.natm, self.natm), dtype=float, format='dok') self._u = sp.zeros((self.natm, self.natm), dtype=float, format='dok') self._c = sp.zeros((self.natm, self.natm), dtype=float, format='dok') self._b = sp.zeros((self.natm, self.natm, self.natm), dtype=float, format='dok') self._g = sp.zeros((self.natm, self.natm, self.natm), dtype=float, format='dok') args_list = [(i, j) for i in range(self.natm) for j in range(self.natm)] for arg in args_list: self._a[arg] = self._a_comp(*arg) self._u[arg] = self._u_comp(*arg) self._c[arg] = self._c_comp(*arg) args_list = [(i, j, k) for i in range(self.natm) for j in range(self.natm) for k in range(self.natm)] for arg in args_list: val = self._g_comp(*arg) self._g[arg] = val self._b[arg] = val * self._a[arg[-1], arg[-1]] self._a = self._a.to_coo() self._u = self._u.to_coo() self._c = self._c.to_coo() self._g = self._g.to_coo() self._b = self._b.to_coo() @property def natm(self): return self._natm def a(self, i, j): if self.stored and self._a is not None: return self._a[i, j] else: return self._a_comp(i, j) def _a_comp(self, i, j): if i == j: n = self.n Ti = self.atmospheric_wavenumbers[i] return - (n ** 2) * Ti.nx ** 2 - Ti.ny ** 2 else: 
return 0 def u(self, i, j): if self.stored and self._u is not None: return self._u[i, j] else: return self._u_comp(i, j) def _u_comp(self, i, j): return _delta(i - j) def b(self, i, j, k): if self.stored and self._b is not None: return self._b[i, j, k] else: return self._b_comp(i, j, k) def _b_comp(self, i, j, k): return self._a_comp(k, k) * self._g_comp(i, j, k) def c(self, i, j): if self.stored and self._c is not None: return self._c[i, j] else: return self._c_comp(i, j) def _c_comp(self, i, j): n = self.n Ti = self.atmospheric_wavenumbers[i] Tj = self.atmospheric_wavenumbers[j] val = 0. if (Ti.type, Tj.type) == ('K', 'L'): val = _delta(Ti.M - Tj.H) * _delta(Ti.P - Tj.P) val = n * Ti.M * val elif (Ti.type, Tj.type) == ('L', 'K'): val = _delta(Tj.M - Ti.H) * _delta(Tj.P - Ti.P) val = - n * Tj.M * val return val def g(self, i, j, k): if self.stored and self._g is not None: return self._g[i, j, k] else: return self._g_comp(i, j, k) def _g_comp(self, i, j, k): sq2 = np.sqrt(2.) pi = np.pi n = self.n Ti = self.atmospheric_wavenumbers[i] Tj = self.atmospheric_wavenumbers[j] Tk = self.atmospheric_wavenumbers[k] val = 0. par = 1 s = [Ti.type, Tj.type, Tk.type] indices = [i, j, k] if s == ['L', 'L', 'L']: a, par = _piksort(indices) Ti = self.atmospheric_wavenumbers[a[0]] Tj = self.atmospheric_wavenumbers[a[1]] Tk = self.atmospheric_wavenumbers[a[2]] vs3 = _S3(Tj.P, Tk.P, Tj.H, Tk.H) vs4 = _S4(Tj.P, Tk.P, Tj.H, Tk.H) val = vs3 * ((_delta(Tk.H - Tj.H - Ti.H) - _delta(Tk.H - Tj.H + Ti.H)) * _delta(Tk.P + Tj.P - Ti.P) + _delta(Tk.H + Tj.H - Ti.H) * (_delta(Tk.P - Tj.P + Ti.P) - _delta(Tk.P - Tj.P - Ti.P))) + vs4 * ((_delta(Tk.H + Tj.H - Ti.H) * _delta(Tk.P - Tj.P - Ti.P)) + (_delta(Tk.H - Tj.H + Ti.H) - _delta(Tk.H - Tj.H - Ti.H)) * (_delta(Tk.P - Tj.P - Ti.P) - _delta(Tk.P - Tj.P + Ti.P))) else: if 'A' in s and 'K' in s and 'L' in s: ii = s.index('A') jj = s.index('K') kk = s.index('L') Ti = self.atmospheric_wavenumbers[indices[ii]] Tj = self.atmospheric_wavenumbers[indices[jj]] Tk = self.atmospheric_wavenumbers[indices[kk]] ss, par = _piksort(s) vb1 = _B1(Ti.P, Tj.P, Tk.P) vb2 = _B2(Ti.P, Tj.P, Tk.P) val = -2 * (sq2 / pi) * Tj.M * _delta(Tj.M - Tk.H) * _flambda(Ti.P + Tj.P + Tk.P) if val != 0: val = val * (((vb1 ** 2) / (vb1 ** 2 - 1)) - ((vb2 ** 2) / (vb2 ** 2 - 1))) elif 'A' not in s: K_indices = [i for i, x in enumerate(s) if x == "K"] if len(K_indices) == 2: ss, par = _piksort(s) perm = np.argsort(s) Ti = self.atmospheric_wavenumbers[indices[perm[0]]] Tj = self.atmospheric_wavenumbers[indices[perm[1]]] Tk = self.atmospheric_wavenumbers[indices[perm[2]]] vs1 = _S1(Tj.P, Tk.P, Tj.M, Tk.H) vs2 = _S2(Tj.P, Tk.P, Tj.M, Tk.H) val = vs1 * (_delta(Ti.M - Tk.H - Tj.M) * _delta(Ti.P - Tk.P + Tj.P) - _delta(Ti.M - Tk.H - Tj.M) * _delta(Ti.P + Tk.P - Tj.P) + (_delta(Tk.H - Tj.M + Ti.M) + _delta(Tk.H - Tj.M - Ti.M)) * _delta(Tk.P + Tj.P - Ti.P)) + vs2 * (_delta(Ti.M - Tk.H - Tj.M) * _delta(Ti.P - Tk.P - Tj.P) + (_delta(Tk.H - Tj.M - Ti.M) + _delta(Ti.M + Tk.H - Tj.M)) * (_delta(Ti.P - Tk.P + Tj.P) - _delta(Tk.P - Tj.P + Ti.P))) return val * n * par def s(self, i, j): if self.stored and self._s is not None: return self._s[i, j] else: return self._s_comp(i, j) def _s_comp(self, i, j): if self.connected_to_ocean: sq2 = np.sqrt(2.) pi = np.pi Ti = self.atmospheric_wavenumbers[i] Dj = self.ocean_inner_products.oceanic_wavenumbers[j] val = 0. 
if Ti.type == 'A': val = _flambda(Dj.H) * _flambda(Dj.P + Ti.P) if val != 0.: val = val * 8 * sq2 * Dj.P / (pi ** 2 * (Dj.P ** 2 - Ti.P ** 2) * Dj.H) if Ti.type == 'K': val = _flambda(2 * Ti.M + Dj.H) * _delta(Dj.P - Ti.P) if val != 0: val = val * 4 * Dj.H / (pi * (-4 * Ti.M ** 2 + Dj.H ** 2)) if Ti.type == 'L': val = _delta(Dj.P - Ti.P) * _delta(2 * Ti.H - Dj.H) elif self.connected_to_ground: val = 0 if i == j: val = 1 else: val = 0 return val def d(self, i, j): if self.stored and self._d is not None: return self._d[i, j] else: return self._d_comp(i, j) def _d_comp(self, i, j): if self.connected_to_ocean: return self._s_comp(i, j) * self.ocean_inner_products._M_comp(j, j) else: return 0 class OceanicAnalyticInnerProducts(OceanicInnerProducts): def __init__(self, params=None, stored=True): OceanicInnerProducts.__init__(self) if params is not None: if isinstance(params, QgParams): self.n = params.scale_params.n self._noc = params.nmod[1] oms = params.oblocks else: self.n = params[0] self._noc = params[2] oms = params[1] else: self.n = None stored = False oms = None self.connected_to_atmosphere = False self.atmosphere_inner_products = None if oms is not None: self.oceanic_wavenumbers = basin_wavenumbers(oms) else: self.oceanic_wavenumbers = None self.stored = stored if stored: self.compute_inner_products() def connect_to_atmosphere(self, atmosphere_inner_products): self.atmosphere_inner_products = atmosphere_inner_products self.connected_to_atmosphere = True if self.stored: natm = atmosphere_inner_products.natm self._K = sp.zeros((self.noc, natm), dtype=float, format='dok') self._W = sp.zeros((self.noc, natm), dtype=float, format='dok') args_list = [(i, j) for i in range(self.noc) for j in range(natm)] for arg in args_list: self._K[arg] = self._K_comp(*arg) self._W[arg] = self._W_comp(*arg) self._K = self._K.to_coo() self._W = self._W.to_coo() self.atmosphere_inner_products = None def compute_inner_products(self): self._M = sp.zeros((self.noc, self.noc), dtype=float, format='dok') self._U = sp.zeros((self.noc, self.noc), dtype=float, format='dok') self._N = sp.zeros((self.noc, self.noc), dtype=float, format='dok') self._O = sp.zeros((self.noc, self.noc, self.noc), dtype=float, format='dok') self._C = sp.zeros((self.noc, self.noc, self.noc), dtype=float, format='dok') args_list = [(i, j) for i in range(self.noc) for j in range(self.noc)] for arg in args_list: self._M[arg] = self._M_comp(*arg) self._U[arg] = self._U_comp(*arg) self._N[arg] = self._N_comp(*arg) args_list = [(i, j, k) for i in range(self.noc) for j in range(self.noc) for k in range(self.noc)] for arg in args_list: val = self._O_comp(*arg) self._O[arg] = val self._C[arg] = val * self._M[arg[-1], arg[-1]] self._M = self._M.to_coo() self._U = self._U.to_coo() self._N = self._N.to_coo() self._O = self._O.to_coo() self._C = self._C.to_coo() @property def noc(self): return self._noc def K(self, i, j): if self.stored and self._K is not None: return self._K[i, j] else: return self._K_comp(i, j) def _K_comp(self, i, j): if self.connected_to_atmosphere: return self.atmosphere_inner_products._s_comp(j, i) * self.atmosphere_inner_products._a_comp(j, j) else: return 0 def M(self, i, j): if self.stored and self._M is not None: return self._M[i, j] else: return self._M_comp(i, j) def _M_comp(self, i, j): if i == j: n = self.n Di = self.oceanic_wavenumbers[i] return - (n ** 2) * Di.nx ** 2 - Di.ny ** 2 else: return 0 def U(self, i, j): if self.stored and self._U is not None: return self._U[i, j] else: return self._U_comp(i, j) def _U_comp(self, 
i, j): return _delta(i - j) def N(self, i, j): if self.stored and self._N is not None: return self._N[i, j] else: return self._N_comp(i, j) def _N_comp(self, i, j): n = self.n pi = np.pi Di = self.oceanic_wavenumbers[i] Dj = self.oceanic_wavenumbers[j] val = _delta(Di.P - Dj.P) * _flambda(Di.H + Dj.H) if val != 0: val = val * (-2) * Dj.H * Di.H * n / ((Dj.H ** 2 - Di.H ** 2) * pi) return val def O(self, i, j, k): if self.stored and self._O is not None: return self._O[i, j, k] else: return self._O_comp(i, j, k) def _O_comp(self, i, j, k): n = self.n indices = [i, j, k] a, par = _piksort(indices) Di = self.oceanic_wavenumbers[a[0]] Dj = self.oceanic_wavenumbers[a[1]] Dk = self.oceanic_wavenumbers[a[2]] vs3 = _S3(Dj.P, Dk.P, Dj.H, Dk.H) vs4 = _S4(Dj.P, Dk.P, Dj.H, Dk.H) val = vs3 * ((_delta(Dk.H - Dj.H - Di.H) - _delta(Dk.H - Dj.H + Di.H)) * _delta(Dk.P + Dj.P - Di.P) + _delta(Dk.H + Dj.H - Di.H) * (_delta(Dk.P - Dj.P + Di.P) - _delta(Dk.P - Dj.P - Di.P))) + vs4 * ((_delta(Dk.H + Dj.H - Di.H) * _delta(Dk.P - Dj.P - Di.P)) + (_delta(Dk.H - Dj.H + Di.H) - _delta(Dk.H - Dj.H - Di.H)) * (_delta(Dk.P - Dj.P - Di.P) - _delta(Dk.P - Dj.P + Di.P))) return par * val * n / 2 def C(self, i, j, k): if self.stored and self._C is not None: return self._C[i, j, k] else: return self._C_comp(i, j, k) def _C_comp(self, i, j, k): return self._M_comp(k, k) * self._O_comp(i, j, k) def W(self, i, j): if self.stored and self._W is not None: return self._W[i, j] else: return self._W_comp(i, j) def _W_comp(self, i, j): if self.connected_to_atmosphere: return self.atmosphere_inner_products._s_comp(j, i) else: return 0 class GroundAnalyticInnerProducts(GroundInnerProducts): def __init__(self, params=None, stored=True): GroundInnerProducts.__init__(self) if params is not None: if isinstance(params, QgParams): self.n = params.scale_params.n self._ngr = params.nmod[1] gms = params.oblocks else: self.n = params[0] self._ngr = params[2] gms = params[1] else: self.n = None stored = False gms = None self.connected_to_atmosphere = False self.atmosphere_inner_products = None if gms is not None: self.ground_wavenumbers = channel_wavenumbers(gms) else: self.ground_wavenumbers = None self.stored = stored if stored: self.compute_inner_products() def compute_inner_products(self): self._U = sp.zeros((self.ngr, self.ngr), dtype=float, format='dok') args_list = [(i, j) for i in range(self.ngr) for j in range(self.ngr)] for arg in args_list: self._U[arg] = self._U_comp(*arg) self._U = self._U.to_coo() def connect_to_atmosphere(self, atmosphere_inner_products): self.atmosphere_inner_products = atmosphere_inner_products self.connected_to_atmosphere = True if self.stored: natm = atmosphere_inner_products.natm self._W = sp.zeros((self.ngr, natm), dtype=float, format='dok') args_list = [(i, j) for i in range(self.ngr) for j in range(natm)] for arg in args_list: self._W[arg] = self._W_comp(*arg) self._W = self._W.to_coo() self.atmosphere_inner_products = None @property def ngr(self): return self._ngr def K(self, i, j): return 0 def M(self, i, j): return 0 def U(self, i, j): if self.stored and self._U is not None: return self._U[i, j] else: return self._U_comp(i, j) def _U_comp(self, i, j): return _delta(i - j) def N(self, i, j): return 0 def O(self, i, j, k): return 0
MIT License
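A minimal usage sketch for the qgs inner-products context above. The tuple form `(n, blocks, nmod)` follows the non-QgParams branch of the constructor shown; the block list and mode count below are guesses and must agree with what `channel_wavenumbers` actually builds from the blocks:

# Hypothetical inputs: aspect ratio, Fourier blocks, number of atmospheric modes.
n = 1.5
ablocks = [[1, 1], [1, 2]]  # illustrative wavenumber blocks
natm = 6                    # assumed to match the modes built from ablocks

aip = AtmosphericAnalyticInnerProducts(params=(n, ablocks, natm), stored=False)
print(aip.a(0, 0))  # diagonal entry -(n**2) * nx**2 - ny**2 for the first mode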
restran/hacker-scripts
misc/win_file_monitor/md5py.py
MD5.hexdigest
python
def hexdigest(self):
    d = map(None, self.digest())
    d = map(ord, d)
    d = map(lambda x: "%02x" % x, d)
    d = string.join(d, '')
    return d
Terminate and return digest in HEX form.

Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be used
to exchange the value safely in email or other non-binary
environments.
https://github.com/restran/hacker-scripts/blob/30bbfd8bb97cda2b4762156aaf2973296f0e7cde/misc/win_file_monitor/md5py.py#L375-L389
__date__ = '2001-10-1' __version__ = 0.9 import struct, string, copy def _long2bytes(n, blocksize=0): s = '' pack = struct.pack while n > 0: s = pack('<I', n & 0xffffffffL) + s n = n >> 32 for i in range(len(s)): if s[i] <> '\000': break else: s = '\000' i = 0 s = s[i:] if blocksize > 0 and len(s) % blocksize: s = (blocksize - len(s) % blocksize) * '\000' + s return s def _bytelist2long(list): imax = len(list) / 4 hl = [0L] * imax j = 0 i = 0 while i < imax: b0 = long(ord(list[j])) b1 = (long(ord(list[j + 1]))) << 8 b2 = (long(ord(list[j + 2]))) << 16 b3 = (long(ord(list[j + 3]))) << 24 hl[i] = b0 | b1 | b2 | b3 i = i + 1 j = j + 4 return hl def _rotateLeft(x, n): return (x << n) | (x >> (32 - n)) def F(x, y, z): return (x & y) | ((~x) & z) def G(x, y, z): return (x & z) | (y & (~z)) def H(x, y, z): return x ^ y ^ z def I(x, y, z): return y ^ (x | (~z)) def XX(func, a, b, c, d, x, s, ac): res = 0L res = res + a + func(b, c, d) res = res + x res = res + ac res = res & 0xffffffffL res = _rotateLeft(res, s) res = res & 0xffffffffL res = res + b return res & 0xffffffffL class MD5: def __init__(self): self.A = 0L self.B = 0L self.C = 0L self.D = 0L self.length = 0L self.count = [0, 0] self.input = [] self.HASH_LENGTH = 16 self.DATA_LENGTH = 64 self.init() def init(self): self.length = 0L self.input = [] self.A = 0x67452301L self.B = 0xefcdab89L self.C = 0x98badcfeL self.D = 0x10325476L def _transform(self, inp): a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D S11, S12, S13, S14 = 7, 12, 17, 22 a = XX(F, a, b, c, d, inp[0], S11, 0xD76AA478L) d = XX(F, d, a, b, c, inp[1], S12, 0xE8C7B756L) c = XX(F, c, d, a, b, inp[2], S13, 0x242070DBL) b = XX(F, b, c, d, a, inp[3], S14, 0xC1BDCEEEL) a = XX(F, a, b, c, d, inp[4], S11, 0xF57C0FAFL) d = XX(F, d, a, b, c, inp[5], S12, 0x4787C62AL) c = XX(F, c, d, a, b, inp[6], S13, 0xA8304613L) b = XX(F, b, c, d, a, inp[7], S14, 0xFD469501L) a = XX(F, a, b, c, d, inp[8], S11, 0x698098D8L) d = XX(F, d, a, b, c, inp[9], S12, 0x8B44F7AFL) c = XX(F, c, d, a, b, inp[10], S13, 0xFFFF5BB1L) b = XX(F, b, c, d, a, inp[11], S14, 0x895CD7BEL) a = XX(F, a, b, c, d, inp[12], S11, 0x6B901122L) d = XX(F, d, a, b, c, inp[13], S12, 0xFD987193L) c = XX(F, c, d, a, b, inp[14], S13, 0xA679438EL) b = XX(F, b, c, d, a, inp[15], S14, 0x49B40821L) S21, S22, S23, S24 = 5, 9, 14, 20 a = XX(G, a, b, c, d, inp[1], S21, 0xF61E2562L) d = XX(G, d, a, b, c, inp[6], S22, 0xC040B340L) c = XX(G, c, d, a, b, inp[11], S23, 0x265E5A51L) b = XX(G, b, c, d, a, inp[0], S24, 0xE9B6C7AAL) a = XX(G, a, b, c, d, inp[5], S21, 0xD62F105DL) d = XX(G, d, a, b, c, inp[10], S22, 0x02441453L) c = XX(G, c, d, a, b, inp[15], S23, 0xD8A1E681L) b = XX(G, b, c, d, a, inp[4], S24, 0xE7D3FBC8L) a = XX(G, a, b, c, d, inp[9], S21, 0x21E1CDE6L) d = XX(G, d, a, b, c, inp[14], S22, 0xC33707D6L) c = XX(G, c, d, a, b, inp[3], S23, 0xF4D50D87L) b = XX(G, b, c, d, a, inp[8], S24, 0x455A14EDL) a = XX(G, a, b, c, d, inp[13], S21, 0xA9E3E905L) d = XX(G, d, a, b, c, inp[2], S22, 0xFCEFA3F8L) c = XX(G, c, d, a, b, inp[7], S23, 0x676F02D9L) b = XX(G, b, c, d, a, inp[12], S24, 0x8D2A4C8AL) S31, S32, S33, S34 = 4, 11, 16, 23 a = XX(H, a, b, c, d, inp[5], S31, 0xFFFA3942L) d = XX(H, d, a, b, c, inp[8], S32, 0x8771F681L) c = XX(H, c, d, a, b, inp[11], S33, 0x6D9D6122L) b = XX(H, b, c, d, a, inp[14], S34, 0xFDE5380CL) a = XX(H, a, b, c, d, inp[1], S31, 0xA4BEEA44L) d = XX(H, d, a, b, c, inp[4], S32, 0x4BDECFA9L) c = XX(H, c, d, a, b, inp[7], S33, 0xF6BB4B60L) b = XX(H, b, c, d, a, inp[10], S34, 0xBEBFBC70L) a = XX(H, a, b, c, d, 
inp[13], S31, 0x289B7EC6L) d = XX(H, d, a, b, c, inp[0], S32, 0xEAA127FAL) c = XX(H, c, d, a, b, inp[3], S33, 0xD4EF3085L) b = XX(H, b, c, d, a, inp[6], S34, 0x04881D05L) a = XX(H, a, b, c, d, inp[9], S31, 0xD9D4D039L) d = XX(H, d, a, b, c, inp[12], S32, 0xE6DB99E5L) c = XX(H, c, d, a, b, inp[15], S33, 0x1FA27CF8L) b = XX(H, b, c, d, a, inp[2], S34, 0xC4AC5665L) S41, S42, S43, S44 = 6, 10, 15, 21 a = XX(I, a, b, c, d, inp[0], S41, 0xF4292244L) d = XX(I, d, a, b, c, inp[7], S42, 0x432AFF97L) c = XX(I, c, d, a, b, inp[14], S43, 0xAB9423A7L) b = XX(I, b, c, d, a, inp[5], S44, 0xFC93A039L) a = XX(I, a, b, c, d, inp[12], S41, 0x655B59C3L) d = XX(I, d, a, b, c, inp[3], S42, 0x8F0CCC92L) c = XX(I, c, d, a, b, inp[10], S43, 0xFFEFF47DL) b = XX(I, b, c, d, a, inp[1], S44, 0x85845DD1L) a = XX(I, a, b, c, d, inp[8], S41, 0x6FA87E4FL) d = XX(I, d, a, b, c, inp[15], S42, 0xFE2CE6E0L) c = XX(I, c, d, a, b, inp[6], S43, 0xA3014314L) b = XX(I, b, c, d, a, inp[13], S44, 0x4E0811A1L) a = XX(I, a, b, c, d, inp[4], S41, 0xF7537E82L) d = XX(I, d, a, b, c, inp[11], S42, 0xBD3AF235L) c = XX(I, c, d, a, b, inp[2], S43, 0x2AD7D2BBL) b = XX(I, b, c, d, a, inp[9], S44, 0xEB86D391L) A = (A + a) & 0xffffffffL B = (B + b) & 0xffffffffL C = (C + c) & 0xffffffffL D = (D + d) & 0xffffffffL self.A, self.B, self.C, self.D = A, B, C, D def update(self, inBuf): leninBuf = long(len(inBuf)) index = (self.count[0] >> 3) & 0x3FL self.count[0] = self.count[0] + (leninBuf << 3) if self.count[0] < (leninBuf << 3): self.count[1] = self.count[1] + 1 self.count[1] = self.count[1] + (leninBuf >> 29) partLen = 64 - index if leninBuf >= partLen: self.input[index:] = map(None, inBuf[:partLen]) self._transform(_bytelist2long(self.input)) i = partLen while i + 63 < leninBuf: self._transform(_bytelist2long(map(None, inBuf[i:i + 64]))) i = i + 64 else: self.input = map(None, inBuf[i:leninBuf]) else: i = 0 self.input = self.input + map(None, inBuf) def digest(self): A = self.A B = self.B C = self.C D = self.D input = [] + self.input count = [] + self.count index = (self.count[0] >> 3) & 0x3fL if index < 56: padLen = 56 - index else: padLen = 120 - index padding = ['\200'] + ['\000'] * 63 self.update(padding[:padLen]) bits = _bytelist2long(self.input[:56]) + count self._transform(bits) digest = _long2bytes(self.A << 96, 16)[:4] + _long2bytes(self.B << 64, 16)[4:8] + _long2bytes(self.C << 32, 16)[8:12] + _long2bytes(self.D, 16)[12:] self.A = A self.B = B self.C = C self.D = D self.input = input self.count = count return digest
MIT License
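A quick usage sketch for the MD5.hexdigest entry above. The module is Python 2 code (note the `L`-suffixed long literals and the `<>` operator in the context), so the sketch assumes a Python 2 interpreter; the input string is illustrative:

m = MD5()
m.update("hello world")
print m.hexdigest()  # a 32-character lowercase hex string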
simpeg/simpeg
SimPEG/electromagnetics/frequency_domain/fields.py
FieldsFDEM._bSecondary
python
def _bSecondary(self, solution, source_list):
    if getattr(self, "_bSecondary", None) is None:
        raise NotImplementedError(
            "Getting b from {} is not implemented".format(
                self.knownFields.keys()[0]
            )
        )
    return self._bSecondary(solution, source_list)
Total magnetic flux density is sum of primary and secondary

:param numpy.ndarray solution: field we solved for
:param list source_list: list of sources
:rtype: numpy.ndarray
:return: total magnetic flux density
https://github.com/simpeg/simpeg/blob/a264ba6a32ba3c83d82601add37f51d8e1cc5e90/SimPEG/electromagnetics/frequency_domain/fields.py#L87-L103
import numpy as np
import scipy.sparse as sp

from ...fields import Fields
from ...utils import mkvc, Zero, Identity, sdiag
from ..utils import omega
from ...utils.code_utils import deprecate_class


class FieldsFDEM(Fields):

    knownFields = {}
    dtype = complex

    def _GLoc(self, fieldType):
        return self.aliasFields[fieldType][1]

    def _e(self, solution, source_list):
        if (
            getattr(self, "_ePrimary", None) is None
            or getattr(self, "_eSecondary", None) is None
        ):
            raise NotImplementedError(
                "Getting e from {0!s} is not implemented".format(
                    self.knownFields.keys()[0]
                )
            )
        return self._ePrimary(solution, source_list) + self._eSecondary(
            solution, source_list
        )

    def _b(self, solution, source_list):
        if (
            getattr(self, "_bPrimary", None) is None
            or getattr(self, "_bSecondary", None) is None
        ):
            raise NotImplementedError(
                "Getting b from {0!s} is not implemented".format(
                    self.knownFields.keys()[0]
                )
            )
        return self._bPrimary(solution, source_list) + self._bSecondary(
            solution, source_list
        )
MIT License
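The stored `_bSecondary` snippet guards on `getattr(self, "_bSecondary", None)` — i.e. on itself — mirroring the `_e`/`_b` dispatch pattern visible in the context, where concrete field classes are expected to supply `_bPrimary`/`_bSecondary`. A hedged sketch of that pattern (the subclass and its return values are illustrative, not simpeg's real implementation):

class ToyFieldsFDEM(FieldsFDEM):
    knownFields = {"bSolution": "F"}

    def _bPrimary(self, solution, source_list):
        return 0 * solution   # illustrative: no primary field

    def _bSecondary(self, solution, source_list):
        return solution       # illustrative: the solution vector is b itself

# fields._b(solution, source_list) then returns _bPrimary(...) + _bSecondary(...).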
dropbox/changes-mesos-framework
changes_mesos_scheduler/changes_scheduler.py
ChangesScheduler._jobstep_snapshot
python
def _jobstep_snapshot(jobstep):
    if 'image' in jobstep and jobstep['image']:
        if 'snapshot' in jobstep['image'] and jobstep['image']['snapshot']:
            return jobstep['image']['snapshot']['id']
    return None
Given a jobstep, return its snapshot id if set, None otherwise.
https://github.com/dropbox/changes-mesos-framework/blob/cbb2351d45b4231286a18e70e5fea039b121d0a4/changes_mesos_scheduler/changes_scheduler.py#L744-L751
from __future__ import absolute_import, print_function import bisect import concurrent.futures import json import logging import os import threading import time import urllib2 from changes_mesos_scheduler import statsreporter from typing import Any, Callable, Dict, NamedTuple, Optional, Set, Tuple from collections import defaultdict from threading import Event from urllib import urlencode from uuid import uuid4 from google.protobuf import text_format as _text_format from mesos.interface import Scheduler, SchedulerDriver from mesos.interface import mesos_pb2 TASK_KILL_THRESHOLD = 3600 class FileBlacklist(object): def __init__(self, path): self._path = path self._mtime = 0.0 self._blacklist = set() def refresh(self): if os.path.getmtime(self._path) > self._mtime: self._refresh() def _refresh(self): logging.info('Refreshing blacklist') self._mtime = os.path.getmtime(self._path) with open(self._path) as file: self._blacklist = set([s.strip() for s in file.readlines() if not s.startswith('#')]) def contains(self, hostname): return hostname in self._blacklist class APIError(Exception): def __init__(self, msg, cause=None): super(APIError, self).__init__(msg) self.cause = cause class ChangesAPI(object): def __init__(self, api_url): self._api_url = api_url @staticmethod def make_url(base_url, path, get_params=None): path = path if path.endswith('/') else path + '/' path = path if path.startswith('/') else '/' + path base_url = base_url.rstrip('/') full_url = base_url + path if get_params: query_string = '?' + urlencode(get_params) full_url += query_string return full_url def _api_request(self, path, body=None, get_params=None): full_url = ChangesAPI.make_url(self._api_url, path, get_params) try: data = json.dumps(body) if body else None req = urllib2.Request( full_url, data, {'Content-Type': 'application/json'}) content = urllib2.urlopen(req).read() return json.loads(content) except Exception as exc: logging.exception("Error POSTing to Changes at %s", full_url) raise APIError("Error POSTing to Changes at %s" % full_url, exc) def get_allocate_jobsteps(self, limit=None, cluster=None): data = {'limit': limit} if limit else {} if cluster: data['cluster'] = cluster return self._api_request("/jobsteps/allocate/", get_params=data)['jobsteps'] def post_allocate_jobsteps(self, jobstep_ids, cluster=None): data = {'jobstep_ids': jobstep_ids} if cluster: data['cluster'] = cluster return self._api_request("/jobsteps/allocate/", data)['allocated'] def jobstep_needs_abort(self, jobstep_ids): if len(jobstep_ids) == 0: return [] data = {'jobstep_ids': jobstep_ids} return self._api_request("/jobsteps/needs_abort/", data)['needs_abort'] def update_jobstep(self, jobstep_id, status, result=None, hostname=None): data = {"status": status} if result: data["result"] = result if hostname: data["node"] = hostname self._api_request("/jobsteps/{}/".format(jobstep_id), data) def jobstep_console_append(self, jobstep_id, text): url = '/jobsteps/%s/logappend/' % jobstep_id self._api_request(url, {'source': 'console', 'text': text}) class SlaveInfo(object): def __init__(self, hostname): self.hostname = hostname class ChangesScheduler(Scheduler): def __init__(self, state_file, api, blacklist, stats=None, changes_request_limit=200): self.framework_id = None self._changes_api = api self.taskJobStepMappingLock = threading.Lock() self.taskJobStepMapping = {} self.slaveIdInfo = {} self.tasksPendingKill = {} self.tasksLaunched = 0 self.tasksFinished = 0 self.shuttingDown = Event() self._stats = stats or statsreporter.Stats(None) 
self._blacklist = blacklist self._blacklist.refresh() self.state_file = state_file self.changes_request_limit = changes_request_limit self._snapshot_slave_map = defaultdict(lambda: defaultdict(float)) self._cached_slaves_lock = threading.Lock() self._cached_slaves = {} if not self.state_file: logging.warning("State file location not set. Not restoring old state.") elif not os.path.exists(self.state_file): logging.warning("State file not found. Not restoring old state.") else: try: self.restore_state() except Exception: logging.exception("Failed to restore state. Continuing as a new scheduler.") else: os.remove(self.state_file) def poll_changes_until_shutdown(self, driver, interval): with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(self._polling_loop, driver, interval) logging.info("Started thread at %s. Now waiting...", time.ctime()) while not future.done(): time.sleep(.01) try: future.result() except Exception: logging.exception("Polling thread failed. Exiting.") self.decline_open_offers(driver) def _polling_loop(self, driver, interval): try: next_wait_duration = 0.0 while not self.shuttingDown.wait(next_wait_duration): start_time = time.time() while self.poll_and_launch_once(driver): pass self.poll_and_abort(driver) last_poll_duration = time.time() - start_time next_wait_duration = max(0, interval - last_poll_duration) finally: self.shuttingDown.set() def poll_and_launch_once(self, driver): with self._cached_slaves_lock: all_slaves = self._cached_slaves.values() filtered_slaves = self._filter_slaves(all_slaves) logging.info("Do scheduling cycle with %d available slaves. (%d " + "after filtering)", len(all_slaves), len(filtered_slaves)) slaves_by_cluster = self._slaves_by_cluster(filtered_slaves) jobsteps_by_cluster = self._query_changes_for_jobsteps( driver, slaves_by_cluster.keys()) for cluster, jobsteps in jobsteps_by_cluster.iteritems(): self._assign_jobsteps(cluster, slaves_by_cluster[cluster], jobsteps_by_cluster[cluster]) self._launch_jobsteps(driver, cluster, slaves_by_cluster[cluster]) return len(jobsteps_by_cluster) == self.changes_request_limit def poll_and_abort(self, driver): jobStepTaskMapping = {} with self.taskJobStepMappingLock: for task_id, jobstep_id in self.taskJobStepMapping.iteritems(): jobStepTaskMapping[jobstep_id] = task_id try: abort_jobstep_ids = self._changes_api.jobstep_needs_abort(sorted(jobStepTaskMapping.keys())) except APIError: logging.warning('/jobstep/needs_abort/ failed', exc_info=True) abort_jobstep_ids = [] now = time.time() for jobstep_id in abort_jobstep_ids: task_id = jobStepTaskMapping[jobstep_id] with self.taskJobStepMappingLock: first_tried_to_kill = self.tasksPendingKill.setdefault(task_id, now) if now - first_tried_to_kill > TASK_KILL_THRESHOLD: logging.warning("Task %s (jobstep ID %s) still hasn't been successfully killed, giving up.", task_id, jobstep_id) self._stats.incr('couldnt_abort_task') del self.taskJobStepMapping[task_id] del self.tasksPendingKill[task_id] continue logging.info('Asking Mesos to kill task %s (jobstep ID %s)', task_id, jobstep_id) driver.killTask(mesos_pb2.TaskID(value=task_id)) def decline_open_offers(self, driver): with self._cached_slaves_lock: slaves = self._cached_slaves.values() for slave in slaves: self._stat_and_log_list(slave.offers(), 'decline_for_shutdown', lambda offer: "Shutting down, declining offer: %s" % offer.offer.id) self._decline_list(driver, slave.offers()) self._cached_slaves = {} def registered(self, driver, frameworkId, masterInfo): 
logging.info("Registered with framework ID %s", frameworkId.value) self.framework_id = frameworkId.value def reregistered(self, driver, masterInfo): logging.info("Re-Registered with new master") def disconnected(self, driver): logging.info("Disconnected from master. Abandoning all cached offer and slave info without declining.") with self._cached_slaves_lock: self._cached_slaves = {} @staticmethod def _decode_typed_field(pb): field_type = pb.type if field_type == mesos_pb2.Value.SCALAR: return pb.scalar.value elif field_type == mesos_pb2.Value.RANGES: return [{"begin": ra.begin, "end": ra.end} for ra in pb.ranges.range] elif field_type == mesos_pb2.Value.SET: return pb.set.item elif field_type == mesos_pb2.Value.TEXT: return pb.text.value else: raise Exception("Unknown field type: %s", field_type) @staticmethod def _decode_attribute(attr_pb): return (attr_pb.name, ChangesScheduler._decode_typed_field(attr_pb)) @staticmethod def _decode_resource(resource_pb): return (resource_pb.name, ChangesScheduler._decode_typed_field(resource_pb)) @property def activeTasks(self): return self.tasksFinished - self.tasksLaunched @staticmethod def get_cluster(offer): attributes = dict([ChangesScheduler._decode_attribute(a) for a in offer.attributes]) return attributes.get('labels') @staticmethod def get_resources(offer): return {name: value for (name, value) in [ChangesScheduler._decode_resource(r) for r in offer.resources]} class OfferWrapper(object): def __init__(self, pb_offer): self.offer = pb_offer self.cluster = ChangesScheduler.get_cluster(pb_offer) resources = ChangesScheduler.get_resources(pb_offer) self.cpu = resources.get('cpus', 0.0) self.mem = resources.get('mem', 0) def __cmp__(self, other): us = (-self.cpu, -self.mem) them = (-other.cpu, -other.mem) if us < them: return -1 return 0 if us == them else 1 def __str__(self, pb_offer): cpu = "?" mem = "?" for r in pb_offer.resources: if r.name == 'cpus': cpu = str(r.scalar).strip() if r.name == 'memory': cpu = str(r.scalar).strip() return "Offer({} {} {} cpu: {} mem: {})".format( pb_offer.id.value, pb_offer.slave_id.value, pb_offer.hostname, cpu, mem) class Slave(object): def __init__(self, slave_id, hostname, cluster): self.slave_id = slave_id self.hostname = hostname self.cluster = cluster self._offers = {} self.jobsteps_assigned = [] self.total_cpu = 0.0 self.total_mem = 0 self.allocated_cpu = 0.0 self.allocated_mem = 0 def offers(self): return self._offers.values() def has_offers(self): return len(self._offers) > 0 def is_maintenanced(self, now_nanos): is_maintenanced = False for offer in self._offers.itervalues(): if not offer.offer.HasField('unavailability'): continue start_time = offer.offer.unavailability.start.nanoseconds end_time = now_nanos + 1 if (offer.offer.unavailability.HasField('duration')): end_time = start_time + offer.offer.unavailability.duration.nanoseconds is_maintenanced = now_nanos > start_time and now_nanos < end_time if is_maintenanced: break return is_maintenanced def add_offer(self, offer): if (offer.offer.slave_id.value != self.slave_id or offer.offer.hostname != self.hostname or offer.cluster != self.cluster): logging.error("A mismatched offer got mixed in with the wrong " + "slave. Skipping. 
(\n Slave: %s\n Offer: %s)", self, offer) return self.total_cpu += offer.cpu self.total_mem += offer.mem logging.info("Slave %s: Add new offer +%f cpu, +%d mem (-> %f %d)", self.hostname, offer.cpu, offer.mem, self.total_cpu, self.total_mem) self._offers[offer.offer.id.value] = offer def remove_offer(self, offer_id): offer = self._offers.get(offer_id.value) if offer: del(self._offers[offer_id.value]) self.total_cpu -= offer.cpu self.total_mem -= offer.mem def offers_to_launch(self): current_offers = sorted(self._offers.values()) offers_to_launch = [] for offer in current_offers: if (self.allocated_cpu > 0 and offer.cpu > 0 or self.allocated_mem > 0 and offer.mem > 0): offers_to_launch.append(offer.offer.id) self.allocated_cpu -= offer.cpu self.allocated_mem -= offer.mem self.remove_offer(offer.offer.id) return offers_to_launch def tasks_to_launch(self): tasks = [] jobstep_ids = [] for jobstep in self.jobsteps_assigned: tasks.append(self._jobstep_to_task(jobstep)) jobstep_ids.append(jobstep['id']) self.unassign_jobsteps() return tasks, jobstep_ids def unassign_jobsteps(self): self.jobsteps_assigned = [] self.allocated_cpu = 0.0 self.allocated_mem = 0 def __cmp__(self, other): us = (-(self.total_cpu - self.allocated_cpu), -(self.total_mem - self.allocated_mem)) them = (-(other.total_cpu - other.allocated_cpu), -(other.total_mem - other.allocated_mem)) if us < them: return -1 return 0 if us == them else 1 def __str__(self, slave): return "Slave({}: {} offers, {} acpu, {} amem)".format( slave.hostname, len(slave.offers()), slave.total_cpu, slave.total_mem) def has_resources_for(self, jobstep): return ((self.total_cpu - self.allocated_cpu) >= jobstep['resources']['cpus'] and (self.total_mem - self.allocated_mem) >= jobstep['resources']['mem']) def assign_jobstep(self, jobstep): assert self.has_resources_for(jobstep) self.allocated_cpu += jobstep['resources']['cpus'] self.allocated_mem += jobstep['resources']['mem'] self.jobsteps_assigned.append(jobstep) def _jobstep_to_task(self, jobstep): tid = uuid4().hex logging.info("Accepting offer on %s to start task %s", self.hostname, tid) task = mesos_pb2.TaskInfo() task.name = "{} {}".format( jobstep['project']['slug'], jobstep['id'], ) task.task_id.value = str(tid) task.slave_id.value = self.slave_id cmd = jobstep["cmd"] task.command.value = cmd logging.debug("Scheduling cmd: %s", cmd) cpus = task.resources.add() cpus.name = "cpus" cpus.type = mesos_pb2.Value.SCALAR cpus.scalar.value = jobstep["resources"]["cpus"] mem = task.resources.add() mem.name = "mem" mem.type = mesos_pb2.Value.SCALAR mem.scalar.value = jobstep["resources"]["mem"] return task def _get_slaves_for_snapshot(self, snapshot_id, recency_threshold_hours=12): latest_snapshot_use = time.time() - recency_threshold_hours * 3600 return [k for k, v in self._snapshot_slave_map[snapshot_id].iteritems() if v >= latest_snapshot_use] def _associate_snapshot_with_slave(self, snapshot_id, slave): self._snapshot_slave_map[snapshot_id][slave] = time.time() @staticmethod
Apache License 2.0
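The behaviour of the staticmethod `_jobstep_snapshot` on two illustrative payloads:

assert ChangesScheduler._jobstep_snapshot(
    {'image': {'snapshot': {'id': 'snap-123'}}}) == 'snap-123'
assert ChangesScheduler._jobstep_snapshot({'image': None}) is None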
ibm/max-question-answering
core/tokenization.py
_is_whitespace
python
def _is_whitespace(char):
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    cat = unicodedata.category(char)
    if cat == "Zs":
        return True
    return False
Checks whether `char` is a whitespace character.
https://github.com/ibm/max-question-answering/blob/45f93fbbfa920b24cbcd4a9c3c84dd154d8b39cc/core/tokenization.py#L348-L357
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import unicodedata import six import tensorflow as tf def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): if not init_checkpoint: return m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) if m is None: return model_name = m.group(1) lower_models = [ "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" ] cased_models = [ "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", "multi_cased_L-12_H-768_A-12" ] is_bad_config = False if model_name in lower_models and not do_lower_case: is_bad_config = True actual_flag = "False" case_name = "lowercased" opposite_flag = "True" if model_name in cased_models and do_lower_case: is_bad_config = True actual_flag = "True" case_name = "cased" opposite_flag = "False" if is_bad_config: raise ValueError( "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " "However, `%s` seems to be a %s model, so you " "should pass in `--do_lower_case=%s` so that the fine-tuning matches " "how the model was pre-training. If this error is wrong, please " "just comment out this check." % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)) def convert_to_unicode(text): if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python 3?") def printable_text(text): if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python 3?") def load_vocab(vocab_file): vocab = collections.OrderedDict() index = 0 with tf.gfile.GFile(vocab_file, "r") as reader: while True: token = convert_to_unicode(reader.readline()) if not token: break token = token.strip() vocab[token] = index index += 1 return vocab def convert_by_vocab(vocab, items): output = [] for item in items: output.append(vocab[item]) return output def convert_tokens_to_ids(vocab, tokens): return convert_by_vocab(vocab, tokens) def convert_ids_to_tokens(inv_vocab, ids): return convert_by_vocab(inv_vocab, ids) def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class FullTokenizer(object): def __init__(self, vocab_file, do_lower_case=True): self.vocab = load_vocab(vocab_file) self.inv_vocab = {v: k for k, v in self.vocab.items()} self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): return convert_by_vocab(self.vocab, tokens) def convert_ids_to_tokens(self, ids): return convert_by_vocab(self.inv_vocab, ids) class BasicTokenizer(object): def __init__(self, do_lower_case=True): self.do_lower_case = do_lower_case def tokenize(self, text): text = convert_to_unicode(text) text = self._clean_text(text) text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case: token = token.lower() 
token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): if ((0x4E00 <= cp <= 0x9FFF) or (0x3400 <= cp <= 0x4DBF) or (0x20000 <= cp <= 0x2A6DF) or (0x2A700 <= cp <= 0x2B73F) or (0x2B740 <= cp <= 0x2B81F) or (0x2B820 <= cp <= 0x2CEAF) or (0xF900 <= cp <= 0xFAFF) or (0x2F800 <= cp <= 0x2FA1F)): return True return False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): text = convert_to_unicode(text) output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens
Apache License 2.0
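A few spot checks for `_is_whitespace`; the non-breaking space case relies on its Unicode general category being Zs:

assert _is_whitespace(" ") and _is_whitespace("\n")
assert _is_whitespace(u"\u00a0")   # NO-BREAK SPACE, category Zs
assert not _is_whitespace("a")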
mozilla/mozilla-ignite
apps/challenges/models.py
Submission.get_delete_url
python
def get_delete_url(self):
    return self._lookup_url('entry_delete', {'pk': self.id,
                                             'phase': self.phase_slug})
Return the URL to delete this submission.
https://github.com/mozilla/mozilla-ignite/blob/148f5dae0bd6519431d573296a039798fe41eb31/apps/challenges/models.py#L432-L435
from datetime import datetime from dateutil.relativedelta import relativedelta from decimal import Decimal from markdown import markdown from django.conf import settings from django.core.cache import cache from django.core.exceptions import ValidationError from django.core.urlresolvers import reverse, NoReverseMatch from django.core.validators import MaxLengthValidator from django.template.defaultfilters import slugify from django.db import models from django.db.models import signals from django.dispatch import receiver from challenges.lib import cached_bleach, cached_property from challenges.managers import (SubmissionHelpManager, PhaseManager, SubmissionManager) from django_extensions.db.fields import (AutoSlugField, CreationDateTimeField, ModificationDateTimeField) from innovate.models import BaseModel, BaseModelManager from innovate.utils import ImageStorage from projects.models import Project from tower import ugettext_lazy as _ from users.models import Profile class ChallengeManager(BaseModelManager): def get_by_natural_key(self, slug): return self.get(slug=slug) class Challenge(BaseModel): objects = ChallengeManager() title = models.CharField(verbose_name=_(u'Title'), max_length=60, unique=True) slug = models.SlugField(verbose_name=_(u'Slug'), max_length=60, unique=True) summary = models.TextField(verbose_name=_(u'Summary'), validators=[MaxLengthValidator(200)]) description = models.TextField(verbose_name=_(u'Description')) def natural_key(self): return (self.slug,) @property def description_html(self): return cached_bleach(self.description) image = models.ImageField(verbose_name=_(u'Project image'), null=True, blank=True, upload_to=settings.CHALLENGE_IMAGE_PATH) start_date = models.DateTimeField(verbose_name=_(u'Start date'), default=datetime.utcnow) end_date = models.DateTimeField(verbose_name=_(u'End date')) moderate = models.BooleanField(verbose_name=_(u'Moderate entries'), default=False) allow_voting = models.BooleanField(verbose_name=_(u'Can users vote on submissions?'), default=False) project = models.ForeignKey(Project, verbose_name=_(u'Project'), limit_choices_to={'allow_participation': True}) def get_image_src(self): media_url = getattr(settings, 'MEDIA_URL', '') path = lambda f: f and '%s%s' % (media_url, f) return path(self.image) or path('img/project-default.gif') def __unicode__(self): return self.title def _lookup_url(self, view_name, kwargs=None): if kwargs is None: kwargs = {} try: return reverse(view_name, kwargs=kwargs) except NoReverseMatch: kwargs.update({'project': self.project.slug, 'slug': self.slug}) return reverse(view_name, kwargs=kwargs) def get_absolute_url(self): return self._lookup_url('challenge_show') def in_six_months(): return datetime.utcnow() + relativedelta(months=6) def has_phase_finished(phase): cache_key = '%s_END_DATE' % phase.upper() end_date = cache.get(cache_key) if not end_date: phase = Phase.objects.get_ideation_phase() cache.set(cache_key, end_date) if not phase: return False end_date = phase.end_date return datetime.utcnow() > end_date class Phase(BaseModel): challenge = models.ForeignKey(Challenge, related_name='phases') name = models.CharField(max_length=100) start_date = models.DateTimeField(verbose_name=_(u'Start date'), default=datetime.utcnow) end_date = models.DateTimeField(verbose_name=_(u'End date'), default=in_six_months) judging_start_date = models.DateTimeField( verbose_name=_(u'Judging start date'), blank=True, null=True) judging_end_date = models.DateTimeField( verbose_name=_(u'Judging end date'), blank=True, null=True) 
order = models.IntegerField() objects = PhaseManager() class Meta: unique_together = (('challenge', 'name'),) ordering = ('order',) def __unicode__(self): return '%s (%s)' % (self.name, self.challenge.title) def natural_key(self): return self.challenge.natural_key() + (self.name,) natural_key.dependencies = ['challenges.challenge'] @models.permalink def get_absolute_url(self): slug = 'ideas' if self.is_ideation else 'apps' return ('entries_all', [slug]) @cached_property def days_remaining(self): now = datetime.utcnow() if not self.is_open: return -1 time_remaining = self.end_date - now return time_remaining.days if time_remaining.days >= 0 else -1 @cached_property def days_until(self): now = datetime.utcnow() if not self.is_closed: return -1 time_until = self.start_date - now return time_until.days if time_until.days >= 0 else -1 @cached_property def phase_rounds(self): return self.phaseround_set.all() @cached_property def current_round(self): now = datetime.utcnow() for item in self.phase_rounds: if item.start_date < now and item.end_date > now: return item return None @cached_property def next_round(self): now = datetime.utcnow() upcoming_rounds = self.phase_rounds.filter(start_date__gte=now).order_by('start_date') if upcoming_rounds: return upcoming_rounds[0] else: return None @cached_property def current_judging_round(self): now = datetime.utcnow() for item in self.phase_rounds: if item.judging_start_date and item.judging_end_date and item.judging_start_date < now and item.judging_end_date > now: return item return None @cached_property def is_judgable(self): now = datetime.utcnow() if self.judging_start_date and self.judging_end_date and self.judging_start_date < now and self.judging_end_date > now: return True if self.current_judging_round: return True return False @cached_property def is_open(self): now = datetime.utcnow() if self.phase_rounds: if self.current_round: return True return False return self.start_date < now and now < self.end_date @property def is_closed(self): return not self.is_open @cached_property def has_started(self): return datetime.utcnow() > self.start_date @cached_property def is_ideation(self): return self.name == settings.IGNITE_IDEATION_NAME @cached_property def is_development(self): return self.name == settings.IGNITE_DEVELOPMENT_NAME @cached_property def slug_url(self): return 'apps' if self.is_development else 'ideas' @receiver(signals.post_save, sender=Phase) def phase_update_cache(instance, **kwargs): key = '%s_end_date' % slugify(instance.name) cache.set(key.upper(), instance.end_date) class ExternalLink(BaseModel): name = models.CharField(verbose_name=_(u'Link Name'), max_length=50) url = models.URLField(verbose_name=_(u'URL'), max_length=255, verify_exists=False) submission = models.ForeignKey('challenges.Submission', blank=True, null=True) def __unicode__(self): return u"%s -> %s" % (self.name, self.url) class CategoryManager(BaseModelManager): def get_active_categories(self): filtered_cats = [] for cat in Category.objects.all(): cat_submissions = cat.submission_set.all() if cat_submissions.count(): filtered_cats.append(cat) if len(filtered_cats) == 0: return False else: return filtered_cats class Category(BaseModel): objects = CategoryManager() name = models.CharField(verbose_name=_(u'Name'), max_length=60, unique=True) slug = models.SlugField(verbose_name=_(u'Slug'), max_length=60, unique=True) def __unicode__(self): return self.name class Meta: verbose_name_plural = 'Categories' class Submission(BaseModel): title = 
models.CharField(verbose_name=_(u'Title'), max_length=60) brief_description = models.CharField( max_length=200, verbose_name=_(u'Brief Description'), help_text = _(u"Think of this as an elevator pitch - keep it short" u" and sweet")) description = models.TextField(verbose_name=_(u'Description')) sketh_note = models.ImageField( verbose_name=_(u'Featured image'), blank=True, null=True, help_text=_(u"This will be used in our summary and list views. You " u"can add more images in your description or link out to " u"sets or images out on the web by adding in an external link"), upload_to=settings.CHALLENGE_IMAGE_PATH, storage=ImageStorage()) category = models.ForeignKey(Category) created_by = models.ForeignKey(Profile) created_on = models.DateTimeField(default=datetime.utcnow) updated_on = ModificationDateTimeField() is_winner = models.BooleanField(verbose_name=_(u'A winning entry?'), default=False, help_text=_(u'Mark this entry as green lit')) is_draft = models.BooleanField(verbose_name=_(u'Draft?'), help_text=_(u"If you would like some extra time to polish your submission" u" before making it publically then you can set it as draft. " u"When you're ready just un-tick and it will go live")) phase = models.ForeignKey('challenges.Phase') phase_round = models.ForeignKey('challenges.PhaseRound', blank=True, null=True, on_delete=models.SET_NULL) collaborators = models.TextField(blank=True) life_improvements = models.TextField(default="", verbose_name=_(u'How does this improve the lives of people?')) take_advantage = models.TextField(blank=True, null=True, verbose_name=_(u'How does this make the most of the GENI network?')) interest_making = models.TextField(blank=True, null=True, verbose_name=_(u'Are you interested in making this app?')) team_members = models.TextField(blank=True, null=True, verbose_name=_(u'Tell us about your team making this app')) repository_url = models.URLField(max_length=500, verify_exists=False, blank=True) blog_url = models.URLField(max_length=500, verify_exists=False, blank=True) required_effort = models.TextField(blank=True, null=True, verbose_name=_(u'How much effort do you expect this work to take?')) objects = SubmissionManager() class Meta: ordering = ['-id'] def __unicode__(self): return self.title @property def description_html(self): return cached_bleach(markdown(self.description)) @property def challenge(self): return self.phase.challenge def get_image_src(self): media_url = getattr(settings, 'MEDIA_URL', '') path = lambda f: f and '%s%s' % (media_url, f) return path(self.sketh_note) or path('img/project-default.png') def _lookup_url(self, view_name, kwargs=None): if kwargs is None: kwargs = {} try: return reverse(view_name, kwargs=kwargs) except NoReverseMatch: kwargs.update({'project': self.challenge.project.slug, 'slug': self.challenge.slug}) return reverse(view_name, kwargs=kwargs) @cached_property def parent(self): parent_list = self.submissionparent_set.all() if parent_list: return parent_list[0] return None @cached_property def parent_slug(self): if self.parent: return self.parent.slug version_list = self.submissionversion_set.select_related('parent').all() if version_list: return version_list[0].parent.slug return None @cached_property def is_idea(self): return self.phase.name == settings.IGNITE_IDEATION_NAME @cached_property def is_proposal(self): return self.phase.name == settings.IGNITE_DEVELOPMENT_NAME @cached_property def phase_slug(self): if self.is_idea: return 'ideas' return 'apps' def get_absolute_url(self): if self.parent_slug and 
self.phase_slug: return self._lookup_url('entry_show', {'entry_id': self.parent_slug, 'phase': self.phase_slug}) return u'' def get_edit_url(self): return self._lookup_url('entry_edit', {'pk': self.parent_slug, 'phase': self.phase_slug})
BSD 3-Clause New or Revised License
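`get_delete_url` goes through `_lookup_url`, which first reverses `entry_delete` with only `pk` and `phase`, and on `NoReverseMatch` retries with the challenge's `project` and `slug` added. An illustrative outcome under an assumed URLconf — the path shown is a guess, not the project's actual route:

# Hypothetical: a submission with pk 7 in the ideation phase.
submission.get_delete_url()  # e.g. '/ideas/7/delete/' with a matching URL pattern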
danielnyga/pracmln
python3/pracmln/utils/eval.py
ConfusionMatrix.countClassifications
python
def countClassifications(self, classname):
    tp = self.matrix.get(classname, {}).get(classname, 0)
    classes = list(self.matrix.keys())
    fp = 0
    for c in classes:
        if c != classname:
            fp += self.getMatrixEntry(classname, c)
    fn = 0
    for c in classes:
        if c != classname:
            fn += self.getMatrixEntry(c, classname)
    tn = 0
    for c in classes:
        if c != classname:
            for c2 in classes:
                if c2 != classname:
                    tn += self.getMatrixEntry(c, c2)
    assert sum([tp, tn, fp, fn]) == self.instanceCount
    return tp, tn, fp, fn
Returns the true positive, true negative, false positive, false negative classification counts (in this order).
https://github.com/danielnyga/pracmln/blob/bbda65696fb8753b11ff007e991280ebe42d78f9/python3/pracmln/utils/eval.py#L88-L110
import pickle

from subprocess import Popen, PIPE

from ..mln.util import logx


def KLDivergence(p, q):
    if type(p) is str:
        p = pickle.load(open(p))
    if type(q) is str:
        q = pickle.load(open(q))
    kl_div = 0
    for p_, q_ in zip(p, q):
        p_ = max(1E-10, p_)
        q_ = max(1E-10, q_)
        kl_div += p_ * logx(float(p_) / q_)
    return kl_div


class ConfusionMatrix(object):

    def __init__(self):
        self.matrix = {}
        self.instanceCount = 0
        self.labels = []

    def addClassificationResult(self, prediction, groundTruth, inc=1):
        if not prediction in self.labels:
            self.labels.append(prediction)
        if not groundTruth in self.labels:
            self.labels.append(groundTruth)
        gndTruths = self.matrix.get(prediction, None)
        if gndTruths is None:
            gndTruths = {}
            self.matrix[prediction] = gndTruths
        if self.matrix.get(groundTruth, None) is None:
            self.matrix[groundTruth] = {groundTruth: 0}
        gndTruths[groundTruth] = gndTruths.get(groundTruth, 0) + inc
        self.instanceCount += inc

    def getMatrixEntry(self, pred, clazz):
        if self.matrix.get(pred, None) is None or self.matrix[pred].get(clazz, None) is None:
            return 0
        return self.matrix[pred][clazz]
BSD 2-Clause Simplified License
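A worked example for `countClassifications`, built with `addClassificationResult(prediction, groundTruth)` from the context:

cm = ConfusionMatrix()
cm.addClassificationResult("cat", "cat")  # true positive for "cat"
cm.addClassificationResult("cat", "dog")  # a "dog" predicted as "cat"
tp, tn, fp, fn = cm.countClassifications("cat")
assert (tp, tn, fp, fn) == (1, 0, 1, 0)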
galarzaa90/tibia.py
tibiapy/kill_statistics.py
KillStatistics.get_url
python
def get_url(cls, world):
    return get_tibia_url("community", "killstatistics", world=world)
Get the Tibia.com URL of the kill statistics of a world.

Parameters
----------
world: :class:`str`
    The game world of the desired kill statistics.

Returns
-------
The URL to the Tibia.com kill statistics for this world.
https://github.com/galarzaa90/tibia.py/blob/babcb1648fb99bf5ac0fd0162b38244cbcd21b9d/tibiapy/kill_statistics.py#L53-L65
from typing import Dict, List

from tibiapy import abc
from tibiapy.errors import InvalidContent
from tibiapy.utils import get_tibia_url, parse_form_data, parse_tibiacom_content

__all__ = (
    "KillStatistics",
    "RaceEntry",
)


class KillStatistics(abc.Serializable):

    __slots__ = (
        "world",
        "total",
        "entries",
        "available_worlds",
    )

    def __init__(self, world, entries=None, total=None, available_worlds=None):
        self.world: str = world
        self.entries: Dict[str, RaceEntry] = entries or {}
        self.total: RaceEntry = total or RaceEntry()
        self.available_worlds: List[str] = available_worlds or []

    @property
    def url(self):
        return self.get_url(self.world)

    @property
    def players(self):
        return self.entries.get("players", RaceEntry())

    @classmethod
Apache License 2.0
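`get_url` is a classmethod that simply forwards to `get_tibia_url("community", "killstatistics", world=world)`, so the result should be the usual Tibia.com subtopic URL; the exact string depends on `get_tibia_url`, which is not shown:

url = KillStatistics.get_url("Antica")
# likely 'https://www.tibia.com/community/?subtopic=killstatistics&world=Antica'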
cjerdonek/open-rcv
openrcv/utiltest/helpers.py
CaseMixin.changeAttr
python
def changeAttr(self, obj, name, value):
    initial_value = getattr(obj, name)
    setattr(obj, name, value)
    try:
        yield
    finally:
        setattr(obj, name, initial_value)
Context manager to temporarily change the value of an attribute.

This is useful for testing __eq__() by modifying one attribute
at a time.
https://github.com/cjerdonek/open-rcv/blob/62400a9634e58fa358341a8b7e85613bcd69114e/openrcv/utiltest/helpers.py#L83-L95
from contextlib import contextmanager
import os
import sys
import unittest

import openrcv

parent_dir = os.path.dirname(os.path.dirname(openrcv.__file__))


def skipIfTravis():
    msg = ("since Travis uses Python 3.4.1 instead of 3.4.2. "
           "See: http://bugs.python.org/issue22386")
    return unittest.skipIf(os.getenv('TRAVIS', False), msg)


class CaseMixin(object):

    def __str__(self):
        cls = self.__class__
        mod_name = cls.__module__
        path = os.path.relpath(sys.modules[mod_name].__file__, parent_dir)
        return ("%s.%s\n"
                " in %s\n"
                " >> %s.%s.%s" % (cls.__name__, self._testMethodName,
                                  path, mod_name, cls.__name__,
                                  self._testMethodName))

    @contextmanager
MIT License
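A usage sketch for `changeAttr` inside a test case; the `Widget` class is hypothetical and stands in for any object whose `__eq__` compares attributes:

class WidgetTest(CaseMixin, unittest.TestCase):

    def test_eq_depends_on_name(self):
        a, b = Widget(name="x"), Widget(name="x")  # Widget is hypothetical
        with self.changeAttr(b, "name", "y"):
            self.assertNotEqual(a, b)   # only "name" differs
        self.assertEqual(a, b)          # original value restored on exit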
anymesh/anymesh-python
example/urwid/listbox.py
SimpleListWalker.set_modified_callback
python
def set_modified_callback(self, callback):
    raise NotImplementedError('Use connect_signal('
        'list_walker, "modified", ...) instead.')
This function inherited from MonitoredList is not implemented in SimpleListWalker. Use connect_signal(list_walker, "modified", ...) instead.
https://github.com/anymesh/anymesh-python/blob/017b7808f2fbdc765604488d325678c28be438c0/example/urwid/listbox.py#L162-L170
from urwid.util import is_mouse_press from urwid.canvas import SolidCanvas, CanvasCombine from urwid.widget import Widget, nocache_widget_render_instance, BOX, GIVEN from urwid.decoration import calculate_top_bottom_filler, normalize_valign from urwid import signals from urwid.signals import connect_signal from urwid.monitored_list import MonitoredList, MonitoredFocusList from urwid.container import WidgetContainerMixin from urwid.command_map import (CURSOR_UP, CURSOR_DOWN, CURSOR_PAGE_UP, CURSOR_PAGE_DOWN) class ListWalkerError(Exception): pass class ListWalker(object): __metaclass__ = signals.MetaSignals signals = ["modified"] def _modified(self): signals.emit_signal(self, "modified") def get_focus(self): try: focus = self.focus return self[focus], focus except (IndexError, KeyError, TypeError): return None, None def get_next(self, position): try: position = self.next_position(position) return self[position], position except (IndexError, KeyError): return None, None def get_prev(self, position): try: position = self.prev_position(position) return self[position], position except (IndexError, KeyError): return None, None class PollingListWalker(object): def __init__(self, contents): import warnings warnings.warn("PollingListWalker is deprecated, " "use SimpleFocusListWalker instead.", DeprecationWarning) self.contents = contents if not getattr(contents, '__getitem__', None): raise ListWalkerError("PollingListWalker expecting list like " "object, got: %r" % (contents,)) self.focus = 0 def _clamp_focus(self): if self.focus >= len(self.contents): self.focus = len(self.contents)-1 def get_focus(self): if len(self.contents) == 0: return None, None self._clamp_focus() return self.contents[self.focus], self.focus def set_focus(self, position): assert type(position) == int self.focus = position def get_next(self, start_from): pos = start_from + 1 if len(self.contents) <= pos: return None, None return self.contents[pos],pos def get_prev(self, start_from): pos = start_from - 1 if pos < 0: return None, None return self.contents[pos],pos class SimpleListWalker(MonitoredList, ListWalker): def __init__(self, contents): if not getattr(contents, '__getitem__', None): raise ListWalkerError, "SimpleListWalker expecting list like object, got: %r"%(contents,) MonitoredList.__init__(self, contents) self.focus = 0 def _get_contents(self): return self contents = property(_get_contents) def _modified(self): if self.focus >= len(self): self.focus = max(0, len(self)-1) ListWalker._modified(self)
MIT License
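A minimal usage sketch for the signal-based replacement the docstring points to, assuming urwid's top-level exports; the callback and list items are illustrative:

# Instead of set_modified_callback, attach a "modified" handler with
# connect_signal, as the docstring advises.
from urwid import SimpleListWalker, Text, connect_signal

def on_modified():
    print("list contents changed")  # illustrative callback

walker = SimpleListWalker([Text("first"), Text("second")])
connect_signal(walker, "modified", on_modified)
walker.append(Text("third"))  # mutating the walker fires the signal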
openmined/pyariesfl
aries_cloudagent/stats.py
Stats.__init__
python
def __init__(self): self.counts = {} self.max_time = {} self.min_time = {} self.total_time = {}
Initialize the Stats instance.
https://github.com/openmined/pyariesfl/blob/dd78dcebc771971abfee301b80cdd5d246c14840/aries_cloudagent/stats.py#L12-L17
import functools import inspect import time from typing import Sequence, Union class Stats:
Apache License 2.0
nullconvergence/graphrepo
graphrepo/mappers/csv.py
CSVMapper.map
python
def map(self, objects): return pd.DataFrame(objects)
The CSV default map function assumes the objects are of a type accepted by the pandas DataFrame constructor.
https://github.com/nullconvergence/graphrepo/blob/ce625cd4efddc96f28835c541865ef11b46e4002/graphrepo/mappers/csv.py#L25-L30
from abc import abstractmethod import pandas as pd from graphrepo.mappers.default import DefaultMapper class CSVMapper(DefaultMapper):
Apache License 2.0
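A small usage sketch for the mapper above, assuming CSVMapper is directly instantiable and importable from the file path shown; the record dicts are illustrative:

# The default CSV mapper just hands the objects to the pandas
# DataFrame constructor, so anything pandas accepts works here.
from graphrepo.mappers.csv import CSVMapper

records = [
    {"hash": "abc123", "author": "alice"},
    {"hash": "def456", "author": "bob"},
]
df = CSVMapper().map(records)
print(df.shape)  # (2, 2)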
ccpgames/pypackage
pypackage/classified.py
choose_classifiers
python
def choose_classifiers(config): all_classifiers = read_classifiers() root_classifier = all_classifiers old_delay = os.getenv("ESCDELAY") os.environ["ESCDELAY"] = "25" screen = curses.initscr() choices = getattr(config, "classifiers", []) choice = BoxSelector(root_classifier, screen, choices).pick() while choice is not None: init = 0 if choice == 0 and root_classifier.name != "__root__": root_classifier = back_it_up(root_classifier, all_classifiers) elif choice == 9 and root_classifier.name == "__root__": break elif choice > len(root_classifier.classifiers): choice_index = (choice - len(root_classifier.classifiers) - int(root_classifier.name != "__root__")) choice_as_str = root_classifier.values[choice_index] if choice_as_str not in choices: choices.append(choice_as_str) else: choices.remove(choice_as_str) init = choice else: choice_index = choice - int(root_classifier.name != "__root__") root_classifier = root_classifier.classifiers[choice_index] choice = BoxSelector(root_classifier, screen, choices, init).pick() if old_delay: os.environ["ESCDELAY"] = old_delay else: os.environ.pop("ESCDELAY") return choices
Prompt the user to select the classifiers they'd like to use. Returns: list of valid classifier names
https://github.com/ccpgames/pypackage/blob/fa414bece96fa38a2471e380881f5ae4b20794de/pypackage/classified.py#L300-L340
from __future__ import unicode_literals import os import curses from collections import namedtuple from .constants import VERSION from .constants import CHECKMARK class BoxSelector(object): def __init__(self, classifier, screen, choices=None, current=0): self.stdscr = screen choices = choices or [] self.current_selected = current selections = [] if classifier.name != "__root__": selections.append("..") for group in classifier.classifiers: selections.append("[+] {}".format(group.name)) for value in classifier.values: selections.append(" {} {}".format( CHECKMARK if value in choices else " ", value, )) self.TEXTBOX_WIDTH = max(79, max([len(i) for i in selections]) + 2) self.TEXTBOX_HEIGHT = 3 if classifier.name == "__root__": selections.append("Done".center(self.TEXTBOX_WIDTH - 4, " ")) self.L = selections self.PAD_WIDTH = 600 self.PAD_HEIGHT = 10000 def pick(self): self._init_curses() self._create_pad() windows = self._make_textboxes() picked = self._select_textbox(windows) self._end_curses() return picked def _init_curses(self): curses.noecho() curses.cbreak() curses.curs_set(0) self.stdscr.keypad(1) curses.start_color() curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_GREEN) curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLACK) self.stdscr.bkgd(curses.color_pair(2)) self.stdscr.refresh() def _end_curses(self): curses.nocbreak() self.stdscr.keypad(0) curses.echo() curses.endwin() def _create_pad(self): self.pad = curses.newpad(self.PAD_HEIGHT, self.PAD_WIDTH) self.pad.box() def _make_textboxes(self): maxy, maxx = self.stdscr.getmaxyx() banner = "{} -- choose python trove classifiers".format(VERSION) self.stdscr.addstr(0, maxx // 2 - len(banner) // 2, banner) windows = [] i = 2 for item in self.L: pad = self.pad.derwin( self.TEXTBOX_HEIGHT, self.TEXTBOX_WIDTH, i, self.PAD_WIDTH // 2 - self.TEXTBOX_WIDTH // 2, ) pad.box() try: pad.addstr(1, 2, item) except UnicodeEncodeError: item = item.replace(CHECKMARK, "*") pad.addstr(1, 2, item) windows.append(pad) i += self.TEXTBOX_HEIGHT return windows def _center_view(self, window): cy, cx = window.getbegyx() maxy, maxx = self.stdscr.getmaxyx() self.pad.refresh(cy, cx, 1, maxx // 2 - self.TEXTBOX_WIDTH // 2, maxy - 1, maxx - 1) return (cy, cx) def _select_textbox(self, windows): topy, topx = self._center_view(windows[0]) last = self.current_selected - 1 top_textbox = windows[0] while True: windows[self.current_selected].bkgd(curses.color_pair(1)) windows[last].bkgd(curses.color_pair(2)) maxy, maxx = self.stdscr.getmaxyx() cy, cx = windows[self.current_selected].getbegyx() if ((topy + maxy - self.TEXTBOX_HEIGHT) <= cy): top_textbox = windows[self.current_selected] if topy >= cy + self.TEXTBOX_HEIGHT: top_textbox = windows[self.current_selected] if last != self.current_selected: last = self.current_selected topy, topx = self._center_view(top_textbox) c = self.stdscr.getch() if c in (106, curses.KEY_DOWN): if self.current_selected >= len(windows) - 1: self.current_selected = 0 else: self.current_selected += 1 elif c in (107, curses.KEY_UP): if self.current_selected <= 0: self.current_selected = len(windows) - 1 else: self.current_selected -= 1 elif c == 113: break elif c == curses.KEY_ENTER or c == 10: return int(self.current_selected) elif c == 27: self.stdscr.nodelay(True) n_seq = self.stdscr.getch() self.stdscr.nodelay(False) if n_seq == -1: if ".." 
in str(windows[0].instr(1, 0)): return 0 else: break Classifier = namedtuple("Classifier", ("name", "values", "classifiers")) def _ensure_chain(top_level, sub_categories): def _chain_in(level, item): for sub_class in level.classifiers: if sub_class.name == item: return sub_class else: new_sub = Classifier(item, [], []) level.classifiers.append(new_sub) return new_sub for sub_cat in sub_categories: top_level = _chain_in(top_level, sub_cat) return top_level def read_classifiers(): with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "classifiers")) as openc: classifiers = [c.strip() for c in openc.read().splitlines() if c] all_classifiers = [] def _get_classifier(categories): top_level = categories[0] sub_categories = categories[1:] for classifier in all_classifiers: if classifier.name == top_level: top_level = classifier break else: top_level = Classifier(top_level, [], []) all_classifiers.append(top_level) return _ensure_chain(top_level, sub_categories) for clsifier in classifiers: _get_classifier(clsifier.split(" :: ")[:-1]).values.append(clsifier) return Classifier("__root__", [], all_classifiers) def back_it_up(current_level, all_classifiers, recursive=False): for classifier in all_classifiers.classifiers: if current_level in classifier.classifiers: return classifier for classifier in all_classifiers.classifiers: attempt = back_it_up(current_level, classifier, True) if attempt: return attempt if not recursive: return all_classifiers
MIT License
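A minimal invocation sketch, assuming the module import path mirrors the file path shown; choose_classifiers needs a real terminal for its curses picker, and the config object only has to carry an optional classifiers attribute:

# Run the interactive trove-classifier picker, seeding it with one
# pre-selected classifier; it returns the updated selection list.
from types import SimpleNamespace
from pypackage.classified import choose_classifiers

config = SimpleNamespace(classifiers=["Programming Language :: Python"])
chosen = choose_classifiers(config)  # opens the curses UI
print(chosen)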
w-a-r-m-inventory-system/food-pantry-inventory
fpiweb/support/BoxActivity.py
BoxActivityClass._report_internal_error
python
def _report_internal_error(self, exc: Exception, action: str): if self.box is None: box_number = 'is missing' else: box_number = self.box.box_number if self.activity is None: activity_info = f'activity missing' else: if self.activity.date_consumed: date_consumed = self.activity.date_consumed else: date_consumed = '(still in inventory)' activity_info = ( f'has box {self.activity.box_number}, created ' f'{self.activity.date_filled}, consumed ' f'{date_consumed}' ) logger.error( f'Got error: {exc} ' f'while attempting to {action}, Box info: ' f'{box_number}, Activity info: {activity_info}' ) raise InternalError('Internal error, see log for details')
Report details of an internal error. :param exc: original exception :param action: additional message :return: (no return; ends by raising an additional exception)
https://github.com/w-a-r-m-inventory-system/food-pantry-inventory/blob/3607703f2f57200b53f7e54bbcdf6d9a41f1c10b/fpiweb/support/BoxActivity.py#L489-L518
from datetime import datetime, date from enum import Enum, unique from logging import getLogger from typing import Optional from django.db import transaction, IntegrityError from django.utils.timezone import now from fpiweb.constants import InternalError from fpiweb.models import Box, Activity, BoxType, Location, LocRow, LocBin, LocTier, Product, ProductCategory __author__ = 'Travis Risner' __project__ = "Food-Pantry-Inventory" __creation_date__ = "07/31/2019" logger = getLogger('fpiweb') @unique class BOX_ACTION(Enum): ADD: str = 'add' FILL: str = 'fill' MOVE: str = 'move' EMPTY: str = 'empty' class BoxActivityClass: def __init__(self): self.box: Optional[Box] = None self.box_type: Optional[BoxType] = None self.location: Optional[Location] = None self.loc_row: Optional[LocRow] = None self.loc_bin: Optional[LocBin] = None self.loc_tier: Optional[LocTier] = None self.product: Optional[Product] = None self.prod_cat: Optional[ProductCategory] = None self.activity: Optional[Activity] = None def box_new(self, box_id: Box.id): logger.debug(f'Act Box New: No action - Box ID: {box_id}') return def box_fill(self, box_id: Box.id): self.box = Box.objects.select_related( 'box_type', 'location', 'location__loc_row', 'location__loc_bin', 'location__loc_tier', 'product', 'product__prod_cat', ).get(id=box_id) self.box_type = self.box.box_type self.location = self.box.location self.loc_row = self.location.loc_row self.loc_bin = self.location.loc_bin self.loc_tier = self.location.loc_tier self.product = self.box.product self.prod_cat = self.product.prod_cat logger.debug(f'Act Box Fill: box received: Box ID: {box_id}') self.activity = None try: self.activity = Activity.objects.filter( box_number__exact=self.box.box_number).latest( '-date_filled', '-date_consumed') logger.debug( f'Act Box Fill: Latest activity found: ' f'{self.activity.box_number}, ' f'filled:{self.activity.date_filled}' ) if self.activity.date_consumed: logger.debug( f'Act Box Fill: Previous activity consumed: ' f'{self.activity.date_consumed}' ) self.activity = None else: logger.debug(f'Act Box Fill: Consuming previous box contents') self._consume_activity(adjustment=Activity.FILL_EMPTIED) self.activity = None except Activity.DoesNotExist: self.activity = None logger.debug(f'Act Box Fill: No previous activity found') self._add_activity() logger.debug(f'Act Box Fill: done') return def box_move(self, box_id: Box.id): logger.debug(f'Act Box Move: box received: Box ID: {box_id}') self.box = Box.objects.select_related( 'box_type', 'location', 'location__loc_row', 'location__loc_bin', 'location__loc_tier', 'product', 'product__prod_cat', ).get(id=box_id) self.box_type = self.box.box_type self.location = self.box.location self.loc_row = self.location.loc_row self.loc_bin = self.location.loc_bin self.loc_tier = self.location.loc_tier self.product = self.box.product self.prod_cat = self.product.prod_cat try: act_for_box = Activity.objects.filter( box_number=self.box.box_number, date_consumed=None, ).order_by('-date_filled') self.activity = None for act in act_for_box: if (not self.activity) and ( act.box_type == self.box_type.box_type_code and act.prod_name == self.product.prod_name and act.date_filled == self.box.date_filled.date() and act.exp_year == self.box.exp_year and act.exp_month_start == self.box.exp_month_start and act.exp_month_end == self.box.exp_month_end ): self.activity = act else: date_consumed, duration = self.compute_duration_days( act.date_filled) act.date_consumed = date_consumed act.duration = duration act.adjustment_code = 
Activity.MOVE_CONSUMED logger.debug( f'Act Box Move: Bogus open activity found for: ' f'{act.box_number}, ' f'filled:{act.date_filled}, ' f'Forced to be consumed now' ) act.save() if self.activity: logger.debug( f'Act Box Move: Activity found to move: ' f'{self.activity.box_number}, ' f'filled:{self.activity.date_filled}' ) else: logger.debug( f'Act Box Move: Activity not consumed - proceeding...') raise Activity.DoesNotExist except Activity.DoesNotExist: self.activity = None logger.debug( f'Act Box Move: Activity for this box missing - making a ' f'new one...' ) self._add_activity( adjustment=Activity.MOVE_ADDED ) logger.debug(f'Act Box Move: Updating activity ID: {self.activity.id}') self._update_activity_location() logger.debug(f'Act Box Move: done') return def box_empty(self, box_id: Box.id): self.box = Box.objects.select_related( 'box_type', 'location', 'location__loc_row', 'location__loc_bin', 'location__loc_tier', 'product', 'product__prod_cat', ).get(id=box_id) self.box_type = self.box.box_type self.location = self.box.location self.loc_row = self.location.loc_row self.loc_bin = self.location.loc_bin self.loc_tier = self.location.loc_tier self.product = self.box.product self.prod_cat = self.product.prod_cat logger.debug(f'Act Box Empty: box received: Box ID: {box_id}') try: self.activity = Activity.objects.filter( box_number__exact=self.box.box_number).latest( 'date_filled', '-date_consumed' ) logger.debug( f'Act Box Empty: Activity found - id: ' f'{self.activity.id}, filled: {self.activity.date_filled}' ) if self.activity.date_consumed: logger.debug( f'Act Box Empty: activity consumed ' f'{self.activity.date_consumed}, make new activity' ) self.activity = None self._add_activity(adjustment=Activity.CONSUME_ADDED) elif ( self.activity.loc_row != self.loc_row.loc_row or self.activity.loc_bin != self.loc_bin.loc_bin or self.activity.loc_tier != self.loc_tier.loc_tier or self.activity.prod_name != self.product.prod_name or self.activity.date_filled != self.box.date_filled.date() or self.activity.exp_year != self.box.exp_year or self.activity.exp_month_start != self.box.exp_month_start or self.activity.exp_month_end != self.box.exp_month_end ): logger.debug( f'Act Box Empty: mismatch, consume this activity and ' f'make a new one' ) self._consume_activity( adjustment=Activity.CONSUME_ADDED) self._add_activity(adjustment=Activity.CONSUME_EMPTIED) else: logger.debug( f'Act Box Empty: box and activity matched, record ' f'consumption ' ) pass except Activity.DoesNotExist: self.activity = None logger.debug(f'Act Box Empty: no activity, make one') self._add_activity( adjustment=Activity.CONSUME_ADDED ) self._consume_activity() logger.debug(f'Act Box Empty: done') return def _add_activity(self, adjustment: str = None): try: with transaction.atomic(): self.activity = Activity( box_number=self.box.box_number, box_type=self.box_type.box_type_code, loc_row=self.loc_row.loc_row, loc_bin=self.loc_bin.loc_bin, loc_tier=self.loc_tier.loc_tier, prod_name=self.product.prod_name, prod_cat_name=self.prod_cat.prod_cat_name, date_filled=self.box.date_filled.date(), date_consumed=None, duration=0, exp_year=self.box.exp_year, exp_month_start=self.box.exp_month_start, exp_month_end=self.box.exp_month_end, quantity=self.box.quantity, adjustment_code=adjustment, ) self.activity.save() logger.debug( f'Act Box_Add: Just added activity ID: ' f'{self.activity.id}' ) except IntegrityError as exc: self._report_internal_error( exc, 'adding an activity for a newly filled box' ) return def 
_update_activity_location(self): try: with transaction.atomic(): self.activity.loc_row = self.loc_row.loc_row self.activity.loc_bin = self.loc_bin.loc_bin self.activity.loc_tier = self.loc_tier.loc_tier self.activity.save() logger.debug( f'Act Box_Upd: Just updated activity ID: ' f'{self.activity.id}' ) self.activity.save() except IntegrityError as exc: self._report_internal_error( exc, 'update an activity by moving a box' ) self.activity = None return def _consume_activity(self, adjustment: str = None): try: with transaction.atomic(): date_consumed, duration = self.compute_duration_days( self.activity.date_filled) self.activity.date_consumed = date_consumed self.activity.duration = duration if not self.activity.adjustment_code: self.activity.adjustment_code = adjustment self.activity.save() logger.debug( f'Act Box_Empty: Just consumed activity ID: ' f'{self.activity.id}' ) if not adjustment: self.box.location = None self.box.product = None self.box.exp_year = None self.box.exp_month_start = None self.box.exp_month_end = None self.box.date_filled = None self.box.quantity = None self.box.save() logger.debug( f'Act Box_Empty: Just emptied box ID: {self.box.id}' ) except IntegrityError as exc: self._report_internal_error( exc, 'update an activity by consuming a box' ) self.activity = None return def compute_duration_days(self, date_filled: date) -> tuple: date_consumed = now().date() duration = (date_consumed - date_filled).days return date_consumed, duration
MIT License
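A small sketch of the duration helper shown in the context above, assuming a configured Django environment (compute_duration_days calls django's now()); the fill date is illustrative:

# Days elapsed between the fill date and today, as a (date, int) pair.
from datetime import date
from fpiweb.support.BoxActivity import BoxActivityClass

activity_tracker = BoxActivityClass()
date_consumed, duration = activity_tracker.compute_duration_days(date(2020, 1, 1))
print(date_consumed, duration)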
huntermcgushion/hyperparameter_hunter
hyperparameter_hunter/i_o/leaderboards.py
combine_column_order
python
def combine_column_order(df_1, df_2, both_cols=None): both_cols = both_cols or [] df_1_cols = [_ for _ in list(df_1.columns) if _ not in both_cols] df_2_cols = [_ for _ in list(df_2.columns) if _ not in both_cols] common_cols = [_ for _ in df_1_cols if _ in df_2_cols] unique_cols_1 = [_ for _ in df_1_cols if _ not in df_2_cols] unique_cols_2 = [_ for _ in df_2_cols if _ not in df_1_cols] combined_cols = common_cols + unique_cols_1 + unique_cols_2 + both_cols return combined_cols
Determine the sort order for the combined columns of two DataFrames Parameters ---------- df_1: pd.DataFrame The first DataFrame, whose columns will be sorted. Columns unique to `df_1` will be sorted before those of `df_2` df_2: pd.DataFrame The second DataFrame, whose columns will be sorted. Columns unique to `df_2` will be sorted after those of `df_1` both_cols: list, or None, default=None If list, the column names that should be common to both DataFrames and placed last in the sort order Returns ------- combined_cols: list of strings The result of combining and sorting column names from `df_1`, and `df_2` Examples -------- >>> df_1 = pd.DataFrame(columns=['A', 'B', 'C', 'Common_1', 'Common_2']) >>> df_2 = pd.DataFrame(columns=['A', 'D', 'E', 'Common_1', 'Common_2']) >>> combine_column_order(df_1, df_2, both_cols=['Common_1', 'Common_2']) ['A', 'B', 'C', 'D', 'E', 'Common_1', 'Common_2'] >>> combine_column_order(df_1, df_2, both_cols=None) ['A', 'Common_1', 'Common_2', 'B', 'C', 'D', 'E']
https://github.com/huntermcgushion/hyperparameter_hunter/blob/28b1d48e01a993818510811b82a677e0a7a232b2/hyperparameter_hunter/i_o/leaderboards.py#L175-L213
from abc import ABCMeta, abstractmethod from collections import OrderedDict from numbers import Number import numpy as np import pandas as pd from typing import Dict, List, Optional, Tuple class Leaderboard(metaclass=ABCMeta): def __init__(self, data=None): self.data = data if data is not None else pd.DataFrame() def __getattr__(self, item): return self.data.__getattribute__(item) @classmethod def from_path(cls, path, assert_existence=False): try: data = pd.read_csv(path, index_col=None) except FileNotFoundError: if assert_existence is True: raise data = None return cls(data=data) @abstractmethod def add_entry(self, experiment, **kwargs): def save(self, path, **kwargs): self.data.to_csv(path_or_buf=path, index=False, float_format="%.10f", **kwargs) def sort(self, by, ascending=False): self.data.sort_values(by=by, axis=0, inplace=True, ascending=ascending) class GlobalLeaderboard(Leaderboard): def add_entry(self, experiment, **kwargs): final_evaluations = experiment.last_evaluation_results entry_columns, entry_data = [], [] evaluation_columns, evaluation_values = list( zip(*evaluations_to_columns(final_evaluations)) ) entry_columns.extend(evaluation_columns) entry_data.extend(evaluation_values) identifier_cols = [ "experiment_id", "hyperparameter_key", "cross_experiment_key", "algorithm_name", ] entry_columns.extend(identifier_cols) for id_col in identifier_cols: entry_data.append(str(getattr(experiment, id_col))) entry_columns.append("experiment_#") entry_data.append(self.data.shape[0]) identifier_cols.append("experiment_#") entry = pd.DataFrame(data=[entry_data], columns=entry_columns) self.data = self.data.append(entry, ignore_index=True)[ combine_column_order(self.data, entry, both_cols=identifier_cols) ] def evaluations_to_columns( evaluation: Dict[str, Optional[OrderedDict]], decimals=10 ) -> List[Tuple[str, Number]]: data_types = ["oof", "holdout", "in_fold"] column_metrics = [] for data_type in data_types: if evaluation[data_type] is not None: for metric_key, metric_val in evaluation[data_type].items(): column_metrics.append((f"{data_type}_{metric_key}", np.round(metric_val, decimals))) return column_metrics
MIT License
veeresht/commpy
commpy/modulation.py
Modem.__init__
python
def __init__(self, constellation, reorder_as_gray=True): if reorder_as_gray: m = log2(len(constellation)) gray_code_sequence = GrayCode(m).generate_gray() gray_code_sequence_array = fromiter((int(g, 2) for g in gray_code_sequence), int, len(constellation)) self.constellation = array(constellation)[gray_code_sequence_array.argsort()] else: self.constellation = constellation
Creates a custom Modem object.
https://github.com/veeresht/commpy/blob/aab0f22b57bfbccee95f44b78108824feee3af91/commpy/modulation.py#L68-L77
from bisect import insort import matplotlib.pyplot as plt from numpy import arange, array, zeros, pi, sqrt, log2, argmin, hstack, repeat, tile, dot, shape, concatenate, exp, log, vectorize, empty, eye, kron, inf, full, abs, newaxis, minimum, clip, fromiter from numpy.fft import fft, ifft from numpy.linalg import qr, norm from sympy.combinatorics.graycode import GrayCode from commpy.utilities import bitarray2dec, dec2bitarray, signal_power __all__ = ['PSKModem', 'QAMModem', 'ofdm_tx', 'ofdm_rx', 'mimo_ml', 'kbest', 'best_first_detector', 'bit_lvl_repr', 'max_log_approx'] class Modem:
BSD 3-Clause New or Revised License
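A brief usage sketch via PSKModem, which builds on the Modem constructor above; it assumes the full Modem class (only partially shown here) provides modulate:

# QPSK: 4 constellation points, Gray-reordered by Modem.__init__.
import numpy as np
from commpy.modulation import PSKModem

modem = PSKModem(4)
bits = np.array([0, 1, 1, 0])
symbols = modem.modulate(bits)  # two complex symbols, one per bit pair
print(symbols)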
google/clusterfuzz
src/appengine/handlers/fuzzer_stats.py
Handler.get
python
def get(self, extra=None): if not access.has_access(): user_email = helpers.get_user_email() fuzzers_list = external_users.allowed_fuzzers_for_user( user_email, include_from_jobs=True, include_parents=True) if not fuzzers_list: raise helpers.AccessDeniedException( "You don't have access to any fuzzers.") return self.render('fuzzer-stats.html', {})
Handle a GET request.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/appengine/handlers/fuzzer_stats.py#L389-L402
import datetime import html import re import urllib.parse from flask import request from googleapiclient.errors import HttpError import six import yaml from clusterfuzz._internal.base import external_users from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.google_cloud_utils import big_query from clusterfuzz._internal.metrics import fuzzer_stats from clusterfuzz._internal.metrics import logs from handlers import base_handler from libs import access from libs import handler from libs import helpers MEMCACHE_OLD_TTL_IN_SECONDS = 24 * 60 * 60 MEMCACHE_TODAY_TTL_IN_SECONDS = 30 * 60 class QueryField(object): def __init__(self, field, results_index, field_type, bigquery_type): self.field = field self.results_index = results_index self.field_type = field_type self.bigquery_type = bigquery_type.lower() class BuiltinField(object): def __init__(self, spec, field): self.spec = spec self.field = field def _bigquery_type_to_charts_type(typename): typename = typename.lower() if typename in ('integer', 'float'): return 'number' if typename == 'timestamp': return 'date' return 'string' def _python_type_to_charts_type(type_value): if type_value in (int, float): return 'number' if type_value == datetime.date: return 'date' return 'string' def _parse_date(date_str): if not date_str: return None pattern = re.compile(r'^(\d{4})-(\d{2})-(\d{2})$') match = pattern.match(date_str) if not match: return None year, month, day = (int(val) for val in match.groups()) return datetime.date(year, month, day) def _parse_stats_column_fields(results, stats_columns, group_by, fuzzer, jobs): result = [] columns = fuzzer_stats.parse_stats_column_fields(stats_columns) group_by_field_name = fuzzer_stats.group_by_to_field_name(group_by) columns.insert(0, fuzzer_stats.QueryField('j', group_by_field_name, None)) contexts = {} for column in columns: if isinstance(column, fuzzer_stats.QueryField): key = '%s_%s' % (column.table_alias, column.select_alias) for i, field_info in enumerate(results['schema']['fields']): if (field_info['name'] == column.select_alias or field_info['name'] == key): result.append( QueryField(column, i, _bigquery_type_to_charts_type(field_info['type']), field_info['type'])) break elif isinstance(column, fuzzer_stats.BuiltinFieldSpecifier): field_class = column.field_class() if not field_class: continue context_class = field_class.CONTEXT_CLASS context = contexts.setdefault(context_class, context_class(fuzzer, jobs)) result.append(BuiltinField(column, column.create(context))) return result def _parse_group_by(group_by): if group_by == 'by-day': return fuzzer_stats.QueryGroupBy.GROUP_BY_DAY if group_by == 'by-time': return fuzzer_stats.QueryGroupBy.GROUP_BY_TIME if group_by == 'by-revision': return fuzzer_stats.QueryGroupBy.GROUP_BY_REVISION if group_by == 'by-job': return fuzzer_stats.QueryGroupBy.GROUP_BY_JOB if group_by == 'by-fuzzer': return fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER return None def _get_fuzzer_or_engine(name): fuzz_target = data_handler.get_fuzz_target(name) if fuzz_target: name = fuzz_target.engine return data_types.Fuzzer.query(data_types.Fuzzer.name == name).get() def _do_bigquery_query(query): logs.log(query) client = big_query.Client() try: results = client.raw_query(query, max_results=10000) except HttpError as e: raise helpers.EarlyExitException(str(e), 500) if 'rows' not in results: raise 
helpers.EarlyExitException('No stats.', 404) return results def _parse_stats_column_descriptions(stats_column_descriptions): if not stats_column_descriptions: return {} try: result = yaml.safe_load(stats_column_descriptions) for key, value in six.iteritems(result): result[key] = html.escape(value) return result except yaml.parser.ParserError: logs.log_error('Failed to parse stats column descriptions.') return {} def _build_columns(result, columns): for column in columns: if isinstance(column, QueryField): result['cols'].append({ 'label': column.field.select_alias, 'type': column.field_type, }) elif isinstance(column, BuiltinField): result['cols'].append({ 'label': column.spec.alias or column.spec.name, 'type': _python_type_to_charts_type(column.field.VALUE_TYPE), }) def _try_cast(cell, value_str, cast_function, default_value): try: cell['v'] = cast_function(value_str) except (ValueError, TypeError): cell['v'] = default_value cell['f'] = '--' def _build_rows(result, columns, rows, group_by): for row in rows: row_data = [] first_column_value = None for column in columns: cell = {} if isinstance(column, QueryField): value = row['f'][column.results_index]['v'] if column.field.select_alias == 'time': timestamp = float(value) time = datetime.datetime.utcfromtimestamp(timestamp) first_column_value = first_column_value or time cell['v'] = 'Date(%d, %d, %d, %d, %d, %d)' % ( time.year, time.month - 1, time.day, time.hour, time.minute, time.second) elif column.field.select_alias == 'date': timestamp = float(value) date = datetime.datetime.utcfromtimestamp(timestamp).date() first_column_value = first_column_value or date cell['v'] = 'Date(%d, %d, %d)' % (date.year, date.month - 1, date.day) elif column.bigquery_type == 'integer': _try_cast(cell, value, int, 0) elif column.bigquery_type == 'float': _try_cast(cell, value, lambda s: round(float(s), 1), 0.0) else: cell['v'] = value first_column_value = first_column_value or cell['v'] elif isinstance(column, BuiltinField): data = column.field.get(group_by, first_column_value) if data: formatted_value = data.value if data.link: link = ( _get_cloud_storage_link(data.link) if data.link.startswith('gs://') else data.link) formatted_value = '<a href="%s">%s</a>' % (link, data.value) if data.sort_key is not None: cell['v'] = data.sort_key else: cell['v'] = data.value if data.sort_key is not None or data.link: cell['f'] = formatted_value else: cell['v'] = '' cell['f'] = '--' row_data.append(cell) result['rows'].append({'c': row_data}) def _get_cloud_storage_link(bucket_path): return '/gcs-redirect?' + urllib.parse.urlencode({'path': bucket_path}) def _get_filter_from_job(job): return [str(job)] if job else None def build_results(fuzzer, jobs, group_by, date_start, date_end): datetime_end = _parse_date(date_end) if not datetime_end: raise helpers.EarlyExitException('Missing end date.', 400) if datetime_end < utils.utcnow().date(): logs.log('Building results for older stats %s %s %s %s %s.' % (fuzzer, jobs, group_by, date_start, date_end)) return _build_old_results(fuzzer, jobs, group_by, date_start, date_end) logs.log('Building results for stats including today %s %s %s %s %s.' 
% (fuzzer, jobs, group_by, date_start, date_end)) return _build_todays_results(fuzzer, jobs, group_by, date_start, date_end) @memoize.wrap(memoize.Memcache(MEMCACHE_TODAY_TTL_IN_SECONDS)) def _build_todays_results(fuzzer, jobs, group_by, date_start, date_end): return _build_results(fuzzer, jobs, group_by, date_start, date_end) @memoize.wrap(memoize.Memcache(MEMCACHE_OLD_TTL_IN_SECONDS)) def _build_old_results(fuzzer, jobs, group_by, date_start, date_end): return _build_results(fuzzer, jobs, group_by, date_start, date_end) def _build_results(fuzzer, jobs, group_by, date_start, date_end): date_start = _parse_date(date_start) date_end = _parse_date(date_end) if not fuzzer or not group_by or not date_start or not date_end: raise helpers.EarlyExitException('Missing params.', 400) fuzzer_entity = _get_fuzzer_or_engine(fuzzer) if not fuzzer_entity: raise helpers.EarlyExitException('Fuzzer not found.', 404) if fuzzer_entity.stats_columns: stats_columns = fuzzer_entity.stats_columns else: stats_columns = fuzzer_stats.JobQuery.DEFAULT_FIELDS group_by = _parse_group_by(group_by) if group_by is None: raise helpers.EarlyExitException('Invalid grouping.', 400) table_query = fuzzer_stats.TableQuery(fuzzer, jobs, stats_columns, group_by, date_start, date_end) results = _do_bigquery_query(table_query.build()) is_timeseries = group_by == fuzzer_stats.QueryGroupBy.GROUP_BY_TIME result = { 'cols': [], 'rows': [], 'column_descriptions': _parse_stats_column_descriptions( fuzzer_entity.stats_column_descriptions), 'is_timeseries': is_timeseries } columns = _parse_stats_column_fields(results, stats_columns, group_by, fuzzer, jobs) if is_timeseries: columns = [c for c in columns if not isinstance(c, BuiltinField)] _build_columns(result, columns) _build_rows(result, columns, results['rows'], group_by) return result def _get_date(date_value, days_ago): if date_value: return date_value date_datetime = utils.utcnow() - datetime.timedelta(days=days_ago) return date_datetime.strftime('%Y-%m-%d') class Handler(base_handler.Handler): @handler.unsupported_on_local_server @handler.get(handler.HTML)
Apache License 2.0
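A small sketch of the date parser from the context above; it assumes _parse_date and the module's datetime import are in scope, and the date strings are illustrative:

import datetime
# _parse_date only accepts zero-padded YYYY-MM-DD strings.
assert _parse_date("2024-01-15") == datetime.date(2024, 1, 15)
assert _parse_date("2024-1-15") is None  # not zero-padded
assert _parse_date(None) is None         # falsy input short-circuits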
binux/pyspider
pyspider/libs/counter.py
CounterManager.trim
python
def trim(self): for key, value in list(iteritems(self.counters)): if value.empty(): del self.counters[key]
Clear unused counters
https://github.com/binux/pyspider/blob/897891cafb21ea5b4ac08e728ad2ea212879f7fa/pyspider/libs/counter.py#L366-L370
from __future__ import unicode_literals, division, absolute_import import time import logging from collections import deque try: from UserDict import DictMixin except ImportError: from collections import Mapping as DictMixin import six from six import iteritems from six.moves import cPickle class BaseCounter(object): def __init__(self): pass def event(self, value=1): raise NotImplementedError def value(self, value): raise NotImplementedError @property def avg(self): raise NotImplementedError @property def sum(self): raise NotImplementedError def empty(self): raise NotImplementedError class TotalCounter(BaseCounter): def __init__(self): super(TotalCounter, self).__init__() self.cnt = 0 def event(self, value=1): self.cnt += value def value(self, value): self.cnt = value @property def avg(self): return self.cnt @property def sum(self): return self.cnt def empty(self): return self.cnt == 0 class AverageWindowCounter(BaseCounter): def __init__(self, window_size=300): super(AverageWindowCounter, self).__init__() self.window_size = window_size self.values = deque(maxlen=window_size) def event(self, value=1): self.values.append(value) value = event @property def avg(self): return self.sum / len(self.values) @property def sum(self): return sum(self.values) def empty(self): if not self.values: return True class TimebaseAverageEventCounter(BaseCounter): def __init__(self, window_size=30, window_interval=10): super(TimebaseAverageEventCounter, self).__init__() self.max_window_size = window_size self.window_size = 0 self.window_interval = window_interval self.values = deque(maxlen=window_size) self.events = deque(maxlen=window_size) self.times = deque(maxlen=window_size) self.cache_value = 0 self.cache_event = 0 self.cache_start = None self._first_data_time = None def event(self, value=1): now = time.time() if self._first_data_time is None: self._first_data_time = now if self.cache_start is None: self.cache_value = value self.cache_event = 1 self.cache_start = now elif now - self.cache_start > self.window_interval: self.values.append(self.cache_value) self.events.append(self.cache_event) self.times.append(self.cache_start) self.on_append(self.cache_value, self.cache_start) self.cache_value = value self.cache_event = 1 self.cache_start = now else: self.cache_value += value self.cache_event += 1 return self def value(self, value): self.cache_value = value def _trim_window(self): now = time.time() if self.cache_start and now - self.cache_start > self.window_interval: self.values.append(self.cache_value) self.events.append(self.cache_event) self.times.append(self.cache_start) self.on_append(self.cache_value, self.cache_start) self.cache_value = 0 self.cache_start = None if self.window_size != self.max_window_size and self._first_data_time is not None: time_passed = now - self._first_data_time self.window_size = min(self.max_window_size, time_passed / self.window_interval) window_limit = now - self.window_size * self.window_interval while self.times and self.times[0] < window_limit: self.times.popleft() self.events.popleft() self.values.popleft() @property def avg(self): events = (sum(self.events) + self.cache_event) if not events: return 0 return float(self.sum) / events @property def sum(self): self._trim_window() return sum(self.values) + self.cache_value def empty(self): self._trim_window() if not self.values and not self.cache_start: return True def on_append(self, value, time): pass class TimebaseAverageWindowCounter(BaseCounter): def __init__(self, window_size=30, window_interval=10): 
super(TimebaseAverageWindowCounter, self).__init__() self.max_window_size = window_size self.window_size = 0 self.window_interval = window_interval self.values = deque(maxlen=window_size) self.times = deque(maxlen=window_size) self.cache_value = 0 self.cache_start = None self._first_data_time = None def event(self, value=1): now = time.time() if self._first_data_time is None: self._first_data_time = now if self.cache_start is None: self.cache_value = value self.cache_start = now elif now - self.cache_start > self.window_interval: self.values.append(self.cache_value) self.times.append(self.cache_start) self.on_append(self.cache_value, self.cache_start) self.cache_value = value self.cache_start = now else: self.cache_value += value return self def value(self, value): self.cache_value = value def _trim_window(self): now = time.time() if self.cache_start and now - self.cache_start > self.window_interval: self.values.append(self.cache_value) self.times.append(self.cache_start) self.on_append(self.cache_value, self.cache_start) self.cache_value = 0 self.cache_start = None if self.window_size != self.max_window_size and self._first_data_time is not None: time_passed = now - self._first_data_time self.window_size = min(self.max_window_size, time_passed / self.window_interval) window_limit = now - self.window_size * self.window_interval while self.times and self.times[0] < window_limit: self.times.popleft() self.values.popleft() @property def avg(self): sum = float(self.sum) if not self.window_size: return 0 return sum / self.window_size / self.window_interval @property def sum(self): self._trim_window() return sum(self.values) + self.cache_value def empty(self): self._trim_window() if not self.values and not self.cache_start: return True def on_append(self, value, time): pass class CounterValue(DictMixin): def __init__(self, manager, keys): self.manager = manager self._keys = keys def __getitem__(self, key): if key == '__value__': key = self._keys return self.manager.counters[key] else: key = self._keys + (key, ) available_keys = [] for _key in list(self.manager.counters.keys()): if _key[:len(key)] == key: available_keys.append(_key) if len(available_keys) == 0: raise KeyError elif len(available_keys) == 1: if available_keys[0] == key: return self.manager.counters.get(key) else: return CounterValue(self.manager, key) else: return CounterValue(self.manager, key) def __len__(self): return len(self.keys()) def __iter__(self): return iter(self.keys()) def __contains__(self, key): return key in self.keys() def keys(self): result = set() for key in list(self.manager.counters.keys()): if key[:len(self._keys)] == self._keys: key = key[len(self._keys):] result.add(key[0] if key else '__value__') return result def to_dict(self, get_value=None): result = {} for key, value in iteritems(self): if isinstance(value, BaseCounter): if get_value is not None: value = getattr(value, get_value) result[key] = value else: result[key] = value.to_dict(get_value) return result class CounterManager(DictMixin): def __init__(self, cls=TimebaseAverageWindowCounter): self.cls = cls self.counters = {} def event(self, key, value=1): if isinstance(key, six.string_types): key = (key, ) assert isinstance(key, tuple), "event key type error" if key not in self.counters: self.counters[key] = self.cls() self.counters[key].event(value) return self def value(self, key, value=1): if isinstance(key, six.string_types): key = (key, ) assert isinstance(key, tuple), "event key type error" if key not in self.counters: self.counters[key] = 
self.cls() self.counters[key].value(value) return self
Apache License 2.0
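A short usage sketch for the counter manager above, assuming the import path mirrors the file path shown; keys and values are illustrative:

# Record events under tuple keys, read the total back, then trim
# any counters that have gone empty.
from pyspider.libs.counter import CounterManager, TotalCounter

counters = CounterManager(cls=TotalCounter)
counters.event(('task', 'success'), 1)
counters.event(('task', 'success'), 2)
print(counters.counters[('task', 'success')].sum)  # 3
counters.trim()  # drops counters whose empty() is True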
googleads/google-ads-python
examples/advanced_operations/add_display_upload_ad.py
_create_display_upload_ad_group_ad
python
def _create_display_upload_ad_group_ad( client, customer_id, ad_group_id, ad_asset_resource_name ): ad_group_ad_service = client.get_service("AdGroupAdService") ad_group_ad_operation = client.get_type("AdGroupAdOperation") ad_group_ad = ad_group_ad_operation.create ad_group_ad.status = client.enums.AdGroupAdStatusEnum.PAUSED ad_group_ad.ad_group = client.get_service("AdGroupService").ad_group_path( customer_id, ad_group_id ) display_upload_ad = ad_group_ad.ad display_upload_ad.name = "Ad for HTML5" display_upload_ad.final_urls.append("http://example.com/html5") display_upload_ad.display_upload_ad.media_bundle.asset = ( ad_asset_resource_name ) display_upload_ad.display_upload_ad.display_upload_product_type = ( client.enums.DisplayUploadProductTypeEnum.HTML5_UPLOAD_AD ) mutate_ad_group_ads_response = ad_group_ad_service.mutate_ad_group_ads( customer_id=customer_id, operations=[ad_group_ad_operation] ) print( "Created new ad group ad with resource name " f"'{mutate_ad_group_ads_response.results[0].resource_name}'." )
Creates a new HTML5 display upload ad and adds it to the given ad group. Args: client: An initialized Google Ads client. customer_id: The Google Ads customer ID. ad_group_id: The ID of the ad group to which the new ad will be added. ad_asset_resource_name: The resource name of the media bundle containing the HTML5 components.
https://github.com/googleads/google-ads-python/blob/6794993e146abcfe21292677144c66cb546446bc/examples/advanced_operations/add_display_upload_ad.py#L98-L146
import argparse import sys import requests from google.ads.googleads.client import GoogleAdsClient from google.ads.googleads.errors import GoogleAdsException BUNDLE_URL = "https://gaagl.page.link/ib87" def main(client, customer_id, ad_group_id): ad_asset_resource_name = _create_media_bundle_asset(client, customer_id) _create_display_upload_ad_group_ad( client, customer_id, ad_group_id, ad_asset_resource_name ) def _create_media_bundle_asset(client, customer_id): asset_service = client.get_service("AssetService") asset_operation = client.get_type("AssetOperation") media_bundle_asset = asset_operation.create media_bundle_asset.type_ = client.enums.AssetTypeEnum.MEDIA_BUNDLE media_bundle_asset.media_bundle_asset.data = requests.get( BUNDLE_URL ).content mutate_asset_response = asset_service.mutate_assets( customer_id=customer_id, operations=[asset_operation] ) uploaded_asset_resource_name = mutate_asset_response.results[ 0 ].resource_name print(f"Uploaded file with resource name '{uploaded_asset_resource_name}'.") return uploaded_asset_resource_name
Apache License 2.0
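A minimal wiring sketch for the example above, assuming credentials in a google-ads.yaml picked up by load_from_storage; the customer and ad group IDs are placeholders:

# main() and GoogleAdsException come from the module shown above.
from google.ads.googleads.client import GoogleAdsClient

googleads_client = GoogleAdsClient.load_from_storage()
try:
    main(googleads_client, "1234567890", "9876543210")
except GoogleAdsException as ex:
    for error in ex.failure.errors:
        print(error.message)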
mystor/git-revise
gitrevise/odb.py
Commit.update
python
def update( self, tree: Optional["Tree"] = None, parents: Optional[Sequence["Commit"]] = None, message: Optional[bytes] = None, author: Optional[Signature] = None, recommit: bool = False, ) -> "Commit": if tree is None: tree = self.tree() if parents is None: parents = self.parents() if message is None: message = self.message if author is None: author = self.author if not recommit: unchanged = ( tree == self.tree() and parents == self.parents() and message == self.message and author == self.author ) if unchanged: return self return self.repo.new_commit(tree, parents, message, author)
Create a new commit with specific properties updated or replaced
https://github.com/mystor/git-revise/blob/06e9126cc9f39c48486792a25999d3b8fc1caacd/gitrevise/odb.py#L619-L650
import hashlib import re import os from typing import ( TypeVar, Type, Dict, Union, Sequence, Optional, Mapping, Generic, Tuple, cast, ) import sys from types import TracebackType from pathlib import Path from enum import Enum from subprocess import Popen, run, PIPE, CalledProcessError from collections import defaultdict from tempfile import TemporaryDirectory class MissingObject(Exception): def __init__(self, ref: str) -> None: Exception.__init__(self, f"Object {ref} does not exist") class GPGSignError(Exception): def __init__(self, stderr: str) -> None: Exception.__init__(self, f"unable to sign object: {stderr}") T = TypeVar("T") class Oid(bytes): __slots__ = () def __new__(cls, b: bytes) -> "Oid": if len(b) != 20: raise ValueError("Expected 160-bit SHA1 hash") return super().__new__(cls, b) @classmethod def fromhex(cls, instr: str) -> "Oid": return Oid(bytes.fromhex(instr)) @classmethod def null(cls) -> "Oid": return cls(b"\0" * 20) def short(self) -> str: return str(self)[:12] @classmethod def for_object(cls, tag: str, body: bytes) -> "Oid": hasher = hashlib.sha1() hasher.update(tag.encode() + b" " + str(len(body)).encode() + b"\0" + body) return cls(hasher.digest()) def __repr__(self) -> str: return self.hex() def __str__(self) -> str: return self.hex() class Signature(bytes): __slots__ = () sig_re = re.compile( rb""" (?P<signing_key> (?P<name>[^<>]+)<(?P<email>[^<>]+)> ) [ ] (?P<timestamp>[0-9]+) (?:[ ](?P<offset>[\+\-][0-9]+))? """, re.X, ) @property def name(self) -> bytes: match = self.sig_re.fullmatch(self) assert match, "invalid signature" return match.group("name").strip() @property def email(self) -> bytes: match = self.sig_re.fullmatch(self) assert match, "invalid signature" return match.group("email").strip() @property def signing_key(self) -> bytes: match = self.sig_re.fullmatch(self) assert match, "invalid signature" return match.group("signing_key").strip() @property def timestamp(self) -> bytes: match = self.sig_re.fullmatch(self) assert match, "invalid signature" return match.group("timestamp").strip() @property def offset(self) -> bytes: match = self.sig_re.fullmatch(self) assert match, "invalid signature" return match.group("offset").strip() class Repository: workdir: Path gitdir: Path default_author: Signature default_committer: Signature index: "Index" sign_commits: bool gpg: bytes _objects: Dict[int, Dict[Oid, "GitObj"]] _catfile: Popen _tempdir: Optional[TemporaryDirectory] __slots__ = [ "workdir", "gitdir", "default_author", "default_committer", "index", "sign_commits", "gpg", "_objects", "_catfile", "_tempdir", ] def __init__(self, cwd: Optional[Path] = None) -> None: self._tempdir = None self.workdir = Path(self.git("rev-parse", "--show-toplevel", cwd=cwd).decode()) self.gitdir = self.workdir / Path(self.git("rev-parse", "--git-dir").decode()) self.default_author = Signature(self.git("var", "GIT_AUTHOR_IDENT")) self.default_committer = Signature(self.git("var", "GIT_COMMITTER_IDENT")) self.index = Index(self) self.sign_commits = self.bool_config( "revise.gpgSign", default=self.bool_config("commit.gpgSign", default=False) ) self.gpg = self.config("gpg.program", default=b"gpg") self._catfile = Popen( ["git", "cat-file", "--batch"], bufsize=-1, stdin=PIPE, stdout=PIPE, cwd=self.workdir, ) self._objects = defaultdict(dict) try: self.get_obj(Oid.null()) raise IOError("cat-file backend failure") except MissingObject: pass def git( self, *cmd: str, cwd: Optional[Path] = None, stdin: Optional[bytes] = None, trim_newline: bool = True, env: Dict[str, str] = None, 
nocapture: bool = False, ) -> bytes: if cwd is None: cwd = getattr(self, "workdir", None) cmd = ("git",) + cmd prog = run( cmd, stdout=None if nocapture else PIPE, cwd=cwd, env=env, input=stdin, check=True, ) if nocapture: return b"" if trim_newline and prog.stdout.endswith(b"\n"): return prog.stdout[:-1] return prog.stdout def config(self, setting: str, default: T) -> Union[bytes, T]: try: return self.git("config", "--get", setting) except CalledProcessError: return default def bool_config(self, config: str, default: T) -> Union[bool, T]: try: return self.git("config", "--get", "--bool", config) == b"true" except CalledProcessError: return default def int_config(self, config: str, default: T) -> Union[int, T]: try: return int(self.git("config", "--get", "--int", config)) except CalledProcessError: return default def __enter__(self) -> "Repository": return self def __exit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[Exception], exc_tb: Optional[TracebackType], ) -> None: if self._tempdir: self._tempdir.__exit__(exc_type, exc_val, exc_tb) self._catfile.terminate() self._catfile.wait() def get_tempdir(self) -> Path: if self._tempdir is None: self._tempdir = TemporaryDirectory( prefix="revise.", dir=str(self.gitdir) ) return Path(self._tempdir.name) def git_path(self, path: Union[str, Path]) -> Path: return self.workdir / self.git("rev-parse", "--git-path", str(path)).decode() def new_commit( self, tree: "Tree", parents: Sequence["Commit"], message: bytes, author: Optional[Signature] = None, committer: Optional[Signature] = None, ) -> "Commit": if author is None: author = self.default_author if committer is None: committer = self.default_committer body = b"tree " + tree.oid.hex().encode() + b"\n" for parent in parents: body += b"parent " + parent.oid.hex().encode() + b"\n" body += b"author " + author + b"\n" body += b"committer " + committer + b"\n" body_tail = b"\n" + message body += self.sign_buffer(body + body_tail) body += body_tail return Commit(self, body) def sign_buffer(self, buffer: bytes) -> bytes: if not self.sign_commits: return b"" key_id = self.config( "user.signingKey", default=self.default_committer.signing_key ) gpg = None try: gpg = run( (self.gpg, "--status-fd=2", "-bsau", key_id), stdout=PIPE, stderr=PIPE, input=buffer, check=True, ) except CalledProcessError as gpg: print(gpg.stderr.decode(), file=sys.stderr, end="") print("gpg failed to sign commit", file=sys.stderr) raise if b"\n[GNUPG:] SIG_CREATED " not in gpg.stderr: raise GPGSignError(gpg.stderr.decode()) signature = b"gpgsig" for line in gpg.stdout.splitlines(): signature += b" " + line + b"\n" return signature def new_tree(self, entries: Mapping[bytes, "Entry"]) -> "Tree": def entry_key(pair: Tuple[bytes, Entry]) -> bytes: name, entry = pair if entry.mode == Mode.DIR: return name + b"/" return name body = b"" for name, entry in sorted(entries.items(), key=entry_key): body += cast(bytes, entry.mode.value) + b" " + name + b"\0" + entry.oid return Tree(self, body) def get_obj(self, ref: Union[Oid, str]) -> "GitObj": if isinstance(ref, Oid): cache = self._objects[ref[0]] if ref in cache: return cache[ref] ref = ref.hex() (stdin, stdout) = (self._catfile.stdin, self._catfile.stdout) assert stdin is not None assert stdout is not None stdin.write(ref.encode() + b"\n") stdin.flush() resp = stdout.readline().decode() if resp.endswith("missing\n"): try: abbrev = bytes.fromhex(ref) for oid, obj in self._objects[abbrev[0]].items(): if oid.startswith(abbrev): return obj except (ValueError, IndexError): 
pass raise MissingObject(ref) parts = resp.rsplit(maxsplit=2) oid, kind, size = Oid.fromhex(parts[0]), parts[1], int(parts[2]) body = stdout.read(size + 1)[:-1] assert size == len(body), "bad size?" if kind == "commit": obj = Commit(self, body) elif kind == "tree": obj = Tree(self, body) elif kind == "blob": obj = Blob(self, body) else: raise ValueError(f"Unknown object kind: {kind}") obj.persisted = True assert obj.oid == oid, "miscomputed oid" return obj def get_commit(self, ref: Union[Oid, str]) -> "Commit": obj = self.get_obj(ref) if isinstance(obj, Commit): return obj raise ValueError(f"{type(obj).__name__} {ref} is not a Commit!") def get_tree(self, ref: Union[Oid, str]) -> "Tree": obj = self.get_obj(ref) if isinstance(obj, Tree): return obj raise ValueError(f"{type(obj).__name__} {ref} is not a Tree!") def get_blob(self, ref: Union[Oid, str]) -> "Blob": obj = self.get_obj(ref) if isinstance(obj, Blob): return obj raise ValueError(f"{type(obj).__name__} {ref} is not a Blob!") def get_obj_ref(self, ref: str) -> "Reference[GitObj]": return Reference(GitObj, self, ref) def get_commit_ref(self, ref: str) -> "Reference[Commit]": return Reference(Commit, self, ref) def get_tree_ref(self, ref: str) -> "Reference[Tree]": return Reference(Tree, self, ref) def get_blob_ref(self, ref: str) -> "Reference[Blob]": return Reference(Blob, self, ref) GitObjT = TypeVar("GitObjT", bound="GitObj") class GitObj: repo: Repository body: bytes oid: Oid persisted: bool __slots__ = ("repo", "body", "oid", "persisted") def __new__(cls: Type[GitObjT], repo: Repository, body: bytes) -> GitObjT: oid = Oid.for_object(cls._git_type(), body) cache = repo._objects[oid[0]] if oid in cache: cached = cache[oid] assert isinstance(cached, cls) return cached self = super().__new__(cls) self.repo = repo self.body = body self.oid = oid self.persisted = False cache[oid] = self self._parse_body() return cast(GitObjT, self) @classmethod def _git_type(cls) -> str: return cls.__name__.lower() def persist(self) -> Oid: if self.persisted: return self.oid self._persist_deps() new_oid = self.repo.git( "hash-object", "--no-filters", "-t", self._git_type(), "-w", "--stdin", stdin=self.body, ) assert Oid.fromhex(new_oid.decode()) == self.oid self.persisted = True return self.oid def _persist_deps(self) -> None: pass def _parse_body(self) -> None: pass def __eq__(self, other: object) -> bool: if isinstance(other, GitObj): return self.oid == other.oid return False class Commit(GitObj): tree_oid: Oid parent_oids: Sequence[Oid] author: Signature committer: Signature gpgsig: Optional[bytes] message: bytes __slots__ = ("tree_oid", "parent_oids", "author", "committer", "gpgsig", "message") def _parse_body(self) -> None: hdrs, self.message = self.body.split(b"\n\n", maxsplit=1) self.parent_oids = [] for hdr in re.split(br"\n(?! 
)", hdrs): key, value = hdr.split(maxsplit=1) value = value.replace(b"\n ", b"\n") self.gpgsig = None if key == b"tree": self.tree_oid = Oid.fromhex(value.decode()) elif key == b"parent": self.parent_oids.append(Oid.fromhex(value.decode())) elif key == b"author": self.author = Signature(value) elif key == b"committer": self.committer = Signature(value) elif key == b"gpgsig": self.gpgsig = value def tree(self) -> "Tree": return self.repo.get_tree(self.tree_oid) def parent_tree(self) -> "Tree": if self.is_root: return Tree(self.repo, b"") return self.parents()[0].tree() @property def is_root(self) -> bool: return not self.parent_oids def parents(self) -> Sequence["Commit"]: return [self.repo.get_commit(parent) for parent in self.parent_oids] def parent(self) -> "Commit": if len(self.parents()) != 1: raise ValueError(f"Commit {self.oid} has {len(self.parents())} parents") return self.parents()[0] def summary(self) -> str: summary_paragraph = self.message.split(b"\n\n", maxsplit=1)[0].decode( errors="replace" ) return " ".join(summary_paragraph.splitlines()) def rebase(self, parent: Optional["Commit"]) -> "Commit": from .merge import rebase return rebase(self, parent)
MIT License
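A short usage sketch for Commit.update, assuming it is run inside a git checkout; the message prefix is illustrative:

# Reword HEAD's message; tree and parents are carried over unchanged,
# and the rewritten commit is returned without being persisted yet.
from gitrevise.odb import Repository

with Repository() as repo:
    head = repo.get_commit("HEAD")
    reworded = head.update(message=b"reworded: " + head.message)
    print(reworded.oid.short())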
bennington-hardware-hacking-2019/pos_system
tag_reader/smbus2/smbus2/smbus2.py
SMBus.read_i2c_block_data
python
def read_i2c_block_data(self, i2c_addr, register, length, force=None): if length > I2C_SMBUS_BLOCK_MAX: raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX) self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA ) msg.data.contents.byte = length ioctl(self.fd, I2C_SMBUS, msg) return msg.data.contents.block[1:length + 1]
Read a block of byte data from a given register. :param i2c_addr: i2c address :type i2c_addr: int :param register: Start register :type register: int :param length: Desired block length :type length: int :param force: :type force: Boolean :return: List of bytes :rtype: list
https://github.com/bennington-hardware-hacking-2019/pos_system/blob/b204d19bf7695d53d6a0b1ce25ed6490bb06314c/tag_reader/smbus2/smbus2/smbus2.py#L546-L569
import os import sys from fcntl import ioctl from ctypes import c_uint32, c_uint8, c_uint16, c_char, POINTER, Structure, Array, Union, create_string_buffer, string_at I2C_SLAVE = 0x0703 I2C_SLAVE_FORCE = 0x0706 I2C_FUNCS = 0x0705 I2C_RDWR = 0x0707 I2C_SMBUS = 0x0720 I2C_SMBUS_WRITE = 0 I2C_SMBUS_READ = 1 I2C_SMBUS_QUICK = 0 I2C_SMBUS_BYTE = 1 I2C_SMBUS_BYTE_DATA = 2 I2C_SMBUS_WORD_DATA = 3 I2C_SMBUS_PROC_CALL = 4 I2C_SMBUS_BLOCK_DATA = 5 I2C_SMBUS_BLOCK_PROC_CALL = 7 I2C_SMBUS_I2C_BLOCK_DATA = 8 I2C_SMBUS_BLOCK_MAX = 32 I2C_FUNC_I2C = 0x00000001 I2C_FUNC_10BIT_ADDR = 0x00000002 I2C_FUNC_PROTOCOL_MANGLING = 0x00000004 I2C_FUNC_SMBUS_PEC = 0x00000008 I2C_FUNC_NOSTART = 0x00000010 I2C_FUNC_SLAVE = 0x00000020 I2C_FUNC_SMBUS_BLOCK_PROC_CALL = 0x00008000 I2C_FUNC_SMBUS_QUICK = 0x00010000 I2C_FUNC_SMBUS_READ_BYTE = 0x00020000 I2C_FUNC_SMBUS_WRITE_BYTE = 0x00040000 I2C_FUNC_SMBUS_READ_BYTE_DATA = 0x00080000 I2C_FUNC_SMBUS_WRITE_BYTE_DATA = 0x00100000 I2C_FUNC_SMBUS_READ_WORD_DATA = 0x00200000 I2C_FUNC_SMBUS_WRITE_WORD_DATA = 0x00400000 I2C_FUNC_SMBUS_PROC_CALL = 0x00800000 I2C_FUNC_SMBUS_READ_BLOCK_DATA = 0x01000000 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA = 0x02000000 I2C_FUNC_SMBUS_READ_I2C_BLOCK = 0x04000000 I2C_FUNC_SMBUS_WRITE_I2C_BLOCK = 0x08000000 I2C_M_RD = 0x0001 LP_c_uint8 = POINTER(c_uint8) LP_c_uint16 = POINTER(c_uint16) LP_c_uint32 = POINTER(c_uint32) class i2c_smbus_data(Array): _length_ = I2C_SMBUS_BLOCK_MAX + 2 _type_ = c_uint8 class union_i2c_smbus_data(Union): _fields_ = [ ("byte", c_uint8), ("word", c_uint16), ("block", i2c_smbus_data) ] union_pointer_type = POINTER(union_i2c_smbus_data) class i2c_smbus_ioctl_data(Structure): _fields_ = [ ('read_write', c_uint8), ('command', c_uint8), ('size', c_uint32), ('data', union_pointer_type)] __slots__ = [name for name, type in _fields_] @staticmethod def create(read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE_DATA): u = union_i2c_smbus_data() return i2c_smbus_ioctl_data( read_write=read_write, command=command, size=size, data=union_pointer_type(u)) class i2c_msg(Structure): _fields_ = [ ('addr', c_uint16), ('flags', c_uint16), ('len', c_uint16), ('buf', POINTER(c_char))] def __iter__(self): return i2c_msg_iter(self) def __len__(self): return self.len def __bytes__(self): return string_at(self.buf, self.len) def __repr__(self): return 'i2c_msg(%d,%d,%r)' % (self.addr, self.flags, self.__bytes__()) def __str__(self): s = self.__bytes__() if sys.version_info.major >= 3: s = ''.join(map(chr, s)) return s @staticmethod def read(address, length): arr = create_string_buffer(length) return i2c_msg( addr=address, flags=I2C_M_RD, len=length, buf=arr) @staticmethod def write(address, buf): if sys.version_info.major >= 3: if type(buf) is str: buf = bytes(map(ord, buf)) else: buf = bytes(buf) else: if type(buf) is not str: buf = ''.join([chr(x) for x in buf]) arr = create_string_buffer(buf, len(buf)) return i2c_msg( addr=address, flags=0, len=len(arr), buf=arr) class i2c_rdwr_ioctl_data(Structure): _fields_ = [ ('msgs', POINTER(i2c_msg)), ('nmsgs', c_uint32) ] __slots__ = [name for name, type in _fields_] @staticmethod def create(*i2c_msg_instances): n_msg = len(i2c_msg_instances) msg_array = (i2c_msg * n_msg)(*i2c_msg_instances) return i2c_rdwr_ioctl_data( msgs=msg_array, nmsgs=n_msg ) class i2c_msg_iter: def __init__(self, msg): self.msg = msg self.idx = 0 def __iter__(self): return self def __next__(self): if self.idx < self.msg.len: val = ord(self.msg.buf[self.idx]) self.idx += 1 return val else: raise StopIteration() def next(self): return 
self.__next__() class SMBus(object): def __init__(self, bus=None, force=False): self.fd = None self.funcs = 0 if bus is not None: self.open(bus) self.address = None self.force = force self._force_last = None def open(self, bus): self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR) self.funcs = self._get_funcs() def close(self): if self.fd: os.close(self.fd) self.fd = None def _set_address(self, address, force=None): force = force if force is not None else self.force if self.address != address or self._force_last != force: if force is True: ioctl(self.fd, I2C_SLAVE_FORCE, address) else: ioctl(self.fd, I2C_SLAVE, address) self.address = address self._force_last = force def _get_funcs(self): f = c_uint32() ioctl(self.fd, I2C_FUNCS, f) return f.value def write_quick(self, i2c_addr, force=None): self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_WRITE, command=0, size=I2C_SMBUS_QUICK) ioctl(self.fd, I2C_SMBUS, msg) def read_byte(self, i2c_addr, force=None): self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE ) ioctl(self.fd, I2C_SMBUS, msg) return msg.data.contents.byte def write_byte(self, i2c_addr, value, force=None): self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE ) ioctl(self.fd, I2C_SMBUS, msg) def read_byte_data(self, i2c_addr, register, force=None): self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA ) ioctl(self.fd, I2C_SMBUS, msg) return msg.data.contents.byte def write_byte_data(self, i2c_addr, register, value, force=None): self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA ) msg.data.contents.byte = value ioctl(self.fd, I2C_SMBUS, msg) def read_word_data(self, i2c_addr, register, force=None): self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA ) ioctl(self.fd, I2C_SMBUS, msg) return msg.data.contents.word def write_word_data(self, i2c_addr, register, value, force=None): self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_WORD_DATA ) msg.data.contents.word = value ioctl(self.fd, I2C_SMBUS, msg) def process_call(self, i2c_addr, register, value, force=None): self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL ) msg.data.contents.word = value ioctl(self.fd, I2C_SMBUS, msg) return msg.data.contents.word def read_block_data(self, i2c_addr, register, force=None): self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA ) ioctl(self.fd, I2C_SMBUS, msg) length = msg.data.contents.block[0] return msg.data.contents.block[1:length + 1] def write_block_data(self, i2c_addr, register, data, force=None): length = len(data) if length > I2C_SMBUS_BLOCK_MAX: raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX) self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA ) msg.data.contents.block[0] = length 
msg.data.contents.block[1:length + 1] = data ioctl(self.fd, I2C_SMBUS, msg) def block_process_call(self, i2c_addr, register, data, force=None): length = len(data) if length > I2C_SMBUS_BLOCK_MAX: raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX) self._set_address(i2c_addr, force=force) msg = i2c_smbus_ioctl_data.create( read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_PROC_CALL ) msg.data.contents.block[0] = length msg.data.contents.block[1:length + 1] = data ioctl(self.fd, I2C_SMBUS, msg) length = msg.data.contents.block[0] return msg.data.contents.block[1:length + 1]
MIT License
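A minimal usage sketch for the vendored smbus2 API above, assuming a Linux host with an I2C adapter; the bus number (1), device address (0x40), and register offsets are placeholders for whatever your hardware exposes.

from smbus2 import SMBus  # assumed import path for this vendored copy

bus = SMBus(1)                          # opens /dev/i2c-1
bus.write_byte_data(0x40, 0x00, 0xFF)   # write 0xFF to register 0x00
value = bus.read_byte_data(0x40, 0x00)  # read the register back

# Block write, capped at I2C_SMBUS_BLOCK_MAX (32) bytes as enforced above
bus.write_block_data(0x40, 0x10, [1, 2, 3, 4])
bus.close()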
dmlc/dgl
python/dgl/init.py
base_initializer
python
def base_initializer(shape, dtype, ctx, id_range): raise NotImplementedError
The function signature for a feature initializer. Any customized feature initializer should follow this signature (see example below). Parameters ---------- shape : tuple of int The shape of the result features. The first dimension is the batch dimension. dtype : data type object The data type of the returned features. ctx : context object The device context of the returned features. id_range : slice The start id and the end id of the features to be initialized. The id could be a node or edge id depending on the scenario. Note that the step is always None. Examples -------- If PyTorch is used as backend, the following code defines a feature initializer that initializes tensor values to 1 >>> import torch >>> import dgl >>> def initializer(shape, dtype, ctx, id_range): >>> return torch.ones(shape, dtype=dtype, device=ctx) >>> g = dgl.DGLGraph() >>> g.set_n_initializer(initializer) See Also -------- dgl.DGLGraph.set_n_initializer dgl.DGLGraph.set_e_initializer
https://github.com/dmlc/dgl/blob/8341244a2dac850bd0c1153c7641c3b8a2bbfc30/python/dgl/init.py#L8-L45
from __future__ import absolute_import from . import backend as F __all__ = ['base_initializer', 'zero_initializer']
Apache License 2.0
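A hedged sketch of a custom initializer matching the signature above; the name const_initializer and the constant 0.5 are illustrative only.

import torch
import dgl  # assumes dgl is importable, as in the docstring example

def const_initializer(shape, dtype, ctx, id_range):
    # id_range tells you which node/edge ids are being initialized;
    # here we ignore it and fill every new feature with 0.5.
    return torch.full(shape, 0.5, dtype=dtype, device=ctx)

g = dgl.DGLGraph()
g.set_n_initializer(const_initializer)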
pyxll/remote-notebook
pyxll_notebook/client/kernel.py
Kernel.__on_reply
python
async def __on_reply(self, msg): msg_id = msg.get("parent_header", {}).get("msg_id") if not msg_id: _log.debug(f"Message reply received with no msg_id in the parent_header: {msg}") return event = self.__message_events.pop(msg_id, None) if event: event.set(msg)
Sets any waiting events when a message reply is received.
https://github.com/pyxll/remote-notebook/blob/321c3fd6978a7e6a7c8500cb7d6d1727d21ecbee/pyxll_notebook/client/kernel.py#L196-L205
from .handler import Handler from .events import MessageReplyEvent from ..errors import * from typing import * import datetime as dt import urllib.parse import websockets import logging import aiohttp import asyncio import pickle import json import uuid import os import re _log = logging.getLogger(__name__) class Kernel: default_handler_cls = Handler message_protocol_version = "5.0" def __init__(self, url, authenticator, handler=None): if handler is None: handler = self.default_handler_cls(self) self.__url = url self.__handler = handler self.__kernel = None self.__ws = None self.__session_id = uuid.uuid1().hex self.__username = os.getlogin() self.__kernel_url = None self.__ws_url = None self.__authenticator = authenticator self.__message_events: Dict[str, MessageReplyEvent] = {} async def start(self): url = self.__url ws_url = None if not self.__authenticator.authenticated: await self.__authenticator.authenticate() kernels_url = url + "/api/kernels" async with aiohttp.ClientSession(cookie_jar=self.__authenticator.cookie_jar) as session: async with session.post(kernels_url, headers=self.__authenticator.headers) as response: try: await response.read() response.raise_for_status() except Exception: self.__authenticator.reset() raise if not re.match(r"^application/(?:[\w.+-]+?\+)?json", response.content_type, re.IGNORECASE): raise KernelStartError("Response ito kernel start request is not JSON data. " "Check the notebook server is running.") kernel = await response.json() if not "id" in kernel: raise KernelStartError(kernel.get("message")) kernel_id = kernel["id"] _log.debug(f"Started new kernel {kernel_id}.") self.__kernel = kernel self.__id = kernel_id self.__kernel_url = kernels_url + "/" + self.__kernel["id"] if ws_url is None: u = urllib.parse.urlparse(url) scheme = "wss" if u.scheme == "https" else "ws" port = f":{u.port}" if u.port else "" ws_url = f"{scheme}://{u.hostname}{port}{u.path}" ws_headers = dict(self.__authenticator.headers) cookies = self.__authenticator.cookie_jar.filter_cookies(kernels_url) cookies = [f"{k}={c.value};" for k, c in cookies.items()] ws_headers["Cookie"] = " ".join(cookies) self.__ws_url = f"{ws_url}/api/kernels/{kernel_id}/channels?session_id={self.__session_id}" self.__ws = await websockets.connect(self.__ws_url, max_size=None, extra_headers=ws_headers) loop = asyncio.get_event_loop() loop.create_task(self.__poll_ws()) async def run_notebook(self, path): url = self.__url + "/api/contents/" + path async with aiohttp.ClientSession(cookie_jar=self.__authenticator.cookie_jar) as session: async with session.get(url, headers=self.__authenticator.headers) as response: try: await response.read() response.raise_for_status() file = await response.json() except Exception: self.__authenticator.reset() raise await self.execute(f"__pyxll_notebook_session__ = '{self.__session_id}'") await self.execute(f"__pyxll_pickle_protocol__ = {pickle.HIGHEST_PROTOCOL}") cells = file["content"]["cells"] code = [c["source"] for c in cells if len(c["source"]) > 0 and c["cell_type"] == "code"] for c in code: await self.execute(c) async def execute(self, code, user_expressions={}): msg_id = uuid.uuid1().hex content = { 'code': code, 'silent': False, 'user_expressions': user_expressions } header = { 'msg_id': msg_id, 'msg_type': 'execute_request', 'username': self.__username, 'session': self.__session_id, 'data': dt.datetime.now().isoformat(), 'version': self.message_protocol_version } msg = { "channel": "shell", 'header': header, 'parent_header': header, 'metadata': {}, 'content': content, 
} event = self.__message_events[msg_id] = MessageReplyEvent() await self.__ws.send(json.dumps(msg)) reply = await event.wait() content = reply.get("content", {}) status = content.get("status") if status != "ok": raise ExecuteRequestError(**content) return content async def __poll_ws(self): while self.__ws is not None: try: msg = json.loads(await self.__ws.recv()) parent_header = msg.get("parent_header", {}) parent_session_id = parent_header.get("session") if parent_session_id != self.__session_id: continue header = msg.get("header", {}) msg_type = header.get("msg_type") if "." in msg_type: ns, msg_type = msg_type.split(".", 1) if ns != "pyxll": continue if msg_type.endswith("_reply"): await self.__on_reply(msg) func = getattr(self.__handler, f"on_{msg_type}", None) if func: await func(msg) except Exception: _log.error("An error occurred processing a message from the kernel", exc_info=True)
MIT License
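A standalone sketch of the request/reply correlation that Kernel.__on_reply performs: each outgoing msg_id maps to an awaitable event that the reply handler resolves. MessageReplyEvent in the real code may differ; this only mirrors the pattern.

import asyncio
import uuid

class ReplyEvent:
    """Resolvable future-like wrapper, standing in for MessageReplyEvent."""
    def __init__(self):
        self._event = asyncio.Event()
        self._msg = None

    def set(self, msg):
        self._msg = msg
        self._event.set()

    async def wait(self):
        await self._event.wait()
        return self._msg

pending = {}  # msg_id -> ReplyEvent, like Kernel.__message_events

async def send(ws_send):
    msg_id = uuid.uuid1().hex
    event = pending[msg_id] = ReplyEvent()
    await ws_send({'header': {'msg_id': msg_id}})
    return await event.wait()  # resolves when on_reply() fires

async def on_reply(msg):
    msg_id = msg.get('parent_header', {}).get('msg_id')
    event = pending.pop(msg_id, None)  # replies with unknown ids are dropped
    if event:
        event.set(msg)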
guidopaul/cail2019
viz_utils/model_utils.py
get_attention_nth_layer_mth_head_kth_token
python
def get_attention_nth_layer_mth_head_kth_token( attention_outputs, n, m, k, average_heads=False, logger=None ): if average_heads is True and m is not None: logger.warning( "Argument passed for param @m will be ignored because of head averaging." ) attention_outputs_concatenated = torch.cat( attention_outputs, dim=0 ) attention_outputs = attention_outputs_concatenated.data[ n, :, :, : ] attention_outputs = attention_outputs[:, k, :] if average_heads: attention_outputs = torch.sum(attention_outputs, dim=0) num_attention_heads = attention_outputs_concatenated.shape[1] attention_outputs /= num_attention_heads else: attention_outputs = attention_outputs[m, :] return attention_outputs
Function to compute attention weights by: i) taking the attention weights from the nth multi-head attention layer assigned to the kth token, and ii) taking the mth attention head
https://github.com/guidopaul/cail2019/blob/752e1b222614fdd8f4b574b2f73a33b3640c5264/viz_utils/model_utils.py#L35-L68
import re import torch def get_tokenized_text(tokenizer, input_ids): input_ids = input_ids sentence = tokenizer.decode(token_ids=input_ids) if isinstance(sentence, list): sentence = sentence[0] sentence = sentence.replace("[CLS]", "C") sentence = sentence.replace("[SEP]", "E") sentence = sentence.replace("[UNK]", "U") sentence = sentence.replace("[PAD]", "P") sentence = sentence.lstrip().rstrip() sentence = re.sub(" +", " ", sentence) sentence = "".join(sentence.split()) return sentence[1:-1]
MIT License
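A hedged usage sketch: fabricate BERT-like attention outputs (one tensor per layer, shaped [batch=1, heads, seq_len, seq_len]) and extract layer 2, head 3, for token 0 with the function above; all sizes are made up.

import torch

num_layers, num_heads, seq_len = 12, 8, 16
attention_outputs = [torch.rand(1, num_heads, seq_len, seq_len)
                     for _ in range(num_layers)]
weights = get_attention_nth_layer_mth_head_kth_token(
    attention_outputs, n=2, m=3, k=0, average_heads=False)
print(weights.shape)  # torch.Size([16]): one weight per attended token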
devopshq/teamcity
dohq_teamcity/models/vcs_roots.py
VcsRoots.next_href
python
def next_href(self, next_href): self._next_href = next_href
Sets the next_href of this VcsRoots. :param next_href: The next_href of this VcsRoots. # noqa: E501 :type: str
https://github.com/devopshq/teamcity/blob/84f1757ec1fddef27d39246a75739d047be0e831/dohq_teamcity/models/vcs_roots.py#L113-L121
from dohq_teamcity.custom.base_model import TeamCityObject class VcsRoots(TeamCityObject): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'count': 'int', 'href': 'str', 'next_href': 'str', 'prev_href': 'str', 'vcs_root': 'list[VcsRoot]' } attribute_map = { 'count': 'count', 'href': 'href', 'next_href': 'nextHref', 'prev_href': 'prevHref', 'vcs_root': 'vcs-root' } def __init__(self, count=None, href=None, next_href=None, prev_href=None, vcs_root=None, teamcity=None): self._count = None self._href = None self._next_href = None self._prev_href = None self._vcs_root = None self.discriminator = None if count is not None: self.count = count if href is not None: self.href = href if next_href is not None: self.next_href = next_href if prev_href is not None: self.prev_href = prev_href if vcs_root is not None: self.vcs_root = vcs_root super(VcsRoots, self).__init__(teamcity=teamcity) @property def count(self): return self._count @count.setter def count(self, count): self._count = count @property def href(self): return self._href @href.setter def href(self, href): self._href = href @property def next_href(self): return self._next_href @next_href.setter
MIT License
greenblitz/gbvision
gbvision/utils/tracker.py
Tracker.init
python
def init(self, frame, rect): return self.tracker.init(frame, tuple([int(max(x, 0)) for x in rect]))
Initialize the tracker :param frame: The frame :param rect: Given rectangle :return: True if initialization succeeded, false otherwise
https://github.com/greenblitz/gbvision/blob/7b6f1dfc09e28ea1e5e771af9cb222412d71c7bb/gbvision/utils/tracker.py#L60-L68
import cv2 class __EmptyTracker: def __init__(self): self.__rect = None def init(self, frame, rect): self.__rect = rect return True def update(self, frame): return True, self.__rect try: TRACKER_ALGORITHMS = { "BOOSTING": cv2.TrackerBoosting_create, "MIL": cv2.TrackerMIL_create, "KCF": cv2.TrackerKCF_create, "TLD": cv2.TrackerTLD_create, "MEDIANFLOW": cv2.TrackerMedianFlow_create, "GOTURN": cv2.TrackerGOTURN_create, "MOSSE": cv2.TrackerMOSSE_create, "CSRT": cv2.TrackerCSRT_create, "EMPTY": __EmptyTracker } except AttributeError: import sys print("[WARN] no trackers in your version of opencv, you may only use the empty tracker", file=sys.stderr) TRACKER_ALGORITHMS = { "EMPTY": __EmptyTracker } class Tracker: TRACKER_TYPE_BOOSTING = 'BOOSTING' TRACKER_TYPE_MIL = 'MIL' TRACKER_TYPE_KCF = 'KCF' TRACKER_TYPE_TLD = 'TLD' TRACKER_TYPE_MEDIANFLOW = 'MEDIANFLOW' TRACKER_TYPE_GOTURN = 'GOTURN' TRACKER_TYPE_MOSSE = 'MOSSE' TRACKER_TYPE_CSRT = 'CSRT' TRACKER_TYPE_EMPTY = 'EMPTY' def __init__(self, tracker_type="EMPTY"): tracker_type = tracker_type.upper() assert tracker_type in TRACKER_ALGORITHMS self.tracker = TRACKER_ALGORITHMS[tracker_type]() self.tracker_type = tracker_type
Apache License 2.0
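A hedged sketch of driving the Tracker wrapper above on webcam frames; update() is assumed to be forwarded to the underlying OpenCV tracker (it is not shown in the snippet), and 'KCF' only works if your OpenCV build ships it, so 'EMPTY' is the safe fallback.

import cv2

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
tracker = Tracker('EMPTY')               # or 'KCF', 'CSRT', ... if available
tracker.init(frame, (50, 60, 170, 140))  # (x, y, w, h), clamped to >= 0 by init
while ok:
    ok, frame = cap.read()
    found, rect = tracker.update(frame)  # assumed to mirror the OpenCV API
    if found:
        x, y, w, h = [int(v) for v in rect]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)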
srusskih/sublimejedi
sublime_jedi/tooltips/markdown.py
MarkDownTooltip._build_html
python
def _build_html(self, view, docstring): signature, docstring = self._prepare_signature(docstring) if signature: content = '```python\n{0}\n```\n'.format(signature) else: content = '' content += html.escape(docstring, quote=False) content = content.replace('\n\n', '\n\u00A0\n') content = content.replace(' ', '\u00A0\u00A0') content = mdpopups.md2html(view, content) keywords = ( 'Args:', 'Arguments:', 'Attributes:', 'Example:', 'Examples:', 'Note:', 'Raises:', 'Returns:', 'Yields:') for keyword in keywords: content = content.replace( keyword + '<br />', '<h6>' + keyword + '</h6>') return content
Convert python docstring to text ready to show in popup. :param view: sublime text view object :param docstring: python docstring as a string
https://github.com/srusskih/sublimejedi/blob/8a5054f0a053c8a8170c06c56216245240551d54/sublime_jedi/tooltips/markdown.py#L98-L127
import html import re import sublime try: if int(sublime.version()) < 3119: raise ImportError('Sublime Text 3119+ required.') import mdpopups if mdpopups.version() < (1, 9, 0): raise ImportError('mdpopups 1.9.0+ required.') _HAVE_MDPOPUPS = True except ImportError: _HAVE_MDPOPUPS = False from .base import Tooltip class MarkDownTooltip(Tooltip): @classmethod def guess(cls, docstring): return _HAVE_MDPOPUPS def _get_style(self): css = """ body { margin: 6px; } div.mdpopups { margin: 0; padding: 0; } .jedi h6 { font-weight: bold; color: var(--bluish); } .jedi .highlight { font-size: 1.1rem; } """ return css def _prepare_signature(self, docstring): pattern = ( '(?x)' '^([\w\. \t]+\.[ \t]*)?' '(\w+)' '[ \t]*(\([^\)]*\))' '(?:' '(\s*->\s*.*?)' '(--|$)' ')?' ) match = re.match(pattern, docstring, re.MULTILINE) if not match: return (None, docstring) path, func, args, note, comment = match.groups() types = ( 'basestring', 'unicode', 'byte', 'dict', 'float', 'int', 'list', 'tuple', 'str', 'set', 'frozenset') if any(func.startswith(s) for s in types): prefix = '' else: prefix = 'class ' if func.lstrip('_')[0].isupper() else 'def ' clean_args = ''.join( [each.group() for each in re.finditer( '[^\s"\']+|"([^"]*)"|\'([^\']*)', args)] ).replace(',', ', ') args_length_difference = len(args) - len(clean_args) signature = ''.join( (prefix, path or '', func or '', clean_args or '', note or '')) signature_length = len(signature) + args_length_difference signature = signature.replace('\n', ' ') docstring = docstring[ signature_length + len(comment or '') - len(prefix):] or '' return (signature, docstring.strip())
MIT License
utiasstars/pyslam
pyslam/pipelines/sparse.py
SparseVOPipeline.track
python
def track(self, trackframe): if len(self.keyframes) == 0: self.keyframes.append(trackframe) self.active_keyframe_idx = 0 active_keyframe = self.keyframes[0] else: active_keyframe = self.keyframes[self.active_keyframe_idx] T_track_ref = self._compute_frame_to_frame_motion( active_keyframe, trackframe) T_track_ref.normalize() self.T_c_w.append(T_track_ref.dot(active_keyframe.T_c_w)) se3_vec = SE3.log(T_track_ref) trans_dist = np.linalg.norm(se3_vec[0:3]) rot_dist = np.linalg.norm(se3_vec[3:6]) if trans_dist > self.keyframe_trans_thresh or rot_dist > self.keyframe_rot_thresh: if self.mode is 'map': trackframe.T_c_w = self.T_c_w[-1] self.keyframes.append(trackframe) print('Dropped new keyframe. ' 'Trans dist was {:.3f}. Rot dist was {:.3f}.'.format( trans_dist, rot_dist)) self.active_keyframe_idx += 1 print('Active keyframe idx: {}'.format( self.active_keyframe_idx))
Track an image. Args: trackframe : frame to track
https://github.com/utiasstars/pyslam/blob/5c0de9eaed4ebc7687b6d43079481c9a9336145b/pyslam/pipelines/sparse.py#L71-L109
import copy import numpy as np import cv2 from liegroups import SE3 import viso2 from pyslam.problem import Options, Problem from pyslam.sensors import StereoCamera, RGBDCamera from pyslam.residuals import ReprojectionMotionOnlyBatchResidual from pyslam.losses import L2Loss from pyslam.pipelines.keyframes import SparseStereoKeyframe, SparseRGBDKeyframe from pyslam.pipelines.ransac import FrameToFrameRANSAC class SparseVOPipeline: def __init__(self, camera, first_pose=SE3.identity()): self.camera = camera self.first_pose = first_pose self.keyframes = [] self.T_c_w = [first_pose] self.motion_options = Options() self.motion_options.allow_nondecreasing_steps = True self.motion_options.max_nondecreasing_steps = 5 self.motion_options.min_cost_decrease = 0.99 self.motion_options.max_iters = 30 self.motion_options.num_threads = 1 self.motion_options.linesearch_max_iters = 0 self.keyframe_trans_thresh = 3.0 self.keyframe_rot_thresh = 0.3 self.matcher_params = viso2.Matcher_parameters() self.matcher = viso2.Matcher(self.matcher_params) self.matcher_mode = 0 self.ransac = FrameToFrameRANSAC(self.camera) self.reprojection_stiffness = np.diag([1., 1., 1.]) self.mode = 'map' self.loss = L2Loss() def set_mode(self, mode): self.mode = mode if self.mode == 'track': self.active_keyframe_idx = 0 self.T_c_w = []
MIT License
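A hedged sketch of the keyframe criterion used in track(): split the se(3) log of the relative motion into translational and rotational magnitudes and compare them against the thresholds; the example motion is made up.

import numpy as np
from liegroups import SE3

T_track_ref = SE3.exp(np.array([0.5, 0.0, 0.0, 0.0, 0.0, 0.1]))
xi = SE3.log(T_track_ref)
trans_dist = np.linalg.norm(xi[0:3])     # translational part (metres)
rot_dist = np.linalg.norm(xi[3:6])       # rotational part (radians)
if trans_dist > 3.0 or rot_dist > 0.3:   # keyframe_trans/rot_thresh defaults
    print('drop new keyframe')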
rapid7/vm-console-client-python
rapid7vmconsole/models/report_storage.py
ReportStorage.location
python
def location(self, location): self._location = location
Sets the location of this ReportStorage. The location to store an additional copy of the report. This is a sub-path post-fixed to `$(install_dir)/nsc/reports/$(user)/`. Variables such as `$(report_name)`, `$(date)`, and `$(time)` may be used to generate the directory structure. # noqa: E501 :param location: The location of this ReportStorage. # noqa: E501 :type: str
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/report_storage.py#L65-L74
import pprint import re import six class ReportStorage(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'location': 'str', 'path': 'str' } attribute_map = { 'location': 'location', 'path': 'path' } def __init__(self, location=None, path=None): self._location = None self._path = None self.discriminator = None if location is not None: self.location = location if path is not None: self.path = path @property def location(self): return self._location @location.setter
MIT License
facebookresearch/rlstructures
tutorial/deprecated/tutorial_from_reinforce_to_a2c_s/agent.py
ReinforceAgent.__call__
python
def __call__(self, state, observation, agent_info=None, history=None): B = observation.n_elems() action_proba = self.model.action_model(observation["frame"]) dist = torch.distributions.Categorical(action_proba) action_sampled = dist.sample() action_max = action_proba.max(1)[1] smask = agent_info["stochastic"].float() action = masked_tensor(action_max, action_sampled, agent_info["stochastic"]) new_state = DictTensor({}) agent_do = DictTensor({"action": action}) return agent_do, new_state
Execute one step of the agent
https://github.com/facebookresearch/rlstructures/blob/ceb9be4bd979565976ac6b983194665d5c7c18df/tutorial/deprecated/tutorial_from_reinforce_to_a2c_s/agent.py#L31-L53
import torch import torch.nn as nn from rlstructures import DictTensor from rlstructures import S_Agent import time from rlstructures.dicttensor import masked_tensor, masked_dicttensor class ReinforceAgent(S_Agent): def __init__(self, model=None, n_actions=None): super().__init__() self.model = model self.n_actions = n_actions def update(self, state_dict): self.model.load_state_dict(state_dict) def initial_state(self, agent_info, B): return DictTensor({})
MIT License
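A hedged sketch of the per-environment stochastic/greedy switch performed in __call__: the sampled action is used where the 'stochastic' flag is set and the argmax action elsewhere. Plain torch.where reproduces what masked_tensor does here; the probabilities are made up.

import torch

action_proba = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
dist = torch.distributions.Categorical(action_proba)
action_sampled = dist.sample()
action_max = action_proba.max(1)[1]
stochastic = torch.tensor([True, False])   # per-environment flag
action = torch.where(stochastic, action_sampled, action_max)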
fkie/multimaster_fkie
fkie_master_sync/src/fkie_master_sync/master_sync.py
Main.__init__
python
def __init__(self): self.masters = {} self.masteruri = masteruri_from_master() self.hostname = get_hostname(self.masteruri) self._localname = '' self.__lock = threading.RLock() self._load_interface() self._check_host = rospy.get_param('~check_host', True) topic_names = interface_finder.get_changes_topic(masteruri_from_master(), check_host=self._check_host) self.sub_changes = dict() for topic_name in topic_names: rospy.loginfo("listen for updates on %s", topic_name) self.sub_changes[topic_name] = rospy.Subscriber(topic_name, MasterState, self._rosmsg_callback_master_state) self.__timestamp_local = None self.__own_state = None self.update_timer = None self.resync_timer = None self.own_state_getter = None self._timer_update_diagnostics = None self._join_threads = dict() rospy.Service('~get_sync_info', GetSyncInfo, self._rosservice_get_sync_info) rospy.on_shutdown(self.finish) self._current_diagnistic_level = None self.pub_diag = rospy.Publisher( "/diagnostics", DiagnosticArray, queue_size=10, latch=True) self.obtain_masters()
Creates a new instance. Finds the topic of the master_discovery node using U{fkie_master_discovery.interface_finder.get_changes_topic() <http://docs.ros.org/api/fkie_master_discovery/html/modules.html#interface-finder-module>}. The parameter C{~ignore_hosts} is also analyzed to exclude hosts from sync.
https://github.com/fkie/multimaster_fkie/blob/386ebf27f41bffdb1896bbcfdccb7c5290ac0eb4/fkie_master_sync/src/fkie_master_sync/master_sync.py#L62-L98
import socket import threading import time import uuid try: import xmlrpclib as xmlrpcclient except ImportError: import xmlrpc.client as xmlrpcclient from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue from fkie_multimaster_msgs.msg import MasterState from fkie_multimaster_msgs.srv import DiscoverMasters, GetSyncInfo, GetSyncInfoResponse import rospy from fkie_master_discovery.common import masteruri_from_master, resolve_url, read_interface, create_pattern, is_empty_pattern, get_hostname from fkie_master_discovery.master_info import MasterInfo import fkie_master_discovery.interface_finder as interface_finder from .sync_thread import SyncThread class Main(object): UPDATE_INTERVALL = 30
BSD 3-Clause New or Revised License
ethantkoenig/run_lambda
run_lambda/context.py
MockLambdaContext.memory_limit_in_mb
python
def memory_limit_in_mb(self): return self._memory_limit_in_mb
:property: Memory limit, in MB, as a string :rtype: str
https://github.com/ethantkoenig/run_lambda/blob/d08842acb939b6f1a75675a307031c2c7be0374e/run_lambda/context.py#L56-L61
import datetime from run_lambda import utils class MockLambdaContext(object): def __init__(self, function_name, function_version, invoked_function_arn, memory_limit_in_mb, aws_request_id, log_group_name, log_stream_name, identity=None, client_context=None, default_remaining_time_in_millis=None): self._function_name = function_name self._function_version = function_version self._invoked_function_arn = invoked_function_arn self._memory_limit_in_mb = memory_limit_in_mb self._aws_request_id = aws_request_id self._log_group_name = log_group_name self._log_stream_name = log_stream_name self._identity = identity self._client_context = client_context self._default_remaining_time_in_millis = default_remaining_time_in_millis self._expiration = None @property def function_name(self): return self._function_name @property def function_version(self): return self._function_version @property def invoked_function_arn(self): return self._invoked_function_arn @property
MIT License
ron89/thesaurus_query.vim
autoload/thesaurus_query/tq_common_lib.py
get_variable
python
def get_variable(v_name, default=None): if independent_session: return default if vim.eval("exists('b:'.'{0}')".format(v_name))=='0': if vim.eval("exists('g:'.'{0}')".format(v_name))=='0': if vim.eval("exists('{0}')".format(v_name))=='0': return default else: return vim.eval(v_name) else: return vim.eval('g:'+v_name) else: return vim.eval('b:'+v_name)
Get a variable from Vim. return: vim_variable # buffer variable is tried first, then the global variable default # if no variable exists, or if the module is used independently of a Vim session
https://github.com/ron89/thesaurus_query.vim/blob/65e67a2dc3290d1fbed59db2a3f58944a73e7251/autoload/thesaurus_query/tq_common_lib.py#L96-L114
import sys import urllib try: import vim independent_session = False except ImportError: independent_session = True if sys.version_info < (3,0): import urlparse urlpquote = urllib.quote urlpunquote = urllib.unquote urlpurlsplit = urlparse.urlsplit urlpurlunsplit = urlparse.urlunsplit else: import urllib.parse as urlparse urlpquote = urlparse.quote urlpunquote = urlparse.unquote urlpurlsplit = urlparse.urlsplit urlpurlunsplit = urlparse.urlunsplit def decode_utf_8(string_in): if sys.version_info < (3,0): return string_in.decode('utf-8') if not isinstance(string_in, unicode) else string_in return string_in.decode('utf-8') if not isinstance(string_in, str) else string_in def encode_utf_8(string_in): if sys.version_info < (3,0): return string_in.encode('utf-8') if isinstance(string_in, unicode) else string_in return string_in.encode('utf-8') if isinstance(string_in, str) else string_in def send_string_to_vim(string_in): if sys.version_info > (3,0): return decode_utf_8(string_in) return encode_utf_8(string_in) def fixurl(url): url = decode_utf_8(url) parsed = urlpurlsplit(url) userpass,at,hostport = parsed.netloc.rpartition('@') user,colon1,pass_ = userpass.partition(':') host,colon2,port = hostport.partition(':') scheme = parsed.scheme.encode('utf8') user = encode_utf_8(urlpquote(user.encode('utf8'))) colon1 = colon1.encode('utf8') pass_ = encode_utf_8(urlpquote(pass_.encode('utf8'))) at = at.encode('utf8') host = host.encode('idna') colon2 = colon2.encode('utf8') port = port.encode('utf8') if sys.version_info < (3,0): path = '/'.join( urlpquote(urlpunquote(pce).encode('utf8'),'') for pce in parsed.path.split('/') ) else: path = b'/'.join( encode_utf_8(urlpquote(urlpunquote(pce).encode('utf8'),b'')) for pce in parsed.path.split('/') ) query = encode_utf_8(urlpquote(urlpunquote(parsed.query).encode('utf8'),'=&?/')) fragment = encode_utf_8(urlpquote(urlpunquote(parsed.fragment).encode('utf8'))) if sys.version_info < (3,0): netloc = ''.join((user,colon1,pass_,at,host,colon2,port)) else: netloc = b''.join((user,colon1,pass_,at,host,colon2,port)) return urlparse.urlunsplit((scheme,netloc,path,query,fragment))
Apache License 2.0
gaa-uam/scikit-fda
skfda/misc/operators/_srvf.py
SRSF.transform
python
def transform(self, X: FDataGrid, y: None = None) -> FDataGrid: check_is_univariate(X) if self.output_points is None: output_points = X.grid_points[0] else: output_points = np.asarray(self.output_points) g = X.derivative() data_matrix = g(output_points)[..., 0] sign_g = np.sign(data_matrix) data_matrix = np.abs(data_matrix, out=data_matrix) data_matrix = np.sqrt(data_matrix, out=data_matrix) data_matrix *= sign_g if self.initial_value is None: a = X.domain_range[0][0] self.initial_value_ = X(a).reshape(X.n_samples, 1, X.dim_codomain) return X.copy(data_matrix=data_matrix, grid_points=output_points)
r""" Compute the square-root slope function (SRSF) transform. Let :math:`f : [a,b] \rightarrow \mathbb{R}` be an absolutely continuous function, the SRSF transform is defined as :footcite:`srivastava+klassen_2016_analysis_square`: .. math:: SRSF(f(t)) = sgn(f(t)) \sqrt{\dot f(t)|} = q(t) Args: X: Functions to be transformed. y: Present for API conventions. Returns: SRSF functions. Raises: ValueError: If functions are not univariate.
https://github.com/gaa-uam/scikit-fda/blob/1a6fc2c01e39871e09fd2ec6d0b14d378d6b069f/skfda/misc/operators/_srvf.py#L121-L167
from __future__ import annotations from typing import Optional import numpy as np import scipy.integrate from sklearn.base import BaseEstimator, TransformerMixin from ..._utils import check_is_univariate from ...representation import FDataGrid from ...representation._typing import ArrayLike from ._operators import Operator class SRSF( Operator[FDataGrid, FDataGrid], BaseEstimator, TransformerMixin, ): def __init__( self, output_points: Optional[ArrayLike] = None, initial_value: Optional[float] = None, ) -> None: self.output_points = output_points self.initial_value = initial_value def __call__(self, vector: FDataGrid) -> FDataGrid: return self.fit_transform(vector) def fit(self, X: FDataGrid, y: None = None) -> SRSF: return self
BSD 3-Clause New or Revised License
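A hedged NumPy sketch of the SRSF formula q(t) = sgn(f'(t)) * sqrt(|f'(t)|) applied to one sampled function, mirroring the transform() body without the FDataGrid machinery; the test function is arbitrary.

import numpy as np

t = np.linspace(0, 1, 101)
f = np.sin(2 * np.pi * t)
df = np.gradient(f, t)                 # numerical derivative
q = np.sign(df) * np.sqrt(np.abs(df))  # the SRSF of the sampled function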
mailhexu/tb2j
TB2J/myTB.py
MyTB.prepare_phase_rjri
python
def prepare_phase_rjri(self): self.rjminusri = self.xred[None, :, :] - self.xred[:, None, :]
The matrix P: P(i, j) = r(j)-r(i)
https://github.com/mailhexu/tb2j/blob/0463d18212b653021469f218b522b0f584a52450/TB2J/myTB.py#L297-L301
import os import numpy as np import copy from scipy.linalg import eigh, eigvalsh from scipy.sparse import csr_matrix from scipy.io import netcdf_file from collections import defaultdict from ase.io import read from ase.atoms import Atoms from TB2J.utils import auto_assign_basis_name from functools import lru_cache from TB2J.wannier import parse_ham, parse_xyz, parse_atoms class AbstractTB(): def __init__(self, R2kfactor, nspin, norb): self.is_siesta = False self.is_orthogonal = True self.R2kfactor = R2kfactor self.nspin = nspin self.norb = norb self.nbasis = nspin * norb self.xcart = None self.xred = None self._name = None @property def name(self): return self._name def get_hamR(self, R): raise NotImplementedError() def get_orbs(self): raise NotImplementedError() def HSE(self, kpt): raise NotImplementedError() def HS_and_eigen(self, kpts): raise NotImplementedError() class MyTB(AbstractTB): def __init__( self, nbasis, data=None, positions=None, sparse=False, ndim=3, nspin=1, double_site_energy=2.0, ): if data is not None: self.data = data else: self.data = defaultdict(lambda: np.zeros( (nbasis, nbasis), dtype=complex)) self._nbasis = nbasis self._nspin = nspin self._norb = nbasis // nspin self._ndim = ndim if positions is None: self._positions = np.zeros((nbasis, self.ndim)) else: self._positions = positions self.prepare_phase_rjri() self.sparse = sparse self.double_site_energy = double_site_energy if sparse: self._matrix = csr_matrix self.atoms = None self.R2kfactor = 2.0j * np.pi self.k2Rfactor = -2.0j * np.pi self.is_siesta = False self.is_orthogonal = True self._name='Wannier' def set_atoms(self, atoms): self.atoms = atoms @property def nspin(self): return self._nspin @property def norb(self): return self._norb @property def nbasis(self): return self._nbasis @property def ndim(self): return self._ndim @property def xcart(self): raise NotImplementedError() @property def xred(self): return self._positions @property def positions(self): return self._positions @property def onsite_energies(self): return self.data[(0, 0, 0)].diagonal() * 2 @property def hoppings(self): data = copy.deepcopy(self.data) np.fill_diagonal(data[(0, 0, 0)], 0.0) return data @staticmethod def read_from_wannier_dir(path, prefix, atoms=None, nls=True, groupby=None): nbasis, data = parse_ham(fname=os.path.join(path, prefix + '_hr.dat')) xcart, _, _ = parse_xyz(fname=os.path.join(path, prefix + '_centres.xyz')) cell = atoms.get_cell() xred = cell.scaled_positions(xcart) if groupby == 'spin': norb = nbasis // 2 xtmp = copy.deepcopy(xred) xred[::2] = xtmp[:norb] xred[1::2] = xtmp[norb:] for key, val in data.items(): dtmp = copy.deepcopy(val) data[key][::2, ::2] = dtmp[:norb, :norb] data[key][::2, 1::2] = dtmp[:norb, norb:] data[key][1::2, ::2] = dtmp[norb:, :norb] data[key][1::2, 1::2] = dtmp[norb:, norb:] ind, positions = auto_assign_basis_name(xred, atoms) m = MyTB(nbasis=nbasis, data=data, positions=xred) nm = m.shift_position(positions) nm.set_atoms(atoms) return nm @staticmethod def load_banddownfolder(path, prefix, atoms=None, nls=True, groupby='spin'): from banddownfolder.scdm.lwf import LWF lwf = LWF.load_nc(fname=os.path.join(path, f"{prefix}.nc")) nbasis = lwf.nwann nspin = 1 positions = lwf.wann_centers ndim = lwf.ndim H_mnR = defaultdict(lambda: np.zeros((nbasis, nbasis), dtype=complex)) for iR, R in enumerate(lwf.Rlist): R=tuple(R) val = lwf.HwannR[iR] if np.linalg.norm(R) < 0.001: H_mnR[R] = val/2.0 else: H_mnR[R] = val/2.0 m = MyTB(nbasis, data=H_mnR, nspin=nspin, ndim=ndim, positions=positions) m.atoms = 
atoms return m def gen_ham(self, k, convention=2): Hk = np.zeros((self.nbasis, self.nbasis), dtype='complex') if convention == 2: for R, mat in self.data.items(): phase = np.exp(self.R2kfactor * np.dot(k, R)) H = mat * phase Hk += H + H.conjugate().T elif convention == 1: for R, mat in self.data.items(): phase = np.exp(self.R2kfactor * np.dot(k, R + self.rjminusri)) H = mat * phase Hk += H + H.conjugate().T else: raise ValueError("convention should be either 1 or 2.") return Hk def solve(self, k, convention=2): Hk = self.gen_ham(k, convention=convention) return eigh(Hk) def HSE_k(self, kpt, convention=2): H = self.gen_ham(tuple(kpt), convention=convention) S = None evals, evecs = eigh(H) return H, S, evals, evecs def HS_and_eigen(self, kpts, convention=2): nk = len(kpts) hams = np.zeros((nk, self.nbasis, self.nbasis), dtype=complex) evals = np.zeros((nk, self.nbasis), dtype=float) evecs = np.zeros((nk, self.nbasis, self.nbasis), dtype=complex) for ik, k in enumerate(kpts): hams[ik], S, evals[ik], evecs[ik] = self.HSE_k(tuple(k), convention=convention) return hams, None, evals, evecs
BSD 2-Clause Simplified License
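A hedged sketch of the broadcasting trick in prepare_phase_rjri: for basis positions xred of shape (n, 3), the pairwise matrix P with P[i, j] = r_j - r_i falls out of two reshapes; the positions are made up.

import numpy as np

xred = np.array([[0.0, 0.0, 0.0],
                 [0.25, 0.25, 0.25]])
rjminusri = xred[None, :, :] - xred[:, None, :]  # shape (2, 2, 3)
print(rjminusri[0, 1])  # r_1 - r_0 -> [0.25 0.25 0.25]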
l0sg/nanoflow
nde/transforms/lu.py
LULinear.forward_no_cache
python
def forward_no_cache(self, inputs): lower, upper = self._create_lower_upper() outputs = F.linear(inputs, upper) outputs = F.linear(outputs, lower, self.bias) logabsdet = self.logabsdet() * inputs.new_ones(outputs.shape[0]) return outputs, logabsdet
Cost: output = O(D^2N) logabsdet = O(D) where: D = num of features N = num of inputs
https://github.com/l0sg/nanoflow/blob/da885389448835e9e1c555020c910540276c9dda/nde/transforms/lu.py#L56-L68
import numpy as np import torch from torch import nn from torch.nn import functional as F, init from nde.transforms.linear import Linear class LULinear(Linear): def __init__(self, features, using_cache=False, identity_init=True, eps=1e-3): super().__init__(features, using_cache) self.eps = eps self.lower_indices = np.tril_indices(features, k=-1) self.upper_indices = np.triu_indices(features, k=1) self.diag_indices = np.diag_indices(features) n_triangular_entries = ((features - 1) * features) // 2 self.lower_entries = nn.Parameter(torch.zeros(n_triangular_entries)) self.upper_entries = nn.Parameter(torch.zeros(n_triangular_entries)) self.unconstrained_upper_diag = nn.Parameter(torch.zeros(features)) self._initialize(identity_init) def _initialize(self, identity_init): init.zeros_(self.bias) if identity_init: init.zeros_(self.lower_entries) init.zeros_(self.upper_entries) constant = np.log(np.exp(1 - self.eps) - 1) init.constant_(self.unconstrained_upper_diag, constant) else: stdv = 1.0 / np.sqrt(self.features) init.uniform_(self.lower_entries, -stdv, stdv) init.uniform_(self.upper_entries, -stdv, stdv) init.uniform_(self.unconstrained_upper_diag, -stdv, stdv) def _create_lower_upper(self): lower = self.lower_entries.new_zeros(self.features, self.features) lower[self.lower_indices[0], self.lower_indices[1]] = self.lower_entries lower[self.diag_indices[0], self.diag_indices[1]] = 1. upper = self.upper_entries.new_zeros(self.features, self.features) upper[self.upper_indices[0], self.upper_indices[1]] = self.upper_entries upper[self.diag_indices[0], self.diag_indices[1]] = self.upper_diag return lower, upper
BSD 3-Clause New or Revised License
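A hedged sketch of the O(D) log-determinant that forward_no_cache relies on: with L unit-lower-triangular and U upper-triangular, det(W) = det(L) * det(U) = prod(diag(U)), so log|det W| reduces to a sum over the diagonal. The softplus constraint is an inference from the identity-init constant above, not confirmed by the snippet.

import torch
import torch.nn.functional as F

unconstrained_diag = torch.randn(4)
upper_diag = F.softplus(unconstrained_diag) + 1e-3  # eps keeps the diagonal positive
logabsdet = torch.sum(torch.log(upper_diag))        # O(D), no O(D^3) determinant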
bigmlcom/bigmler
bigmler/dispatcher.py
clear_log_files
python
def clear_log_files(log_files): for log_file in log_files: try: open(log_file, 'wb', 0).close() except IOError: pass
Clear all contents in log files
https://github.com/bigmlcom/bigmler/blob/91973ca1e752954302bf26bb22aa6874dc34ce69/bigmler/dispatcher.py#L105-L113
import sys import os import re import gc from functools import partial import bigml.api from bigml.model import Model, to_prediction from bigml.basemodel import retrieve_resource from bigml.fields import Fields import bigmler.utils as u import bigmler.resourcesapi.common as r import bigmler.resourcesapi.datasets as rds import bigmler.resourcesapi.ensembles as rens import bigmler.resourcesapi.models as rmod import bigmler.resourcesapi.batch_predictions as rbp import bigmler.labels as l import bigmler.processing.args as a import bigmler.processing.sources as ps import bigmler.processing.datasets as pd import bigmler.processing.models as pm from bigmler.evaluation import evaluate, cross_validate from bigmler.defaults import DEFAULTS_FILE from bigmler.prediction import predict, combine_votes, remote_predict from bigmler.prediction import OTHER, COMBINATION from bigmler.reports import clear_reports, upload_reports from bigmler.command import get_context from bigmler.command import COMMAND_LOG, DIRS_LOG, SESSIONS_LOG LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG] MINIMUM_MODEL = "full=false" DEFAULT_OUTPUT = 'predictions.csv' SETTINGS = { "command_log": COMMAND_LOG, "sessions_log": SESSIONS_LOG, "dirs_log": DIRS_LOG, "default_output": DEFAULT_OUTPUT, "defaults_file": DEFAULTS_FILE} def belongs_to_ensemble(model): return ('object' in model and 'ensemble' in model['object'] and model['object']['ensemble']) def get_ensemble_id(model): if 'object' in model and 'ensemble_id' in model['object']: return "ensemble/%s" % model['object']['ensemble_id'] return None def get_metadata(resource, key, default_value): if ('object' in resource and 'user_metadata' in resource['object'] and key in resource['object']['user_metadata']): return resource['object']['user_metadata'][key] return default_value def has_source(args): return (args.training_set or args.source or args.source_file or args.train_stdin)
Apache License 2.0
alexpof/opycleid
opycleid/q_categoryaction.py
QMorphism.set_name
python
def set_name(self,name): if not len(name): raise Exception("The specified morphism name is empty") self.name = name
Sets the name of the morphism Parameters ---------- name: a string representing the new name of the morphism Returns ------- None
https://github.com/alexpof/opycleid/blob/5abffca1a52c0d221dcb46162cb0678f265204b7/opycleid/q_categoryaction.py#L412-L425
import numpy as np import itertools import time from .categoryaction import CatObject class MultQ(object): def __init__(self,x): if x<0 or x>1: raise Exception("Real number should be comprised between 0 and 1") self.x = x @staticmethod def Unit(): return MultQ(1.0) @staticmethod def Zero(): return MultQ(0.0) def __mul__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid MultQ") return self.__class__(self.x*rhs.x) def __add__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid MultQ") return self.__class__(max([self.x,rhs.x])) def __eq__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid MultQ") return self.x==rhs.x def __lt__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid MultQ") return self.x<rhs.x def __le__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid MultQ") return self.x<=rhs.x def __str__(self): return str(self.x) def __repr__(self): return "MultQ({})".format(self.x) class IntvQ(object): def __init__(self,x): if x<0 or x>1: raise Exception("Real number should be comprised between 0 and 1") self.x = x @staticmethod def Unit(): return IntvQ(1.0) @staticmethod def Zero(): return IntvQ(0.0) def __mul__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid IntvQ") return self.__class__(min([self.x,rhs.x])) def __add__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid IntvQ") return self.__class__(max([self.x,rhs.x])) def __eq__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid IntvQ") return self.x==rhs.x def __lt__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid IntvQ") return self.x<rhs.x def __le__(self,rhs): if not isinstance(rhs,self.__class__): raise Exception("RHS is not a valid IntvQ") return self.x<=rhs.x def __str__(self): return str(self.x) def __repr__(self): return "IntvQ({})".format(self.x) class Lin3Q(IntvQ): def __init__(self,x): if not (x==0 or x==0.5 or x==1): raise Exception("The possibles values are 0, 1/2, and 1") super().__init__(x) @staticmethod def Unit(): return Lin3Q(1.0) @staticmethod def Zero(): return Lin3Q(0.0) def __str__(self): return str(self.x) def __repr__(self): return "Lin3Q({})".format(self.x) class QMorphism(object): def __init__(self,name,source,target,qtype=None,mapping=None): if not isinstance(source,CatObject): raise Exception("Source is not a valid CatObject class\n") if not isinstance(target,CatObject): raise Exception("Target is not a valid CatObject class\n") if qtype is None: raise Exception("Type of quantale should be specified") self.name = name self.source = source self.target = target self.qtype = qtype if mapping is not None: if isinstance(mapping,np.ndarray)==False: self.set_mapping(mapping) else: self.set_mapping_matrix(mapping)
BSD 3-Clause New or Revised License
binance/binance-connector-python
binance/spot/account.py
get_oco_open_orders
python
def get_oco_open_orders(self, **kwargs): url_path = "/api/v3/openOrderList" return self.sign_request("GET", url_path, {**kwargs})
Query Open OCO (USER_DATA) GET /api/v3/openOrderList https://binance-docs.github.io/apidocs/spot/en/#query-open-oco-user_data Keyword Args: recvWindow (int, optional): The value cannot be greater than 60000
https://github.com/binance/binance-connector-python/blob/9e7c819c91a1bf153b0215fcc7a8169f984dd543/binance/spot/account.py#L303-L315
from binance.lib.utils import check_required_parameter from binance.lib.utils import check_required_parameters def new_order_test(self, symbol: str, side: str, type: str, **kwargs): check_required_parameters([[symbol, "symbol"], [side, "side"], [type, "type"]]) params = {"symbol": symbol, "side": side, "type": type, **kwargs} url_path = "/api/v3/order/test" return self.sign_request("POST", url_path, params) def new_order(self, symbol: str, side: str, type: str, **kwargs): check_required_parameters([[symbol, "symbol"], [side, "side"], [type, "type"]]) params = {"symbol": symbol, "side": side, "type": type, **kwargs} url_path = "/api/v3/order" return self.sign_request("POST", url_path, params) def cancel_order(self, symbol: str, **kwargs): check_required_parameter(symbol, "symbol") url_path = "/api/v3/order" payload = {"symbol": symbol, **kwargs} return self.sign_request("DELETE", url_path, payload) def cancel_open_orders(self, symbol: str, **kwargs): check_required_parameter(symbol, "symbol") url_path = "/api/v3/openOrders" payload = {"symbol": symbol, **kwargs} return self.sign_request("DELETE", url_path, payload) def get_order(self, symbol, **kwargs): check_required_parameter(symbol, "symbol") url_path = "/api/v3/order" payload = {"symbol": symbol, **kwargs} return self.sign_request("GET", url_path, payload) def get_open_orders(self, symbol=None, **kwargs): url_path = "/api/v3/openOrders" payload = {"symbol": symbol, **kwargs} return self.sign_request("GET", url_path, payload) def get_orders(self, symbol: str, **kwargs): check_required_parameter(symbol, "symbol") url_path = "/api/v3/allOrders" payload = {"symbol": symbol, **kwargs} return self.sign_request("GET", url_path, payload) def new_oco_order( self, symbol: str, side: str, quantity: float, price: float, stopPrice: float, **kwargs ): check_required_parameters( [ [symbol, "symbol"], [side, "side"], [quantity, "quantity"], [price, "price"], [stopPrice, "stopPrice"], ] ) params = { "symbol": symbol, "side": side, "quantity": quantity, "price": price, "stopPrice": stopPrice, **kwargs, } url_path = "/api/v3/order/oco" return self.sign_request("POST", url_path, params) def cancel_oco_order(self, symbol, **kwargs): check_required_parameter(symbol, "symbol") url_path = "/api/v3/orderList" payload = {"symbol": symbol, **kwargs} return self.sign_request("DELETE", url_path, payload) def get_oco_order(self, **kwargs): url_path = "/api/v3/orderList" return self.sign_request("GET", url_path, {**kwargs}) def get_oco_orders(self, **kwargs): url_path = "/api/v3/allOrderList" return self.sign_request("GET", url_path, {**kwargs})
MIT License
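A hedged usage sketch for get_oco_open_orders via the package's Spot client; the key/secret values are placeholders, and the exact constructor keywords may differ across binance-connector versions.

from binance.spot import Spot

client = Spot(key='<api_key>', secret='<api_secret>')
print(client.get_oco_open_orders(recvWindow=5000))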
pinax/pinax-teams
pinax/teams/utils.py
create_teams
python
def create_teams(obj, user, access): for field_name, access_types in access.items(): id_field = f"{field_name}_id" if hasattr(obj, id_field) and getattr(obj, id_field) is None: next_pk = next(iter(instance.pk for instance in obj.__class__.objects.order_by("-pk")), 0) + 1 team_name = "{} for {} {}".format( field_name, obj._meta.model_name, next_pk) new_team = Team( name=team_name, member_access=access_types[0], manager_access=access_types[1], creator=user) new_team.save() setattr(obj, field_name, new_team) return obj
Will create new teams associated with the referenced obj and set the resulting relation to the correct attribute. The naming convention for team foreign keys is pluralname_team (for example, instructors_team). This function will take the access dictionary and apply the specified access types as follows: access = { 'trainees_team': ('open', 'add someone'), } Where the key name is the team name and the tuple contains the access types for member access and manager access respectively. If the foreign key already has a value associated with it, this function will NOT create a new team to replace it.
https://github.com/pinax/pinax-teams/blob/dd75e1c2d66fff927c349a65bf34150b1a08086c/pinax/teams/utils.py#L4-L43
from .models import Team
MIT License
lamdav/audioconverter
AudioConverter/converter.py
converter
python
def converter(conversion_job: ConversionJob): logger = conversion_job.logger output_format = conversion_job.output_format[1:] output_path = conversion_job.output_path verbose_flag = conversion_job.verbose audio_file = conversion_job.file_path audio_name = audio_file.name[: audio_file.name.rfind(".")] converted_name = "{}.{}".format(audio_name, output_format) logger.verbose( "Converting {} to {}".format(audio_name, output_format), verbose_flag ) audio = AudioSegment.from_file(audio_file.as_posix(), audio_file.suffix[1:]) output_name = output_path.joinpath(converted_name) audio.export(output_name.as_posix(), format=output_format, bitrate="192k") logger.verbose("{} converted".format(audio_name), verbose_flag)
Multiprocessing worker function. Expects conversion_job to have attributes: output_format - String of the form '.mp3' (must include '.' prefix) verbose - Boolean of verbose mode logging output_path - Path object of the output directory location file_path - Path object of the file to be converted Converts the audio file_path to the desired output_format of the same name in the output_path.
https://github.com/lamdav/audioconverter/blob/b9df0159f926e36d9ee554d4413d9c86f8679769/AudioConverter/converter.py#L175-L206
import pathlib from multiprocessing import Pool from typing import Optional, Sequence import click from pydub import AudioSegment AUDIO_EXTENSIONS = [ ".aiff", ".flac", ".m4a", ".mp3", ".mp4", ".wav", ] AUDIO_EXTENSIONS_SET = set(AUDIO_EXTENSIONS) class Logger(object): def success(self, message: str): self.display("[ SUCCESS ] {}".format(message), "green") def info(self, message: str): self.display("[ INFO ] {}".format(message), "blue") def verbose(self, message: str, verbose_flag: bool): if verbose_flag: self.display("[ DEBUG ] {}".format(message), "cyan") def error(self, message: str): self.display("[ ERROR ] {}".format(message), "red") @staticmethod def display(formatted_message, color): click.secho(formatted_message, fg=color) class Config(object): __slots__ = ["verbose", "logger"] def __init__(self, verbose: bool): self.verbose = verbose self.logger = Logger() class ConversionJob(object): __slots__ = ["output_format", "verbose", "output_path", "file_path", "logger"] def __init__( self, output_format: str, verbose: bool, output_path: pathlib.Path, file_path: pathlib.Path, logger: Optional[Logger] = None, ): self.output_format = output_format self.verbose = verbose self.output_path = output_path self.file_path = file_path self.logger = logger if logger is not None else Logger() @click.group() @click.version_option() @click.option("--verbose", "-v", type=bool, is_flag=True, help="Enable Verbose Logging") @click.pass_context def cli(context: click.Context, verbose: bool): context.obj = Config(verbose) @cli.command() @click.argument( "input_directory", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=str), ) @click.argument( "output_directory", type=click.Path(file_okay=False, dir_okay=True, path_type=str) ) @click.option( "--output-format", "-o", type=click.Choice(AUDIO_EXTENSIONS), default=".mp3", help="Target output format", ) @click.option( "--workers", "-w", type=int, default=5, help="Number of worker processes to run" ) @click.pass_obj def convert( config: Config, input_directory: str, output_directory: str, output_format: str, workers: int, ): logger = config.logger logger.info("Starting conversion of {}.".format(input_directory)) input_path = pathlib.Path(input_directory) output_path = pathlib.Path(output_directory) logger.verbose("Input : {}".format(input_path.as_posix()), config.verbose) logger.verbose("Output: {}".format(output_path.as_posix()), config.verbose) logger.verbose("Workers: {}".format(workers), config.verbose) if not output_path.exists(): logger.verbose( "Creating output directory {}".format(output_path.as_posix()), config.verbose, ) output_path.mkdir(exist_ok=True) audio_files = get_audio_files(input_path) audio_files = [ ConversionJob( output_format=output_format, verbose=config.verbose, output_path=output_path, file_path=file_path, logger=logger, ) for file_path in audio_files ] with Pool(processes=workers) as worker: worker.map(converter, audio_files) logger.success("See {} for converted audio.".format(output_path.as_posix())) def get_audio_files(input_path: pathlib.Path) -> Sequence[pathlib.Path]: audio_files = [] for input_file in input_path.iterdir(): if input_file.is_file() and input_file.suffix.lower() in AUDIO_EXTENSIONS_SET: audio_files.append(input_file) elif input_file.is_dir() and not input_file.is_symlink(): audio_files.extend(get_audio_files(input_file)) return audio_files
MIT License
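A hedged standalone sketch of what a single converter() job does with pydub, outside the multiprocessing pool; the file names are placeholders, and ffmpeg must be installed for pydub to decode/encode.

from pydub import AudioSegment

audio = AudioSegment.from_file('song.wav', 'wav')
audio.export('song.mp3', format='mp3', bitrate='192k')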
zihenglin/lstm-mobility-model
lstm_mobility_model/two_layer_latlng_location/tensor_builder.py
TwoLayerTensorBuilder._get_lstm_dimensions
python
def _get_lstm_dimensions(self): return {'lstm_layer_1': self.lstm_units, 'lstm_layer_2': self.lstm_units}
Get LSTM dimensions. Returns: dict(str -> list(int)): tensor names and dimensions map as dictionaries.
https://github.com/zihenglin/lstm-mobility-model/blob/36f21b46a5c9382f90ece561a3efb1885be3c74f/lstm_mobility_model/two_layer_latlng_location/tensor_builder.py#L85-L92
from lstm_mobility_model.components import TensorBuilder from lstm_mobility_model.config import (Features, OptionalFeatures, Constants) class TwoLayerTensorBuilder(TensorBuilder): DEFAULT_PARAMETERS_PER_MIXTURE = 10 def __init__(self, lstm_units, number_of_mixtures, lstm_dropout=None, batch_size=None, parameters_per_mixture=None, context_dimensions=1): TensorBuilder.__init__(self, lstm_dropout=lstm_dropout, batch_size=batch_size) self.lstm_units = lstm_units self.number_of_mixtures = number_of_mixtures self.layer_2_output_parameters = self.number_of_mixtures * TwoLayerTensorBuilder.DEFAULT_PARAMETERS_PER_MIXTURE self.context_dimensions = context_dimensions self.build_placeholders(self._get_placeholder_dimensions()) self.build_trainable_variables(self._get_trainable_tensor_dimensions()) self.build_lstm_layers(self._get_lstm_dimensions()) def _get_placeholder_dimensions(self): return {Features.contex_features.name: [None, Constants.INPUT_LENGTH, self.context_dimensions], Features.lat.name: [None, Constants.INPUT_LENGTH, 1], Features.lon.name: [None, Constants.INPUT_LENGTH, 1], Features.start_hour_since_day.name: [None, Constants.INPUT_LENGTH, 1], Features.duration.name: [None, Constants.INPUT_LENGTH, 1], Features.categorical_features.name: [None, Constants.INPUT_LENGTH, 1], Features.contex_features.name: [None, Constants.INPUT_LENGTH, 1], Features.mask.name: [None, Constants.INPUT_LENGTH], Features.initial_activity_type_input.name: [None, 1], OptionalFeatures.is_observed.name: [None, Constants.INPUT_LENGTH, 1]} def _get_trainable_tensor_dimensions(self): return {'output_embedding_layer_1': [self.lstm_units, Constants.NUMBER_OF_CATEGORIES], 'output_bias_layer_1': [1, Constants.NUMBER_OF_CATEGORIES], 'output_embedding_layer_2': [self.lstm_units, self.layer_2_output_parameters], 'output_bias_layer_2': [1, self.layer_2_output_parameters]}
MIT License
shibdib/firetail
firetail/extensions/char_lookup/char_lookup.py
CharLookup._char
python
async def _char(self, ctx):
    global character_data
    if len(ctx.message.content.split()) == 1:
        dest = ctx.author if ctx.bot.config.dm_only else ctx
        return await dest.send('**ERROR:** Use **!help char** for more info.')
    character_name = ctx.message.content.split(' ', 1)[1]
    if '@' in character_name:
        member = ctx.guild.get_member(
            int(character_name.replace('<@', '').replace('>', '').replace('!', '')))
        character_name = member.display_name
    self.logger.info('CharLookup - {} requested character info for the user {}'.format(
        str(ctx.message.author), character_name))
    character_id = await ctx.bot.esi_data.esi_search(character_name, 'character')
    try:
        if len(character_id['character']) > 1:
            for eve_id in character_id['character']:
                character_data = await ctx.bot.esi_data.character_info(eve_id)
                if character_data['name'].lower().strip().replace("'", '1') == \
                        character_name.lower().strip().replace("'", '1'):
                    character_id = eve_id
                    character_data = await ctx.bot.esi_data.character_info(character_id)
                    character_name = character_data['name']
                    break
        else:
            character_id = character_id['character'][0]
            character_data = await ctx.bot.esi_data.character_info(character_id)
            character_name = character_data['name']
    except Exception:
        dest = ctx.author if ctx.bot.config.dm_only else ctx
        self.logger.info('CharLookup ERROR - {} could not be found'.format(character_name))
        return await dest.send('**ERROR:** No User Found With The Name {}'.format(character_name))
    latest_killmail, latest_system_id = await self.zkill_last_mail(character_id)
    ship_lost = 'No Killmails Found'
    solar_system_name = 'N/A'
    if latest_killmail is not None:
        if 'ship_type_id' in latest_killmail:
            ship_lost_raw = await ctx.bot.esi_data.type_info_search(latest_killmail['ship_type_id'])
            ship_lost = ship_lost_raw['name']
        else:
            ship_lost = 'N/A'
        solar_system_info = await ctx.bot.esi_data.system_info(latest_system_id)
        solar_system_name = solar_system_info['name']
    victim_corp_raw = await ctx.bot.esi_data.corporation_info(character_data['corporation_id'])
    victim_corp = victim_corp_raw['name']
    zkill_stats = await self.zkill_stats(character_id)
    firetail_intel = await self.firetail_intel(character_id, character_name, zkill_stats)
    zkill_link = 'https://zkillboard.com/character/{}/'.format(character_id)
    eve_prism = 'http://eve-prism.com/?view=character&name={}'.format(
        urllib.parse.quote(character_name))
    eve_who = 'https://evewho.com/pilot/{}'.format(urllib.parse.quote(character_name))
    try:
        if zkill_stats['allTimeSum']:
            total_kills = '{0:}'.format(zkill_stats['allTimeSum'])
            danger_ratio = zkill_stats['dangerRatio']
            gang_ratio = zkill_stats['gangRatio']
            solo_kills = '{0:}'.format(zkill_stats['soloKills'])
        else:
            total_kills = 'N/A'
            danger_ratio = 'N/A'
            gang_ratio = 'N/A'
            solo_kills = 'N/A'
        try:
            victim_alliance_raw = await ctx.bot.esi_data.alliance_info(character_data['alliance_id'])
            victim_alliance = victim_alliance_raw['name']
        except Exception:
            victim_alliance = None
        embed = make_embed(guild=ctx.guild,
                           title_url="https://zkillboard.com/character/" + str(character_id) + "/",
                           title=character_data['name'],
                           content='[ZKill]({}) / [EveWho]({}) / [EVE-Prism]({})'.format(
                               zkill_link, eve_who, eve_prism))
        embed.set_footer(icon_url=ctx.bot.user.avatar_url, text="Provided Via Firetail Bot")
        embed.set_thumbnail(
            url="https://imageserver.eveonline.com/Character/" + str(character_id) + "_64.jpg")
        if victim_alliance:
            embed.add_field(name="Firetail Intel Report", value=firetail_intel, inline=False)
            embed.add_field(name="General Info",
                            value='Alliance:\nCorporation:\nLast Seen Location:\nLast Seen Ship:',
                            inline=True)
            embed.add_field(name="-",
                            value='{}\n{}\n{}\n{}'.format(victim_alliance, victim_corp,
                                                          solar_system_name, ship_lost),
                            inline=True)
            embed.add_field(name="PVP Info",
                            value='Threat Rating:\nGang Ratio:\nSolo Kills:\nTotal Kills:',
                            inline=True)
            embed.add_field(name="-",
                            value='{}%\n{}%\n{}\n{}'.format(danger_ratio, gang_ratio,
                                                            solo_kills, total_kills),
                            inline=True)
        else:
            embed.add_field(name="Firetail Intel Report", value=firetail_intel, inline=False)
            embed.add_field(name="General Info",
                            value='Corporation:\nLast Seen System:\nLast Seen Ship:', inline=True)
            embed.add_field(name="-",
                            value='{}\n{}\n{}'.format(victim_corp, solar_system_name, ship_lost),
                            inline=True)
            embed.add_field(name="PVP Info",
                            value='Threat Rating:\nGang Ratio:\nSolo Kills:\nTotal Kills:',
                            inline=True)
            embed.add_field(name="-",
                            value='{}%\n{}%\n{}\n{}'.format(danger_ratio, gang_ratio,
                                                            solo_kills, total_kills),
                            inline=True)
        dest = ctx.author if ctx.bot.config.dm_only else ctx
        await dest.send(embed=embed)
        if ctx.bot.config.delete_commands:
            await ctx.message.delete()
    except Exception:
        try:
            victim_alliance_raw = await ctx.bot.esi_data.alliance_info(character_data['alliance_id'])
            victim_alliance = victim_alliance_raw['name']
        except Exception:
            victim_alliance = None
        embed = make_embed(guild=ctx.guild,
                           title_url="https://zkillboard.com/character/" + str(character_id) + "/",
                           title=character_data['name'],
                           content='[ZKill]({}) / [EveWho]({}) / [EVE-Prism]({})'.format(
                               zkill_link, eve_who, eve_prism))
        embed.set_footer(icon_url=ctx.bot.user.avatar_url, text="Provided Via Firetail Bot")
        embed.set_thumbnail(
            url="https://imageserver.eveonline.com/Character/" + str(character_id) + "_64.jpg")
        if victim_alliance:
            embed.add_field(name="Firetail Intel Report", value=firetail_intel, inline=False)
            embed.add_field(name="General Info",
                            value='Alliance:\nCorporation:\nLast Seen Location:\nLast Seen Ship:',
                            inline=True)
            embed.add_field(name="-",
                            value='{}\n{}\n{}\n{}'.format(victim_alliance, victim_corp,
                                                          solar_system_name, ship_lost),
                            inline=True)
        else:
            embed.add_field(name="Firetail Intel Report", value=firetail_intel, inline=False)
            embed.add_field(name="General Info",
                            value='Corporation:\nLast Seen System:\nLast Seen Ship:', inline=True)
            embed.add_field(name="-",
                            value='{}\n{}\n{}'.format(victim_corp, solar_system_name, ship_lost),
                            inline=True)
        dest = ctx.author if ctx.bot.config.dm_only else ctx
        await dest.send(embed=embed)
        if ctx.bot.config.delete_commands:
            await ctx.message.delete()
Shows character information. Do '!char name'.
https://github.com/shibdib/firetail/blob/afadce977bcdf3ee481c68e6aca8bcfcce747d8d/firetail/extensions/char_lookup/char_lookup.py#L22-L165
import json
import urllib

import aiohttp
from discord.ext import commands

from firetail.utils import make_embed
from firetail.core import checks


class CharLookup:
    def __init__(self, bot):
        self.bot = bot
        self.config = bot.config
        self.logger = bot.logger

    @commands.command(name='char')
    @checks.spam_check()
    @checks.is_whitelist()
MIT License
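A minimal sketch of the mention-parsing step `_char` performs before any ESI lookup; the raw message string below is hypothetical, and no Discord connection is needed:

# Sketch only: mirrors the replace() chain `_char` uses to turn a Discord
# mention into a numeric member id. The raw message content is hypothetical.
raw = "!char <@!123456789>"
character_name = raw.split(' ', 1)[1]
if '@' in character_name:
    member_id = int(character_name.replace('<@', '').replace('>', '').replace('!', ''))
    print(member_id)  # 123456789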
tlc-pack/tenset
python/tvm/relay/op/contrib/ethosn.py
depth_to_space
python
def depth_to_space(expr):
    if not ethosn_available():
        return False

    attrs, args = expr.attrs, expr.args
    depth = tvm.relay.nn.depth_to_space(*args, **attrs)
    if not support.depth_to_space(depth):
        return False

    return True
Check if a depth_to_space is supported by Ethos-N.
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/python/tvm/relay/op/contrib/ethosn.py#L253-L263
from enum import Enum import tvm.ir from tvm.relay import transform from tvm.relay.build_module import bind_params_by_name from ...dataflow_pattern import wildcard, is_op, is_constant from ... import qnn as _qnn from .register import register_pattern_table from . import _ethosn as support class Available(Enum): UNAVAILABLE = 0 SW_ONLY = 1 SW_AND_HW = 2 def __bool__(self): return self != Available.UNAVAILABLE def ethosn_available(): if not tvm.get_global_func("relay.ethos-n.query", True): print("skip because Ethos-N module is not available") return Available.UNAVAILABLE hw = tvm.get_global_func("relay.ethos-n.query")() return Available.SW_AND_HW if hw else Available.SW_ONLY def partition_for_ethosn(mod, params=None): if params: mod["main"] = bind_params_by_name(mod["main"], params) seq = tvm.transform.Sequential( [ transform.InferType(), transform.MergeComposite(pattern_table()), transform.AnnotateTarget("ethos-n"), transform.MergeCompilerRegions(), transform.PartitionGraph(), ] ) return seq(mod) @register_pattern_table("ethos-n") def pattern_table(): def qnn_conv_pattern(): pattern = is_op("nn.pad")(wildcard()) | wildcard() pattern = is_op("qnn.conv2d")( pattern, is_constant(), is_constant(), is_constant(), is_constant(), is_constant() ) pattern = is_op("nn.bias_add")(pattern, is_constant()) pattern = is_op("qnn.requantize")( pattern, is_constant(), is_constant(), is_constant(), is_constant() ) return pattern def qnn_fc_pattern(): pattern = is_op("qnn.dense")( wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant() ) pattern = is_op("nn.bias_add")(pattern, is_constant()) pattern = is_op("qnn.requantize")( pattern, is_constant(), is_constant(), is_constant(), is_constant() ) return pattern def qnn_avg_pool2d_pattern(): pattern = is_op("cast")(wildcard()) pattern = is_op("nn.avg_pool2d")(pattern) pattern = is_op("cast")(pattern) return pattern def qnn_sigmoid_pattern(): pattern = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant()) pattern = is_op("sigmoid")(pattern) pattern = is_op("qnn.quantize")(pattern, is_constant(), is_constant()) return pattern def check_conv2d(extract): if not ethosn_available(): return False return support.conv2d(extract) def check_fc(extract): if not ethosn_available(): return False return support.fc(extract) def check_avg_pool2d(extract): if not ethosn_available(): return False return support.avg_pool2d(extract) def check_sigmoid(extract): if not ethosn_available(): return False if extract.attrs.out_dtype != "uint8": return False return support.sigmoid(extract) return [ ("ethos-n.qnn_conv2d", qnn_conv_pattern(), check_conv2d), ("ethos-n.qnn_avg_pool2d", qnn_avg_pool2d_pattern(), check_avg_pool2d), ("ethos-n.qnn_sigmoid", qnn_sigmoid_pattern(), check_sigmoid), ("ethos-n.qnn_fc", qnn_fc_pattern(), check_fc), ] def _is_ethosn_composite(node): if isinstance(node, tvm.relay.expr.Call) and isinstance(node.op, tvm.relay.Function): if "Composite" in node.op.attrs: comp_name = node.op.attrs["Composite"] return comp_name.split(".")[0] == "ethos-n" return False @tvm.ir.register_op_attr("nn.max_pool2d", "target.ethos-n") def max_pool2d(expr): if not ethosn_available(): return False attrs, args = expr.attrs, expr.args pool = tvm.relay.nn.max_pool2d(*args, **attrs) return support.max_pool2d(pool) @tvm.ir.register_op_attr("reshape", "target.ethos-n") def reshape(expr): if not ethosn_available(): return False attrs, args = expr.attrs, expr.args if not _is_ethosn_composite(args[0]): return False rs = tvm.relay.op.reshape(*args, 
attrs["newshape"]) return support.reshape(rs) @tvm.ir.register_op_attr("qnn.add", "target.ethos-n") def qnn_add(expr): if not ethosn_available(): return False args = expr.args add = _qnn.op.add(*args) return support.addition(add) @tvm.ir.register_op_attr("qnn.concatenate", "target.ethos-n") def qnn_concatenate(expr): if not ethosn_available(): return False attrs, args = expr.attrs, expr.args conc = _qnn.op.concatenate(*args, **attrs) if not support.concatenate(conc): return False min_range = 1e9 max_range = -1e9 qnn_params = [] for i in range(len(args[1].fields)): scale = args[1].fields[i].data.asnumpy() zero_point = args[2].fields[i].data.asnumpy() min_range = min(-1 * zero_point * scale, min_range) max_range = max((255 - zero_point) * scale, max_range) qnn_params.append((scale, zero_point)) scale = (max_range - min_range) / 255 zero_point = int(-min_range / scale) if (scale, zero_point) in qnn_params: return True return False @tvm.ir.register_op_attr("split", "target.ethos-n") def split(expr): if not ethosn_available(): return False attrs, args = expr.attrs, expr.args if isinstance(attrs["indices_or_sections"], tvm.tir.IntImm): sp = tvm.relay.split( *args, indices_or_sections=attrs["indices_or_sections"].value, axis=attrs["axis"] ) else: sp = tvm.relay.split( *args, indices_or_sections=attrs["indices_or_sections"], axis=attrs["axis"] ) if not support.split(sp.astuple()): return False return True @tvm.ir.register_op_attr("nn.depth_to_space", "target.ethos-n")
Apache License 2.0
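A hedged usage sketch of the partitioning entry point shown in the context above; `mod` and `params` stand for a hypothetical Relay module and its parameters, and TVM with the Ethos-N contrib module is assumed to be installed:

# Sketch under assumptions: `mod`/`params` are a hypothetical Relay module.
from tvm.relay.op.contrib.ethosn import ethosn_available, partition_for_ethosn

if ethosn_available():  # SW_ONLY and SW_AND_HW both evaluate truthy
    mod = partition_for_ethosn(mod, params)  # predicates like depth_to_space decide offload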
samsammurphy/ee-atmcorr-timeseries
ee-atmcorr-coefficients-timeseries.py
atm_corr_image
python
def atm_corr_image(imageInfo: dict) -> dict:
    atmParams = {}
    scene_date = datetime.datetime.utcfromtimestamp(imageInfo['system:time_start'] / 1000)
    dt1 = ee.Date(str(scene_date).rsplit(sep=' ')[0])

    atmParams['doy'] = scene_date.timetuple().tm_yday
    atmParams['solar_z'] = imageInfo['MEAN_SOLAR_ZENITH_ANGLE']
    atmParams['h2o'] = Atmospheric.water(geom, dt1).getInfo()
    atmParams['o3'] = Atmospheric.ozone(geom, dt1).getInfo()
    atmParams['aot'] = Atmospheric.aerosol(geom, dt1).getInfo()

    return atmParams
Retrieves atmospheric params from image. imageInfo is a dictionary created from an ee.Image object
https://github.com/samsammurphy/ee-atmcorr-timeseries/blob/411840d0009c2af89d2d56a2a1d8f95be8956c20/ee-atmcorr-coefficients-timeseries.py#L45-L61
import ee
from pprint import pprint
import datetime
import math
import pickle

ee.Initialize()

from atmcorr.atmospheric import Atmospheric
from atmcorr.timeSeries import timeSeries

target = 'forest'
geom = ee.Geometry.Rectangle(85.5268682942167402, 25.6240533612814261,
                             85.7263954375090407, 25.8241594034421382)
MISSIONS = ['Sentinel2']
DIRPATH = './files/iLUTs/S2A_MSI/Continental/view_zenith_0/'
START_DATE = '2016-11-19'
STOP_DATE = '2017-02-17'
NO_OF_BANDS = 13

_ = timeSeries(target, geom, START_DATE, STOP_DATE, MISSIONS)

SRTM = ee.Image('CGIAR/SRTM90_V4')
altitude = SRTM.reduceRegion(reducer=ee.Reducer.mean(),
                             geometry=geom.centroid()).get('elevation').getInfo()
KM = altitude / 1000

S2 = ee.ImageCollection('COPERNICUS/S2').filterBounds(geom) \
    .filterDate(START_DATE, STOP_DATE).sort('system:time_start')
S2List = S2.toList(S2.size())
NO_OF_IMAGES = S2.size().getInfo()
Apache License 2.0
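A hedged usage sketch, assuming an authenticated Earth Engine session; whether the flat property dict comes straight from getInfo() or from its 'properties' entry depends on the EE response shape, so the indexing below is an assumption:

# Sketch only: pulls the property dict of the first Sentinel-2 scene in
# S2List (defined in the context above) and derives 6S inputs from it.
image_info = ee.Image(S2List.get(0)).getInfo()['properties']  # assumed shape
atm_params = atm_corr_image(image_info)
print(atm_params['doy'], atm_params['h2o'], atm_params['o3'], atm_params['aot'])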
docusign/docusign-python-client
docusign_esign/models/carbon_copy.py
CarbonCopy.agent_can_edit_email
python
def agent_can_edit_email(self, agent_can_edit_email):
    self._agent_can_edit_email = agent_can_edit_email
Sets the agent_can_edit_email of this CarbonCopy.  # noqa: E501

:param agent_can_edit_email: The agent_can_edit_email of this CarbonCopy.  # noqa: E501
:type: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/carbon_copy.py#L456-L465
import pprint import re import six from docusign_esign.client.configuration import Configuration class CarbonCopy(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'access_code': 'str', 'access_code_metadata': 'PropertyMetadata', 'add_access_code_to_email': 'str', 'additional_notifications': 'list[RecipientAdditionalNotification]', 'agent_can_edit_email': 'str', 'agent_can_edit_name': 'str', 'allow_system_override_for_locked_recipient': 'str', 'auto_responded_reason': 'str', 'client_user_id': 'str', 'completed_count': 'str', 'custom_fields': 'list[str]', 'declined_date_time': 'str', 'declined_reason': 'str', 'delivered_date_time': 'str', 'delivery_method': 'str', 'delivery_method_metadata': 'PropertyMetadata', 'designator_id': 'str', 'designator_id_guid': 'str', 'document_visibility': 'list[DocumentVisibility]', 'email': 'str', 'email_metadata': 'PropertyMetadata', 'email_notification': 'RecipientEmailNotification', 'embedded_recipient_start_url': 'str', 'error_details': 'ErrorDetails', 'excluded_documents': 'list[str]', 'fax_number': 'str', 'fax_number_metadata': 'PropertyMetadata', 'first_name': 'str', 'first_name_metadata': 'PropertyMetadata', 'full_name': 'str', 'full_name_metadata': 'PropertyMetadata', 'id_check_configuration_name': 'str', 'id_check_configuration_name_metadata': 'PropertyMetadata', 'id_check_information_input': 'IdCheckInformationInput', 'identity_verification': 'RecipientIdentityVerification', 'inherit_email_notification_configuration': 'str', 'last_name': 'str', 'last_name_metadata': 'PropertyMetadata', 'linked_account_configuration_id': 'str', 'locked_recipient_phone_auth_editable': 'str', 'locked_recipient_sms_editable': 'str', 'name': 'str', 'name_metadata': 'PropertyMetadata', 'note': 'str', 'note_metadata': 'PropertyMetadata', 'phone_authentication': 'RecipientPhoneAuthentication', 'phone_number': 'RecipientPhoneNumber', 'proof_file': 'RecipientProofFile', 'recipient_attachments': 'list[RecipientAttachment]', 'recipient_authentication_status': 'AuthenticationStatus', 'recipient_feature_metadata': 'list[FeatureAvailableMetadata]', 'recipient_id': 'str', 'recipient_id_guid': 'str', 'recipient_type': 'str', 'recipient_type_metadata': 'PropertyMetadata', 'require_id_lookup': 'str', 'require_id_lookup_metadata': 'PropertyMetadata', 'role_name': 'str', 'routing_order': 'str', 'routing_order_metadata': 'PropertyMetadata', 'sent_date_time': 'str', 'signed_date_time': 'str', 'signing_group_id': 'str', 'signing_group_id_metadata': 'PropertyMetadata', 'signing_group_name': 'str', 'signing_group_users': 'list[UserInfo]', 'sms_authentication': 'RecipientSMSAuthentication', 'social_authentications': 'list[SocialAuthentication]', 'status': 'str', 'status_code': 'str', 'suppress_emails': 'str', 'tabs': 'Tabs', 'template_locked': 'str', 'template_required': 'str', 'total_tab_count': 'str', 'user_id': 'str' } attribute_map = { 'access_code': 'accessCode', 'access_code_metadata': 'accessCodeMetadata', 'add_access_code_to_email': 'addAccessCodeToEmail', 'additional_notifications': 'additionalNotifications', 'agent_can_edit_email': 'agentCanEditEmail', 'agent_can_edit_name': 'agentCanEditName', 'allow_system_override_for_locked_recipient': 'allowSystemOverrideForLockedRecipient', 'auto_responded_reason': 'autoRespondedReason', 'client_user_id': 'clientUserId', 'completed_count': 'completedCount', 'custom_fields': 
'customFields', 'declined_date_time': 'declinedDateTime', 'declined_reason': 'declinedReason', 'delivered_date_time': 'deliveredDateTime', 'delivery_method': 'deliveryMethod', 'delivery_method_metadata': 'deliveryMethodMetadata', 'designator_id': 'designatorId', 'designator_id_guid': 'designatorIdGuid', 'document_visibility': 'documentVisibility', 'email': 'email', 'email_metadata': 'emailMetadata', 'email_notification': 'emailNotification', 'embedded_recipient_start_url': 'embeddedRecipientStartURL', 'error_details': 'errorDetails', 'excluded_documents': 'excludedDocuments', 'fax_number': 'faxNumber', 'fax_number_metadata': 'faxNumberMetadata', 'first_name': 'firstName', 'first_name_metadata': 'firstNameMetadata', 'full_name': 'fullName', 'full_name_metadata': 'fullNameMetadata', 'id_check_configuration_name': 'idCheckConfigurationName', 'id_check_configuration_name_metadata': 'idCheckConfigurationNameMetadata', 'id_check_information_input': 'idCheckInformationInput', 'identity_verification': 'identityVerification', 'inherit_email_notification_configuration': 'inheritEmailNotificationConfiguration', 'last_name': 'lastName', 'last_name_metadata': 'lastNameMetadata', 'linked_account_configuration_id': 'linkedAccountConfigurationId', 'locked_recipient_phone_auth_editable': 'lockedRecipientPhoneAuthEditable', 'locked_recipient_sms_editable': 'lockedRecipientSmsEditable', 'name': 'name', 'name_metadata': 'nameMetadata', 'note': 'note', 'note_metadata': 'noteMetadata', 'phone_authentication': 'phoneAuthentication', 'phone_number': 'phoneNumber', 'proof_file': 'proofFile', 'recipient_attachments': 'recipientAttachments', 'recipient_authentication_status': 'recipientAuthenticationStatus', 'recipient_feature_metadata': 'recipientFeatureMetadata', 'recipient_id': 'recipientId', 'recipient_id_guid': 'recipientIdGuid', 'recipient_type': 'recipientType', 'recipient_type_metadata': 'recipientTypeMetadata', 'require_id_lookup': 'requireIdLookup', 'require_id_lookup_metadata': 'requireIdLookupMetadata', 'role_name': 'roleName', 'routing_order': 'routingOrder', 'routing_order_metadata': 'routingOrderMetadata', 'sent_date_time': 'sentDateTime', 'signed_date_time': 'signedDateTime', 'signing_group_id': 'signingGroupId', 'signing_group_id_metadata': 'signingGroupIdMetadata', 'signing_group_name': 'signingGroupName', 'signing_group_users': 'signingGroupUsers', 'sms_authentication': 'smsAuthentication', 'social_authentications': 'socialAuthentications', 'status': 'status', 'status_code': 'statusCode', 'suppress_emails': 'suppressEmails', 'tabs': 'tabs', 'template_locked': 'templateLocked', 'template_required': 'templateRequired', 'total_tab_count': 'totalTabCount', 'user_id': 'userId' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._access_code = None self._access_code_metadata = None self._add_access_code_to_email = None self._additional_notifications = None self._agent_can_edit_email = None self._agent_can_edit_name = None self._allow_system_override_for_locked_recipient = None self._auto_responded_reason = None self._client_user_id = None self._completed_count = None self._custom_fields = None self._declined_date_time = None self._declined_reason = None self._delivered_date_time = None self._delivery_method = None self._delivery_method_metadata = None self._designator_id = None self._designator_id_guid = None self._document_visibility = None self._email = None self._email_metadata = None 
self._email_notification = None self._embedded_recipient_start_url = None self._error_details = None self._excluded_documents = None self._fax_number = None self._fax_number_metadata = None self._first_name = None self._first_name_metadata = None self._full_name = None self._full_name_metadata = None self._id_check_configuration_name = None self._id_check_configuration_name_metadata = None self._id_check_information_input = None self._identity_verification = None self._inherit_email_notification_configuration = None self._last_name = None self._last_name_metadata = None self._linked_account_configuration_id = None self._locked_recipient_phone_auth_editable = None self._locked_recipient_sms_editable = None self._name = None self._name_metadata = None self._note = None self._note_metadata = None self._phone_authentication = None self._phone_number = None self._proof_file = None self._recipient_attachments = None self._recipient_authentication_status = None self._recipient_feature_metadata = None self._recipient_id = None self._recipient_id_guid = None self._recipient_type = None self._recipient_type_metadata = None self._require_id_lookup = None self._require_id_lookup_metadata = None self._role_name = None self._routing_order = None self._routing_order_metadata = None self._sent_date_time = None self._signed_date_time = None self._signing_group_id = None self._signing_group_id_metadata = None self._signing_group_name = None self._signing_group_users = None self._sms_authentication = None self._social_authentications = None self._status = None self._status_code = None self._suppress_emails = None self._tabs = None self._template_locked = None self._template_required = None self._total_tab_count = None self._user_id = None self.discriminator = None setattr(self, "_{}".format('access_code'), kwargs.get('access_code', None)) setattr(self, "_{}".format('access_code_metadata'), kwargs.get('access_code_metadata', None)) setattr(self, "_{}".format('add_access_code_to_email'), kwargs.get('add_access_code_to_email', None)) setattr(self, "_{}".format('additional_notifications'), kwargs.get('additional_notifications', None)) setattr(self, "_{}".format('agent_can_edit_email'), kwargs.get('agent_can_edit_email', None)) setattr(self, "_{}".format('agent_can_edit_name'), kwargs.get('agent_can_edit_name', None)) setattr(self, "_{}".format('allow_system_override_for_locked_recipient'), kwargs.get('allow_system_override_for_locked_recipient', None)) setattr(self, "_{}".format('auto_responded_reason'), kwargs.get('auto_responded_reason', None)) setattr(self, "_{}".format('client_user_id'), kwargs.get('client_user_id', None)) setattr(self, "_{}".format('completed_count'), kwargs.get('completed_count', None)) setattr(self, "_{}".format('custom_fields'), kwargs.get('custom_fields', None)) setattr(self, "_{}".format('declined_date_time'), kwargs.get('declined_date_time', None)) setattr(self, "_{}".format('declined_reason'), kwargs.get('declined_reason', None)) setattr(self, "_{}".format('delivered_date_time'), kwargs.get('delivered_date_time', None)) setattr(self, "_{}".format('delivery_method'), kwargs.get('delivery_method', None)) setattr(self, "_{}".format('delivery_method_metadata'), kwargs.get('delivery_method_metadata', None)) setattr(self, "_{}".format('designator_id'), kwargs.get('designator_id', None)) setattr(self, "_{}".format('designator_id_guid'), kwargs.get('designator_id_guid', None)) setattr(self, "_{}".format('document_visibility'), kwargs.get('document_visibility', None)) setattr(self, 
"_{}".format('email'), kwargs.get('email', None)) setattr(self, "_{}".format('email_metadata'), kwargs.get('email_metadata', None)) setattr(self, "_{}".format('email_notification'), kwargs.get('email_notification', None)) setattr(self, "_{}".format('embedded_recipient_start_url'), kwargs.get('embedded_recipient_start_url', None)) setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None)) setattr(self, "_{}".format('excluded_documents'), kwargs.get('excluded_documents', None)) setattr(self, "_{}".format('fax_number'), kwargs.get('fax_number', None)) setattr(self, "_{}".format('fax_number_metadata'), kwargs.get('fax_number_metadata', None)) setattr(self, "_{}".format('first_name'), kwargs.get('first_name', None)) setattr(self, "_{}".format('first_name_metadata'), kwargs.get('first_name_metadata', None)) setattr(self, "_{}".format('full_name'), kwargs.get('full_name', None)) setattr(self, "_{}".format('full_name_metadata'), kwargs.get('full_name_metadata', None)) setattr(self, "_{}".format('id_check_configuration_name'), kwargs.get('id_check_configuration_name', None)) setattr(self, "_{}".format('id_check_configuration_name_metadata'), kwargs.get('id_check_configuration_name_metadata', None)) setattr(self, "_{}".format('id_check_information_input'), kwargs.get('id_check_information_input', None)) setattr(self, "_{}".format('identity_verification'), kwargs.get('identity_verification', None)) setattr(self, "_{}".format('inherit_email_notification_configuration'), kwargs.get('inherit_email_notification_configuration', None)) setattr(self, "_{}".format('last_name'), kwargs.get('last_name', None)) setattr(self, "_{}".format('last_name_metadata'), kwargs.get('last_name_metadata', None)) setattr(self, "_{}".format('linked_account_configuration_id'), kwargs.get('linked_account_configuration_id', None)) setattr(self, "_{}".format('locked_recipient_phone_auth_editable'), kwargs.get('locked_recipient_phone_auth_editable', None)) setattr(self, "_{}".format('locked_recipient_sms_editable'), kwargs.get('locked_recipient_sms_editable', None)) setattr(self, "_{}".format('name'), kwargs.get('name', None)) setattr(self, "_{}".format('name_metadata'), kwargs.get('name_metadata', None)) setattr(self, "_{}".format('note'), kwargs.get('note', None)) setattr(self, "_{}".format('note_metadata'), kwargs.get('note_metadata', None)) setattr(self, "_{}".format('phone_authentication'), kwargs.get('phone_authentication', None)) setattr(self, "_{}".format('phone_number'), kwargs.get('phone_number', None)) setattr(self, "_{}".format('proof_file'), kwargs.get('proof_file', None)) setattr(self, "_{}".format('recipient_attachments'), kwargs.get('recipient_attachments', None)) setattr(self, "_{}".format('recipient_authentication_status'), kwargs.get('recipient_authentication_status', None)) setattr(self, "_{}".format('recipient_feature_metadata'), kwargs.get('recipient_feature_metadata', None)) setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None)) setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None)) setattr(self, "_{}".format('recipient_type'), kwargs.get('recipient_type', None)) setattr(self, "_{}".format('recipient_type_metadata'), kwargs.get('recipient_type_metadata', None)) setattr(self, "_{}".format('require_id_lookup'), kwargs.get('require_id_lookup', None)) setattr(self, "_{}".format('require_id_lookup_metadata'), kwargs.get('require_id_lookup_metadata', None)) setattr(self, "_{}".format('role_name'), kwargs.get('role_name', None)) 
setattr(self, "_{}".format('routing_order'), kwargs.get('routing_order', None)) setattr(self, "_{}".format('routing_order_metadata'), kwargs.get('routing_order_metadata', None)) setattr(self, "_{}".format('sent_date_time'), kwargs.get('sent_date_time', None)) setattr(self, "_{}".format('signed_date_time'), kwargs.get('signed_date_time', None)) setattr(self, "_{}".format('signing_group_id'), kwargs.get('signing_group_id', None)) setattr(self, "_{}".format('signing_group_id_metadata'), kwargs.get('signing_group_id_metadata', None)) setattr(self, "_{}".format('signing_group_name'), kwargs.get('signing_group_name', None)) setattr(self, "_{}".format('signing_group_users'), kwargs.get('signing_group_users', None)) setattr(self, "_{}".format('sms_authentication'), kwargs.get('sms_authentication', None)) setattr(self, "_{}".format('social_authentications'), kwargs.get('social_authentications', None)) setattr(self, "_{}".format('status'), kwargs.get('status', None)) setattr(self, "_{}".format('status_code'), kwargs.get('status_code', None)) setattr(self, "_{}".format('suppress_emails'), kwargs.get('suppress_emails', None)) setattr(self, "_{}".format('tabs'), kwargs.get('tabs', None)) setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None)) setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None)) setattr(self, "_{}".format('total_tab_count'), kwargs.get('total_tab_count', None)) setattr(self, "_{}".format('user_id'), kwargs.get('user_id', None)) @property def access_code(self): return self._access_code @access_code.setter def access_code(self, access_code): self._access_code = access_code @property def access_code_metadata(self): return self._access_code_metadata @access_code_metadata.setter def access_code_metadata(self, access_code_metadata): self._access_code_metadata = access_code_metadata @property def add_access_code_to_email(self): return self._add_access_code_to_email @add_access_code_to_email.setter def add_access_code_to_email(self, add_access_code_to_email): self._add_access_code_to_email = add_access_code_to_email @property def additional_notifications(self): return self._additional_notifications @additional_notifications.setter def additional_notifications(self, additional_notifications): self._additional_notifications = additional_notifications @property def agent_can_edit_email(self): return self._agent_can_edit_email @agent_can_edit_email.setter
MIT License
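A hedged usage sketch of the setter, assuming the docusign_esign SDK is installed; the recipient values are made up:

# Sketch only: DocuSign models carry booleans as strings.
from docusign_esign import CarbonCopy  # assumes the SDK's top-level export

cc = CarbonCopy(email="[email protected]", name="Jane Doe", recipient_id="1")
cc.agent_can_edit_email = "true"
print(cc.agent_can_edit_email)  # "true"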
kkevsterrr/backdoorme
backdoors/shell/__pupy/pupy/packages/windows/amd64/psutil/_pssunos.py
boot_time
python
def boot_time():
    return cext.boot_time()
The system boot time expressed in seconds since the epoch.
https://github.com/kkevsterrr/backdoorme/blob/f9755ca6cec600335e681752e7a1c5c617bb5a39/backdoors/shell/__pupy/pupy/packages/windows/amd64/psutil/_pssunos.py#L161-L163
import errno import os import socket import subprocess import sys from collections import namedtuple from . import _common from . import _psposix from . import _psutil_posix as cext_posix from . import _psutil_sunos as cext from ._common import isfile_strict, socktype_to_enum, sockfam_to_enum from ._common import usage_percent from ._compat import PY3 __extra__all__ = ["CONN_IDLE", "CONN_BOUND"] PAGE_SIZE = os.sysconf('SC_PAGE_SIZE') AF_LINK = cext_posix.AF_LINK CONN_IDLE = "IDLE" CONN_BOUND = "BOUND" PROC_STATUSES = { cext.SSLEEP: _common.STATUS_SLEEPING, cext.SRUN: _common.STATUS_RUNNING, cext.SZOMB: _common.STATUS_ZOMBIE, cext.SSTOP: _common.STATUS_STOPPED, cext.SIDL: _common.STATUS_IDLE, cext.SONPROC: _common.STATUS_RUNNING, cext.SWAIT: _common.STATUS_WAITING, } TCP_STATUSES = { cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED, cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT, cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV, cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1, cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2, cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT, cext.TCPS_CLOSED: _common.CONN_CLOSE, cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT, cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK, cext.TCPS_LISTEN: _common.CONN_LISTEN, cext.TCPS_CLOSING: _common.CONN_CLOSING, cext.PSUTIL_CONN_NONE: _common.CONN_NONE, cext.TCPS_IDLE: CONN_IDLE, cext.TCPS_BOUND: CONN_BOUND, } scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait']) svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free']) pextmem = namedtuple('pextmem', ['rss', 'vms']) pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked']) pmmap_ext = namedtuple( 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) NoSuchProcess = None ZombieProcess = None AccessDenied = None TimeoutExpired = None disk_io_counters = cext.disk_io_counters net_io_counters = cext.net_io_counters disk_usage = _psposix.disk_usage net_if_addrs = cext_posix.net_if_addrs def virtual_memory(): total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE used = total - free percent = usage_percent(used, total, _round=1) return svmem(total, avail, percent, used, free) def swap_memory(): sin, sout = cext.swap_mem() p = subprocess.Popen(['/usr/bin/env', 'PATH=/usr/sbin:/sbin:%s' % os.environ['PATH'], 'swap', '-l', '-k'], stdout=subprocess.PIPE) stdout, stderr = p.communicate() if PY3: stdout = stdout.decode(sys.stdout.encoding) if p.returncode != 0: raise RuntimeError("'swap -l -k' failed (retcode=%s)" % p.returncode) lines = stdout.strip().split('\n')[1:] if not lines: raise RuntimeError('no swap device(s) configured') total = free = 0 for line in lines: line = line.split() t, f = line[-2:] t = t.replace('K', '') f = f.replace('K', '') total += int(int(t) * 1024) free += int(int(f) * 1024) used = total - free percent = usage_percent(used, total, _round=1) return _common.sswap(total, used, free, percent, sin * PAGE_SIZE, sout * PAGE_SIZE) def pids(): return [int(x) for x in os.listdir('/proc') if x.isdigit()] def pid_exists(pid): return _psposix.pid_exists(pid) def cpu_times(): ret = cext.per_cpu_times() return scputimes(*[sum(x) for x in zip(*ret)]) def per_cpu_times(): ret = cext.per_cpu_times() return [scputimes(*x) for x in ret] def cpu_count_logical(): try: return os.sysconf("SC_NPROCESSORS_ONLN") except ValueError: return None def cpu_count_physical(): return cext.cpu_count_phys()
MIT License
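A usage sketch via the public psutil package, which exposes the same call this vendored platform module backs:

import datetime
import psutil

# Convert the epoch-seconds boot time into a readable timestamp.
boot = datetime.datetime.fromtimestamp(psutil.boot_time())
print(boot.isoformat())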
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/h264_per_title_configuration.py
H264PerTitleConfiguration.target_quality_crf
python
def target_quality_crf(self):
    return self._target_quality_crf
Gets the target_quality_crf of this H264PerTitleConfiguration.

Desired target quality of the highest representation expressed as a CRF value.

:return: The target_quality_crf of this H264PerTitleConfiguration.
:rtype: float
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/h264_per_title_configuration.py#L77-L86
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.auto_representation import AutoRepresentation from bitmovin_api_sdk.models.per_title_configuration import PerTitleConfiguration from bitmovin_api_sdk.models.per_title_fixed_resolution_and_bitrate_configuration import PerTitleFixedResolutionAndBitrateConfiguration import pprint import six class H264PerTitleConfiguration(PerTitleConfiguration): @poscheck_model def __init__(self, min_bitrate=None, max_bitrate=None, min_bitrate_step_size=None, max_bitrate_step_size=None, auto_representations=None, complexity_factor=None, fixed_resolution_and_bitrate_configuration=None, target_quality_crf=None, codec_min_bitrate_factor=None, codec_max_bitrate_factor=None, codec_bufsize_factor=None): super(H264PerTitleConfiguration, self).__init__(min_bitrate=min_bitrate, max_bitrate=max_bitrate, min_bitrate_step_size=min_bitrate_step_size, max_bitrate_step_size=max_bitrate_step_size, auto_representations=auto_representations, complexity_factor=complexity_factor, fixed_resolution_and_bitrate_configuration=fixed_resolution_and_bitrate_configuration) self._target_quality_crf = None self._codec_min_bitrate_factor = None self._codec_max_bitrate_factor = None self._codec_bufsize_factor = None self.discriminator = None if target_quality_crf is not None: self.target_quality_crf = target_quality_crf if codec_min_bitrate_factor is not None: self.codec_min_bitrate_factor = codec_min_bitrate_factor if codec_max_bitrate_factor is not None: self.codec_max_bitrate_factor = codec_max_bitrate_factor if codec_bufsize_factor is not None: self.codec_bufsize_factor = codec_bufsize_factor @property def openapi_types(self): types = {} if hasattr(super(H264PerTitleConfiguration, self), 'openapi_types'): types = getattr(super(H264PerTitleConfiguration, self), 'openapi_types') types.update({ 'target_quality_crf': 'float', 'codec_min_bitrate_factor': 'float', 'codec_max_bitrate_factor': 'float', 'codec_bufsize_factor': 'float' }) return types @property def attribute_map(self): attributes = {} if hasattr(super(H264PerTitleConfiguration, self), 'attribute_map'): attributes = getattr(super(H264PerTitleConfiguration, self), 'attribute_map') attributes.update({ 'target_quality_crf': 'targetQualityCrf', 'codec_min_bitrate_factor': 'codecMinBitrateFactor', 'codec_max_bitrate_factor': 'codecMaxBitrateFactor', 'codec_bufsize_factor': 'codecBufsizeFactor' }) return attributes @property
MIT License
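A hedged usage sketch, assuming bitmovin-api-sdk is installed; the CRF value is illustrative:

from bitmovin_api_sdk.models.h264_per_title_configuration import H264PerTitleConfiguration

# The constructor accepts target_quality_crf directly, per the context above.
cfg = H264PerTitleConfiguration(target_quality_crf=23.0)
print(cfg.target_quality_crf)  # 23.0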
therealsupermario/supermariopy
supermariopy/ptutils/viz.py
argmax_rgb
python
def argmax_rgb(m, cmap=plt.cm.viridis):
    B, P, H, W = ptutils.nn.shape_as_list(m)
    max_values, argmax_map = torch.max(m, dim=1)
    colors = imageutils.make_colors(P, cmap=cmap)
    colors = colors.astype(np.float32)
    colors = torch.from_numpy(colors)
    colors = colors.to(m.device)
    m_one_hot = ptnn.to_one_hot(argmax_map, P)
    m_one_hot = m_one_hot.permute(0, 3, 1, 2)
    mask_rgb = torch.einsum("bphw,pc->bchw", m_one_hot, colors)
    return mask_rgb
Take argmax of m along dimension 1 and apply RGB colorcode on it.

Parameters
----------
m : torch.Tensor
    Part-map tensor shaped [B, P, H, W].

Returns
-------
torch.Tensor
    RGB mask tensor shaped [B, 3, H, W].
https://github.com/therealsupermario/supermariopy/blob/9fff8275278ff26caff50da86109c25d276bb30b/supermariopy/ptutils/viz.py#L8-L34
import numpy as np
import torch
from matplotlib import pyplot as plt

from supermariopy import imageutils, ptutils
from supermariopy.ptutils import nn as ptnn
MIT License
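A hedged usage sketch, assuming argmax_rgb and its supermariopy dependencies are importable; the input tensor is random:

import torch

m = torch.rand(2, 5, 16, 16)  # B=2 batches, P=5 part maps, 16x16 spatial
rgb = argmax_rgb(m)           # one color per argmax-selected part
print(rgb.shape)              # torch.Size([2, 3, 16, 16])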
seetaresearch/dragon
dragon/python/core/util/logging.py
info
python
def info(msg, *args, **kwargs):
    get_logger().info(_detailed_msg(msg), *args, **kwargs)
Log message at the INFO level.

Parameters
----------
msg: str
    The message.
https://github.com/seetaresearch/dragon/blob/3dfb6ea55d90d2fb2da9b1b471f5e1e7d7667810/dragon/python/core/util/logging.py#L133-L142
from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect import logging as _logging import os import sys as _sys import threading from dragon.core.framework import backend from dragon.core.framework import config _logger = None _logger_lock = threading.Lock() def get_logger(): global _logger if _logger: return _logger _logger_lock.acquire() try: if _logger: return _logger logger = _logging.getLogger('dragon') logger.setLevel('INFO') logger.propagate = False if True: _interactive = False try: if _sys.ps1: _interactive = True except AttributeError: _interactive = _sys.flags.interactive if _interactive: logger.setLevel('INFO') _logging_target = _sys.stdout else: _logging_target = _sys.stderr _handler = _logging.StreamHandler(_logging_target) _handler.setFormatter(_logging.Formatter('%(levelname)s %(message)s')) logger.addHandler(_handler) _logger = logger return _logger finally: _logger_lock.release() def log(level, msg, *args, **kwargs): level = _logging._checkLevel(level) get_logger().log(level, _detailed_msg(msg), *args, **kwargs) def debug(msg, *args, **kwargs): get_logger().debug(_detailed_msg(msg), *args, **kwargs) def error(msg, *args, **kwargs): get_logger().error(_detailed_msg(msg), *args, **kwargs) def fatal(msg, *args, **kwargs): get_logger().fatal(_detailed_msg(msg), *args, **kwargs) def get_verbosity(): return get_logger().getEffectiveLevel()
BSD 2-Clause Simplified License
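A hedged usage sketch, assuming the dragon package is installed:

from dragon.core.util import logging

# Routed through get_logger(), which lazily builds the shared 'dragon' logger.
logging.info('loaded {} tensors'.format(42))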
zhaochaocs/dualenc
train.py
make_dataset_iter
python
def make_dataset_iter(datasets, fields, opt, is_train=True):
    batch_size = opt.batch_size if is_train else opt.valid_batch_size
    batch_size_fn = None
    if is_train and opt.batch_type == "tokens":
        global max_src_in_batch, max_tgt_in_batch

        def batch_size_fn(new, count, sofar):
            global max_src_in_batch, max_tgt_in_batch
            if count == 1:
                max_src_in_batch = 0
                max_tgt_in_batch = 0
            max_src_in_batch = max(max_src_in_batch, len(new.src) + 2)
            max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt) + 1)
            src_elements = count * max_src_in_batch
            tgt_elements = count * max_tgt_in_batch
            return max(src_elements, tgt_elements)

    device = torch.device(f'cuda:{opt.gpuid[0]}') if opt.gpuid else -1
    return DatasetLazyIter(datasets, fields, batch_size, batch_size_fn, device, is_train)
This returns a user-defined train/validate data iterator for the trainer
to iterate over during each train epoch. We implement a simple ordered
iterator strategy here, but more sophisticated strategies such as
curriculum learning are possible too.
https://github.com/zhaochaocs/dualenc/blob/4175a7ed3f2c3232152ecce5ffd6ee4c727e64b9/train.py#L184-L211
from __future__ import division import argparse import glob import os import sys import random from datetime import datetime import numpy as np import torch import torch.nn as nn from torch import cuda import onmt import onmt.io import onmt.Models import onmt.ModelConstructor import onmt.modules from onmt.Utils import use_gpu import opts parser = argparse.ArgumentParser( description='train.py', formatter_class=argparse.ArgumentDefaultsHelpFormatter) opts.add_md_help_argument(parser) opts.model_opts(parser) opts.train_opts(parser) opts.gcn_opts(parser) opt = parser.parse_args() if opt.word_vec_size != -1: opt.src_word_vec_size = opt.word_vec_size opt.tgt_word_vec_size = opt.word_vec_size if opt.layers != -1: opt.enc_layers = opt.layers opt.dec_layers = opt.layers if opt.seed > 0: random.seed(opt.seed) np.random.seed(opt.seed) torch.manual_seed(opt.seed) if opt.rnn_type == "SRU" and not opt.gpuid: raise AssertionError("Using SRU requires -gpuid set.") if torch.cuda.is_available() and not opt.gpuid: print("WARNING: You have a CUDA device, should run with -gpuid 0") if opt.gpuid: cuda.set_device(opt.gpuid[0]) if opt.seed > 0: torch.cuda.manual_seed(opt.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if len(opt.gpuid) > 1: sys.stderr.write("Sorry, multigpu isn't supported yet, coming soon!\n") sys.exit(1) if opt.exp_host != "": from pycrayon import CrayonClient cc = CrayonClient(hostname=opt.exp_host) experiments = cc.get_experiment_names() print(experiments) if opt.exp in experiments: cc.remove_experiment(opt.exp) experiment = cc.create_experiment(opt.exp) if opt.tensorboard: from tensorboardX import SummaryWriter writer = SummaryWriter( opt.tensorboard_log_dir + datetime.now().strftime("/%b-%d_%H-%M-%S"), comment="Onmt") progress_step = 0 def report_func(epoch, batch, num_batches, progress_step, start_time, lr, report_stats): if batch % opt.report_every == -1 % opt.report_every: report_stats.output(epoch, batch + 1, num_batches, start_time) if opt.exp_host: report_stats.log("progress", experiment, lr) if opt.tensorboard: report_stats.log_tensorboard( "progress", writer, lr, progress_step) report_stats = onmt.Statistics() return report_stats class DatasetLazyIter(object): def __init__(self, datasets, fields, batch_size, batch_size_fn, device, is_train): self.datasets = datasets self.fields = fields self.batch_size = batch_size self.batch_size_fn = batch_size_fn self.device = device self.is_train = is_train self.cur_iter = self._next_dataset_iterator(datasets) assert self.cur_iter is not None def __iter__(self): dataset_iter = (d for d in self.datasets) while self.cur_iter is not None: for batch in self.cur_iter: yield batch self.cur_iter = self._next_dataset_iterator(dataset_iter) def __len__(self): assert self.cur_iter is not None return len(self.cur_iter) def get_cur_dataset(self): return self.cur_dataset def _next_dataset_iterator(self, dataset_iter): try: self.cur_dataset = next(dataset_iter) except StopIteration: return None self.cur_dataset.fields = self.fields return onmt.io.OrderedIterator( dataset=self.cur_dataset, batch_size=self.batch_size, batch_size_fn=self.batch_size_fn, device=self.device, train=self.is_train, sort=False, sort_within_batch=True, repeat=False)
MIT License
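A hedged sketch of wiring the lazy iterator into a training loop; `train_datasets` and `fields` are hypothetical objects produced by the OpenNMT-style preprocessing step, and `opt` is the parsed argparse options:

# Sketch only: one DatasetLazyIter drives an epoch's batches.
train_iter = make_dataset_iter(train_datasets, fields, opt, is_train=True)
for batch in train_iter:
    pass  # one forward/backward step per batch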
encode-dcc/snovault
src/snovault/tests/test_redis_store.py
TestLocalStore.setup_method
python
def setup_method(self, app_settings):
    self.local_store = _get_client(app_settings)
    self.local_store.client.delete(self.dict_key)
    self.local_store.client.delete(self.item_key)
    for item in self.local_store.client.lrange(self.list_key, 0, -1):
        self.local_store.client.delete(item)
    self.local_store.client.delete(self.list_key)
Adds the local store to the test class and cleans up standard redis keys.
- Uses the exposed redis client directly
https://github.com/encode-dcc/snovault/blob/75e77bb7445f6de57d2e389942255435fade06dc/src/snovault/tests/test_redis_store.py#L33-L43
import pytest
from redis import StrictRedis

from snovault.local_storage import LocalStoreClient


def _get_client(local_settings):
    return LocalStoreClient(
        db_index=local_settings['local_storage_redis_index'],
        host=local_settings['local_storage_host'],
        port=local_settings['local_storage_port'],
        socket_timeout=local_settings['local_storage_timeout'],
    )


def test_local_storage_server_fixture(app_settings):
    local_store = _get_client(app_settings)
    try:
        local_store.ping()
    except Exception as excp:
        print(excp)
        assert False
    assert True


class TestLocalStore():
    dict_key = 'fakedictkey'
    item_key = 'fakeitemkey'
    list_key = 'fakelistkey'

    @pytest.fixture(autouse=True)
MIT License
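A hedged sketch of a test method that would sit inside TestLocalStore alongside setup_method; it talks to the exposed StrictRedis client directly, just as the cleanup does:

def test_list_key_roundtrip(self):
    # rpush/lrange are standard StrictRedis calls; values come back as bytes.
    self.local_store.client.rpush(self.list_key, 'a')
    assert self.local_store.client.lrange(self.list_key, 0, -1) == [b'a']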
fzj-iek3-vsa/fine
FINE/subclasses/lopf.py
LOPFModel.getOptimalValues
python
def getOptimalValues(self, name='all'):
    if name == 'capacityVariablesOptimum':
        return {'values': self.capacityVariablesOptimum, 'timeDependent': False,
                'dimension': self.dimension}
    elif name == 'isBuiltVariablesOptimum':
        return {'values': self.isBuiltVariablesOptimum, 'timeDependent': False,
                'dimension': self.dimension}
    elif name == 'operationVariablesOptimum':
        return {'values': self.operationVariablesOptimum, 'timeDependent': True,
                'dimension': self.dimension}
    elif name == 'phaseAngleVariablesOptimum':
        return {'values': self.phaseAngleVariablesOptimum, 'timeDependent': True,
                'dimension': '1dim'}
    else:
        return {'capacityVariablesOptimum': {'values': self.capacityVariablesOptimum,
                                             'timeDependent': False, 'dimension': self.dimension},
                'isBuiltVariablesOptimum': {'values': self.isBuiltVariablesOptimum,
                                            'timeDependent': False, 'dimension': self.dimension},
                'operationVariablesOptimum': {'values': self.operationVariablesOptimum,
                                              'timeDependent': True, 'dimension': self.dimension},
                'phaseAngleVariablesOptimum': {'values': self.phaseAngleVariablesOptimum,
                                               'timeDependent': True, 'dimension': '1dim'}}
Return optimal values of the components.

:param name: name of the variables of which the optimal values should be returned:

    * 'capacityVariablesOptimum',
    * 'isBuiltVariablesOptimum',
    * 'operationVariablesOptimum',
    * 'phaseAngleVariablesOptimum',
    * 'all' or another input: all variables are returned.

:type name: string
https://github.com/fzj-iek3-vsa/fine/blob/3114fd009e80a7eadacffe26bf5ff8e6a126ac61/FINE/subclasses/lopf.py#L287-L315
from FINE.transmission import Transmission, TransmissionModel from FINE import utils import pyomo.environ as pyomo import pandas as pd class LinearOptimalPowerFlow(Transmission): def __init__(self, esM, name, commodity, reactances, losses=0, distances=None, hasCapacityVariable=True, capacityVariableDomain='continuous', capacityPerPlantUnit=1, hasIsBuiltBinaryVariable=False, bigM=None, operationRateMax=None, operationRateFix=None, tsaWeight=1, locationalEligibility=None, capacityMin=None, capacityMax=None, partLoadMin=None, sharedPotentialID=None, capacityFix=None, isBuiltFix=None, investPerCapacity=0, investIfBuilt=0, opexPerOperation=0, opexPerCapacity=0, opexIfBuilt=0, QPcostScale=0, interestRate=0.08, economicLifetime=10, technicalLifetime=None): Transmission.__init__(self, esM, name, commodity, losses=losses, distances=distances, hasCapacityVariable=hasCapacityVariable, capacityVariableDomain=capacityVariableDomain, capacityPerPlantUnit=capacityPerPlantUnit, hasIsBuiltBinaryVariable=hasIsBuiltBinaryVariable, bigM=bigM, operationRateMax=operationRateMax, operationRateFix=operationRateFix, tsaWeight=tsaWeight, locationalEligibility=locationalEligibility, capacityMin=capacityMin, capacityMax=capacityMax, partLoadMin=partLoadMin, sharedPotentialID=sharedPotentialID, capacityFix=capacityFix, isBuiltFix=isBuiltFix, investPerCapacity=investPerCapacity, investIfBuilt=investIfBuilt, opexPerOperation=opexPerOperation, opexPerCapacity=opexPerCapacity, opexIfBuilt=opexIfBuilt, QPcostScale=QPcostScale, interestRate=interestRate, economicLifetime=economicLifetime, technicalLifetime=technicalLifetime) self.modelingClass = LOPFModel self.reactances2dim = reactances try: self.reactances = pd.Series(self._mapC).apply(lambda loc: self.reactances2dim[loc[0]][loc[1]]) except: self.reactances = utils.preprocess2dimData(self.reactances2dim) def addToEnergySystemModel(self, esM): super().addToEnergySystemModel(esM) class LOPFModel(TransmissionModel): def __init__(self): self.abbrvName = 'lopf' self.dimension = '2dim' self.componentsDict = {} self.capacityVariablesOptimum, self.isBuiltVariablesOptimum = None, None self.operationVariablesOptimum, self.phaseAngleVariablesOptimum = None, None self.optSummary = None def initPhaseAngleVarSet(self, pyM): compDict, abbrvName = self.componentsDict, self.abbrvName def initPhaseAngleVarSet(pyM): return ((loc, compName) for compName, comp in compDict.items() for loc in compDict[compName]._mapL.keys()) setattr(pyM, 'phaseAngleVarSet_' + abbrvName, pyomo.Set(dimen=2, initialize=initPhaseAngleVarSet)) def declareSets(self, esM, pyM): self.declareDesignVarSet(pyM) self.declareContinuousDesignVarSet(pyM) self.declareDiscreteDesignVarSet(pyM) self.declareDesignDecisionVarSet(pyM) self.declareOpVarSet(esM, pyM) self.initPhaseAngleVarSet(pyM) self.declareOperationBinarySet(pyM) self.declareOperationModeSets(pyM, 'opConstrSet', 'operationRateMax', 'operationRateFix') def declarePhaseAngleVariables(self, pyM): setattr(pyM, 'phaseAngle_' + self.abbrvName, pyomo.Var(getattr(pyM, 'phaseAngleVarSet_' + self.abbrvName), pyM.timeSet, domain=pyomo.Reals)) def declareVariables(self, esM, pyM, relaxIsBuiltBinary): self.declareCapacityVars(pyM) self.declareRealNumbersVars(pyM) self.declareIntNumbersVars(pyM) self.declareBinaryDesignDecisionVars(pyM, relaxIsBuiltBinary) self.declareOperationVars(pyM, 'op') self.declareOperationBinaryVars(pyM, 'op_bin') self.declarePhaseAngleVariables(pyM) def powerFlowDC(self, pyM): compDict, abbrvName = self.componentsDict, self.abbrvName phaseAngleVar = 
getattr(pyM, 'phaseAngle_' + self.abbrvName) opVar, opVarSet = getattr(pyM, 'op_' + abbrvName), getattr(pyM, 'operationVarSet_' + abbrvName) def powerFlowDC(pyM, loc, compName, p, t): node1, node2 = compDict[compName]._mapC[loc] return (opVar[loc, compName, p, t] - opVar[compDict[compName]._mapI[loc], compName, p, t] == (phaseAngleVar[node1, compName, p, t]-phaseAngleVar[node2, compName, p, t])/ compDict[compName].reactances[loc]) setattr(pyM, 'ConstrpowerFlowDC_' + abbrvName, pyomo.Constraint(opVarSet, pyM.timeSet, rule=powerFlowDC)) def basePhaseAngle(self, pyM): compDict, abbrvName = self.componentsDict, self.abbrvName phaseAngleVar = getattr(pyM, 'phaseAngle_' + self.abbrvName) def basePhaseAngle(pyM, compName, p, t): node0 = sorted(compDict[compName]._mapL)[0] return phaseAngleVar[node0, compName, p, t] == 0 setattr(pyM, 'ConstrBasePhaseAngle_' + abbrvName, pyomo.Constraint(compDict.keys(), pyM.timeSet, rule=basePhaseAngle)) def declareComponentConstraints(self, esM, pyM): super().declareComponentConstraints(esM, pyM) self.powerFlowDC(pyM) self.basePhaseAngle(pyM) def setOptimalValues(self, esM, pyM): super().setOptimalValues(esM, pyM) compDict, abbrvName = self.componentsDict, self.abbrvName phaseAngleVar = getattr(pyM, 'phaseAngle_' + abbrvName) optVal_ = utils.formatOptimizationOutput(phaseAngleVar.get_values(), 'operationVariables', '1dim', esM.periodsOrder, esM=esM) self.phaseAngleVariablesOptimum = optVal_
MIT License
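A hedged usage sketch; `esM` is a hypothetical, already-optimized EnergySystemModel containing a LinearOptimalPowerFlow component, and the componentModelingDict key is an assumption:

# Sketch only: pull the DC power-flow results after esM.optimize().
lopf_model = esM.componentModelingDict['LOPFModel']  # key assumed
angles = lopf_model.getOptimalValues('phaseAngleVariablesOptimum')
print(angles['timeDependent'], angles['dimension'])  # True '1dim'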
selimfirat/pysad
pysad/transform/probability_calibration/conformal_prediction.py
ConformalProbabilityCalibrator.fit_partial
python
def fit_partial(self, score):
    self.window.update(score)
    return self
Fits particular (next) timestep's score to train the postprocessor.

Args:
    score (float): Input score.

Returns:
    object: self.
https://github.com/selimfirat/pysad/blob/dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede/pysad/transform/probability_calibration/conformal_prediction.py#L21-L31
from pysad.core.base_postprocessor import BasePostprocessor
import numpy as np
from pysad.utils.window import Window, UnlimitedWindow


class ConformalProbabilityCalibrator(BasePostprocessor):
    def __init__(self, windowed=True, window_size=300):
        self.windowed = windowed
        self.window_size = window_size
        self.window = Window(window_size=self.window_size) if self.windowed else UnlimitedWindow()
BSD 3-Clause New or Revised License
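A usage sketch, assuming pysad is installed; the import path matches the file path in this record:

import numpy as np
from pysad.transform.probability_calibration.conformal_prediction import ConformalProbabilityCalibrator

calibrator = ConformalProbabilityCalibrator(windowed=True, window_size=300)
for score in np.random.rand(10):
    calibrator = calibrator.fit_partial(score)  # returns self, so chaining works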
c0fec0de/anytree
anytree/search.py
find
python
def find(node, filter_=None, stop=None, maxlevel=None):
    return _find(node, filter_=filter_, stop=stop, maxlevel=maxlevel)
Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.

Return matching node.

Args:
    node: top node, start searching.

Keyword Args:
    filter_: function called with every `node` as argument, `node` is returned if `True`.
    stop: stop iteration at `node` if `stop` function returns `True` for `node`.
    maxlevel (int): maximum descending in the node hierarchy.

Example tree:

>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
|   |-- a
|   +-- d
|       |-- c
|       +-- e
+-- g
    +-- i
        +-- h

>>> find(f, lambda node: node.name == "d")
Node('/f/b/d')
>>> find(f, lambda node: node.name == "z")
>>> find(f, lambda node: b in node.path)  # doctest: +ELLIPSIS
Traceback (most recent call last):
    ...
anytree.search.CountError: Expecting 1 elements at maximum, but found 5. (Node('/f/b')... Node('/f/b/d/e'))
https://github.com/c0fec0de/anytree/blob/d63289b65644c6e8c9c1d83d339c2c235d9ecc31/anytree/search.py#L116-L161
from anytree.iterators import PreOrderIter


def findall(node, filter_=None, stop=None, maxlevel=None, mincount=None, maxcount=None):
    return _findall(node, filter_=filter_, stop=stop, maxlevel=maxlevel,
                    mincount=mincount, maxcount=maxcount)


def findall_by_attr(node, value, name="name", maxlevel=None, mincount=None, maxcount=None):
    return _findall(node, filter_=lambda n: _filter_by_name(n, name, value),
                    maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
Apache License 2.0
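A runnable sketch combining find() with the maxlevel cutoff:

from anytree import Node
from anytree.search import find

root = Node("root")
leaf = Node("leaf", parent=root)
# maxlevel=2 still reaches the child; maxlevel=1 would return None.
print(find(root, lambda n: n.name == "leaf", maxlevel=2))  # Node('/root/leaf')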
academysoftwarefoundation/opencue
cuegui/cuegui/HostMonitor.py
HostMonitor.setColumnVisibility
python
def setColumnVisibility(self, settings):
    self.hostMonitorTree.setColumnVisibility(settings)
Sets table column visibility.
https://github.com/academysoftwarefoundation/opencue/blob/da28ae905b81e7d1125db2073a369fdc0ae9acd4/cuegui/cuegui/HostMonitor.py#L86-L88
from __future__ import absolute_import from __future__ import print_function from __future__ import division from builtins import str from PySide2 import QtCore from PySide2 import QtGui from PySide2 import QtWidgets import opencue import cuegui.HostMonitorTree import cuegui.Logger log = cuegui.Logger.getLogger(__file__) FILTER_HEIGHT = 20 class HostMonitor(QtWidgets.QWidget): def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent) self.__filterByHostNameLastInput = None self.hostMonitorTree = cuegui.HostMonitorTree.HostMonitorTree(self) layout = QtWidgets.QVBoxLayout() layout.setContentsMargins(0, 0, 0, 0) self.setLayout(layout) self.layout().setContentsMargins(0, 0, 0, 0) self.layout().setSpacing(4) hlayout = QtWidgets.QHBoxLayout() self.__filterByHostNameSetup(hlayout) self.__filterAllocationSetup(hlayout) self.__filterHardwareStateSetup(hlayout) hlayout.addStretch() self.__refreshToggleCheckBoxSetup(hlayout) self.__refreshButtonSetup(hlayout) self.__clearButtonSetup(hlayout) self.layout().addLayout(hlayout) self.layout().addWidget(self.hostMonitorTree) self.__viewHostsSetup() if bool(int(QtGui.qApp.settings.value("AutoRefreshMonitorHost", 1))): self.updateRequest() def updateRequest(self): self.hostMonitorTree.updateRequest() def getColumnVisibility(self): return self.hostMonitorTree.getColumnVisibility()
Apache License 2.0
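A hedged sketch of the persist/restore pattern the getter/setter pair supports; `parent` is a hypothetical existing QWidget and a running cuegui QApplication is assumed:

monitor = HostMonitor(parent)
settings = monitor.getColumnVisibility()  # e.g. persisted on shutdown
monitor.setColumnVisibility(settings)     # ...and restored on startup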
nexinfinite/discordbothelp
Music/music.py
Music.on_voice_state_update
python
async def on_voice_state_update(self, member, before, after):
    if member != self.bot.user:
        return
    if before.channel and not after.channel and self.queues.get(member.guild.id):
        self.queues.pop(member.guild.id, None)
Handle the unfortunate event in which the bot is kicked from a voice channel: delete the guild's queue and clean up.
https://github.com/nexinfinite/discordbothelp/blob/1fc7e77aa4c380cb4728a8645a8bc81160d8dea7/Music/music.py#L244-L248
import discord
from discord.ext import commands
import pafy
import asyncio
import aiohttp

YOUTUBE_SEARCH_ENDPOINT = 'https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults=1&q={0}&type=video&key={1}'
YOUTUBE_SONG_URL = 'https://www.youtube.com/watch?v={0}'
FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}
SPOTIFY_CLIENT_ID = '00f2564d102142fbb5d9229533aede9e'
SPOTIFY_CLIENT_SECRET = 'dbdb32a313a047db851ff02448a9b6d3'


class Music(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.queues = {}
        self.bot.loop.create_task(self._self_deafen())
        self.session = aiohttp.ClientSession()
        self.yt_key = " "

    async def _self_deafen(self):
        tasks = [guild.me.edit(deafen=True) for guild in self.bot.guilds]
        await asyncio.gather(*tasks)

    def embed(self, p):
        e = discord.Embed(title=p.title, color=0x020202)
        e.add_field(name='Duration', value=f'`{p.duration}`')
        e.add_field(name='Views', value=f'`{p.viewcount}`', inline=False)
        e.add_field(name='Rating', value=f'`{p.rating}`', inline=False)
        e.set_thumbnail(url=p.thumb)
        e.set_author(name=str(p.req), icon_url=str(p.req.avatar_url))
        return e

    @commands.command(usage='`tp!play (song name | YouTube URL)`')
    async def play(self, ctx, *, song: str):
        if not ctx.author.voice:
            return await ctx.message.reply('You must be in a Voice Channel!', delete_after=5)
        elif ctx.guild.voice_client:
            q = self.queues.get(ctx.guild.id)
            item = await self._parse(song, ctx)
            if item:
                q.put(item)
                await ctx.message.reply(content='`Added to the queue!`', embed=self.embed(self.queues.get(ctx.guild.id).last()))
            else:
                await ctx.message.reply('Could not find the song...')
                return
            if not q.np and not q.empty():
                await self._handler(ctx)
        else:
            await ctx.author.voice.channel.connect()
            items = await self._parse(song, ctx)
            if items:
                q = Queue(id=ctx.guild.id, guild=ctx.guild)
                q.put(items)
                self.queues[ctx.guild.id] = q
                await self._handler(ctx)
            else:
                await ctx.message.reply('Could not find the song...')
                return

    @commands.command(usage='`tp!volume (0 to 1)`')
    async def volume(self, ctx, amount: float):
        if vc := ctx.guild.voice_client:
            vc.source.volume = amount
            self.queues.get(ctx.guild.id).volume = amount

    @commands.command(usage='`tp!stop`')
    async def stop(self, ctx):
        if vc := ctx.guild.voice_client:
            await vc.disconnect()
            if ctx.guild.id in self.queues:
                del self.queues[ctx.guild.id]

    async def _parse(self, _input, ctx):
        if 'watch?' in _input:
            try:
                p = await self.run_async(pafy.new, _input)
            except:
                return None
            p.req = ctx.author
            return p
        elif 'playlist?' in _input:
            p = await self.run_async(pafy.get_playlist, _input)
            items = [p['items'][i]['pafy'] for i in range(0, len(p['items']))]
            for i in items:
                i.req = ctx.author
            return items
        else:
            d = await self._get_data(_input)
            try:
                r = d["items"][0]["id"]["videoId"]
                p = await self.run_async(pafy.new, r)
            except:
                return None
            p.req = ctx.author
            return p

    def spotify_url_parser(self, input):
        remove_text = r'https://open.spotify.com/track/'
        n = input.replace(remove_text, '')
        r = n.split('?')
        print(r[0])
        return r[0]

    async def _play(self, ctx, url, volume):
        stream = discord.FFmpegPCMAudio(url, **FFMPEG_OPTIONS)
        transform_stream = discord.PCMVolumeTransformer(stream, volume)
        ctx.guild.voice_client.play(transform_stream, after=lambda x: self.bot.loop.create_task(self._handler(ctx)))

    async def _handler(self, ctx):
        queue = self.queues.get(ctx.guild.id)
        if not queue:
            return
        if queue.empty() and not queue.loop:
            queue.np = None
            await ctx.voice_client.disconnect()
        elif not queue.loop:
            song = (await self.run_async(queue.get().getbestaudio)).url_https
            await self._play(ctx, song, queue.volume)
            await ctx.message.reply(content='`Now playing:`', embed=self.embed(queue.np))
        elif queue.loop:
            song = (await self.run_async(queue.loop_get().getbestaudio)).url_https
            await self._play(ctx, song, queue.volume)

    async def _get_data(self, _input):
        response = await self.session.get(YOUTUBE_SEARCH_ENDPOINT.format(_input, self.yt_key))
        result = await response.json()
        return result

    def queue_embed(self, q):
        desc = '```md\n' + f'Now Playing: {q.np.title} (Requested by: {q.np.req})\n\n' + '\n'.join(f'{n}. {s.title} (Requested by: {s.req})' for n, s in enumerate(q.queue)) + '```'
        embed = discord.Embed(title='Queue', description=desc)
        embed.set_author(name=q.guild.name, icon_url=str(q.guild.icon_url))
        return embed

    @commands.command(usage='`tp!queue`')
    async def queue(self, ctx):
        if len((q := self.queues.get(ctx.guild.id)).queue) > 0:
            embed = self.queue_embed(q)
        else:
            embed = discord.Embed(title='The queue is empty.')
        await ctx.message.reply(embed=embed)

    async def run_async(self, coro, *args):
        return await self.bot.loop.run_in_executor(None, coro, *args)

    @commands.command(usage='`tp!remove (index)`')
    async def remove(self, ctx, element: int):
        q = self.queues.get(ctx.guild.id)
        if not q:
            embed = discord.Embed(title='There is nothing playing!')
        elif element >= len(q.queue):
            embed = discord.Embed(title=f'There are only {len(q.queue)} songs playing!')
        else:
            q.queue.pop(element)
            embed = self.queue_embed(q)
        await ctx.message.reply(embed=embed)

    @commands.command(usage='`tp!skip`')
    async def skip(self, ctx):
        queue = self.queues.get(ctx.guild.id)
        vc = ctx.guild.voice_client
        self._skip(ctx, queue, vc)
        await ctx.message.reply('Skipping immediately!')

    def _skip(self, ctx, queue, vc):
        vc.stop()
        queue.loop = None

    @commands.command(usage='`tp!pause`')
    async def pause(self, ctx):
        if vc := ctx.voice_client:
            vc.pause()
            await ctx.send("Song paused.")

    @commands.command(usage='`tp!resume`')
    async def resume(self, ctx):
        if vc := ctx.voice_client:
            vc.resume()
            await ctx.send("Song resumed.")

    @commands.command(aliases=['np'], usage='`tp!nowplaying/np`')
    async def nowplaying(self, ctx):
        q = self.queues.get(ctx.guild.id)
        if q and q.np:
            await ctx.message.reply(embed=self.embed(q.np))
        else:
            await ctx.message.reply('`No songs are currently playing! Use ♫play (song) to play a song from YouTube in your voice channel.`')

    @commands.command(usage='`tp!loop`')
    async def loop(self, ctx):
        if queue := self.queues.get(ctx.guild.id):
            if not queue.loop:
                queue.loop = queue.queue + [queue.np]
                await ctx.message.reply(content='`Looping!`', embed=self.embed(queue.np))
            else:
                queue.loop = []
                await ctx.message.reply('`Stopping loop!`')

    @commands.Cog.listener()
Apache License 2.0
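A short, hypothetical wiring of the cog above into a bot, to show where the `on_voice_state_update` listener gets registered. The token is a placeholder, and the snippet assumes a discord.py 1.x-style synchronous `add_cog`, consistent with the API this code was written against.

# Illustrative bot setup for the Music cog; token and intents are placeholders.
# add_cog registers the tp! commands and, via @commands.Cog.listener(),
# the on_voice_state_update handler shown in this record.
import discord
from discord.ext import commands

from Music.music import Music  # module path taken from the record above

bot = commands.Bot(command_prefix="tp!", intents=discord.Intents.default())
bot.add_cog(Music(bot))  # synchronous in discord.py 1.x (assumed here)
bot.run("YOUR_BOT_TOKEN")  # placeholder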
nttcom/eclcli
eclcli/monitoring/client.py
make_client
python
def make_client(instance):
    from .monitoringclient import client as monitoring_client

    if _monitoring_api_version is not None:
        version = _monitoring_api_version
    else:
        version = instance._api_version[API_NAME]

    LOG.debug('Instantiating monitoring client for V%s', version)

    kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface)

    client = monitoring_client.Client(
        version,
        session=instance.session,
        timings=instance.timing,
        region_name=instance._region_name,
        **kwargs
    )
    return client
Returns a monitoring service client.
https://github.com/nttcom/eclcli/blob/25946165882b352c16df4077f5470d3c5e4b910e/eclcli/monitoring/client.py#L33-L55
import logging

from eclcli.common import utils

LOG = logging.getLogger(__name__)

DEFAULT_API_VERSION = '2'
API_VERSION_OPTION = 'os_monitoring_api_version'
API_NAME = 'monitoring'
API_VERSIONS = {
    "2": "monitoring.client",
}

_monitoring_api_version = None
Apache License 2.0
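To make the `instance` argument concrete, here is a hypothetical stand-in carrying only the attributes `make_client` actually reads. In eclcli the real object is the client manager built by the shell; everything below is illustrative.

# A stand-in "instance" with just what make_client touches:
# _api_version, session, timing, _region_name, and _interface.
from types import SimpleNamespace

instance = SimpleNamespace(
    _api_version={"monitoring": "2"},  # used when _monitoring_api_version is None
    session=None,                      # would be an authenticated session object
    timing=False,
    _region_name="RegionOne",          # illustrative region
    _interface="public",               # mapped to endpoint_type via build_kwargs_dict
)

# client = make_client(instance)  # would instantiate monitoring_client.Client('2', ...)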
alvarolm/gorename
dep/shellenv/__init__.py
get_user
python
def get_user():
    output = getuser()
    if not isinstance(output, str_cls):
        output = output.decode('utf-8')
    return output
Returns the current username as a unicode string

:return:
    A unicode string of the current user's username
https://github.com/alvarolm/gorename/blob/a9f7690cdbc78b7ef86e3743053a399011c006d2/dep/shellenv/__init__.py#L56-L67
from __future__ import unicode_literals, division, absolute_import, print_function

import sys
import os
from getpass import getuser

from ._types import str_cls, type_name
from ._encoding import env_encode, env_decode, path_encode, path_decode

if sys.platform == 'win32':
    from ._win import get_env, get_user_login_shell
elif sys.platform == 'darwin':
    from ._osx import get_env
    from ._osx.open_directory import get_user_login_shell
else:
    from ._linux import get_env
    from ._linux.getent import get_user_login_shell


__version__ = '1.4.2'
__version_info__ = (1, 4, 2)


_paths = {}


def get_path(shell=None):
    if shell is not None and not isinstance(shell, str_cls):
        raise TypeError('shell must be a unicode string, not %s' % type_name(shell))

    shell_key = shell if shell else 'default'

    if shell_key not in _paths:
        shell, env = get_env(shell)
        _paths[shell_key] = (shell, env.get('PATH', '').split(os.pathsep))

    return _paths[shell_key]
MIT License
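The function is a thin, Python-2-safe wrapper around `getpass.getuser`; a quick sketch of calling it follows. The top-level package name `shellenv` is inferred from the path `dep/shellenv/__init__.py` and is an assumption.

# get_user guarantees a unicode result even on Python 2, where
# getpass.getuser can return bytes; on Python 3 it is effectively a passthrough.
from shellenv import get_user  # package name inferred from dep/shellenv

name = get_user()
assert isinstance(name, str)  # on Python 3; on Python 2 this would be `unicode`
print(name)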
nsls-ii/pyxrf
pyxrf/model/fit_spectrum.py
Fit1D.output_summed_data_fit
python
def output_summed_data_fit(self, save_fit=True):
    xx = None
    if self.x0 is not None:
        a0, a1, a2 = (
            self.param_model.param_new["e_offset"]["value"],
            self.param_model.param_new["e_linear"]["value"],
            self.param_model.param_new["e_quadratic"]["value"],
        )
        xx = a0 + self.x0 * a1 + self.x0 ** 2 * a2
    if save_fit:
        logger.info("Saving spectrum after total spectrum fitting.")
        if (xx is None) or (self.y0 is None) or (self.fit_y is None):
            msg = "Not enough data to save spectrum/fit data. Total spectrum fitting was not run."
            raise RuntimeError(msg)
        data = np.array([self.x0, self.y0, self.fit_y])
    else:
        logger.info("Saving spectrum based on loaded or estimated parameters.")
        if (xx is None) or (self.y0 is None) or (self.param_model.total_y is None):
            msg = "Not enough data to save spectrum/fit data based on loaded or estimated parameters."
            raise RuntimeError(msg)
        data = np.array([xx, self.y0, self.param_model.total_y])

    output_fit_name = self.data_title + "_summed_spectrum_fit.txt"
    fpath = os.path.join(self.result_folder, output_fit_name)
    np.savetxt(fpath, data.T)
    logger.info(f"Spectrum fit data is saved to file '{fpath}'")
Save energy, summed data and fitting curve to a file.
https://github.com/nsls-ii/pyxrf/blob/0aa4e175f541edfaa8f71daf54b54a07e4ab2b04/pyxrf/model/fit_spectrum.py#L638-L664
from __future__ import absolute_import

import numpy as np
import time
import copy
import os
import re
import math
from collections import OrderedDict
import multiprocessing
import multiprocessing.pool
import h5py
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import lmfit
import platform
from distutils.version import LooseVersion
import dask.array as da

from atom.api import Atom, Str, observe, Typed, Int, List, Dict, Float, Bool
from skbeam.core.fitting.xrf_model import (
    ModelSpectrum,
    update_parameter_dict,
    set_parameter_bound,
    K_LINE,
    L_LINE,
    M_LINE,
    nnls_fit,
    construct_linear_model,
    register_strategy,
    TRANSITIONS_LOOKUP,
)
from skbeam.fluorescence import XrfElement as Element
from .parameters import calculate_profile, fit_strategy_list, trim_escape_peak, define_range
from .fileio import save_fitdata_to_hdf, output_data
from ..core.fitting import rfactor
from ..core.quant_analysis import ParamQuantEstimation
from ..core.map_processing import fit_xrf_map, TerminalProgressBar, prepare_xrf_map, snip_method_numba

import logging

logger = logging.getLogger(__name__)


class Fit1D(Atom):
    file_status = Str()
    img_dict = Dict()
    element_list = List()

    fit_x = Typed(np.ndarray)
    fit_y = Typed(np.ndarray)
    residual = Typed(np.ndarray)
    comps = Dict()
    fit_strategy1 = Int(0)
    fit_strategy2 = Int(0)
    fit_strategy3 = Int(0)
    fit_strategy4 = Int(0)
    fit_strategy5 = Int(0)
    fit_result = Typed(object)
    data_title = Str()
    runid = Int(0)

    working_directory = Str()
    result_folder = Str()

    all_strategy = Typed(object)

    x0 = Typed(np.ndarray)
    y0 = Typed(np.ndarray)
    bg = Typed(np.ndarray)
    es_peak = Typed(np.ndarray)
    cal_x = Typed(np.ndarray)
    cal_y = Typed(np.ndarray)
    cal_spectrum = Dict()

    selected_element = Str()
    selected_index = Int()
    elementinfo_list = List()

    img_title = Str()
    dict_to_plot = Dict()

    function_num = Int(0)
    nvar = Int(0)
    chi2 = Float(0.0)
    red_chi2 = Float(0.0)
    r2 = Float(0.0)
    global_param_list = List()

    fit_num = Int(100)
    ftol = Float(1e-5)
    c_weight = Float(1e2)

    fit_img = Dict()

    save_point = Bool(False)
    point1v = Int(0)
    point1h = Int(0)
    point2v = Int(0)
    point2h = Int(0)

    e_name = Str()
    add_element_intensity = Float(100.0)
    pileup_data = Dict()

    raise_bg = Float(0.0)
    pixel_bin = Int(0)
    linear_bg = Bool(False)
    use_snip = Bool(True)
    bin_energy = Int(0)
    fit_info = Str()
    pixel_fit_info = Str()

    pixel_fit_method = Int(0)

    result_map = Dict()
    map_interpolation = Bool(False)
    hdf_path = Str()
    hdf_name = Str()

    roi_sum_opt = Dict()
    scaler_keys = List()
    scaler_index = Int(0)

    param_model = Typed(object)
    io_model = Typed(object)

    param_quant_estimation = ParamQuantEstimation()
    qe_param_built_in_ref = Typed(object)
    qe_param_custom_ref = Typed(object)
    qe_standard_selected_ref = Typed(object)
    qe_standard_selected_copy = Typed(object)
    qe_standard_distance_to_sample = Float(0.0)

    def __init__(self, *, param_model, io_model, working_directory):
        self.working_directory = working_directory
        self.result_folder = working_directory
        self.all_strategy = OrderedDict()

        self.param_model = param_model
        self.io_model = io_model

        self.pileup_data = {"element1": "Si_K", "element2": "Si_K", "intensity": 100.0}

        self.fit_strategy1 = 0
        self.fit_strategy2 = 0
        self.fit_strategy1 = 1
        self.fit_strategy2 = 0

        self.roi_sum_opt["status"] = False
        self.roi_sum_opt["low"] = 0.0
        self.roi_sum_opt["high"] = 10.0

        self.qe_standard_selected_ref = None
        self.qe_standard_selected_copy = None

    def result_folder_changed(self, change):
        self.result_folder = change["value"]

    def data_title_update(self, change):
        self.data_title = change["value"]

    def runid_update(self, change):
        self.runid = change["value"]

    def img_dict_updated(self, change):
        if change["value"]:
            _key = [k for k in self.io_model.img_dict.keys() if "scaler" in k]
            if len(_key) != 0:
                self.scaler_keys = sorted(self.io_model.img_dict[_key[0]].keys())

    def scaler_index_update(self, change):
        self.scaler_index = change["value"]

    def img_title_update(self, change):
        self.img_title = change["value"]

    def dict_to_plot_update(self, change):
        self.dict_to_plot = change["value"]

    def update_selected_index(self, selected_element=None, element_list_new=None):
        if selected_element is None:
            element = self.selected_element
        else:
            element = selected_element
        if element_list_new is None:
            element_list = self.param_model.element_list
        else:
            element_list = element_list_new
        if not element_list:
            ind = 0
        else:
            try:
                ind = element_list.index(element) + 1
            except ValueError:
                ind = 0
        if ind == self.selected_index:
            self.selected_index = 0
        self.selected_index = ind

    @observe("selected_index")
    def _selected_element_changed(self, change):
        if change["value"] > 0:
            ind_sel = change["value"] - 1
            if ind_sel >= len(self.param_model.element_list):
                ind_sel = len(self.param_model.element_list) - 1
                self.selected_index = ind_sel + 1
            self.selected_element = self.param_model.element_list[ind_sel]
            if len(self.selected_element) <= 4:
                element = self.selected_element.split("_")[0]
                self.elementinfo_list = sorted(
                    [
                        e
                        for e in list(self.param_model.param_new.keys())
                        if (element + "_" in e) and ("pileup" not in e)
                    ]
                )
                logger.info(f"Element line info: {self.elementinfo_list}")
            else:
                element = self.selected_element
                self.elementinfo_list = sorted(
                    [e for e in list(self.param_model.param_new.keys()) if element.replace("-", "_") in e]
                )
                logger.info(f"User defined or pileup peak info: {self.elementinfo_list}")
        else:
            self.elementinfo_list = []

    def select_index_by_eline_name(self, eline_name):
        if eline_name in self.param_model.element_list:
            self.selected_index = self.param_model.element_list.index(eline_name) + 1
        else:
            raise Exception(f"Line '{eline_name}' is not in the list of selected element lines.")

    def apply_default_param(self):
        selected_element = self.selected_element
        self.selected_index = 0
        element_list = self.param_model.param_new["non_fitting_values"]["element_list"]
        element_list = [e.strip(" ") for e in element_list.split(",")]
        element_list = [_ for _ in element_list if _]
        self.param_model.element_list = element_list
        self.update_element_info()
        self.update_selected_index(selected_element=selected_element, element_list_new=element_list)
        self.global_param_list = []
        self.global_param_list = sorted(
            [k for k in self.param_model.param_new.keys() if k == k.lower() and k != "non_fitting_values"]
        )
        self.define_range()

    def filename_update(self, change):
        self.hdf_name = change["value"]
        self.hdf_path = os.path.join(self.result_folder, self.hdf_name)

    @observe("fit_strategy1")
    def update_strategy1(self, change):
        self.all_strategy.update({"strategy1": change["value"]})
        if change["value"]:
            logger.info("Setting strategy (preset) for step 1: {}".format(fit_strategy_list[change["value"] - 1]))

    @observe("fit_strategy2")
    def update_strategy2(self, change):
        self.all_strategy.update({"strategy2": change["value"]})
        if change["value"]:
            logger.info("Setting strategy (preset) for step 2: {}".format(fit_strategy_list[change["value"] - 1]))

    @observe("fit_strategy3")
    def update_strategy3(self, change):
        self.all_strategy.update({"strategy3": change["value"]})
        if change["value"]:
            logger.info("Setting strategy (preset) for step 3: {}".format(fit_strategy_list[change["value"] - 1]))

    @observe("fit_strategy4")
    def update_strategy4(self, change):
        self.all_strategy.update({"strategy4": change["value"]})
        if change["value"]:
            logger.info("Strategy at step 4 is: {}".format(fit_strategy_list[change["value"] - 1]))

    @observe("fit_strategy5")
    def update_strategy5(self, change):
        self.all_strategy.update({"strategy5": change["value"]})
        if change["value"]:
            logger.info("Strategy at step 5 is: {}".format(fit_strategy_list[change["value"] - 1]))

    def update_param_with_result(self):
        update_parameter_dict(self.param_model.param_new, self.fit_result)

    def define_range(self):
        lowv = self.param_model.param_new["non_fitting_values"]["energy_bound_low"]["value"]
        highv = self.param_model.param_new["non_fitting_values"]["energy_bound_high"]["value"]
        self.x0, self.y0 = define_range(
            self.io_model.data,
            lowv,
            highv,
            self.param_model.param_new["e_offset"]["value"],
            self.param_model.param_new["e_linear"]["value"],
        )

    def get_background(self):
        self.bg = snip_method_numba(
            self.y0,
            self.param_model.param_new["e_offset"]["value"],
            self.param_model.param_new["e_linear"]["value"],
            self.param_model.param_new["e_quadratic"]["value"],
            width=self.param_model.param_new["non_fitting_values"]["background_width"],
        )

    def get_profile(self):
        if self.x0 is None or self.y0 is None:
            return
        self.cal_x, self.cal_spectrum, area_dict = calculate_profile(
            self.x0, self.y0, self.param_model.param_new, self.param_model.element_list
        )
        if self.param_model.param_new["non_fitting_values"]["escape_ratio"] > 0:
            self.cal_spectrum["escape"] = trim_escape_peak(
                self.io_model.data, self.param_model.param_new, len(self.y0)
            )
        self.cal_y = np.zeros(len(self.cal_x))
        for k, v in self.cal_spectrum.items():
            self.cal_y += v
        self.residual = self.cal_y - self.y0

    def fit_data(self, x0, y0, *, init_params=True):
        fit_num = self.fit_num
        ftol = self.ftol
        c_weight = 1
        params = copy.deepcopy(self.param_model.param_new)
        if init_params:
            for k, p in params.items():
                if "area" in k:
                    p["value"] = 1000
        MS = ModelSpectrum(params, self.param_model.element_list)
        MS.assemble_models()
        weights = 1 / np.sqrt(c_weight + np.abs(y0))
        result = MS.model_fit(x0, y0, weights=weights, maxfev=fit_num, xtol=ftol, ftol=ftol, gtol=ftol)
        self.fit_x = (
            result.values["e_offset"] + result.values["e_linear"] * x0 + result.values["e_quadratic"] * x0 ** 2
        )
        self.fit_y = result.best_fit
        self.fit_result = result
        self.residual = self.fit_y - y0

    def fit_multiple(self):
        self.define_range()
        self.get_background()

        if self.param_model.param_new["non_fitting_values"]["escape_ratio"] > 0:
            self.es_peak = trim_escape_peak(self.io_model.data, self.param_model.param_new, self.y0.size)
            y0 = self.y0 - self.bg - self.es_peak
        else:
            y0 = self.y0 - self.bg

        t0 = time.time()
        self.fit_info = (
            "Spectrum fitting of the sum spectrum (incident energy "
            f"{self.param_model.param_new['coherent_sct_energy']['value']})."
        )

        init_params = True
        for k, v in self.all_strategy.items():
            if v:
                strat_name = fit_strategy_list[v - 1]
                logger.info(self.fit_info)
                strategy = extract_strategy(self.param_model.param_new, strat_name)
                register_strategy(strat_name, strategy)
                set_parameter_bound(self.param_model.param_new, strat_name)
                self.fit_data(self.x0, y0, init_params=init_params)
                init_params = False
                self.update_param_with_result()

                for key, val in self.param_model.param_new.items():
                    if key.endswith("_area") and val["value"] <= 0.0:
                        _small_value_for_area = 0.1
                        logger.warning(
                            f"Fitting resulted in negative value for '{key}' ({val['value']}). \n"
                            f"    In order to continue using the emission line in future computations, "
                            f"the fitted area is set to a small value ({_small_value_for_area}).\n    Delete "
                            f"the emission line from the list if you know it is not present in "
                            f"the sample."
                        )
                        val["value"] = _small_value_for_area

        self.r2 = cal_r2(y0, self.fit_y)
        self.assign_fitting_result()

        t1 = time.time()
        logger.warning("Time used for summed spectrum fitting is : {}".format(t1 - t0))

        self.comps.clear()
        comps = self.fit_result.eval_components(x=self.x0)
        self.comps = combine_lines(comps, self.param_model.element_list, self.bg)

        if self.param_model.param_new["non_fitting_values"]["escape_ratio"] > 0:
            self.fit_y += self.bg + self.es_peak
            self.comps["escape"] = self.es_peak
        else:
            self.fit_y += self.bg

        self.save_result()
        self.assign_fitting_result()
        self.fit_info = "Summed spectrum fitting is done!"
        logger.info("-------- " + self.fit_info + " --------")

    def compute_current_rfactor(self, save_fit=True):
        rf = None
        if save_fit:
            if (self.y0 is not None) and (self.fit_y is not None) and (self.y0.shape == self.fit_y.shape):
                rf = rfactor(self.y0, self.fit_y)
        else:
            if (
                (self.param_model.y0 is not None)
                and (self.param_model.total_y is not None)
                and (self.param_model.y0.shape == self.param_model.total_y.shape)
            ):
                rf = rfactor(self.param_model.y0, self.param_model.total_y)
        return rf
BSD 3-Clause New or Revised License
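Since `np.savetxt(fpath, data.T)` writes three columns (energy axis, summed spectrum, fit), reading the result back is straightforward. The file name below assumes a hypothetical `data_title` of `scan2D_123`:

# Reading back the three-column text file written by output_summed_data_fit.
# Columns: calibrated energy axis, summed experimental spectrum, and the
# fitted (or estimated) total spectrum.
import numpy as np

fpath = "scan2D_123_summed_spectrum_fit.txt"  # hypothetical data_title prefix
energy, spectrum, fit = np.loadtxt(fpath, unpack=True)
print(energy.shape, spectrum.shape, fit.shape)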
jameshicks/pydigree
pydigree/genotypes/chromosometemplate.py
ChromosomeTemplate.from_genomesimla
python
def from_genomesimla(filename):
    return read_gs_chromosome_template(filename)
Reads positions and frequencies from a genomeSIMLA template file

:param filename: path to the template
:type filename: string

:returns: Template containing the data from the file
:rtype: ChromosomeTemplate
https://github.com/jameshicks/pydigree/blob/b268402b14b053d899443ff5ef86aea319d2614b/pydigree/genotypes/chromosometemplate.py#L177-L186
from bisect import bisect_right

import numpy as np

from pydigree.common import cumsum
from pydigree.genotypes import Alleles, SparseAlleles
from pydigree.exceptions import SimulationError
from pydigree.io.genomesimla import read_gs_chromosome_template


class ChromosomeSet(object):
    def __init__(self):
        self.chroms = []

    def __iter__(self):
        for c in self.chroms:
            yield c

    def __getitem__(self, idx):
        return self.chroms[idx]

    def __len__(self):
        return len(self.chroms)

    def add_chromosome(self, template):
        self.chroms.append(template)

    def finalize(self):
        for c in self.chroms:
            c.finalize()

    def nloci(self):
        return sum(c.nmark() for c in self.chroms)

    def nchrom(self):
        return len(self.chroms)

    def frequency(self, chrom, variant):
        return self.chroms[chrom].frequencies[variant]

    def physical_map(self, chrom, variant):
        return self.chroms[chrom].physical_map[variant]

    def marker_label(self, chrom, variant):
        return self.chroms[chrom].labels[variant]

    def select_random_loci(self, nloc):
        available = self.nloci()
        loci = set(np.random.randint(0, available, nloc))
        for _ in range(100):
            if len(loci) == nloc:
                break
            needed = nloc - len(loci)
            loci |= set(np.random.randint(0, available, needed))
        else:
            raise SimulationError
        loci = sorted(loci)
        chromsizes = [0] + cumsum([c.nmark() for c in self.chroms])
        chromidx = 0
        for loc in loci:
            while loc > chromsizes[chromidx]:
                chromidx += 1
            yield chromidx, loc - (chromsizes[chromidx])


class ChromosomeTemplate(object):
    def __init__(self, label=None):
        self.final = False
        self.label = label
        self.genetic_map = []
        self.physical_map = []
        self.frequencies = []
        self.labels = []
        self.reference = []
        self.alternates = []

    def __str__(self):
        return 'ChromosomeTemplate object %s: %s markers, %s cM' % (
            self.label if self.label is not None else 'object',
            len(self.frequencies),
            max(self.genetic_map) if self.genetic_map else 0)

    @property
    def outputlabel(self):
        if self.label:
            return self.label
        else:
            return 0

    def __iter__(self):
        return zip(self.labels, self.genetic_map, self.physical_map)

    def iterinfo(self):
        return zip(self.labels, self.genetic_map, self.physical_map, self.frequencies)

    @staticmethod
Apache License 2.0
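A hypothetical use of the static method, which simply delegates to `read_gs_chromosome_template`; the file path is illustrative.

# Loading a genomeSIMLA chromosome template; the path is a placeholder.
from pydigree.genotypes.chromosometemplate import ChromosomeTemplate

template = ChromosomeTemplate.from_genomesimla("templates/chrom1.gs")  # hypothetical file
print(template)  # e.g. "ChromosomeTemplate object ...: N markers, M cM"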
darrenburns/ward
ward/_terminal.py
TestResultDisplayWidget.footer
python
def footer(self, test_results: List[TestResult]) -> Optional[RenderableType]:
    pass
This method should return an object that can be rendered by Rich.

It will be inserted into the "footer" of the test suite result display,
which hugs the bottom of the output as the suite runs.

This method may be called at any time to refresh the state of the footer,
so it should be a pure function.

If this function returns ``None``, it will not cause anything to be
rendered in the footer. You can use this to "hide" the footer based on
state captured during the suite.
https://github.com/darrenburns/ward/blob/844762d7714348dc38a3102c73e8c97a15d86150/ward/_terminal.py#L337-L350
import abc
import inspect
import itertools
import math
import os
import platform
import statistics
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from textwrap import dedent
from typing import (
    Collection,
    Dict,
    Generator,
    Iterable,
    Iterator,
    List,
    Optional,
    Tuple,
    Type,
    Union,
)

from rich.columns import Columns
from rich.console import (
    Console,
    ConsoleOptions,
    RenderableType,
    RenderGroup,
    RenderResult,
)
from rich.highlighter import NullHighlighter
from rich.live import Live
from rich.markdown import Markdown
from rich.padding import Padding
from rich.panel import Panel
from rich.pretty import Pretty
from rich.progress import (
    BarColumn,
    Progress,
    RenderableColumn,
    SpinnerColumn,
    TimeElapsedColumn,
)
from rich.rule import Rule
from rich.syntax import Syntax
from rich.table import Table
from rich.text import Text
from rich.theme import Theme
from rich.traceback import Traceback
from rich.tree import Tree

from ward._diff import Diff
from ward._fixtures import FixtureHierarchyMapping, fixture_parents_and_children
from ward._suite import Suite
from ward._utilities import group_by
from ward._ward_version import __version__
from ward.expect import (
    EQUALITY_COMPARISONS,
    IN_COMPARISONS,
    INEQUALITY_COMPARISONS,
    IS_COMPARISONS,
    Comparison,
    TestAssertionFailure,
)
from ward.fixtures import Fixture
from ward.models import ExitCode, Scope
from ward.testing import Test, TestOutcome, TestResult, fixtures_used_directly_by_tests

HORIZONTAL_PAD = (0, 1, 0, 1)

INDENT = " " * 2

theme = Theme(
    {
        "title": "bold",
        "heading": "bold",
        "pass": "#ffffff on #137C39",
        "pass.textonly": "#189F4A",
        "fail": "#ffffff on #BF2D2D",
        "fail.textonly": "#BF2D2D",
        "fail.header": "bold #BF2D2D",
        "skip": "#ffffff on #0E67B3",
        "skip.textonly": "#1381E0",
        "xpass": "#162740 on #F4C041",
        "xpass.textonly": "#F4C041",
        "xfail": "#ffffff on #695CC8",
        "xfail.textonly": "#695CC8",
        "muted": "dim",
        "info": "yellow italic",
        "info.border": "yellow",
        "dryrun": "#ffffff on #162740",
        "rule.line": "#189F4A",
        "fixture.name": "bold #1381E0",
        "fixture.scope.test": "bold #189F4A",
        "fixture.scope.module": "bold #F4C041",
        "fixture.scope.global": "bold #EA913C",
        "usedby": "#9285F6",
    }
)
rich_console = Console(theme=theme, highlighter=NullHighlighter())


def format_test_id(test_result: TestResult) -> str:
    return f"{format_test_location(test_result.test)}{format_test_case_number(test_result.test)}"


def format_test_location(test: Test) -> str:
    return f"{test.module_name}:{test.line_number}"


def format_test_case_number(test: Test) -> str:
    param_meta = test.param_meta
    if param_meta.group_size > 1:
        pad = len(str(param_meta.group_size))
        iter_indicator = (
            f"[{param_meta.instance_index + 1:>{pad}}/{param_meta.group_size}]"
        )
    else:
        iter_indicator = ""
    return iter_indicator


class TestOutputStyle(str, Enum):
    TEST_PER_LINE = "test-per-line"
    DOTS_GLOBAL = "dots-global"
    DOTS_MODULE = "dots-module"
    LIVE = "live"
    NONE = "none"


class TestProgressStyle(str, Enum):
    INLINE = "inline"
    BAR = "bar"
    NONE = "none"


def get_test_result_line(
    test_result: TestResult,
    test_index: int,
    num_tests: int,
    progress_styles: List[TestProgressStyle],
    extra_left_pad: int = 0,
) -> Table:
    outcome_tag = test_result.outcome.name[:4]

    test = test_result.test
    test_location = format_test_location(test)
    test_case_number = format_test_case_number(test)
    test_style = outcome_to_style(test_result.outcome)

    grid = Table.grid(expand=True)
    grid.add_column()
    grid.add_column()
    grid.add_column()
    columns: List[RenderableType] = [
        Padding(outcome_tag, style=test_style, pad=(0, 1, 0, 1 + extra_left_pad)),
        Padding(f"{test_location}{test_case_number}", style="muted", pad=(0, 1, 0, 1)),
        Padding(
            Markdown(test.description, inline_code_theme="ansi_dark"), pad=(0, 1, 0, 0)
        ),
    ]

    reason = getattr(test.marker, "reason", "")
    if reason:
        assert test.marker is not None, "if reason exists, marker must too"
        if test.marker.active:
            grid.add_column(justify="center", style=test_style)
            columns.append(Padding(reason, pad=(0, 1, 0, 1)))

    if TestProgressStyle.INLINE in progress_styles:
        grid.add_column(justify="right", style="muted")
        columns.append(f"{(test_index + 1) / num_tests:>4.0%}")

    grid.add_row(*columns)

    return grid


INLINE_PROGRESS_LEN = 5


def get_dot(result: TestResult) -> Text:
    style = outcome_to_style(result.outcome)
    return Text(result.outcome.display_char, style=style, end="")


@dataclass
class TestTimingStatsPanel:
    all_tests_in_session: List[TestResult]
    num_tests_to_show: int

    @property
    def _raw_test_durations_secs(self):
        return [r.test.timer.duration for r in self.all_tests_in_session]

    @property
    def _median_secs(self):
        return statistics.median(self._raw_test_durations_secs)

    @property
    def _percentile99_secs(self):
        data = self._raw_test_durations_secs
        size = len(data)
        percentile = 99
        return sorted(data)[int(math.ceil((size * percentile) / 100)) - 1]

    def __rich_console__(self, c: Console, co: ConsoleOptions) -> RenderResult:
        def sort_key(r: TestResult) -> float:
            assert r.test.timer, "test must've been run already"
            return r.test.timer.duration

        test_results = sorted(self.all_tests_in_session, key=sort_key, reverse=True)
        grid = Table.grid(padding=(0, 2, 0, 0))
        grid.add_column(justify="right")
        grid.add_column()
        grid.add_column()

        for result in test_results[: self.num_tests_to_show]:
            assert result.test.timer, "test must've been run already"
            time_taken_secs = result.test.timer.duration
            time_taken_millis = time_taken_secs * 1000
            test_id = format_test_id(result)
            description = result.test.description
            grid.add_row(
                f"[b]{time_taken_millis:.0f}[/b]ms",
                Text(test_id, style="muted"),
                description,
            )

        num_slowest_displayed = min(
            len(self.all_tests_in_session), self.num_tests_to_show
        )
        panel = Panel(
            RenderGroup(
                Padding(
                    f"Median: [b]{self._median_secs * 1000:.2f}[/b]ms"
                    f" [muted]|[/muted] "
                    f"99th Percentile: [b]{self._percentile99_secs * 1000:.2f}[/b]ms",
                    pad=(0, 0, 1, 0),
                ),
                grid,
            ),
            title=f"[b white]{num_slowest_displayed} Slowest Tests[/b white]",
            style="none",
            border_style="rule.line",
        )

        yield panel


@dataclass
class SessionPrelude:
    time_to_collect_secs: float
    num_tests_collected: int
    num_fixtures_collected: int
    config_path: Optional[Path]
    python_impl: str = field(default=platform.python_implementation())
    python_version: str = field(default=platform.python_version())
    ward_version: str = field(default=__version__)

    def __rich_console__(self, c: Console, co: ConsoleOptions) -> RenderResult:
        yield Rule(
            Text(
                f"Ward {self.ward_version} | {self.python_impl} {self.python_version}",
                style="title",
            )
        )
        if self.config_path:
            try:
                path: Union[Path, str] = self.config_path.relative_to(Path.cwd())
            except ValueError:
                path = self.config_path.name
            yield f"Loaded config from [b]{path}[/b]."

        test_plural = "test" if self.num_tests_collected == 1 else "tests"
        fixture_plural = "fixture" if self.num_fixtures_collected == 1 else "fixtures"
        yield (
            f"Found [b]{self.num_tests_collected}[/b] {test_plural} "
            f"and [b]{self.num_fixtures_collected}[/b] {fixture_plural} "
            f"in [b]{self.time_to_collect_secs:.2f}[/b] seconds."
        )


class ResultProcessor(abc.ABC):
    @abc.abstractmethod
    def handle_result(self, test_result: TestResult):
        pass


class TerminalResultProcessor(ResultProcessor):
    def __init__(
        self,
        suite: Suite,
        test_output_style: str,
        progress_styles: List[TestProgressStyle],
        config_path: Optional[Path],
        show_diff_symbols: bool = False,
    ):
        self.suite = suite
        self.test_output_style = test_output_style
        self.progress_styles = progress_styles
        self.config_path = config_path
        self.show_diff_symbols = show_diff_symbols

    def handle_result(self, test_result: TestResult):
        pass


class TestResultDisplayWidget:
    def __init__(self, num_tests: int, progress_styles: List[TestProgressStyle]):
        self.console = rich_console
        self.num_tests = num_tests
        self.progress_styles = progress_styles
MIT License
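Finally, a minimal concrete `footer` implementation consistent with the contract in the docstring above: a pure function of the captured results, returning a Rich renderable or `None` to hide the footer. This subclass is illustrative, not part of Ward.

# A hypothetical widget whose footer renders one outcome character per
# completed test, and hides itself until the first result arrives.
from typing import List, Optional

from rich.console import RenderableType
from rich.text import Text

from ward.testing import TestResult


class DotsFooterWidget(TestResultDisplayWidget):
    def footer(self, test_results: List[TestResult]) -> Optional[RenderableType]:
        if not test_results:
            return None  # returning None hides the footer
        # display_char is the same attribute get_dot() uses above
        return Text("".join(r.outcome.display_char for r in test_results))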