repository_name: string, lengths 7 to 107
function_path: string, lengths 4 to 190
function_identifier: string, lengths 1 to 236
language: string, 1 distinct value
function: string, lengths 9 to 647k
docstring: string, lengths 5 to 488k
function_url: string, lengths 71 to 285
context: string, lengths 0 to 2.51M
license: string, 5 distinct values
cc1-cloud/cc1
src/cm/models/vm.py
VM.admin_get
python
def admin_get(vm_id):
    try:
        vm = VM.objects.get(pk=vm_id)
    except:
        raise CMException('vm_get')

    return vm
@parameter{vm_id,int} id of the requested VM
@returns{VM} requested VM instance
@raises{vm_get,CMException} no such VM
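A minimal usage sketch for admin_get, assuming a configured cm Django environment; the describe_vm helper and the id value are illustrative and not part of the repository.

from cm.models.vm import VM
from cm.utils.exception import CMException

def describe_vm(vm_id):
    # Administrative lookup: no ownership check is applied, and
    # CMException('vm_get') is raised when the id matches no VM row.
    try:
        vm = VM.admin_get(vm_id)
    except CMException:
        return None
    return vm.dict  # serializable summary exposed by the model

print(describe_vm(42))  # hypothetical id, for illustration only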
https://github.com/cc1-cloud/cc1/blob/8113673fa13b6fe195cea99dedab9616aeca3ae8/src/cm/models/vm.py#L444-L458
from cm.settings import VNC_PORTS, NOVNC_PORTS from cm.models.node import Node from cm.models.public_ip import PublicIP from cm.models.template import Template from cm.models.user import User from cm.models.farm import Farm from cm.models.system_image import SystemImage from cm.models.iso_image import IsoImage from cm.models.lease import Lease from cm.models.available_network import AvailableNetwork from cm.utils.exception import CMException from cm.utils import log from cm.utils.monia import RrdHandler from django.db import models, transaction from django.conf import settings from django.template import loader, Context from django.conf import settings as django_settings from common.states import vm_states, image_states, farm_states, lease_states, vnc_states from common.utils import password_gen from datetime import datetime import os import uuid import libvirt import subprocess from netaddr import IPNetwork from cm.utils import message class VM(models.Model): name = models.CharField(max_length=128) node = models.ForeignKey(Node) user = models.ForeignKey(User) template = models.ForeignKey(Template) system_image = models.ForeignKey(SystemImage) iso_image = models.ForeignKey(IsoImage, null=True, blank=True) libvirt_id = models.IntegerField() state = models.IntegerField() start_time = models.DateTimeField() stop_time = models.DateTimeField(null=True, blank=True) ctx_key = models.CharField(max_length=128, null=True, blank=True) ctx_api_version = models.CharField(max_length=10, null=True, blank=True) vnc_passwd = models.CharField(max_length=45) ssh_key = models.TextField(null=True, blank=True) ssh_username = models.CharField(max_length=45, null=True, blank=True) description = models.TextField(null=True, blank=True) save_vm = models.IntegerField() farm = models.ForeignKey(Farm, related_name='vms', null=True) hostname = models.CharField(max_length=256, null=True, blank=True) vnc_port = models.IntegerField() novnc_port = models.IntegerField(default=0) vnc_enabled = models.IntegerField(default=0) reservation_id = models.IntegerField(default=0) user_data = models.CharField(max_length=32768, null=True, blank=True) class Meta: app_label = 'cm' def __unicode__(self): return self.name @property def dict(self): d = {} d['vm_id'] = self.id d['user_id'] = self.user.id d['name'] = self.name d['state'] = self.state d['leases'] = [l.dict for l in self.lease_set.all()] d['image_name'] = self.system_image.name d['image_id'] = self.system_image.id d['template_name'] = self.template.name d['platform'] = 0 d['description'] = self.description or '' d['vnc_endpoint'] = '%s:%d' % (settings.VNC_ADDRESS, self.vnc_port) d['novnc_endpoint'] = '%s:%d' % (settings.VNC_ADDRESS, self.novnc_port) d['vnc_enabled'] = self.vnc_enabled d['vnc_passwd'] = self.vnc_passwd or '' if self.iso_image: d['iso_images'] = [{'id': self.iso_image.id, 'name': self.iso_image.name}] else: d['iso_images'] = [] d['storage_images'] = [{'storage_image_id': img.id, 'name': img.name} for img in self.storage_images] d['cpu_load'] = self.cpu_load return d @property def long_dict(self): d = {} d['vm_id'] = self.id d['user_id'] = self.user.id d['name'] = self.name d['state'] = self.state d['leases'] = [l.dict for l in self.lease_set.all()] d['image_name'] = self.system_image.name d['image_id'] = self.system_image.id d['template_name'] = self.template.name d['platform'] = 0 d['description'] = self.description or '' d['vnc_endpoint'] = '%s:%d' % (settings.VNC_ADDRESS, self.vnc_port) d['novnc_endpoint'] = '%s:%d' % (settings.VNC_ADDRESS, self.novnc_port) 
d['vnc_enabled'] = self.vnc_enabled d['vnc_passwd'] = self.vnc_passwd or '' d['start_time'] = self.start_time delta = datetime.now() - self.start_time d['uptime'] = delta.seconds + 24 * 3600 * delta.days d['node'] = self.node.address d['libvirt_id'] = self.libvirt_id d['ssh_username'] = self.ssh_username or '' d['ssh_key'] = self.ssh_key or '' d['reservation_id'] = self.reservation_id d['user_data'] = self.user_data if self.iso_image: d['iso_images'] = [{'id': self.iso_image.id, 'name': self.iso_image.name}] else: d['iso_images'] = [] d['storage_images'] = [{'storage_image_id': img.id, 'name': img.name, 'disk_controller': img.disk_controller} for img in self.storage_images] d['cpu_load'] = self.cpu_load return d @staticmethod def create(user, name, description, image_id, template_id, public_ip_id, iso_list, disk_list, vnc, groups, ssh_key=None, ssh_username=None, count=1, farm=None, head_template_id=None, node_id=False, lease_id=None, user_data=None): from cm.models.storage_image import StorageImage from cm.utils.threads.vm import VMThread template = Template.get(template_id) image = SystemImage.get(user.id, image_id, groups) if image.state != image_states['ok']: raise CMException('image_unavailable') if farm: head_template = Template.get(head_template_id) wn_template = template user.check_quota([(head_template, 1), (wn_template, count)]) count += 1 else: user.check_quota([(template, count)]) vms = [] reservation_id = None for i in range(count): log.debug(user.id, "Looking for node") node = Node.get_free_node(head_template, image, node_id) if farm and i == 0 else Node.get_free_node(template, image, node_id) log.info(user.id, 'Selected node: %d' % node.id) vm = VM() vm.libvirt_id = -1 if farm: if i == 0: vm.name = '%s-head' % name vm.description = 'Farm head' vm.template = head_template else: vm.name = '%s-wn%d' % (name, i) vm.description = 'Worker Node' vm.template = wn_template else: vm.template = template vm.description = description if count > 1: vm.name = '%s_%d' % (name, i + 1) else: vm.name = name vm.user = user vm.state = vm_states['init'] vm.start_time = datetime.now() vm.system_image = image vm.node = node vm.save_vm = True if farm: vm.farm = farm used_ports = VM.objects.exclude(state__in=[vm_states['closed'], vm_states['erased']]).values_list('vnc_port', flat=True) for new_vnc_port in xrange(VNC_PORTS['START'], VNC_PORTS['END'] + 1): if new_vnc_port not in used_ports and new_vnc_port not in VNC_PORTS['EXCLUDE']: break else: raise CMException('vm_vnc_not_found') log.debug(user.id, "Found vnc port: %d" % new_vnc_port) vm.vnc_port = new_vnc_port used_ports = VM.objects.exclude(state__in=[vm_states['closed'], vm_states['erased']]).values_list('novnc_port', flat=True) for new_novnc_port in xrange(NOVNC_PORTS['START'], NOVNC_PORTS['END'] + 1): if new_novnc_port not in used_ports and new_novnc_port not in NOVNC_PORTS['EXCLUDE']: break else: raise CMException('vm_novnc_not_found') log.debug(user.id, "Found novnc port: %d" % new_novnc_port) vm.novnc_port = new_novnc_port if vnc: vm.attach_vnc() vm.vnc_passwd = password_gen(13, chars=['letters', 'digits'], extra_chars='!@#$%^&*()') vm.ssh_key = ssh_key vm.ssh_username = ssh_username vm.user_data = user_data vm.save() if not reservation_id: reservation_id = vm.id vm.reservation_id = reservation_id vm.save() if farm and i == 0: farm.head = vm vms.append(vm) log.debug(user.id, "Attaching disks") disk_devs = [] if i == 0 and disk_list: for disk_id in disk_list: log.debug(user.id, 'Attaching disks to first VM') disk = StorageImage.get(user.id, 
disk_id) if disk.vm != None: raise CMException('image_attached') while disk.disk_dev in disk_devs: disk.disk_dev += 1 disk_devs.append(disk.disk_dev) disk.vm = vm disk.save() log.debug(user.id, "Attaching CD") if i == 0 and iso_list: for iso_id in iso_list: log.debug(user.id, 'Attaching iso to first VM') iso = IsoImage.get(user.id, iso_id) iso.check_attached() vm.iso_image = iso vm.save() for i, vm in enumerate(vms): if lease_id != None: lease = Lease.objects.get(id=lease_id) if lease.user_network.user != user: raise CMException('lease_permission') if lease.vm != None: raise CMException('lease_attached') lease.vm = vm log.debug(user.id, "Attached ip: %s" % lease.address) else: lease = AvailableNetwork.get_lease(user) lease.vm = vm lease.save() log.debug(user.id, "Attached ip: %s" % lease.address) if i == 0 and public_ip_id > 0: log.debug(user.id, "Attaching PublicIP") try: publicip = PublicIP.objects.filter(user=user).get(id=public_ip_id) publicip.assign(lease) publicip.save() except Exception, e: log.exception(user.id, str(e)) raise CMException("lease_not_found") return vms @property def path(self): conn = libvirt.open(self.node.conn_string) storage = conn.storagePoolLookupByName('images') storage.refresh(0) path = storage.storageVolLookupByName('info').path() conn.close() return os.path.join(os.path.dirname(path), str(self.id)) def is_head(self): return bool(self.farm) and self == self.farm.head def is_farm(self): return bool(self.farm) def libvirt_template(self): try: lv_template = loader.get_template("%s.xml" % self.node.driver) c = Context({'vm': self, 'uuid': uuid.uuid1(), 'memory': self.template.memory * 1024, 'cpu': self.template.cpu, 'image_path': self.path }) domain_template = lv_template.render(c) except Exception, e: log.debug(self.user.id, str(e)) return domain_template def network_template(self): try: django_settings.configure() except: pass try: template = open("%s/%s-network.xml" % (settings.TEMPLATE_DIR, settings.NETWORK_TYPE)).read() lv_template = loader.get_template_from_string(template) c = Context({'vm': self}) lv_template = lv_template.render(c) except Exception, e: log.debug(self.user.id, str(e)) return lv_template @staticmethod def get(user_id, vm_id): try: vm = VM.objects.get(pk=vm_id) except: raise CMException('vm_get') if vm.user.id != user_id: raise CMException('user_permission') return vm @staticmethod
Apache License 2.0
linuxchristian/pyw215
pyW215/pyW215.py
SmartPlug.current_consumption
python
def current_consumption(self):
    res = 'N/A'
    if self.use_legacy_protocol:
        try:
            res = self.fetchMyCgi()['Meter Watt']
        except:
            return 'N/A'
    else:
        try:
            res = self.SOAPAction('GetCurrentPowerConsumption', 'CurrentConsumption', self.moduleParameters("2"))
        except:
            return 'N/A'

    if res is None:
        return 'N/A'

    try:
        res = float(res)
    except ValueError:
        _LOGGER.error("Failed to retrieve current power consumption from SmartPlug")

    return res
Get the current power consumption in Watt.
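A short usage sketch, assuming a reachable D-Link plug on the local network; the IP address and PIN below are placeholders.

from pyW215.pyW215 import SmartPlug

# Placeholder address and PIN code; substitute your plug's values.
plug = SmartPlug('192.168.1.110', '123456')

# current_consumption is exposed as a property: it returns a float in Watt,
# or the string 'N/A' when the reading cannot be retrieved.
watts = plug.current_consumption
if watts != 'N/A':
    print('Current draw: %.1f W' % watts)
else:
    print('Power metering unavailable on this plug/firmware.')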
https://github.com/linuxchristian/pyw215/blob/e304e115b0ee8395d119796e8fb6234629253e55/pyW215/pyW215.py#L198-L221
try: from urllib.request import Request, urlopen from urllib.error import URLError, HTTPError except ImportError: from urllib2 import Request, urlopen from urllib2 import URLError, HTTPError import xml.etree.ElementTree as ET import hashlib import hmac import time import logging _LOGGER = logging.getLogger(__name__) ON = 'ON' OFF = 'OFF' class SmartPlug(object): def __init__(self, ip, password, user="admin", use_legacy_protocol=False): self.ip = ip self.url = "http://{}/HNAP1/".format(ip) self.user = user self.password = password self.use_legacy_protocol = use_legacy_protocol self.authenticated = None if self.use_legacy_protocol: _LOGGER.info("Enabled support for legacy firmware.") self._error_report = False self.model_name = self.SOAPAction(Action="GetDeviceSettings", responseElement="ModelName", params="") def moduleParameters(self, module): return '''<ModuleID>{}</ModuleID>'''.format(module) def controlParameters(self, module, status): if self.use_legacy_protocol: return '''{}<NickName>Socket 1</NickName><Description>Socket 1</Description> <OPStatus>{}</OPStatus><Controller>1</Controller>'''.format(self.moduleParameters(module), status) else: return '''{}<NickName>Socket 1</NickName><Description>Socket 1</Description> <OPStatus>{}</OPStatus>'''.format(self.moduleParameters(module), status) def radioParameters(self, radio): return '''<RadioID>{}</RadioID>'''.format(radio) def requestBody(self, Action, params): return '''<?xml version="1.0" encoding="UTF-8"?> <soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body> <{} xmlns="http://purenetworks.com/HNAP1/"> {} </{}> </soap:Body> </soap:Envelope> '''.format(Action, params, Action) def SOAPAction(self, Action, responseElement, params="", recursive=False): if self.authenticated is None: self.authenticated = self.auth() auth = self.authenticated if not self.use_legacy_protocol: self.authenticated = None if auth is None: return None payload = self.requestBody(Action, params) time_stamp = str(round(time.time() / 1e6)) action_url = '"http://purenetworks.com/HNAP1/{}"'.format(Action) AUTHKey = hmac.new(auth[0].encode(), (time_stamp + action_url).encode(), digestmod=hashlib.md5).hexdigest().upper() + " " + time_stamp headers = {'Content-Type': '"text/xml; charset=utf-8"', 'SOAPAction': '"http://purenetworks.com/HNAP1/{}"'.format(Action), 'HNAP_AUTH': '{}'.format(AUTHKey), 'Cookie': 'uid={}'.format(auth[1])} try: response = urlopen(Request(self.url, payload.encode(), headers)) except (HTTPError, URLError): self.authenticated = None if not recursive: return_value = self.SOAPAction(Action, responseElement, params, True) if recursive or return_value is None: _LOGGER.warning("Failed to open url to {}".format(self.ip)) self._error_report = True return None else: return return_value xmlData = response.read().decode() root = ET.fromstring(xmlData) try: value = root.find('.//{http://purenetworks.com/HNAP1/}%s' % (responseElement)).text except AttributeError: _LOGGER.warning("Unable to find %s in response." % responseElement) return None if value is None and self._error_report is False: _LOGGER.warning("Could not find %s in response." 
% responseElement) self._error_report = True return None self._error_report = False return value def fetchMyCgi(self): try: response = urlopen(Request('http://{}/my_cgi.cgi'.format(self.ip), b'request=create_chklst')); except (HTTPError, URLError): _LOGGER.warning("Failed to open url to {}".format(self.ip)) self._error_report = True return None lines = response.readlines() return {line.decode().split(':')[0].strip(): line.decode().split(':')[1].strip() for line in lines} @property
MIT License
siviltaram/persona-dialogue-generation
parlai/core/utils.py
clip_text
python
def clip_text(text, max_len):
    if len(text) > max_len:
        begin_text = ' '.join(
            text[:math.floor(0.8 * max_len)].split(' ')[:-1]
        )
        end_text = ' '.join(
            text[(len(text) - math.floor(0.2 * max_len)):].split(' ')[1:]
        )
        if len(end_text) > 0:
            text = begin_text + ' ...\n' + end_text
        else:
            text = begin_text + ' ...'
    return text
Clip text to max length, adding ellipses.
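An illustrative call showing the 80/20 split around the ellipsis; the sample string is made up.

from parlai.core.utils import clip_text

long_text = ' '.join('token%d' % i for i in range(200))

# Roughly the first 80% and last 20% of the character budget are kept,
# joined by ' ...' and a newline.
print(clip_text(long_text, max_len=120))

# Strings at or under max_len are returned unchanged.
assert clip_text('hello world', max_len=120) == 'hello world'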
https://github.com/siviltaram/persona-dialogue-generation/blob/3cc800ffe3c5a8d16ed26522cda839acfab8d417/parlai/core/utils.py#L693-L706
from collections import deque from functools import lru_cache import math import os import random import time import warnings import heapq try: import torch __TORCH_AVAILABLE = True except ImportError: __TORCH_AVAILABLE = False NEAR_INF = 1e20 DISPLAY_MESSAGE_DEFAULT_FIELDS = { 'episode_done', 'id', 'image', 'text', 'labels', 'eval_labels', 'label_candidates', 'text_candidates', 'reward', 'eval_labels_vec', 'text_vec', 'label_candidates_vecs' } def maintain_dialog_history(history, observation, reply='', historyLength=1, useReplies='label_else_model', dict=None, useStartEndIndices=True, splitSentences=False): def parse(txt, splitSentences): if dict is not None: if splitSentences: vec = [dict.txt2vec(t) for t in txt.split('\n')] else: vec = dict.txt2vec(txt) return vec else: return [txt] if 'dialog' not in history: history['dialog'] = deque(maxlen=historyLength) history['episode_done'] = False history['labels'] = [] if history['episode_done']: history['dialog'].clear() history['labels'] = [] useReplies = 'none' history['episode_done'] = False if useReplies != 'none': if useReplies == 'model' or (useReplies == 'label_else_model' and len(history['labels']) == 0): if reply: if useStartEndIndices: reply = dict.start_token + ' ' + reply history['dialog'].extend(parse(reply, splitSentences)) elif len(history['labels']) > 0: r = history['labels'][0] history['dialog'].extend(parse(r, splitSentences)) obs = observation if 'text' in obs: if useStartEndIndices: obs['text'] = dict.end_token + ' ' + obs['text'] history['dialog'].extend(parse(obs['text'], splitSentences)) history['episode_done'] = obs['episode_done'] labels = obs.get('labels', obs.get('eval_labels', None)) if labels is not None: if useStartEndIndices: history['labels'] = [dict.start_token + ' ' + l for l in labels] else: history['labels'] = labels return history['dialog'] def load_cands(path, lines_have_ids=False, cands_are_replies=False): if path is None: return None cands = [] cnt = 0 with open(path) as read: for line in read: line = line.strip().replace('\\n', '\n') if len(line) > 0: cnt = cnt + 1 if cnt == 1 and line[0:2] == '1 ': lines_have_ids = True if '\t' in line and not cands_are_replies: cands_are_replies = True cands = [] if lines_have_ids: space_idx = line.find(' ') line = line[space_idx + 1:] if cands_are_replies: sp = line.split('\t') if len(sp) > 1 and sp[1] != '': cands.append(sp[1]) else: cands.append(line) else: cands.append(line) return cands class Predictor(object): def __init__(self, args=None, **kwargs): from parlai.core.params import ParlaiParser from parlai.core.agents import create_agent if args is None: args = [] for k, v in kwargs.items(): args.append('--' + str(k).replace('_', '-')) args.append(str(v)) parser = ParlaiParser(True, True) self.opt = parser.parse_args(args) self.agent = create_agent(self.opt) def predict(self, observation): if 'episode_done' not in observation: observation['episode_done'] = True self.agent.observe(observation) reply = self.agent.act() return reply class Timer(object): def __init__(self): self.running = True self.total = 0 self.start = time.time() def reset(self): self.running = True self.total = 0 self.start = time.time() return self def resume(self): if not self.running: self.running = True self.start = time.time() return self def stop(self): if self.running: self.running = False self.total += time.time() - self.start return self def time(self): if self.running: return self.total + time.time() - self.start return self.total class TimeLogger(): def __init__(self): self.timer = 
Timer() self.tot_time = 0 def total_time(self): return self.tot_time def time(self): return self.timer.time() def log(self, done, total, report=None): self.tot_time += self.timer.time() self.timer.reset() log = {} log['exs'] = done if total > 0: log['%done'] = done / total if log["%done"] > 0: time_left = self.tot_time / log['%done'] - self.tot_time log['time_left'] = str(int(time_left)) + 's' z = '%.2f' % (100 * log['%done']) log['%done'] = str(z) + '%' if report: for k, v in report.items(): if k not in log: log[k] = v text = str(int(self.tot_time)) + "s elapsed: " + str(log).replace('\\n', '\n') return text, log class AttrDict(dict): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__dict__ = self def round_sigfigs(x, sigfigs=4): try: if x == 0: return 0 return round(x, -math.floor(math.log10(abs(x)) - sigfigs + 1)) except (RuntimeError, TypeError): if hasattr(x, 'item'): return round_sigfigs(x.item(), sigfigs) else: return round_sigfigs(x[0], sigfigs) except (ValueError, OverflowError) as ex: if x in [float('inf'), float('-inf')] or x != x: return x else: raise ex class NoLock(object): def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): pass single_nolock = NoLock() def no_lock(): return single_nolock class PaddingUtils(object): @classmethod def pad_text(cls, observations, dictionary, end_idx=None, null_idx=0, dq=False, eval_labels=True, truncate=None): def valid(obs): return 'text' in obs and len(obs['text']) > 0 try: valid_inds, exs = zip(*[(i, ex) for i, ex in enumerate(observations) if valid(ex)]) except ValueError: return None, None, None, None, None, None if any(['text2vec' in ex for ex in exs]): parsed_x = [ex['text2vec'] for ex in exs] else: parsed_x = [dictionary.txt2vec(ex['text']) for ex in exs] if len(parsed_x) > 0 and not isinstance(parsed_x[0], deque): if dq: parsed_x = [deque(x, maxlen=truncate) for x in parsed_x] elif truncate is not None and truncate > 0: parsed_x = [x[-truncate:] for x in parsed_x] x_lens = [len(x) for x in parsed_x] ind_sorted = sorted(range(len(x_lens)), key=lambda k: -x_lens[k]) exs = [exs[k] for k in ind_sorted] valid_inds = [valid_inds[k] for k in ind_sorted] parsed_x = [parsed_x[k] for k in ind_sorted] end_idxs = [x_lens[k] for k in ind_sorted] eval_labels_avail = any(['eval_labels' in ex for ex in exs]) labels_avail = any(['labels' in ex for ex in exs]) if eval_labels: some_labels_avail = eval_labels_avail or labels_avail else: some_labels_avail = labels_avail max_x_len = max(x_lens) if dq: parsed_x = [x if len(x) == max_x_len else x + deque((null_idx,)) * (max_x_len - len(x)) for x in parsed_x] else: parsed_x = [x if len(x) == max_x_len else x + [null_idx] * (max_x_len - len(x)) for x in parsed_x] xs = parsed_x ys = None labels = None y_lens = None if some_labels_avail: if labels_avail: labels = [random.choice(ex.get('labels', [''])) for ex in exs] else: labels = [random.choice(ex.get('eval_labels', [''])) for ex in exs] if dq: parsed_y = [deque(maxlen=truncate) for _ in labels] for deq, y in zip(parsed_y, labels): deq.extendleft(reversed(dictionary.txt2vec(y))) else: parsed_y = [dictionary.txt2vec(label) for label in labels] if end_idx is not None: for y in parsed_y: y.append(end_idx) y_lens = [len(y) for y in parsed_y] max_y_len = max(y_lens) if dq: parsed_y = [y if len(y) == max_y_len else y + deque((null_idx,)) * (max_y_len - len(y)) for y in parsed_y] else: parsed_y = [y if len(y) == max_y_len else y + [null_idx] * (max_y_len - len(y)) for y in parsed_y] ys = parsed_y return 
xs, ys, labels, valid_inds, end_idxs, y_lens @classmethod def map_predictions(cls, predictions, valid_inds, batch_reply, observations, dictionary, end_idx, report_freq=0.1, labels=None, answers=None, ys=None): for i in range(len(predictions)): curr = batch_reply[valid_inds[i]] output_tokens = [] j = 0 for c in predictions[i]: if c == end_idx and j != 0: break else: output_tokens.append(c) j += 1 curr_pred = dictionary.vec2txt(output_tokens) curr['text'] = curr_pred if labels is not None and answers is not None and ys is not None: y = [] for c in ys[i]: if c == end_idx: break else: y.append(c) answers[valid_inds[i]] = y elif answers is not None: answers[valid_inds[i]] = curr_pred if random.random() > (1 - report_freq): print('TEXT: ', observations[valid_inds[i]]['text']) print('PREDICTION: ', curr_pred, '\n~') return class OffensiveLanguageDetector(object): def __init__(self): import parlai.core.build_data as build_data from parlai.core.params import ParlaiParser from parlai.core.dict import DictionaryAgent self.tokenize = DictionaryAgent.split_tokenize parser = ParlaiParser(False, False) def _path(): build() return os.path.join( self.datapath, 'OffensiveLanguage', 'OffensiveLanguage.txt' ) def build(): version = 'v1.0' dpath = os.path.join(self.datapath, 'OffensiveLanguage') if not build_data.built(dpath, version): print('[building data: ' + dpath + ']') if build_data.built(dpath): build_data.remove_dir(dpath) build_data.make_dir(dpath) fname = 'OffensiveLanguage.txt' url = 'http://parl.ai/downloads/offensive_language/' + fname build_data.download(url, dpath, fname) build_data.mark_done(dpath, version) self.datapath = os.path.join(parser.parlai_home, 'data') self.datafile = _path() self.END = '__END__' self.max_len = 1 self.offensive_trie = {} self.word_prefixes = ['de', 'de-', 'dis', 'dis-', 'ex', 'ex-', 'mis', 'mis-', 'pre', 'pre-', 'non', 'non-', 'semi', 'semi-', 'sub', 'sub-', 'un', 'un-'] self.word_suffixes = ['a', 'able', 'as', 'dom', 'ed', 'er', 'ers', 'ery', 'es', 'est', 'ful', 'fy', 'ies', 'ify', 'in', 'ing', 'ish', 'less', 'ly', 's', 'y'] self.white_list = ['butter', 'buttery', 'spicy', 'spiced', 'spices', 'spicier', 'spicing', 'twinkies'] with open(self.datafile, 'r') as f: for p in f.read().splitlines(): mod_ps = [p] mod_ps += [pref + p for pref in self.word_prefixes] mod_ps += [p + suff for suff in self.word_suffixes] for mod_p in mod_ps: if mod_p not in self.white_list: self.add_phrase(mod_p) def add_phrase(self, phrase): toks = self.tokenize(phrase) curr = self.offensive_trie for t in toks: if t not in curr: curr[t] = {} curr = curr[t] curr[self.END] = True self.max_len = max(self.max_len, len(toks)) def add_words(self, phrase_list): for phrase in phrase_list: self.add_phrase(phrase) def _check_sequence(self, toks, idx, node): right = min(idx + self.max_len, len(toks)) for i in range(idx, right): if toks[i] in node: node = node[toks[i]] if self.END in node: return ' '.join(toks[j] for j in range(idx, i + 1)) else: break return False def contains_offensive_language(self, text): if type(text) is str: toks = self.tokenize(text.lower()) elif type(text) is list or type(text) is tuple: toks = text for i in range(len(toks)): res = self._check_sequence(toks, i, self.offensive_trie) if res: return res return None def __contains__(self, key): return self.contains_offensive_language(key) def str_segment(self, text, dict_agent, k=1, max_length=None): freqs = dict_agent.freqs() N = sum(freqs.values()) V = len(freqs) logNV = math.log(N + V) max_heap = [] if not max_length: max_length = 
len(text) @lru_cache(maxsize=16) def segment(text): if not text: return [] candidates = [ [first] + segment(rem) for first, rem in splits(text, max_length) ] nonlocal max_heap max_heap = [] for c in candidates: cand_score = (score(c), c) max_heap.append(cand_score) heapq._heapify_max(max_heap) return max_heap[0][1] def splits(text, max_length): return [ (text[:i+1], text[i+1:]) for i in range(min(len(text), max_length)) ] def score(words): return sum(logprob(w) for w in words) / len(words) def logprob(word): count_w = freqs.get(word, 0) return math.log(count_w + 1) - logNV segment(text) res = [] for i in range(0, k): res.append(heapq._heappop_max(max_heap)[1]) return res
MIT License
gecrooks/quantumflow-dev
quantumflow/states.py
print_state
python
def print_state(state: State, file: TextIO = None) -> None:
    for index, amplitude in np.ndenumerate(state.tensor):
        ket = "".join([str(n) for n in index])
        print(ket, ":", amplitude, file=file)
Print a state vector
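A small sketch of how the printer might be driven, using helpers from the same quantumflow.states module; the output format follows the loop above.

import io
from quantumflow.states import ghz_state, print_state

# Build a 2-qubit GHZ state and list every amplitude, one "<ket> : <amp>"
# line per computational basis state.
ket = ghz_state(2)
print_state(ket)

# file= accepts any text stream, e.g. to capture the listing:
buf = io.StringIO()
print_state(ket, file=buf)
print(buf.getvalue().splitlines()[0])  # roughly "00 : (0.7071...+0j)"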
https://github.com/gecrooks/quantumflow-dev/blob/6840df0024ad953d0cdd22dd8bc348ce3df37af9/quantumflow/states.py#L360-L364
from abc import ABC from math import sqrt from typing import ( TYPE_CHECKING, Any, Dict, List, Mapping, TextIO, Tuple, TypeVar, Union, ) import numpy as np import opt_einsum from . import tensors, utils from .qubits import Qubit, Qubits, sorted_qubits from .tensors import QubitTensor if TYPE_CHECKING: from numpy.typing import ArrayLike __all__ = [ "State", "join_states", "print_probabilities", "print_state", "random_state", "ghz_state", "w_state", "zero_state", "Density", "mixed_density", "random_density", "join_densities", ] QuantumStateType = TypeVar("QuantumStateType", bound="QuantumState") class QuantumState(ABC): def __init__( self, tensor: "ArrayLike", qubits: Qubits, memory: Mapping = None ) -> None: self._tensor = np.asarray(tensor) self._qubits = tuple(qubits) if memory is None: self.memory: utils.FrozenDict = utils.FrozenDict() elif isinstance(memory, utils.FrozenDict): self.memory = memory else: self.memory = utils.FrozenDict(memory) @property def tensor(self) -> QubitTensor: if self._tensor is None: raise ValueError("Cannot access quantum state") return self._tensor @property def qubits(self) -> Qubits: return self._qubits @property def qubit_nb(self) -> int: return len(self._qubits) def replace( self: QuantumStateType, *, tensor: "ArrayLike" = None, qubits: Qubits = None, memory: Mapping = None, ) -> QuantumStateType: tensor = self.tensor if tensor is None else tensor qubits = self.qubits if qubits is None else qubits memory = self.memory if memory is None else memory return type(self)(tensor, qubits, memory) def store(self: QuantumStateType, *args: Any, **kwargs: Any) -> QuantumStateType: mem = self.memory.update(*args, **kwargs) return self.replace(memory=mem) def on(self: QuantumStateType, *qubits: Qubit) -> QuantumStateType: return self.replace(qubits=qubits) def rewire(self: QuantumStateType, labels: Dict[Qubit, Qubit]) -> QuantumStateType: qubits = tuple(labels[q] for q in self.qubits) return self.on(*qubits) def permute(self: QuantumStateType, qubits: Qubits = None) -> QuantumStateType: if qubits is None: qubits = sorted_qubits(self.qubits) tensor = tensors.permute(self.tensor, self.qubit_indices(qubits)) return self.replace(tensor=tensor, qubits=qubits) def qubit_indices(self, qubits: Qubits) -> Tuple[int, ...]: return tuple(self.qubits.index(q) for q in qubits) def norm(self) -> QubitTensor: return tensors.norm(self.tensor) class State(QuantumState): def __init__( self, tensor: "ArrayLike", qubits: Qubits = None, memory: Mapping = None ) -> None: tensor = tensors.asqutensor(tensor) N = np.ndim(tensor) if qubits is None: qubits = range(N) elif len(qubits) != N: raise ValueError("Wrong number of qubits for tensor") super().__init__(tensor, qubits, memory) def normalize(self) -> "State": tensor = self.tensor / np.sqrt(self.norm()) return State(tensor, self.qubits, self.memory) def probabilities(self) -> QubitTensor: value = np.absolute(self.tensor) return value * value def sample(self, trials: int) -> np.ndarray: probs = np.real(self.probabilities()) res = np.random.multinomial(trials, probs.ravel()) res = res.reshape(probs.shape) return res def expectation( self, diag_hermitian: "ArrayLike", trials: int = None ) -> QubitTensor: if trials is None: probs = self.probabilities() else: probs = np.real(tensors.asqutensor(self.sample(trials) / trials)) diag_hermitian = tensors.asqutensor(diag_hermitian) return np.sum(np.real(diag_hermitian) * probs) def measure(self) -> np.ndarray: probs = np.real(self.probabilities()) indices = np.asarray(list(np.ndindex(*[2] * 
self.qubit_nb))) res = np.random.choice(probs.size, p=probs.ravel()) res = indices[res] return res def asdensity(self, qubits: Qubits = None) -> "Density": N = self.qubit_nb if qubits is None: qubits = self.qubits contract_qubits: List[Qubit] = list(self.qubits) for q in qubits: contract_qubits.remove(q) left_subs = np.asarray(list(range(0, N))) right_subs = np.asarray(list(range(N, 2 * N))) indices = [self.qubits.index(qubit) for qubit in contract_qubits] for idx in indices: right_subs[idx] = left_subs[idx] left_tensor = self.tensor right_tensor = np.conj(left_tensor) tensor = opt_einsum.contract(left_tensor, left_subs, right_tensor, right_subs) return Density(tensor, qubits, self.memory) def __str__(self) -> str: state = self.tensor s = [] count = 0 MAX_ELEMENTS = 64 for index, amplitude in np.ndenumerate(state): if not np.isclose(amplitude, 0.0): ket = "|" + "".join([str(n) for n in index]) + ">" s.append(f"({amplitude.real:0.04g}" f"{amplitude.imag:+0.04g}i) {ket}") count += 1 if count > MAX_ELEMENTS: s.append("...") break return " + ".join(s) def zero_state(qubits: Union[int, Qubits]) -> State: N, qubits = _qubits_count_tuple(qubits) ket = np.zeros(shape=[2] * N) ket[(0,) * N] = 1 return State(ket, qubits) def w_state(qubits: Union[int, Qubits]) -> State: N, qubits = _qubits_count_tuple(qubits) ket = np.zeros(shape=[2] * N) for n in range(N): idx = np.zeros(shape=N, dtype=int) idx[n] += 1 ket[tuple(idx)] = 1 / sqrt(N) return State(ket, qubits) def ghz_state(qubits: Union[int, Qubits]) -> State: N, qubits = _qubits_count_tuple(qubits) ket = np.zeros(shape=[2] * N) ket[(0,) * N] = 1 / sqrt(2) ket[(1,) * N] = 1 / sqrt(2) return State(ket, qubits) def random_state(qubits: Union[int, Qubits]) -> State: N, qubits = _qubits_count_tuple(qubits) ket = np.random.normal(size=([2] * N)) + 1j * np.random.normal(size=([2] * N)) return State(ket, qubits).normalize() def join_states(ket0: State, ket1: State) -> State: qubits = tuple(ket0.qubits) + tuple(ket1.qubits) tensor = tensors.outer(ket0.tensor, ket1.tensor, rank=1) memory = ket0.memory.update(ket1.memory) return State(tensor, qubits, memory)
Apache License 2.0
surrealai/surreal
surreal/utils/filesys.py
f_remove
python
def f_remove(fpath):
    fpath = f_expand(fpath)
    for f in glob.glob(fpath):
        try:
            shutil.rmtree(f)
        except OSError as e:
            if e.errno == errno.ENOTDIR:
                try:
                    os.remove(f)
                except:
                    pass
If exist, remove. Supports both dir and file. Supports glob wildcard.
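A usage sketch combining f_remove with other helpers defined in the same module; the scratch path is illustrative.

import os
from surreal.utils.filesys import f_mkdir, f_remove

# Create a scratch directory plus a stray file, then clean both up with a
# single glob pattern; paths that do not exist are silently ignored.
f_mkdir('~/tmp_f_remove_demo/sub')
open(os.path.expanduser('~/tmp_f_remove_demo/file.txt'), 'w').close()

f_remove('~/tmp_f_remove_demo/*')  # removes the sub-dir and the file
f_remove('~/tmp_f_remove_demo')    # removes the now-empty parent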
https://github.com/surrealai/surreal/blob/ae9e5f43bdd7d1bc6d39d0a4783b96b2c117fade/surreal/utils/filesys.py#L122-L135
import os
import sys
import errno
import shutil
import glob
import pwd
import codecs
import hashlib
import tarfile
from socket import gethostname

f_ext = os.path.splitext
f_expand = os.path.expanduser
f_size = os.path.getsize
is_file = os.path.isfile
is_dir = os.path.isdir
get_dir = os.path.dirname


def owner_name(filepath):
    return pwd.getpwuid(os.stat(filepath).st_uid).pw_name


def host_name():
    return gethostname()


def host_id():
    return host_name().split('.')[0]


def utf_open(fname, mode):
    return codecs.open(fname, mode=mode, encoding='utf-8')


def is_txt(fpath):
    _, ext = f_ext(fpath)
    return ext == '.txt'


def f_exists(path):
    return os.path.exists(f_expand(path))


def f_join(*fpaths):
    return f_expand(os.path.join(*fpaths))


def f_mkdir(fpath):
    os.makedirs(f_expand(fpath), exist_ok=True)


def f_mkdir_in_path(fpath):
    os.makedirs(get_dir(f_expand(fpath)), exist_ok=True)


def f_last_part_in_path(fpath):
    return os.path.basename(os.path.normpath(f_expand(fpath)))


def f_time(fpath):
    return str(os.path.getctime(fpath))


def f_append_before_ext(fpath, suffix):
    name, ext = f_ext(fpath)
    return name + suffix + ext


def f_add_ext(fpath, ext):
    if not ext.startswith('.'):
        ext = '.' + ext
    if fpath.endswith(ext):
        return fpath
    else:
        return fpath + ext
MIT License
tenable/pytenable
tenable/io/policies.py
PoliciesAPI.template_details
python
def template_details(self, name):
    tmpl = self.templates()
    tmpl_uuid = tmpl[self._check('name', name, str, choices=tmpl.keys())]
    editor = self._api.editor.template_details('policy', tmpl_uuid)

    scan = {
        'settings': policy_settings(editor['settings']),
        'uuid': editor['uuid']
    }

    for item in editor['settings']['basic']['groups']:
        for setting in item.keys():
            if setting not in ['name', 'title', 'inputs', 'sections']:
                scan['settings'][setting] = item[setting]

    if 'credentials' in editor:
        scan['credentials'] = {
            'current': self._api.editor.parse_creds(
                editor['credentials']['data'])
        }
        for ctype in editor['credentials']['data']:
            for citem in ctype['types']:
                if 'settings' in citem and citem['settings']:
                    scan['settings'] = dict_merge(
                        scan['settings'], policy_settings(
                            citem['settings']))

    if 'compliance' in editor:
        scan['compliance'] = {
            'current': self._api.editor.parse_audits(
                editor['compliance']['data'])
        }
        for item in editor['compliance']['data']:
            if 'settings' in item:
                scan['settings'] = dict_merge(
                    scan['settings'], policy_settings(
                        item['settings']))

    if 'plugins' in editor:
        scan['plugins'] = self._api.editor.parse_plugins(
            'policy', editor['plugins']['families'], tmpl_uuid)

    return scan
Calls the editor API and parses the policy template config to return a
document that closely matches what the API expects to be POSTed or PUTed
via the policy create and configure methods. The compliance audits and
credentials are populated into the 'current' sub-document for the
relevant resources.

Args:
    name (str): The name of the scan template.

Returns:
    :obj:`dict`: The policy configuration resource.

Examples:
    >>> template = tio.policies.template_details('basic')
    >>> pprint(template)

Please note that template_details is reverse-engineered from the
responses from the editor API and isn't guaranteed to work.
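Expanding slightly on the docstring example, a hedged sketch assuming valid Tenable.io API keys; the key strings and the policy-name edit are placeholders.

from pprint import pprint
from tenable.io import TenableIO

# Placeholder keys; supply real access/secret keys from your account.
tio = TenableIO('ACCESS_KEY', 'SECRET_KEY')

# Pull the editor-derived configuration for the 'basic' template and adjust
# a setting before using it with the policy create/configure methods.
template = tio.policies.template_details('basic')
template['settings']['name'] = 'Weekly basic scan policy'
pprint(template)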
https://github.com/tenable/pytenable/blob/32b925f0cebd4d3032f85e65571dd9593778b9f1/tenable/io/policies.py#L39-L121
from .base import TIOEndpoint
from tenable.utils import policy_settings, dict_merge
from io import BytesIO


class PoliciesAPI(TIOEndpoint):
    def templates(self):
        policies = dict()
        for item in self._api.editor.template_list('policy'):
            policies[item['name']] = item['uuid']
        return policies
MIT License
tmancal74/quantarhei
quantarhei/qm/propagators/rdmpropagator.py
ReducedDensityMatrixPropagator.__propagate_short_exp_with_relaxation
python
def __propagate_short_exp_with_relaxation(self, rhoi, L=4):
    try:
        if self.RelaxationTensor.as_operators:
            return self.__propagate_short_exp_with_rel_operators(rhoi, L=L)
    except:
        raise Exception("Operator propagation failed")

    pr = ReducedDensityMatrixEvolution(self.TimeAxis, rhoi,
                                       name=self.propagation_name)

    rho1 = rhoi.data
    rho2 = rhoi.data

    if self.Hamiltonian.has_rwa:
        HH = self.Hamiltonian.get_RWA_data()
    else:
        HH = self.Hamiltonian.data

    RR = self.RelaxationTensor.data

    if self.has_PDeph:

        if self.PDeph.dtype == "Lorentzian":
            expo = numpy.exp(-self.PDeph.data*self.dt)
            t0 = 0.0
        elif self.PDeph.dtype == "Gaussian":
            expo = numpy.exp(-self.PDeph.data*(self.dt**2)/2.0)
            t0 = self.PDeph.data*self.dt

        indx = 1
        for ii in range(1, self.Nt):
            tNt = self.TimeAxis.data[indx-1]
            for jj in range(0, self.Nref):
                tt = tNt + jj*self.dt
                for ll in range(1, L+1):
                    rho1 = - (1j*self.dt/ll)*(numpy.dot(HH,rho1) - numpy.dot(rho1,HH)) + (self.dt/ll)*numpy.tensordot(RR,rho1)
                    rho2 = rho2 + rho1
                rho2 = rho2*expo*numpy.exp(-t0*tt)
                rho1 = rho2
            pr.data[indx,:,:] = rho2
            indx += 1

    else:

        indx = 1
        for ii in range(1, self.Nt):
            for jj in range(0, self.Nref):
                for ll in range(1, L+1):
                    rho1 = - (1j*self.dt/ll)*(numpy.dot(HH,rho1) - numpy.dot(rho1,HH)) + (self.dt/ll)*numpy.tensordot(RR,rho1)
                    rho2 = rho2 + rho1
                rho1 = rho2
            pr.data[indx,:,:] = rho2
            indx += 1

    if self.Hamiltonian.has_rwa:
        pr.is_in_rwa = True

    return pr
Integration by short exponential expansion

Integration by expanding the exponential to Lth order.
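A hedged sketch of how this integrator is reached through the public propagate() call; the two-level Hamiltonian, initial density matrix, and time axis are illustrative, and without a RelaxationTensor the purely coherent short-exp variant runs instead of the relaxation version shown above.

import numpy
import quantarhei as qr

# Illustrative two-level system in quantarhei's internal units.
HH = qr.Hamiltonian(data=numpy.array([[0.0, 0.1],
                                      [0.1, 1.0]]))
rho0 = qr.ReducedDensityMatrix(data=numpy.array([[0.0, 0.0],
                                                 [0.0, 1.0]], dtype=complex))
time = qr.TimeAxis(0.0, 1000, 1.0)

# Passing a RelaxationTensor via RTensor= would route "short-exp-4" to
# __propagate_short_exp_with_relaxation; without it the coherent variant runs.
prop = qr.ReducedDensityMatrixPropagator(time, Ham=HH)
rhot = prop.propagate(rho0, method="short-exp-4")
print(rhot.data.shape)  # (1000, 2, 2)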
https://github.com/tmancal74/quantarhei/blob/54a40cc55cdedf86bf04a5d705227fe69461d408/quantarhei/qm/propagators/rdmpropagator.py#L551-L643
import numpy import numpy.linalg import matplotlib.pyplot as plt from ..hilbertspace.hamiltonian import Hamiltonian from ..hilbertspace.operators import Operator from ...core.time import TimeAxis from ...core.time import TimeDependent from ...core.saveable import Saveable from ..liouvillespace.redfieldtensor import RelaxationTensor from ..hilbertspace.operators import ReducedDensityMatrix, DensityMatrix from .dmevolution import ReducedDensityMatrixEvolution from ...core.matrixdata import MatrixData from ...core.managers import Manager import quantarhei as qr class ReducedDensityMatrixPropagator(MatrixData, Saveable): def __init__(self, timeaxis=None, Ham=None, RTensor=None, Efield=None, Trdip=None, PDeph=None): self.has_Trdip = False self.has_Efield = False self.has_PDeph = False self.has_RTensor = False self.has_RWA = False self.has_EField = False if not ((timeaxis is None) and (Ham is None)): if isinstance(Ham,Hamiltonian): self.Hamiltonian = Ham else: raise Exception if isinstance(timeaxis,TimeAxis): self.TimeAxis = timeaxis else: raise Exception if isinstance(RTensor,RelaxationTensor): self.RelaxationTensor = RTensor self.has_RTensor = True self.has_relaxation = True elif RTensor is None: self.has_RTensor = False self.has_relaxation = False else: raise Exception if Trdip is not None: if isinstance(Trdip,Operator): self.Trdip = Trdip self.has_Trdip = True else: raise Exception if Efield is not None: if isinstance(Efield,numpy.ndarray): self.Efield = Efield self.has_Efield = True self.has_EField = False else: self.EField = Efield self.has_EField = True self.has_Efield = False if PDeph is not None: self.PDeph = PDeph self.has_PDeph = True self.has_relaxation = True self.Odt = self.TimeAxis.data[1]-self.TimeAxis.data[0] self.dt = self.Odt self.Nref = 1 self.Nt = self.TimeAxis.data.shape[0] N = self.Hamiltonian.data.shape[0] self.N = N self.data = numpy.zeros((self.Nt,N,N),dtype=numpy.complex64) self.propagation_name = "" self.verbose = Manager().log_conf.verbose def setDtRefinement(self, Nref): self.Nref = Nref self.dt = self.Odt/self.Nref def propagate(self, rhoi, method="short-exp", mdata=None, name=""): self.propagation_name = name if not (isinstance(rhoi, ReducedDensityMatrix) or isinstance(rhoi, DensityMatrix)): raise Exception("First argument has be of"+ "the ReducedDensityMatrix type") if self.has_relaxation: if isinstance(self.RelaxationTensor, TimeDependent): if (self.has_Efield and self.has_Trdip): if method == "short-exp": return self.__propagate_short_exp_with_TD_relaxation_field( rhoi,L=4) elif method == "short-exp-2": return self.__propagate_short_exp_with_TD_relaxation_field( rhoi,L=2) elif method == "short-exp-4": return self.__propagate_short_exp_with_TD_relaxation_field( rhoi,L=4) elif method == "short-exp-6": return self.__propagate_short_exp_with_TD_relaxation_field( rhoi,L=6) else: raise Exception("Unknown propagation method: "+method) else: if method == "short-exp": return self.__propagate_short_exp_with_TD_relaxation( rhoi,L=4) elif method == "short-exp-2": return self.__propagate_short_exp_with_TD_relaxation( rhoi,L=2) elif method == "short-exp-4": return self.__propagate_short_exp_with_TD_relaxation( rhoi,L=4) elif method == "short-exp-6": return self.__propagate_short_exp_with_TD_relaxation( rhoi,L=6) else: raise Exception("Unknown propagation method: "+method) else: if (self.has_Efield and self.has_Trdip): if method == "short-exp": return self.__propagate_short_exp_with_relaxation_field( rhoi,L=4) elif method == "short-exp-2": return 
self.__propagate_short_exp_with_relaxation_field( rhoi,L=2) elif method == "short-exp-4": return self.__propagate_short_exp_with_relaxation_field( rhoi,L=4) elif method == "short-exp-6": return self.__propagate_short_exp_with_relaxation_field( rhoi,L=6) else: raise Exception("Unknown propagation method: "+method) elif (self.has_EField and self.has_Trdip): if method == "short-exp": return self.__propagate_short_exp_with_relaxation_EField( rhoi,L=4) elif method == "short-exp-2": return self.__propagate_short_exp_with_relaxation_EField( rhoi,L=2) elif method == "short-exp-4": return self.__propagate_short_exp_with_relaxation_EField( rhoi,L=4) elif method == "short-exp-6": return self.__propagate_short_exp_with_relaxation_EField( rhoi,L=6) else: raise Exception("Unknown propagation method: "+method) else: if method == "short-exp": return self.__propagate_short_exp_with_relaxation( rhoi,L=4) elif method == "short-exp-2": return self.__propagate_short_exp_with_relaxation( rhoi,L=2) elif method == "short-exp-4": return self.__propagate_short_exp_with_relaxation( rhoi,L=4) elif method == "short-exp-6": return self.__propagate_short_exp_with_relaxation( rhoi,L=6) elif method == "primitive": return self.__propagate_primitive_with_relaxation(rhoi) elif method == "Runge-Kutta": return self.__propagate_Runge_Kutta(rhoi) elif method == "diagonalization": return self.__propagate_diagonalization(rhoi) else: raise Exception("Unknown propagation method: "+method) else: if (self.has_Efield and self.has_Trdip): raise Exception("NOT IMPLEMENTED") else: if method == "short-exp": return self.__propagate_short_exp(rhoi,L=4) elif method == "short-exp-2": return self.__propagate_short_exp(rhoi,L=2) elif method == "short-exp-4": return self.__propagate_short_exp(rhoi,L=4) elif method == "short-exp-6": return self.__propagate_short_exp(rhoi,L=6) elif method == "primitive": return self.__propagate_primitive(rhoi) elif method == "Runge-Kutta": return self.__propagate_Runge_Kutta(rhoi) elif method == "diagonalization": return self.__propagate_diagonalization(rhoi) else: raise Exception("Unknown propagation method: "+method) def __propagate_primitive(self, rhoi): pr = ReducedDensityMatrixEvolution(self.TimeAxis,rhoi) rhoPrim = rhoi.data HH = self.Hamiltonian.data indx = 0 for ii in self.TimeAxis.time: for jj in range(0,self.Nref): drho = -1j*( numpy.dot(HH,rhoPrim) - numpy.dot(rhoPrim,HH) ) rhoPrim = rhoPrim + drho*self.dt pr.data[indx,:,:] = rhoPrim indx += 1 return pr def __propagate_primitive_with_relaxation(self, rhoi): pr = ReducedDensityMatrixEvolution(self.TimeAxis,rhoi) rhoPrim = rhoi.data HH = self.Hamiltonian.data RR = self.RelaxationTensor.data indx = 0 for ii in self.TimeAxis.data: for jj in range(0,self.Nref): drho = -1j*( numpy.dot(HH,rhoPrim) - numpy.dot(rhoPrim,HH) ) + numpy.tensordot(RR,rhoPrim) rhoPrim = rhoPrim + drho*self.dt pr.data[indx,:,:] = rhoPrim indx += 1 return pr def __propagate_Runge_Kutta(self, rhoi): indx = 0 for ii in self.timeaxis: self.rho[:,:,indx] = rhoi indx += 1 def __propagate_short_exp(self, rhoi, L=4): pr = ReducedDensityMatrixEvolution(self.TimeAxis,rhoi) rho1 = rhoi.data rho2 = rhoi.data if self.Hamiltonian.has_rwa: HH = self.Hamiltonian.get_RWA_data() else: HH = self.Hamiltonian.data indx = 1 for ii in self.TimeAxis.data[1:self.Nt]: for jj in range(0,self.Nref): for ll in range(1,L+1): rho1 = -1j*(self.dt/ll)*(numpy.dot(HH,rho1) - numpy.dot(rho1,HH) ) rho2 = rho2 + rho1 rho1 = rho2 pr.data[indx,:,:] = rho2 indx += 1 if self.Hamiltonian.has_rwa: pr.is_in_rwa = True return pr
MIT License
helios-protocol/py-helios-node
hp2p/tools/paragon/helpers.py
get_directly_linked_peers_without_handshake
python
async def get_directly_linked_peers_without_handshake(
        alice_factory: BasePeerFactory = None,
        bob_factory: BasePeerFactory = None) -> Tuple[BasePeer, BasePeer]:
    cancel_token = CancelToken("get_directly_linked_peers_without_handshake")

    if alice_factory is None:
        alice_factory = ParagonPeerFactory(
            privkey=ecies.generate_privkey(),
            context=ParagonContext(),
            token=cancel_token,
        )

    if bob_factory is None:
        bob_factory = ParagonPeerFactory(
            privkey=ecies.generate_privkey(),
            context=ParagonContext(),
            token=cancel_token,
        )

    alice_private_key = alice_factory.privkey
    bob_private_key = bob_factory.privkey

    alice_remote = kademlia.Node(
        bob_private_key.public_key, kademlia.Address('0.0.0.0', 0, 0))
    bob_remote = kademlia.Node(
        alice_private_key.public_key, kademlia.Address('0.0.0.0', 0, 0))

    use_eip8 = False
    initiator = auth.HandshakeInitiator(alice_remote, alice_private_key, use_eip8, cancel_token)

    f_alice: 'asyncio.Future[BasePeer]' = asyncio.Future()
    handshake_finished = asyncio.Event()

    (
        (alice_reader, alice_writer),
        (bob_reader, bob_writer),
    ) = get_directly_connected_streams()

    async def do_handshake() -> None:
        aes_secret, mac_secret, egress_mac, ingress_mac = await auth._handshake(
            initiator, alice_reader, alice_writer, cancel_token)

        connection = PeerConnection(
            reader=alice_reader,
            writer=alice_writer,
            aes_secret=aes_secret,
            mac_secret=mac_secret,
            egress_mac=egress_mac,
            ingress_mac=ingress_mac,
        )
        alice = alice_factory.create_peer(
            alice_remote,
            connection,
        )

        f_alice.set_result(alice)
        handshake_finished.set()

    asyncio.ensure_future(do_handshake())

    use_eip8 = False
    responder = auth.HandshakeResponder(bob_remote, bob_private_key, use_eip8, cancel_token)
    auth_cipher = await bob_reader.read(constants.ENCRYPTED_AUTH_MSG_LEN)

    initiator_ephemeral_pubkey, initiator_nonce, _ = decode_authentication(
        auth_cipher, bob_private_key)
    responder_nonce = keccak(os.urandom(constants.HASH_LEN))
    auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
    auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)
    bob_writer.write(auth_ack_ciphertext)

    await handshake_finished.wait()
    alice = await f_alice

    aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
        initiator_nonce, responder_nonce, initiator_ephemeral_pubkey,
        auth_cipher, auth_ack_ciphertext)
    assert egress_mac.digest() == alice.ingress_mac.digest()
    assert ingress_mac.digest() == alice.egress_mac.digest()

    connection = PeerConnection(
        reader=bob_reader,
        writer=bob_writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )
    bob = bob_factory.create_peer(
        bob_remote,
        connection,
    )

    return alice, bob
See get_directly_linked_peers(). Neither the P2P handshake nor the sub-protocol handshake will be performed here.
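A minimal driver sketch for the helper, using the module path shown in this row; it is only suitable for in-memory transport tests since no handshake is performed.

import asyncio

from hp2p.tools.paragon.helpers import get_directly_linked_peers_without_handshake

async def demo() -> None:
    # Two peers wired directly to each other through in-memory streams;
    # neither the P2P nor the sub-protocol handshake has run.
    alice, bob = await get_directly_linked_peers_without_handshake()
    print(alice, bob)

asyncio.get_event_loop().run_until_complete(demo())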
https://github.com/helios-protocol/py-helios-node/blob/691b378938f0a36bf8774dc1ee4e4370b6cf7c63/hp2p/tools/paragon/helpers.py#L74-L171
import asyncio
import os
from typing import (
    Any,
    Callable,
    cast,
    Tuple,
)

from eth_hash.auto import keccak

from cancel_token import CancelToken

from hp2p import auth
from hp2p import constants
from hp2p import ecies
from hp2p import kademlia
from hp2p.auth import decode_authentication
from hp2p.peer import (
    BasePeer,
    BasePeerFactory,
    PeerConnection,
)

from .peer import (
    ParagonPeerFactory,
    ParagonContext,
)


class MockTransport:
    def __init__(self) -> None:
        self._is_closing = False

    def close(self) -> None:
        self._is_closing = True

    def is_closing(self) -> bool:
        return self._is_closing


class MockStreamWriter:
    def __init__(self, write_target: Callable[..., None]) -> None:
        self._target = write_target
        self.transport = MockTransport()

    def write(self, *args: Any, **kwargs: Any) -> None:
        self._target(*args, **kwargs)

    def close(self) -> None:
        self.transport.close()


TConnectedStreams = Tuple[
    Tuple[asyncio.StreamReader, asyncio.StreamWriter],
    Tuple[asyncio.StreamReader, asyncio.StreamWriter],
]


def get_directly_connected_streams() -> TConnectedStreams:
    bob_reader = asyncio.StreamReader()
    alice_reader = asyncio.StreamReader()
    bob_writer = MockStreamWriter(alice_reader.feed_data)
    alice_writer = MockStreamWriter(bob_reader.feed_data)
    return (
        (alice_reader, cast(asyncio.StreamWriter, alice_writer)),
        (bob_reader, cast(asyncio.StreamWriter, bob_writer)),
    )
MIT License
jchanvfx/nodegraphqt
NodeGraphQt/widgets/properties_bin.py
PropertiesBinWidget.__on_property_widget_changed
python
def __on_property_widget_changed(self, node_id, prop_name, prop_value):
    if not self._block_signal:
        self.property_changed.emit(node_id, prop_name, prop_value)
Slot function triggered when a property widget value has changed.

Args:
    node_id (str): node id.
    prop_name (str): node property name.
    prop_value (object): node property value.
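A hedged wiring sketch: the slot itself is private, so the observable surface is the property_changed signal re-emitted by the bin; the NodeGraph construction and Qt application loop below follow common NodeGraphQt usage and are illustrative.

from Qt import QtWidgets
from NodeGraphQt import NodeGraph
from NodeGraphQt.widgets.properties_bin import PropertiesBinWidget

app = QtWidgets.QApplication([])
graph = NodeGraph()

# The bin registers itself with the graph in its constructor; edits made in
# its property widgets are re-emitted through property_changed unless the
# change originated from the graph (then _block_signal suppresses it).
prop_bin = PropertiesBinWidget(node_graph=graph)
prop_bin.property_changed.connect(
    lambda node_id, name, value: print('changed:', node_id, name, value))

prop_bin.show()
app.exec_()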
https://github.com/jchanvfx/nodegraphqt/blob/44a6c9d80d182367d9465f3d339d9a60ae597da8/NodeGraphQt/widgets/properties_bin.py#L160-L170
from Qt import QtWidgets, QtCore, QtGui, QtCompat from .properties import NodePropWidget class PropertiesDelegate(QtWidgets.QStyledItemDelegate): def paint(self, painter, option, index): painter.save() painter.setRenderHint(QtGui.QPainter.Antialiasing, False) painter.setPen(QtCore.Qt.NoPen) bg_clr = option.palette.midlight().color() painter.setBrush(QtGui.QBrush(bg_clr)) painter.drawRect(option.rect) border_width = 1 if option.state & QtWidgets.QStyle.State_Selected: bdr_clr = option.palette.highlight().color() painter.setPen(QtGui.QPen(bdr_clr, 1.5)) else: bdr_clr = option.palette.alternateBase().color() painter.setPen(QtGui.QPen(bdr_clr, 1)) painter.setBrush(QtCore.Qt.NoBrush) painter.drawRect(QtCore.QRect( option.rect.x() + border_width, option.rect.y() + border_width, option.rect.width() - (border_width * 2), option.rect.height() - (border_width * 2)) ) painter.restore() class PropertiesList(QtWidgets.QTableWidget): def __init__(self, parent=None): super(PropertiesList, self).__init__(parent) self.setItemDelegate(PropertiesDelegate()) self.setColumnCount(1) self.setShowGrid(False) self.verticalHeader().hide() self.horizontalHeader().hide() QtCompat.QHeaderView.setSectionResizeMode( self.verticalHeader(), QtWidgets.QHeaderView.ResizeToContents) QtCompat.QHeaderView.setSectionResizeMode( self.horizontalHeader(), 0, QtWidgets.QHeaderView.Stretch) self.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel) def wheelEvent(self, event): delta = event.delta() * 0.2 self.verticalScrollBar().setValue( self.verticalScrollBar().value() - delta ) class PropertiesBinWidget(QtWidgets.QWidget): property_changed = QtCore.Signal(str, str, object) def __init__(self, parent=None, node_graph=None): super(PropertiesBinWidget, self).__init__(parent) self.setWindowTitle('Properties Bin') self._prop_list = PropertiesList() self._limit = QtWidgets.QSpinBox() self._limit.setToolTip('Set display nodes limit.') self._limit.setMaximum(10) self._limit.setMinimum(0) self._limit.setValue(2) self._limit.valueChanged.connect(self.__on_limit_changed) self.resize(450, 400) self._block_signal = False self._lock = False self.btn_lock = QtWidgets.QPushButton('lock') self.btn_lock.setToolTip( 'Lock the properties bin prevent nodes from being loaded.') self.btn_lock.clicked.connect(self.lock_bin) btn_clr = QtWidgets.QPushButton('clear') btn_clr.setToolTip('Clear the properties bin.') btn_clr.clicked.connect(self.clear_bin) top_layout = QtWidgets.QHBoxLayout() top_layout.setSpacing(2) top_layout.addWidget(self._limit) top_layout.addStretch(1) top_layout.addWidget(self.btn_lock) top_layout.addWidget(btn_clr) layout = QtWidgets.QVBoxLayout(self) layout.addLayout(top_layout) layout.addWidget(self._prop_list, 1) node_graph.add_properties_bin(self) node_graph.node_double_clicked.connect(self.add_node) node_graph.nodes_deleted.connect(self.__on_nodes_deleted) node_graph.property_changed.connect(self.__on_graph_property_changed) def __repr__(self): return '<{} object at {}>'.format(self.__class__.__name__, hex(id(self))) def __on_prop_close(self, node_id): items = self._prop_list.findItems(node_id, QtCore.Qt.MatchExactly) [self._prop_list.removeRow(i.row()) for i in items] def __on_limit_changed(self, value): rows = self._prop_list.rowCount() if rows > value: self._prop_list.removeRow(rows - 1) def __on_nodes_deleted(self, nodes): [self.__on_prop_close(n) for n in nodes] def __on_graph_property_changed(self, node, prop_name, prop_value): properties_widget = self.prop_widget(node) if not properties_widget: return 
property_window = properties_widget.get_widget(prop_name) if property_window and prop_value != property_window.get_value(): self._block_signal = True property_window.set_value(prop_value) self._block_signal = False
MIT License
so1n/pait
tests/test_app/test_tornado.py
TestTornado.encode_multipart_formdata
python
def encode_multipart_formdata(self, data: Optional[dict] = None, files: Optional[dict] = None) -> Tuple[str, bytes]:
    body: BytesIO = BytesIO()
    boundary: str = self.choose_boundary()

    if data:
        for key, value in data.items():
            body.write(("--%s\r\n" % boundary).encode(encoding="utf-8"))
            body.write(('Content-Disposition:form-data;name="%s"\r\n' % key).encode(encoding="utf-8"))
            body.write("\r\n".encode(encoding="utf-8"))
            if isinstance(value, int):
                value = str(value)
            body.write(("%s\r\n" % value).encode(encoding="utf-8"))

    if files:
        for key, value in files.items():
            body.write(("--%s\r\n" % boundary).encode(encoding="utf-8"))
            body.write(
                ('Content-Disposition:form-data;name="file";filename="%s"\r\n' % key).encode(encoding="utf-8")
            )
            body.write("\r\n".encode(encoding="utf-8"))
            body.write(value)
            body.write("\r\n".encode(encoding="utf-8"))

    body.write(("--%s--\r\n" % boundary).encode(encoding="utf-8"))
    content_type: str = "multipart/form-data;boundary=%s" % boundary
    return content_type, body.getvalue()
fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files. Return (content_type, body) ready for httplib.HTTP instance
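A sketch of how the helper is used from another test method on the same AsyncHTTPTestCase subclass (mirroring test_other_field in the context below); the field names, file payload, and endpoint here are illustrative.

    def test_upload_sketch(self) -> None:
        # Encode one form field and one file, then POST the raw body with the
        # boundary-bearing content type returned by the helper.
        content_type, body = self.encode_multipart_formdata(
            data={"a": "1"},
            files={"demo.txt": b"Hello Word!"},
        )
        self.fetch(
            "/api/other_field",
            method="POST",
            headers={"Content-Type": content_type,
                     "content-length": str(len(body))},
            body=body,
        )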
https://github.com/so1n/pait/blob/04bd58a13bfcb670213f2a7fdfca47753d11d649/tests/test_app/test_tornado.py#L276-L306
import binascii import json import os import sys from io import BytesIO from tempfile import NamedTemporaryFile from typing import Any, Generator, Optional, Tuple from unittest import mock import pytest from tornado.testing import AsyncHTTPTestCase, HTTPResponse from tornado.web import Application from example.param_verify.tornado_example import TestCheckParamHandler as CheckParamHandler from example.param_verify.tornado_example import TestCheckRespHandler as CheckRespHandler from example.param_verify.tornado_example import TestDependAsyncContextmanagerHanler as DependAsyncContextmanagerHanler from example.param_verify.tornado_example import TestDependContextmanagerHanler as DependContextmanagerHanler from example.param_verify.tornado_example import TestGetHandler as GetHandler from example.param_verify.tornado_example import TestOtherFieldHandler as OtherFieldHandler from example.param_verify.tornado_example import TestPostHandler as PostHandler from example.param_verify.tornado_example import create_app from pait.app import auto_load_app from pait.app.tornado import TornadoTestHelper from pait.g import config @pytest.fixture def app() -> Generator[Application, None, None]: yield create_app() class TestTornado(AsyncHTTPTestCase): def get_app(self) -> Application: return create_app() def get_url(self, path: str) -> str: return "%s://localhost:%s%s" % (self.get_protocol(), self.get_http_port(), path) def test_get(self) -> None: test_helper: TornadoTestHelper = TornadoTestHelper( self, GetHandler.get, path_dict={"age": 3}, query_dict={"uid": "123", "user_name": "appl", "sex": "man", "multi_user_name": ["abc", "efg"]}, ) response: HTTPResponse = self.fetch( "/api/get/3?uid=123&user_name=appl&sex=man&multi_user_name=abc&multi_user_name=efg" ) for resp in [test_helper.get(), response]: resp_dict = json.loads(resp.body.decode()) assert resp_dict["code"] == 0 assert resp_dict["data"] == { "uid": 123, "user_name": "appl", "email": "example@xxx.com", "age": 3, "sex": "man", "multi_user_name": ["abc", "efg"], } def test_check_param(self) -> None: test_helper: TornadoTestHelper = TornadoTestHelper( self, CheckParamHandler.get, query_dict={"uid": 123, "user_name": "appl", "sex": "man", "age": 10, "alias_user_name": "appe"}, ) assert ( "requires at most one of param user_name or alias_user_name" in json.loads(test_helper.get().body.decode())["msg"] ) test_helper = TornadoTestHelper( self, CheckParamHandler.get, query_dict={"uid": 123, "sex": "man", "age": 10, "alias_user_name": "appe"} ) assert ( "birthday requires param alias_user_name, which if not none" in json.loads(test_helper.get().body.decode())["msg"] ) def test_check_response(self) -> None: test_helper: TornadoTestHelper = TornadoTestHelper( self, CheckRespHandler.get, query_dict={"uid": 123, "user_name": "appl", "sex": "man", "age": 10}, ) with pytest.raises(RuntimeError): test_helper.get() test_helper = TornadoTestHelper( self, CheckRespHandler.get, query_dict={"uid": 123, "user_name": "appl", "sex": "man", "age": 10, "display_age": 1}, ) test_helper.get() @mock.patch("example.param_verify.model.logging.error") @mock.patch("example.param_verify.model.logging.info") def test_depend_contextmanager(self, info_logger: Any, error_logger: Any) -> None: test_helper: TornadoTestHelper = TornadoTestHelper( self, DependContextmanagerHanler.get, query_dict={"uid": 123}, ) test_helper.get() info_logger.assert_called_once_with("context_depend exit") test_helper = TornadoTestHelper( self, DependContextmanagerHanler.get, query_dict={"uid": 123, "is_raise": 
True}, ) test_helper.get() error_logger.assert_called_once_with("context_depend error") @mock.patch("example.param_verify.model.logging.error") @mock.patch("example.param_verify.model.logging.info") def test_depend_async_contextmanager(self, info_logger: Any, error_logger: Any) -> None: test_helper: TornadoTestHelper = TornadoTestHelper( self, DependAsyncContextmanagerHanler.get, query_dict={"uid": 123}, ) test_helper.get() info_logger.assert_called_once_with("context_depend exit") test_helper = TornadoTestHelper( self, DependAsyncContextmanagerHanler.get, query_dict={"uid": 123, "is_raise": True}, ) test_helper.get() error_logger.assert_called_once_with("context_depend error") def test_mock_get(self) -> None: config.enable_mock_response = True resp: dict = json.loads( self.fetch( "/api/get/3?uid=123&user_name=appl&sex=man&multi_user_name=abc&multi_user_name=efg" ).body.decode() ) assert resp == { "code": 0, "data": { "age": 99, "email": "example@so1n.me", "uid": 666, "user_name": "mock_name", "multi_user_name": [], "sex": "man", }, "msg": "success", } config.enable_mock_response = False def test_depend(self) -> None: response: HTTPResponse = self.fetch( "/api/depend?uid=123&user_name=appl", method="POST", headers={"user-agent": "customer_agent"}, body='{"age": 2}', ) resp: dict = json.loads(response.body.decode()) assert resp["code"] == 0 assert resp["data"] == {"uid": 123, "user_name": "appl", "age": 2, "user_agent": "customer_agent"} def test_get_cbv(self) -> None: response: HTTPResponse = self.fetch( "/api/cbv?uid=123&user_name=appl&age=2", headers={"user-agent": "customer_agent"} ) resp: dict = json.loads(response.body.decode()) assert resp["code"] == 0 assert resp["data"] == {"uid": 123, "user_name": "appl", "email": "example@xxx.com", "age": 2} def test_post_cbv(self) -> None: response: HTTPResponse = self.fetch( "/api/cbv", headers={"user-agent": "customer_agent"}, method="POST", body='{"uid": 123, "user_name": "appl", "age": 2}', ) resp: dict = json.loads(response.body.decode()) assert resp["code"] == 0 assert resp["data"] == {"uid": 123, "user_name": "appl", "age": 2, "user_agent": "customer_agent"} def test_post(self) -> None: test_helper: TornadoTestHelper = TornadoTestHelper( self, PostHandler.post, body_dict={"uid": 123, "user_name": "appl", "age": 2, "sex": "man"}, header_dict={"user-agent": "customer_agent"}, ) response: HTTPResponse = self.fetch( "/api/post", headers={"user-agent": "customer_agent"}, method="POST", body='{"uid": 123, "user_name": "appl", "age": 2, "sex": "man"}', ) for resp in [test_helper.post(), response]: resp_dict = json.loads(resp.body.decode()) assert resp_dict["code"] == 0 assert resp_dict["data"] == { "uid": 123, "user_name": "appl", "age": 2, "content_type": "application/x-www-form-urlencoded", "sex": "man", } def test_pait_model(self) -> None: response: HTTPResponse = self.fetch( "/api/pait_model?uid=123&user_name=appl", headers={"user-agent": "customer_agent"}, method="POST", body='{"age": 2}', ) resp: dict = json.loads(response.body.decode()) assert resp["code"] == 0 assert resp["data"] == {"uid": 123, "user_name": "appl", "age": 2, "user_agent": "customer_agent"} def test_raise_tip(self) -> None: response: HTTPResponse = self.fetch( "/api/raise_tip", headers={"user-agent": "customer_agent"}, method="POST", body='{"uid": 123, "user_name": "appl", "age": 2}', ) resp: dict = json.loads(response.body.decode()) assert "msg" in resp def test_other_field(self) -> None: cookie_str: str = "abcd=abcd;" file_content: str = "Hello Word!" 
f1 = NamedTemporaryFile(delete=True) file_name: str = f1.name f1.write(file_content.encode()) f1.seek(0) f2 = NamedTemporaryFile(delete=True) f2.name = file_name f2.write(file_content.encode()) f2.seek(0) test_helper: TornadoTestHelper = TornadoTestHelper( self, OtherFieldHandler.post, cookie_dict={"cookie": cookie_str}, file_dict={f1.name: f1.read()}, form_dict={"a": "1", "b": "2", "c": "3"}, ) content_type, body = self.encode_multipart_formdata( data={"a": "1", "b": "2", "c": "3"}, files={file_name: f2.read()} ) response: HTTPResponse = self.fetch( "/api/other_field", headers={"cookie": cookie_str, "Content-Type": content_type, "content-length": str(len(body))}, method="POST", body=body, ) for resp in [test_helper.post(), response]: assert { "filename": file_name, "content": file_content, "form_a": "1", "form_b": "2", "form_c": ["3"], "cookie": {"abcd": "abcd"}, } == json.loads(resp.body.decode())["data"] @staticmethod def choose_boundary() -> str: boundary: bytes = binascii.hexlify(os.urandom(16)) return boundary.decode("ascii")
Apache License 2.0
jdasoftwaregroup/kartothek
kartothek/cli/_delete.py
delete
python
def delete(ctx, include, exclude):
    cube = ctx.obj["cube"]
    store = ctx.obj["store"]
    all_datasets = set(ctx.obj["datasets"].keys())

    delete_datasets = filter_items("dataset", all_datasets, include, exclude)

    delete_cube_bag(cube=cube, store=store, datasets=delete_datasets).compute()
Delete cube from store.
https://github.com/jdasoftwaregroup/kartothek/blob/6bc7e868435e98cbda0b695900f29d1ff7d49110/kartothek/cli/_delete.py#L26-L34
import click

from kartothek.cli._utils import filter_items
from kartothek.io.dask.bag_cube import delete_cube_bag

__all__ = ("delete",)


@click.option(
    "--include",
    help="Comma separated list of dataset-id to be deleted. e.g., ``--include enrich,enrich_cl`` "
    "also supports glob patterns",
    is_flag=False,
    metavar="<include>",
    type=click.STRING,
)
@click.option(
    "--exclude",
    help="Delete all datasets except items in this comma separated list. e.g., ``--exclude enrich,enrich_cl`` "
    "also supports glob patterns",
    is_flag=False,
    metavar="<exclude>",
    type=click.STRING,
)
@click.pass_context
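The include/exclude semantics of the command are easiest to see with a small standalone sketch. The snippet below is an illustration only, not the real kartothek.cli._utils.filter_items helper; it mimics the documented behaviour (comma separated dataset ids with glob support) using fnmatch, with made-up dataset names.

from fnmatch import fnmatch

def _filter(all_datasets, include=None, exclude=None):
    # Keep datasets matching any --include pattern (or everything if omitted),
    # then drop those matching any --exclude pattern.
    selected = {d for d in all_datasets
                if include is None
                or any(fnmatch(d, p) for p in include.split(","))}
    if exclude:
        selected -= {d for d in selected
                     if any(fnmatch(d, p) for p in exclude.split(","))}
    return selected

print(_filter({"enrich", "enrich_cl", "source"}, include="enrich*", exclude="enrich_cl"))
# {'enrich'}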
MIT License
phistrom/basecampy3
basecampy3/urls/endpoints/webhooks.py
Webhooks.list
python
def list(self, project):
    return self._get("/buckets/{project}/webhooks.json", project=project)
List the Webhooks in a given Project.

https://github.com/basecamp/bc3-api/blob/master/sections/webhooks.md#get-webhooks

:param project: the ID of a Project
:type project: int
:return: the URL for listing Webhooks in a desired Project
:rtype: basecampy3.urls.URL
https://github.com/phistrom/basecampy3/blob/9f0a301c211458b90aa1442ac9c429638afb68f3/basecampy3/urls/endpoints/webhooks.py#L14-L25
from .base import EndpointURLs


class Webhooks(EndpointURLs):
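For illustration only, the path template used by this helper expands as shown below; the project ID is a hypothetical value, and in practice the resulting basecampy3.urls.URL object is obtained through a configured session rather than built by hand.

# Illustration of how the endpoint path is filled in for a given project ID.
path = "/buckets/{project}/webhooks.json".format(project=27282735)
assert path == "/buckets/27282735/webhooks.json"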
MIT License
gofrendiasgard/kokoropy
kokoropy/packages/sqlalchemy/engine/base.py
Connection.begin_nested
python
def begin_nested(self):
    if self.__transaction is None:
        self.__transaction = RootTransaction(self)
    else:
        self.__transaction = NestedTransaction(self, self.__transaction)
    return self.__transaction
Begin a nested transaction and return a transaction handle.

The returned object is an instance of :class:`.NestedTransaction`.

Nested transactions require SAVEPOINT support in the
underlying database.  Any transaction in the hierarchy may
``commit`` and ``rollback``, however the outermost transaction
still controls the overall ``commit`` or ``rollback`` of the
transaction as a whole.

See also :meth:`.Connection.begin`,
:meth:`.Connection.begin_twophase`.
https://github.com/gofrendiasgard/kokoropy/blob/49c8ca4b7dd2a084f2ced33fc5987b8a8b62c995/kokoropy/packages/sqlalchemy/engine/base.py#L425-L443
from __future__ import with_statement import sys from .. import exc, util, log, interfaces from ..sql import util as sql_util from .interfaces import Connectable, ExceptionContext from .util import _distill_params import contextlib class Connection(Connectable): def __init__(self, engine, connection=None, close_with_result=False, _branch=False, _execution_options=None, _dispatch=None, _has_events=None): self.engine = engine self.dialect = engine.dialect self.__connection = connection or engine.raw_connection() self.__transaction = None self.should_close_with_result = close_with_result self.__savepoint_seq = 0 self.__branch = _branch self.__invalid = False self.__can_reconnect = True if _dispatch: self.dispatch = _dispatch elif _has_events is None: self.dispatch = self.dispatch._join(engine.dispatch) self._has_events = _has_events or ( _has_events is None and engine._has_events) self._echo = self.engine._should_log_info() if _execution_options: self._execution_options = engine._execution_options.union(_execution_options) else: self._execution_options = engine._execution_options if self._has_events or self.engine._has_events: self.dispatch.engine_connect(self, _branch) def _branch(self): return self.engine._connection_cls( self.engine, self.__connection, _branch=True, _has_events=self._has_events, _dispatch=self.dispatch) def _clone(self): c = self.__class__.__new__(self.__class__) c.__dict__ = self.__dict__.copy() return c def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def execution_options(self, **opt): c = self._clone() c._execution_options = c._execution_options.union(opt) if self._has_events or self.engine._has_events: self.dispatch.set_connection_execution_options(c, opt) self.dialect.set_connection_execution_options(c, opt) return c @property def closed(self): return '_Connection__connection' not in self.__dict__ and not self.__can_reconnect @property def invalidated(self): return self.__invalid @property def connection(self): try: return self.__connection except AttributeError: return self._revalidate_connection() def _revalidate_connection(self): if self.__can_reconnect and self.__invalid: if self.__transaction is not None: raise exc.InvalidRequestError( "Can't reconnect until invalid " "transaction is rolled back") self.__connection = self.engine.raw_connection() self.__invalid = False return self.__connection raise exc.ResourceClosedError("This Connection is closed") @property def _connection_is_valid(self): return getattr(self.__connection, 'is_valid', False) @property def _still_open_and_connection_is_valid(self): return not self.closed and not self.invalidated and getattr(self.__connection, 'is_valid', False) @property def info(self): return self.connection.info def connect(self): return self._branch() def contextual_connect(self, **kwargs): return self._branch() def invalidate(self, exception=None): if self.invalidated: return if self.closed: raise exc.ResourceClosedError("This Connection is closed") if self._connection_is_valid: self.__connection.invalidate(exception) del self.__connection self.__invalid = True def detach(self): self.__connection.detach() def begin(self): if self.__transaction is None: self.__transaction = RootTransaction(self) return self.__transaction else: return Transaction(self, self.__transaction)
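A minimal usage sketch of the nested-transaction pattern, assuming an in-memory SQLite engine (whose dialect supports SAVEPOINT); exact Connection APIs differ slightly across SQLAlchemy versions, so treat this as a sketch rather than version-specific code.

from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")          # in-memory database for illustration
with engine.connect() as conn:
    outer = conn.begin()                     # outermost transaction
    conn.execute(text("CREATE TABLE t (x INTEGER)"))
    conn.execute(text("INSERT INTO t VALUES (1)"))

    nested = conn.begin_nested()             # emits SAVEPOINT
    conn.execute(text("INSERT INTO t VALUES (2)"))
    nested.rollback()                        # undoes only work done after the SAVEPOINT

    outer.commit()                           # the first INSERT is kept
    print(conn.execute(text("SELECT x FROM t")).fetchall())   # [(1,)]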
MIT License
os-migrate/os-migrate
os_migrate/plugins/module_utils/resource.py
Resource._set_sdk_param
python
def _set_sdk_param(ser_params, ser_key, sdk_params, sdk_key):
    if ser_params.get(ser_key, None) is not None:
        sdk_params[sdk_key] = ser_params[ser_key]
Assign value from `ser_key` in `ser_params` dict as value for `sdk_key` in `sdk_params`, but only if it isn't None.
https://github.com/os-migrate/os-migrate/blob/9192ffb76d5e555db22dd87a606d5ce043027216/os_migrate/plugins/module_utils/resource.py#L163-L168
from __future__ import (absolute_import, division, print_function) __metaclass__ = type from copy import deepcopy from openstack import exceptions as os_exc from ansible_collections.os_migrate.os_migrate.plugins.module_utils import const, exc class Resource(): resource_type = 'UNDEFINED' sdk_class = 'UNDEFINED' info_from_sdk = [] info_from_refs = [] params_from_sdk = [] params_from_refs = [] sdk_params_from_params = None sdk_params_from_refs = [] readonly_sdk_params = [] skip_falsey_sdk_params = [] migration_param_defaults = {} @classmethod def from_data(cls, data): res_type = data.get('type', None) if res_type != cls.resource_type: raise exc.UnexpectedResourceType(cls.resource_type, res_type) obj = cls() obj.data = data obj.data.setdefault(const.RES_PARAMS, {}) obj.data.setdefault(const.RES_INFO, {}) obj.data.setdefault(const.RES_MIGRATION_PARAMS, {}) return obj @classmethod def from_sdk(cls, conn, sdk_resource): if not isinstance(sdk_resource, cls.sdk_class): raise exc.UnexpectedResourceType( cls.sdk_class, sdk_resource.__class__) obj = cls() obj._data_from_sdk_and_refs( sdk_resource, cls._refs_from_sdk(conn, sdk_resource)) obj.data['type'] = cls.resource_type for k, v in cls.migration_param_defaults.items(): obj.data[const.RES_MIGRATION_PARAMS][k] = deepcopy(v) return obj @classmethod def _create_sdk_res(cls, conn, sdk_params): raise NotImplementedError(f"_create_sdk_res not implemented for {cls}.") @classmethod def _find_sdk_res(cls, conn, name_or_id, filters=None): raise NotImplementedError(f"_find_sdk_res not implemented for {cls}.") @staticmethod def _refs_from_sdk(conn, sdk_res): return {} @classmethod def _set_sdk_params_same_name(cls, ser_params, sdk_params, param_names): for p_name in param_names: cls._set_sdk_param(ser_params, p_name, sdk_params, p_name) @classmethod def _del_sdk_params_if_falsey(cls, sdk_params, param_names): for p_name in param_names: if p_name in sdk_params and not sdk_params[p_name]: del sdk_params[p_name] @staticmethod
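Because the helper is a plain static method, its behaviour is easy to show in isolation. The sketch below restates the same logic in a local function (to avoid importing the Ansible collection) and uses made-up parameter names; only keys whose serialized value is not None are copied into the SDK parameter dict.

def set_sdk_param(ser_params, ser_key, sdk_params, sdk_key):
    # Same logic as Resource._set_sdk_param, restated locally for illustration.
    if ser_params.get(ser_key, None) is not None:
        sdk_params[sdk_key] = ser_params[ser_key]

ser_params = {"name": "net1", "description": None}   # hypothetical serialized resource params
sdk_params = {}
set_sdk_param(ser_params, "name", sdk_params, "name")
set_sdk_param(ser_params, "description", sdk_params, "description")
print(sdk_params)   # {'name': 'net1'} -- the None-valued key is skipped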
Apache License 2.0
almarklein/asgineer
asgineer/_request.py
WebsocketRequest.close
python
async def close(self, code=1000):
    await self._send({"type": "websocket.close", "code": code})
    self._app_state = DISCONNECTED
Async function to close the websocket connection.
https://github.com/almarklein/asgineer/blob/e899b015a26b280177ba7cab1ae0b93ca157392f/asgineer/_request.py#L404-L407
import weakref import json from urllib.parse import parse_qsl from ._compat import sleep, Event, wait_for_any_then_cancel_the_rest CONNECTING = 0 CONNECTED = 1 DONE = 2 DISCONNECTED = 3 class DisconnectedError(IOError): class BaseRequest: __slots__ = ("__weakref__", "_scope", "_headers", "_querylist", "_request_sets") def __init__(self, scope): self._scope = scope self._headers = None self._querylist = None self._request_sets = set() async def _destroy(self): for s in self._request_sets: try: s.discard(self) except Exception: pass self._request_sets.clear() @property def scope(self): return self._scope @property def method(self): return self._scope["method"] @property def headers(self): if self._headers is None: self._headers = dict( (key.decode(), val.decode()) for key, val in self._scope["headers"] ) return self._headers @property def url(self): url = f"{self.scheme}://{self.host}:{self.port}{self.path}" if self.querylist: url += "?" + "&".join(f"{key}={val}" for key, val in self.querylist) return url @property def scheme(self): return self._scope["scheme"] @property def host(self): return self.headers.get("host", self._scope["server"][0]).split(":")[0] @property def port(self): return self._scope["server"][1] @property def path(self): return ( self._scope.get("root_path", "") + self._scope["path"] ) @property def querylist(self): if self._querylist is None: q = self._scope["query_string"] self._querylist = parse_qsl(q.decode()) return self._querylist @property def querydict(self): return dict(self.querylist) class HttpRequest(BaseRequest): __slots__ = ( "_receive", "_send", "_client_state", "_app_state", "_body", "_wakeup_event", ) def __init__(self, scope, receive, send): super().__init__(scope) self._receive = receive self._send = send self._client_state = CONNECTED self._app_state = CONNECTING self._body = None self._wakeup_event = None async def accept(self, status=200, headers={}): if self._app_state != CONNECTING: raise IOError("Cannot accept an already accepted connection.") status = int(status) try: rawheaders = [(k.encode(), v.encode()) for k, v in headers.items()] except Exception: raise TypeError("Header keys and values must all be strings.") self._app_state = CONNECTED msg = {"type": "http.response.start", "status": status, "headers": rawheaders} await self._send(msg) async def _receive_chunk(self): if self._client_state == DISCONNECTED: raise IOError("Cannot receive from connection that already disconnected.") message = await self._receive() mt = "http.disconnect" if message is None else message["type"] if mt == "http.request": data = bytes(message.get("body", b"")) if not message.get("more_body", False): self._client_state = DONE return data elif mt == "http.disconnect": self._client_state = DISCONNECTED raise DisconnectedError() else: raise IOError(f"Unexpected message type: {mt}") async def send(self, data, more=True): more = bool(more) if isinstance(data, str): data = data.encode() elif not isinstance(data, bytes): raise TypeError(f"Can only send bytes/str over http, not {type(data)}.") message = {"type": "http.response.body", "body": data, "more_body": more} if self._app_state == CONNECTED: if not more: self._app_state = DONE await self._send(message) elif self._app_state == CONNECTING: raise IOError("Cannot send before calling accept.") else: raise IOError("Cannot send to a closed connection.") async def sleep_while_connected(self, seconds): if self._client_state == DISCONNECTED: raise IOError("Cannot wait for connection that already disconnected.") if 
self._wakeup_event is None: self._wakeup_event = Event() self._wakeup_event.clear() await wait_for_any_then_cancel_the_rest( sleep(seconds), self._wakeup_event.wait(), self._receive_until_disconnect(), ) if self._client_state == DISCONNECTED: raise DisconnectedError() async def _receive_until_disconnect(self): while True: try: await self._receive_chunk() except DisconnectedError: break async def wakeup(self): if self._wakeup_event is not None: self._wakeup_event.set() async def iter_body(self): if self._client_state == DONE: raise IOError("Cannot receive an http request that is already consumed.") while True: chunk = await self._receive_chunk() yield chunk if self._client_state != CONNECTED: break async def get_body(self, limit=10 * 2 ** 20): if self._body is None: nbytes = 0 chunks = [] async for chunk in self.iter_body(): nbytes += len(chunk) if nbytes > limit: chunks.clear() raise IOError("Request body too large.") chunks.append(chunk) self._body = b"".join(chunks) return self._body async def get_json(self, limit=10 * 2 ** 20): body = await self.get_body(limit) return json.loads(body.decode()) class WebsocketRequest(BaseRequest): __slots__ = ("_receive", "_send", "_client_state", "_app_state") def __init__(self, scope, receive, send): assert scope["type"] == "websocket", f"Unexpected ws scope type {scope['type']}" super().__init__(scope) self._receive = receive self._send = send self._client_state = CONNECTING self._app_state = CONNECTING async def accept(self, subprotocol=None): if self._client_state == CONNECTING: message = await self._receive() mt = message["type"] if mt == "websocket.connect": self._client_state = CONNECTED elif mt == "websocket.disconnect": self._client_state = DISCONNECTED raise DisconnectedError() else: raise IOError(f"Unexpected ws message type {mt}") elif self._client_state == DISCONNECTED: raise IOError("Cannot accept ws that already disconnected.") if self._app_state == CONNECTING: await self._send({"type": "websocket.accept", "subprotocol": subprotocol}) self._app_state = CONNECTED else: raise IOError("Cannot accept an already accepted ws connection.") async def send(self, data): if isinstance(data, bytes): message = {"type": "websocket.send", "bytes": data} elif isinstance(data, str): message = {"type": "websocket.send", "text": data} elif isinstance(data, dict): encoded = json.dumps(data).encode() message = {"type": "websocket.send", "bytes": encoded} else: raise TypeError(f"Can only send bytes/str/dict over ws, not {type(data)}") if self._client_state == DISCONNECTED: raise IOError("Cannot send to a disconnected ws.") elif self._app_state == CONNECTED: await self._send(message) elif self._app_state == CONNECTING: raise IOError("Cannot send before calling accept on ws.") else: raise IOError("Cannot send to a closed ws.") async def receive(self): if self._client_state == CONNECTED: message = await self._receive() elif self._client_state == DISCONNECTED: raise IOError("Cannot receive from ws that already disconnected.") else: raise IOError("Cannot receive before calling accept on ws.") mt = message["type"] if mt == "websocket.receive": return message.get("bytes", None) or message.get("text", None) or b"" elif mt == "websocket.disconnect": self._client_state = DISCONNECTED raise DisconnectedError(f"ws disconnect {message.get('code', 1000)}") else: raise IOError(f"Unexpected ws message type {mt}") async def receive_iter(self): while True: try: result = await self.receive() yield result except DisconnectedError: break async def receive_json(self): result = 
await self.receive() if isinstance(result, bytes): result = result.decode() return json.loads(result)
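A sketch of how close() is typically reached from a handler, assuming asgineer's to_asgi entry point and the request methods shown above (accept, receive_iter, send); the routing logic and close code below are illustrative, not part of the library.

import asgineer

@asgineer.to_asgi
async def main(request):
    if isinstance(request, asgineer.WebsocketRequest):
        await request.accept()
        async for message in request.receive_iter():   # stops when the client disconnects
            if message == "bye":
                break
            await request.send(message)                # simple echo
        await request.close(1000)                      # 1000 = normal closure
        return
    return 404, {}, "websocket endpoint only"

# Serve `main` with any ASGI server, e.g. `uvicorn thismodule:main`.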
BSD 2-Clause Simplified License
jatinchowdhury18/audio_dspy
audio_dspy/hysteresis.py
Hysteresis.RK2
python
def RK2(self, M_n1, H, H_n1, H_d, H_d_n1):
    k1 = self.T * self.dMdt(M_n1, H_n1, H_d_n1)
    k2 = self.T * self.dMdt(M_n1 + k1/2, (H + H_n1) / 2, (H_d + H_d_n1) / 2)
    return M_n1 + k2
Compute hysteresis function with Runge-Kutta 2nd order

Parameters
----------
M_n1 : float
    Previous magnetisation
H : float
    Magnetic field
H_n1 : float
    Previous magnetic field
H_d : float
    Magnetic field derivative
H_d_n1 : float
    Previous magnetic field derivative

Returns
-------
M : float
    Current magnetisation
https://github.com/jatinchowdhury18/audio_dspy/blob/5d39eba11f56ac5619173fe8f3f233e682bc1d60/audio_dspy/hysteresis.py#L122-L146
import numpy as np class Differentiator: def __init__(self, fs, alpha=1.0): self.T = 1.0 / fs self.alpha = alpha self.x_1 = 0.0 self.xD_1 = 0.0 def differentiate(self, x): xD = (((1 + self.alpha) / self.T) * (x - self.x_1)) - self.alpha * self.xD_1 self.x_1 = x self.xD_1 = xD return xD class Hysteresis: def __init__(self, drive, sat, width, fs, dAlpha=1.0, mode='RK2'): self.deriv = Differentiator(fs, dAlpha) self.T = 1.0 / fs self.M_s = 0.5 + 1.5*(1-sat) self.a = self.M_s / (0.01 + 6*drive) self.alpha = 1.6e-3 self.k = 30 * (1-0.5)**6 + 0.01 self.c = (1-width)**0.5 - 0.01 assert mode == 'RK2' or mode == 'RK4' or mode[:2] == 'NR', "Invalid mode!" self.mode = mode @staticmethod def langevin(x): if (abs(x) > 10 ** -4): return (1 / np.tanh(x)) - (1/x) else: return (x / 3) @staticmethod def langevin_deriv(x): if (abs(x) > 10 ** -4): return (1 / x ** 2) - (1 / np.tanh(x)) ** 2 + 1 else: return (1 / 3) @staticmethod def langevin_deriv2(x): if (abs(x) > 10 ** -3): return 2 * (1 / np.tanh(x)) * ((1 / np.tanh(x)) ** 2 - 1) - (2 / x ** 3) else: return -2 * x / 15 def dMdt(self, M, H, H_d): Q = (H + self.alpha * M) / self.a M_diff = self.M_s * self.langevin(Q) - M delta = 1 if H_d > 0 else -1 delta_M = 1 if np.sign(delta) == np.sign(M_diff) else 0 L_prime = self.langevin_deriv(Q) denominator = 1 - self.c * self.alpha * (self.M_s / self.a) * L_prime t1_num = (1 - self.c) * delta_M * M_diff t1_den = (1 - self.c) * delta * self.k - self.alpha * M_diff t1 = (t1_num / t1_den) * H_d t2 = self.c * (self.M_s / self.a) * H_d * L_prime return (t1 + t2) / denominator
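A sketch of driving the RK2 step sample-by-sample, assuming the Hysteresis and Differentiator classes above are importable in the current scope; the real library likely wraps this loop in a block-processing helper, so this only shows how the previous-state arguments line up from one sample to the next.

import numpy as np

fs = 48000
proc = Hysteresis(drive=0.5, sat=0.5, width=0.5, fs=fs, mode='RK2')

n = np.arange(4800)                       # 0.1 s of a 100 Hz sine as the input "field"
x = np.sin(2 * np.pi * 100 * n / fs)
y = np.zeros_like(x)

M_n1 = 0.0      # previous magnetisation
H_n1 = 0.0      # previous magnetic field
H_d_n1 = 0.0    # previous field derivative
for i, H in enumerate(x):
    H_d = proc.deriv.differentiate(H)     # alpha-filtered derivative of the field
    y[i] = proc.RK2(M_n1, H, H_n1, H_d, H_d_n1)
    M_n1, H_n1, H_d_n1 = y[i], H, H_d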
MIT License
devopshq/teamcity
dohq_teamcity/api/project_api.py
ProjectApi.replace
python
def replace(self, feature_locator, project_locator, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.__replace_with_http_info(feature_locator, project_locator, **kwargs)
    else:
        (data) = self.__replace_with_http_info(feature_locator, project_locator, **kwargs)
        return data
replace  # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace(feature_locator, project_locator, async_req=True)
>>> result = thread.get()

:param async_req: bool
:param str feature_locator: (required)
:param str project_locator: (required)
:param ProjectFeature body:
:param str fields:
:return: object
         If the method is called asynchronously,
         returns the request thread.
https://github.com/devopshq/teamcity/blob/84f1757ec1fddef27d39246a75739d047be0e831/dohq_teamcity/api/project_api.py#L814-L836
from __future__ import absolute_import from dohq_teamcity.custom.base_model import TeamCityObject import re import six from dohq_teamcity.models.agent_pool import AgentPool from dohq_teamcity.models.agent_pools import AgentPools from dohq_teamcity.models.branches import Branches from dohq_teamcity.models.build import Build from dohq_teamcity.models.build_type import BuildType from dohq_teamcity.models.build_types import BuildTypes from dohq_teamcity.models.builds import Builds from dohq_teamcity.models.model_property import ModelProperty from dohq_teamcity.models.new_build_type_description import NewBuildTypeDescription from dohq_teamcity.models.new_project_description import NewProjectDescription from dohq_teamcity.models.project import Project from dohq_teamcity.models.project_feature import ProjectFeature from dohq_teamcity.models.project_features import ProjectFeatures from dohq_teamcity.models.projects import Projects from dohq_teamcity.models.properties import Properties from dohq_teamcity.models.type import Type class ProjectApi(object): base_name = 'Project' def __init__(self, api_client=None): self.api_client = api_client def add(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__add_with_http_info(project_locator, **kwargs) else: (data) = self.__add_with_http_info(project_locator, **kwargs) return data def create_build_type(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__create_build_type_with_http_info(project_locator, **kwargs) else: (data) = self.__create_build_type_with_http_info(project_locator, **kwargs) return data def create_build_type_template(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__create_build_type_template_with_http_info(project_locator, **kwargs) else: (data) = self.__create_build_type_template_with_http_info(project_locator, **kwargs) return data def create_project(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__create_project_with_http_info(**kwargs) else: (data) = self.__create_project_with_http_info(**kwargs) return data def create_secure_token(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__create_secure_token_with_http_info(project_locator, **kwargs) else: (data) = self.__create_secure_token_with_http_info(project_locator, **kwargs) return data def delete(self, feature_locator, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_with_http_info(feature_locator, project_locator, **kwargs) else: (data) = self.__delete_with_http_info(feature_locator, project_locator, **kwargs) return data def delete_all_parameters(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_all_parameters_with_http_info(project_locator, **kwargs) else: (data) = self.__delete_all_parameters_with_http_info(project_locator, **kwargs) return data def delete_all_parameters_0(self, feature_locator, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_all_parameters_0_with_http_info(feature_locator, project_locator, **kwargs) else: (data) = self.__delete_all_parameters_0_with_http_info(feature_locator, project_locator, **kwargs) return data def delete_parameter(self, name, 
project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_parameter_with_http_info(name, project_locator, **kwargs) else: (data) = self.__delete_parameter_with_http_info(name, project_locator, **kwargs) return data def delete_parameter_0(self, name, feature_locator, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_parameter_0_with_http_info(name, feature_locator, project_locator, **kwargs) else: (data) = self.__delete_parameter_0_with_http_info(name, feature_locator, project_locator, **kwargs) return data def delete_project(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_project_with_http_info(project_locator, **kwargs) else: (data) = self.__delete_project_with_http_info(project_locator, **kwargs) return data def delete_project_agent_pools(self, project_locator, agent_pool_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__delete_project_agent_pools_with_http_info(project_locator, agent_pool_locator, **kwargs) else: (data) = self.__delete_project_agent_pools_with_http_info(project_locator, agent_pool_locator, **kwargs) return data def get(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_with_http_info(project_locator, **kwargs) else: (data) = self.__get_with_http_info(project_locator, **kwargs) return data def get_branches(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_branches_with_http_info(project_locator, **kwargs) else: (data) = self.__get_branches_with_http_info(project_locator, **kwargs) return data def get_build_types_order(self, project_locator, field, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_build_types_order_with_http_info(project_locator, field, **kwargs) else: (data) = self.__get_build_types_order_with_http_info(project_locator, field, **kwargs) return data def get_default_template(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_default_template_with_http_info(project_locator, **kwargs) else: (data) = self.__get_default_template_with_http_info(project_locator, **kwargs) return data def get_example_new_project_description(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_example_new_project_description_with_http_info(project_locator, **kwargs) else: (data) = self.__get_example_new_project_description_with_http_info(project_locator, **kwargs) return data def get_example_new_project_description_compatibility_version1(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_example_new_project_description_compatibility_version1_with_http_info(project_locator, **kwargs) else: (data) = self.__get_example_new_project_description_compatibility_version1_with_http_info(project_locator, **kwargs) return data def get_parameter(self, name, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameter_with_http_info(name, project_locator, **kwargs) else: (data) = self.__get_parameter_with_http_info(name, project_locator, **kwargs) return data def get_parameter_0(self, name, 
feature_locator, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameter_0_with_http_info(name, feature_locator, project_locator, **kwargs) else: (data) = self.__get_parameter_0_with_http_info(name, feature_locator, project_locator, **kwargs) return data def get_parameter_type(self, name, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameter_type_with_http_info(name, project_locator, **kwargs) else: (data) = self.__get_parameter_type_with_http_info(name, project_locator, **kwargs) return data def get_parameter_type_raw_value(self, name, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameter_type_raw_value_with_http_info(name, project_locator, **kwargs) else: (data) = self.__get_parameter_type_raw_value_with_http_info(name, project_locator, **kwargs) return data def get_parameter_value_long(self, name, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameter_value_long_with_http_info(name, project_locator, **kwargs) else: (data) = self.__get_parameter_value_long_with_http_info(name, project_locator, **kwargs) return data def get_parameter_value_long_0(self, name, feature_locator, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameter_value_long_0_with_http_info(name, feature_locator, project_locator, **kwargs) else: (data) = self.__get_parameter_value_long_0_with_http_info(name, feature_locator, project_locator, **kwargs) return data def get_parameters(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameters_with_http_info(project_locator, **kwargs) else: (data) = self.__get_parameters_with_http_info(project_locator, **kwargs) return data def get_parameters_0(self, feature_locator, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parameters_0_with_http_info(feature_locator, project_locator, **kwargs) else: (data) = self.__get_parameters_0_with_http_info(feature_locator, project_locator, **kwargs) return data def get_parent_project(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_parent_project_with_http_info(project_locator, **kwargs) else: (data) = self.__get_parent_project_with_http_info(project_locator, **kwargs) return data def get_project_agent_pools(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_project_agent_pools_with_http_info(project_locator, **kwargs) else: (data) = self.__get_project_agent_pools_with_http_info(project_locator, **kwargs) return data def get_projects_order(self, project_locator, field, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_projects_order_with_http_info(project_locator, field, **kwargs) else: (data) = self.__get_projects_order_with_http_info(project_locator, field, **kwargs) return data def get_secure_value(self, project_locator, token, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_secure_value_with_http_info(project_locator, token, **kwargs) else: (data) = self.__get_secure_value_with_http_info(project_locator, token, **kwargs) return data 
def get_settings_file(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_settings_file_with_http_info(project_locator, **kwargs) else: (data) = self.__get_settings_file_with_http_info(project_locator, **kwargs) return data def get_single(self, feature_locator, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__get_single_with_http_info(feature_locator, project_locator, **kwargs) else: (data) = self.__get_single_with_http_info(feature_locator, project_locator, **kwargs) return data def reload_settings_file(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__reload_settings_file_with_http_info(project_locator, **kwargs) else: (data) = self.__reload_settings_file_with_http_info(project_locator, **kwargs) return data def remove_default_template(self, project_locator, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.__remove_default_template_with_http_info(project_locator, **kwargs) else: (data) = self.__remove_default_template_with_http_info(project_locator, **kwargs) return data
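The docstring above already shows the asynchronous calling convention; the sketch below restates both call styles, assuming a ProjectApi instance named project_api bound to an authenticated ApiClient (construction omitted) and using purely hypothetical locator strings.

# project_api: a ProjectApi bound to an authenticated ApiClient (construction omitted).

# Synchronous (default): the parsed response body is returned directly.
result = project_api.replace(
    feature_locator="id:someFeature",      # hypothetical feature locator
    project_locator="id:MyProject",        # hypothetical project locator
)

# Asynchronous: async_req=True returns a thread-like object; .get() blocks
# until the HTTP request completes and then yields the same parsed body.
thread = project_api.replace("id:someFeature", "id:MyProject", async_req=True)
result = thread.get()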
MIT License
zeshuaro/linkedrw
linkedrw/utils/helper.py
copy_files
python
def copy_files(mod_name, dir_name, output_dir):
    files = pkg_resources.resource_filename(mod_name, dir_name)
    for filename in os.listdir(files):
        full_filename = os.path.join(files, filename)
        if os.path.isdir(full_filename):
            try:
                shutil.copytree(full_filename, os.path.join(output_dir, filename))
            except FileExistsError:
                continue
        else:
            shutil.copy(full_filename, output_dir)
Copy files under dir_name to output_dir

Args:
    mod_name: the module name
    dir_name: the directory name of the files to be copied
    output_dir: the directory name for the files to be copied to

Returns:
    None
https://github.com/zeshuaro/linkedrw/blob/5ee580b0c08df75a09fdbb720fe8fd0f63c86dcd/linkedrw/utils/helper.py#L10-L30
import os
import pkg_resources
import shutil

from selenium.common.exceptions import NoSuchElementException

from linkedrw.constants import LATEX_CHARS
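A usage sketch; the import path, module name, and "templates" data-directory name below are assumptions standing in for whatever packaged resources the caller wants to copy.

import os
import tempfile

from linkedrw.utils import copy_files   # assumed import path

output_dir = tempfile.mkdtemp()
# Copy the packaged "templates" data directory (hypothetical name) of the
# "linkedrw" module into the scratch directory.
copy_files("linkedrw", "templates", output_dir)
print(os.listdir(output_dir))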
MIT License
verimatrix/app-shield-protect
cli/aps_commands.py
ApsCommands.protect_cancel
python
def protect_cancel(self, build_id):
    url = '%s/%s' % (self.get_api_gw_url('builds'), build_id)
    params = {}
    params['cmd'] = 'cancel'
    response = requests.patch(url, headers=self.headers, params=params)
    check_requests_response(response)
    LOGGER.debug('Protect cancel response: %s', response.json())
    return response.json()
Cancel a protection job
https://github.com/verimatrix/app-shield-protect/blob/1e9e02ef2760139fb7cf6efc0e22155234e7aa4c/cli/aps_commands.py#L318-L328
import js2py import json import logging import os import shutil import time from datetime import datetime import dateutil.parser import requests from aps_utils import extract_version_info, get_os, extract_package_id from common import common LOGGER = logging.getLogger(__name__) PROTECT_STATES = ['protect_queue', 'protect_in_progress'] def retry_on_connection_error(method, max_retries=5): retries = 0 while retries < max_retries: try: return method() except requests.exceptions.ConnectionError as e: LOGGER.warning('Upload method failed: %s', repr(e)) retries += 1 raise Exception("Maximum retries exceeded") def check_requests_response(response): if 'Content-Type' in response.headers and response.headers['Content-Type'] == 'application/json': if 'errorMessage' in response.json(): LOGGER.warning(common.getSimpleErrorMessage(response.json()['errorMessage'])) return response.raise_for_status() def upload(url, file): def try_upload(): with open(file, 'rb') as file_handle: requests.put(url, data=file_handle) return None retry_on_connection_error(try_upload) class ApsCommands(): def __init__(self, headers, config, using_client_secret, eventually_consistent_command_wait_seconds=2): self.headers = headers self.config = config self.using_client_secret = using_client_secret self.eventually_consistent_command_wait_seconds = eventually_consistent_command_wait_seconds def get_api_gw_url(self, path): apiPath = 'api/' if self.using_client_secret else '' return '%s/%s%s' % (self.config['api_gateway_url'], apiPath, path) def get_account_info(self): url = '%s/account' % self.get_api_gw_url('report') response = requests.get(url, headers=self.headers) check_requests_response(response) LOGGER.debug('Response headers: %s', repr(response.headers)) LOGGER.debug('Get account info response: %s', response.json()) return response.json() def add_application(self, name, package_id, os_name, permissions, group=None): url = self.get_api_gw_url('applications') body = {} body['applicationName'] = name body['applicationPackageId'] = package_id body['permissionPrivate'] = permissions['private'] body['permissionUpload'] = False if permissions['private'] else not permissions['no_upload'] body['permissionDelete'] = False if permissions['private'] else not permissions['no_delete'] body['os'] = os_name if group: body['group'] = group response = requests.post(url, headers=self.headers, data=json.dumps(body)) check_requests_response(response) LOGGER.debug('Post application response: %s', response.json()) return response.json() def update_application(self, application_id, name, permissions): url = '%s/%s' % (self.get_api_gw_url('applications'), application_id) body = {} body['applicationName'] = name body['permissionPrivate'] = permissions['private'] body['permissionUpload'] = False if permissions['private'] else not permissions['no_upload'] body['permissionDelete'] = False if permissions['private'] else not permissions['no_delete'] response = requests.patch(url, headers=self.headers, data=json.dumps(body)) check_requests_response(response) LOGGER.debug('Update application response: %s', response.json()) return response.json() def list_applications(self, application_id, group=None, wait_ec=True): params = {} if application_id: url = '%s/%s' % (self.get_api_gw_url('applications'), application_id) else: url = self.get_api_gw_url('applications') if group: params['group'] = group if not application_id and self.eventually_consistent_command_wait_seconds: time.sleep(self.eventually_consistent_command_wait_seconds) response = requests.get(url, 
headers=self.headers, params=params) check_requests_response(response) LOGGER.debug('Get applications response: %s', response.json()) return response.json() def delete_application(self, application_id): params = {} params['id'] = application_id url = '%s/%s' % (self.get_api_gw_url('applications'), application_id) response = requests.delete(url, headers=self.headers) check_requests_response(response) LOGGER.debug('Delete application response: %s', response.json()) return response.json() def list_builds(self, application_id, build_id): params = {} if build_id: url = '%s/%s' % (self.get_api_gw_url('builds'), build_id) else: url = self.get_api_gw_url('builds') if application_id: params['app'] = application_id if not build_id and self.eventually_consistent_command_wait_seconds: time.sleep(self.eventually_consistent_command_wait_seconds) response = requests.get(url, headers=self.headers, params=params) check_requests_response(response) builds = response.json() LOGGER.debug('Listing builds for app_id:%s build_id:%s - %s', application_id, build_id, repr(builds)) return builds def create_build(self, application_id=None): url = self.get_api_gw_url('builds') body = {} if application_id: body['applicationId'] = application_id response = requests.post(url, headers=self.headers, data=json.dumps(body)) check_requests_response(response) LOGGER.debug('Post build response: %s', response.json()) return response.json() def set_build_metadata(self, build_id, file): version_info = extract_version_info(file) url = '%s/%s/metadata' % (self.get_api_gw_url('builds'), build_id) body = {} body['os'] = 'ios' if file.endswith('.xcarchive.zip') else 'android' body['osData'] = version_info response = requests.put(url, headers=self.headers, data=json.dumps(body)) check_requests_response(response) LOGGER.debug('Set build metadata response: %s', response.json()) return response.json() def upload_build(self, build_id, file): url = '%s/%s' % (self.get_api_gw_url('builds'), build_id) params = {} params['url'] = 'raw' params['uploadname'] = os.path.basename(file) response = requests.get(url, headers=self.headers, params=params) check_requests_response(response) LOGGER.debug('Get upload raw URL response: %s', response.text) upload(response.text, file) return {} def upload_build_success(self, build_id, file): url = '%s/%s' % (self.get_api_gw_url('builds'), build_id) params = {} params['cmd'] = 'upload-success' response = requests.patch(url, headers=self.headers, params=params) check_requests_response(response) LOGGER.debug('Upload build success response: %s', response.json()) return response.json() def upload_build_failed(self, build_id, message): url = '%s/%s' % (self.get_api_gw_url('builds'), build_id) params = {} params['cmd'] = 'upload-failed' params['message'] = message response = requests.patch(url, headers=self.headers, params=params) check_requests_response(response) LOGGER.debug('Upload build failed response: %s', response.json()) return response.json() def add_build(self, file, application_id=None, set_metadata=True, upload=True): response = self.create_build(application_id) if 'errorMessage' in response: return response build_id = response['id'] if set_metadata: response = self.set_build_metadata(build_id, file) if 'errorMessage' in response: LOGGER.debug('set build metadata failed, delete build') self.delete_build(build_id) return response if not application_id or not upload: return response response = self.upload_build(build_id, file) if 'errorMessage' in response: LOGGER.debug('upload build failed, inform backend 
and then delete build') self.upload_build_failed(build_id, str(response)) self.delete_build(build_id) return response response = self.upload_build_success(build_id, file) if 'errorMessage' in response: LOGGER.debug('upload build success call failed, delete build') self.delete_build(build_id) return response def add_build_without_app(self, file, set_metadata=True): return self.add_build(file, None, set_metadata, False) def delete_build(self, build_id): url = '%s/%s' % (self.get_api_gw_url('builds'), build_id) response = requests.delete(url, headers=self.headers) check_requests_response(response) LOGGER.debug('Delete build response: %s', response.json()) return response.json() def delete_build_ticket(self, build_id, ticket_id): url = '%s/%s' % (self.get_api_gw_url('builds'), build_id) params = {} params['cmd'] = 'delete-ticket' params['ticket'] = ticket_id response = requests.patch(url, headers=self.headers, params=params) check_requests_response(response) LOGGER.debug('Delete build ticket response: %s', response.json()) return response.json() def get_build_ticket(self, build_id, ticket_id): url = '%s/%s' % (self.get_api_gw_url('builds'), build_id) params = {} params['ticket'] = ticket_id response = requests.get(url, headers=self.headers, params=params) check_requests_response(response) LOGGER.debug('Get build ticket response: %s', response.json()) return response.json() def protect_start(self, build_id): url = '%s/%s' % (self.get_api_gw_url('builds'), build_id) params = {} params['cmd'] = 'protect' response = requests.patch(url, headers=self.headers, params=params) check_requests_response(response) LOGGER.debug('Protect start response: %s', response.json()) return response.json() def protect_get_status(self, build_id): return self.list_builds(None, build_id)
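A minimal sketch of cancelling a running protection job, assuming authentication headers and the api_gateway_url config are prepared elsewhere (the values below are placeholders) and that a build with the given id is currently being protected.

headers = {"Authorization": "Bearer <token>"}                 # placeholder auth header
config = {"api_gateway_url": "https://aps.example.com"}       # placeholder gateway URL

aps = ApsCommands(headers, config, using_client_secret=False)

build_id = "hypothetical-build-id"
aps.protect_start(build_id)        # queue the protection job
# ... later, if the job should be abandoned:
response = aps.protect_cancel(build_id)
print(response)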
MIT License
capitalone/dataprofiler
dataprofiler/profilers/numerical_column_stats.py
NumericStatsMixin.diff
python
def diff(self, other_profile, options=None):
    cls = self.__class__
    if not isinstance(other_profile, cls):
        raise TypeError("Unsupported operand type(s) for diff: '{}' "
                        "and '{}'".format(cls.__name__,
                                          other_profile.__class__.__name__))

    differences = {
        "min": utils.find_diff_of_numbers(self.min, other_profile.min),
        "max": utils.find_diff_of_numbers(self.max, other_profile.max),
        "sum": utils.find_diff_of_numbers(self.sum, other_profile.sum),
        "mean": utils.find_diff_of_numbers(self.mean, other_profile.mean),
        "median": utils.find_diff_of_numbers(
            self.median, other_profile.median),
        "mode": utils.find_diff_of_lists_and_sets(
            self.mode, other_profile.mode),
        "median_absolute_deviation": utils.find_diff_of_numbers(
            self.median_abs_deviation, other_profile.median_abs_deviation),
        "variance": utils.find_diff_of_numbers(self.variance,
                                               other_profile.variance),
        "stddev": utils.find_diff_of_numbers(self.stddev,
                                             other_profile.stddev),
        "t-test": self._perform_t_test(
            self.mean, self.variance, self.match_count,
            other_profile.mean, other_profile.variance,
            other_profile.match_count)
    }
    return differences
Finds the differences for several numerical stats.

:param other_profile: profile to find the difference with
:type other_profile: NumericStatsMixin Profile
:return: the numerical stats differences
:rtype: dict
https://github.com/capitalone/dataprofiler/blob/7d9a1e27675116c6c48ac28dd21069846e66771e/dataprofiler/profilers/numerical_column_stats.py#L297-L332
from __future__ import print_function from __future__ import division import scipy.stats from future.utils import with_metaclass import copy import abc import warnings import itertools import numpy as np from . import utils from . import histogram_utils from .base_column_profilers import BaseColumnProfiler from .profiler_options import NumericalOptions class abstractstaticmethod(staticmethod): __slots__ = () def __init__(self, function): super(abstractstaticmethod, self).__init__(function) function.__isabstractmethod__ = True __isabstractmethod__ = True class NumericStatsMixin(with_metaclass(abc.ABCMeta, object)): type = None def __init__(self, options=None): if options and not isinstance(options, NumericalOptions): raise ValueError("NumericalStatsMixin parameter 'options' must be " "of type NumericalOptions.") self.min = None self.max = None self._top_k_modes = 5 self.sum = 0 self._biased_variance = np.nan self._biased_skewness = np.nan self._biased_kurtosis = np.nan self._median_is_enabled = True self._median_abs_dev_is_enabled = True self.max_histogram_bin = 100000 self.min_histogram_bin = 1000 self.histogram_bin_method_names = [ 'auto', 'fd', 'doane', 'scott', 'rice', 'sturges', 'sqrt' ] self.histogram_selection = None self.user_set_histogram_bin = None self.bias_correction = True self._mode_is_enabled = True self.num_zeros = 0 self.num_negatives = 0 if options: self.bias_correction = options.bias_correction.is_enabled self._top_k_modes = options.mode.top_k_modes self._median_is_enabled = options.median.is_enabled self._median_abs_dev_is_enabled = options.median_abs_deviation.is_enabled self._mode_is_enabled = options.mode.is_enabled bin_count_or_method = options.histogram_and_quantiles.bin_count_or_method if isinstance(bin_count_or_method, str): self.histogram_bin_method_names = [bin_count_or_method] elif isinstance(bin_count_or_method, list): self.histogram_bin_method_names = bin_count_or_method elif isinstance(bin_count_or_method, int): self.user_set_histogram_bin = bin_count_or_method self.histogram_bin_method_names = ['custom'] self.histogram_methods = {} self._stored_histogram = { 'total_loss': 0, 'current_loss': 0, 'suggested_bin_count': self.min_histogram_bin, 'histogram': { 'bin_counts': None, 'bin_edges': None } } self._batch_history = [] for method in self.histogram_bin_method_names: self.histogram_methods[method] = { 'total_loss': 0, 'current_loss': 0, 'suggested_bin_count': self.min_histogram_bin, 'histogram': { 'bin_counts': None, 'bin_edges': None } } num_quantiles = 1000 self.quantiles = {bin_num: None for bin_num in range(num_quantiles - 1)} self.__calculations = { "min": NumericStatsMixin._get_min, "max": NumericStatsMixin._get_max, "sum": NumericStatsMixin._get_sum, "variance": NumericStatsMixin._get_variance, "skewness": NumericStatsMixin._get_skewness, "kurtosis": NumericStatsMixin._get_kurtosis, "histogram_and_quantiles": NumericStatsMixin._get_histogram_and_quantiles, "num_zeros": NumericStatsMixin._get_num_zeros, "num_negatives": NumericStatsMixin._get_num_negatives } self._filter_properties_w_options(self.__calculations, options) def __getattribute__(self, name): return super(NumericStatsMixin, self).__getattribute__(name) def __getitem__(self, item): return super(NumericStatsMixin, self).__getitem__(item) @property def _has_histogram(self): return self._stored_histogram['histogram']['bin_counts'] is not None @BaseColumnProfiler._timeit(name="histogram_and_quantiles") def _add_helper_merge_profile_histograms(self, other1, other2): bin_methods = [x for x in 
other1.histogram_bin_method_names if x in other2.histogram_bin_method_names] if not bin_methods: raise ValueError('Profiles have no overlapping bin methods and ' 'therefore cannot be added together.') elif other1.user_set_histogram_bin and other2.user_set_histogram_bin: if other1.user_set_histogram_bin != other2.user_set_histogram_bin: warnings.warn('User set histogram bin counts did not match. ' 'Choosing the larger bin count.') self.user_set_histogram_bin = max(other1.user_set_histogram_bin, other2.user_set_histogram_bin) self.histogram_bin_method_names = bin_methods self.histogram_methods = dict() for method in self.histogram_bin_method_names: self.histogram_methods[method] = { 'total_loss': 0, 'current_loss': 0, 'histogram': { 'bin_counts': None, 'bin_edges': None } } combined_values = np.concatenate([other1._histogram_to_array(), other2._histogram_to_array()]) bin_counts, bin_edges = self._get_histogram(combined_values) self._stored_histogram['histogram']['bin_counts'] = bin_counts self._stored_histogram['histogram']['bin_edges'] = bin_edges histogram_loss = self._histogram_bin_error(combined_values) self._stored_histogram['histogram']['current_loss'] = histogram_loss self._stored_histogram['histogram']['total_loss'] = histogram_loss self._get_quantiles() def _add_helper(self, other1, other2): BaseColumnProfiler._merge_calculations( self._NumericStatsMixin__calculations, other1._NumericStatsMixin__calculations, other2._NumericStatsMixin__calculations) self.bias_correction = True if not other1.bias_correction or not other2.bias_correction: self.bias_correction = False if "variance" in self.__calculations.keys(): self._biased_variance = self._merge_biased_variance( other1.match_count, other1._biased_variance, other1.mean, other2.match_count, other2._biased_variance, other2.mean) if "histogram_and_quantiles" in self.__calculations.keys(): if other1._has_histogram and other2._has_histogram: self._add_helper_merge_profile_histograms(other1, other2) elif not other2._has_histogram: self.histogram_methods = other1.histogram_methods self.quantiles = other1.quantiles else: self.histogram_methods = other2.histogram_methods self.quantiles = other2.quantiles if "min" in self.__calculations.keys(): if other1.min is not None and other2.min is not None: self.min = min(other1.min, other2.min) elif other2.min is None: self.min = other1.min else: self.min = other2.min if "max" in self.__calculations.keys(): if other1.max is not None and other2.max is not None: self.max = max(other1.max, other2.max) elif other2.max is None: self.max = other1.max else: self.max = other2.max if "sum" in self.__calculations.keys(): self.sum = other1.sum + other2.sum if "skewness" in self.__calculations.keys(): self._biased_skewness = self._merge_biased_skewness( other1.match_count, other1._biased_skewness, other1._biased_variance, other1.mean, other2.match_count, other2._biased_skewness, other2._biased_variance, other2.mean) if "kurtosis" in self.__calculations.keys(): self._biased_kurtosis = self._merge_biased_kurtosis( other1.match_count, other1._biased_kurtosis, other1._biased_skewness, other1._biased_variance, other1.mean, other2.match_count, other2._biased_kurtosis, other2._biased_skewness, other2._biased_variance, other2.mean) if "num_zeros" in self.__calculations.keys(): self.num_zeros = other1.num_zeros + other2.num_zeros if "num_negatives" in self.__calculations.keys(): self.num_negatives = other1.num_negatives + other2.num_negatives self._top_k_modes = max(other1._top_k_modes, other2._top_k_modes) 
self._median_is_enabled = other1._median_is_enabled and other2._median_is_enabled self._mode_is_enabled = other1._mode_is_enabled and other2._mode_is_enabled self._median_abs_dev_is_enabled = other1._median_abs_dev_is_enabled and other2._median_abs_dev_is_enabled def profile(self): profile = dict( min=self.np_type_to_type(self.min), max=self.np_type_to_type(self.max), mode=self.np_type_to_type(self.mode), median=self.np_type_to_type(self.median), sum=self.np_type_to_type(self.sum), mean=self.np_type_to_type(self.mean), variance=self.np_type_to_type(self.variance), stddev=self.np_type_to_type(self.stddev), skewness=self.np_type_to_type(self.skewness), kurtosis=self.np_type_to_type(self.kurtosis), histogram=self._get_best_histogram_for_profile(), quantiles=self.quantiles, median_abs_deviation=self.np_type_to_type( self.median_abs_deviation), num_zeros=self.np_type_to_type(self.num_zeros), num_negatives=self.np_type_to_type(self.num_negatives), times=self.times, ) return profile
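A sketch of how a column-level diff is usually reached from user code, assuming the installed dataprofiler version exposes a top-level Profiler.diff that delegates numeric columns to profilers built on this mixin (newer releases do); the exact report layout depends on the library version.

import pandas as pd
import dataprofiler as dp

profile1 = dp.Profiler(pd.DataFrame({"x": [1, 2, 3, 4, 5]}))
profile2 = dp.Profiler(pd.DataFrame({"x": [2, 4, 6, 8, 10]}))

# The structured diff includes, per numeric column, the NumericStatsMixin.diff
# keys shown above (min, max, sum, mean, median, mode, variance, stddev, t-test, ...).
print(profile1.diff(profile2))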
Apache License 2.0
qiskit/qiskit-ignis
qiskit/ignis/mitigation/expval/utils.py
calibration_data
python
def calibration_data(result: Result,
                     metadata: List[Dict[str, any]]) -> Tuple[
                         Dict[int, Dict[int, int]], int, str]:
    cal_data = {}
    num_qubits = None
    method = None
    for i, meta in enumerate(metadata):
        if meta.get('experiment') == 'meas_mit':
            if num_qubits is None:
                num_qubits = len(meta['cal'])
            if method is None:
                method = meta.get('method', None)
            key = int(meta['cal'], 2)
            counts = result.get_counts(i).int_outcomes()
            if key not in cal_data:
                cal_data[key] = counts
            else:
                cal_data[key] = combine_counts(cal_data[key], counts)
    return cal_data, num_qubits, method
Return measurement-mitigation calibration data extracted from result data.

Args:
    result: Qiskit result object.
    metadata: mitigation generator metadata.

Returns:
    Calibration data dictionary {label: Counts}, the number of qubits,
    and the calibration method.
https://github.com/qiskit/qiskit-ignis/blob/f0728b6b6785b68693a0d31c54c51f9826ddde34/qiskit/ignis/mitigation/expval/utils.py#L150-L178
import logging from functools import partial from typing import Optional, List, Dict, Tuple import numpy as np from qiskit.exceptions import QiskitError from qiskit.result import Counts, Result from qiskit.ignis.verification.tomography import marginal_counts, combine_counts from qiskit.ignis.numba import jit_fallback logger = logging.getLogger(__name__) def expectation_value(counts: Counts, diagonal: Optional[np.ndarray] = None, qubits: Optional[List[int]] = None, clbits: Optional[List[int]] = None, meas_mitigator: Optional = None, ) -> Tuple[float, float]: if meas_mitigator is not None: return meas_mitigator.expectation_value( counts, diagonal=diagonal, clbits=clbits, qubits=qubits) if clbits is not None: counts = marginal_counts(counts, meas_qubits=clbits) probs = np.array(list(counts.values())) shots = probs.sum() probs = probs / shots if diagonal is None: coeffs = np.array([(-1) ** (key.count('1') % 2) for key in counts.keys()], dtype=probs.dtype) else: diagonal = np.asarray(diagonal) keys = [int(key, 2) for key in counts.keys()] coeffs = np.asarray(diagonal[keys], dtype=probs.dtype) return _expval_with_stddev(coeffs, probs, shots) def counts_probability_vector( counts: Counts, qubits: Optional[List[int]] = None, clbits: Optional[List[int]] = None, num_qubits: Optional[int] = None, return_shots: Optional[bool] = False) -> np.ndarray: if clbits is not None: counts = marginal_counts(counts, meas_qubits=clbits) if num_qubits is None: num_qubits = len(next(iter(counts))) vec = np.zeros(2**num_qubits, dtype=float) shots = 0 for key, val in counts.items(): shots += val vec[int(key, 2)] = val vec /= shots if qubits is not None: if len(qubits) != num_qubits: raise QiskitError("Num qubits does not match vector length.") axes = [num_qubits - 1 - i for i in reversed(np.argsort(qubits))] vec = np.reshape(vec, num_qubits * [2]).transpose(axes).reshape(vec.shape) if return_shots: return vec, shots return vec
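A sketch of feeding this helper, assuming qiskit-ignis' expectation-value mitigation circuit generator (expval_meas_mitigator_circuits) and a local qasm_simulator backend; the generator name, its keyword arguments, and the backend name are recalled from that era of Qiskit and should be treated as assumptions.

from qiskit import Aer, execute
from qiskit.ignis.mitigation import expval_meas_mitigator_circuits

# Generate measurement-mitigation calibration circuits plus their metadata
# (each metadata entry carries 'experiment': 'meas_mit' and a 'cal' bit-label).
circuits, metadata = expval_meas_mitigator_circuits(2, method='tensored')

result = execute(circuits, Aer.get_backend('qasm_simulator'), shots=4096).result()

cal_data, num_qubits, method = calibration_data(result, metadata)
print(num_qubits, method, sorted(cal_data))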
Apache License 2.0
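Usage note (not part of the source record): the aggregation above can be illustrated with plain dictionaries standing in for Qiskit's Result/Counts objects; the metadata and counts below are invented for the example.

def combine(a, b):
    # merge two integer-outcome count dictionaries, summing shared keys
    merged = dict(a)
    for key, val in b.items():
        merged[key] = merged.get(key, 0) + val
    return merged

# hypothetical metadata/counts for a 2-qubit '00' calibration circuit run twice
metadata = [{'experiment': 'meas_mit', 'cal': '00', 'method': 'complete'},
            {'experiment': 'meas_mit', 'cal': '00', 'method': 'complete'}]
int_outcomes = [{0: 950, 1: 50}, {0: 940, 3: 60}]

cal_data, num_qubits, method = {}, None, None
for meta, counts in zip(metadata, int_outcomes):
    if meta.get('experiment') != 'meas_mit':
        continue
    num_qubits = num_qubits or len(meta['cal'])
    method = method or meta.get('method')
    key = int(meta['cal'], 2)
    cal_data[key] = combine(cal_data[key], counts) if key in cal_data else counts

print(cal_data, num_qubits, method)   # {0: {0: 1890, 1: 50, 3: 60}} 2 complete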
alexferl/flask-simpleldap
flask_simpleldap/__init__.py
LDAP.bind
python
def bind(self): conn = self.initialize try: conn.simple_bind_s( current_app.config['LDAP_USERNAME'], current_app.config['LDAP_PASSWORD']) return conn except ldap.LDAPError as e: raise LDAPException(self.error(e.args))
Attempts to bind to the LDAP server using the credentials of the service account. :return: Bound LDAP connection object if successful. :raises LDAPException: if the bind fails.
https://github.com/alexferl/flask-simpleldap/blob/78f66d8e195f696f94fa41d352605b5425e2835e/flask_simpleldap/__init__.py#L108-L123
import re from functools import wraps import ldap from ldap import filter as ldap_filter from flask import abort, current_app, g, make_response, redirect, url_for, request __all__ = ['LDAP'] class LDAPException(RuntimeError): message = None def __init__(self, message): self.message = message def __str__(self): return self.message class LDAP(object): def __init__(self, app=None): self.app = app if app is not None: self.init_app(app) @staticmethod def init_app(app): app.config.setdefault('LDAP_HOST', 'localhost') app.config.setdefault('LDAP_PORT', 389) app.config.setdefault('LDAP_SCHEMA', 'ldap') app.config.setdefault('LDAP_USERNAME', None) app.config.setdefault('LDAP_PASSWORD', None) app.config.setdefault('LDAP_TIMEOUT', 10) app.config.setdefault('LDAP_USE_SSL', False) app.config.setdefault('LDAP_USE_TLS', False) app.config.setdefault('LDAP_REQUIRE_CERT', False) app.config.setdefault('LDAP_CERT_PATH', '/path/to/cert') app.config.setdefault('LDAP_BASE_DN', None) app.config.setdefault('LDAP_OBJECTS_DN', 'distinguishedName') app.config.setdefault('LDAP_USER_FIELDS', []) app.config.setdefault('LDAP_USER_OBJECT_FILTER', '(&(objectclass=Person)(userPrincipalName=%s))') app.config.setdefault('LDAP_USER_GROUPS_FIELD', 'memberOf') app.config.setdefault('LDAP_GROUP_FIELDS', []) app.config.setdefault('LDAP_GROUPS_OBJECT_FILTER', 'objectclass=Group') app.config.setdefault('LDAP_GROUP_OBJECT_FILTER', '(&(objectclass=Group)(userPrincipalName=%s))') app.config.setdefault('LDAP_GROUP_MEMBERS_FIELD', 'member') app.config.setdefault('LDAP_LOGIN_VIEW', 'login') app.config.setdefault('LDAP_REALM_NAME', 'LDAP authentication') app.config.setdefault('LDAP_OPENLDAP', False) app.config.setdefault('LDAP_GROUP_MEMBER_FILTER', '*') app.config.setdefault('LDAP_GROUP_MEMBER_FILTER_FIELD', '*') app.config.setdefault('LDAP_CUSTOM_OPTIONS', None) if app.config['LDAP_USE_SSL'] or app.config['LDAP_USE_TLS']: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) if app.config['LDAP_REQUIRE_CERT']: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND) ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, app.config['LDAP_CERT_PATH']) for option in ['USERNAME', 'PASSWORD', 'BASE_DN']: if app.config['LDAP_{0}'.format(option)] is None: raise LDAPException('LDAP_{0} cannot be None!'.format(option)) @staticmethod def _set_custom_options(conn): options = current_app.config['LDAP_CUSTOM_OPTIONS'] if options: for k, v in options.items(): conn.set_option(k, v) return conn @property def initialize(self): try: conn = ldap.initialize('{0}://{1}:{2}'.format( current_app.config['LDAP_SCHEMA'], current_app.config['LDAP_HOST'], current_app.config['LDAP_PORT'])) conn.set_option(ldap.OPT_NETWORK_TIMEOUT, current_app.config['LDAP_TIMEOUT']) conn = self._set_custom_options(conn) conn.protocol_version = ldap.VERSION3 if current_app.config['LDAP_USE_TLS']: conn.start_tls_s() return conn except ldap.LDAPError as e: raise LDAPException(self.error(e.args)) @property
MIT License
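Usage sketch (an editorial assumption, not taken from the repository): wiring the extension into a Flask app and binding with the service account. The host, credentials, and base DN are placeholders, and the call performs a real network bind, so it only succeeds against a reachable server.

from flask import Flask
from flask_simpleldap import LDAP

app = Flask(__name__)
app.config['LDAP_HOST'] = 'ldap.example.com'                  # placeholder server
app.config['LDAP_USERNAME'] = 'cn=service,dc=example,dc=com'  # placeholder bind DN
app.config['LDAP_PASSWORD'] = 'secret'
app.config['LDAP_BASE_DN'] = 'dc=example,dc=com'

ldap = LDAP(app)

with app.app_context():
    conn = ldap.bind()   # bound python-ldap connection for the service account
    conn.unbind_s()      # release the connection when finished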
pkucactus/bdcn
ablation_train.py
cross_entropy_loss2d
python
def cross_entropy_loss2d(inputs, targets, cuda=False, balance=1.1): n, c, h, w = inputs.size() weights = np.zeros((n, c, h, w)) for i in xrange(n): t = targets[i, :, :, :].cpu().data.numpy() pos = (t == 1).sum() neg = (t == 0).sum() valid = neg + pos weights[i, t == 1] = neg * 1. / valid weights[i, t == 0] = pos * balance / valid weights = torch.Tensor(weights) if cuda: weights = weights.cuda() weights = Variable(weights) inputs = F.sigmoid(inputs) loss = nn.BCELoss(weights, size_average=False)(inputs, targets) return loss
:param inputs: a 4-dimensional tensor of shape (n, 1, h, w) with raw edge logits :param targets: a 4-dimensional tensor of shape (n, 1, h, w) with binary edge labels :param cuda: move the class-balance weights to the GPU when True :param balance: weight multiplier applied to negative (non-edge) pixels :return: the class-balanced binary cross-entropy loss
https://github.com/pkucactus/bdcn/blob/2c81e7370579bcf3f7fa6def2ac5a798a108952f/ablation_train.py#L26-L50
import numpy as np import torch import torch.optim as optim import torch.nn as nn from torch.autograd import Variable from torch.nn import functional as F import argparse import time import re import os import sys import ablation from datasets.dataset import Data import cfg import log def adjust_learning_rate(optimizer, steps, step_size, gamma=0.1, logger=None): for param_group in optimizer.param_groups: param_group['lr'] = param_group['lr'] * gamma if logger: logger.info('%s: %s' % (param_group['name'], param_group['lr']))
MIT License
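Illustration (plain NumPy, not from the repository): how the class-balancing weights above are derived for a toy edge map; the map and balance value are invented. Note the original function is written for Python 2 (xrange) and an older PyTorch API (Variable, F.sigmoid).

import numpy as np

balance = 1.1
t = np.array([[0, 0, 1],
              [0, 1, 1],
              [0, 0, 0]], dtype=np.float64)   # toy edge map: 1 marks edge pixels

pos = (t == 1).sum()            # 3 edge pixels
neg = (t == 0).sum()            # 6 background pixels
valid = pos + neg

weights = np.zeros_like(t)
weights[t == 1] = neg * 1.0 / valid        # rare edge pixels get the larger weight
weights[t == 0] = pos * balance / valid    # background weight is scaled by `balance`
print(weights)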
log2timeline/dftimewolf
dftimewolf/lib/collectors/grr_hunt.py
GRRHuntFileCollector.SetUp
python
def SetUp(self, file_path_list: str, reason: str, grr_server_url: str, grr_username: str, grr_password: str, approvers: Optional[str]=None, verify: bool=True) -> None: super(GRRHuntFileCollector, self).SetUp( reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify) self.file_path_list = [item.strip() for item in file_path_list.strip().split(',')] if not file_path_list: self.ModuleError('Files must be specified for hunts', critical=True)
Initializes a GRR Hunt file collector. Args: file_path_list (str): comma-separated file paths. reason (str): justification for GRR access. grr_server_url (str): GRR server URL. grr_username (str): GRR username. grr_password (str): GRR password. approvers (Optional[str]): comma-separated GRR approval recipients. verify (Optional[bool]): True to indicate GRR server's x509 certificate should be verified.
https://github.com/log2timeline/dftimewolf/blob/bb7ae49f8031c4e5cc6cdae6fbe00c07d24acd19/dftimewolf/lib/collectors/grr_hunt.py#L159-L185
import os import tempfile import zipfile from typing import List, Optional, Tuple, Union from grr_api_client.hunt import Hunt from grr_response_proto import flows_pb2 as grr_flows from grr_response_proto.flows_pb2 import ArtifactCollectorFlowArgs from grr_response_proto.flows_pb2 import FileFinderArgs import yaml from dftimewolf.lib.collectors import grr_base from dftimewolf.lib.containers import containers from dftimewolf.lib.modules import manager as modules_manager from dftimewolf.lib.state import DFTimewolfState class GRRHunt(grr_base.GRRBaseModule): def _CreateHunt( self, name: str, args: Union[FileFinderArgs, ArtifactCollectorFlowArgs]) -> Hunt: runner_args = self.grr_api.types.CreateHuntRunnerArgs() runner_args.description = self.reason hunt = self.grr_api.CreateHunt( flow_name=name, flow_args=args, hunt_runner_args=runner_args) self.logger.success('{0!s}: Hunt created'.format(hunt.hunt_id)) self._WrapGRRRequestWithApproval(hunt, hunt.Start) return hunt class GRRHuntArtifactCollector(GRRHunt): def __init__(self, state: DFTimewolfState, name: Optional[str]=None, critical: bool=False) -> None: super(GRRHuntArtifactCollector, self).__init__( state, name=name, critical=critical) self.artifacts = [] self.use_tsk = False self.hunt = None def SetUp(self, artifacts: str, use_tsk: bool, reason: str, grr_server_url: str, grr_username: str, grr_password: str, approvers: Optional[str]=None, verify: bool=True) -> None: super(GRRHuntArtifactCollector, self).SetUp( reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify) self.artifacts = [item.strip() for item in artifacts.strip().split(',')] if not artifacts: self.ModuleError('No artifacts were specified.', critical=True) self.use_tsk = use_tsk def Process(self) -> None: self.logger.info('Artifacts to be collected: {0!s}'.format(self.artifacts)) hunt_args = grr_flows.ArtifactCollectorFlowArgs( artifact_list=self.artifacts, use_tsk=self.use_tsk, ignore_interpolation_errors=True, apply_parsers=False,) self._CreateHunt('ArtifactCollectorFlow', hunt_args) class GRRHuntFileCollector(GRRHunt): def __init__(self, state: DFTimewolfState, name: Optional[str]=None, critical: bool=False) -> None: super(GRRHuntFileCollector, self).__init__( state, name=name, critical=critical) self.file_path_list = []
Apache License 2.0
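Illustration (values invented): how SetUp turns the comma-separated file_path_list argument into the list used for the hunt; no GRR server is involved here.

file_path_list = '/var/log/syslog, /etc/passwd ,/tmp/evidence.bin'
parsed = [item.strip() for item in file_path_list.strip().split(',')]
print(parsed)   # ['/var/log/syslog', '/etc/passwd', '/tmp/evidence.bin']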
windelbouwman/ppci
ppci/lang/c3/context.py
Context.modules
python
def modules(self): return self.module_map.values()
Get all the modules in this context
https://github.com/windelbouwman/ppci/blob/915c069e0667042c085ec42c78e9e3c9a5295324/ppci/lang/c3/context.py#L40-L42
import logging import operator import struct from .scope import create_top_scope, Scope, SemanticError from ...arch.arch_info import Endianness from . import astnodes as ast class Context: logger = logging.getLogger("c3ctx") def __init__(self, arch_info): self.scope = create_top_scope(arch_info) self.module_map = {} self.const_map = {} self.var_map = {} self.function_map = {} self.const_workset = set() self.arch_info = arch_info self.pointerSize = arch_info.get_size("ptr") def has_module(self, name): return name in self.module_map def get_module(self, name, create=True): if name not in self.module_map and create: module = ast.Module(name, Scope(self.scope), None) self.module_map[name] = module return self.module_map[name] @property
BSD 2-Clause Simplified License
glassywing/nvae
nvae/vae_celeba.py
NVAE.forward
python
def forward(self, x): mu, log_var, xs = self.encoder(x) z = reparameterize(mu, torch.exp(0.5 * log_var)) decoder_output, losses = self.decoder(z, xs) recon_loss = torch.mean(self.adaptive_loss.lossfun( torch.mean(F.binary_cross_entropy(decoder_output, x, reduction='none'), dim=[1, 2, 3])[:, None])) kl_loss = kl(mu, log_var) return decoder_output, recon_loss, [kl_loss] + losses
:param x: Tensor. shape = (B, C, H, W) :return: reconstructed tensor of shape (B, C, H, W), the reconstruction loss, and a list of KL losses
https://github.com/glassywing/nvae/blob/4ace2e8d3f6e4abd93a64edbca799b1e4147a6bb/nvae/vae_celeba.py#L24-L45
import torch import torch.nn as nn import torch.nn.functional as F from nvae.decoder import Decoder from nvae.encoder import Encoder from nvae.losses import recon, kl from nvae.utils import reparameterize import robust_loss_pytorch import numpy as np class NVAE(nn.Module): def __init__(self, z_dim, img_dim): super().__init__() self.encoder = Encoder(z_dim) self.decoder = Decoder(z_dim) self.adaptive_loss = robust_loss_pytorch.adaptive.AdaptiveLossFunction( num_dims=1, float_dtype=np.float32, device="cpu")
Apache License 2.0
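Hypothetical usage sketch: the latent size and image resolution below are guesses for illustration only and must match the Encoder/Decoder definitions in this repository.

import torch

model = NVAE(z_dim=512, img_dim=(64, 64))     # assumed sizes, see note above
x = torch.rand(2, 3, 64, 64)                  # batch of two RGB images in [0, 1]
recon, recon_loss, kl_losses = model(x)
print(recon.shape, recon_loss.item(), len(kl_losses))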
marshallward/f90nml
f90nml/namelist.py
Namelist._f90repr
python
def _f90repr(self, value): if isinstance(value, self.RepeatValue): return self._f90repeat(value) elif isinstance(value, bool): return self._f90bool(value) elif isinstance(value, numbers.Integral): return self._f90int(value) elif isinstance(value, numbers.Real): return self._f90float(value) elif isinstance(value, numbers.Complex): return self._f90complex(value) elif isinstance(value, basestring): return self._f90str(value) elif value is None: return '' else: raise ValueError('Type {0} of {1} cannot be converted to a Fortran' ' type.'.format(type(value), value))
Convert primitive Python types to equivalent Fortran strings.
https://github.com/marshallward/f90nml/blob/37bcdb0e599f74f09f10f895500ddf93646ac9bf/f90nml/namelist.py#L795-L813
from __future__ import print_function import itertools import copy import numbers import os import platform try: from StringIO import StringIO except ImportError: from io import StringIO try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict try: basestring except NameError: basestring = str class Namelist(OrderedDict): class RepeatValue(object): def __init__(self, n, value): self.repeats = n self.value = value def __init__(self, *args, **kwds): s_args = list(args) if (args and not isinstance(args[0], OrderedDict) and isinstance(args[0], dict)): s_args[0] = sorted(args[0].items()) try: self._default_start_index = kwds.pop('default_start_index') except KeyError: self._default_start_index = None super(Namelist, self).__init__(*s_args, **kwds) self.start_index = self.pop('_start_index', {}) if '_complex' in self: for key in self['_complex']: if all(isinstance(v, list) for v in self[key]): self[key] = [complex(*v) for v in self[key]] else: self[key] = complex(*self[key]) self.pop('_complex') self._column_width = 72 self._indent = 4 * ' ' self._end_comma = False self._uppercase = False self._float_format = '' self._logical_repr = {False: '.false.', True: '.true.'} self._index_spacing = False self._repeat_counter = False self._split_strings = False self._newline = False self.indent = self.pop('_indent', self.indent) if (platform.python_implementation() == 'PyPy' and platform.python_version_tuple()[0] == '2'): for key, value in self.items(): self[key] = value def __contains__(self, key): return super(Namelist, self).__contains__(key.lower()) def __delitem__(self, key): return super(Namelist, self).__delitem__(key.lower()) def __getitem__(self, key): if isinstance(key, basestring): return super(Namelist, self).__getitem__(key.lower()) else: keyiter = iter(key) grp, var = next(keyiter).lower(), next(keyiter).lower() return super(Namelist, self).__getitem__(grp).__getitem__(var) def __setitem__(self, key, value): if isinstance(value, dict) and not isinstance(value, Namelist): value = Namelist(value, default_start_index=self.default_start_index) elif is_nullable_list(value, dict): for i, v in enumerate(value): if isinstance(v, Namelist) or v is None: value[i] = v else: value[i] = Namelist( v, default_start_index=self.default_start_index ) super(Namelist, self).__setitem__(key.lower(), value) def __str__(self): output = StringIO() if all(isinstance(v, Namelist) for v in self.values()): self._writestream(output) else: print(repr(self), file=output) nml_string = output.getvalue().rstrip() output.close() return nml_string @property def column_width(self): return self._column_width @column_width.setter def column_width(self, width): if isinstance(width, int): if width >= 0: self._column_width = width else: raise ValueError('Column width must be nonnegative.') else: raise TypeError('Column width must be a nonnegative integer.') @property def default_start_index(self): return self._default_start_index @default_start_index.setter def default_start_index(self, value): if not isinstance(value, int): raise TypeError('default_start_index must be an integer.') self._default_start_index = value @property def end_comma(self): return self._end_comma @end_comma.setter def end_comma(self, value): if not isinstance(value, bool): raise TypeError('end_comma attribute must be a logical type.') self._end_comma = value @property def false_repr(self): return self._logical_repr[0] @false_repr.setter def false_repr(self, value): if isinstance(value, str): if not 
(value.lower().startswith('f') or value.lower().startswith('.f')): raise ValueError("Logical false representation must start " "with 'F' or '.F'.") else: self._logical_repr[0] = value else: raise TypeError('Logical false representation must be a string.') @property def float_format(self): return self._float_format @float_format.setter def float_format(self, value): if isinstance(value, str): .format(1.23, value) self._float_format = value else: raise TypeError('Floating point format code must be a string.') @property def indent(self): return self._indent @indent.setter def indent(self, value): if isinstance(value, str): if value.isspace() or len(value) == 0: self._indent = value else: raise ValueError('String indentation can only contain ' 'whitespace.') elif isinstance(value, int): if value >= 0: self._indent = value * ' ' else: raise ValueError('Indentation spacing must be nonnegative.') else: raise TypeError('Indentation must be specified by string or space ' 'width.') @property def index_spacing(self): return self._index_spacing @index_spacing.setter def index_spacing(self, value): if not isinstance(value, bool): raise TypeError('index_spacing attribute must be a logical type.') self._index_spacing = value @property def logical_repr(self): return self._logical_repr @logical_repr.setter def logical_repr(self, value): if not any(isinstance(value, t) for t in (list, tuple)): raise TypeError("Logical representation must be a tuple with " "a valid true and false value.") if not len(value) == 2: raise ValueError("List must contain two values.") self.false_repr = value[0] self.true_repr = value[1] @property def repeat_counter(self): return self._repeat_counter @repeat_counter.setter def repeat_counter(self, value): if isinstance(value, bool): self._repeat_counter = value else: raise TypeError(r"repeat must be of type ``bool``") @property def split_strings(self): return self._split_strings @split_strings.setter def split_strings(self, value): if not isinstance(value, bool): raise TypeError('split_strings attribute must be a logical type.') self._split_strings = value @property def start_index(self): return self._start_index @start_index.setter def start_index(self, value): if not isinstance(value, dict): raise TypeError('start_index attribute must be a dict.') self._start_index = value @property def true_repr(self): return self._logical_repr[1] @true_repr.setter def true_repr(self, value): if isinstance(value, str): if not (value.lower().startswith('t') or value.lower().startswith('.t')): raise ValueError("Logical true representation must start with " "'T' or '.T'.") else: self._logical_repr[1] = value else: raise TypeError('Logical true representation must be a string.') @property def uppercase(self): return self._uppercase @uppercase.setter def uppercase(self, value): if not isinstance(value, bool): raise TypeError('uppercase attribute must be a logical type.') self._uppercase = value def write(self, nml_path, force=False, sort=False): nml_is_file = hasattr(nml_path, 'read') if not force and not nml_is_file and os.path.isfile(nml_path): raise IOError('File {0} already exists.'.format(nml_path)) nml_file = nml_path if nml_is_file else open(nml_path, 'w') try: self._writestream(nml_file, sort) finally: if not nml_is_file: nml_file.close() def patch(self, nml_patch): for sec in nml_patch: if sec not in self: self[sec] = Namelist() self[sec].update(nml_patch[sec]) def groups(self): for key, value in self.items(): for inner_key, inner_value in value.items(): yield (key, inner_key), inner_value 
def _writestream(self, nml_file, sort=False): self._newline = False if sort: sel = Namelist(sorted(self.items(), key=lambda t: t[0])) else: sel = self for grp_name, grp_vars in sel.items(): if isinstance(grp_vars, list): for g_vars in grp_vars: self._write_nmlgrp(grp_name, g_vars, nml_file, sort) else: self._write_nmlgrp(grp_name, grp_vars, nml_file, sort) def _write_nmlgrp(self, grp_name, grp_vars, nml_file, sort=False): if self._newline: print(file=nml_file) self._newline = True if self.uppercase: grp_name = grp_name.upper() if sort: grp_vars = Namelist(sorted(grp_vars.items(), key=lambda t: t[0])) print('&{0}'.format(grp_name), file=nml_file) for v_name, v_val in grp_vars.items(): v_start = grp_vars.start_index.get(v_name, None) for v_str in self._var_strings(v_name, v_val, v_start=v_start): print(v_str, file=nml_file) print('/', file=nml_file) def _var_strings(self, v_name, v_values, v_idx=None, v_start=None): if self.uppercase: v_name = v_name.upper() var_strs = [] if is_nullable_list(v_values, list): if not v_idx: v_idx = [] i_s = v_start[::-1][len(v_idx)] if v_start else None if i_s is None: i_s = 1 for idx, val in enumerate(v_values, start=i_s): v_idx_new = v_idx + [idx] v_strs = self._var_strings(v_name, val, v_idx=v_idx_new, v_start=v_start) var_strs.extend(v_strs) elif isinstance(v_values, Namelist): for f_name, f_vals in v_values.items(): v_title = '%'.join([v_name, f_name]) v_start_new = v_values.start_index.get(f_name, None) v_strs = self._var_strings(v_title, f_vals, v_start=v_start_new) var_strs.extend(v_strs) elif is_nullable_list(v_values, Namelist): if not v_idx: v_idx = [] i_s = v_start[::-1][len(v_idx)] if v_start else 1 for idx, val in enumerate(v_values, start=i_s): if val is None: continue v_title = v_name + '({0})'.format(idx) v_strs = self._var_strings(v_title, val) var_strs.extend(v_strs) else: use_default_start_index = False if not isinstance(v_values, list): v_values = [v_values] use_default_start_index = False else: use_default_start_index = self.default_start_index is not None if v_idx or v_start or use_default_start_index: v_idx_repr = '(' if v_start or use_default_start_index: if v_start: i_s = v_start[0] else: i_s = self.default_start_index if i_s is None: v_idx_repr += ':' else: i_e = i_s + len(v_values) - 1 if i_s == i_e: v_idx_repr += '{0}'.format(i_s) else: v_idx_repr += '{0}:{1}'.format(i_s, i_e) else: v_idx_repr += ':' if v_idx: idx_delim = ', ' if self._index_spacing else ',' v_idx_repr += idx_delim v_idx_repr += idx_delim.join(str(i) for i in v_idx[::-1]) v_idx_repr += ')' else: v_idx_repr = '' v_header = self.indent + v_name + v_idx_repr + ' = ' val_strs = [] val_line = v_header if self._repeat_counter: v_values = list( self.RepeatValue(len(list(x)), val) for val, x in itertools.groupby(v_values) ) for i_val, v_val in enumerate(v_values): if len(v_header) >= self.column_width: column_width = len(v_header) + 1 else: column_width = self.column_width if len(val_line) < column_width: v_str = self._f90repr(v_val) if i_val < len(v_values) - 1 or self.end_comma: v_comma = ', ' else: v_comma = '' if self.split_strings and isinstance(v_val, str): idx = column_width - len(val_line + v_comma.rstrip()) v_l, v_r = v_str[:idx], v_str[idx:] if v_r: new_val_line = ( ' ' * len(v_header) + v_str + v_comma ) if len(new_val_line.rstrip()) <= column_width: val_strs.append(val_line) val_line = ' ' * len(v_header) else: while v_r: val_line += v_l val_strs.append(val_line) val_line = '' idx = column_width - len(v_comma.rstrip()) v_l, v_r = v_r[:idx], v_r[idx:] v_str = 
v_l val_line += v_str + v_comma if len(val_line) >= column_width: val_strs.append(val_line.rstrip()) val_line = ' ' * len(v_header) if val_line and not val_line.isspace(): val_strs.append(val_line.rstrip()) if val_strs and v_values[-1] is None: val_strs[-1] += ' ,' if val_strs: var_strs.extend(val_strs) return var_strs def todict(self, complex_tuple=False): nmldict = OrderedDict(self) for key, value in self.items(): if isinstance(value, Namelist): nml = copy.deepcopy(value) nmldict[key] = nml.todict(complex_tuple) elif isinstance(value, complex) and complex_tuple: nmldict[key] = [value.real, value.imag] try: nmldict['_complex'].append(key) except KeyError: nmldict['_complex'] = [key] elif isinstance(value, list): complex_list = False for idx, entry in enumerate(value): if isinstance(entry, Namelist): nml = copy.deepcopy(entry) nmldict[key][idx] = nml.todict(complex_tuple) elif isinstance(entry, complex) and complex_tuple: nmldict[key][idx] = [entry.real, entry.imag] complex_list = True if complex_list: try: nmldict['_complex'].append(key) except KeyError: nmldict['_complex'] = [key] if self.start_index: nmldict['_start_index'] = self.start_index return nmldict
Apache License 2.0
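Usage sketch via the public API (assumes f90nml is installed): writing a Namelist exercises _f90repr for each value type; the group and values below are made up.

import f90nml

nml = f90nml.Namelist({'config': {'flag': True, 'n': 3, 'x': 1.5,
                                  'z': 1 + 2j, 'name': 'case'}})
print(nml)
# Prints a "&config ... /" group with Fortran representations, e.g.
# flag = .true., n = 3, name = 'case', x = 1.5, z = (1.0, 2.0)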
wistful/pympris
pympris/Player.py
Player.Shuffle
python
def Shuffle(self, value): self.set('Shuffle', value)
A value of false indicates that playback is progressing linearly through a playlist, while true means playback is progressing through a playlist in some other order.
https://github.com/wistful/pympris/blob/4bd64a1f0d151f2adfc392ab34fd9b38894786cb/pympris/Player.py#L173-L178
from .common import convert2dbus from .Base import Base __all__ = ('Player', ) class Player(Base): IFACE = "org.mpris.MediaPlayer2.Player" def __init__(self, name, bus=None, private=False): super(Player, self).__init__(name, bus, private) def Next(self): self.iface.Next() def Previous(self): self.iface.Previous() def Pause(self): self.iface.Pause() def PlayPause(self): self.iface.PlayPause() def Stop(self): self.iface.Stop() def Play(self): self.iface.Play() def Seek(self, offset): self.iface.Seek(convert2dbus(offset, 'x')) def SetPosition(self, track_id, position): self.iface.SetPosition(convert2dbus(track_id, 'o'), convert2dbus(position, 'x')) def OpenUri(self, uri): self.iface.OpenUri(uri) @property def PlaybackStatus(self): return self.get('PlaybackStatus') @property def LoopStatus(self): return self.get('LoopStatus') @LoopStatus.setter def LoopStatus(self, status): self.set('LoopStatus', status) @property def Rate(self): return self.get('Rate') @Rate.setter def Rate(self, value): self.set('Rate', value) @property def Shuffle(self): return self.get('Shuffle') @Shuffle.setter
MIT License
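Hedged sketch: assumes the Python dbus bindings and a running MPRIS-capable player owning the example bus name below.

import dbus
import pympris

bus = dbus.SessionBus()
player = pympris.Player('org.mpris.MediaPlayer2.vlc', bus=bus)  # example bus name
player.Shuffle = True          # play the current playlist in shuffled order
print(player.Shuffle)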
pchrabka/pyspark-pydata
app/jobs/movie_genres.py
_extract_data
python
def _extract_data(spark, config): return ( spark.read.format("csv") .option("header", "true") .load(f"{config.get('source_data_path')}/movies.csv") )
Load the movies.csv file from the configured source data path into a Spark DataFrame
https://github.com/pchrabka/pyspark-pydata/blob/d71064804982db2353543b0051d802d9765a0680/app/jobs/movie_genres.py#L4-L10
from pyspark.sql.functions import col, split, explode
MIT License
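Usage sketch assuming a local Spark session; the source path is a placeholder directory that must contain a movies.csv with a header row.

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("movie_genres").getOrCreate()
config = {"source_data_path": "/tmp/data"}     # placeholder path
movies_df = _extract_data(spark, config)
movies_df.show(5)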
fuxicv/3d-face-gcns
lib/mesh_sampling.py
generate_transform_matrices
python
def generate_transform_matrices(name, refer_vertices, refer_triangles, factors): factors = [1.0 / x for x in factors] vertices = [] triangles = [] adjacencies = [] downsamp_trans = [] upsamp_trans = [] adjacencies.append( utils.get_vert_connectivity(refer_vertices, refer_triangles)) vertices.append(refer_vertices) triangles.append(refer_triangles) for factor in factors: ds_triangle, ds_transform = qslim_decimator_transformer(vertices[-1], triangles[-1], factor=factor) downsamp_trans.append(ds_transform) ds_vertice = ds_transform.dot(vertices[-1]) vertices.append(ds_vertice) triangles.append(ds_triangle) adjacencies.append(utils.get_vert_connectivity(ds_vertice, ds_triangle)) upsamp_trans.append( setup_deformation_transfer(vertices[-1], triangles[-1], vertices[-2])) for i, (vertice, triangle) in enumerate(zip(vertices, triangles)): write_obj( os.path.join('data', 'reference', name, 'reference{}.obj'.format(i)), vertice, triangle) return adjacencies, downsamp_trans, upsamp_trans
Generates len(factors) meshes, each downsampled from the previous one by the corresponding factor, computes the transformations between them, and writes each mesh to a reference OBJ file. Returns: A: Adjacency matrix for each of the meshes D: Downsampling transforms between each of the meshes U: Upsampling transforms between each of the meshes
https://github.com/fuxicv/3d-face-gcns/blob/2c4459cfa05faaf82ef85994a696e79d2993d650/lib/mesh_sampling.py#L238-L284
import heapq import math import os from random import choice import numpy as np import scipy.sparse as sp import utils from lib.mesh_io import write_obj def vertex_quadrics(vertice, triangle): v_quadrics = np.zeros((len(vertice), 4, 4)) for _, tri in enumerate(triangle): vert_idxs = tri verts = np.hstack((vertice[vert_idxs], np.array([1, 1, 1]).reshape(-1, 1))) _, _, v = np.linalg.svd(verts) eq = v[-1, :].reshape(-1, 1) eq = eq / (np.linalg.norm(eq[0:3])) for k in range(3): v_quadrics[tri[k], :, :] += np.outer(eq, eq) return v_quadrics def setup_deformation_transfer(src_vert, src_tri, tgt_vert): rows = np.zeros(3 * tgt_vert.shape[0]) cols = np.zeros(3 * tgt_vert.shape[0]) coeffs_v = np.zeros(3 * tgt_vert.shape[0]) nearest_faces, nearest_parts, nearest_vertices = utils.aabbtree_compute_nearest( src_vert, src_tri, tgt_vert, True) nearest_faces = nearest_faces.ravel().astype(np.int64) nearest_parts = nearest_parts.ravel().astype(np.int64) nearest_vertices = nearest_vertices.ravel() for i in range(tgt_vert.shape[0]): f_id = nearest_faces[i] nearest_f = src_tri[f_id] nearest_v = nearest_vertices[3 * i:3 * i + 3] rows[3 * i:3 * i + 3] = i * np.ones(3) cols[3 * i:3 * i + 3] = nearest_f n_id = nearest_parts[i] if n_id == 0: A = np.vstack((src_vert[nearest_f])).T coeffs_v[3 * i:3 * i + 3] = np.linalg.lstsq(A, nearest_v, rcond=None)[0] elif 0 < n_id <= 3: A = np.vstack( (src_vert[nearest_f[n_id - 1]], src_vert[nearest_f[n_id % 3]])).T tmp_coeffs = np.linalg.lstsq(A, tgt_vert[i], rcond=None)[0] coeffs_v[3 * i + n_id - 1] = tmp_coeffs[0] coeffs_v[3 * i + n_id % 3] = tmp_coeffs[1] else: coeffs_v[3 * i + n_id - 4] = 1.0 matrix = sp.csc_matrix((coeffs_v, (rows, cols)), shape=(tgt_vert.shape[0], src_vert.shape[0])) return matrix def qslim_decimator_transformer(vertice, triangle, factor=None, n_verts_desired=None): if factor is None and n_verts_desired is None: raise Exception('Need either factor or n_verts_desired.') if n_verts_desired is None: n_verts_desired = math.ceil(len(vertice) * factor) * 1.0 Qv = vertex_quadrics(vertice, triangle) vert_adj = utils.get_vertices_per_edge(vertice, triangle) vert_adj = sp.csc_matrix( (vert_adj[:, 0] * 0 + 1, (vert_adj[:, 0], vert_adj[:, 1])), shape=(len(vertice), len(vertice))) vert_adj = vert_adj + vert_adj.T vert_adj = vert_adj.tocoo() def collapse_cost(Qv, r, c, v): Qsum = Qv[r, :, :] + Qv[c, :, :] p1 = np.vstack((v[r].reshape(-1, 1), np.array([1]).reshape(-1, 1))) p2 = np.vstack((v[c].reshape(-1, 1), np.array([1]).reshape(-1, 1))) destroy_c_cost = p1.T.dot(Qsum).dot(p1) destroy_r_cost = p2.T.dot(Qsum).dot(p2) result = { 'destroy_c_cost': destroy_c_cost, 'destroy_r_cost': destroy_r_cost, 'collapse_cost': min([destroy_c_cost, destroy_r_cost]), 'Qsum': Qsum } return result queue = [] for k in range(vert_adj.nnz): r = vert_adj.row[k] c = vert_adj.col[k] if r > c: continue cost = collapse_cost(Qv, r, c, vertice)['collapse_cost'] heapq.heappush(queue, (cost, (r, c))) collapse_list = [] nverts_total = len(vertice) faces = triangle.copy() while nverts_total > n_verts_desired: e = heapq.heappop(queue) r = e[1][0] c = e[1][1] if r == c: continue cost = collapse_cost(Qv, r, c, vertice) if cost['collapse_cost'] > e[0]: heapq.heappush(queue, (cost['collapse_cost'], e[1])) continue else: if cost['destroy_c_cost'] < cost['destroy_r_cost']: to_destroy = c to_keep = r else: to_destroy = r to_keep = c collapse_list.append([to_keep, to_destroy]) np.place(faces, faces == to_destroy, to_keep) which1 = [ idx for idx in range(len(queue)) if queue[idx][1][0] == to_destroy ] which2 = [ 
idx for idx in range(len(queue)) if queue[idx][1][1] == to_destroy ] for k in which1: queue[k] = (queue[k][0], (to_keep, queue[k][1][1])) for k in which2: queue[k] = (queue[k][0], (queue[k][1][0], to_keep)) Qv[r, :, :] = cost['Qsum'] Qv[c, :, :] = cost['Qsum'] a = faces[:, 0] == faces[:, 1] b = faces[:, 1] == faces[:, 2] c = faces[:, 2] == faces[:, 0] def logical_or3(x, y, z): return np.logical_or(x, np.logical_or(y, z)) faces_to_keep = np.logical_not(logical_or3(a, b, c)) faces = faces[faces_to_keep, :].copy() nverts_total = (len(np.unique(faces.flatten()))) new_faces, mtx = _get_sparse_transform(faces, len(vertice)) return new_faces, mtx def _get_sparse_transform(faces, num_original_verts): verts_left = np.unique(faces.flatten()) IS = np.arange(len(verts_left)) JS = verts_left data = np.ones(len(JS)) mp = np.arange(0, np.max(faces.flatten()) + 1) mp[JS] = IS new_faces = mp[faces.copy().flatten()].reshape((-1, 3)) ij = np.vstack((IS.flatten(), JS.flatten())) mtx = sp.csc_matrix((data, ij), shape=(len(verts_left), num_original_verts)) return (new_faces, mtx)
MIT License
ncullen93/pybn
pyBN/classes/factor.py
Factor.__sub__
python
def __sub__(self, rv_val): self.reduce_factor(rv_val[0],rv_val[1]) return self
Overloads the subtraction operator so that a factor can be reduced by evidence, e.g. ``factor - (rv, val)`` reduces the factor to random variable ``rv`` taking the value ``val``.
https://github.com/ncullen93/pybn/blob/58bf684b4ac0bbfa7e2aa394ba3dd302d3dd22db/pyBN/classes/factor.py#L178-L184
from __future__ import division __author__ = """Nicholas Cullen <ncullen.th@dartmouth.edu>""" import numpy as np class Factor(object): def __init__(self, bn, var): self.bn = bn self.var = var self.cpt = np.array(bn.cpt(var)) self.scope = bn.scope(var) self.card = dict([(rv, bn.card(rv)) for rv in self.scope]) self.stride = {self.var:1} s=self.card[self.var] for v in bn.parents(var): self.stride[v]=s s*=self.card[v] def __repr__(self): s = self.var + ' | ' s += ', '.join(self.parents()) return s def __str__(self): s = self.var + ' | ' s += ', '.join(self.parents()) return s def __mul__(self, other_factor): self.multiply_factor(other_factor) return self
MIT License
pymetrics/audit-ai
auditai/utils/validate.py
_num_samples
python
def _num_samples(x): if not hasattr(x, '__len__') and not hasattr(x, 'shape'): if hasattr(x, '__array__'): x = np.asarray(x) else: raise TypeError("Expected sequence or array_like, got {}".format( type(x))) if hasattr(x, 'shape'): if len(x.shape) == 0: raise TypeError("Singleton array {} cannot be considered" " a valid collection.".format(x)) return x.shape[0] else: return len(x)
Return number of samples in array_like x.
https://github.com/pymetrics/audit-ai/blob/4891b1d3c813a0e6ce657eaee3f1b2ab50e8f429/auditai/utils/validate.py#L5-L21
import numpy as np import pandas as pd
MIT License
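Quick illustration of _num_samples on a few input types (the values are arbitrary).

import numpy as np

print(_num_samples([1, 2, 3]))           # 3, via len()
print(_num_samples(np.zeros((5, 2))))    # 5, via shape[0]
try:
    _num_samples(np.float64(1.0))        # 0-d array-like is rejected
except TypeError as err:
    print(err)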
pybrain2/pybrain2
pybrain/structure/evolvables/evolvable.py
Evolvable.newSimilarInstance
python
def newSimilarInstance(self): res = self.copy() res.randomize() return res
Generates a new Evolvable of the same kind.
https://github.com/pybrain2/pybrain2/blob/33ead60704d126e58c10d458ddd1e5e5fd17b65d/pybrain/structure/evolvables/evolvable.py#L24-L28
__author__ = 'Tom Schaul, tom@idsia.ch' import copy from pybrain.utilities import abstractMethod, Named class Evolvable(Named): def mutate(self, **args): abstractMethod() def copy(self): return copy.deepcopy(self) def randomize(self): abstractMethod()
BSD 3-Clause New or Revised License
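Toy subclass (not part of PyBrain) showing how newSimilarInstance produces a fresh, randomized individual of the same kind.

import random
from pybrain.structure.evolvables.evolvable import Evolvable

class BitString(Evolvable):
    def __init__(self, n=8):
        self.bits = [0] * n
    def randomize(self):
        self.bits = [random.randint(0, 1) for _ in self.bits]
    def mutate(self, **args):
        i = random.randrange(len(self.bits))
        self.bits[i] ^= 1          # flip one random bit

parent = BitString()
child = parent.newSimilarInstance()   # deep copy of parent, then randomized
print(parent.bits, child.bits)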
app-sre/qontract-reconcile
reconcile/queries.py
get_environments
python
def get_environments(): gqlapi = gql.get_api() return gqlapi.query(ENVIRONMENTS_QUERY)['environments']
Returns all Environments
https://github.com/app-sre/qontract-reconcile/blob/67af2226f58f538b626f51700109ff5074911160/reconcile/queries.py#L891-L894
import logging import itertools from textwrap import indent from jinja2 import Template from reconcile.utils import gql APP_INTERFACE_SETTINGS_QUERY = """ { settings: app_interface_settings_v1 { vault kubeBinary mergeRequestGateway saasDeployJobTemplate hashLength dependencies { type services { name } } credentials { name secret { path field } } sqlQuery { imageRepository pullSecret { path version labels annotations type } } } } """ def get_app_interface_settings(): gqlapi = gql.get_api() settings = gqlapi.query(APP_INTERFACE_SETTINGS_QUERY)['settings'] if settings: return settings[0] return None APP_INTERFACE_EMAILS_QUERY = """ { emails: app_interface_emails_v1 { name subject to { aliases services { serviceOwners { email } } clusters { name } namespaces { name } aws_accounts { accountOwners { email } } roles { users { org_username } } users { org_username } } body } } """ def get_app_interface_emails(): gqlapi = gql.get_api() return gqlapi.query(APP_INTERFACE_EMAILS_QUERY)['emails'] CREDENTIALS_REQUESTS_QUERY = """ { credentials_requests: credentials_requests_v1 { name description user { org_username public_gpg_key } credentials } } """ def get_credentials_requests(): gqlapi = gql.get_api() return gqlapi.query(CREDENTIALS_REQUESTS_QUERY)['credentials_requests'] def get_integrations(): gqlapi = gql.get_api() return gqlapi.query(gql.INTEGRATIONS_QUERY)['integrations'] JENKINS_INSTANCES_QUERY = """ { instances: jenkins_instances_v1 { name serverUrl token { path field } previousUrls plugins deleteMethod managedProjects buildsCleanupRules { name keep_hours } } } """ def get_jenkins_instances(): gqlapi = gql.get_api() return gqlapi.query(JENKINS_INSTANCES_QUERY)['instances'] def get_jenkins_instances_previous_urls(): instances = get_jenkins_instances() all_previous_urls = [] for instance in instances: previous_urls = instance.get('previousUrls') if previous_urls: all_previous_urls.extend(previous_urls) return all_previous_urls GITLAB_INSTANCES_QUERY = """ { instances: gitlabinstance_v1 { url token { path field } managedGroups projectRequests { group projects } sslVerify } } """ def get_gitlab_instance(): gqlapi = gql.get_api() return gqlapi.query(GITLAB_INSTANCES_QUERY)['instances'][0] GITHUB_INSTANCE_QUERY = """ { instances: githuborg_v1 { url token { path field } } } """ def get_github_instance(): gqlapi = gql.get_api() instances = gqlapi.query(GITHUB_INSTANCE_QUERY)['instances'] for instance in instances: if instance['url'] == "https://github.com/app-sre": return instance GITHUB_ORGS_QUERY = """ { orgs: githuborg_v1 { name two_factor_authentication token { path field } } } """ def get_github_orgs(): gqlapi = gql.get_api() return gqlapi.query(GITHUB_ORGS_QUERY)['orgs'] AWS_ACCOUNTS_QUERY = """ { accounts: awsaccounts_v1 { path name uid consoleUrl resourcesDefaultRegion supportedDeploymentRegions providerVersion accountOwners { name email } automationToken { path field } garbageCollection enableDeletion disable { integrations } deleteKeys {% if reset_passwords %} resetPasswords { user { org_username } requestId } {% endif %} premiumSupport ecrs { region } } } """ def get_aws_accounts(reset_passwords=False): gqlapi = gql.get_api() query = Template(AWS_ACCOUNTS_QUERY).render( reset_passwords=reset_passwords, ) return gqlapi.query(query)['accounts'] CLUSTERS_QUERY = """ { clusters: clusters_v1 { path name serverUrl consoleUrl kibanaUrl elbFQDN prometheusUrl managedGroups managedClusterRoles jumpHost { hostname knownHosts user port identity { path field format } } auth { service org team } ocm { 
name url accessTokenClientId accessTokenUrl offlineToken { path field format version } blockedVersions } awsInfrastructureAccess { awsGroup { account { name uid terraformUsername automationToken { path field } } roles { users { org_username } } } accessLevel } spec { id external_id provider region channel version initial_version multi_az nodes instance_type storage load_balancers private provision_shard_id autoscale { min_replicas max_replicas } } externalConfiguration { labels } upgradePolicy { workloads schedule conditions { soakDays } } additionalRouters { private route_selectors } network { vpc service pod } machinePools { id instance_type replicas labels taints { key value effect } } peering { connections { name provider manageRoutes delete ... on ClusterPeeringConnectionAccount_v1 { vpc { account { name uid terraformUsername automationToken { path field } } vpc_id cidr_block region } } ... on ClusterPeeringConnectionAccountVPCMesh_v1 { account { name uid terraformUsername automationToken { path field } } tags } ... on ClusterPeeringConnectionAccountTGW_v1 { account { name uid terraformUsername automationToken { path field } } tags cidrBlock manageSecurityGroups assumeRole } ... on ClusterPeeringConnectionClusterRequester_v1 { cluster { name network { vpc } spec { region } awsInfrastructureAccess { awsGroup { account { name uid terraformUsername automationToken { path field } } } accessLevel } peering { connections { name provider manageRoutes ... on ClusterPeeringConnectionClusterAccepter_v1 { name cluster { name } } } } } } } } addons { name parameters { id value } } automationToken { path field format } internal disable { integrations } } } """ CLUSTERS_MINIMAL_QUERY = """ { clusters: clusters_v1 { name serverUrl consoleUrl kibanaUrl prometheusUrl jumpHost { hostname knownHosts user port identity { path field format } } managedGroups ocm { name } spec { private } automationToken { path field format } internal disable { integrations } auth { team } } } """ def get_clusters(minimal=False): gqlapi = gql.get_api() query = CLUSTERS_MINIMAL_QUERY if minimal else CLUSTERS_QUERY return gqlapi.query(query)['clusters'] KAFKA_CLUSTERS_QUERY = """ { clusters: kafka_clusters_v1 { name ocm { name url accessTokenClientId accessTokenUrl offlineToken { path field format version } } spec { provider region multi_az } namespaces { name cluster { name serverUrl jumpHost { hostname knownHosts user port identity { path field format } } automationToken { path field format } } } } } """ def get_kafka_clusters(minimal=False): gqlapi = gql.get_api() return gqlapi.query(KAFKA_CLUSTERS_QUERY)['clusters'] NAMESPACES_QUERY = """ { namespaces: namespaces_v1 { name delete labels managedRoles app { name serviceOwners { name email } } terraformResources { provider ... on NamespaceTerraformResourceRDS_v1 { account identifier output_resource_name defaults replica_source } ... 
on NamespaceTerraformResourceECR_v1 { account region identifier output_resource_name mirror { url pullCredentials { path field } tags tagsExclude } } } cluster { name serverUrl jumpHost { hostname knownHosts user port identity { path field format } } automationToken { path field format } internal disable { integrations } } managedResourceNames { resource resourceNames } limitRanges { name limits { default { cpu memory } defaultRequest { cpu memory } max { cpu memory } maxLimitRequestRatio { cpu memory } min { cpu memory } type } } quota { quotas { name resources { limits { cpu memory } requests { cpu memory } } scopes } } } } """ NAMESPACES_MINIMAL_QUERY = """ { namespaces: namespaces_v1 { name delete labels cluster { name serverUrl jumpHost { hostname knownHosts user port identity { path field format } } automationToken { path field format } internal disable { integrations } } } } """ def get_namespaces(minimal=False): gqlapi = gql.get_api() if minimal: return gqlapi.query(NAMESPACES_MINIMAL_QUERY)['namespaces'] else: return gqlapi.query(NAMESPACES_QUERY)['namespaces'] SA_TOKEN = """ namespace { name cluster { name serverUrl jumpHost { hostname knownHosts user port identity { path field format } } automationToken { path field format } internal disable { integrations } } } serviceAccountName """ SERVICEACCOUNT_TOKENS_QUERY = """ { namespaces: namespaces_v1 { name cluster { name serverUrl jumpHost { hostname knownHosts user port identity { path field format } } automationToken { path field format } internal disable { integrations } } sharedResources { openshiftServiceAccountTokens { %s } } openshiftServiceAccountTokens { %s } } } """ % (indent(SA_TOKEN, 8*' '), indent(SA_TOKEN, 6*' ')) def get_serviceaccount_tokens(): gqlapi = gql.get_api() return gqlapi.query(SERVICEACCOUNT_TOKENS_QUERY)['namespaces'] PRODUCTS_QUERY = """ { products: products_v1 { path name description environments { name description } } } """ def get_products(): gqlapi = gql.get_api() return gqlapi.query(PRODUCTS_QUERY)['products'] ENVIRONMENTS_QUERY = """ { environments: environments_v1 { path name description product { name } namespaces { name app { name } cluster { name } } } } """
Apache License 2.0
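Illustration only: the shape of the data get_environments returns follows ENVIRONMENTS_QUERY; the values below are invented and no GraphQL server is queried.

environments = [
    {'path': '/environments/production.yml', 'name': 'production',
     'description': 'production environment',
     'product': {'name': 'sample-product'},
     'namespaces': [{'name': 'app-prod', 'app': {'name': 'app'},
                     'cluster': {'name': 'prod-cluster'}}]},
]
for env in environments:
    print(env['name'], env['product']['name'],
          [ns['name'] for ns in env['namespaces']])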
drorlab/atom3d
atom3d/util/formats.py
read_sdf
python
def read_sdf(sdf_file, name=None, sanitize=False, add_hs=False, remove_hs=False): dflist = [] molecules = read_sdf_to_mol(sdf_file, sanitize=sanitize, add_hs=add_hs, remove_hs=remove_hs) for im,m in enumerate(molecules): if m is not None: df = mol_to_df(m, residue=im, ensemble=m.GetProp("_Name"), structure=m.GetProp("_Name"), model=m.GetProp("_Name")) dflist.append(df) assert len(dflist) >= 1 if len(dflist) > 1: bp = df_to_bp(merge_dfs(dflist)) else: bp = df_to_bp(dflist[0]) return bp
Read SDF file into Biopython structure. :param sdf_file: file path :type sdf_file: Union[str, Path] :param sanitize: sanitize structure with RDKit. :type sanitize: bool :param add_hs: add hydrogen atoms with RDKit. :type add_hs: bool :param remove_hs: remove hydrogen atoms with RDKit. :type remove_hs: bool :return: Biopython object containing structure :rtype: Bio.PDB.Structure
https://github.com/drorlab/atom3d/blob/7eacb676f56b4130fd805f4b2901a600170b88f9/atom3d/util/formats.py#L280-L312
import collections as col import gzip import os import re import Bio.PDB.Atom import Bio.PDB.Chain import Bio.PDB.Model import Bio.PDB.Residue import Bio.PDB.Structure import numpy as np import pandas as pd def split_df(df, key): return [(x, y) for x, y in df.groupby(key)] def merge_dfs(dfs): return pd.concat(dfs).reset_index(drop=True) def bp_to_df(bp): df = col.defaultdict(list) for atom in Bio.PDB.Selection.unfold_entities(bp, 'A'): residue = atom.get_parent() chain = residue.get_parent() model = chain.get_parent() df['ensemble'].append(bp.get_id()) df['subunit'].append(0) df['structure'].append(bp.get_id()) df['model'].append(model.serial_num) df['chain'].append(chain.id) df['hetero'].append(residue.id[0]) df['insertion_code'].append(residue.id[2]) df['residue'].append(residue.id[1]) df['segid'].append(residue.segid) df['resname'].append(residue.resname) df['altloc'].append(atom.altloc) df['occupancy'].append(atom.occupancy) df['bfactor'].append(atom.bfactor) df['x'].append(atom.coord[0]) df['y'].append(atom.coord[1]) df['z'].append(atom.coord[2]) df['element'].append(atom.element) df['name'].append(atom.name) df['fullname'].append(atom.fullname) df['serial_number'].append(atom.serial_number) df = pd.DataFrame(df) return df def df_to_bp(df_in): all_structures = df_to_bps(df_in) if len(all_structures) > 1: raise RuntimeError('More than one structure in provided dataframe.') return all_structures[0] def df_to_bps(df_in): df = df_in.copy() all_structures = [] for (structure, s_atoms) in split_df(df_in, ['ensemble', 'structure']): new_structure = Bio.PDB.Structure.Structure(structure[1]) for (model, m_atoms) in df.groupby(['model']): new_model = Bio.PDB.Model.Model(model) for (chain, c_atoms) in m_atoms.groupby(['chain']): new_chain = Bio.PDB.Chain.Chain(chain) for (residue, r_atoms) in c_atoms.groupby( ['hetero', 'residue', 'insertion_code']): rep = r_atoms.iloc[0] new_residue = Bio.PDB.Residue.Residue( (rep['hetero'], rep['residue'], rep['insertion_code']), rep['resname'], rep['segid']) for row, atom in r_atoms.iterrows(): new_atom = Bio.PDB.Atom.Atom( atom['name'], [atom['x'], atom['y'], atom['z']], atom['bfactor'], atom['occupancy'], atom['altloc'], atom['fullname'], atom['serial_number'], atom['element']) new_residue.add(new_atom) new_chain.add(new_residue) new_model.add(new_chain) new_structure.add(new_model) all_structures.append(new_structure) return all_structures def read_any(f, name=None): if is_pdb(f): return read_pdb(f, name) elif is_pdb_gz(f): return read_pdb_gz(f, name) elif is_mmcif(f): return read_mmcif(f, name) elif is_sdf(f): return read_sdf(f) elif is_xyz(f): return read_xyz(f, name) else: raise ValueError(f"Unrecognized filetype for {f:}") patterns = { 'pdb': r'pdb[0-9]*$', 'pdb.gz': r'pdb[0-9]*\.gz$', 'mmcif': r'(mm)?cif$', 'sdf': r'sdf[0-9]*$', 'xyz': r'xyz[0-9]*$', 'xyz-gdb': r'xyz[0-9]*$', 'silent': r'out$', 'sharded': r'@[0-9]+', } _regexes = {k: re.compile(v) for k, v in patterns.items()} def is_type(f, filetype): if filetype in _regexes: return _regexes[filetype].search(str(f)) else: return re.compile(filetype + r'$').search(str(f)) def is_pdb(f): return _regexes['pdb'].search(str(f)) def is_pdb_gz(f): return _regexes['pdb.gz'].search(str(f)) def is_mmcif(f): return _regexes['mmcif'].search(str(f)) def is_sdf(f): return _regexes['sdf'].search(str(f)) def is_xyz(f): return _regexes['xyz'].search(str(f)) def is_sharded(f): return _regexes['sharded'].search(str(f)) def read_pdb(pdb_file, name=None): if name is None: name = os.path.basename(pdb_file) parser = 
Bio.PDB.PDBParser(QUIET=True) bp = parser.get_structure(name, pdb_file) return bp def read_pdb_gz(pdb_gz_file, name=None): if name is None: name = os.path.basename(pdb_gz_file) parser = Bio.PDB.PDBParser(QUIET=True) bp = parser.get_structure( name, gzip.open(pdb_gz_file, mode='rt', encoding='latin1')) return bp def read_mmcif(mmcif_file, name=None): if name is None: os.path.basename(mmcif_file) parser = Bio.PDB.MMCIFParser(QUIET=True) return parser.get_structure(name, mmcif_file)
MIT License
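Usage sketch, assuming the atom3d package (with RDKit) and an SDF file at the placeholder path; hydrogens are stripped on load here.

from atom3d.util import formats as fo

bp = fo.read_sdf('ligands.sdf', sanitize=True, remove_hs=True)   # placeholder file
df = fo.bp_to_df(bp)
print(df[['resname', 'element', 'x', 'y', 'z']].head())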
vitruvianscience/opendeep
opendeep/utils/midi/RawInstreamFile.py
RawInstreamFile.getCursor
python
def getCursor(self): return self.cursor
Returns the value of the cursor
https://github.com/vitruvianscience/opendeep/blob/e96efc449101094354b615cf15afe6d03644fc36/opendeep/utils/midi/RawInstreamFile.py#L54-L56
from __future__ import absolute_import from six import string_types from .DataTypeConverters import readBew, readVar, varLen class RawInstreamFile: def __init__(self, infile=''): if infile: if isinstance(infile, string_types): infile = open(infile, 'rb') self.data = infile.read() infile.close() else: self.data = infile.read() else: self.data = '' self.cursor = 0 def setData(self, data=''): self.data = data def setCursor(self, position=0): self.cursor = position
Apache License 2.0
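Tiny illustration using an in-memory byte stream instead of a real MIDI file (the bytes are just a MIDI header-chunk prefix).

from io import BytesIO
from opendeep.utils.midi.RawInstreamFile import RawInstreamFile

raw = RawInstreamFile(BytesIO(b'MThd\x00\x00\x00\x06'))
print(raw.getCursor())   # 0 - nothing consumed yet
raw.setCursor(4)
print(raw.getCursor())   # 4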
google/clusterfuzz
src/clusterfuzz/_internal/metrics/fuzzer_stats.py
Query._job_and_fuzzer_selector
python
def _job_and_fuzzer_selector(self): result = [] if self.job_types: result.append('(%s)' % ' OR '.join( ['job = \'%s\'' % job_type for job_type in self.job_types])) if self.fuzzer_name != self.fuzzer_or_engine_name: result.append('fuzzer = \'%s\'' % self.fuzzer_name) return result
Return the job and fuzzer filter conditions.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/clusterfuzz/_internal/metrics/fuzzer_stats.py#L775-L785
import datetime import functools import itertools import json import os import random import re from clusterfuzz._internal.base import memoize from clusterfuzz._internal.base import utils from clusterfuzz._internal.datastore import data_handler from clusterfuzz._internal.datastore import data_types from clusterfuzz._internal.datastore import fuzz_target_utils from clusterfuzz._internal.google_cloud_utils import big_query from clusterfuzz._internal.google_cloud_utils import storage from clusterfuzz._internal.metrics import fuzzer_logs from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import shell STATS_FILE_EXTENSION = '.stats2' PERFORMANCE_REPORT_VIEWER_PATH = '/performance-report/{fuzzer}/{job}/{date}' JOB_RUN_SCHEMA = { 'fields': [{ 'name': 'testcases_executed', 'type': 'INTEGER', 'mode': 'NULLABLE' }, { 'name': 'build_revision', 'type': 'INTEGER', 'mode': 'NULLABLE' }, { 'name': 'new_crashes', 'type': 'INTEGER', 'mode': 'NULLABLE' }, { 'name': 'job', 'type': 'STRING', 'mode': 'NULLABLE' }, { 'name': 'timestamp', 'type': 'FLOAT', 'mode': 'NULLABLE' }, { 'name': 'crashes', 'type': 'RECORD', 'mode': 'REPEATED', 'fields': [{ 'name': 'crash_type', 'type': 'STRING', 'mode': 'NULLABLE' }, { 'name': 'is_new', 'type': 'BOOLEAN', 'mode': 'NULLABLE' }, { 'name': 'crash_state', 'type': 'STRING', 'mode': 'NULLABLE' }, { 'name': 'security_flag', 'type': 'BOOLEAN', 'mode': 'NULLABLE' }, { 'name': 'count', 'type': 'INTEGER', 'mode': 'NULLABLE' }] }, { 'name': 'known_crashes', 'type': 'INTEGER', 'mode': 'NULLABLE' }, { 'name': 'fuzzer', 'type': 'STRING', 'mode': 'NULLABLE' }, { 'name': 'kind', 'type': 'STRING', 'mode': 'NULLABLE' }] } class FuzzerStatsException(Exception): class BaseRun(object): VALID_FIELDNAME_PATTERN = re.compile(r'[a-zA-Z][a-zA-Z0-9_]*') def __init__(self, fuzzer, job, build_revision, timestamp): self._stats_data = { 'fuzzer': fuzzer, 'job': job, 'build_revision': build_revision, 'timestamp': timestamp, } def __getitem__(self, key): return self._stats_data.__getitem__(key) def __setitem__(self, key, value): if not re.compile(self.VALID_FIELDNAME_PATTERN): raise ValueError('Invalid key name.') return self._stats_data.__setitem__(key, value) def __delitem__(self, key): return self._stats_data.__delitem__(key) def __contains__(self, key): return self._stats_data.__contains__(key) def to_json(self): return json.dumps(self._stats_data) def update(self, other): self._stats_data.update(other) @property def data(self): return self._stats_data @property def kind(self): return self._stats_data['kind'] @property def fuzzer(self): return self._stats_data['fuzzer'] @property def job(self): return self._stats_data['job'] @property def build_revision(self): return self._stats_data['build_revision'] @property def timestamp(self): return self._stats_data['timestamp'] @staticmethod def from_json(json_data): try: data = json.loads(json_data) except (ValueError, TypeError): return None if not isinstance(data, dict): return None result = None try: kind = data['kind'] if kind == 'TestcaseRun': result = TestcaseRun(data['fuzzer'], data['job'], data['build_revision'], data['timestamp']) elif kind == 'JobRun': result = JobRun(data['fuzzer'], data['job'], data['build_revision'], data['timestamp'], data['testcases_executed'], data['new_crashes'], data['known_crashes'], data.get('crashes')) except KeyError: return None if result: result.update(data) return result class JobRun(BaseRun): SCHEMA = JOB_RUN_SCHEMA def __init__(self, 
fuzzer, job, build_revision, timestamp, number_of_testcases, new_crashes, known_crashes, crashes): super(JobRun, self).__init__(fuzzer, job, build_revision, timestamp) self._stats_data.update({ 'kind': 'JobRun', 'testcases_executed': number_of_testcases, 'new_crashes': new_crashes, 'known_crashes': known_crashes, 'crashes': crashes }) class TestcaseRun(BaseRun): SCHEMA = None def __init__(self, fuzzer, job, build_revision, timestamp): super(TestcaseRun, self).__init__(fuzzer, job, build_revision, timestamp) self._stats_data.update({ 'kind': 'TestcaseRun', }) source = environment.get_value('STATS_SOURCE') if source: self._stats_data['source'] = source @staticmethod def get_stats_filename(testcase_file_path): return testcase_file_path + STATS_FILE_EXTENSION @staticmethod def read_from_disk(testcase_file_path, delete=False): stats_file_path = TestcaseRun.get_stats_filename(testcase_file_path) if not os.path.exists(stats_file_path): return None fuzzer_run = None with open(stats_file_path) as f: fuzzer_run = BaseRun.from_json(f.read()) if delete: shell.remove_file(stats_file_path) return fuzzer_run @staticmethod def write_to_disk(testcase_run, testcase_file_path): if not testcase_run: return stats_file_path = TestcaseRun.get_stats_filename(testcase_file_path) with open(stats_file_path, 'w') as f: f.write(testcase_run.to_json()) class QueryGroupBy(object): GROUP_BY_NONE = 0 GROUP_BY_REVISION = 1 GROUP_BY_DAY = 2 GROUP_BY_TIME = 3 GROUP_BY_JOB = 4 GROUP_BY_FUZZER = 5 def group_by_to_field_name(group_by): if group_by == QueryGroupBy.GROUP_BY_REVISION: return 'build_revision' if group_by == QueryGroupBy.GROUP_BY_DAY: return 'date' if group_by == QueryGroupBy.GROUP_BY_TIME: return 'time' if group_by == QueryGroupBy.GROUP_BY_JOB: return 'job' if group_by == QueryGroupBy.GROUP_BY_FUZZER: return 'fuzzer' return None class BuiltinFieldData(object): def __init__(self, value, sort_key=None, link=None): self.value = value self.sort_key = sort_key self.link = link class BuiltinFieldSpecifier(object): def __init__(self, name, alias=None): self.name = name self.alias = alias def create(self, ctx=None): constructor = BUILTIN_FIELD_CONSTRUCTORS.get(self.name) if not constructor: return None return constructor(ctx) def field_class(self): constructor = BUILTIN_FIELD_CONSTRUCTORS.get(self.name) if not constructor: return None if isinstance(constructor, functools.partial): return constructor.func return constructor class BuiltinField(object): def __init__(self, ctx=None): self.ctx = ctx def get(self, group_by, group_by_value): return None class BuiltinFieldContext(object): def __init__(self, fuzzer=None, jobs=None): self.fuzzer = fuzzer self.jobs = jobs def single_job_or_none(self): if self.jobs and len(self.jobs) == 1: return self.jobs[0] return None class CoverageFieldContext(BuiltinFieldContext): def __init__(self, fuzzer=None, jobs=None): super(CoverageFieldContext, self).__init__(fuzzer=fuzzer, jobs=jobs) @memoize.wrap(memoize.FifoInMemory(256)) def get_coverage_info(self, fuzzer, date=None): if fuzzer in data_types.BUILTIN_FUZZERS: job = self.single_job_or_none() project = data_handler.get_project_name(job) return get_coverage_info(project, date) fuzz_target = data_handler.get_fuzz_target(fuzzer) if fuzz_target: fuzzer = fuzz_target.project_qualified_name() return get_coverage_info(fuzzer, date) class BaseCoverageField(object): CONTEXT_CLASS = CoverageFieldContext def __init__(self, ctx): self.ctx = ctx def get_coverage_info(self, group_by, group_by_value): coverage_info = None if group_by == 
QueryGroupBy.GROUP_BY_DAY: coverage_info = self.ctx.get_coverage_info(self.ctx.fuzzer, group_by_value) elif group_by == QueryGroupBy.GROUP_BY_FUZZER: coverage_info = self.ctx.get_coverage_info(group_by_value) elif group_by == QueryGroupBy.GROUP_BY_JOB: coverage_info = self.ctx.get_coverage_info(self.ctx.fuzzer) return coverage_info class CoverageField(BaseCoverageField): EDGE = 0 FUNCTION = 1 VALUE_TYPE = float def __init__(self, coverage_type, ctx=None): super(CoverageField, self).__init__(ctx) self.coverage_type = coverage_type def get(self, group_by, group_by_value): coverage_info = self.get_coverage_info(group_by, group_by_value) if not coverage_info: return None if self.coverage_type == self.EDGE: covered = coverage_info.edges_covered total = coverage_info.edges_total else: covered = coverage_info.functions_covered total = coverage_info.functions_total if covered is None or total is None: return None if not total: logs.log_error( 'Invalid coverage info: total equals 0 for "%s".' % self.ctx.fuzzer) return BuiltinFieldData('No coverage', sort_key=0.0) percentage = 100.0 * float(covered) / total display_value = '%.2f%% (%d/%d)' % (percentage, covered, total) return BuiltinFieldData(display_value, sort_key=percentage) class CorpusBackupField(BaseCoverageField): VALUE_TYPE = str def __init__(self, ctx=None): super(CorpusBackupField, self).__init__(ctx) def get(self, group_by, group_by_value): coverage_info = self.get_coverage_info(group_by, group_by_value) if not coverage_info: return None if not coverage_info.corpus_backup_location: return None corpus_backup_location = os.path.dirname( coverage_info.corpus_backup_location) display_value = 'Download' return BuiltinFieldData(display_value, link=corpus_backup_location) class CorpusSizeField(BaseCoverageField): CORPUS = 0 QUARANTINE = 1 VALUE_TYPE = int def __init__(self, corpus_type, ctx=None): super(CorpusSizeField, self).__init__(ctx) self.corpus_type = corpus_type def get(self, group_by, group_by_value): if (self.ctx.fuzzer in data_types.BUILTIN_FUZZERS and group_by == QueryGroupBy.GROUP_BY_DAY): return None coverage_info = self.get_coverage_info(group_by, group_by_value) if not coverage_info: return None if self.corpus_type == self.CORPUS: corpus_size_units = coverage_info.corpus_size_units corpus_size_bytes = coverage_info.corpus_size_bytes corpus_location = coverage_info.corpus_location else: corpus_size_units = coverage_info.quarantine_size_units corpus_size_bytes = coverage_info.quarantine_size_bytes corpus_location = coverage_info.quarantine_location if corpus_size_units is None or corpus_size_bytes is None: return None display_value = '%d (%s)' % (corpus_size_units, utils.get_size_string(corpus_size_bytes)) return BuiltinFieldData( display_value, sort_key=corpus_size_units, link=corpus_location) class CoverageReportField(BaseCoverageField): VALUE_TYPE = str def __init__(self, ctx=None): super(CoverageReportField, self).__init__(ctx) def get(self, group_by, group_by_value): coverage_info = self.get_coverage_info(group_by, group_by_value) if not coverage_info or not coverage_info.html_report_url: return None display_value = 'Coverage' return BuiltinFieldData(display_value, link=coverage_info.html_report_url) def _logs_bucket_key_fn(func, args, kwargs): return 'fuzzer_logs_bucket:' + args[1] class FuzzerRunLogsContext(BuiltinFieldContext): MEMCACHE_TTL = 30 * 60 def __init__(self, fuzzer=None, jobs=None): super(FuzzerRunLogsContext, self).__init__(fuzzer=fuzzer, jobs=jobs) @memoize.wrap(memoize.FifoInMemory(1024)) def 
_get_logs_bucket_from_job(self, job_type): return data_handler.get_value_from_job_definition_or_environment( job_type, 'FUZZ_LOGS_BUCKET') @memoize.wrap(memoize.Memcache(MEMCACHE_TTL, key_fn=_logs_bucket_key_fn)) def _get_logs_bucket_from_fuzzer(self, fuzzer_name): jobs = [ mapping.job for mapping in fuzz_target_utils.get_fuzz_target_jobs( fuzz_target_name=fuzzer_name) ] if not jobs: return None bucket = self._get_logs_bucket_from_job(jobs[0]) if all(bucket == self._get_logs_bucket_from_job(job) for job in jobs[1:]): return bucket return None def get_logs_bucket(self, fuzzer_name=None, job_type=None): if job_type: return self._get_logs_bucket_from_job(job_type) if fuzzer_name: return self._get_logs_bucket_from_fuzzer(fuzzer_name) return None class FuzzerRunLogsField(BuiltinField): CONTEXT_CLASS = FuzzerRunLogsContext VALUE_TYPE = str def _get_logs_bucket_path(self, group_by, group_by_value): fuzzer = self.ctx.fuzzer job = self.ctx.single_job_or_none() date = None if group_by == QueryGroupBy.GROUP_BY_FUZZER: fuzzer = group_by_value elif group_by == QueryGroupBy.GROUP_BY_DAY: if not fuzzer: return None if not job: date = None else: date = group_by_value elif group_by == QueryGroupBy.GROUP_BY_JOB: job = group_by_value else: return None if not fuzzer: return None logs_bucket = self.ctx.get_logs_bucket(fuzzer_name=fuzzer, job_type=job) if not logs_bucket: return None return 'gs:/' + fuzzer_logs.get_logs_directory(logs_bucket, fuzzer, job, date) def get(self, group_by, group_by_value): logs_path = self._get_logs_bucket_path(group_by, group_by_value) if not logs_path: return None return BuiltinFieldData('Logs', link=logs_path) class PerformanceReportField(BuiltinField): CONTEXT_CLASS = FuzzerRunLogsContext VALUE_TYPE = str def _get_performance_report_path(self, group_by, group_by_value): fuzzer = self.ctx.fuzzer job = self.ctx.single_job_or_none() date = 'latest' if group_by == QueryGroupBy.GROUP_BY_FUZZER: fuzzer = group_by_value elif group_by == QueryGroupBy.GROUP_BY_JOB: job = group_by_value elif group_by == QueryGroupBy.GROUP_BY_DAY: date = group_by_value else: return None if not fuzzer or not job: return None return PERFORMANCE_REPORT_VIEWER_PATH.format( fuzzer=fuzzer, job=job, date=date) def get(self, group_by, group_by_value): report_path = self._get_performance_report_path(group_by, group_by_value) if not report_path: return None return BuiltinFieldData('Performance', link=report_path) class QueryField(object): def __init__(self, table_alias, field_name, aggregate_function, select_alias=None): self.table_alias = table_alias self.name = field_name self.aggregate_function = aggregate_function self.select_alias = select_alias or field_name def is_custom(self): return (self.aggregate_function and self.aggregate_function.lower() == 'custom') def __str__(self): if self.aggregate_function: result = '%s(%s.%s)' % (self.aggregate_function, self.table_alias, self.name) else: result = '%s.%s' % (self.table_alias, self.name) if self.select_alias: result += ' as ' + self.select_alias return result class Query(object): def _ensure_valid_name(self, name, regex): if name and not regex.match(name): raise FuzzerStatsException('Invalid fuzzer or job name.') def __init__(self, fuzzer_name, job_types, query_fields, group_by, date_start, date_end, base_table, alias): assert group_by is not None self._ensure_valid_name(fuzzer_name, data_types.Fuzzer.VALID_NAME_REGEX) if job_types: for job_type in job_types: self._ensure_valid_name(job_type, data_types.Job.VALID_NAME_REGEX) self.fuzzer_name = fuzzer_name 
self.job_types = job_types self.query_fields = query_fields self.group_by = group_by self.date_start = date_start self.date_end = date_end self.base_table = base_table self.alias = alias self.fuzzer_or_engine_name = get_fuzzer_or_engine_name(fuzzer_name) def _group_by_select(self): if self.group_by == QueryGroupBy.GROUP_BY_DAY: return ('TIMESTAMP_TRUNC(TIMESTAMP_SECONDS(CAST(' 'timestamp AS INT64)), DAY, "UTC") as date') if self.group_by == QueryGroupBy.GROUP_BY_TIME: return 'TIMESTAMP_SECONDS(CAST(timestamp AS INT64)) as time' return group_by_to_field_name(self.group_by) def _group_by(self): return group_by_to_field_name(self.group_by) def _select_fields(self): group_by_select = self._group_by_select() fields = [group_by_select] if group_by_select else [] for field in self.query_fields: if field.is_custom(): continue if field.aggregate_function: fields.append('%s(%s) as %s' % (field.aggregate_function, field.name, field.select_alias)) else: fields.append('%s as %s' % (field.name, field.select_alias)) return ', '.join(fields) def _table_name(self): app_id = utils.get_application_id() dataset = dataset_name(self.fuzzer_or_engine_name) return '`%s`.%s.%s' % (app_id, dataset, self.base_table) def _where(self): result = [] result.extend(self._partition_selector()) result.extend(self._job_and_fuzzer_selector()) result = ' AND '.join(result) if result: return 'WHERE ' + result return ''
Apache License 2.0
haohanwang/cf
vanilla/run.py
loadData
python
def loadData():
    Xtrain = None
    Ytrain = None
    Xval = None
    Yval = None
    Xtest = None
    Ytest = None
    return Xtrain, Ytrain, Xval, Yval, Xtest, Ytest
example method for loading data
    X: data
    Y: labels
    train: training data
    val: validation data
    test: testing data
https://github.com/haohanwang/cf/blob/b8768788ae82149abebf7a44a912ad07d6f16aa5/vanilla/run.py#L15-L30
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import csv
import time
import json
import argparse

import numpy as np

sys.path.append('../')
MIT License
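Since loadData above is only a stub that returns None for every split, here is a rough sketch of how it could be filled in with synthetic numpy arrays; the shapes, the toy labels and the 80/10/10 split are illustrative assumptions, not part of the original repository.

import numpy as np

def loadData():
    # Hypothetical illustration: replace with real feature/label loading.
    rng = np.random.RandomState(0)
    X = rng.randn(1000, 20)                  # 1000 samples, 20 features (assumed)
    Y = (X.sum(axis=1) > 0).astype(int)      # toy binary labels
    # Assumed 80/10/10 train/val/test split.
    Xtrain, Ytrain = X[:800], Y[:800]
    Xval, Yval = X[800:900], Y[800:900]
    Xtest, Ytest = X[900:], Y[900:]
    return Xtrain, Ytrain, Xval, Yval, Xtest, Ytest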
googleapis/gapic-generator-python
tests/integration/goldens/logging/google/cloud/logging_v2/services/logging_service_v2/client.py
LoggingServiceV2Client.list_log_entries
python
def list_log_entries(self,
        request: Union[logging.ListLogEntriesRequest, dict] = None,
        *,
        resource_names: Sequence[str] = None,
        filter: str = None,
        order_by: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> pagers.ListLogEntriesPager:
    has_flattened_params = any([resource_names, filter, order_by])
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')
    if not isinstance(request, logging.ListLogEntriesRequest):
        request = logging.ListLogEntriesRequest(request)
        if resource_names is not None:
            request.resource_names = resource_names
        if filter is not None:
            request.filter = filter
        if order_by is not None:
            request.order_by = order_by
    rpc = self._transport._wrapped_methods[self._transport.list_log_entries]
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    response = pagers.ListLogEntriesPager(
        method=rpc,
        request=request,
        response=response,
        metadata=metadata,
    )
    return response
r"""Lists log entries. Use this method to retrieve log entries that originated from a project/folder/organization/billing account. For ways to export log entries, see `Exporting Logs <https://cloud.google.com/logging/docs/export>`__. Args: request (Union[google.cloud.logging_v2.types.ListLogEntriesRequest, dict]): The request object. The parameters to `ListLogEntries`. resource_names (Sequence[str]): Required. Names of one or more parent resources from which to retrieve log entries: :: "projects/[PROJECT_ID]" "organizations/[ORGANIZATION_ID]" "billingAccounts/[BILLING_ACCOUNT_ID]" "folders/[FOLDER_ID]" May alternatively be one or more views projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] organization/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID] Projects listed in the ``project_ids`` field are added to this list. This corresponds to the ``resource_names`` field on the ``request`` instance; if ``request`` is provided, this should not be set. filter (str): Optional. A filter that chooses which log entries to return. See `Advanced Logs Queries <https://cloud.google.com/logging/docs/view/advanced-queries>`__. Only log entries that match the filter are returned. An empty filter matches all log entries in the resources listed in ``resource_names``. Referencing a parent resource that is not listed in ``resource_names`` will cause the filter to return no results. The maximum length of the filter is 20000 characters. This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. order_by (str): Optional. How the results should be sorted. Presently, the only permitted values are ``"timestamp asc"`` (default) and ``"timestamp desc"``. The first option returns entries in order of increasing values of ``LogEntry.timestamp`` (oldest first), and the second option returns entries in order of decreasing timestamps (newest first). Entries with equal timestamps are returned in order of their ``insert_id`` values. This corresponds to the ``order_by`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.logging_v2.services.logging_service_v2.pagers.ListLogEntriesPager: Result returned from ListLogEntries. Iterating over this object will yield results and resolve additional pages automatically.
https://github.com/googleapis/gapic-generator-python/blob/582fed9c43bd8c1a3c5a9a7705fa2e39b729b910/tests/integration/goldens/logging/google/cloud/logging_v2/services/logging_service_v2/client.py#L580-L707
from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Iterable, Iterator, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.transport import mtls from google.auth.transport.grpc import SslCredentials from google.auth.exceptions import MutualTLSChannelError from google.oauth2 import service_account OptionalRetry = Union[retries.Retry, object] from google.api import monitored_resource_pb2 from google.cloud.logging_v2.services.logging_service_v2 import pagers from google.cloud.logging_v2.types import log_entry from google.cloud.logging_v2.types import logging from .transports.base import LoggingServiceV2Transport, DEFAULT_CLIENT_INFO from .transports.grpc import LoggingServiceV2GrpcTransport from .transports.grpc_asyncio import LoggingServiceV2GrpcAsyncIOTransport class LoggingServiceV2ClientMeta(type): _transport_registry = OrderedDict() _transport_registry["grpc"] = LoggingServiceV2GrpcTransport _transport_registry["grpc_asyncio"] = LoggingServiceV2GrpcAsyncIOTransport def get_transport_class(cls, label: str = None, ) -> Type[LoggingServiceV2Transport]: if label: return cls._transport_registry[label] return next(iter(cls._transport_registry.values())) class LoggingServiceV2Client(metaclass=LoggingServiceV2ClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "logging.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): credentials = service_account.Credentials.from_service_account_file( filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> LoggingServiceV2Transport: return self._transport @staticmethod def log_path(project: str,log: str,) -> str: return "projects/{project}/logs/{log}".format(project=project, log=log, ) @staticmethod def parse_log_path(path: str) -> Dict[str,str]: m = re.match(r"^projects/(?P<project>.+?)/logs/(?P<log>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str, ) -> str: return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str,str]: m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str, ) -> str: return "folders/{folder}".format(folder=folder, ) @staticmethod def parse_common_folder_path(path: str) -> Dict[str,str]: m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str, ) -> str: return "organizations/{organization}".format(organization=organization, ) @staticmethod def parse_common_organization_path(path: str) -> Dict[str,str]: m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str, ) -> str: return "projects/{project}".format(project=project, ) @staticmethod def parse_common_project_path(path: str) -> Dict[str,str]: m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str, ) -> str: return "projects/{project}/locations/{location}".format(project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str,str]: m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__(self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, LoggingServiceV2Transport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True 
client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " "values: never, auto, always" ) if isinstance(transport, LoggingServiceV2Transport): if credentials or client_options.credentials_file: raise ValueError("When providing a transport instance, " "provide its credentials directly.") if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, ) def delete_log(self, request: Union[logging.DeleteLogRequest, dict] = None, *, log_name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: has_flattened_params = any([log_name]) if request is not None and has_flattened_params: raise ValueError('If the `request` argument is set, then none of ' 'the individual field arguments should be set.') if not isinstance(request, logging.DeleteLogRequest): request = logging.DeleteLogRequest(request) if log_name is not None: request.log_name = log_name rpc = self._transport._wrapped_methods[self._transport.delete_log] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ("log_name", request.log_name), )), ) rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def write_log_entries(self, request: Union[logging.WriteLogEntriesRequest, dict] = None, *, log_name: str = None, resource: monitored_resource_pb2.MonitoredResource = None, labels: Sequence[logging.WriteLogEntriesRequest.LabelsEntry] = None, entries: Sequence[log_entry.LogEntry] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> logging.WriteLogEntriesResponse: has_flattened_params = any([log_name, resource, labels, entries]) if request is not None and has_flattened_params: raise ValueError('If the `request` argument is set, then none of ' 'the individual field arguments should be set.') if not isinstance(request, logging.WriteLogEntriesRequest): request = logging.WriteLogEntriesRequest(request) if log_name is not None: request.log_name = log_name if resource is not None: request.resource = resource if labels is not None: request.labels = labels if entries is not None: request.entries = entries rpc = self._transport._wrapped_methods[self._transport.write_log_entries] response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) return response
Apache License 2.0
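A hedged usage sketch for the generated client above; it assumes the package is importable as google.cloud.logging_v2, that Application Default Credentials are configured, and that "my-project" is replaced with a real project id.

from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client

client = LoggingServiceV2Client()  # picks up Application Default Credentials
pager = client.list_log_entries(
    resource_names=["projects/my-project"],   # hypothetical project
    filter='severity>=ERROR',
    order_by="timestamp desc",
)
for entry in pager:   # the pager resolves additional pages automatically
    print(entry.log_name, entry.timestamp)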
nastools/homeassistant
homeassistant/__main__.py
get_arguments
python
def get_arguments() -> argparse.Namespace:
    import homeassistant.config as config_util
    parser = argparse.ArgumentParser(
        description="Home Assistant: Observe, Control, Automate.")
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument(
        '-c', '--config',
        metavar='path_to_config_dir',
        default=config_util.get_default_config_dir(),
        help="Directory that contains the Home Assistant configuration")
    parser.add_argument(
        '--demo-mode',
        action='store_true',
        help='Start Home Assistant in demo mode')
    parser.add_argument(
        '--debug',
        action='store_true',
        help='Start Home Assistant in debug mode')
    parser.add_argument(
        '--open-ui',
        action='store_true',
        help='Open the webinterface in a browser')
    parser.add_argument(
        '--skip-pip',
        action='store_true',
        help='Skips pip install of required packages on startup')
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Enable verbose logging to file.")
    parser.add_argument(
        '--pid-file',
        metavar='path_to_pid_file',
        default=None,
        help='Path to PID file useful for running as daemon')
    parser.add_argument(
        '--log-rotate-days',
        type=int,
        default=None,
        help='Enables daily log rotation and keeps up to the specified days')
    parser.add_argument(
        '--runner',
        action='store_true',
        help='On restart exit with code {}'.format(RESTART_EXIT_CODE))
    parser.add_argument(
        '--script',
        nargs=argparse.REMAINDER,
        help='Run one of the embedded scripts')
    if os.name == "posix":
        parser.add_argument(
            '--daemon',
            action='store_true',
            help='Run Home Assistant as daemon')

    arguments = parser.parse_args()
    if os.name != "posix" or arguments.debug or arguments.runner:
        setattr(arguments, 'daemon', False)

    return arguments
Get parsed passed in arguments.
https://github.com/nastools/homeassistant/blob/7ca1180bd42713f2d77bbc3f0b27b231ba8784aa/homeassistant/__main__.py#L119-L178
from __future__ import print_function import argparse import os import platform import subprocess import sys import threading from typing import Optional, List from homeassistant.const import ( __version__, EVENT_HOMEASSISTANT_START, REQUIRED_PYTHON_VER, REQUIRED_PYTHON_VER_WIN, RESTART_EXIT_CODE, ) from homeassistant.util.async import run_callback_threadsafe def monkey_patch_asyncio(): import asyncio.tasks class IgnoreCalls: def add(self, other): return asyncio.tasks.Task._all_tasks = IgnoreCalls() try: del asyncio.tasks.Task.__del__ except: pass def validate_python() -> None: if sys.platform == "win32" and sys.version_info[:3] < REQUIRED_PYTHON_VER_WIN: print("Home Assistant requires at least Python {}.{}.{}".format( *REQUIRED_PYTHON_VER_WIN)) sys.exit(1) elif sys.version_info[:3] < REQUIRED_PYTHON_VER: print("Home Assistant requires at least Python {}.{}.{}".format( *REQUIRED_PYTHON_VER)) sys.exit(1) def ensure_config_path(config_dir: str) -> None: import homeassistant.config as config_util lib_dir = os.path.join(config_dir, 'deps') if not os.path.isdir(config_dir): if config_dir != config_util.get_default_config_dir(): print(('Fatal Error: Specified configuration directory does ' 'not exist {} ').format(config_dir)) sys.exit(1) try: os.mkdir(config_dir) except OSError: print(('Fatal Error: Unable to create default configuration ' 'directory {} ').format(config_dir)) sys.exit(1) if not os.path.isdir(lib_dir): try: os.mkdir(lib_dir) except OSError: print(('Fatal Error: Unable to create library ' 'directory {} ').format(lib_dir)) sys.exit(1) def ensure_config_file(config_dir: str) -> str: import homeassistant.config as config_util config_path = config_util.ensure_config_exists(config_dir) if config_path is None: print('Error getting configuration path') sys.exit(1) return config_path
MIT License
snakemake/snakemake
snakemake/benchmark.py
print_benchmark_records
python
def print_benchmark_records(records, file_):
    print(BenchmarkRecord.get_header(), file=file_)
    for r in records:
        print(r.to_tsv(), file=file_)
Write benchmark records to file-like object
https://github.com/snakemake/snakemake/blob/ec87b97d8a7a92f3734001433d7fba3d5e4a642a/snakemake/benchmark.py#L315-L319
__author__ = "Manuel Holtgrewe" __copyright__ = "Copyright 2017, Manuel Holtgrewe" __email__ = "manuel.holtgrewe@bihealth.de" __license__ = "MIT" import contextlib import datetime from itertools import chain import os import sys import time import threading import psutil from snakemake.exceptions import WorkflowError BENCHMARK_INTERVAL = 30 BENCHMARK_INTERVAL_SHORT = 0.5 class BenchmarkRecord: @classmethod def get_header(klass): return "\t".join( ( "s", "h:m:s", "max_rss", "max_vms", "max_uss", "max_pss", "io_in", "io_out", "mean_load", "cpu_time", ) ) def __init__( self, running_time=None, max_rss=None, max_vms=None, max_uss=None, max_pss=None, io_in=None, io_out=None, cpu_usages=None, cpu_time=None, ): self.running_time = running_time self.max_rss = max_rss self.max_vms = max_vms self.max_uss = max_uss self.max_pss = max_pss self.io_in = io_in self.io_out = io_out self.cpu_usages = cpu_usages or 0 self.cpu_time = cpu_time or 0 self.first_time = None self.prev_time = None def to_tsv(self): def to_tsv_str(x): if x is None: return "-" elif isinstance(x, float): return "{:.2f}".format(x) else: return str(x) def timedelta_to_str(x): mm, ss = divmod(x.seconds, 60) hh, mm = divmod(mm, 60) s = "%d:%02d:%02d" % (hh, mm, ss) if x.days: def plural(n): return n, abs(n) != 1 and "s" or "" s = ("%d day%s, " % plural(x.days)) + s return s return "\t".join( map( to_tsv_str, ( "{:.4f}".format(self.running_time), timedelta_to_str(datetime.timedelta(seconds=self.running_time)), self.max_rss, self.max_vms, self.max_uss, self.max_pss, self.io_in, self.io_out, self.cpu_usages / self.running_time, self.cpu_time, ), ) ) class DaemonTimer(threading.Thread): def __init__(self, interval, function, args=None, kwargs=None): threading.Thread.__init__(self, daemon=True) self.interval = interval self.function = function self.args = args if args is not None else [] self.kwargs = kwargs if kwargs is not None else {} self.finished = threading.Event() def cancel(self): self.finished.set() def run(self): self.finished.wait(self.interval) if not self.finished.is_set(): self.function(*self.args, **self.kwargs) self.finished.set() class ScheduledPeriodicTimer: def __init__(self, interval): self._times_called = 0 self._interval = interval self._timer = None self._stopped = True def start(self): self.work() self._times_called += 1 self._stopped = False if self._times_called > self._interval: self._timer = DaemonTimer(self._interval, self._action) else: self._timer = DaemonTimer(BENCHMARK_INTERVAL_SHORT, self._action) self._timer.start() def _action(self): self.work() self._times_called += 1 if self._times_called > self._interval: self._timer = DaemonTimer(self._interval, self._action) else: self._timer = DaemonTimer(BENCHMARK_INTERVAL_SHORT, self._action) self._timer.start() def work(self): raise NotImplementedError("Override me!") def cancel(self): self._timer.cancel() self._stopped = True class BenchmarkTimer(ScheduledPeriodicTimer): def __init__(self, pid, bench_record, interval=BENCHMARK_INTERVAL): ScheduledPeriodicTimer.__init__(self, interval) self.pid = pid self.main = psutil.Process(self.pid) self.bench_record = bench_record self.procs = {} def work(self): try: self._update_record() except psutil.NoSuchProcess: pass except AttributeError: pass def _update_record(self): rss, vms, uss, pss = 0, 0, 0, 0 io_in, io_out = 0, 0 check_io = True cpu_usages = 0 cpu_time = 0 try: this_time = time.time() for proc in chain((self.main,), self.main.children(recursive=True)): proc = self.procs.setdefault(proc.pid, proc) with 
proc.oneshot(): if self.bench_record.prev_time: cpu_usages += proc.cpu_percent() * ( this_time - self.bench_record.prev_time ) meminfo = proc.memory_full_info() rss += meminfo.rss vms += meminfo.vms uss += meminfo.uss pss += meminfo.pss if check_io: try: ioinfo = proc.io_counters() io_in += ioinfo.read_bytes io_out += ioinfo.write_bytes except NotImplementedError as nie: check_io = False cpu_times = proc.cpu_times() cpu_time += cpu_times.user + cpu_times.system self.bench_record.prev_time = this_time if not self.bench_record.first_time: self.bench_record.prev_time = this_time rss /= 1024 * 1024 vms /= 1024 * 1024 uss /= 1024 * 1024 pss /= 1024 * 1024 if check_io: io_in /= 1024 * 1024 io_out /= 1024 * 1024 else: io_in = None io_out = None except psutil.Error as e: return self.bench_record.max_rss = max(self.bench_record.max_rss or 0, rss) self.bench_record.max_vms = max(self.bench_record.max_vms or 0, vms) self.bench_record.max_uss = max(self.bench_record.max_uss or 0, uss) self.bench_record.max_pss = max(self.bench_record.max_pss or 0, pss) self.bench_record.io_in = io_in self.bench_record.io_out = io_out self.bench_record.cpu_usages += cpu_usages self.bench_record.cpu_time = cpu_time @contextlib.contextmanager def benchmarked(pid=None, benchmark_record=None, interval=BENCHMARK_INTERVAL): result = benchmark_record or BenchmarkRecord() if pid is False: yield result else: start_time = time.time() bench_thread = BenchmarkTimer(int(pid or os.getpid()), result, interval) bench_thread.start() yield result bench_thread.cancel() result.running_time = time.time() - start_time
MIT License
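A short, hedged usage example for the benchmarking helpers above; it assumes psutil is installed and that the module is importable as snakemake.benchmark.

import sys
import time
from snakemake.benchmark import benchmarked, print_benchmark_records

# Measure a short piece of work in the current process.
with benchmarked() as record:
    time.sleep(2)                        # stand-in for the benchmarked job
    _ = [i * i for i in range(10 ** 6)]  # some CPU work so the sampler sees activity

# Write the TSV header plus one record line to stdout.
print_benchmark_records([record], sys.stdout)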
trungdq88/logmine
src/map_reduce.py
MapReduce.__init__
python
def __init__(self, map_func, reduce_func, params=None):
    if STATIC_POOL[0] is None:
        original_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
        STATIC_POOL[0] = multiprocessing.Pool()
        signal.signal(signal.SIGINT, original_handler)

    self.map_func = map_func
    self.reduce_func = reduce_func
    self.pool = STATIC_POOL[0]
    self.params = params
map_func
    Function to map inputs to intermediate data. Takes as argument one
    input value and returns a tuple with the key and a value to be reduced.
reduce_func
    Function to reduce partitioned version of intermediate data to final
    output. Takes as argument a key as produced by map_func and a sequence
    of the values associated with that key.
num_workers
    The number of workers to create in the pool. Defaults to the number of
    CPUs available on the current host.
https://github.com/trungdq88/logmine/blob/a7595c6a0b313b43969199c18465cc8bec3b57d1/src/map_reduce.py#L15-L44
import signal
import collections
import itertools
import multiprocessing

STATIC_POOL = [None]


class MapReduce:
MIT License
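Only the constructor of MapReduce is quoted above, so the map/partition/reduce driver is missing. The sketch below reproduces the pattern the class wraps (word counting over a multiprocessing.Pool) under the assumption that map_func returns a list of (key, value) pairs and reduce_func collapses the values gathered for one key.

import collections
import itertools
import multiprocessing

def map_words(line):
    # One (key, value) pair per word; a real map_func may emit several kinds of keys.
    return [(word.lower(), 1) for word in line.split()]

def reduce_counts(item):
    key, occurrences = item
    return (key, sum(occurrences))

if __name__ == '__main__':
    lines = ["the quick brown fox", "the lazy dog", "the fox"]
    with multiprocessing.Pool() as pool:
        mapped = pool.map(map_words, lines)
        # Partition intermediate values by key before the reduce step.
        partitioned = collections.defaultdict(list)
        for key, value in itertools.chain.from_iterable(mapped):
            partitioned[key].append(value)
        reduced = pool.map(reduce_counts, partitioned.items())
    print(sorted(reduced))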
kuri65536/python-for-android
python-build/python-libs/gdata/build/lib/gdata/Crypto/PublicKey/RSA.py
generate
python
def generate(bits, randfunc, progress_func=None):
    obj=RSAobj()

    # Generate the prime factors of the modulus.
    if progress_func:
        progress_func('p,q\n')
    p = q = 1L
    while number.size(p*q) < bits:
        p = pubkey.getPrime(bits/2, randfunc)
        q = pubkey.getPrime(bits/2, randfunc)

    # Keep p smaller than q so that u = p^-1 mod q is well defined.
    if p > q:
        (p, q)=(q, p)
    obj.p = p
    obj.q = q

    if progress_func:
        progress_func('u\n')
    obj.u = pubkey.inverse(obj.p, obj.q)
    obj.n = obj.p*obj.q

    # Fixed public exponent; derive the private exponent from phi(n).
    obj.e = 65537L
    if progress_func:
        progress_func('d\n')
    obj.d=pubkey.inverse(obj.e, (obj.p-1)*(obj.q-1))

    assert bits <= 1+obj.size(), "Generated key is too small"
    return obj
generate(bits:int, randfunc:callable, progress_func:callable)

Generate an RSA key of length 'bits', using 'randfunc' to get
random data and 'progress_func', if present, to display the
progress of the key generation.
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-build/python-libs/gdata/build/lib/gdata/Crypto/PublicKey/RSA.py#L26-L61
__revision__ = "$Id: RSA.py,v 1.20 2004/05/06 12:52:54 akuchling Exp $"

from Crypto.PublicKey import pubkey
from Crypto.Util import number

try:
    from Crypto.PublicKey import _fastmath
except ImportError:
    _fastmath = None

class error (Exception):
    pass
Apache License 2.0
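A toy, standard-library walk-through of the same key-generation steps (Python 3.8+ for the modular inverse via pow); the tiny fixed primes replace pubkey.getPrime and must never be used for real keys.

# Toy illustration only; such small parameters offer no security whatsoever.
p, q = 53, 61                      # stand-ins for pubkey.getPrime(bits/2, randfunc)
if p > q:
    p, q = q, p                    # keep p < q, as in the routine above
u = pow(p, -1, q)                  # inverse of p modulo q (obj.u)
n = p * q                          # modulus (obj.n)
e = 65537                          # public exponent (obj.e)
d = pow(e, -1, (p - 1) * (q - 1))  # private exponent (obj.d)

message = 42
ciphertext = pow(message, e, n)
assert pow(ciphertext, d, n) == message
print(n, e, d)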
blockio/multimerchant-python
multimerchant/wallet/keys.py
PrivateKey.get_extended_key
python
def get_extended_key(self):
    network_hex_chars = hexlify(
        chr_py2(self.network.SECRET_KEY))
    return ensure_bytes(network_hex_chars + self.get_key())
Get the extended key. Extended keys contain the network bytes and the public or private key.
https://github.com/blockio/multimerchant-python/blob/a1a8d48fc49aaaa46449f5aef1d9beb8e49f1c4d/multimerchant/wallet/keys.py#L69-L77
from binascii import hexlify from binascii import unhexlify from collections import namedtuple from hashlib import sha256 import base58 from ecdsa import SigningKey from ecdsa import VerifyingKey from ecdsa import SECP256k1 from ecdsa.ellipticcurve import Point as _ECDSA_Point from ecdsa.numbertheory import square_root_mod_prime import six from ..network import BitcoinMainNet from .utils import chr_py2 from .utils import ensure_bytes from .utils import ensure_str from .utils import hash160 from .utils import is_hex_string from .utils import long_or_int from .utils import long_to_hex from .utils import memoize PublicPair = namedtuple("PublicPair", ["x", "y"]) class Key(object): def __init__(self, network, compressed=False): self.network = network self.compressed = compressed def __eq__(self, other): return (other and self.network == other.network and type(self) == type(other)) def __ne__(self, other): return not self == other __hash__ = object.__hash__ def get_key(self): raise NotImplementedError() class PrivateKey(Key): def __init__(self, secret_exponent, network=BitcoinMainNet, *args, **kwargs): if not isinstance(secret_exponent, six.integer_types): raise ValueError("secret_exponent must be a long") super(PrivateKey, self).__init__(network=network, *args, **kwargs) self._private_key = SigningKey.from_secret_exponent( secret_exponent, curve=SECP256k1) def get_key(self): return ensure_bytes(hexlify(self._private_key.to_string())) @memoize def get_public_key(self): return PublicKey.from_verifying_key( self._private_key.get_verifying_key(), network=self.network, compressed=self.compressed)
MIT License
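A stdlib-only sketch of what get_extended_key concatenates; the 0x80 version byte is the usual Bitcoin mainnet private-key prefix, and the 32-byte key below is a dummy placeholder for PrivateKey.get_key().

from binascii import hexlify

SECRET_KEY = 0x80                       # Bitcoin mainnet private-key version byte
key_hex = hexlify(b'\x01' * 32)         # placeholder for PrivateKey.get_key()

network_hex_chars = hexlify(bytes([SECRET_KEY]))
extended_key = network_hex_chars + key_hex
print(extended_key[:2], len(extended_key))   # b'80', 66 hex characters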
mariacer/cl_in_rnns
sequential/audioset/train_utils_audioset.py
get_loss_func
python
def get_loss_func(config, device, logger, ewc_loss=False): if not ewc_loss: ts_weighting = config.ts_weighting else: ts_weighting = config.ts_weighting_fisher purpose = 'Fisher' if ewc_loss else 'loss' if ts_weighting == 'none' or ts_weighting == 'unpadded': logger.debug('Considering the NLL of all timesteps for %s computation.' % purpose) elif ts_weighting == 'last': logger.debug('Considering the NLL of last timestep for ' + '%s computation.' % purpose) elif ts_weighting == 'last_ten_percent': logger.debug('Considering the NLL of last 10% of timestep ' + 'for %s computation.' % purpose) else: assert ts_weighting == 'discount' logger.debug('Weighting the NLL of the later timesteps more than ' + 'the NLL of earlier timesteps for %s computation.' % purpose) ce_loss = tuseq.sequential_nll(loss_type='ce', reduction='sum') seq_length = 10 ts_factors = torch.zeros(seq_length, 1).to(device) if ts_weighting == 'none' or ts_weighting == 'unpadded': ts_factors = None if ts_weighting == 'last': ts_factors[-1, :] = 1 elif ts_weighting == 'last_ten_percent': sl_10 = seq_length // 10 ts_factors[-sl_10:, :] = 1 else: assert ts_weighting == 'discount' gamma = 1. discount = 0.9 for tt in range(seq_length-1, -1, -1): ts_factors[tt, 0] = gamma gamma *= discount if ts_factors is not None: ts_factors /= ts_factors.sum() loss_func = lambda Y, T, dh, ao, ef, _: ce_loss(Y, T, None, None, None, ts_factors=ts_factors, beta=None) return loss_func
Get a function handle that can be used as task loss function. Note, this function makes use of function :func:`sequential.train_utils_sequential.sequential_nll`. Args: config (argparse.Namespace): The command line arguments. device: Torch device (cpu or gpu). logger: Console (and file) logger. ewc_loss (bool): Whether the loss is determined for task training or to compute Fisher elements via EWC. Note, based on the user configuration, the loss computation might be different. Returns: (func): A function handler as described by argument ``custom_nll`` of function :func:`utils.ewc_regularizer.compute_fisher`, if option ``pass_ids=True``. Note: This loss **sums** the NLL across the batch dimension. A proper scaling wrt other loss terms during training would require a multiplication of the loss with a factor :math:`N/B`, where :math:`N` is the training set size and :math:`B` is the mini-batch size.
https://github.com/mariacer/cl_in_rnns/blob/333b8e03391600a8e3df7d684a3f171b135d273a/sequential/audioset/train_utils_audioset.py#L81-L174
import numpy as np import torch from data.timeseries import audioset_data from data.timeseries.split_audioset import get_split_audioset_handlers from mnets.classifier_interface import Classifier from sequential.replay_utils import gauss_reconstruction_loss from sequential import train_utils_sequential as tuseq def _generate_tasks(config, logger, experiment='split_audioset', writer=None): assert experiment in ['audioset', 'split_audioset'] if experiment == 'audioset': logger.info('Running multitask Audioset experiment.') dhandler = audioset_data.AudiosetData('../../datasets', use_one_hot=True, validation_size=config.val_set_size, rseed=config.data_random_seed) dhandler._data['task_id'] = 0 dhandlers = [dhandler] elif experiment.startswith('split'): logger.info('Running SplitAudioset experiment.') dhandlers = get_split_audioset_handlers('../../datasets', use_one_hot=True, num_tasks=config.num_tasks, num_classes_per_task=config.num_classes_per_task, rseed=config.data_random_seed, validation_size=config.val_set_size) for t, dh in enumerate(dhandlers): assert 'task_id' not in dh._data.keys() dh._data['task_id'] = t if config.num_tasks * config.num_classes_per_task < 10: logger.info('Running SplitAudioset experiments only for classes ' + '0 - %d.' % (config.num_tasks * config.num_classes_per_task - 1)) else: raise ValueError('Experiment type "%s" unknown.' % experiment) assert(len(dhandlers) == config.num_tasks) return dhandlers
Apache License 2.0
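The get_loss_func record above builds per-timestep weights and folds them into a sequence NLL. A standalone PyTorch sketch of the 'discount' weighting applied to a per-timestep cross-entropy is shown below; the sequence length, batch size and class count are made up, and F.cross_entropy stands in for the project's sequential_nll helper.

import torch
import torch.nn.functional as F

seq_length, batch_size, num_classes = 10, 4, 8    # assumed sizes
logits = torch.randn(seq_length, batch_size, num_classes)
targets = torch.randint(num_classes, (seq_length, batch_size))

# Discounted time-step weights, as in the 'discount' branch above.
ts_factors = torch.zeros(seq_length, 1)
gamma, discount = 1., 0.9
for tt in range(seq_length - 1, -1, -1):
    ts_factors[tt, 0] = gamma
    gamma *= discount
ts_factors /= ts_factors.sum()

# Per-timestep NLL, weighted over time and summed over the batch.
nll = F.cross_entropy(logits.view(-1, num_classes), targets.view(-1),
                      reduction='none').view(seq_length, batch_size)
loss = (ts_factors * nll).sum()
print(float(loss))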
tusimple/simpledet
models/tridentnet/resnet_v2.py
TridentResNetV2Builder.resnet_trident_unit
python
def resnet_trident_unit(cls, data, name, filter, stride, dilate, proj, norm_type, norm_mom, ndev, branch_ids, branch_bn_shared, branch_conv_shared, branch_deform=False): if branch_ids is None: branch_ids = range(len(data)) norm = X.normalizer_factory(type=norm_type, ndev=ndev, mom=norm_mom) bn1 = cls.bn_shared( data, name=name + "_bn1", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared) relu1 = [X.relu(bn) for bn in bn1] conv1 = cls.conv_shared( relu1, name=name + "_conv1", num_filter=filter // 4, kernel=(1, 1), branch_ids=branch_ids, share_weight=branch_conv_shared) bn2 = cls.bn_shared( conv1, name=name + "_bn2", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared) relu2 = [X.relu(bn) for bn in bn2] if not branch_deform: conv2 = cls.conv_shared( relu2, name=name + "_conv2", num_filter=filter // 4, kernel=(3, 3), pad=dilate, stride=stride, dilate=dilate, branch_ids=branch_ids, share_weight=branch_conv_shared) else: conv2_offset = cls.conv_shared( relu2, name=name + "_conv2_offset", num_filter=72, kernel=(3, 3), pad=(1, 1), stride=(1, 1), dilate=(1, 1), no_bias=False, branch_ids=branch_ids, share_weight=branch_conv_shared) conv2 = cls.deform_conv_shared( relu2, name=name + "_conv2", conv_offset=conv2_offset, num_filter=filter // 4, kernel=(3, 3), pad=dilate, stride=stride, dilate=dilate, num_deformable_group=4, branch_ids=branch_ids, share_weight=branch_conv_shared) bn3 = cls.bn_shared( conv2, name=name + "_bn3", normalizer=norm, branch_ids=branch_ids, share_weight=branch_bn_shared) relu3 = [X.relu(bn) for bn in bn3] conv3 = cls.conv_shared( relu3, name=name + "_conv3", num_filter=filter, kernel=(1, 1), branch_ids=branch_ids, share_weight=branch_conv_shared) if proj: shortcut = cls.conv_shared( relu1, name=name + "_sc", num_filter=filter, kernel=(1, 1), branch_ids=branch_ids, share_weight=branch_conv_shared) else: shortcut = data return [X.add(conv3_i, shortcut_i, name=name + "_plus_branch{}".format(i)) for i, conv3_i, shortcut_i in zip(branch_ids, conv3, shortcut)]
One resnet unit is comprised of 2 or 3 convolutions and a shortcut.

:param data:
:param name:
:param filter:
:param stride:
:param dilate:
:param proj:
:param norm_type:
:param norm_mom:
:param ndev:
:param branch_ids:
:param branch_bn_shared:
:param branch_conv_shared:
:param branch_deform:
:return:
https://github.com/tusimple/simpledet/blob/97413463f0bc3116f684eaf7031fd3dd6ded3149/models/tridentnet/resnet_v2.py#L104-L168
from __future__ import print_function import mxnet as mx import mxnext as X from mxnext.backbone.resnet_v2 import Builder bn_count = [10000] class TridentResNetV2Builder(Builder): def __init__(self): super().__init__() @staticmethod def bn_shared(data, name, normalizer, branch_ids=None, share_weight=True): if branch_ids is None: branch_ids = range(len(data)) gamma = X.var(name + "_gamma") beta = X.var(name + "_beta") moving_mean = X.var(name + "_moving_mean") moving_var = X.var(name + "_moving_var") bn_layers = [] for i, data_i in zip(branch_ids, data): if share_weight: bn_i = normalizer(data=data_i, name=name + "_shared%d" % i, gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var) else: bn_i = normalizer(data=data_i, name=name + "_branch%d" % i) bn_layers.append(bn_i) return bn_layers @staticmethod def conv_shared(data, name, kernel, num_filter, branch_ids=None, no_bias=True, share_weight=True, pad=(0, 0), stride=(1, 1), dilate=(1, 1)): if branch_ids is None: branch_ids = range(len(data)) weight = X.var(name + '_weight') if no_bias: bias = None else: bias = X.var(name + '_bias') conv_layers = [] for i in range(len(data)): data_i = data[i] stride_i = stride[i] if type(stride) is list else stride dilate_i = dilate[i] if type(dilate) is list else dilate pad_i = pad[i] if type(pad) is list else pad branch_i = branch_ids[i] if share_weight: conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i, name=name + '_shared%d' % branch_i, no_bias=no_bias, weight=weight, bias=bias) else: conv_i = X.conv(data=data_i, kernel=kernel, filter=num_filter, stride=stride_i, dilate=dilate_i, pad=pad_i, name=name + '_branch%d' % branch_i, no_bias=no_bias) conv_layers.append(conv_i) return conv_layers @staticmethod def deform_conv_shared(data, name, conv_offset, kernel, num_filter, branch_ids=None, no_bias=True, share_weight=True, num_deformable_group=4, pad=(0, 0), stride=(1, 1), dilate=(1, 1)): if branch_ids is None: branch_ids = range(len(data)) weight = X.var(name + '_weight') if no_bias: bias = None else: bias = X.var(name + '_bias') conv_layers = [] for i in range(len(data)): data_i = data[i] stride_i = stride[i] if type(stride) is list else stride dilate_i = dilate[i] if type(dilate) is list else dilate pad_i = pad[i] if type(pad) is list else pad conv_offset_i = conv_offset[i] if type(conv_offset) is list else conv_offset branch_i = branch_ids[i] if share_weight: conv_i = mx.contrib.symbol.DeformableConvolution( data=data_i, offset=conv_offset_i, kernel=kernel, num_filter=num_filter, stride=stride_i, num_deformable_group=4, dilate=dilate_i, pad=pad_i, no_bias=no_bias, weight=weight, bias=bias, name=name + '_shared%d' % branch_i) else: conv_i = mx.contrib.symbol.DeformableConvolution( data=data_i, offset=conv_offset_i, kernel=kernel, num_filter=num_filter, stride=stride_i, num_deformable_group=4, dilate=dilate_i, pad=pad_i, no_bias=no_bias, name=name + '_branch%d' % branch_i) conv_layers.append(conv_i) return conv_layers @staticmethod def stack_branch_symbols(data_list): data = mx.symbol.stack(*data_list, axis=1) data = mx.symbol.Reshape(data, (-3, -2)) return data @classmethod
Apache License 2.0
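conv_shared above reuses a single weight symbol across several branches. A minimal sketch of that weight-sharing idea with the raw mxnet symbol API (rather than the project's mxnext wrapper) follows; the filter count, kernel size and per-branch dilations are placeholder values.

import mxnet as mx

data = [mx.sym.Variable('branch%d_data' % i) for i in range(3)]
weight = mx.sym.Variable('conv_weight')   # one weight shared by all branches

branches = []
for i, (x, dilate) in enumerate(zip(data, [(1, 1), (2, 2), (3, 3)])):
    conv = mx.sym.Convolution(
        data=x, weight=weight, no_bias=True,
        num_filter=64, kernel=(3, 3), pad=dilate, dilate=dilate,
        name='conv_shared%d' % i)
    branches.append(conv)

out = mx.sym.Group(branches)
print(out.list_arguments())   # 'conv_weight' appears once, inputs appear per branch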
tensorpack/tensorpack
examples/DeepQNetwork/atari.py
AtariPlayer.__init__
python
def __init__(self, rom_file, viz=0, frame_skip=4, nullop_start=30, live_lost_as_eoe=True, max_num_frames=0, grayscale=True): super(AtariPlayer, self).__init__() if not os.path.isfile(rom_file) and '/' not in rom_file: rom_file = get_dataset_path('atari_rom', rom_file) assert os.path.isfile(rom_file), "ROM {} not found. Please download at {}".format(rom_file, ROM_URL) try: ALEInterface.setLoggerMode(ALEInterface.Logger.Error) except AttributeError: if execute_only_once(): logger.warn("You're not using latest ALE") with _ALE_LOCK: self.ale = ALEInterface() self.rng = get_rng(self) self.ale.setInt(b"random_seed", self.rng.randint(0, 30000)) self.ale.setInt(b"max_num_frames_per_episode", max_num_frames) self.ale.setBool(b"showinfo", False) self.ale.setInt(b"frame_skip", 1) self.ale.setBool(b'color_averaging', False) self.ale.setFloat(b'repeat_action_probability', 0.0) if isinstance(viz, six.string_types): assert os.path.isdir(viz), viz self.ale.setString(b'record_screen_dir', viz) viz = 0 if isinstance(viz, int): viz = float(viz) self.viz = viz if self.viz and isinstance(self.viz, float): self.windowname = os.path.basename(rom_file) cv2.namedWindow(self.windowname) self.ale.loadROM(rom_file.encode('utf-8')) self.width, self.height = self.ale.getScreenDims() self.actions = self.ale.getMinimalActionSet() self.live_lost_as_eoe = live_lost_as_eoe self.frame_skip = frame_skip self.nullop_start = nullop_start self.action_space = spaces.Discrete(len(self.actions)) self.grayscale = grayscale shape = (self.height, self.width) if grayscale else (self.height, self.width, 3) self.observation_space = spaces.Box( low=0, high=255, shape=shape, dtype=np.uint8) self._restart_episode()
Args:
    rom_file: path to the rom
    frame_skip: skip every k frames and repeat the action
    viz: visualization to be done.
        Set to 0 to disable.
        Set to a positive number to be the delay between frames to show.
        Set to a string to be a directory to store frames.
    nullop_start: start with random number of null ops.
    live_lost_as_eoe: consider loss of lives as end of episode. Useful for training.
    max_num_frames: maximum number of frames per episode.
    grayscale (bool): if True, return 2D image. Otherwise return HWC image.
https://github.com/tensorpack/tensorpack/blob/1a79d595f7eda9dc9dc8428f4461680ed2222ab6/examples/DeepQNetwork/atari.py#L33-L100
import numpy as np
import os
import threading
import cv2
import gym
import six
from atari_py.ale_python_interface import ALEInterface
from gym import spaces
from gym.envs.atari.atari_env import ACTION_MEANING

from tensorpack.utils import logger, execute_only_once, get_rng
from tensorpack.utils.fs import get_dataset_path

__all__ = ['AtariPlayer']

ROM_URL = "https://github.com/openai/atari-py/tree/gdb/atari_py/atari_roms"
_ALE_LOCK = threading.Lock()


class AtariPlayer(gym.Env):
Apache License 2.0
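A rough usage sketch for the AtariPlayer record above; it assumes an Atari ROM file is available locally and that the class implements the standard gym reset()/step() interface outside the quoted excerpt.

# Hypothetical usage; 'breakout.bin' must exist locally or in the atari_rom dataset dir.
player = AtariPlayer('breakout.bin', frame_skip=4, viz=0, live_lost_as_eoe=False)
print(player.action_space, player.observation_space)

obs = player.reset()
done = False
total_reward = 0.0
while not done:
    action = player.action_space.sample()           # random policy
    obs, reward, done, info = player.step(action)
    total_reward += reward
print('episode reward:', total_reward)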
terraref/computing-pipeline
scripts/environmental_logger/environmental_logger_json2netcdf.py
getSpectrometerInformation
python
def getSpectrometerInformation(arrayOfJSON):
    maxFixedIntensity = [int(intensityMembers["spectrometer"]["maxFixedIntensity"])
                         for intensityMembers in arrayOfJSON]
    integrationTime = [int(integrateMembers["spectrometer"]["integration time in µs"])
                       for integrateMembers in arrayOfJSON]

    return maxFixedIntensity, integrationTime
Collect information from the spectrometer with special care.
This information contains:
1. max fixed intensity
2. integration time
https://github.com/terraref/computing-pipeline/blob/5d0d089501154c6c0de68229579c131d79e39b5e/scripts/environmental_logger/environmental_logger_json2netcdf.py#L119-L131
import numpy as np import json import time import sys import os from datetime import date, datetime from netCDF4 import Dataset from environmental_logger_calculation import * _UNIT_DICTIONARY = {u'm': {"original":"meter", "SI":"meter", "power":1}, u"hPa": {"original":"hectopascal", "SI":"pascal", "power":1e2}, u"DegCelsius": {"original":"celsius", "SI":"celsius", "power":1}, u's': {"original":"second", "SI":"second", "power":1}, u'm/s': {"original":"meter second-1", "SI":"meter second-1", "power":1}, u"mm/h": {"original":"millimeter hour-1", "SI":"meter second-1", "power":2.78e-7}, u"relHumPerCent": {"original":"percent", "SI":"percent", "power":1}, u"?mol/(m^2*s)": {"original":"micromole meter-2 second-1", "SI":"mole second-1", "power":1e-6}, u"umol/(m^2*s)": {"original":"micromole meter-2 second-1", "SI":"mole second-1", "power":1e-6}, u'kilo Lux': {"original":"kiloLux", "SI":"lux", "power":1e3}, u'degrees': {"original":"degree", "SI":"degree", "power":1}, u'?s': {"original":"microsecond", "SI":"second", "power":1e-6}, u'us': {"original":"microsecond", "SI":"second", "power":1e-6}, u'ppm': {"original":"pascal meter-2", "SI":"pascal meter-2", "power":1}, '': ''} _NAMES = {'sensor par': 'Sensor Photosynthetically Active Radiation'} _UNIX_BASETIME = date(year=1970, month=1, day=1) _TIMESTAMP = lambda: time.strftime("%a %b %d %H:%M:%S %Y", time.localtime(int(time.time()))) def JSONHandler(fileLocation): with open(fileLocation, 'r') as fileHandler: return json.loads(fileHandler.read()) def renameTheValue(name): if name in _UNIT_DICTIONARY: name = _UNIT_DICTIONARY[name] elif name in _NAMES: name = _NAMES[name] return name.replace(" ", "_")
BSD 3-Clause New or Revised License
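A small hand-built input shows what getSpectrometerInformation expects; the "integration time in µs" key name follows the unit table in the surrounding context, and the numeric values are invented.

records = [
    {"spectrometer": {"maxFixedIntensity": "16383", "integration time in µs": "5000"}},
    {"spectrometer": {"maxFixedIntensity": "16383", "integration time in µs": "6000"}},
]

max_intensity, integration_time = getSpectrometerInformation(records)
print(max_intensity)      # [16383, 16383]
print(integration_time)   # [5000, 6000]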
azure/blobxfer
blobxfer/models/download.py
Descriptor.convert_vectored_io_slice_to_final_path_name
python
def convert_vectored_io_slice_to_final_path_name(local_path, ase):
    name = blobxfer.models.metadata.remove_vectored_io_slice_suffix_from_name(
        local_path.name, ase.vectored_io.slice_id)
    _tmp = list(local_path.parts[:-1])
    _tmp.append(name)
    return pathlib.Path(*_tmp)
Convert vectored io slice to final path name

:param pathlib.Path local_path: local path
:param blobxfer.models.azure.StorageEntity ase: Storage Entity
:rtype: pathlib.Path
:return: converted final path
https://github.com/azure/blobxfer/blob/c6c6c143e8ee413d09a1110abafdb92e9e8afc39/blobxfer/models/download.py#L357-L371
import collections import logging import math import os import pathlib import tempfile import threading import time import blobxfer.models.azure import blobxfer.models.crypto import blobxfer.models.options import blobxfer.util logger = logging.getLogger(__name__) _AUTO_SELECT_CHUNKSIZE_BYTES = 8388608 Offsets = collections.namedtuple( 'Offsets', [ 'chunk_num', 'fd_start', 'num_bytes', 'range_end', 'range_start', 'unpad', ] ) UncheckedChunk = collections.namedtuple( 'UncheckedChunk', [ 'data_len', 'fd_start', 'file_path', 'temp', ] ) LocalPathView = collections.namedtuple( 'LocalPathView', [ 'fd_end', 'fd_start', ] ) class LocalDestinationPath(object): def __init__(self, path=None): self._is_dir = None if path is not None: self.path = path @property def path(self): return self._path @path.setter def path(self, value): self._path = pathlib.Path(value) @property def is_dir(self): return self._is_dir @is_dir.setter def is_dir(self, value): self._is_dir = value def ensure_path_exists(self): if self._is_dir is None: raise RuntimeError('is_dir not set') if self._is_dir: self._path.mkdir(mode=0o750, parents=True, exist_ok=True) else: if self._path.exists() and self._path.is_dir(): raise RuntimeError( ('destination path {} already exists and is a ' 'directory').format(self._path)) else: self._path.parent.mkdir( mode=0o750, parents=True, exist_ok=True) class Specification(object): def __init__( self, download_options, skip_on_options, local_destination_path): self.options = download_options self.skip_on = skip_on_options self.destination = local_destination_path self.sources = [] if not self.options.check_file_md5 and self.skip_on.md5_match: raise ValueError( 'cannot specify skip on MD5 match without file MD5 enabled') if (self.options.restore_file_properties.attributes and not blobxfer.util.on_windows() and os.getuid() != 0): logger.warning('cannot set file uid/gid without root privileges') if self.options.chunk_size_bytes < 0: raise ValueError('chunk size cannot be negative') if self.options.max_single_object_concurrency < 1: raise ValueError( 'must specify a positive value for max single object ' 'concurrency') def add_azure_source_path(self, source): self.sources.append(source) class Descriptor(object): _AES_BLOCKSIZE = blobxfer.models.crypto.AES256_BLOCKSIZE_BYTES def __init__(self, lpath, ase, options, general_options, resume_mgr): self._verbose = general_options.verbose self._offset = 0 self._chunk_num = 0 self._next_integrity_chunk = 0 self._unchecked_chunks = {} self._allocated = False self._finalized = False self._meta_lock = threading.Lock() self._hasher_lock = threading.Lock() self._resume_mgr = resume_mgr self._restore_file_properties = options.restore_file_properties self._ase = ase self.final_path = lpath self.view = None if options.chunk_size_bytes == 0: chunk_size_bytes = _AUTO_SELECT_CHUNKSIZE_BYTES else: chunk_size_bytes = options.chunk_size_bytes self._chunk_size = min((chunk_size_bytes, self._ase.size)) self._total_chunks = self._compute_total_chunks(self._chunk_size) self._outstanding_ops = self._total_chunks self.hmac = None self.md5 = None self._integrity_failed = False self._initialize_integrity_checkers(options) @property def entity(self): return self._ase @property def must_compute_md5(self): return self.md5 is not None @property def all_operations_completed(self): with self._meta_lock: return (self._outstanding_ops == 0 and len(self._unchecked_chunks) == 0) @property def is_resumable(self): return self._resume_mgr is not None and self.hmac is None def 
_compute_total_chunks(self, chunk_size): try: return int(math.ceil(self._ase.size / chunk_size)) except ZeroDivisionError: return 0 def _initialize_integrity_checkers(self, options): if self._ase.is_encrypted: if blobxfer.util.is_none_or_empty( self._ase.encryption_metadata.symmetric_key): raise RuntimeError( 'symmetric key is invalid: provide RSA private key ' 'or metadata corrupt') self.hmac = self._ase.encryption_metadata.initialize_hmac() if (self.hmac is None and options.check_file_md5 and blobxfer.util.is_not_empty(self._ase.md5)): self.md5 = blobxfer.util.new_md5_hasher() @staticmethod def compute_allocated_size(size, is_encrypted): if size > 0: if is_encrypted: allocatesize = ( size // blobxfer.models.download.Descriptor._AES_BLOCKSIZE - 1 ) * blobxfer.models.download.Descriptor._AES_BLOCKSIZE if allocatesize < 0: raise RuntimeError('allocatesize is negative') else: allocatesize = size else: allocatesize = 0 return allocatesize @staticmethod def generate_view(ase): slicesize = blobxfer.models.download.Descriptor.compute_allocated_size( ase.size, ase.is_encrypted) if ase.vectored_io is None: view = LocalPathView( fd_start=0, fd_end=slicesize, ) total_size = slicesize else: view = LocalPathView( fd_start=ase.vectored_io.offset_start, fd_end=ase.vectored_io.offset_start + slicesize, ) total_size = ase.vectored_io.total_size return view, total_size @staticmethod
MIT License
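The blobxfer helper above only swaps the final component of a local path. The pathlib pattern it relies on can be shown in isolation; the ".bxslice-<n>" suffix used here is an assumed stand-in for whatever remove_vectored_io_slice_suffix_from_name actually strips.

import pathlib

local_path = pathlib.Path('/data/downloads/archive.bin.bxslice-2')
final_name = local_path.name.rsplit('.bxslice-', 1)[0]   # assumed suffix format

# Rebuild the path with the same parent directory but the restored file name.
_tmp = list(local_path.parts[:-1])
_tmp.append(final_name)
print(pathlib.Path(*_tmp))   # /data/downloads/archive.bin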
spantaleev/roscraco
roscraco/response/wireless.py
WirelessSettings.supports_auto_channel
python
def supports_auto_channel(self):
    return self._supports_auto_channel
Tells whether auto channel is supported. Channel 0 is considered the auto channel, because that's how most routers represent the ``Auto`` value. Some devices, however, do not support Auto channel at all.
https://github.com/spantaleev/roscraco/blob/87a5a7c54931d5586fd7d30c8c67a699bef69c1f/roscraco/response/wireless.py#L38-L46
from roscraco.helper import validator from roscraco.exception import RouterSettingsError class WirelessSettings(object): SECURITY_TYPE_NONE = 'none' SECURITY_TYPE_WEP64 = 'wep64' SECURITY_TYPE_WEP128 = 'wep128' SECURITY_TYPE_WPA = 'wpa' SECURITY_TYPE_WPA2 = 'wpa2' PROPERTIES = ( 'security_type', 'ssid', 'is_enabled', 'is_broadcasting_ssid', 'channel', 'password' ) def __init__(self): self._supports_wireless = True self._ssid = None self._enabled_status = True self._ssid_broadcast_status = True self._channel = None self._password = None self._internal_params = {} self._supported_security_types = set([self.__class__.SECURITY_TYPE_NONE]) self._security_type = None self._supports_ascii_wep_passwords = True self._supports_auto_channel = True self._changes_require_reboot = True def set_auto_channel_support(self, value): self._supports_auto_channel = bool(value) @property
BSD 3-Clause New or Revised License
programming-book-practice/python-crash-course-homework
chapter11/employee.py
Employee.__init__
python
def __init__(self, f_name, l_name, salary):
    self.first = f_name.title()
    self.last = l_name.title()
    self.salary = salary
Initialize the employee.
https://github.com/programming-book-practice/python-crash-course-homework/blob/d8f991215cfb78550ff5655c24108c67c789a068/chapter11/employee.py#L4-L8
class Employee():
MIT License
googlecloudplatform/professional-services-data-validator
data_validation/clients.py
get_data_client
python
def get_data_client(connection_config):
    # Work on a copy so the caller's config dict is not mutated.
    connection_config = copy.deepcopy(connection_config)
    source_type = connection_config.pop(consts.SOURCE_TYPE)

    # Optionally swap a service-account key path for a credentials object.
    if consts.GOOGLE_SERVICE_ACCOUNT_KEY_PATH in connection_config:
        key_path = connection_config.pop(consts.GOOGLE_SERVICE_ACCOUNT_KEY_PATH)
        if key_path:
            connection_config[
                "credentials"
            ] = google.oauth2.service_account.Credentials.from_service_account_file(
                key_path
            )

    if source_type not in CLIENT_LOOKUP:
        msg = 'ConfigurationError: Source type "{source_type}" is not supported'.format(
            source_type=source_type
        )
        raise Exception(msg)

    try:
        data_client = CLIENT_LOOKUP[source_type](**connection_config)
        data_client._source_type = source_type
    except Exception as e:
        msg = 'Connection Type "{source_type}" could not connect: {error}'.format(
            source_type=source_type, error=str(e)
        )
        raise exceptions.DataClientConnectionFailure(msg)

    return data_client
Return DataClient client from given configuration
https://github.com/googlecloudplatform/professional-services-data-validator/blob/781d8bf259ba2864e05c40dfe2a69d6c954cfec6/data_validation/clients.py#L181-L211
import pandas import warnings import copy import google.oauth2.service_account from google.cloud import bigquery import ibis import ibis_bigquery import ibis.backends.pandas from ibis.backends.pandas.client import PandasClient from ibis.backends.mysql.client import MySQLClient from ibis.backends.postgres.client import PostgreSQLClient import third_party.ibis.ibis_addon.datatypes from third_party.ibis.ibis_cloud_spanner.api import connect as spanner_connect from third_party.ibis.ibis_impala.api import impala_connect from data_validation import client_info from data_validation import consts, exceptions third_party.ibis.ibis_addon.datatypes warnings.filterwarnings( "ignore", "Your application has authenticated using end user credentials" ) warnings.filterwarnings( "ignore", "Cannot create BigQuery Storage client, the dependency google-cloud-bigquery-storage is not installed", ) warnings.filterwarnings( "ignore", "The GenericFunction 'regex_extract' is already registered" ) def _raise_missing_client_error(msg): def get_client_call(*args, **kwargs): raise Exception(msg) return get_client_call try: from third_party.ibis.ibis_teradata.client import TeradataClient except Exception: msg = "pip install teradatasql (requires Teradata licensing)" TeradataClient = _raise_missing_client_error(msg) try: from third_party.ibis.ibis_oracle.client import OracleClient except Exception: OracleClient = _raise_missing_client_error("pip install cx_Oracle") try: from third_party.ibis.ibis_mssql import connect as mssql_connect except Exception: mssql_connect = _raise_missing_client_error("pip install pyodbc") try: from third_party.ibis.ibis_snowflake.client import ( SnowflakeClient as snowflake_connect, ) except Exception: snowflake_connect = _raise_missing_client_error( "pip install snowflake-connector-python" ) def get_bigquery_client(project_id, dataset_id=None, credentials=None): info = client_info.get_http_client_info() google_client = bigquery.Client( project=project_id, client_info=info, credentials=credentials ) ibis_client = ibis_bigquery.connect( project_id, dataset_id=dataset_id, credentials=credentials ) ibis_client.client = google_client return ibis_client def get_pandas_client(table_name, file_path, file_type): if file_type == "csv": df = pandas.read_csv(file_path) elif file_type == "json": df = pandas.read_json(file_path) else: raise ValueError(f"Unknown Pandas File Type: {file_type}") pandas_client = ibis.backends.pandas.connect({table_name: df}) return pandas_client def get_ibis_table(client, schema_name, table_name, database_name=None): if type(client) in [OracleClient, PostgreSQLClient]: return client.table(table_name, database=database_name, schema=schema_name) elif type(client) in [PandasClient]: return client.table(table_name, schema=schema_name) else: return client.table(table_name, database=schema_name) def list_schemas(client): if type(client) in [OracleClient, PostgreSQLClient]: return client.list_schemas() elif hasattr(client, "list_databases"): return client.list_databases() else: return [None] def list_tables(client, schema_name): if type(client) in [OracleClient, PostgreSQLClient]: return client.list_tables(schema=schema_name) elif schema_name: return client.list_tables(database=schema_name) else: return client.list_tables() def get_all_tables(client, allowed_schemas=None): table_objs = [] schemas = list_schemas(client) for schema_name in schemas: if allowed_schemas and schema_name not in allowed_schemas: continue try: tables = list_tables(client, schema_name) except Exception as e: 
print(f"List Tables Error: {schema_name} -> {e}") continue for table_name in tables: table_objs.append((schema_name, table_name)) return table_objs
Apache License 2.0
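A minimal usage sketch for the client helpers above, using the Pandas backend so no warehouse credentials are needed; the file path and table name are invented, and the ibis Pandas backend behaviour is assumed rather than verified against this repository.

# Hypothetical usage sketch (file path and table name are illustrative; the
# ibis Pandas backend behaviour is an assumption, not taken from this file).
from data_validation import clients

pandas_client = clients.get_pandas_client("orders", "/tmp/orders.csv", "csv")
orders = clients.get_ibis_table(pandas_client, schema_name=None, table_name="orders")
print(orders.columns)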
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_10/api/administrators_api.py
AdministratorsApi.api210_admins_patch_with_http_info
python
def api210_admins_patch_with_http_info(
    self,
    admin=None,
    authorization=None,
    x_request_id=None,
    names=None,
    async_req=False,
    _return_http_data_only=False,
    _preload_content=True,
    _request_timeout=None,
):
    if names is not None:
        if not isinstance(names, list):
            names = [names]
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}

    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]
    if admin is None:
        raise TypeError("Missing the required parameter `admin` when calling `api210_admins_patch`")

    collection_formats = {}
    path_params = {}

    query_params = []
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'

    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']

    form_params = []
    local_var_files = {}

    body_params = None
    if 'admin' in params:
        body_params = params['admin']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.api_client.call_api(
        '/api/2.10/admins', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='AdminResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
Modify an administrator

Modifies properties for the specified administrator.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api210_admins_patch_with_http_info(admin, async_req=True)
>>> result = thread.get()

:param AdminPatch admin: (required)
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
                             It can also be a tuple of (connection time, read time) timeouts.
:return: AdminResponse
         If the method is called asynchronously,
         returns the request thread.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_10/api/administrators_api.py#L840-L933
from __future__ import absolute_import import re import six from typing import List, Optional from .. import models class AdministratorsApi(object): def __init__(self, api_client): self.api_client = api_client def api210_admins_api_tokens_delete_with_http_info( self, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/admins/api-tokens', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_admins_api_tokens_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, expose_api_token=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api210_admins_api_tokens_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api210_admins_api_tokens_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'expose_api_token' in params: query_params.append(('expose_api_token', params['expose_api_token'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', 
params['total_item_count'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/admins/api-tokens', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AdminApiTokenGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_admins_api_tokens_post_with_http_info( self, authorization=None, x_request_id=None, names=None, timeout=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'timeout' in params: query_params.append(('timeout', params['timeout'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/admins/api-tokens', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AdminApiTokenResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_admins_cache_delete_with_http_info( self, remove_all_entries=None, authorization=None, x_request_id=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if remove_all_entries is None: raise TypeError("Missing the required parameter `remove_all_entries` when calling `api210_admins_cache_delete`") collection_formats = {} path_params = {} query_params = [] if 'remove_all_entries' in params: query_params.append(('remove_all_entries', params['remove_all_entries'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = 
self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/admins/cache', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_admins_cache_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api210_admins_cache_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api210_admins_cache_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/admins/cache', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AdminCacheGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_admins_cache_put_with_http_info( self, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = 
str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/admins/cache', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AdminCacheResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_admins_delete_with_http_info( self, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/admins', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api210_admins_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, expose_api_token=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api210_admins_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter 
`offset` when calling `api210_admins_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'expose_api_token' in params: query_params.append(('expose_api_token', params['expose_api_token'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.10/admins', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AdminGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, )
BSD 2-Clause Simplified License
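A hedged sketch of calling the generated endpoint above; `api_client` stands in for an already-authenticated ApiClient, and the `AdminPatch` field shown is an assumption rather than something taken from this file.

# Hedged sketch: calling the generated PATCH wrapper directly.
from pypureclient.flasharray.FA_2_10 import models
from pypureclient.flasharray.FA_2_10.api.administrators_api import AdministratorsApi

admins_api = AdministratorsApi(api_client)  # api_client: assumed, pre-configured
response = admins_api.api210_admins_patch_with_http_info(
    admin=models.AdminPatch(role='readonly'),  # the `role` field is an assumption
    names=['alice'],
    async_req=False,
)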
html5rocks/updates.html5rocks.com
lib/aetycoon/__init__.py
_DerivedProperty.__init__
python
def __init__(self, derive_func, *args, **kwargs):
    super(_DerivedProperty, self).__init__(*args, **kwargs)
    self.derive_func = derive_func
Constructor.

Args:
  func: A function that takes one argument, the model instance, and returns
    a calculated value.
https://github.com/html5rocks/updates.html5rocks.com/blob/144b5ff9a36a7d37924d30c14810a0debdbd76ff/lib/aetycoon/__init__.py#L71-L79
import hashlib
import logging
import pickle

from google.appengine.ext import db

def DerivedProperty(func=None, *args, **kwargs):
    if func:
        return _DerivedProperty(func, *args, **kwargs)
    else:
        def decorate(decorated_func):
            return _DerivedProperty(decorated_func, *args, **kwargs)
        return decorate

class _DerivedProperty(db.Property):
Apache License 2.0
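To show the decorator this module builds around `_DerivedProperty`, here is a hedged sketch on a legacy App Engine `db.Model`; the model and its fields are invented, and how the derived value is actually read and stored is implemented further down the module and assumed here.

# Illustrative sketch only: a derived property on an invented db.Model.
from google.appengine.ext import db
import aetycoon

class Person(db.Model):
    first_name = db.StringProperty()
    last_name = db.StringProperty()

    @aetycoon.DerivedProperty
    def full_name(self):
        # derive_func receives the model instance and returns the value.
        return u'%s %s' % (self.first_name, self.last_name)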
drexly/openhgsenti
lib/django/contrib/admin/tests.py
AdminSeleniumWebDriverTestCase.has_css_class
python
def has_css_class(self, selector, klass):
    return (self.selenium.find_element_by_css_selector(selector)
            .get_attribute('class').find(klass) != -1)
Returns True if the element identified by `selector` has the CSS class `klass`.
https://github.com/drexly/openhgsenti/blob/d7806f58c81127d32091d9875a99ac13aef94a8a/lib/django/contrib/admin/tests.py#L157-L163
import os from unittest import SkipTest from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.utils.module_loading import import_string from django.utils.translation import ugettext as _ class AdminSeleniumWebDriverTestCase(StaticLiveServerTestCase): available_apps = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', ] webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver' @classmethod def setUpClass(cls): if not os.environ.get('DJANGO_SELENIUM_TESTS', False): raise SkipTest('Selenium tests not requested') try: cls.selenium = import_string(cls.webdriver_class)() except Exception as e: raise SkipTest('Selenium webdriver "%s" not installed or not ' 'operational: %s' % (cls.webdriver_class, str(e))) super(AdminSeleniumWebDriverTestCase, cls).setUpClass() @classmethod def _tearDownClassInternal(cls): if hasattr(cls, 'selenium'): cls.selenium.quit() super(AdminSeleniumWebDriverTestCase, cls)._tearDownClassInternal() def wait_until(self, callback, timeout=10): from selenium.webdriver.support.wait import WebDriverWait WebDriverWait(self.selenium, timeout).until(callback) def wait_for_popup(self, num_windows=2, timeout=10): self.wait_until(lambda d: len(d.window_handles) == num_windows, timeout) def wait_loaded_tag(self, tag_name, timeout=10): self.wait_for(tag_name, timeout) def wait_for(self, css_selector, timeout=10): from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as ec self.wait_until( ec.presence_of_element_located((By.CSS_SELECTOR, css_selector)), timeout ) def wait_for_text(self, css_selector, text, timeout=10): from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as ec self.wait_until( ec.text_to_be_present_in_element( (By.CSS_SELECTOR, css_selector), text), timeout ) def wait_for_value(self, css_selector, text, timeout=10): from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as ec self.wait_until( ec.text_to_be_present_in_element_value( (By.CSS_SELECTOR, css_selector), text), timeout ) def wait_page_loaded(self): from selenium.common.exceptions import TimeoutException try: self.wait_loaded_tag('body') except TimeoutException: pass def admin_login(self, username, password, login_url='/admin/'): self.selenium.get('%s%s' % (self.live_server_url, login_url)) username_input = self.selenium.find_element_by_name('username') username_input.send_keys(username) password_input = self.selenium.find_element_by_name('password') password_input.send_keys(password) login_text = _('Log in') self.selenium.find_element_by_xpath( '//input[@value="%s"]' % login_text).click() self.wait_page_loaded() def get_css_value(self, selector, attribute): return self.selenium.execute_script( 'return django.jQuery("%s").css("%s")' % (selector, attribute)) def get_select_option(self, selector, value): from selenium.common.exceptions import NoSuchElementException options = self.selenium.find_elements_by_css_selector('%s > option' % selector) for option in options: if option.get_attribute('value') == value: return option raise NoSuchElementException('Option "%s" not found in "%s"' % (value, selector)) def assertSelectOptions(self, selector, values): options = self.selenium.find_elements_by_css_selector('%s > option' % selector) actual_values = [] for option in options: actual_values.append(option.get_attribute('value')) self.assertEqual(values, 
actual_values)
Apache License 2.0
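A hedged sketch of how `has_css_class` might be used from a concrete test built on this base class; the admin URL and CSS selector are invented for illustration.

# Hypothetical Selenium admin test using the base class above.
class MyAdminSeleniumTests(AdminSeleniumWebDriverTestCase):

    def test_row_is_highlighted(self):
        self.admin_login(username='super', password='secret')
        self.selenium.get('%s%s' % (self.live_server_url, '/admin/myapp/thing/'))
        self.wait_page_loaded()
        # Passes when the element's class attribute contains the substring.
        self.assertTrue(self.has_css_class('#result_list tr.row1', 'row1'))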
globocom/gcloud-utils
gcloud_utils/functions.py
Functions.list_functions
python
def list_functions(self):
    request = self.functions.list(location=self.parent)
    return self.__execute_request(request)
List the cloud functions
https://github.com/globocom/gcloud-utils/blob/9b01b2ae6ce42243e590ebe03d0ec36cb86716ce/gcloud_utils/functions.py#L99-L102
import os import re import json import logging import zipfile import requests from googleapiclient import discovery from googleapiclient.errors import HttpError logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s') class Functions(object): def __init__(self, project, zone): self.project = project self.zone = zone self.logger = logging.getLogger(name=self.__class__.__name__) self.client = discovery.build('cloudfunctions', 'v1beta2') self.functions = self.__get_functions_resource() self.parent = self.__build_parent() def __build_parent(self): return 'projects/{}/locations/{}'.format(self.project, self.zone) def __get_functions_resource(self): return self.client.projects().locations().functions() def __get_upload_url(self): generate_upload_url_request = self.functions.generateUploadUrl( parent=self.parent) res = self.__execute_request(generate_upload_url_request) return res['uploadUrl'] def __execute_request(self, request): return request.execute() def __upload_function(self, path, filename, upload_url): name, extension = os.path.splitext(filename) self.__compress_function(path, name, extension) zip_filename = '{}.zip'.format(name) with open(os.path.join(path, zip_filename), 'rb') as zip_file: headers = { 'content-type': 'application/zip', 'x-goog-content-length-range': '0,104857600' } return requests.put(upload_url, data=zip_file, headers=headers) def __get_filename(self, runtime): filename = "" if re.match(r'^nodejs', runtime): filename = "index.js" elif runtime == "python37": filename = "main.py" return filename def __build_function(self, name, runtime, path): upload_url = self.__get_upload_url() filename = self.__get_filename(runtime) self.__upload_function(path, filename, upload_url) body = { "entryPoint": name, "runtime": runtime, "sourceUploadUrl": upload_url, "httpsTrigger": {}, "name": '{}/functions/{}'.format(self.parent, name) } return self.functions.create(location=self.parent, body=body) def __compress_function(self, path, filename, extension): self.logger.info("Compressing File %s", filename) zip_file = zipfile.ZipFile('{}/{}.zip'.format(path, filename), 'w') zip_file.write(os.path.join(path, filename + extension), compress_type=zipfile.ZIP_DEFLATED, arcname=(filename + extension)) zip_file.close() def create_function(self, name, runtime, path=os.getcwd()): request = self.__build_function(name, runtime, path) try: res = self.__execute_request(request) return res except HttpError as err: body = err.args[1] err_message = json.loads(body.decode('utf-8'))['error']['message'] self.logger.error(err_message)
Apache License 2.0
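A minimal usage sketch for `Functions.list_functions`; the project and region are placeholders, application default credentials are assumed to be configured for googleapiclient, and the response shape (a dict with a `functions` key) follows the Cloud Functions API rather than anything shown in this file.

# Hypothetical usage sketch.
from gcloud_utils.functions import Functions

client = Functions(project='my-project', zone='us-central1')
for function in client.list_functions().get('functions', []):
    print(function['name'])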
lisc-tools/lisc
lisc/collect/utils.py
make_comp
python
def make_comp(terms, joiner='OR'):
    comp = ''
    if terms and terms[0]:
        terms = ['"'+ item + '"' for item in terms]
        comp = '(' + joiner.join(terms) + ')'
        comp = comp.replace(' ', '+')
    return comp
Make a search term component.

Parameters
----------
terms : list of str
    List of words to connect together with 'OR'.
joiner : {'OR', 'AND', 'NOT'}
    The string to join together the inputs with.

Returns
-------
comp : str
    Search term component.

Notes
-----
- This function deals with empty list inputs.
- This function adds "" to terms to make them exact search only.
- This function replaces any spaces in terms with '+'.
https://github.com/lisc-tools/lisc/blob/2fd81d837468c8e2cbfc6b7666c827d2b1932cd3/lisc/collect/utils.py#L26-L54
def make_term(term, incl_joiner='OR'):
    return join(join(make_comp(term.search),
                     make_comp(term.inclusions, incl_joiner), 'AND'),
                make_comp(term.exclusions), 'NOT')
Apache License 2.0
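To make the quoting and space-to-`+` substitution concrete, a few worked calls with outputs traced by hand from the function body above:

# Outputs traced by hand from make_comp above.
make_comp(['brain', 'prefrontal cortex'])
# -> '("brain"OR"prefrontal+cortex")'

make_comp(['optogenetics'], joiner='NOT')
# -> '("optogenetics")'

make_comp([])
# -> ''   (empty inputs fall through to the empty string)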
netspi/jig
JIG.py
extractFilters
python
def extractFilters(response):
    userList = re.compile(r"</span>.\((.*)\)").findall(response.text)
    return list(set(userList))
Takes in the response body for the manage filters page and returns a list containing usernames.
https://github.com/netspi/jig/blob/bc36ed013b5ba48e549a16151b9135e271d55055/JIG.py#L35-L40
import re
import sys
from itertools import izip as zip
import argparse
import requests

parser = argparse.ArgumentParser(description='Jira attack script')
parser.add_argument('URL', type=str , help='the URL of the Jira instance... ex. https://jira.organization.com/')
parser.add_argument('-u' ,'--usernames', dest='names', action='store_const', const=True, help='Print discovered usernames')
parser.add_argument('-e' , '--emails', dest='emails',action='store_const', const=True, help='Print discovered email addresses')
parser.add_argument('-a' ,'--all', dest='all',action='store_const',const=True,help='Print discovered email addresses and usernames')
parser.add_argument('-eu' , dest='all',action='store_const',const=True,help=argparse.SUPPRESS)
parser.add_argument('-ue' , dest='all',action='store_const',const=True,help=argparse.SUPPRESS)
args = parser.parse_args()
url = args.URL

if args.URL[-1] != '/':
    args.URL = args.URL + "/"

pickerURL = args.URL + "secure/popups/UserPickerBrowser.jspa?max=9999"
filtersURL = args.URL + "secure/ManageFilters.jspa?filter=popular"

def extractPicker(response):
    userList = re.compile(r"-name\">(.*)</td>").findall(response.text)
    emailList = re.compile(r">(.*\@.*)</td>").findall(response.text)
    dictionary = dict(zip(userList , emailList))
    return dictionary
MIT License
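A self-contained illustration of the regular expression in `extractFilters`, using a fake response object instead of a live Jira instance; the HTML lines are invented but follow the `</span> (username)` pattern being scraped.

# Fake response standing in for requests.get(filtersURL).
class FakeResponse(object):
    text = ('<a>Shared dashboards</a></span> (jsmith)\n'
            '<a>My filters</a></span> (adavis)\n'
            '<a>Old filters</a></span> (jsmith)\n')

print(sorted(extractFilters(FakeResponse())))
# -> ['adavis', 'jsmith']   (duplicates removed by the set() round-trip)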
adaptivepele/adaptivepele
AdaptivePELE/simulation/openmm_simulations.py
getDeviceIndexStr
python
def getDeviceIndexStr(deviceIndex, devicesPerTraj, devicesPerReplica=None):
    if devicesPerReplica is not None:
        devices = [d % devicesPerReplica for d in range(deviceIndex, deviceIndex+devicesPerTraj)]
    else:
        devices = range(deviceIndex, deviceIndex+devicesPerTraj)
    return ",".join(str(x) for x in devices)
Create a string to pass to OpenMM platform to select the resources to use

:param deviceIndex: Index of the trajectory in the replica
:type deviceIndex: int
:param devicesPerTraj: Number of devices to use per trajectory
:type devicesPerTraj: int
:param devicesPerReplica: Number of maximum devices to use per replica
:type devicesPerReplica: int

:returns: str -- String that tells OpenMM how to use the resources
https://github.com/adaptivepele/adaptivepele/blob/b7c908a53a2ba9ec19fa81a517377cc365176036/AdaptivePELE/simulation/openmm_simulations.py#L636-L653
from __future__ import absolute_import, division, print_function import os import sys import time import functools import traceback import numpy as np import simtk.openmm as mm import simtk.openmm.app as app import simtk.unit as unit from AdaptivePELE.constants import constants, blockNames from AdaptivePELE.utilities import utilities from mdtraj.reporters.basereporter import _BaseReporter from mdtraj.formats import XTCTrajectoryFile try: FileNotFoundError except NameError: FileNotFoundError = IOError try: basestring except NameError: basestring = str def get_traceback(f): @functools.wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except Exception as ex: ret = '#' * 60 ret += "\nException caught:" ret += "\n"+'-'*60 ret += "\n" + traceback.format_exc() ret += "\n" + '-' * 60 ret += "\n" + "#" * 60 print(sys.stderr, ret) sys.stderr.flush() raise ex return wrapper class ForceReporter(object): def __init__(self, file_name, reportInterval): self._out = open(file_name, 'w') self._reportInterval = reportInterval def __del__(self): self._out.close() def describeNextReport(self, simulation): steps = self._reportInterval - simulation.currentStep % self._reportInterval return (steps, False, False, True, False, None) def report(self, simulation, state): self._out.write("Step %d\n" % simulation.currentStep) forces = state.getForces().value_in_unit(unit.kilojoules/unit.mole/unit.nanometer) for i, f in enumerate(forces): self._out.write('%d %g %g %g\n' % (i, f[0], f[1], f[2])) self._out.flush() class XTCReporter(_BaseReporter): @property def backend(self): return XTCTrajectoryFile def __init__(self, file_name, reportInterval, atomSubset=None, append=False, enforcePeriodicBox=True): if append: if isinstance(file_name, basestring): with self.backend(file_name, 'r') as f: contents = f.read() elif isinstance(file_name, self.backend): raise ValueError("Currently passing an XTCTrajectoryFile in append mode is not supported, please pass a string with the filename") else: raise TypeError("I don't know how to handle %s" % file_name) super(XTCReporter, self).__init__(file_name, reportInterval, coordinates=True, time=True, cell=True, potentialEnergy=False, kineticEnergy=False, temperature=False, velocities=False, atomSubset=atomSubset) self._enforcePeriodicBox = enforcePeriodicBox if append: self._traj_file.write(*contents) def describeNextReport(self, simulation): steps = self._reportInterval - simulation.currentStep % self._reportInterval return (steps, self._coordinates, self._velocities, False, self._needEnergy, self._enforcePeriodicBox) def report(self, simulation, state): if not self._is_intialized: self._initialize(simulation) self._is_intialized = True self._checkForErrors(simulation, state) args = () kwargs = {} if self._coordinates: coordinates = state.getPositions(asNumpy=True)[self._atomSlice] coordinates = coordinates.value_in_unit(getattr(unit, self._traj_file.distance_unit)) args = (coordinates,) if self._time: time_step = state.getTime() kwargs['time'] = time_step.value_in_unit(time_step.unit) kwargs['step'] = simulation.currentStep if self._cell: kwargs['box'] = state.getPeriodicBoxVectors(asNumpy=True).value_in_unit(getattr(unit, self._traj_file.distance_unit)) self._traj_file.write(*args, **kwargs) if hasattr(self._traj_file, 'flush'): self._traj_file.flush() class CustomStateDataReporter(app.StateDataReporter): def __init__(self, file_name, reportInterval, step=False, time_sim=False, potentialEnergy=False, kineticEnergy=False, totalEnergy=False, temperature=False, 
volume=False, density=False, progress=False, remainingTime=False, speed=False, elapsedTime=False, separator=',', systemMass=None, totalSteps=None, append=False, initialStep=0): progress = False if isinstance(file_name, basestring): file_name = str(file_name) app.StateDataReporter.__init__(self, file_name, reportInterval, step, time_sim, potentialEnergy, kineticEnergy, totalEnergy, temperature, volume, density, progress, remainingTime, speed, elapsedTime, separator, systemMass, totalSteps) self._append = append self.initialStep = initialStep self._initialClockTime = None self._initialSimulationTime = None self._initialSteps = None self._hasInitialized = None def report(self, simulation, state): if not self._hasInitialized: self._initializeConstants(simulation) headers = self._constructHeaders() if not self._append: print('#"%s"' % ('"' + self._separator + '"').join(headers), file=self._out) try: self._out.flush() except AttributeError: pass self._initialClockTime = time.time() self._initialSimulationTime = state.getTime() self._initialSteps = simulation.currentStep self._hasInitialized = True self._checkForErrors(simulation, state) values = self._constructReportValues(simulation, state) print(self._separator.join(str(v) for v in values), file=self._out) try: self._out.flush() except AttributeError: pass def _constructReportValues(self, simulation, state): values = super(CustomStateDataReporter, self)._constructReportValues(simulation, state) values[0] = values[0] + self.initialStep return values @get_traceback def runEquilibration(equilibrationFiles, reportName, parameters, worker): prmtop, inpcrd = equilibrationFiles prmtop = app.AmberPrmtopFile(prmtop) inpcrd = app.AmberInpcrdFile(inpcrd) PLATFORM = mm.Platform_getPlatformByName(str(parameters.runningPlatform)) if parameters.runningPlatform == "CUDA": platformProperties = {"Precision": "mixed", "DeviceIndex": getDeviceIndexStr(worker, parameters.devicesPerTrajectory, devicesPerReplica=parameters.maxDevicesPerReplica), "UseCpuPme": "false"} else: platformProperties = {} if worker == 0: utilities.print_unbuffered("Running %d steps of minimization" % parameters.minimizationIterations) if parameters.boxCenter or parameters.cylinderBases: dummies = findDummyAtom(prmtop) assert dummies is not None else: dummies = None simulation = minimization(prmtop, inpcrd, PLATFORM, parameters.constraintsMin, parameters, platformProperties, dummies) state = simulation.context.getState(getPositions=True, getVelocities=True) positions = state.getPositions() velocities = state.getVelocities() if worker == 0: utilities.print_unbuffered("Running %d steps of NVT equilibration" % parameters.equilibrationLengthNVT) simulation = NVTequilibration(prmtop, positions, PLATFORM, parameters.equilibrationLengthNVT, parameters.constraintsNVT, parameters, reportName, platformProperties, velocities=velocities, dummy=dummies) state = simulation.context.getState(getPositions=True, getVelocities=True) positions = state.getPositions() velocities = state.getVelocities() if worker == 0: utilities.print_unbuffered("Running %d steps of NPT equilibration" % parameters.equilibrationLengthNPT) simulation = NPTequilibration(prmtop, positions, PLATFORM, parameters.equilibrationLengthNPT, parameters.constraintsNPT, parameters, reportName, platformProperties, velocities=velocities, dummy=dummies) state = simulation.context.getState(getPositions=True) root, _ = os.path.splitext(reportName) outputPDB = "%s_NPT.pdb" % root with open(outputPDB, 'w') as fw: 
app.PDBFile.writeFile(simulation.topology, state.getPositions(), fw) return outputPDB @get_traceback def minimization(prmtop, inpcrd, PLATFORM, constraints, parameters, platformProperties, dummy=None): if parameters.ligandName is None: ligandNames = [] else: ligandNames = parameters.ligandName system = prmtop.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=parameters.nonBondedCutoff * unit.angstroms, constraints=app.HBonds) integrator = mm.VerletIntegrator(parameters.timeStep * unit.femtoseconds) if parameters.constraints is not None: addConstraints(system, prmtop.topology, parameters.constraints) if parameters.boxCenter or parameters.cylinderBases: addDummyAtomToSystem(system, prmtop.topology, inpcrd.positions, ligandNames, dummy, 3) if constraints: force = mm.CustomExternalForce(str("k*periodicdistance(x, y, z, x0, y0, z0)^2")) force.addGlobalParameter(str("k"), constraints * unit.kilocalories_per_mole / unit.angstroms ** 2) force.addPerParticleParameter(str("x0")) force.addPerParticleParameter(str("y0")) force.addPerParticleParameter(str("z0")) atomNames = ('CA', 'C', 'N', 'O') for j, atom in enumerate(prmtop.topology.atoms()): if (atom.name in atomNames and atom.residue.name != "HOH") or (atom.residue.name in ligandNames and atom.element.symbol != "H"): force.addParticle(j, inpcrd.positions[j].value_in_unit(unit.nanometers)) system.addForce(force) simulation = app.Simulation(prmtop.topology, system, integrator, PLATFORM, platformProperties=platformProperties) try: boxVectors = inpcrd.boxVectors except AttributeError: boxVectors = None if boxVectors is not None: simulation.context.setPeriodicBoxVectors(*boxVectors) simulation.context.setPositions(inpcrd.positions) simulation.minimizeEnergy(maxIterations=parameters.minimizationIterations) return simulation @get_traceback def NVTequilibration(topology, positions, PLATFORM, simulation_steps, constraints, parameters, reportName, platformProperties, velocities=None, dummy=None): if parameters.ligandName is None: ligandNames = [] else: ligandNames = parameters.ligandName system = topology.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=parameters.nonBondedCutoff * unit.angstroms, constraints=app.HBonds) system.addForce(mm.AndersenThermostat(parameters.Temperature * unit.kelvin, 1 / unit.picosecond)) integrator = mm.VerletIntegrator(parameters.timeStep * unit.femtoseconds) if parameters.constraints is not None: addConstraints(system, topology.topology, parameters.constraints) if parameters.boxCenter or parameters.cylinderBases: addDummyAtomToSystem(system, topology.topology, positions, ligandNames, dummy, 3) if constraints: force = mm.CustomExternalForce(str("k*periodicdistance(x, y, z, x0, y0, z0)^2")) force.addGlobalParameter(str("k"), constraints * unit.kilocalories_per_mole / unit.angstroms ** 2) force.addPerParticleParameter(str("x0")) force.addPerParticleParameter(str("y0")) force.addPerParticleParameter(str("z0")) for j, atom in enumerate(topology.topology.atoms()): if (atom.name in ('CA', 'C', 'N', 'O') and atom.residue.name != "HOH") or (atom.residue.name in ligandNames and atom.element.symbol != "H"): force.addParticle(j, positions[j].value_in_unit(unit.nanometers)) system.addForce(force) simulation = app.Simulation(topology.topology, system, integrator, PLATFORM, platformProperties=platformProperties) simulation.context.setPositions(positions) if velocities: simulation.context.setVelocities(velocities) else: simulation.context.setVelocitiesToTemperature(parameters.Temperature * unit.kelvin, 1) root, _ = 
os.path.splitext(reportName) reportFile = "%s_report_NVT" % root report_freq = int(min(parameters.reporterFreq, simulation_steps/4)) simulation.reporters.append(CustomStateDataReporter(reportFile, report_freq, step=True, potentialEnergy=True, temperature=True, time_sim=True, volume=True, remainingTime=True, speed=True, totalSteps=parameters.equilibrationLengthNVT, separator="\t")) simulation.step(simulation_steps) return simulation @get_traceback def NPTequilibration(topology, positions, PLATFORM, simulation_steps, constraints, parameters, reportName, platformProperties, velocities=None, dummy=None): if parameters.ligandName is None: ligandNames = [] else: ligandNames = parameters.ligandName system = topology.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=parameters.nonBondedCutoff * unit.angstroms, constraints=app.HBonds) system.addForce(mm.AndersenThermostat(parameters.Temperature * unit.kelvin, 1 / unit.picosecond)) integrator = mm.VerletIntegrator(parameters.timeStep * unit.femtoseconds) system.addForce(mm.MonteCarloBarostat(1 * unit.bar, parameters.Temperature * unit.kelvin)) if parameters.constraints is not None: addConstraints(system, topology.topology, parameters.constraints) if parameters.boxCenter or parameters.cylinderBases: addDummyAtomToSystem(system, topology.topology, positions, ligandNames, dummy, 3) if constraints: force = mm.CustomExternalForce(str("k*periodicdistance(x, y, z, x0, y0, z0)^2")) force.addGlobalParameter(str("k"), constraints * unit.kilocalories_per_mole / unit.angstroms ** 2) force.addPerParticleParameter(str("x0")) force.addPerParticleParameter(str("y0")) force.addPerParticleParameter(str("z0")) for j, atom in enumerate(topology.topology.atoms()): if atom.name == 'CA' or (atom.residue.name in ligandNames and atom.element.symbol != "H"): force.addParticle(j, positions[j].value_in_unit(unit.nanometers)) system.addForce(force) simulation = app.Simulation(topology.topology, system, integrator, PLATFORM, platformProperties=platformProperties) simulation.context.setPositions(positions) if velocities: simulation.context.setVelocities(velocities) else: simulation.context.setVelocitiesToTemperature(parameters.Temperature * unit.kelvin, 1) root, _ = os.path.splitext(reportName) reportFile = "%s_report_NPT" % root report_freq = int(min(parameters.reporterFreq, simulation_steps/4)) simulation.reporters.append(CustomStateDataReporter(reportFile, report_freq, step=True, potentialEnergy=True, temperature=True, time_sim=True, volume=True, remainingTime=True, speed=True, totalSteps=parameters.equilibrationLengthNPT, separator="\t")) simulation.step(simulation_steps) return simulation @get_traceback def runProductionSimulation(equilibrationFiles, workerNumber, outputDir, seed, parameters, reportFileName, checkpoint, ligandName, replica_id, trajsPerReplica, epoch_number, restart=False): if parameters.ligandName is None: ligandNames = [] else: ligandNames = parameters.ligandName deviceIndex = workerNumber workerNumber += replica_id*trajsPerReplica + 1 prmtop, pdb = equilibrationFiles trajName = os.path.join(outputDir, constants.AmberTemplates.trajectoryTemplate % (workerNumber, parameters.format)) stateReporter = os.path.join(outputDir, "%s_%s" % (reportFileName, workerNumber)) checkpointReporter = os.path.join(outputDir, constants.AmberTemplates.CheckPointReporterTemplate % workerNumber) lastStep = getLastStep(stateReporter) simulation_length = parameters.productionLength - lastStep pdb = app.PDBFile(str(pdb)) prmtop = app.AmberPrmtopFile(prmtop) PLATFORM = 
mm.Platform_getPlatformByName(str(parameters.runningPlatform)) if parameters.runningPlatform == "CUDA": platformProperties = {"Precision": "mixed", "DeviceIndex": getDeviceIndexStr(deviceIndex, parameters.devicesPerTrajectory, devicesPerReplica=parameters.maxDevicesPerReplica), "UseCpuPme": "false"} else: platformProperties = {} dummies = None if parameters.boxCenter or parameters.cylinderBases: dummies = findDummyAtom(prmtop) if epoch_number > 0: min_sim = minimization(prmtop, pdb, PLATFORM, parameters.constraintsMin, parameters, platformProperties, dummy=dummies) positions = min_sim.context.getState(getPositions=True).getPositions() else: positions = pdb.positions system = prmtop.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=parameters.nonBondedCutoff * unit.angstroms, constraints=app.HBonds, removeCMMotion=True) if parameters.boxCenter or parameters.cylinderBases: addDummyAtomToSystem(system, prmtop.topology, positions, ligandNames, dummies, deviceIndex) system.addForce(mm.AndersenThermostat(parameters.Temperature * unit.kelvin, 1 / unit.picosecond)) integrator = mm.VerletIntegrator(parameters.timeStep * unit.femtoseconds) system.addForce(mm.MonteCarloBarostat(1 * unit.bar, parameters.Temperature * unit.kelvin)) if parameters.constraints is not None: addConstraints(system, prmtop.topology, parameters.constraints) if (parameters.boxCenter or parameters.cylinderBases) and parameters.ligandsToRestrict is not None: for ligand_resname in parameters.ligandsToRestrict: if parameters.boxType == blockNames.SimulationParams.sphere: if deviceIndex == 0: utilities.print_unbuffered("Adding spherical ligand box") assert len(dummies) == 1 addLigandBox(prmtop.topology, positions, system, ligand_resname, dummies[0], parameters.boxRadius, deviceIndex) elif parameters.boxType == blockNames.SimulationParams.cylinder: if deviceIndex == 0: utilities.print_unbuffered("Adding cylinder ligand box") addLigandCylinderBox(prmtop.topology, positions, system, ligand_resname, dummies, parameters.boxRadius, deviceIndex) simulation = app.Simulation(prmtop.topology, system, integrator, PLATFORM, platformProperties=platformProperties) simulation.context.setPositions(positions) if restart: with open(str(checkpoint), 'rb') as check: simulation.context.loadCheckpoint(check.read()) stateData = open(str(stateReporter), "a") else: simulation.context.setVelocitiesToTemperature(parameters.Temperature * unit.kelvin, seed) stateData = open(str(stateReporter), "w") if parameters.format == "xtc": simulation.reporters.append(XTCReporter(str(trajName), parameters.reporterFreq, append=restart, enforcePeriodicBox=parameters.postprocessing)) elif parameters.format == "dcd": simulation.reporters.append(app.DCDReporter(str(trajName), parameters.reporterFreq, append=restart, enforcePeriodicBox=parameters.postprocessing)) simulation.reporters.append(app.CheckpointReporter(str(checkpointReporter), parameters.reporterFreq)) simulation.reporters.append(CustomStateDataReporter(stateData, parameters.reporterFreq, step=True, potentialEnergy=True, temperature=True, time_sim=True, volume=True, remainingTime=True, speed=True, totalSteps=simulation_length, separator="\t", append=restart, initialStep=lastStep)) if workerNumber == 1: frequency = min(10 * parameters.reporterFreq, parameters.productionLength) simulation.reporters.append(app.StateDataReporter(sys.stdout, frequency, step=True)) simulation.step(simulation_length) stateData.close() def getLastStep(reportfile): try: with open(reportfile, "r") as inp: report = inp.read() lines = 
report.split("\n") if len(lines) <= 2: return 0 last_step = int(lines[-2].split("\t")[0]) except FileNotFoundError: return 0 return last_step
MIT License
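Two worked calls, with results traced by hand from the function body above:

# Results traced by hand from getDeviceIndexStr.
getDeviceIndexStr(2, 2)
# -> "2,3"   (trajectory at index 2 uses devices 2 and 3)

getDeviceIndexStr(3, 2, devicesPerReplica=4)
# -> "3,0"   (device indices wrap once the per-replica limit of 4 is reached)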
nat-d/featurecontrolhrl
worker.py
main
python
def main(_):
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help='Set verbosity.')
    parser.add_argument('--task', default=0, type=int, help='Task index')
    parser.add_argument('--job-name', default="worker", help='worker or ps')
    parser.add_argument('--num-workers', default=1, type=int, help='Number of workers')
    parser.add_argument('--log-dir', default="/tmp/pong", help='Log directory path')
    parser.add_argument('--env-id', default="PongDeterministic-v3", help='Environment id')
    parser.add_argument('-r', '--remotes', default=None,
                        help='References to environments to create (e.g. -r 20), '
                             'or the address of pre-existing VNC servers and '
                             'rewarders to use (e.g. -r vnc://localhost:5900+15900,vnc://localhost:5901+15901)')
    parser.add_argument('--visualise', action='store_true',
                        help="Visualise the gym environment by running env.render() between each timestep")
    parser.add_argument('--seed_offset', default=0, type=int, help='Offset to the seed number')
    parser.add_argument('--eval', action='store_true', help="Evaluation Thread")
    args = parser.parse_args()

    spec = cluster_spec(args.num_workers, 1)
    cluster = tf.train.ClusterSpec(spec).as_cluster_def()

    def shutdown(signal, frame):
        logger.warn('Received signal %s: exiting', signal)
        sys.exit(128+signal)
    signal.signal(signal.SIGHUP, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGTERM, shutdown)

    if args.job_name == "worker":
        server = tf.train.Server(cluster, job_name="worker", task_index=args.task,
                                 config=tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=2))
        run(args, server)
    else:
        server = tf.train.Server(cluster, job_name="ps", task_index=args.task,
                                 config=tf.ConfigProto(device_filters=["/job:ps"]))
        while True:
            time.sleep(1000)
Setting up Tensorflow for data parallel work
https://github.com/nat-d/featurecontrolhrl/blob/89df9c5110466312ff1b8b23ad73f2f7d0998e6b/worker.py#L134-L180
import cv2 import go_vncdriver import tensorflow as tf import argparse import logging import sys, signal import time import os from a3c import A3C from envs import create_env import distutils.version use_tf12_api = distutils.version.LooseVersion(tf.VERSION) >= distutils.version.LooseVersion('0.12.0') from time import sleep logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class FastSaver(tf.train.Saver): def save(self, sess, save_path, global_step=None, latest_filename=None, meta_graph_suffix="meta", write_meta_graph=True): super(FastSaver, self).save(sess, save_path, global_step, latest_filename, meta_graph_suffix, False) def run(args, server): env = create_env(args.env_id, client_id=str(args.task), remotes=args.remotes) trainer = A3C(env, args.task, args.visualise) if use_tf12_api: variables_to_save = [v for v in tf.global_variables() if not v.name.startswith("local")] init_op = tf.variables_initializer(variables_to_save) init_all_op = tf.global_variables_initializer() else: variables_to_save = [v for v in tf.all_variables() if not v.name.startswith("local")] init_op = tf.initialize_variables(variables_to_save) init_all_op = tf.initialize_all_variables() saver = FastSaver(variables_to_save) var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name) logger.info('Trainable vars:') for v in var_list: logger.info(' %s %s', v.name, v.get_shape()) def init_fn(ses): logger.info("Initializing all parameters.") ses.run(init_all_op) config = tf.ConfigProto(device_filters=["/job:ps", "/job:worker/task:{}/cpu:0".format(args.task)]) logdir = os.path.join(args.log_dir, 'train') if use_tf12_api: summary_writer = tf.summary.FileWriter(logdir + "_%d" % args.task) else: summary_writer = tf.train.SummaryWriter(logdir + "_%d" % args.task) if args.eval: summary_writer = tf.summary.FileWriter(logdir + "_eval") logger.info("Events directory: %s_%s", logdir, args.task) sv = tf.train.Supervisor(is_chief=(args.task == 0), logdir=logdir, saver=saver, summary_op=None, init_op=init_op, init_fn=init_fn, summary_writer=summary_writer, ready_op=tf.report_uninitialized_variables(variables_to_save), global_step=trainer.global_step, save_model_secs=30, save_summaries_secs=30) num_global_steps = 1000000000 logger.info( "Starting session. If this hangs, we're mostly likely waiting to connect to the parameter server. " + "One common cause is that the parameter server DNS name isn't resolving yet, or is misspecified.") with sv.managed_session(server.target, config=config) as sess, sess.as_default(): tf.set_random_seed(args.task + args.seed_offset) sess.run(trainer.meta_sync) sess.run(trainer.sync) trainer.start(sess, summary_writer) global_step = sess.run(trainer.global_step) if args.eval: logger.info("Starting Evaluate-worker") else: logger.info("Starting training at step=%d", global_step) eval_step = 1 while not sv.should_stop() and (not num_global_steps or global_step < num_global_steps): if args.eval: global_step = sess.run(trainer.global_step) if global_step > eval_step * 100000: logger.info(" !!!! Starting Evaluation at step=%d", global_step) trainer.evaluate(sess) eval_step += 1 sleep(10) else: trainer.process(sess) global_step = sess.run(trainer.global_step) sv.stop() logger.info('reached %s steps. 
worker stopped.', global_step) def cluster_spec(num_workers, num_ps): cluster = {} port = 12222 all_ps = [] host = '127.0.0.1' for _ in range(num_ps): all_ps.append('{}:{}'.format(host, port)) port += 1 cluster['ps'] = all_ps all_workers = [] for _ in range(num_workers + 1): all_workers.append('{}:{}'.format(host, port)) port += 1 cluster['worker'] = all_workers return cluster
MIT License
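For concreteness, the cluster layout `main` builds via `cluster_spec` (defined in the surrounding module) for two workers and one parameter server, traced by hand from the loops above; each task is then launched as its own process with the `--job-name`/`--task` flags defined by the parser.

# Traced by hand from cluster_spec above.
spec = cluster_spec(2, 1)
# spec == {'ps': ['127.0.0.1:12222'],
#          'worker': ['127.0.0.1:12223', '127.0.0.1:12224', '127.0.0.1:12225']}
# Note the extra worker slot: the worker loop runs num_workers + 1 times.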
monarch-initiative/embiggen
embiggen/embedders/glove.py
GloVe.fit
python
def fit(
    self,
    X: Tuple[np.ndarray, np.ndarray],
    frequencies: np.ndarray,
    *args: List,
    epochs: int = 1000,
    batch_size: int = 2**20,
    early_stopping_monitor: str = "loss",
    early_stopping_min_delta: float = 0.001,
    early_stopping_patience: int = 10,
    early_stopping_mode: str = "min",
    reduce_lr_monitor: str = "loss",
    reduce_lr_min_delta: float = 0.01,
    reduce_lr_patience: int = 10,
    reduce_lr_mode: str = "min",
    reduce_lr_factor: float = 0.9,
    verbose: int = 1,
    **kwargs: Dict
) -> pd.DataFrame:
    return super().fit(
        GloveSequence(
            *X, frequencies,
            batch_size=batch_size,
            directed=self._directed,
            random_state=self._random_state
        ),
        *args,
        epochs=epochs,
        early_stopping_monitor=early_stopping_monitor,
        early_stopping_min_delta=early_stopping_min_delta,
        early_stopping_patience=early_stopping_patience,
        early_stopping_mode=early_stopping_mode,
        reduce_lr_monitor=reduce_lr_monitor,
        reduce_lr_min_delta=reduce_lr_min_delta,
        reduce_lr_patience=reduce_lr_patience,
        reduce_lr_mode=reduce_lr_mode,
        reduce_lr_factor=reduce_lr_factor,
        verbose=verbose,
        **kwargs
    )
Return pandas dataframe with training history.

Parameters
-----------------------
X: Tuple[np.ndarray, np.ndarray],
    Tuple with source and destinations.
frequencies: np.ndarray,
    The frequencies to predict.
*args: List,
    Other arguments to provide to the model.
epochs: int = 1000,
    Epochs to train the model for.
batch_size: int = 2**20,
    The batch size. Typically batch sizes for the GloVe model can be immense.
early_stopping_monitor: str = "loss",
    Metric to monitor for early stopping.
early_stopping_min_delta: float = 0.001,
    Minimum delta of metric to stop the training.
early_stopping_patience: int = 10,
    Number of epochs to wait for when the given minimum delta is not
    achieved after which trigger early stopping.
early_stopping_mode: str = "min",
    Direction of the variation of the monitored metric for early stopping.
reduce_lr_monitor: str = "loss",
    Metric to monitor for reducing learning rate.
reduce_lr_min_delta: float = 0.01,
    Minimum delta of metric to reduce learning rate.
reduce_lr_patience: int = 10,
    Number of epochs to wait for when the given minimum delta is not
    achieved after which reducing learning rate.
reduce_lr_mode: str = "min",
    Direction of the variation of the monitored metric for learning rate.
reduce_lr_factor: float = 0.9,
    Factor for reduction of learning rate.
verbose: int = 1,
    Whether to show the loading bar.
    Specifically, the options are:
    * 0 or False: No loading bar.
    * 1 or True: Showing only the loading bar for the epochs.
    * 2: Showing loading bar for both epochs and batches.
**kwargs: Dict,
    Additional kwargs to pass to the Keras fit call.

Raises
-----------------------
ValueError,
    If given verbose value is not within the available set (-1, 0, 1).

Returns
-----------------------
Dataframe with training history.
https://github.com/monarch-initiative/embiggen/blob/55b3c03d908f654a14ec0f36d1ed7d0c77258086/embiggen/embedders/glove.py#L177-L269
from typing import Dict, List, Tuple, Union import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras import backend as K from tensorflow.keras.layers import Add from tensorflow.keras.layers import Concatenate, Dot, Embedding, Flatten, Input from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Optimizer from ..sequences import GloveSequence from .embedder import Embedder class GloVe(Embedder): def __init__( self, vocabulary_size: int, embedding_size: int, embedding: Union[np.ndarray, pd.DataFrame] = None, extra_features: Union[np.ndarray, pd.DataFrame] = None, optimizer: Union[str, Optimizer] = None, alpha: float = 0.75, random_state: int = 42, directed: bool = False, use_gradient_centralization: bool = True, ): self._alpha = alpha self._random_state = random_state self._directed = directed super().__init__( vocabulary_size=vocabulary_size, embedding_size=embedding_size, embedding=embedding, extra_features=extra_features, optimizer=optimizer, use_gradient_centralization=use_gradient_centralization ) def _glove_loss(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> float: return K.sum( K.pow(K.clip(y_true, 0.0, 1.0), self._alpha) * K.square(y_pred - K.log(y_true)), axis=-1 ) def _build_model(self): left_input_layer = Input((1,), name="left_input_layer") right_input_layer = Input((1,), name="right_input_layer") trainable_left_embedding = Embedding( self._vocabulary_size, self._embedding_size, input_length=1, weights=None if self._embedding is None else [ self._embedding ], name=Embedder.TERMS_EMBEDDING_LAYER_NAME )(left_input_layer) trainable_right_embedding = Embedding( self._vocabulary_size, self._embedding_size, input_length=1, )(right_input_layer) if self._extra_features is not None: extra_features_matrix = Embedding( *self._extra_features, input_length=1, weights=self._extra_features, trainable=False, name="extra_features_matrix" ) trainable_left_embedding = Concatenate()([ extra_features_matrix(left_input_layer), trainable_left_embedding ]) trainable_right_embedding = Concatenate()([ extra_features_matrix(right_input_layer), trainable_right_embedding ]) dot_product_layer = Dot(axes=2)([ trainable_left_embedding, trainable_right_embedding ]) biases = [ Embedding(self._vocabulary_size, 1, input_length=1)(input_layer) for input_layer in (left_input_layer, right_input_layer) ] prediction = Flatten()(Add()([dot_product_layer, *biases])) glove = Model( inputs=[ left_input_layer, right_input_layer ], outputs=prediction, name="GloVe" ) return glove def _compile_model(self) -> Model: self._model.compile( loss=self._glove_loss, optimizer=self._optimizer )
BSD 3-Clause New or Revised License
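Illustrative usage sketch for the GloVe.fit record above (not part of the scraped source). It assumes only the constructor and fit signatures shown in the record; the toy co-occurrence arrays, vocabulary size, and batch size are made up for illustration.

import numpy as np
from embiggen.embedders.glove import GloVe  # module path as given in the record

# Hypothetical toy co-occurrence data: source ids, destination ids, frequencies.
sources = np.array([0, 1, 2, 3])
destinations = np.array([1, 2, 3, 0])
frequencies = np.array([3.0, 1.0, 2.0, 5.0], dtype=np.float32)

model = GloVe(vocabulary_size=4, embedding_size=8)
# fit() is documented to return a pandas DataFrame with the training history.
history = model.fit((sources, destinations), frequencies, epochs=10, batch_size=4)
print(history.tail())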
pokemongof/pokemongo-bot-desktop
build/pywin/Lib/inspect.py
formatargspec
python
def formatargspec(args, varargs=None, varkw=None, defaults=None, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq): specs = [] if defaults: firstdefault = len(args) - len(defaults) for i, arg in enumerate(args): spec = strseq(arg, formatarg, join) if defaults and i >= firstdefault: spec = spec + formatvalue(defaults[i - firstdefault]) specs.append(spec) if varargs is not None: specs.append(formatvarargs(varargs)) if varkw is not None: specs.append(formatvarkw(varkw)) return '(' + string.join(specs, ', ') + ')'
Format an argument spec from the 4 values returned by getargspec. The first four arguments are (args, varargs, varkw, defaults). The other four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth argument is an optional function to format the sequence of arguments.
https://github.com/pokemongof/pokemongo-bot-desktop/blob/4bfa94f0183406c6a86f93645eff7abd3ad4ced8/build/pywin/Lib/inspect.py#L845-L869
__author__ = 'Ka-Ping Yee <ping@lfw.org>' __date__ = '1 Jan 2001' import sys import os import types import string import re import dis import imp import tokenize import linecache from operator import attrgetter from collections import namedtuple CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8 CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40 TPFLAGS_IS_ABSTRACT = 1 << 20 def ismodule(object): return isinstance(object, types.ModuleType) def isclass(object): return isinstance(object, (type, types.ClassType)) def ismethod(object): return isinstance(object, types.MethodType) def ismethoddescriptor(object): return (hasattr(object, "__get__") and not hasattr(object, "__set__") and not ismethod(object) and not isfunction(object) and not isclass(object)) def isdatadescriptor(object): return (hasattr(object, "__set__") and hasattr(object, "__get__")) if hasattr(types, 'MemberDescriptorType'): def ismemberdescriptor(object): return isinstance(object, types.MemberDescriptorType) else: def ismemberdescriptor(object): return False if hasattr(types, 'GetSetDescriptorType'): def isgetsetdescriptor(object): return isinstance(object, types.GetSetDescriptorType) else: def isgetsetdescriptor(object): return False def isfunction(object): return isinstance(object, types.FunctionType) def isgeneratorfunction(object): return bool((isfunction(object) or ismethod(object)) and object.func_code.co_flags & CO_GENERATOR) def isgenerator(object): return isinstance(object, types.GeneratorType) def istraceback(object): return isinstance(object, types.TracebackType) def isframe(object): return isinstance(object, types.FrameType) def iscode(object): return isinstance(object, types.CodeType) def isbuiltin(object): return isinstance(object, types.BuiltinFunctionType) def isroutine(object): return (isbuiltin(object) or isfunction(object) or ismethod(object) or ismethoddescriptor(object)) def isabstract(object): return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT) def getmembers(object, predicate=None): results = [] for key in dir(object): try: value = getattr(object, key) except AttributeError: continue if not predicate or predicate(value): results.append((key, value)) results.sort() return results Attribute = namedtuple('Attribute', 'name kind defining_class object') def classify_class_attrs(cls): mro = getmro(cls) names = dir(cls) result = [] for name in names: homecls = None for base in (cls,) + mro: if name in base.__dict__: obj = base.__dict__[name] homecls = base break else: obj = getattr(cls, name) homecls = getattr(obj, "__objclass__", homecls) if isinstance(obj, staticmethod): kind = "static method" elif isinstance(obj, classmethod): kind = "class method" elif isinstance(obj, property): kind = "property" elif ismethoddescriptor(obj): kind = "method" elif isdatadescriptor(obj): kind = "data" else: obj_via_getattr = getattr(cls, name) if (ismethod(obj_via_getattr) or ismethoddescriptor(obj_via_getattr)): kind = "method" else: kind = "data" obj = obj_via_getattr result.append(Attribute(name, kind, homecls, obj)) return result def _searchbases(cls, accum): if cls in accum: return accum.append(cls) for base in cls.__bases__: _searchbases(base, accum) def getmro(cls): if hasattr(cls, "__mro__"): return cls.__mro__ else: result = [] _searchbases(cls, result) return tuple(result) def indentsize(line): expline = string.expandtabs(line) return len(expline) - len(string.lstrip(expline)) def getdoc(object): try: doc = object.__doc__ except AttributeError: return None if 
not isinstance(doc, types.StringTypes): return None return cleandoc(doc) def cleandoc(doc): try: lines = string.split(string.expandtabs(doc), '\n') except UnicodeError: return None else: margin = sys.maxint for line in lines[1:]: content = len(string.lstrip(line)) if content: indent = len(line) - content margin = min(margin, indent) if lines: lines[0] = lines[0].lstrip() if margin < sys.maxint: for i in range(1, len(lines)): lines[i] = lines[i][margin:] while lines and not lines[-1]: lines.pop() while lines and not lines[0]: lines.pop(0) return string.join(lines, '\n') def getfile(object): if ismodule(object): if hasattr(object, '__file__'): return object.__file__ raise TypeError('{!r} is a built-in module'.format(object)) if isclass(object): object = sys.modules.get(object.__module__) if hasattr(object, '__file__'): return object.__file__ raise TypeError('{!r} is a built-in class'.format(object)) if ismethod(object): object = object.im_func if isfunction(object): object = object.func_code if istraceback(object): object = object.tb_frame if isframe(object): object = object.f_code if iscode(object): return object.co_filename raise TypeError('{!r} is not a module, class, method, ' 'function, traceback, frame, or code object'.format(object)) ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type') def getmoduleinfo(path): filename = os.path.basename(path) suffixes = map(lambda info: (-len(info[0]), info[0], info[1], info[2]), imp.get_suffixes()) suffixes.sort() for neglen, suffix, mode, mtype in suffixes: if filename[neglen:] == suffix: return ModuleInfo(filename[:neglen], suffix, mode, mtype) def getmodulename(path): info = getmoduleinfo(path) if info: return info[0] def getsourcefile(object): filename = getfile(object) if string.lower(filename[-4:]) in ('.pyc', '.pyo'): filename = filename[:-4] + '.py' for suffix, mode, kind in imp.get_suffixes(): if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix: return None if os.path.exists(filename): return filename if hasattr(getmodule(object, filename), '__loader__'): return filename if filename in linecache.cache: return filename def getabsfile(object, _filename=None): if _filename is None: _filename = getsourcefile(object) or getfile(object) return os.path.normcase(os.path.abspath(_filename)) modulesbyfile = {} _filesbymodname = {} def getmodule(object, _filename=None): if ismodule(object): return object if hasattr(object, '__module__'): return sys.modules.get(object.__module__) if _filename is not None and _filename in modulesbyfile: return sys.modules.get(modulesbyfile[_filename]) try: file = getabsfile(object, _filename) except TypeError: return None if file in modulesbyfile: return sys.modules.get(modulesbyfile[file]) for modname, module in sys.modules.items(): if ismodule(module) and hasattr(module, '__file__'): f = module.__file__ if f == _filesbymodname.get(modname, None): continue _filesbymodname[modname] = f f = getabsfile(module) modulesbyfile[f] = modulesbyfile[ os.path.realpath(f)] = module.__name__ if file in modulesbyfile: return sys.modules.get(modulesbyfile[file]) main = sys.modules['__main__'] if not hasattr(object, '__name__'): return None if hasattr(main, object.__name__): mainobject = getattr(main, object.__name__) if mainobject is object: return main builtin = sys.modules['__builtin__'] if hasattr(builtin, object.__name__): builtinobject = getattr(builtin, object.__name__) if builtinobject is object: return builtin def findsource(object): file = getfile(object) sourcefile = getsourcefile(object) if 
not sourcefile and file[:1] + file[-1:] != '<>': raise IOError('source code not available') file = sourcefile if sourcefile else file module = getmodule(object, file) if module: lines = linecache.getlines(file, module.__dict__) else: lines = linecache.getlines(file) if not lines: raise IOError('could not get source code') if ismodule(object): return lines, 0 if isclass(object): name = object.__name__ pat = re.compile(r'^(\s*)class\s*' + name + r'\b') candidates = [] for i in range(len(lines)): match = pat.match(lines[i]) if match: if lines[i][0] == 'c': return lines, i candidates.append((match.group(1), i)) if candidates: candidates.sort() return lines, candidates[0][1] else: raise IOError('could not find class definition') if ismethod(object): object = object.im_func if isfunction(object): object = object.func_code if istraceback(object): object = object.tb_frame if isframe(object): object = object.f_code if iscode(object): if not hasattr(object, 'co_firstlineno'): raise IOError('could not find function definition') lnum = object.co_firstlineno - 1 pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)') while lnum > 0: if pat.match(lines[lnum]): break lnum = lnum - 1 return lines, lnum raise IOError('could not find code object') def getcomments(object): try: lines, lnum = findsource(object) except (IOError, TypeError): return None if ismodule(object): start = 0 if lines and lines[0][:2] == '#!': start = 1 while start < len(lines) and string.strip(lines[start]) in ('', '#'): start = start + 1 if start < len(lines) and lines[start][:1] == '#': comments = [] end = start while end < len(lines) and lines[end][:1] == '#': comments.append(string.expandtabs(lines[end])) end = end + 1 return string.join(comments, '') elif lnum > 0: indent = indentsize(lines[lnum]) end = lnum - 1 if end >= 0 and string.lstrip(lines[end])[:1] == '#' and indentsize(lines[end]) == indent: comments = [string.lstrip(string.expandtabs(lines[end]))] if end > 0: end = end - 1 comment = string.lstrip(string.expandtabs(lines[end])) while comment[:1] == '#' and indentsize(lines[end]) == indent: comments[:0] = [comment] end = end - 1 if end < 0: break comment = string.lstrip(string.expandtabs(lines[end])) while comments and string.strip(comments[0]) == '#': comments[:1] = [] while comments and string.strip(comments[-1]) == '#': comments[-1:] = [] return string.join(comments, '') class EndOfBlock(Exception): pass class BlockFinder: def __init__(self): self.indent = 0 self.islambda = False self.started = False self.passline = False self.last = 1 def tokeneater(self, type, token, srow_scol, erow_ecol, line): srow, scol = srow_scol erow, ecol = erow_ecol if not self.started: if token in ("def", "class", "lambda"): if token == "lambda": self.islambda = True self.started = True self.passline = True elif type == tokenize.NEWLINE: self.passline = False self.last = srow if self.islambda: raise EndOfBlock elif self.passline: pass elif type == tokenize.INDENT: self.indent = self.indent + 1 self.passline = True elif type == tokenize.DEDENT: self.indent = self.indent - 1 if self.indent <= 0: raise EndOfBlock elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL): raise EndOfBlock def getblock(lines): blockfinder = BlockFinder() try: tokenize.tokenize(iter(lines).next, blockfinder.tokeneater) except (EndOfBlock, IndentationError): pass return lines[:blockfinder.last] def getsourcelines(object): lines, lnum = findsource(object) if ismodule(object): return lines, 0 else: return getblock(lines[lnum:]), lnum + 1 def 
getsource(object): lines, lnum = getsourcelines(object) return string.join(lines, '') def walktree(classes, children, parent): results = [] classes.sort(key=attrgetter('__module__', '__name__')) for c in classes: results.append((c, c.__bases__)) if c in children: results.append(walktree(children[c], children, c)) return results def getclasstree(classes, unique=0): children = {} roots = [] for c in classes: if c.__bases__: for parent in c.__bases__: if not parent in children: children[parent] = [] if c not in children[parent]: children[parent].append(c) if unique and parent in classes: break elif c not in roots: roots.append(c) for parent in children: if parent not in classes: roots.append(parent) return walktree(roots, children, None) Arguments = namedtuple('Arguments', 'args varargs keywords') def getargs(co): if not iscode(co): raise TypeError('{!r} is not a code object'.format(co)) nargs = co.co_argcount names = co.co_varnames args = list(names[:nargs]) step = 0 for i in range(nargs): if args[i][:1] in ('', '.'): stack, remain, count = [], [], [] while step < len(co.co_code): op = ord(co.co_code[step]) step = step + 1 if op >= dis.HAVE_ARGUMENT: opname = dis.opname[op] value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256 step = step + 2 if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'): remain.append(value) count.append(value) elif opname == 'STORE_FAST': stack.append(names[value]) if not remain: stack[0] = [stack[0]] break else: remain[-1] = remain[-1] - 1 while remain[-1] == 0: remain.pop() size = count.pop() stack[-size:] = [stack[-size:]] if not remain: break remain[-1] = remain[-1] - 1 if not remain: break args[i] = stack[0] varargs = None if co.co_flags & CO_VARARGS: varargs = co.co_varnames[nargs] nargs = nargs + 1 varkw = None if co.co_flags & CO_VARKEYWORDS: varkw = co.co_varnames[nargs] return Arguments(args, varargs, varkw) ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults') def getargspec(func): if ismethod(func): func = func.im_func if not isfunction(func): raise TypeError('{!r} is not a Python function'.format(func)) args, varargs, varkw = getargs(func.func_code) return ArgSpec(args, varargs, varkw, func.func_defaults) ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals') def getargvalues(frame): args, varargs, varkw = getargs(frame.f_code) return ArgInfo(args, varargs, varkw, frame.f_locals) def joinseq(seq): if len(seq) == 1: return '(' + seq[0] + ',)' else: return '(' + string.join(seq, ', ') + ')' def strseq(object, convert, join=joinseq): if type(object) in (list, tuple): return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object)) else: return convert(object)
MIT License
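A short usage sketch for the formatargspec record above (illustration only, not from the scraped source), using the standard-library inspect module that this file vendors; getargspec returns exactly the four values formatargspec expects.

import inspect

def example(a, b=2, *args, **kwargs):
    pass

spec = inspect.getargspec(example)
# Prints "(a, b=2, *args, **kwargs)"
print(inspect.formatargspec(*spec))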
elcorto/pwtools
pwtools/crys.py
volume_cc
python
def volume_cc(cryst_const): assert len(cryst_const) == 6, "shape must be (6,)" a = cryst_const[0] b = cryst_const[1] c = cryst_const[2] alpha = cryst_const[3]*pi/180 beta = cryst_const[4]*pi/180 gamma = cryst_const[5]*pi/180 return a*b*c*sqrt(1+ 2*cos(alpha)*cos(beta)*cos(gamma) - cos(alpha)**2 - cos(beta)**2 - cos(gamma)**2)
Volume of the unit cell from crystallographic constants [1]_. Parameters ---------- %(cryst_const_doc)s Returns ------- volume, unit: [a]**3 References ---------- .. [1] http://en.wikipedia.org/wiki/Parallelepiped
https://github.com/elcorto/pwtools/blob/99831540c6eb1fc7e8bd9b1ce61375b330f4f43e/pwtools/crys.py#L100-L123
from math import acos, pi, sin, cos, sqrt import textwrap import time import tempfile import copy import itertools import numpy as np from scipy.linalg import inv from pwtools import common, signal, num, atomic_data, constants, _flib from pwtools.common import assert_cond from pwtools.decorators import crys_add_doc from pwtools.base import FlexibleGetters from pwtools.constants import Angstrom from pwtools.num import fempty, rms3d, match_mask, norm import warnings def angle(x,y): return acos(np.dot(x,y)/norm(x)/norm(y))*180.0/pi @crys_add_doc def volume_cell(cell): assert_cond(cell.shape == (3,3), "input must be (3,3) array") return abs(np.linalg.det(cell)) def volume_cell3d(cell, axis=0): assert cell.ndim == 3 sl = [slice(None)]*cell.ndim ret = [] for ii in range(cell.shape[axis]): sl[axis] = ii ret.append(volume_cell(cell[tuple(sl)])) return np.array(ret) @crys_add_doc
BSD 3-Clause New or Revised License
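A quick sanity check for the volume_cc record above (illustration, not from the source): with all angles at 90 degrees the cosine terms vanish and the volume reduces to a*b*c.

import numpy as np
from pwtools.crys import volume_cc  # module path as given in the record

# Orthorhombic cell: a=3, b=4, c=5, alpha=beta=gamma=90 degrees -> volume 60.
cryst_const = [3.0, 4.0, 5.0, 90.0, 90.0, 90.0]
assert np.isclose(volume_cc(cryst_const), 60.0)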
furkanom/tf-ssd
utils/bbox_utils.py
renormalize_bboxes_with_min_max
python
def renormalize_bboxes_with_min_max(bboxes, min_max): y_min, x_min, y_max, x_max = tf.split(min_max, 4) renomalized_bboxes = bboxes - tf.concat([y_min, x_min, y_min, x_min], -1) renomalized_bboxes /= tf.concat([y_max-y_min, x_max-x_min, y_max-y_min, x_max-x_min], -1) return tf.clip_by_value(renomalized_bboxes, 0, 1)
Renormalizing given bounding boxes to the new boundaries. r = (x - min) / (max - min) inputs: bboxes = (total_bboxes, [y1, x1, y2, x2]) min_max = ([y_min, x_min, y_max, x_max])
https://github.com/furkanom/tf-ssd/blob/734bfd0cd1343b424bfad59c4b8c3cbef4775d86/utils/bbox_utils.py#L178-L188
import tensorflow as tf def non_max_suppression(pred_bboxes, pred_labels, **kwargs): return tf.image.combined_non_max_suppression( pred_bboxes, pred_labels, **kwargs ) def generate_iou_map(bboxes, gt_boxes, transpose_perm=[0, 2, 1]): gt_rank = tf.rank(gt_boxes) gt_expand_axis = gt_rank - 2 bbox_y1, bbox_x1, bbox_y2, bbox_x2 = tf.split(bboxes, 4, axis=-1) gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(gt_boxes, 4, axis=-1) gt_area = tf.squeeze((gt_y2 - gt_y1) * (gt_x2 - gt_x1), axis=-1) bbox_area = tf.squeeze((bbox_y2 - bbox_y1) * (bbox_x2 - bbox_x1), axis=-1) x_top = tf.maximum(bbox_x1, tf.transpose(gt_x1, transpose_perm)) y_top = tf.maximum(bbox_y1, tf.transpose(gt_y1, transpose_perm)) x_bottom = tf.minimum(bbox_x2, tf.transpose(gt_x2, transpose_perm)) y_bottom = tf.minimum(bbox_y2, tf.transpose(gt_y2, transpose_perm)) intersection_area = tf.maximum(x_bottom - x_top, 0) * tf.maximum(y_bottom - y_top, 0) union_area = (tf.expand_dims(bbox_area, -1) + tf.expand_dims(gt_area, gt_expand_axis) - intersection_area) return intersection_area / union_area def get_bboxes_from_deltas(prior_boxes, deltas): all_pbox_width = prior_boxes[..., 3] - prior_boxes[..., 1] all_pbox_height = prior_boxes[..., 2] - prior_boxes[..., 0] all_pbox_ctr_x = prior_boxes[..., 1] + 0.5 * all_pbox_width all_pbox_ctr_y = prior_boxes[..., 0] + 0.5 * all_pbox_height all_bbox_width = tf.exp(deltas[..., 3]) * all_pbox_width all_bbox_height = tf.exp(deltas[..., 2]) * all_pbox_height all_bbox_ctr_x = (deltas[..., 1] * all_pbox_width) + all_pbox_ctr_x all_bbox_ctr_y = (deltas[..., 0] * all_pbox_height) + all_pbox_ctr_y y1 = all_bbox_ctr_y - (0.5 * all_bbox_height) x1 = all_bbox_ctr_x - (0.5 * all_bbox_width) y2 = all_bbox_height + y1 x2 = all_bbox_width + x1 return tf.stack([y1, x1, y2, x2], axis=-1) def get_deltas_from_bboxes(bboxes, gt_boxes): bbox_width = bboxes[..., 3] - bboxes[..., 1] bbox_height = bboxes[..., 2] - bboxes[..., 0] bbox_ctr_x = bboxes[..., 1] + 0.5 * bbox_width bbox_ctr_y = bboxes[..., 0] + 0.5 * bbox_height gt_width = gt_boxes[..., 3] - gt_boxes[..., 1] gt_height = gt_boxes[..., 2] - gt_boxes[..., 0] gt_ctr_x = gt_boxes[..., 1] + 0.5 * gt_width gt_ctr_y = gt_boxes[..., 0] + 0.5 * gt_height bbox_width = tf.where(tf.equal(bbox_width, 0), 1e-3, bbox_width) bbox_height = tf.where(tf.equal(bbox_height, 0), 1e-3, bbox_height) delta_x = tf.where(tf.equal(gt_width, 0), tf.zeros_like(gt_width), tf.truediv((gt_ctr_x - bbox_ctr_x), bbox_width)) delta_y = tf.where(tf.equal(gt_height, 0), tf.zeros_like(gt_height), tf.truediv((gt_ctr_y - bbox_ctr_y), bbox_height)) delta_w = tf.where(tf.equal(gt_width, 0), tf.zeros_like(gt_width), tf.math.log(gt_width / bbox_width)) delta_h = tf.where(tf.equal(gt_height, 0), tf.zeros_like(gt_height), tf.math.log(gt_height / bbox_height)) return tf.stack([delta_y, delta_x, delta_h, delta_w], axis=-1) def get_scale_for_nth_feature_map(k, m=6, scale_min=0.2, scale_max=0.9): return scale_min + ((scale_max - scale_min) / (m - 1)) * (k - 1) def generate_base_prior_boxes(aspect_ratios, feature_map_index, total_feature_map): current_scale = get_scale_for_nth_feature_map(feature_map_index, m=total_feature_map) next_scale = get_scale_for_nth_feature_map(feature_map_index + 1, m=total_feature_map) base_prior_boxes = [] for aspect_ratio in aspect_ratios: height = current_scale / tf.sqrt(aspect_ratio) width = current_scale * tf.sqrt(aspect_ratio) base_prior_boxes.append([-height/2, -width/2, height/2, width/2]) height = width = tf.sqrt(current_scale * next_scale) base_prior_boxes.append([-height/2, -width/2, 
height/2, width/2]) return tf.cast(base_prior_boxes, dtype=tf.float32) def generate_prior_boxes(feature_map_shapes, aspect_ratios): prior_boxes = [] for i, feature_map_shape in enumerate(feature_map_shapes): base_prior_boxes = generate_base_prior_boxes(aspect_ratios[i], i+1, len(feature_map_shapes)) stride = 1 / feature_map_shape grid_coords = tf.cast(tf.range(0, feature_map_shape) / feature_map_shape + stride / 2, dtype=tf.float32) grid_x, grid_y = tf.meshgrid(grid_coords, grid_coords) flat_grid_x, flat_grid_y = tf.reshape(grid_x, (-1, )), tf.reshape(grid_y, (-1, )) grid_map = tf.stack([flat_grid_y, flat_grid_x, flat_grid_y, flat_grid_x], -1) prior_boxes_for_feature_map = tf.reshape(base_prior_boxes, (1, -1, 4)) + tf.reshape(grid_map, (-1, 1, 4)) prior_boxes_for_feature_map = tf.reshape(prior_boxes_for_feature_map, (-1, 4)) prior_boxes.append(prior_boxes_for_feature_map) prior_boxes = tf.concat(prior_boxes, axis=0) return tf.clip_by_value(prior_boxes, 0, 1)
Apache License 2.0
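A small worked example for the renormalize_bboxes_with_min_max record above (illustration only): applying r = (x - min) / (max - min) with boundaries [0.2, 0.2, 0.8, 0.8].

import tensorflow as tf
from utils.bbox_utils import renormalize_bboxes_with_min_max  # module path as given in the record

bboxes = tf.constant([[0.2, 0.2, 0.8, 0.8],
                      [0.5, 0.5, 0.8, 0.8]])
min_max = tf.constant([0.2, 0.2, 0.8, 0.8])
# Expected result: [[0.0, 0.0, 1.0, 1.0], [0.5, 0.5, 1.0, 1.0]]
print(renormalize_bboxes_with_min_max(bboxes, min_max))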
google/openhtf
openhtf/util/logs.py
configure_logging
python
def configure_logging(): htf_logger = logging.getLogger(LOGGER_PREFIX) htf_logger.propagate = False htf_logger.setLevel(logging.DEBUG) if CLI_LOGGING_VERBOSITY == 0: htf_logger.addHandler(logging.NullHandler()) return if CLI_LOGGING_VERBOSITY == 1: logging_level = logging.INFO else: logging_level = logging.DEBUG cli_handler = KillableThreadSafeStreamHandler(stream=sys.stdout) cli_handler.setFormatter(CliFormatter()) cli_handler.setLevel(logging_level) cli_handler.addFilter(MAC_FILTER) htf_logger.addHandler(cli_handler) cli_handler.addFilter(console_output.CliQuietFilter())
One-time initialization of loggers. See module docstring for more info.
https://github.com/google/openhtf/blob/4646aa6b9ba67532ce7e8743ce16d7bd4369ad3d/openhtf/util/logs.py#L335-L362
import collections import datetime import logging import os import re import sys import textwrap from openhtf.util import argv from openhtf.util import console_output from openhtf.util import functions from openhtf.util import threads import six CLI_LOGGING_VERBOSITY = 0 ARG_PARSER = argv.module_parser() ARG_PARSER.add_argument( '-v', action=argv.StoreRepsInModule, target='%s.CLI_LOGGING_VERBOSITY' % __name__, help=textwrap.dedent("""\ CLI logging verbosity. Can be repeated to increase verbosity (i.e. -v, -vv, -vvv).""")) LOGGER_PREFIX = 'openhtf' RECORD_LOGGER_PREFIX = '.'.join((LOGGER_PREFIX, 'test_record')) RECORD_LOGGER_RE = re.compile(r'%s\.(?P<test_uid>[^.]*)\.?' % RECORD_LOGGER_PREFIX) SUBSYSTEM_LOGGER_RE = re.compile( r'%s\.[^.]*\.(?P<subsys>plug|phase)\.(?P<id>[^.]*)' % RECORD_LOGGER_PREFIX) _LOG_ONCE_SEEN = set() class LogRecord( collections.namedtuple('LogRecord', [ 'level', 'logger_name', 'source', 'lineno', 'timestamp_millis', 'message', ])): pass class HtfTestLogger(logging.Logger): def getChild(self, suffix): child = HtfTestLogger('.'.join((self.name, suffix))) child.parent = self return child def get_record_logger_for(test_uid): htf_logger = logging.getLogger(RECORD_LOGGER_PREFIX) record_logger = HtfTestLogger('.'.join(((RECORD_LOGGER_PREFIX, test_uid)))) record_logger.parent = htf_logger return record_logger def initialize_record_handler(test_uid, test_record, notify_update): htf_logger = logging.getLogger(LOGGER_PREFIX) htf_logger.addHandler(RecordHandler(test_uid, test_record, notify_update)) def remove_record_handler(test_uid): handlers = logging.getLogger(LOGGER_PREFIX).handlers for handler in handlers: if isinstance(handler, RecordHandler) and handler.test_uid is test_uid: handlers.remove(handler) break def log_once(log_func, msg, *args, **kwargs): if msg not in _LOG_ONCE_SEEN: log_func(msg, *args, **kwargs) _LOG_ONCE_SEEN.add(msg) class MacAddressLogFilter(logging.Filter): MAC_REPLACE_RE = re.compile( r""" ((?:[\dA-F]{2}:){3}) # 3-part prefix, f8:8f:ca means google (?:[\dA-F]{2}(:|\b)){3} # the remaining octets """, re.IGNORECASE | re.VERBOSE) MAC_REPLACEMENT = r'\1<REDACTED>' def filter(self, record): if self.MAC_REPLACE_RE.search(record.getMessage()): if isinstance(record.msg, six.string_types): record.msg = self.MAC_REPLACE_RE.sub(self.MAC_REPLACEMENT, record.msg) record.args = tuple([ self.MAC_REPLACE_RE.sub(self.MAC_REPLACEMENT, str(arg)) if isinstance(arg, six.string_types) else arg for arg in record.args ]) else: record.msg = self.MAC_REPLACE_RE.sub(self.MAC_REPLACEMENT, record.getMessage()) return True MAC_FILTER = MacAddressLogFilter() class TestUidFilter(logging.Filter): def __init__(self, test_uid): super(TestUidFilter, self).__init__() self.test_uid = test_uid def filter(self, record): match = RECORD_LOGGER_RE.match(record.name) if not match: return True return match.group('test_uid') == self.test_uid class KillableThreadSafeStreamHandler(logging.StreamHandler): def handle(self, record): with threads.safe_lock_release_context(self.lock): return super(KillableThreadSafeStreamHandler, self).handle(record) class RecordHandler(logging.Handler): def __init__(self, test_uid, test_record, notify_update): super(RecordHandler, self).__init__() self.test_uid = test_uid self._test_record = test_record self._notify_update = notify_update self.addFilter(MAC_FILTER) self.addFilter(TestUidFilter(test_uid)) def handle(self, record): with threads.safe_lock_release_context(self.lock): return super(RecordHandler, self).handle(record) def emit(self, record): try: message = 
self.format(record) log_record = LogRecord( record.levelno, record.name, os.path.basename(record.pathname), record.lineno, int(record.created * 1000), message, ) self._test_record.add_log_record(log_record) self._notify_update() except Exception: self.handleError(record) class CliFormatter(logging.Formatter): def format(self, record): super(CliFormatter, self).format(record) localized_time = datetime.datetime.fromtimestamp(record.created) terse_time = localized_time.strftime(u'%H:%M:%S') terse_level = record.levelname[0] terse_name = record.name.split('.')[-1] match = RECORD_LOGGER_RE.match(record.name) if match: subsys_match = SUBSYSTEM_LOGGER_RE.match(record.name) if subsys_match: terse_name = '<{subsys}: {id}>'.format( subsys=subsys_match.group('subsys'), id=subsys_match.group('id')) else: terse_name = '<test %s>' % match.group('test_uid')[-5:] return '{lvl} {time} {logger} - {msg}'.format( lvl=terse_level, time=terse_time, logger=terse_name, msg=record.message) @functions.call_once
Apache License 2.0
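A minimal setup sketch for the configure_logging record above (illustration only). In normal use the verbosity comes from repeated -v flags parsed by the module's argv handling; setting CLI_LOGGING_VERBOSITY directly here is just for demonstration.

import logging
from openhtf.util import logs  # module path as given in the record

logs.CLI_LOGGING_VERBOSITY = 2   # roughly equivalent to passing -vv on the command line
logs.configure_logging()         # attaches a CLI stream handler to the "openhtf" logger

logging.getLogger("openhtf").debug("debug output now reaches stdout")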
thingsboard/python_tb_rest_client
tb_rest_client/models/models_pe/relations_search_parameters.py
RelationsSearchParameters.fetch_last_level_only
python
def fetch_last_level_only(self): return self._fetch_last_level_only
Gets the fetch_last_level_only of this RelationsSearchParameters. # noqa: E501 :return: The fetch_last_level_only of this RelationsSearchParameters. # noqa: E501 :rtype: bool
https://github.com/thingsboard/python_tb_rest_client/blob/87c6a3703974fc8a86e4c72c444168ee2b758ecb/tb_rest_client/models/models_pe/relations_search_parameters.py#L224-L231
import pprint import re import six class RelationsSearchParameters(object): swagger_types = { 'entity_id': 'EntityId', 'root_id': 'str', 'root_type': 'str', 'direction': 'str', 'relation_type_group': 'str', 'max_level': 'int', 'fetch_last_level_only': 'bool' } attribute_map = { 'entity_id': 'entityId', 'root_id': 'rootId', 'root_type': 'rootType', 'direction': 'direction', 'relation_type_group': 'relationTypeGroup', 'max_level': 'maxLevel', 'fetch_last_level_only': 'fetchLastLevelOnly' } def __init__(self, entity_id=None, root_id=None, root_type=None, direction=None, relation_type_group=None, max_level=None, fetch_last_level_only=None): self._entity_id = None self._root_id = None self._root_type = None self._direction = None self._relation_type_group = None self._max_level = None self._fetch_last_level_only = None self.discriminator = None if entity_id is not None: self.entity_id = entity_id self.root_id = root_id self.root_type = root_type self.direction = direction self.relation_type_group = relation_type_group self.max_level = max_level self.fetch_last_level_only = fetch_last_level_only @property def entity_id(self): return self._entity_id @entity_id.setter def entity_id(self, entity_id): self._entity_id = entity_id @property def root_id(self): return self._root_id @root_id.setter def root_id(self, root_id): if root_id is None: raise ValueError("Invalid value for `root_id`, must not be `None`") self._root_id = root_id @property def root_type(self): return self._root_type @root_type.setter def root_type(self, root_type): if root_type is None: raise ValueError("Invalid value for `root_type`, must not be `None`") allowed_values = ["TENANT", "CUSTOMER", "USER", "DASHBOARD", "ASSET", "DEVICE", "ALARM", "ENTITY_GROUP", "CONVERTER", "INTEGRATION", "RULE_CHAIN", "RULE_NODE", "SCHEDULER_EVENT", "BLOB_ENTITY", "ENTITY_VIEW", "WIDGETS_BUNDLE", "WIDGET_TYPE", "ROLE", "GROUP_PERMISSION", "TENANT_PROFILE", "DEVICE_PROFILE", "API_USAGE_STATE", "TB_RESOURCE", "OTA_PACKAGE", "EDGE", "RPC"] if root_type not in allowed_values: raise ValueError( "Invalid value for `root_type` ({0}), must be one of {1}" .format(root_type, allowed_values) ) self._root_type = root_type @property def direction(self): return self._direction @direction.setter def direction(self, direction): if direction is None: raise ValueError("Invalid value for `direction`, must not be `None`") allowed_values = ["FROM", "TO"] if direction not in allowed_values: raise ValueError( "Invalid value for `direction` ({0}), must be one of {1}" .format(direction, allowed_values) ) self._direction = direction @property def relation_type_group(self): return self._relation_type_group @relation_type_group.setter def relation_type_group(self, relation_type_group): if relation_type_group is None: raise ValueError("Invalid value for `relation_type_group`, must not be `None`") allowed_values = ["COMMON", "ALARM", "DASHBOARD", "TO_ENTITY_GROUP", "FROM_ENTITY_GROUP", "RULE_CHAIN", "RULE_NODE", "EDGE", "EDGE_AUTO_ASSIGN_RULE_CHAIN"] if relation_type_group not in allowed_values: raise ValueError( "Invalid value for `relation_type_group` ({0}), must be one of {1}" .format(relation_type_group, allowed_values) ) self._relation_type_group = relation_type_group @property def max_level(self): return self._max_level @max_level.setter def max_level(self, max_level): if max_level is None: raise ValueError("Invalid value for `max_level`, must not be `None`") self._max_level = max_level @property
Apache License 2.0
shenxn/ha-dyson
custom_components/dyson_local/__init__.py
DysonEntity.unique_id
python
def unique_id(self) -> str: if self.sub_unique_id is None: return self._device.serial return f"{self._device.serial}-{self.sub_unique_id}"
Return the entity unique id.
https://github.com/shenxn/ha-dyson/blob/7b0d18ed5ea943a0d836005190bdd676c55d48d4/custom_components/dyson_local/__init__.py#L199-L203
import asyncio from datetime import timedelta from functools import partial import logging from typing import List, Optional from libdyson import ( Dyson360Eye, Dyson360Heurist, DysonPureHotCool, DysonPureHotCoolLink, DysonPureHumidifyCool, MessageType, get_device, ) from libdyson.discovery import DysonDiscovery from libdyson.dyson_device import DysonDevice from libdyson.exceptions import DysonException from homeassistant.components.zeroconf import async_get_instance from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_HOST, EVENT_HOMEASSISTANT_STOP from homeassistant.core import HomeAssistant, callback from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers.entity import Entity from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import ( CONF_CREDENTIAL, CONF_DEVICE_TYPE, CONF_SERIAL, DATA_COORDINATORS, DATA_DEVICES, DATA_DISCOVERY, DOMAIN, ) _LOGGER = logging.getLogger(__name__) ENVIRONMENTAL_DATA_UPDATE_INTERVAL = timedelta(seconds=30) async def async_setup(hass: HomeAssistant, config: dict) -> bool: hass.data[DOMAIN] = { DATA_DEVICES: {}, DATA_COORDINATORS: {}, DATA_DISCOVERY: None, } return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: device = get_device( entry.data[CONF_SERIAL], entry.data[CONF_CREDENTIAL], entry.data[CONF_DEVICE_TYPE], ) if not isinstance(device, Dyson360Eye) and not isinstance(device, Dyson360Heurist): async def async_update_data(): try: await hass.async_add_executor_job(device.request_environmental_data) except DysonException as err: raise UpdateFailed("Failed to request environmental data") from err coordinator = DataUpdateCoordinator( hass, _LOGGER, name="environmental", update_method=async_update_data, update_interval=ENVIRONMENTAL_DATA_UPDATE_INTERVAL, ) else: coordinator = None async def _async_forward_entry_setup(): for component in _async_get_platforms(device): hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, component) ) def setup_entry(host: str, is_discovery: bool = True) -> bool: try: device.connect(host) except DysonException: if is_discovery: _LOGGER.error( "Failed to connect to device %s at %s", device.serial, host, ) return raise ConfigEntryNotReady hass.data[DOMAIN][DATA_DEVICES][entry.entry_id] = device hass.data[DOMAIN][DATA_COORDINATORS][entry.entry_id] = coordinator asyncio.run_coroutine_threadsafe( _async_forward_entry_setup(), hass.loop ).result() host = entry.data.get(CONF_HOST) if host: await hass.async_add_executor_job( partial(setup_entry, host, is_discovery=False) ) else: discovery = hass.data[DOMAIN][DATA_DISCOVERY] if discovery is None: discovery = DysonDiscovery() hass.data[DOMAIN][DATA_DISCOVERY] = discovery _LOGGER.debug("Starting dyson discovery") discovery.start_discovery(await async_get_instance(hass)) def stop_discovery(_): _LOGGER.debug("Stopping dyson discovery") discovery.stop_discovery() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_discovery) await hass.async_add_executor_job( discovery.register_device, device, setup_entry ) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: device = hass.data[DOMAIN][DATA_DEVICES][entry.entry_id] ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(entry, component) for component in _async_get_platforms(device) ] ) ) if ok: hass.data[DOMAIN][DATA_DEVICES].pop(entry.entry_id) hass.data[DOMAIN][DATA_COORDINATORS].pop(entry.entry_id) 
await hass.async_add_executor_job(device.disconnect) return ok @callback def _async_get_platforms(device: DysonDevice) -> List[str]: if isinstance(device, Dyson360Eye) or isinstance(device, Dyson360Heurist): return ["binary_sensor", "sensor", "vacuum"] platforms = ["air_quality", "fan", "sensor", "switch"] if isinstance(device, DysonPureHotCool) or isinstance(device, DysonPureHotCoolLink): platforms.append("climate") if isinstance(device, DysonPureHumidifyCool): platforms.append("humidifier") return platforms class DysonEntity(Entity): _MESSAGE_TYPE = MessageType.STATE def __init__(self, device: DysonDevice, name: str): self._device = device self._name = name async def async_added_to_hass(self) -> None: self._device.add_message_listener(self._on_message) def _on_message(self, message_type: MessageType) -> None: if self._MESSAGE_TYPE is None or message_type == self._MESSAGE_TYPE: self.schedule_update_ha_state() @property def should_poll(self) -> bool: return False @property def name(self) -> str: if self.sub_name is None: return self._name return f"{self._name} {self.sub_name}" @property def sub_name(self) -> Optional[str]: return None @property
MIT License
nikcub/floyd
floyd/util/dateformat.py
DateFormat.b
python
def b(self): return MONTHS_3[self.data.month]
Month, textual, 3 letters, lowercase; e.g. 'jan'
https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/util/dateformat.py#L234-L236
import re import time import calendar import time from datetime import tzinfo, timedelta from floyd.util.unicode import force_unicode, force_utf8 from floyd.util.dates import MONTHS, MONTHS_3, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR from floyd.util.translation import ugettext as _ re_formatchars = re.compile(r'(?<!\\)([aAbBcdDfFgGhHiIjlLmMnNOPrsStTUuwWyYzZ])') re_escaped = re.compile(r'\\(.)') try: from email.utils import formatdate def HTTPDate(timeval=None): return formatdate(timeval, usegmt=True) except ImportError: from rfc822 import formatdate as HTTPDate class LocalTimezone(tzinfo): def __init__(self, dt): tzinfo.__init__(self) self._tzname = self.tzname(dt) def __repr__(self): return smart_str(self._tzname) def utcoffset(self, dt): if self._isdst(dt): return timedelta(seconds=-time.altzone) else: return timedelta(seconds=-time.timezone) def dst(self, dt): if self._isdst(dt): return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone) else: return timedelta(0) def tzname(self, dt): try: return force_unicode(time.tzname[self._isdst(dt)], 'utf-8') except UnicodeDecodeError: return None def _isdst(self, dt): tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1) try: stamp = time.mktime(tt) except (OverflowError, ValueError): tt = (2037,) + tt[1:] stamp = time.mktime(tt) tt = time.localtime(stamp) return tt.tm_isdst > 0 class Formatter(object): def format(self, formatstr): pieces = [] for i, piece in enumerate(re_formatchars.split(force_unicode(formatstr))): if i % 2: pieces.append(force_unicode(getattr(self, piece)())) elif piece: pieces.append(re_escaped.sub(r'\1', piece)) return u''.join(pieces) class TimeFormat(Formatter): def __init__(self, t): self.data = t def a(self): if self.data.hour > 11: return _('p.m.') return _('a.m.') def A(self): if self.data.hour > 11: return _('PM') return _('AM') def B(self): raise NotImplementedError def f(self): if self.data.minute == 0: return self.g() return u'%s:%s' % (self.g(), self.i()) def g(self): if self.data.hour == 0: return 12 if self.data.hour > 12: return self.data.hour - 12 return self.data.hour def G(self): return self.data.hour def h(self): return u'%02d' % self.g() def H(self): return u'%02d' % self.G() def i(self): return u'%02d' % self.data.minute def P(self): if self.data.minute == 0 and self.data.hour == 0: return _('midnight') if self.data.minute == 0 and self.data.hour == 12: return _('noon') return u'%s %s' % (self.f(), self.a()) def s(self): return u'%02d' % self.data.second def u(self): return self.data.microsecond class DateFormat(TimeFormat): year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334] def __init__(self, dt): self.data = dt self.timezone = getattr(dt, 'tzinfo', None) if hasattr(self.data, 'hour') and not self.timezone: self.timezone = LocalTimezone(dt)
BSD 2-Clause Simplified License
docusign/docusign-python-client
docusign_esign/models/user_signature.py
UserSignature.image_base64
python
def image_base64(self): return self._image_base64
Gets the image_base64 of this UserSignature. # noqa: E501 # noqa: E501 :return: The image_base64 of this UserSignature. # noqa: E501 :rtype: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/user_signature.py#L328-L336
import pprint import re import six from docusign_esign.client.configuration import Configuration class UserSignature(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'adopted_date_time': 'str', 'created_date_time': 'str', 'custom_field': 'str', 'date_stamp_properties': 'DateStampProperties', 'disallow_user_resize_stamp': 'str', 'error_details': 'ErrorDetails', 'external_id': 'str', 'image_base64': 'str', 'image_type': 'str', 'initials150_image_id': 'str', 'initials_image_uri': 'str', 'is_default': 'str', 'last_modified_date_time': 'str', 'nrds_id': 'str', 'nrds_last_name': 'str', 'nrds_status': 'str', 'phonetic_name': 'str', 'signature150_image_id': 'str', 'signature_font': 'str', 'signature_id': 'str', 'signature_image_uri': 'str', 'signature_initials': 'str', 'signature_name': 'str', 'signature_rights': 'str', 'signature_type': 'str', 'stamp_format': 'str', 'stamp_image_uri': 'str', 'stamp_size_mm': 'str', 'stamp_type': 'str', 'status': 'str' } attribute_map = { 'adopted_date_time': 'adoptedDateTime', 'created_date_time': 'createdDateTime', 'custom_field': 'customField', 'date_stamp_properties': 'dateStampProperties', 'disallow_user_resize_stamp': 'disallowUserResizeStamp', 'error_details': 'errorDetails', 'external_id': 'externalID', 'image_base64': 'imageBase64', 'image_type': 'imageType', 'initials150_image_id': 'initials150ImageId', 'initials_image_uri': 'initialsImageUri', 'is_default': 'isDefault', 'last_modified_date_time': 'lastModifiedDateTime', 'nrds_id': 'nrdsId', 'nrds_last_name': 'nrdsLastName', 'nrds_status': 'nrdsStatus', 'phonetic_name': 'phoneticName', 'signature150_image_id': 'signature150ImageId', 'signature_font': 'signatureFont', 'signature_id': 'signatureId', 'signature_image_uri': 'signatureImageUri', 'signature_initials': 'signatureInitials', 'signature_name': 'signatureName', 'signature_rights': 'signatureRights', 'signature_type': 'signatureType', 'stamp_format': 'stampFormat', 'stamp_image_uri': 'stampImageUri', 'stamp_size_mm': 'stampSizeMM', 'stamp_type': 'stampType', 'status': 'status' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._adopted_date_time = None self._created_date_time = None self._custom_field = None self._date_stamp_properties = None self._disallow_user_resize_stamp = None self._error_details = None self._external_id = None self._image_base64 = None self._image_type = None self._initials150_image_id = None self._initials_image_uri = None self._is_default = None self._last_modified_date_time = None self._nrds_id = None self._nrds_last_name = None self._nrds_status = None self._phonetic_name = None self._signature150_image_id = None self._signature_font = None self._signature_id = None self._signature_image_uri = None self._signature_initials = None self._signature_name = None self._signature_rights = None self._signature_type = None self._stamp_format = None self._stamp_image_uri = None self._stamp_size_mm = None self._stamp_type = None self._status = None self.discriminator = None setattr(self, "_{}".format('adopted_date_time'), kwargs.get('adopted_date_time', None)) setattr(self, "_{}".format('created_date_time'), kwargs.get('created_date_time', None)) setattr(self, "_{}".format('custom_field'), kwargs.get('custom_field', None)) setattr(self, 
"_{}".format('date_stamp_properties'), kwargs.get('date_stamp_properties', None)) setattr(self, "_{}".format('disallow_user_resize_stamp'), kwargs.get('disallow_user_resize_stamp', None)) setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None)) setattr(self, "_{}".format('external_id'), kwargs.get('external_id', None)) setattr(self, "_{}".format('image_base64'), kwargs.get('image_base64', None)) setattr(self, "_{}".format('image_type'), kwargs.get('image_type', None)) setattr(self, "_{}".format('initials150_image_id'), kwargs.get('initials150_image_id', None)) setattr(self, "_{}".format('initials_image_uri'), kwargs.get('initials_image_uri', None)) setattr(self, "_{}".format('is_default'), kwargs.get('is_default', None)) setattr(self, "_{}".format('last_modified_date_time'), kwargs.get('last_modified_date_time', None)) setattr(self, "_{}".format('nrds_id'), kwargs.get('nrds_id', None)) setattr(self, "_{}".format('nrds_last_name'), kwargs.get('nrds_last_name', None)) setattr(self, "_{}".format('nrds_status'), kwargs.get('nrds_status', None)) setattr(self, "_{}".format('phonetic_name'), kwargs.get('phonetic_name', None)) setattr(self, "_{}".format('signature150_image_id'), kwargs.get('signature150_image_id', None)) setattr(self, "_{}".format('signature_font'), kwargs.get('signature_font', None)) setattr(self, "_{}".format('signature_id'), kwargs.get('signature_id', None)) setattr(self, "_{}".format('signature_image_uri'), kwargs.get('signature_image_uri', None)) setattr(self, "_{}".format('signature_initials'), kwargs.get('signature_initials', None)) setattr(self, "_{}".format('signature_name'), kwargs.get('signature_name', None)) setattr(self, "_{}".format('signature_rights'), kwargs.get('signature_rights', None)) setattr(self, "_{}".format('signature_type'), kwargs.get('signature_type', None)) setattr(self, "_{}".format('stamp_format'), kwargs.get('stamp_format', None)) setattr(self, "_{}".format('stamp_image_uri'), kwargs.get('stamp_image_uri', None)) setattr(self, "_{}".format('stamp_size_mm'), kwargs.get('stamp_size_mm', None)) setattr(self, "_{}".format('stamp_type'), kwargs.get('stamp_type', None)) setattr(self, "_{}".format('status'), kwargs.get('status', None)) @property def adopted_date_time(self): return self._adopted_date_time @adopted_date_time.setter def adopted_date_time(self, adopted_date_time): self._adopted_date_time = adopted_date_time @property def created_date_time(self): return self._created_date_time @created_date_time.setter def created_date_time(self, created_date_time): self._created_date_time = created_date_time @property def custom_field(self): return self._custom_field @custom_field.setter def custom_field(self, custom_field): self._custom_field = custom_field @property def date_stamp_properties(self): return self._date_stamp_properties @date_stamp_properties.setter def date_stamp_properties(self, date_stamp_properties): self._date_stamp_properties = date_stamp_properties @property def disallow_user_resize_stamp(self): return self._disallow_user_resize_stamp @disallow_user_resize_stamp.setter def disallow_user_resize_stamp(self, disallow_user_resize_stamp): self._disallow_user_resize_stamp = disallow_user_resize_stamp @property def error_details(self): return self._error_details @error_details.setter def error_details(self, error_details): self._error_details = error_details @property def external_id(self): return self._external_id @external_id.setter def external_id(self, external_id): self._external_id = external_id @property
MIT License
mindspore-ai/mindinsight
mindinsight/mindconverter/code_analysis.py
CodeAnalyzer._is_ref_convertible_imports
python
def _is_ref_convertible_imports(self, node): check_result = False whole_name = self._get_whole_name(node) if whole_name: module_name = whole_name.split('.')[0] for ref_name, ref_info in self._external_references.items(): external_ref = ref_info['external_ref_info'] if external_ref.name in APIAnalysisSpec.get_convertible_external_names(): if module_name == ref_name.split('.')[0]: check_result = True break return check_result
Check whether the node references convertible imports.
https://github.com/mindspore-ai/mindinsight/blob/253a210719dbb1e55b826f2e489322f402d66676/mindinsight/mindconverter/code_analysis.py#L249-L264
import ast import pasta from pasta.base import scope from mindinsight.mindconverter.common.exceptions import ScriptNotSupport class APIAnalysisSpec: import_name_mapping = {'torch': ['mindspore', None], 'torch.nn': ['mindspore.nn', 'nn'], 'torch.nn.functional': ['mindspore.ops.operations', 'P']} base_name_mapping = {'Module': 'Cell', 'Sequential': 'SequentialCell' } @classmethod def get_convertible_external_names(cls): return cls.import_name_mapping.keys() @staticmethod def get_network_base_class_names(): return ['Module', 'Sequential', 'ModuleList', 'ModuleDict', 'ParameterList', 'ParameterDict'] @staticmethod def check_external_alias_ref(ref_name, external_name): if ref_name != 'nn' and external_name == 'torch.nn': is_standard = False elif ref_name != 'F' and external_name == 'torch.nn.functional': is_standard = False else: is_standard = True return is_standard class CodeAnalyzer(ast.NodeVisitor): def __init__(self): self._stack = [] self._external_references = {} self._is_standard_external_ref = True self._root_scope = None self._network_functions = [] self._functions_stack = [] self._network_classes = {} @property def root_scope(self): return self._root_scope @property def is_standard_external_ref(self): return self._is_standard_external_ref @property def external_references(self): return self._external_references def network_definitions(self): return {"functions": self._network_functions, "cell": self._network_classes} def process(self, ast_tree): self.__init__() self._root_scope = scope.analyze(ast_tree) self._pre_process() self.visit(ast_tree) if not self._network_classes: msg = "model definition not be found." raise ScriptNotSupport(msg) @staticmethod def _check_external_standard(external_refs): is_standard = True for external_name, external_ref_info in external_refs.items(): is_standard = APIAnalysisSpec.check_external_alias_ref(external_name, external_ref_info.name) if not is_standard: break return is_standard def _is_base_from_cell(self, node): if self._is_ref_convertible_imports(node): whole_name = self._get_whole_name(node) if whole_name.split('.')[-1] in APIAnalysisSpec.get_network_base_class_names(): return True return False def _pre_process(self): is_torch = False for ref_name in self._root_scope.external_references.keys(): if ref_name.split('.')[0] in APIAnalysisSpec.get_convertible_external_names(): is_torch = True break if not is_torch: msg = "The source code does not import torch, model definition can not be found." 
raise ScriptNotSupport(msg) external_refs = self._analyze_import_references(self._root_scope) self._is_standard_external_ref = self._check_external_standard(external_refs) self._check_external_standard(external_refs) for external_name, external_ref_info in external_refs.items(): self._external_references.update({ external_name: { 'external_ref_info': external_ref_info, 'parent_node': None } }) @staticmethod def _analyze_import_references(root_scope): external_name_ref = dict() all_node_references = [] for node_references in root_scope.external_references.values(): all_node_references.extend(node_references) for node_ref in all_node_references: name_ref = node_ref.name_ref if not name_ref: continue definition = name_ref.definition if node_ref.name_ref.id in [definition.asname, definition.name]: external_name_ref[name_ref.id] = node_ref return external_name_ref def visit(self, node): self._stack.append(node) super(CodeAnalyzer, self).visit(node) self._stack.pop() @staticmethod def _get_full_name(node): if not isinstance(node, (ast.Attribute, ast.Name)): return None return pasta.dump(node) def _get_whole_name(self, node): full_name = self._get_full_name(node) if not full_name: return None whole_name = full_name if node is self._stack[-1]: parent_index = -1 while isinstance(self._stack[parent_index], ast.Attribute): parent_index -= 1 whole_name = self._get_full_name(self._stack[parent_index]) return whole_name
Apache License 2.0
gussand/anubis
api/anubis/lms/questions.py
reset_question_assignments
python
def reset_question_assignments(assignment: Assignment, commit: bool = True): assigned_student_questions = AssignedStudentQuestion.query.filter( AssignedStudentQuestion.assignment_id == assignment.id ).all() assigned_student_question_ids = list(map(lambda x: x.id, assigned_student_questions)) AssignedQuestionResponse.query.filter( AssignedQuestionResponse.assigned_question_id.in_( assigned_student_question_ids ), ).delete() AssignedStudentQuestion.query.filter( AssignedStudentQuestion.assignment_id == assignment.id ).delete() if commit: db.session.commit()
Reset the question assignments for an assignment. This will first delete all the existing question responses, then the question assignments. The questions themselves will stay. :param assignment: :param commit: :return:
https://github.com/gussand/anubis/blob/5ff4e293b84049af92b53b3bcc264c7782ffb9e6/api/anubis/lms/questions.py#L50-L83
import io import random import zipfile from typing import List, Dict, Optional import yaml from anubis.models import ( db, Assignment, AssignmentQuestion, AssignedStudentQuestion, AssignedQuestionResponse, User, InCourse, ) from anubis.utils.data import _verify_data_shape, is_debug from anubis.lms.students import get_students, get_students_in_class from anubis.utils.cache import cache from anubis.utils.logging import logger def get_question_pool_mapping( questions: List[AssignmentQuestion], ) -> Dict[int, List[AssignmentQuestion]]: pools = set(question.pool for question in questions) sequence_to_questions = {pool: [] for pool in pools} for question in questions: sequence_to_questions[question.pool].append(question) return sequence_to_questions
MIT License
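An illustrative call sketch for the reset_question_assignments record above (not from the source). The assignment lookup is hypothetical; the behavior follows the docstring: responses are deleted first, then the per-student assignments, while the question pool itself is kept.

from anubis.models import Assignment
from anubis.lms.questions import reset_question_assignments  # module path as given in the record

# Hypothetical lookup of an assignment by primary key (assumed Flask-SQLAlchemy query API).
assignment = Assignment.query.get("some-assignment-id")
reset_question_assignments(assignment, commit=True)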
mcgillmrl/kusanagi
examples/PILCO/pendulum_learn.py
experiment1_params
python
def experiment1_params(n_rnd=1, n_opt=100, dynmodel_class=regression.SSGP_UI, **kwargs): params = pendulum.default_params() params['n_rnd'] = int(n_rnd) params['n_opt'] = int(n_opt) for key in kwargs: if key in params: params[key] = eval(kwargs[key]) params['dynmodel_class'] = dynmodel_class loss_kwargs = {} polopt_kwargs = {} extra_inps = [] return params, loss_kwargs, polopt_kwargs, extra_inps
PILCO with RBF controller
https://github.com/mcgillmrl/kusanagi/blob/ad8cbcbaecf6d1b3d8d193c4625832f13d4080a3/examples/PILCO/pendulum_learn.py#L29-L44
import argparse import dill import os import sys import numpy as np import lasagne import theano from functools import partial from kusanagi import utils from kusanagi.ghost import regression, control from kusanagi.shell import experiment_utils, pendulum np.set_printoptions(linewidth=500) def eval_str_arg(arg): if type(arg) is str: arg = eval(arg) return arg
MIT License
zebrafishlabs/fastly-python
bin/fastly_upload_vcl.py
main
python
def main():
    parser = OptionParser(description=
        "Upload a vcl file (set as main) to a given fastly service. All arguments are required.")
    parser.add_option("-k", "--key", dest="apikey", help="fastly api key")
    parser.add_option("-u", "--user", dest="user", help="fastly user name")
    parser.add_option("-p", "--password", dest="password", help="fastly password")
    parser.add_option("-f", "--file", dest="filename", help="vcl file to upload")
    parser.add_option("-s", "--service", dest="service_name", help="service to update")
    parser.add_option("-d", "--delete_vcl", action="store_true",
                      dest="delete_vcl", default=False,
                      help="delete existing vcl files from service\
 before uploading")
    parser.add_option("-i", "--include", action="store_true",
                      dest="include_vcl", default=False,
                      help="do not set uploaded vcl as main,\
 to be included only")
    (options, args) = parser.parse_args()

    for val in options.__dict__.values():
        if val is None:
            print "Missing required options:"
            parser.print_help()
            sys.exit(1)

    vcl_name = options.filename.split('/').pop()
    service_name = options.service_name
    vcl_file = open(options.filename, 'r')
    vcl_content = vcl_file.read()

    client = fastly.connect(options.apikey)
    client.login(options.user, options.password)
    service = client.get_service_by_name(service_name)
    versions = client.list_versions(service.id)
    latest = versions.pop()

    if latest.locked is True or latest.active is True:
        print "\n[ Cloning version %d ]\n" % (latest.number)
        latest = client.clone_version(service.id, latest.number)

    if options.delete_vcl:
        vcls = client.list_vcls(service.id, latest.number)
        for vcl in vcls:
            print "\n[ Deleting vcl file %s from version %d ]\n" % (service_name, latest.number)
            client.delete_vcl(service.id, latest.number, vcl.name)

    if vcl_name in latest.vcls:
        print "\n[ Updating vcl file %s on service %s version %d ]\n" % (vcl_name, service_name, latest.number)
        client.update_vcl(service.id, latest.number, vcl_name, content=vcl_content)
    else:
        print "\n[ Uploading new vcl file %s on service %s version %d ]\n" % (vcl_name, service_name, latest.number)
        client.upload_vcl(service.id, latest.number, vcl_name, vcl_content)

    if options.include_vcl is False:
        print "\n[ Setting vcl %s as main ]\n" % (vcl_name)
        client.set_main_vcl(service.id, latest.number, vcl_name)

    client.activate_version(service.id, latest.number)
    print "\n[ Activing configuration version %d ]\n" % (latest.number)
Upload a vcl file to a fastly service, cloning the current version if necessary. The uploaded vcl is set as main unless --include is given. All existing vcl files will be deleted first if --delete is given.
https://github.com/zebrafishlabs/fastly-python/blob/72be5db55819c0bd4316ab00170446d4707b5d06/bin/fastly_upload_vcl.py#L34-L111
import sys import fastly from optparse import OptionParser
BSD 2-Clause Simplified License
nvbn/thefuck
thefuck/entrypoints/shell_logger.py
shell_logger
python
def shell_logger(output):
    if not os.environ.get('SHELL'):
        logs.warn("Shell logger doesn't support your platform.")
        sys.exit(1)

    fd = os.open(output, os.O_CREAT | os.O_TRUNC | os.O_RDWR)
    os.write(fd, b'\x00' * const.LOG_SIZE_IN_BYTES)
    buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED, mmap.PROT_WRITE)
    return_code = _spawn(os.environ['SHELL'], partial(_read, buffer))

    sys.exit(return_code)
Logs shell output to the `output`. Works like unix script command with `-f` flag.
https://github.com/nvbn/thefuck/blob/c719712b6256f4add4e65e8d4369b36d73342b48/thefuck/entrypoints/shell_logger.py#L64-L79
import array import fcntl from functools import partial import mmap import os import pty import signal import sys import termios import tty from .. import logs, const def _read(f, fd): data = os.read(fd, 1024) try: f.write(data) except ValueError: position = const.LOG_SIZE_IN_BYTES - const.LOG_SIZE_TO_CLEAN f.move(0, const.LOG_SIZE_TO_CLEAN, position) f.seek(position) f.write(b'\x00' * const.LOG_SIZE_TO_CLEAN) f.seek(position) return data def _set_pty_size(master_fd): buf = array.array('h', [0, 0, 0, 0]) fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCGWINSZ, buf, True) fcntl.ioctl(master_fd, termios.TIOCSWINSZ, buf) def _spawn(shell, master_read): pid, master_fd = pty.fork() if pid == pty.CHILD: os.execlp(shell, shell) try: mode = tty.tcgetattr(pty.STDIN_FILENO) tty.setraw(pty.STDIN_FILENO) restore = True except tty.error: restore = False _set_pty_size(master_fd) signal.signal(signal.SIGWINCH, lambda *_: _set_pty_size(master_fd)) try: pty._copy(master_fd, master_read, pty._read) except OSError: if restore: tty.tcsetattr(pty.STDIN_FILENO, tty.TCSAFLUSH, mode) os.close(master_fd) return os.waitpid(pid, 0)[1]
MIT License
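A usage sketch, assuming a POSIX environment where $SHELL is set; the call blocks until the spawned shell exits and then terminates the current process, so it belongs in a CLI entry point rather than in interactive code. The log path below is hypothetical.

import os
from thefuck.entrypoints.shell_logger import shell_logger

os.environ.setdefault('SHELL', '/bin/bash')   # the function exits early without $SHELL
shell_logger('/tmp/thefuck_shell_output')     # hypothetical log path; returns only via sys.exit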
square/connect-python-sdk
squareconnect/models/v1_order.py
V1Order.recipient_name
python
def recipient_name(self): return self._recipient_name
Gets the recipient_name of this V1Order. The name of the order's buyer. :return: The recipient_name of this V1Order. :rtype: str
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/v1_order.py#L191-L199
from pprint import pformat from six import iteritems import re class V1Order(object): def __init__(self, errors=None, id=None, buyer_email=None, recipient_name=None, recipient_phone_number=None, state=None, shipping_address=None, subtotal_money=None, total_shipping_money=None, total_tax_money=None, total_price_money=None, total_discount_money=None, created_at=None, updated_at=None, expires_at=None, payment_id=None, buyer_note=None, completed_note=None, refunded_note=None, canceled_note=None, tender=None, order_history=None, promo_code=None, btc_receive_address=None, btc_price_satoshi=None): self.swagger_types = { 'errors': 'list[Error]', 'id': 'str', 'buyer_email': 'str', 'recipient_name': 'str', 'recipient_phone_number': 'str', 'state': 'str', 'shipping_address': 'Address', 'subtotal_money': 'V1Money', 'total_shipping_money': 'V1Money', 'total_tax_money': 'V1Money', 'total_price_money': 'V1Money', 'total_discount_money': 'V1Money', 'created_at': 'str', 'updated_at': 'str', 'expires_at': 'str', 'payment_id': 'str', 'buyer_note': 'str', 'completed_note': 'str', 'refunded_note': 'str', 'canceled_note': 'str', 'tender': 'V1Tender', 'order_history': 'list[V1OrderHistoryEntry]', 'promo_code': 'str', 'btc_receive_address': 'str', 'btc_price_satoshi': 'float' } self.attribute_map = { 'errors': 'errors', 'id': 'id', 'buyer_email': 'buyer_email', 'recipient_name': 'recipient_name', 'recipient_phone_number': 'recipient_phone_number', 'state': 'state', 'shipping_address': 'shipping_address', 'subtotal_money': 'subtotal_money', 'total_shipping_money': 'total_shipping_money', 'total_tax_money': 'total_tax_money', 'total_price_money': 'total_price_money', 'total_discount_money': 'total_discount_money', 'created_at': 'created_at', 'updated_at': 'updated_at', 'expires_at': 'expires_at', 'payment_id': 'payment_id', 'buyer_note': 'buyer_note', 'completed_note': 'completed_note', 'refunded_note': 'refunded_note', 'canceled_note': 'canceled_note', 'tender': 'tender', 'order_history': 'order_history', 'promo_code': 'promo_code', 'btc_receive_address': 'btc_receive_address', 'btc_price_satoshi': 'btc_price_satoshi' } self._errors = errors self._id = id self._buyer_email = buyer_email self._recipient_name = recipient_name self._recipient_phone_number = recipient_phone_number self._state = state self._shipping_address = shipping_address self._subtotal_money = subtotal_money self._total_shipping_money = total_shipping_money self._total_tax_money = total_tax_money self._total_price_money = total_price_money self._total_discount_money = total_discount_money self._created_at = created_at self._updated_at = updated_at self._expires_at = expires_at self._payment_id = payment_id self._buyer_note = buyer_note self._completed_note = completed_note self._refunded_note = refunded_note self._canceled_note = canceled_note self._tender = tender self._order_history = order_history self._promo_code = promo_code self._btc_receive_address = btc_receive_address self._btc_price_satoshi = btc_price_satoshi @property def errors(self): return self._errors @errors.setter def errors(self, errors): self._errors = errors @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def buyer_email(self): return self._buyer_email @buyer_email.setter def buyer_email(self, buyer_email): self._buyer_email = buyer_email @property
Apache License 2.0
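A short sketch of the property in use, built from the constructor shown in the record's context; the values are hypothetical.

from squareconnect.models.v1_order import V1Order

order = V1Order(id='order-123', recipient_name='Jane Doe')
print(order.recipient_name)   # -> 'Jane Doe'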
naiqili/dgpg
compatible/kernels.py
Kernel.on_separate_dims
python
def on_separate_dims(self, other_kernel):
    if isinstance(self.active_dims, slice) or isinstance(other_kernel.active_dims, slice):
        return False

    if np.any(self.active_dims.reshape(-1, 1) == other_kernel.active_dims.reshape(1, -1)):
        return False

    return True
Checks if the dimensions, over which the kernels are specified, overlap. Returns True if they are defined on different/separate dimensions and False otherwise.
https://github.com/naiqili/dgpg/blob/5d0e6349e2ee6a82d4512f941537b93544b6286d/compatible/kernels.py#L117-L129
from functools import reduce, lru_cache import warnings import tensorflow as tf import numpy as np from gpflow import transforms from gpflow import settings from gpflow.decors import params_as_tensors, autoflow from gpflow.misc import _broadcasting_elementwise_op from gpflow.params import Parameter, Parameterized, ParamList class Kernel(Parameterized): def __init__(self, input_dim, active_dims=None, name=None): super().__init__(name=name) self.input_dim = int(input_dim) if active_dims is None: self.active_dims = slice(input_dim) elif isinstance(active_dims, slice): self.active_dims = active_dims if active_dims.start is not None and active_dims.stop is not None and active_dims.step is not None: assert len( range(active_dims.start, active_dims.stop, active_dims.step)) == input_dim else: self.active_dims = np.array(active_dims, dtype=np.int32) assert len(active_dims) == input_dim def _validate_ard_shape(self, name, value, ARD=None): if ARD is None: ARD = np.asarray(value).squeeze().shape != () if ARD: value = value * np.ones(self.input_dim, dtype=settings.float_type) if self.input_dim == 1 or not ARD: correct_shape = () else: correct_shape = (self.input_dim,) if np.asarray(value).squeeze().shape != correct_shape: raise ValueError("shape of {} does not match input_dim".format(name)) return value, ARD @autoflow((settings.float_type, [None, None]), (settings.float_type, [None, None])) def compute_K(self, X, Z): return self.K(X, Z) @autoflow((settings.float_type, [None, None])) def compute_K_symm(self, X): return self.K(X) @autoflow((settings.float_type, [None, None])) def compute_Kdiag(self, X): return self.Kdiag(X)
MIT License
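A sketch of the overlap check, assuming kernels are constructed with explicit active_dims lists; the base Kernel class from the record's context is used directly here purely to illustrate the geometry of the test.

# Assumes Kernel from compatible/kernels.py is importable or in scope.
k1 = Kernel(input_dim=2, active_dims=[0, 1])
k2 = Kernel(input_dim=2, active_dims=[2, 3])
k3 = Kernel(input_dim=2, active_dims=[1, 2])

k1.on_separate_dims(k2)   # True: no shared input dimension
k1.on_separate_dims(k3)   # False: both kernels use dimension 1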
rgtjf/semantic-texual-similarity-toolkits
stst/utils.py
vectorize
python
def vectorize(sentence, idf_weight, vocab, convey='idf'):
    vec = np.zeros(len(vocab), dtype=np.float32)
    for word in sentence:
        if word not in vocab:
            continue
        if convey == 'idf':
            vec[vocab[word]] += idf_weight[word]
        elif convey == 'count':
            vec[vocab[word]] += 1
        else:
            raise NotImplementedError
    return vec
idf_weight: {word: weight} vocab: {word: index}
https://github.com/rgtjf/semantic-texual-similarity-toolkits/blob/7ef271e4e4ca55330b31bce06368274c2ddbe3a9/stst/utils.py#L252-L267
from __future__ import print_function import datetime import io import time import csv, math import codecs import logging import configparser from functools import wraps from collections import Counter import numpy as np import os import pickle import six import array import pyprind from stst.libs.kernel import vector_kernel as vk logger = logging.getLogger(__name__) def fn_timer(function): @wraps(function) def function_timer(*args, **kwargs): t0 = time.time() result = function(*args, **kwargs) t1 = time.time() print("total time running %s: %s seconds" % (function.__name__, str(t1 - t0))) return result return function_timer def singleton(cls): instances = {} def _singleton(*args, **kwargs): if (cls, args) not in instances: instances[(cls, args)] = cls(*args, **kwargs) return instances[(cls, args)] return _singleton @fn_timer def Test(): pass @singleton class SingletonTest(object): pass def get_time_name(prefix): time_str = datetime.datetime.now().strftime('_%m%d_%H_%M') return prefix + time_str def get_logger(file_name): logger = logging.getLogger() logger.setLevel(logging.INFO) formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s", "%m-%d %H:%M") fh = logging.FileHandler(file_name) fh.setFormatter(formatter) logger.addHandler(fh) ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(ch) return logger def get_config(config_file): config = configparser.ConfigParser(allow_no_value=True, interpolation=configparser.ExtendedInterpolation()) config.read(config_file) return config class DictVocab(object): @staticmethod def load_from_file(file_path, sep='\t'): vocab = {} with create_read_file(file_path) as f: for idx, line in enumerate(f): items = line.strip().split(sep) if len(items) == 1: vocab[items[0]] = idx elif len(items) == 2: vocab[items[0]] = items[1] else: raise NotImplementedError print('load from FILE {}'.format(file_path)) return vocab @staticmethod def dump_to_file(vocab, file_path, sep='\t', sort_by_key=True, reverse=False): with create_write_file(file_path) as fw: items = vocab.items() if sort_by_key: keys = sorted(items, cmp=lambda x: x[0], reverse=reverse) else: keys = sorted(items, cmp=lambda x: x[1], reverse=reverse) for key in keys: print("{}\t{}".format(key, vocab[key]), file=fw) print('dump to FILE {}'.format(file_path)) def split_abbreviation(word): res = [] char = '' for ch in word: if char != '' and char[-1].islower() and ch.isupper(): res.append(char) char = '' char += ch if char != '': res.append(char) return res def word2char(word_list): if type(word_list) is six.text_type: word_list = word_list.split() char_list = [] word_string = ''.join(word_list) char = '' for ch in word_string: if ord(ch) < 128: char += ch else: if char != '': char_list += split_abbreviation(char) char = '' char_list.append(ch) if char != '': char_list += split_abbreviation(char) return char_list def word2index(word_list): if type(word_list) is list: vocab = {word:i for i, word in enumerate(word_list)} elif type(word_list) is dict: vocab = {word:i for i, word in enumerate(word_list.keys())} else: raise NotImplementedError return vocab def pos2tag(pos): if pos in ['NN', 'NNS', 'NNP', 'NNPS']: pos = 'n' elif pos in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']: pos = 'v' elif pos in ['JJ', 'JJR', 'JJS']: pos = 'a' elif pos in ['RB', 'RBR', 'RBS']: pos = 'r' else: pos = '#' return pos def idf_calculator(sentence_list, min_cnt=1): doc_num = 0 word_list = [] for sequence in sentence_list: word_list += sequence doc_num += 1 word_count = Counter() for word in word_list: 
word_count[word] += 1 idf_dict = {} good_keys = [v for v in word_count.keys() if word_count[v] >= min_cnt] for key in good_keys: idf_dict[key] = word_count[key] for key in idf_dict.keys(): idf_dict[key] = math.log(float(doc_num) / float(idf_dict[key])) / math.log(10) return idf_dict
MIT License
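A small, self-contained sketch of the helper above, showing how the vocab and idf_weight mappings described in the docstring interact; the toy values are hypothetical.

from stst.utils import vectorize

vocab = {'cat': 0, 'sat': 1, 'mat': 2}               # word -> index
idf_weight = {'cat': 1.5, 'sat': 0.7, 'mat': 1.1}    # word -> idf weight

sentence = ['the', 'cat', 'sat', 'cat']              # 'the' is out-of-vocabulary and skipped
vec_idf = vectorize(sentence, idf_weight, vocab, convey='idf')      # [3.0, 0.7, 0.0]
vec_count = vectorize(sentence, idf_weight, vocab, convey='count')  # [2.0, 1.0, 0.0]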
tensorflow/graphics
tensorflow_graphics/image/matting.py
loss
python
def loss(matte: type_alias.TensorLike,
         laplacian: type_alias.TensorLike,
         name: str = "matting_loss") -> tf.Tensor:
    with tf.name_scope(name):
        matte = tf.convert_to_tensor(value=matte)
        laplacian = tf.convert_to_tensor(value=laplacian)
        pixels = tf.compat.dimension_value(laplacian.shape[-1])

        shape.check_static(matte, has_rank=4, has_dim_equals=(-1, 1))
        shape.check_static(laplacian, has_rank=5, has_dim_equals=(-2, pixels))
        shape.compare_batch_dimensions(
            tensors=(matte, laplacian), last_axes=0, broadcast_compatible=False)

        size = np.sqrt(pixels)
        patches = tf.expand_dims(_image_patches(matte, size), axis=-2)
        losses = _quadratic_form(laplacian, patches)
        return tf.reduce_mean(input_tensor=losses)
Computes the matting loss function based on the matting Laplacian. Computes the matting loss function based on the `laplacian` generated by the `build_matrices` function which implements the approach proposed by Levin et al. in "A Closed Form Solution to Natural Image Matting". Args: matte: A tensor of shape `[B, H, W, 1]`. laplacian: A tensor of shape `[B, H - pad, W - pad, size^2, size^2]` containing the Laplacian matrices computed by the `build_matrices` function, where `pad` is equal to `size - 1` and `size` is the patch size used to compute this tensor. name: A name for this op. Defaults to "matting_loss". Returns: A tensor containing a scalar value defining the matting loss. Raises: ValueError: If the last dimension of `matte` is not 1. If `matte` is not of rank 4. If the last two dimensions of `laplacian` are not of the same size. If `laplacian` is not of rank 5. If `B` is different between `matte` and `laplacian`.
https://github.com/tensorflow/graphics/blob/d0817aec7dee35635814e925a59d83955459d93c/tensorflow_graphics/image/matting.py#L223-L262
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Tuple, Union import numpy as np import tensorflow as tf from tensorflow_graphics.math import vector as tfg_vector from tensorflow_graphics.util import asserts from tensorflow_graphics.util import export_api from tensorflow_graphics.util import shape from tensorflow_graphics.util import type_alias def _shape(batch_shape: Union[type_alias.TensorLike, List[int]], *shapes: Union[type_alias.TensorLike, List[type_alias.TensorLike]] ) -> tf.Tensor: return tf.concat((batch_shape, shapes), axis=-1) def _quadratic_form(matrix: type_alias.TensorLike, vector: type_alias.TensorLike) -> tf.Tensor: vector_matrix = tf.matmul(vector, matrix) vector_matrix_vector = tf.matmul(vector_matrix, vector, transpose_b=True) return vector_matrix_vector def _image_patches(image: type_alias.TensorLike, size: int) -> tf.Tensor: return tf.image.extract_patches( image, sizes=(1, size, size, 1), strides=(1, 1, 1, 1), rates=(1, 1, 1, 1), padding="VALID") def _image_average(image: type_alias.TensorLike, size: int) -> tf.Tensor: return tf.nn.avg_pool2d( input=image, ksize=(1, size, size, 1), strides=(1, 1, 1, 1), padding="VALID") def build_matrices(image: type_alias.TensorLike, size: int = 3, eps: type_alias.Float = 1e-5, name: str = "matting_build_matrices" ) -> Tuple[tf.Tensor, tf.Tensor]: with tf.name_scope(name): image = tf.convert_to_tensor(value=image) eps = tf.constant(value=eps, dtype=image.dtype) shape.check_static(image, has_rank=4) if size % 2 == 0: raise ValueError("The patch size is expected to be an odd value.") pixels = size**2 channels = tf.shape(input=image)[-1] dtype = image.dtype patches = _image_patches(image, size) batches = tf.shape(input=patches)[:-1] patches = tf.reshape(patches, shape=_shape(batches, pixels, channels)) ones = tf.ones(shape=_shape(batches, pixels, 1), dtype=dtype) affine = tf.concat((patches, ones), axis=-1) diag = tf.sqrt(eps) * tf.eye(channels, batch_shape=(1, 1, 1), dtype=dtype) zeros = tf.zeros(shape=_shape((1, 1, 1), channels, 1), dtype=dtype) regularizer = tf.concat((diag, zeros), axis=-1) regularizer = tf.tile(regularizer, multiples=_shape(batches, 1, 1)) mat = tf.concat((affine, regularizer), axis=-2) inverse = tf.linalg.inv(tf.matmul(mat, mat, transpose_a=True)) inverse = asserts.assert_no_infs_or_nans(inverse) pseudo_inverse = tf.matmul(inverse, affine, transpose_b=True) identity = tf.eye(num_rows=pixels, dtype=dtype) laplacian = identity - tf.matmul(affine, pseudo_inverse) return laplacian, pseudo_inverse def linear_coefficients(matte: type_alias.TensorLike, pseudo_inverse: type_alias.TensorLike, name: str = "matting_linear_coefficients" ) -> Tuple[tf.Tensor, tf.Tensor]: with tf.name_scope(name): matte = tf.convert_to_tensor(value=matte) pseudo_inverse = tf.convert_to_tensor(value=pseudo_inverse) pixels = tf.compat.dimension_value(pseudo_inverse.shape[-1]) shape.check_static(matte, has_rank=4, has_dim_equals=(-1, 1)) shape.check_static(pseudo_inverse, has_rank=5) shape.compare_batch_dimensions( tensors=(matte, pseudo_inverse), last_axes=0, broadcast_compatible=False) size = np.sqrt(pixels) patches = tf.expand_dims(_image_patches(matte, size), axis=-1) coeffs = tf.squeeze(tf.matmul(pseudo_inverse, patches), axis=-1) height = tf.shape(input=coeffs)[1] width = tf.shape(input=coeffs)[2] ones = tf.ones(shape=_shape((1,), height, width, 1), dtype=matte.dtype) height = tf.shape(input=matte)[1] + size - 1 width = tf.shape(input=matte)[2] + size - 1 
coeffs = tf.image.resize_with_crop_or_pad(coeffs, height, width) ones = tf.image.resize_with_crop_or_pad(ones, height, width) coeffs = _image_average(coeffs, size) / _image_average(ones, size) return tf.split(coeffs, (-1, 1), axis=-1)
Apache License 2.0
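A sketch tying loss together with build_matrices from the same module, assuming TensorFlow 2.x eager execution, random inputs of compatible shape, and that the module's import path matches its file location in the record.

import tensorflow as tf
from tensorflow_graphics.image import matting

image = tf.random.uniform((1, 32, 32, 3))    # [B, H, W, C] input image
matte = tf.random.uniform((1, 32, 32, 1))    # [B, H, W, 1] candidate matte

laplacian, _ = matting.build_matrices(image, size=3)   # Laplacians of shape [1, 30, 30, 9, 9]
value = matting.loss(matte, laplacian)                 # scalar matting loss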
voxel51/eta
eta/classifiers/tfslim_classifiers.py
TFSlimFeaturizer.__init__
python
def __init__(self, config):
    super(TFSlimFeaturizer, self).__init__()
    self.config = config
    self.validate(self.config)
    self._classifier = None
Creates a TFSlimFeaturizer instance. Args: config: a TFSlimFeaturizer instance
https://github.com/voxel51/eta/blob/e51510fda0722ac7cadb17b109bad413a6602ed3/eta/classifiers/tfslim_classifiers.py#L458-L467
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import * import logging import sys import numpy as np import eta.constants as etac from eta.core.config import Config import eta.core.data as etad from eta.core.features import ImageFeaturizer import eta.core.learning as etal import eta.core.tfutils as etat import eta.core.utils as etau sys.path.insert(1, etac.TF_SLIM_DIR) _ensure_tf1 = lambda: etau.ensure_import("tensorflow<2") tf = etau.lazy_import("tensorflow", callback=_ensure_tf1) _ERROR_MSG = "You must run `eta install models` in order to use this model" pf = etau.lazy_import( "preprocessing.preprocessing_factory", error_msg=_ERROR_MSG ) nf = etau.lazy_import("nets.nets_factory", error_msg=_ERROR_MSG) logger = logging.getLogger(__name__) _NUMPY_PREPROC_FUNCTIONS = { "resnet_v1_50": etat.vgg_preprocessing_numpy, "resnet_v2_50": etat.inception_preprocessing_numpy, "mobilenet_v2": etat.inception_preprocessing_numpy, "inception_v3": etat.inception_preprocessing_numpy, "inception_v4": etat.inception_preprocessing_numpy, "inception_resnet_v2": etat.inception_preprocessing_numpy, } _DEFAULT_FEATURES_NAMES = { "resnet_v1_50": "resnet_v1_50/pool5", "resnet_v2_50": "resnet_v2_50/pool5", "mobilenet_v2": "MobilenetV2/Logits/AvgPool", "inception_v4": "InceptionV4/Logits/PreLogitsFlatten/flatten/Reshape", "inception_resnet_v2": "InceptionResnetV2/Logits/Dropout/Identity", } _DEFAULT_OUTPUT_NAMES = { "resnet_v1_50": "resnet_v1_50/predictions/Reshape_1", "resnet_v2_50": "resnet_v2_50/predictions/Reshape_1", "mobilenet_v1_025": "MobilenetV1/Predictions/Reshape_1", "mobilenet_v2": "MobilenetV2/Predictions/Reshape_1", "inception_v3": "InceptionV3/Predictions/Reshape_1", "inception_v4": "InceptionV4/Logits/Predictions", "inception_resnet_v2": "InceptionResnetV2/Logits/Predictions", } class TFSlimClassifierConfig(Config, etal.HasPublishedModel): def __init__(self, d): d = self.init(d) self.attr_name = self.parse_string(d, "attr_name") self.network_name = self.parse_string(d, "network_name") self.labels_path = etau.fill_config_patterns( self.parse_string(d, "labels_path") ) self.preprocessing_fcn = self.parse_string( d, "preprocessing_fcn", default=None ) self.input_name = self.parse_string(d, "input_name", default="input") self.features_name = self.parse_string( d, "features_name", default=None ) self.output_name = self.parse_string(d, "output_name", default=None) self.confidence_thresh = self.parse_number( d, "confidence_thresh", default=0 ) self.generate_features = self.parse_bool( d, "generate_features", default=False ) class TFSlimClassifier( etal.ImageClassifier, etal.ExposesFeatures, etal.ExposesProbabilities, etat.UsesTFSession, ): def __init__(self, config): self.config = config etat.UsesTFSession.__init__(self) self.config.download_model_if_necessary() model_path = self.config.model_path self._prefix = "main" self._graph = etat.load_graph(model_path, prefix=self._prefix) self._sess = None labels_map = etal.load_labels_map(self.config.labels_path) self._class_labels = etal.get_class_labels(labels_map) self._num_classes = len(self._class_labels) network_name = self.config.network_name network_fn = nf.get_network_fn( network_name, num_classes=self._num_classes, is_training=False ) self.img_size = network_fn.default_image_size self._input_op = self._graph.get_operation_by_name( self._prefix + "/" + self.config.input_name ) features_name = None if self.config.generate_features: if 
self.config.features_name: features_name = self.config.features_name elif network_name in _DEFAULT_FEATURES_NAMES: features_name = _DEFAULT_FEATURES_NAMES[network_name] if features_name is not None: self._features_op = self._graph.get_operation_by_name( self._prefix + "/" + features_name ) else: self._features_op = None if self.config.output_name: output_name = self.config.output_name else: output_name = _DEFAULT_OUTPUT_NAMES.get(network_name, None) if output_name is None: raise ValueError( "`output_name` was not provided and network `%s` was not " "found in default outputs map" % network_name ) self._output_op = self._graph.get_operation_by_name( self._prefix + "/" + output_name ) self._transforms = self._make_preprocessing_fcn( network_name, self.config.preprocessing_fcn ) self._preprocess = True self._last_features = None self._last_probs = None def __enter__(self): self._sess = self.make_tf_session(graph=self._graph) return self def __exit__(self, *args): self.close() @property def is_multilabel(self): return False @property def ragged_batches(self): return False @property def transforms(self): return self._transforms @property def preprocess(self): return self._preprocess @preprocess.setter def preprocess(self, value): self._preprocess = value @property def exposes_features(self): return self._features_op is not None @property def features_dim(self): if not self.exposes_features: return None dim = self._features_op.outputs[0].get_shape().as_list()[-1] if dim is None: logger.warning( "Unable to statically get feature dimension; returning None" ) return dim @property def exposes_probabilities(self): return True @property def num_classes(self): return self._num_classes @property def class_labels(self): return self._class_labels def get_features(self): if not self.exposes_features: return None return self._last_features def get_probabilities(self): if not self.exposes_probabilities: return None return self._last_probs def predict(self, img): return self._predict([img])[0] def predict_all(self, imgs): return self._predict(imgs) def _predict(self, imgs): if self.preprocess: imgs = self._preprocess_batch(imgs) if self.exposes_features: features, probs = self._evaluate( imgs, [self._features_op, self._output_op] ) else: features = None probs = self._evaluate(imgs, [self._output_op])[0] max_num_preds = 0 predictions = [] for probsi in probs: predsi, keepi = self._parse_prediction(probsi) if keepi: max_num_preds = 1 predictions.append(predsi) probs = probs[:, np.newaxis, :] probs = probs[:, :max_num_preds, :] if self.exposes_features: self._last_features = features self._last_probs = probs return predictions def _preprocess_batch(self, imgs): return [self.transforms(img) for img in imgs] def _evaluate(self, imgs, ops): in_tensor = self._input_op.outputs[0] out_tensors = [op.outputs[0] for op in ops] return self._sess.run(out_tensors, feed_dict={in_tensor: imgs}) def _parse_prediction(self, probs): idx = np.argmax(probs) label = self.class_labels[idx] confidence = probs[idx] attrs = etad.AttributeContainer() keep = confidence > self.config.confidence_thresh if keep: attrs.add( etad.CategoricalAttribute( self.config.attr_name, label, confidence=confidence ) ) return attrs, keep def _make_preprocessing_fcn(self, network_name, preprocessing_fcn): dim = self.img_size if preprocessing_fcn: logger.debug( "Using user-provided preprocessing '%s'", preprocessing_fcn ) user_fcn = etau.get_function(preprocessing_fcn) return lambda img: user_fcn(img, dim, dim) numpy_fcn = 
_NUMPY_PREPROC_FUNCTIONS.get(network_name, None) if numpy_fcn is not None: logger.debug( "Using numpy-based preprocessing for network '%s'", network_name, ) return lambda img: numpy_fcn(img, dim, dim) logger.debug( "Using TF-based preprocessing for network '%s'", network_name ) tfslim_fcn = pf.get_preprocessing(network_name, is_training=False) sess = self.make_tf_session() _img = tf.placeholder("uint8", [None, None, 3]) _img_out = tfslim_fcn(_img, dim, dim) return lambda img: sess.run(_img_out, feed_dict={_img: img}) class TFSlimFeaturizerConfig(TFSlimClassifierConfig): def __init__(self, d): d["attr_name"] = "" d["generate_features"] = True super(TFSlimFeaturizerConfig, self).__init__(d) class TFSlimFeaturizer(ImageFeaturizer):
Apache License 2.0
oauthlib/oauthlib
oauthlib/oauth1/rfc5849/endpoints/resource.py
ResourceEndpoint.validate_protected_resource_request
python
def validate_protected_resource_request(self, uri, http_method='GET',
                                        body=None, headers=None, realms=None):
    try:
        request = self._create_request(uri, http_method, body, headers)
    except errors.OAuth1Error:
        return False, None

    try:
        self._check_transport_security(request)
        self._check_mandatory_parameters(request)
    except errors.OAuth1Error:
        return False, request

    if not request.resource_owner_key:
        return False, request

    if not self.request_validator.check_access_token(
            request.resource_owner_key):
        return False, request

    if not self.request_validator.validate_timestamp_and_nonce(
            request.client_key, request.timestamp, request.nonce, request,
            access_token=request.resource_owner_key):
        return False, request

    valid_client = self.request_validator.validate_client_key(
        request.client_key, request)
    if not valid_client:
        request.client_key = self.request_validator.dummy_client

    valid_resource_owner = self.request_validator.validate_access_token(
        request.client_key, request.resource_owner_key, request)
    if not valid_resource_owner:
        request.resource_owner_key = self.request_validator.dummy_access_token

    valid_realm = self.request_validator.validate_realms(request.client_key,
                                                         request.resource_owner_key,
                                                         request,
                                                         uri=request.uri,
                                                         realms=realms)

    valid_signature = self._check_signature(request)

    request.validator_log['client'] = valid_client
    request.validator_log['resource_owner'] = valid_resource_owner
    request.validator_log['realm'] = valid_realm
    request.validator_log['signature'] = valid_signature

    v = all((valid_client, valid_resource_owner, valid_realm, valid_signature))
    if not v:
        log.info("[Failure] request verification failed.")
        log.info("Valid client: %s", valid_client)
        log.info("Valid token: %s", valid_resource_owner)
        log.info("Valid realm: %s", valid_realm)
        log.info("Valid signature: %s", valid_signature)
    return v, request
Create a request token response, with a new request token if valid. :param uri: The full URI of the token request. :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc. :param body: The request body as a string. :param headers: The request headers as a dict. :param realms: A list of realms the resource is protected under. This will be supplied to the ``validate_realms`` method of the request validator. :returns: A tuple of 2 elements. 1. True if valid, False otherwise. 2. An oauthlib.common.Request object.
https://github.com/oauthlib/oauthlib/blob/f655d73f9dcbc1f7a1475038d6703870ef99c1fb/oauthlib/oauth1/rfc5849/endpoints/resource.py#L53-L163
import logging from .. import errors from .base import BaseEndpoint log = logging.getLogger(__name__) class ResourceEndpoint(BaseEndpoint):
BSD 3-Clause New or Revised License
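A usage sketch, assuming a concrete RequestValidator subclass; oauthlib only defines the validator interface, so MyValidator and the request details below are hypothetical.

from oauthlib.oauth1 import RequestValidator, ResourceEndpoint

class MyValidator(RequestValidator):
    """Hypothetical validator backed by your own client/token storage."""
    # Implement check_access_token, validate_client_key, validate_realms, etc.

endpoint = ResourceEndpoint(MyValidator())
valid, request = endpoint.validate_protected_resource_request(
    'https://api.example.com/photos',
    http_method='GET',
    headers={'Authorization': 'OAuth oauth_consumer_key="...", oauth_token="..."'},
    realms=['photos'])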
bigmlcom/bigmler
bigmler/resourcesapi/datasets.py
set_basic_dataset_args
python
def set_basic_dataset_args(args, name=None):
    if name is None:
        name = args.name
    dataset_args = set_basic_args(args, name)
    if args.sample_rate != 1 and args.no_model:
        dataset_args.update({
            "seed": SEED if args.seed is None else args.seed,
            "sample_rate": args.sample_rate
        })

    if hasattr(args, "range") and args.range_:
        dataset_args.update({
            "range": args.range_
        })
    return dataset_args
Return dataset basic arguments dict
https://github.com/bigmlcom/bigmler/blob/91973ca1e752954302bf26bb22aa6874dc34ce69/bigmler/resourcesapi/datasets.py#L39-L56
import sys import bigml.api from bigmler.utils import (dated, get_url, log_message, check_resource, is_shared, check_resource_error, log_created_resources) from bigmler.reports import report from bigmler.resourcesapi.common import set_basic_args, update_attributes, update_json_args, configure_input_fields, check_fields_struct from bigmler.resourcesapi.common import SEED, DS_NAMES, ALL_FIELDS_QS
Apache License 2.0
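A sketch of the argument builder above, using a plain namespace in place of bigmler's parsed command-line options; the attribute set below is only a guess at the minimum that set_basic_args and this helper read, so a real call should pass the full parsed args object.

from argparse import Namespace
from bigmler.resourcesapi.datasets import set_basic_dataset_args

# Hypothetical stand-in for bigmler's parsed CLI arguments.
args = Namespace(name='iris dataset', sample_rate=0.8, no_model=True,
                 seed=None, range_=None)

dataset_args = set_basic_dataset_args(args)
# expected to contain the shared basic args plus {'seed': SEED, 'sample_rate': 0.8}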
snakemake/snakemake
snakemake/caching/local.py
OutputFileCache.exists
python
def exists(self, job: Job):
    for outputfile, cachefile in self.get_outputfiles_and_cachefiles(job):
        if not cachefile.exists():
            return False

        self.check_readable(cachefile)
    return True
Return True if job is already cached
https://github.com/snakemake/snakemake/blob/ec87b97d8a7a92f3734001433d7fba3d5e4a642a/snakemake/caching/local.py#L109-L119
__authors__ = "Johannes Köster, Sven Nahnsen" __copyright__ = "Copyright 2021, Johannes Köster, Sven Nahnsen" __email__ = "johannes.koester@uni-due.de" __license__ = "MIT" from tempfile import TemporaryDirectory from pathlib import Path import os import shutil import stat from snakemake.logging import logger from snakemake.jobs import Job from snakemake.exceptions import WorkflowError from snakemake.caching.hash import ProvenanceHashMap from snakemake.caching import LOCATION_ENVVAR, AbstractOutputFileCache class OutputFileCache(AbstractOutputFileCache): def __init__(self): super().__init__() self.path = Path(self.cache_location) self.file_permissions = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH ) self.dir_permissions = ( self.file_permissions | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH ) def check_writeable(self, cachefile): if not (os.access(cachefile.parent, os.W_OK) or os.access(cachefile, os.W_OK)): self.raise_write_error(cachefile) def check_readable(self, cachefile): if not os.access(cachefile, os.R_OK): self.raise_read_error(cachefile) def store(self, job: Job): if not os.access(self.path, os.W_OK): raise WorkflowError( "Cannot access cache location {}. Please ensure that " "it is present and writeable.".format(self.path) ) with TemporaryDirectory(dir=self.path) as tmpdirname: tmpdir = Path(tmpdirname) for outputfile, cachefile in self.get_outputfiles_and_cachefiles(job): if not os.path.exists(outputfile): raise WorkflowError( "Cannot move output file {} to cache. It does not exist " "(maybe it was not created by the job?)." ) self.check_writeable(cachefile) logger.info("Moving output file {} to cache.".format(outputfile)) tmp = tmpdir / cachefile.name shutil.move(outputfile, tmp, copy_function=shutil.copy) self.set_permissions(tmp) shutil.move(tmp, cachefile) self.symlink(cachefile, outputfile, utime=False) def fetch(self, job: Job): for outputfile, cachefile in self.get_outputfiles_and_cachefiles(job): if not cachefile.exists(): self.raise_cache_miss_exception(job) self.check_readable(cachefile) if cachefile.is_dir(): outputfile.mkdir(parents=True, exist_ok=True) for f in cachefile.iterdir(): self.symlink(f, outputfile / f.name) else: self.symlink(cachefile, outputfile)
MIT License
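A sketch of how the cache check above might be used, assuming the environment variable named by snakemake.caching.LOCATION_ENVVAR points at a writable cache directory and that job is an eligible snakemake Job instance obtained from a workflow.

import os
from snakemake.caching import LOCATION_ENVVAR
from snakemake.caching.local import OutputFileCache

os.environ.setdefault(LOCATION_ENVVAR, '/tmp/snakemake-cache')   # hypothetical location

def fetch_or_run(job):
    """Fetch cached outputs if present; otherwise signal the caller to run the job."""
    cache = OutputFileCache()
    if cache.exists(job):
        cache.fetch(job)    # symlinks cached outputs into the working directory
        return True
    return False            # caller runs the job, then calls cache.store(job)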
flynnbr11/qmla
qmla/exploration_strategies/exploration_strategy.py
ExplorationStrategy.true_model_terms
python
def true_model_terms(self):
    true_terms = self.true_model_constructor.terms_names
    latex_true_terms = [self.latex_name(term) for term in true_terms]

    self.true_op_terms = set(sorted(latex_true_terms))

    return self.true_op_terms
r"""Terms (as latex strings) which make up the true model
https://github.com/flynnbr11/qmla/blob/d4a04fa157010aa79a017624967fdbb39529a414/qmla/exploration_strategies/exploration_strategy.py#L490-L498
from __future__ import absolute_import import sys import os import pickle import numpy as np import itertools import qinfer import matplotlib import matplotlib.pyplot as plt import scipy import qinfer as qi from lfig import LatexFigure import qmla.shared_functionality.prior_distributions import qmla.shared_functionality.experiment_design_heuristics import qmla.shared_functionality.probe_set_generation as probe_set_generation import qmla.shared_functionality.expectation_value_functions import qmla.utilities import qmla.model_building_utilities as model_building_utilities import qmla.shared_functionality.rating_system import qmla.shared_functionality.qinfer_model_interface from qmla.exploration_strategies.exploration_strategy_decorator import ( ExplorationStrategyDecorator, ) __all__ = ["ExplorationStrategy"] class ExplorationStrategy: def __init__(self, exploration_rules, true_model=None, **kwargs): self.exploration_rules = exploration_rules if true_model is not None: self.true_model = true_model else: self.true_model = None if "log_file" in kwargs: self.log_file = kwargs["log_file"] else: self.log_file = ".default_qmla_log.log" if "qmla_id" in kwargs: self.qmla_id = kwargs["qmla_id"] else: self.qmla_id = -1 if "true_params_path" in kwargs: self.true_params_path = kwargs["true_params_path"] else: self.true_params_path = None if "plot_probes_path" in kwargs: self.plot_probes_path = kwargs["plot_probes_path"] else: self.plot_probes_path = None self._setup_modular_subroutines() self._setup_true_model() self._setup_model_learning() self._setup_tree_infrastructure() self._setup_logistics() self.overwrite_default_parameters() def overwrite_default_parameters(self): pass def _setup_modular_subroutines(self): self.expectation_value_subroutine = ( qmla.shared_functionality.expectation_value_functions.default_expectation_value ) self.system_probes_generation_subroutine = ( qmla.shared_functionality.probe_set_generation.separable_probe_dict ) self.simulator_probes_generation_subroutine = ( self.system_probes_generation_subroutine ) self.shared_probes = ( True ) self.plot_probes_generation_subroutine = ( qmla.shared_functionality.probe_set_generation.plus_probes_dict ) self.evaluation_probe_generation_subroutine = None self.probe_noise_level = 1e-5 self.model_constructor = qmla.shared_functionality.model_constructors.BaseModel self.model_heuristic_subroutine = ( qmla.shared_functionality.experiment_design_heuristics.MultiParticleGuessHeuristic ) self.qinfer_model_subroutine = ( qmla.shared_functionality.qinfer_model_interface.QInferModelQMLA ) self.prior_distribution_subroutine = ( qmla.shared_functionality.prior_distributions.gaussian_prior ) def _setup_true_model(self): if self.true_model is None: self.true_model = "pauliSet_1_x_d1" self.qhl_models = ["pauliSet_1_x_d1", "pauliSet_1_y_d1", "pauliSet_1_z_d1"] self.true_model_terms_params = {} self._shared_true_parameters = True def _setup_model_learning(self): self.gaussian_prior_means_and_widths = {} self.min_param = 0 self.max_param = 1 self.true_param_cov_mtx_widen_factor = 1 self.prior_random_mean = False self.fixed_true_terms = False self.num_probes = 40 self.max_time_to_consider = 15 self.terminate_learning_at_volume_convergence = False self.volume_convergence_threshold = 1e-8 self.iqle_mode = False self.reallocate_resources = False self.max_num_parameter_estimate = 2 self.qinfer_resampler_a = 0.98 self.qinfer_resampler_threshold = 0.5 self.hard_fix_resample_effective_sample_size = None self.fraction_experiments_for_bf = 1 
self.fraction_own_experiments_for_bf = 1.0 self.fraction_opponents_experiments_for_bf = 1.0 self.fraction_particles_for_bf = ( 1.0 ) self.force_evaluation = False self.exclude_evaluation = False self.plot_time_increment = None def _setup_tree_infrastructure(self): self.initial_models = ["xTi", "yTi", "zTi"] self.tree_completed_initially = False self.prune_completed_initially = True self.max_spawn_depth = 10 self.max_num_qubits = 8 self.max_num_probe_qubits = 6 self.num_top_models_to_build_on = 1 self.ratings_class = qmla.shared_functionality.rating_system.ModifiedEloRating( initial_rating=1000, k_const=30 ) self.branch_champion_selection_stratgey = "number_comparison_wins" self.branch_comparison_strategy = "all" self.check_champion_reducibility = True self.learned_param_limit_for_negligibility = 0.05 self.reduce_champ_bayes_factor_threshold = 1e1 self.storage = qmla.utilities.StorageUnit() self.spawn_stage = [None] self.spawn_step = 0 self.prune_complete = False self.prune_step = 0 self.track_cov_mtx = False def _setup_logistics(self): self.max_num_models_by_shape = {1: 0, 2: 1, "other": 0} self.num_processes_to_parallelise_over = 6 self.timing_insurance_factor = 1 self.f_score_cmap = matplotlib.cm.RdBu self.bf_cmap = matplotlib.cm.PRGn def true_model_latex(self): return self.latex_name(self.true_model) @property
MIT License
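Judging from the context above, true_model_terms is exposed as a property; a sketch of reading it follows, with the caveat that whether the constructor fully initialises true_model_constructor depends on parts of the class not shown in this record, and the constructor arguments below are hypothetical.

from qmla.exploration_strategies.exploration_strategy import ExplorationStrategy

es = ExplorationStrategy(
    exploration_rules='example_rules',                  # hypothetical identifier
    true_model='pauliSet_1_x_d1+pauliSet_1_z_d1',
)
print(es.true_model_terms)    # set of the true model's terms as latex strings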
bastipaeltz/codedict
source/processor.py
set_wait_option
python
def set_wait_option(option):
    value = ""
    if 'on' in option:
        value = "on"
        print "Enabling 'wait' option."
    else:
        print "Disabling 'wait' option."
    database = db.Database()
    database.set_config_item('wait', value)
    sys.exit(0)
Sets the wait option to either on or off.
https://github.com/bastipaeltz/codedict/blob/5830cc277c5d0dbcd62d5d86217383fad4d0f207/source/processor.py#L60-L74
try: import readline except ImportError: pass import database as db import lib.prettytable as prettytable import tempfile import re import subprocess import textwrap import sys import os import urlparse import webbrowser import pydoc from xml.etree import ElementTree def start_process(cmd_line_args): relevant_args = ({key: value for key, value in cmd_line_args.iteritems() if value is not False and value is not None}) relevant_args = unicode_everything(relevant_args) if '--editor' in relevant_args: configure_editor(relevant_args) elif '--suffix' in relevant_args: configure_suffix(relevant_args) elif '--line' in relevant_args: configure_line_length(relevant_args) elif '--wait' in relevant_args: set_wait_option(relevant_args) elif 'rollback' in relevant_args: database = db.Database() database.rollback() else: body, flags = split_arguments(relevant_args) determine_proceeding(body, flags)
MIT License
ironport/coreutils
find_leaks.py
analyze_strings
python
def analyze_strings (cutoff=10, tmpdir='/tmp/'):
    def NAME (kind):
        return '%s%s.txt' % (
            os.path.join (tmpdir, 'all_strings'),
            '.' + kind
            )
    import mstats
    print 'dumping... (%s)' % (NAME ('dump'))
    mstats.dump_strings (NAME ('dump'))
    print 'sorting...'
    cmd = 'sort -T %s %s > %s' % (tmpdir, NAME ('dump'), NAME ('sorted'))
    if not os.system (cmd):
        os.unlink (NAME ('dump'))
        print 'building histogram...'
        f = open (NAME ('sorted'), 'rb')
        f2 = open (NAME ('hist'), 'wb')
        last = None
        count = 1
        total = 0
        while 1:
            l = f.readline()
            if not l:
                break
            elif l == last:
                count += 1
            else:
                if count >= cutoff:
                    f2.write ('%10d %r\n' % (count, last))
                    total += 1
                count = 1
            last = l
        if count >= cutoff:
            f2.write ('%10d %r\n' % (count, last))
            total += 1
        f2.close()
        f.close()
        os.unlink (NAME ('sorted'))
        if total:
            cmd = 'sort -T %s -n -k 1,1 %s > %s' % (tmpdir, NAME ('hist'), NAME ('sorted_hist'))
            if not os.system (cmd):
                print 'done. histogram is in %s' % (NAME ('sorted_hist'),)
            else:
                print 'error sorting histogram'
        else:
            print 'no strings duplicated over %d times' % (cutoff,)
            os.unlink (NAME ('hist'))
    else:
        print 'error sorting string dump'
analyze_strings ([<cutoff>=10], [<tmpdir>='/tmp/']) => None dump all strings to a file, then build a histogram of all the duplicates with more than <cutoff> identical copies. Warning: may use lots of space in <tmpdir>... Note: requires /usr/bin/sort.
https://github.com/ironport/coreutils/blob/c64e36256ddcfd293f6cead8cf4061a67c8a09bb/find_leaks.py#L228-L283
import types import sys import string import os import getrusage import gc out = sys.stdout if hasattr (sys, 'getcounts'): _prev_live = {} def live_changes (dont_print=0): global _prev_live output = [] for (name, alloced, freed, max_alloc) in sys.getcounts(): live = alloced - freed if not _prev_live.has_key (name): line = "new: %s %d" % (name, live) elif _prev_live[name] != live: line = "change: %s %d" % (name, live - _prev_live[name]) else: line = None if line is not None: if dont_print: output.append (line) else: print line _prev_live[name] = live if dont_print: return output def get_refcounts(): d = {} for m in sys.modules.values(): for sym in dir(m): o = getattr (m, sym) if type(o) is types.ClassType: d[o] = sys.getrefcount (o) pairs = map (lambda x: (x[1],x[0]), d.items()) pairs.sort() pairs.reverse() return pairs def find_all_types(): d = {} for m in sys.modules.values(): for sym in dir(m): o = getattr (m, sym) ot = type(o) if ot is types.TypeType: d[o] = None else: d[type(ot)] = None all_types = d.keys() all_types.sort (lambda a,b: cmp (id(a),id(b))) return all_types def print_type_counts (n=20): import mstats tl = find_all_types() mstats.initialize_type_table (tl) cl = mstats.get_type_hist() sorted = zip (cl, tl) sorted.sort() if n: sorted = sorted[-n:] sorted.reverse() for count, type in sorted: print '%10d %s' % (count, type) def print_top_100(): return print_top_n(100) def print_top_n(num): for n, c in get_refcounts()[:num]: print '%10d %s' % (n, c.__name__) class object_tracking_mixin: _addresses = {} def _register_object (self): addrs = object_tracking_mixin._addresses.get (self.__class__, {}) addrs[id(self)] = 1 object_tracking_mixin._addresses[self.__class__] = addrs def __del__ (self): del object_tracking_mixin._addresses[self.__class__][id(self)] _ohw_addresses = {} class object_hiding_wrapper: def __init__ (self, obj): self.__dict__['__ido'] = id(obj) _ohw_addresses[id(obj)] = obj def __getattr__ (self, attr): return getattr (_ohw_addresses[self.__dict__['__ido']], attr) def __setattr__ (self, attr, value): setattr (_ohw_addresses[self.__dict__['__ido']], attr, value) def __del__ (self): del _ohw_addresses[self.__dict__['__ido']] def process_size(): if not os.path.exists('/proc/curproc'): raise NotImplementedError, "sorry, FreeBSD only right now" fd = open ('/proc/curproc/map') vsize = 0 while 1: line = fd.readline() if not line: break [first, second] = line.split ()[:2] startaddr = string.atol (first, 16) endaddr = string.atol (second, 16) vsize += endaddr - startaddr fd.close() rsize = getrusage.getrusage() [3] * 1024L return rsize, vsize
MIT License
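A usage sketch (Python 2, matching the module): the helper needs the bundled mstats extension and /usr/bin/sort, and writes its histogram next to the dump files per the NAME() helper inside the function; the cutoff and tmpdir values below are hypothetical.

import find_leaks

# Dump every live string, then histogram values duplicated 25+ times.
# If anything qualifies, the result lands in /var/tmp/all_strings.sorted_hist.txt.
find_leaks.analyze_strings(cutoff=25, tmpdir='/var/tmp/')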
kubernetes-client/python
kubernetes/client/api/batch_v2alpha1_api.py
BatchV2alpha1Api.list_namespaced_cron_job_with_http_info
python
def list_namespaced_cron_job_with_http_info(self, namespace, **kwargs): local_var_params = locals() all_params = [ 'namespace', 'pretty', 'allow_watch_bookmarks', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'resource_version_match', 'timeout_seconds', 'watch' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method list_namespaced_cron_job" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_cron_job`") collection_formats = {} path_params = {} if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) if '_continue' in local_var_params and local_var_params['_continue'] is not None: query_params.append(('continue', local_var_params['_continue'])) if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: query_params.append(('fieldSelector', local_var_params['field_selector'])) if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: query_params.append(('labelSelector', local_var_params['label_selector'])) if 'limit' in local_var_params and local_var_params['limit'] is not None: query_params.append(('limit', local_var_params['limit'])) if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: query_params.append(('resourceVersion', local_var_params['resource_version'])) if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) if 'watch' in local_var_params and local_var_params['watch'] is not None: query_params.append(('watch', local_var_params['watch'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V2alpha1CronJobList', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
list_namespaced_cron_job # noqa: E501 list or watch objects of kind CronJob # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_cron_job_with_http_info(namespace, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V2alpha1CronJobList, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
https://github.com/kubernetes-client/python/blob/96dade6021dc2e9ee1430172e1b65d9e9e232b10/kubernetes/client/api/batch_v2alpha1_api.py#L804-L931
from __future__ import absolute_import import re import six from kubernetes.client.api_client import ApiClient from kubernetes.client.exceptions import ( ApiTypeError, ApiValueError ) class BatchV2alpha1Api(object): def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_namespaced_cron_job(self, namespace, body, **kwargs): kwargs['_return_http_data_only'] = True return self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs) def create_namespaced_cron_job_with_http_info(self, namespace, body, **kwargs): local_var_params = locals() all_params = [ 'namespace', 'body', 'pretty', 'dry_run', 'field_manager' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method create_namespaced_cron_job" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_cron_job`") if self.api_client.client_side_validation and ('body' not in local_var_params or local_var_params['body'] is None): raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_cron_job`") collection_formats = {} path_params = {} if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: query_params.append(('dryRun', local_var_params['dry_run'])) if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: query_params.append(('fieldManager', local_var_params['field_manager'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V2alpha1CronJob', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_namespaced_cron_job(self, namespace, **kwargs): kwargs['_return_http_data_only'] = True return self.delete_collection_namespaced_cron_job_with_http_info(namespace, **kwargs) def delete_collection_namespaced_cron_job_with_http_info(self, namespace, **kwargs): local_var_params = locals() all_params = [ 'namespace', 'pretty', '_continue', 'dry_run', 'field_selector', 'grace_period_seconds', 'label_selector', 'limit', 'orphan_dependents', 'propagation_policy', 'resource_version', 'resource_version_match', 'timeout_seconds', 'body' ] all_params.extend( [ 
'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_namespaced_cron_job" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cron_job`") collection_formats = {} path_params = {} if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if '_continue' in local_var_params and local_var_params['_continue'] is not None: query_params.append(('continue', local_var_params['_continue'])) if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: query_params.append(('dryRun', local_var_params['dry_run'])) if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: query_params.append(('fieldSelector', local_var_params['field_selector'])) if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: query_params.append(('labelSelector', local_var_params['label_selector'])) if 'limit' in local_var_params and local_var_params['limit'] is not None: query_params.append(('limit', local_var_params['limit'])) if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: query_params.append(('resourceVersion', local_var_params['resource_version'])) if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), 
collection_formats=collection_formats) def delete_namespaced_cron_job(self, name, namespace, **kwargs): kwargs['_return_http_data_only'] = True return self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs) def delete_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs): local_var_params = locals() all_params = [ 'name', 'namespace', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy', 'body' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_namespaced_cron_job" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('name' not in local_var_params or local_var_params['name'] is None): raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_cron_job`") if self.api_client.client_side_validation and ('namespace' not in local_var_params or local_var_params['namespace'] is None): raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`") collection_formats = {} path_params = {} if 'name' in local_var_params: path_params['name'] = local_var_params['name'] if 'namespace' in local_var_params: path_params['namespace'] = local_var_params['namespace'] query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: query_params.append(('dryRun', local_var_params['dry_run'])) if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): kwargs['_return_http_data_only'] = True return self.get_api_resources_with_http_info(**kwargs) def get_api_resources_with_http_info(self, **kwargs): local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val 
in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def list_cron_job_for_all_namespaces(self, **kwargs): kwargs['_return_http_data_only'] = True return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs) def list_cron_job_for_all_namespaces_with_http_info(self, **kwargs): local_var_params = locals() all_params = [ 'allow_watch_bookmarks', '_continue', 'field_selector', 'label_selector', 'limit', 'pretty', 'resource_version', 'resource_version_match', 'timeout_seconds', 'watch' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method list_cron_job_for_all_namespaces" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) if '_continue' in local_var_params and local_var_params['_continue'] is not None: query_params.append(('continue', local_var_params['_continue'])) if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: query_params.append(('fieldSelector', local_var_params['field_selector'])) if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: query_params.append(('labelSelector', local_var_params['label_selector'])) if 'limit' in local_var_params and local_var_params['limit'] is not None: query_params.append(('limit', local_var_params['limit'])) if 'pretty' in local_var_params and local_var_params['pretty'] is not None: query_params.append(('pretty', local_var_params['pretty'])) if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: query_params.append(('resourceVersion', local_var_params['resource_version'])) if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) if 'watch' in local_var_params and local_var_params['watch'] is not None: query_params.append(('watch', local_var_params['watch'])) 
header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) auth_settings = ['BearerToken'] return self.api_client.call_api( '/apis/batch/v2alpha1/cronjobs', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V2alpha1CronJobList', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def list_namespaced_cron_job(self, namespace, **kwargs): kwargs['_return_http_data_only'] = True return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
Apache License 2.0
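A minimal usage sketch for the BatchV2alpha1Api shown in this record, assuming an older kubernetes Python client release that still ships batch/v2alpha1, a reachable cluster, and that cron_job_body is a valid V2alpha1CronJob manifest (all three are assumptions, not part of the record):

from kubernetes import client, config

config.load_kube_config()  # assumption: a local kubeconfig with cluster access
batch_v2alpha1 = client.BatchV2alpha1Api()

# create_namespaced_cron_job POSTs the body to
# /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs
created = batch_v2alpha1.create_namespaced_cron_job(namespace="default", body=cron_job_body)

# listing and deleting follow the same pattern (see the context above)
jobs = batch_v2alpha1.list_namespaced_cron_job(namespace="default")
batch_v2alpha1.delete_namespaced_cron_job(name="my-cron-job", namespace="default")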
physiopy/phys2bids
phys2bids/physio_obj.py
BlueprintInput.print_info
python
def print_info(self, filename):
    info = (f'\n------------------------------------------------'
            f'\nFile {filename} contains:\n')

    for ch in range(1, self.ch_amount):
        info = info + (f'{ch:02d}. {self.ch_name[ch]};'
                       f' sampled at {self.freq[ch]} Hz\n')

    info = info + '------------------------------------------------\n'

    LGR.info(info)
Print info on the file, channel by channel.

Parameters
----------
filename: str or path
    Name of the input file to phys2bids

Notes
-----
Outcome:
ch:
    Returns to stdout (e.g. on screen) channels,
    their names and their sampling rate.
https://github.com/physiopy/phys2bids/blob/edbb77da40d9f070ead3983e9bbcc2e7f2d8dc4d/phys2bids/physio_obj.py#L531-L554
import logging import re from copy import deepcopy from itertools import groupby import numpy as np TRIGGER_NAMES = ["trig", "trigger"] LGR = logging.getLogger(__name__) LGR.setLevel(logging.INFO) def is_valid(var, var_type, list_type=None): if not isinstance(var, var_type): raise AttributeError(f'The given variable is not a {var_type}') if var_type is list and list_type is not None: for element in var: _ = is_valid(element, list_type) return var def has_size(var, data_size, token): if len(var) > data_size: var = var[:data_size] if len(var) < data_size: _ = is_valid(token, type(var[0])) var = var + [token] * (data_size - len(var)) return var def are_equal(self, other): def _deal_with_dict_value_error(self, other): try: self['timeseries'] except KeyError: return False except TypeError: return False else: if self.keys() == other.keys(): alltrue_timeseries = [False] * len(self['timeseries']) alltrue_keys = [False] * len(self) for j, key in enumerate(self.keys()): if key == 'timeseries': for i in range(len(self['timeseries'])): alltrue_timeseries[i] = (self['timeseries'][i].all() == other['timeseries'][i].all()) alltrue_keys[j] = all(alltrue_timeseries) else: alltrue_keys[j] = (self[key] == other[key]) return all(alltrue_keys) else: return False try: return self.__dict__ == other.__dict__ except ValueError: return _deal_with_dict_value_error(self.__dict__, other.__dict__) except AttributeError: try: return self.__dict__ == other except ValueError: return _deal_with_dict_value_error(self.__dict__, other) except AttributeError: try: return self == other.__dict__ except ValueError: return _deal_with_dict_value_error(self, other.__dict__) except AttributeError: try: return self == other except ValueError: return _deal_with_dict_value_error(self, other) class BlueprintInput(): def __init__(self, timeseries, freq, ch_name, units, trigger_idx, num_timepoints_found=None, thr=None, time_offset=0): self.timeseries = deepcopy(is_valid(timeseries, list, list_type=np.ndarray)) self.freq = deepcopy(has_size(is_valid(freq, list, list_type=(int, float)), self.ch_amount, 0.0)) self.ch_name = deepcopy(has_size(ch_name, self.ch_amount, 'unknown')) self.units = deepcopy(has_size(units, self.ch_amount, '[]')) self.trigger_idx = deepcopy(is_valid(trigger_idx, int)) self.num_timepoints_found = deepcopy(num_timepoints_found) self.thr = deepcopy(thr) self.time_offset = deepcopy(time_offset) if trigger_idx == 0: self.auto_trigger_selection() else: if ch_name[trigger_idx] not in TRIGGER_NAMES: LGR.info('Trigger channel name is not in our trigger channel name alias list. 
' 'Please make sure you choose the proper channel.') @property def ch_amount(self): return len(self.timeseries) def __getitem__(self, idx): sliced_timeseries = [None] * self.ch_amount return_instant = False if not self.trigger_idx: self.trigger_idx = 0 trigger_length = len(self.timeseries[self.trigger_idx]) if isinstance(idx, int): return_instant = True if idx < 0: idx = trigger_length + idx idx = slice(idx, idx + 1) if idx.start is None: idx = slice(0, idx.stop) if idx.stop is None: idx = slice(idx.start, trigger_length) if idx.start >= trigger_length or idx.stop > trigger_length: raise IndexError(f'Slice ({idx.start}, {idx.stop}) is out of ' f'bounds for channel {self.trigger_idx} ' f'with size {trigger_length}') for n, channel in enumerate(self.timeseries): idx_dict = {'start': idx.start, 'stop': idx.stop, 'step': idx.step} for i in ['start', 'stop', 'step']: if idx_dict[i]: idx_dict[i] = int(np.floor(self.freq[n] / self.freq[self.trigger_idx] * idx_dict[i])) if idx_dict['start'] == idx_dict['stop'] or return_instant: idx_dict['stop'] = idx_dict['start'] + 1 elif trigger_length == idx.stop: idx_dict['stop'] = len(channel) new_idx = slice(idx_dict['start'], idx_dict['stop'], idx_dict['step']) sliced_timeseries[n] = channel[new_idx] return BlueprintInput(sliced_timeseries, self.freq, self.ch_name, self.units, self.trigger_idx, self.num_timepoints_found, self.thr, self.time_offset) def __eq__(self, other): return are_equal(self, other) def rename_channels(self, new_names): if 'time' in new_names: del new_names[new_names.index('time')] new_names = ['time', ] + new_names self.ch_name = has_size(is_valid(new_names, list, list_type=str), self.ch_amount, 'unknown') def return_index(self, idx): return (self.timeseries[idx], self.ch_amount, self.freq[idx], self.ch_name[idx], self.units[idx]) def delete_at_index(self, idx): del self.timeseries[idx] del self.freq[idx] del self.ch_name[idx] del self.units[idx] if self.trigger_idx == idx: LGR.warning('Removing trigger channel - are you sure you are doing' 'the right thing?') self.trigger_idx = 0 def check_trigger_amount(self, thr=None, num_timepoints_expected=0, tr=0): LGR.info('Counting trigger points') trigger = self.timeseries[self.trigger_idx] time = self.timeseries[0] LGR.info(f'The trigger is in channel {self.trigger_idx}') if len(time) != len(trigger): LGR.warning('The trigger channel has a different sampling ' 'from the registered time. Using a resampled version ' 'of time to find the starting time.') time = np.linspace(time[0], time[-1], len(trigger)) flag = 0 if thr is None: thr = np.mean(trigger) + 2 * np.std(trigger) flag = 1 timepoints = trigger > thr num_timepoints_found = len([is_true for is_true, _ in groupby(timepoints, lambda x: x != 0) if is_true]) if flag == 1: LGR.info(f'The number of timepoints according to the std_thr method ' f'is {num_timepoints_found}. 
The computed threshold is {thr:.4f}') else: LGR.info(f'The number of timepoints found with the manual threshold of {thr:.4f} ' f'is {num_timepoints_found}') time_offset = time[timepoints.argmax()] if num_timepoints_expected: LGR.info('Checking number of timepoints') if num_timepoints_found > num_timepoints_expected: timepoints_extra = (num_timepoints_found - num_timepoints_expected) LGR.warning(f'Found {timepoints_extra} timepoints' ' more than expected!\n' 'Assuming extra timepoints are at the end ' '(try again with a more liberal thr)') elif num_timepoints_found < num_timepoints_expected: timepoints_missing = (num_timepoints_expected - num_timepoints_found) LGR.warning(f'Found {timepoints_missing} timepoints' ' less than expected!') if tr: LGR.warning('Correcting time offset, assuming missing ' 'timepoints are at the beginning (try again ' 'with a more conservative thr)') time_offset -= (timepoints_missing * tr) else: LGR.warning('Can\'t correct time offset - you should ' 'specify the TR') else: LGR.info('Found just the right amount of timepoints!') else: LGR.warning('The necessary options to find the amount of timepoints ' 'were not provided.') self.thr = thr self.time_offset = time_offset self.timeseries[0] = self.timeseries[0] - time_offset self.num_timepoints_found = num_timepoints_found
Apache License 2.0
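A hedged sketch of print_info on a hand-built BlueprintInput; the channel data, names, and file name below are made up for illustration:

import logging

import numpy as np

from phys2bids.physio_obj import BlueprintInput

logging.basicConfig(level=logging.INFO)  # so the LGR.info output is visible

# hypothetical 3-channel recording: time, trigger, and one physiological trace
timeseries = [np.arange(0, 10, 0.01), np.zeros(1000), np.random.rand(1000)]
phys_in = BlueprintInput(timeseries, freq=[100.0, 100.0, 100.0],
                         ch_name=['time', 'trigger', 'respiration'],
                         units=['s', 'V', 'V'], trigger_idx=1)
phys_in.print_info('example_recording.acq')  # logs one summary line per channel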
armmbed/mbed-tools
src/mbed_tools/devices/_internal/windows/disk_partition_logical_disk_relationships.py
DiskPartitionLogicalDiskRelationship.disk_partition_id
python
def disk_partition_id(self) -> str:
    return DiskPartitionLogicalDiskRelationship._parse_reference(self.get("Antecedent"))
Gets the disk partition id.
https://github.com/armmbed/mbed-tools/blob/607dd7ebc539862540f66933f78372f912c06015/src/mbed_tools/devices/_internal/windows/disk_partition_logical_disk_relationships.py#L50-L52
from typing import NamedTuple

from mbed_tools.devices._internal.windows.component_descriptor import ComponentDescriptor


class DiskToPartitionMsdnDefinition(NamedTuple):

    EndingAddress: int
    StartingAddress: int
    Antecedent: str
    Dependent: str


class DiskPartitionLogicalDiskRelationship(ComponentDescriptor):

    def __init__(self) -> None:
        super().__init__(DiskToPartitionMsdnDefinition, win32_class_name="Win32_LogicalDiskToPartition")

    @property
    def component_id(self) -> str:
        return f"{self.get('Antecedent')}->{self.get('Dependent')}"

    @staticmethod
    def _parse_reference(ref: str) -> str:
        return ref.replace("'", "").replace('"', "").split("=")[1]

    @property
    def logical_disk_id(self) -> str:
        return DiskPartitionLogicalDiskRelationship._parse_reference(self.get("Dependent"))

    @property
Apache License 2.0
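disk_partition_id simply runs the _parse_reference helper over the WMI Antecedent reference, so the sketch below feeds that helper an illustrative reference string directly (the object path is invented, and the import assumes the mbed-tools package with its Windows-only dependencies is importable):

from mbed_tools.devices._internal.windows.disk_partition_logical_disk_relationships import (
    DiskPartitionLogicalDiskRelationship,
)

# illustrative WMI object path of the kind stored in the "Antecedent" field
antecedent = '\\\\HOST\\root\\cimv2:Win32_DiskPartition.DeviceID="Disk #0, Partition #1"'

# _parse_reference strips the quotes and keeps everything after the "="
print(DiskPartitionLogicalDiskRelationship._parse_reference(antecedent))
# -> Disk #0, Partition #1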
hazyresearch/fonduer
src/fonduer/utils/data_model_utils/structural.py
get_prev_sibling_tags
python
def get_prev_sibling_tags(
    mention: Union[Candidate, Mention, TemporarySpanMention]
) -> List[str]:
    span = _to_span(mention)
    prev_sibling_tags: List[str] = []
    i = _get_node(span.sentence)
    while i.getprevious() is not None:
        prev_sibling_tags.insert(0, str(i.getprevious().tag))
        i = i.getprevious()
    return prev_sibling_tags
Return the HTML tag of the Mention's previous siblings.

Previous siblings are Mentions which are at the same level in the HTML
tree as the given mention, but are declared before the given mention.
If a candidate is passed in, only the previous siblings of its first
Mention are considered in the calculation.

:param mention: The Mention to evaluate
https://github.com/hazyresearch/fonduer/blob/c9fd6b91998cd708ab95aeee3dfaf47b9e549ffd/src/fonduer/utils/data_model_utils/structural.py#L73-L91
import functools from builtins import str from typing import List, Optional, Tuple, Union import numpy as np from lxml import etree from lxml.etree import _ElementTree from lxml.html import HtmlElement, fromstring from fonduer.candidates.models import Candidate, Mention from fonduer.candidates.models.span_mention import SpanMention, TemporarySpanMention from fonduer.parser.models.sentence import Sentence from fonduer.utils.data_model_utils.utils import _to_span def get_tag(mention: Union[Candidate, Mention, TemporarySpanMention]) -> str: span = _to_span(mention) return str(span.sentence.html_tag) def get_attributes( mention: Union[Candidate, Mention, TemporarySpanMention] ) -> List[str]: span = _to_span(mention) return span.sentence.html_attrs @functools.lru_cache(maxsize=16) def _get_etree_for_text(text: str) -> _ElementTree: return etree.ElementTree(fromstring(text)) def _get_node(sentence: Sentence) -> HtmlElement: doc_etree = _get_etree_for_text(sentence.document.text) return doc_etree.xpath(sentence.xpath)[0] def get_parent_tag( mention: Union[Candidate, Mention, TemporarySpanMention] ) -> Optional[str]: span = _to_span(mention) i = _get_node(span.sentence) return str(i.getparent().tag) if i.getparent() is not None else None
MIT License
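A hedged sketch of calling get_prev_sibling_tags; it assumes a corpus has already been parsed with Fonduer and that cand is one of the extracted candidates (building that pipeline is outside this record):

from fonduer.utils.data_model_utils.structural import get_prev_sibling_tags

# `cand` is assumed to be a fonduer Candidate (or Mention) from an already-parsed document
tags = get_prev_sibling_tags(cand)
print(tags)  # HTML tags of the earlier same-level siblings, in document order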
martinlackner/abcvoting
abcvoting/genprofiles.py
__generate_2d_points
python
def __generate_2d_points(agents, mode, sigma):
    points = {}

    if mode == "twogroups":
        for i in range(int(len(agents) // 3)):
            points[agents[i]] = (random.gauss(-0.5, sigma),
                                 random.gauss(-0.5, sigma))
        for i in range(int(len(agents) // 3), len(agents)):
            points[agents[i]] = (random.gauss(0.5, sigma),
                                 random.gauss(0.5, sigma))
    elif mode == "normal":
        for i in range(len(agents)):
            points[agents[i]] = (random.gauss(0.0, sigma),
                                 random.gauss(0.0, sigma))
    elif mode == "uniform_square":
        for a in agents:
            points[a] = (random.uniform(-1, 1), random.uniform(-1, 1))
    else:
        raise ValueError("mode", mode, "not known")
    return points
Generates a list of 2d coordinates subject to various distributions.
https://github.com/martinlackner/abcvoting/blob/e0bb05a8f0478c071e0bab7d0914c936a3fff155/abcvoting/genprofiles.py#L267-L288
import random from math import fabs, sqrt from abcvoting.preferences import Profile from abcvoting.misc import check_enough_approved_candidates MAX_ITERATIONS = 100 def random_profile(num_voters, num_cand, prob_distribution, committeesize=None, **kwargs): for _ in range(MAX_ITERATIONS): if prob_distribution == "IC": profile = random_IC_profile(num_cand, num_voters, **kwargs) elif prob_distribution.startswith("Mallows"): dispersion = float(prob_distribution[7:]) profile = random_mallows_profile(num_cand, num_voters, dispersion=dispersion, **kwargs) else: raise ValueError(f"Probability model {prob_distribution} unknown.") try: if committeesize: check_enough_approved_candidates(profile, committeesize) return profile except ValueError: pass else: raise RuntimeError( f"Did not find profile with enough approved candidates with {MAX_ITERATIONS} tries.\n" f"(num_voters={num_voters}, num_cand={num_cand}, prob_distribution={prob_distribution}," f"committeesize={committeesize}, {kwargs}" ) def random_urn_profile(num_cand, num_voters, setsize, replace): currsize = 1.0 approval_sets = [] replacedsets = {} for _ in range(num_voters): r = random.random() * currsize if r < 1.0: randset = random.sample(range(num_cand), setsize) approval_sets.append(randset) key = tuple(set(randset)) if key in replacedsets: replacedsets[key] += 1 else: replacedsets[key] = 1 currsize += replace else: r = random.randint(0, sum(replacedsets.values())) for approval_set in replacedsets: count = replacedsets[approval_set] if r <= count: approval_sets.append(list(approval_set)) break else: r -= count profile = Profile(num_cand) profile.add_voters(approval_sets) return profile def random_urn_party_list_profile(num_cand, num_voters, num_parties, replace, uniform=False): currsize = 1.0 approval_sets = [] replacedsets = {} parties = list(range(num_parties)) party_cands = __distribute_candidates_to_parties(num_cand, parties, uniform=uniform) for _ in range(num_voters): r = random.random() * currsize if r < 1.0: party = random.choice(parties) randpartyset = list(party_cands[party]) approval_sets.append(randpartyset) if party in replacedsets: replacedsets[party] += 1 else: replacedsets[party] = 1 currsize += replace else: r = random.randint(0, sum(replacedsets.values())) for party in replacedsets: count = replacedsets[party] if r <= count: approval_sets.append(list(party_cands[party])) break else: r -= count profile = Profile(num_cand) profile.add_voters(approval_sets) return profile def random_IC_profile(num_cand, num_voters, setsize): approval_sets = [] for _ in range(num_voters): randset = random.sample(range(num_cand), setsize) approval_sets.append(randset) profile = Profile(num_cand) profile.add_voters(approval_sets) return profile def random_IC_party_list_profile(num_cand, num_voters, num_parties, uniform=False): parties = list(range(num_parties)) party_cands = __distribute_candidates_to_parties(num_cand, parties, uniform=uniform) approval_sets = [] for _ in range(num_voters): approval_sets.append(party_cands[random.choice(parties)]) profile = Profile(num_cand) profile.add_voters(approval_sets) return profile def random_2d_points_profile( num_cand, num_voters, candpointmode, voterpointmode, sigma, approval_threshold ): voters = list(range(num_voters)) cands = list(range(num_cand)) voter_points = __generate_2d_points(voters, voterpointmode, sigma) cand_points = __generate_2d_points(cands, candpointmode, sigma) approval_sets = __get_profile_from_points( voters, cands, voter_points, cand_points, approval_threshold ) profile = 
Profile(num_cand) profile.add_voters(approval_sets) return profile def random_2d_points_party_list_profile( num_cand, num_voters, num_parties, partypointmode, voterpointmode, sigma, uniform=False ): parties = list(range(num_parties)) party_cands = __distribute_candidates_to_parties(num_cand, parties, uniform=uniform) voters = list(range(num_voters)) voter_points = __generate_2d_points(voters, voterpointmode, sigma) party_points = __generate_2d_points(parties, partypointmode, sigma) party_sets = __get_profile_from_points(voters, parties, voter_points, party_points, 1.0) approval_sets = [] for p in party_sets: approval_sets.append(party_cands[p[0]]) profile = Profile(num_cand) profile.add_voters(approval_sets) return profile def random_mallows_profile(num_cand, num_voters, setsize, dispersion): if not (0 < dispersion <= 1): raise Exception("Invalid dispersion, needs to be in (0, 1].") reference_ranking = list(range(num_cand)) random.shuffle(reference_ranking) insert_dist = __compute_mallows_insert_distributions(num_cand, dispersion) approval_sets = [] for _ in range(num_voters): vote = [] for i, distribution in enumerate(insert_dist): pos = __select_pos(distribution) vote.insert(pos, reference_ranking[i]) approval_sets.append(vote[:setsize]) profile = Profile(num_cand) profile.add_voters(approval_sets) return profile def __compute_mallows_insert_distributions(num_cand, dispersion): distributions = [] denominator = 0 for i in range(num_cand): denominator += pow(dispersion, i) dist = [] for j in range(i + 1): dist.append(pow(dispersion, i - j) / denominator) distributions.append(dist) return distributions def __select_pos(distribution): if round(sum(distribution), 10) != 1.0: raise Exception("Invalid Distribution", distribution, "sum:", sum(distribution)) r = round(random.random(), 10) pos = -1 s = 0 for prob in distribution: pos += 1 s += prob if s >= r: return pos return pos def __distribute_candidates_to_parties(num_cand, parties, uniform): if num_cand < len(parties): raise ValueError("Not enough candidates to split them between" + "the parties.") if uniform: if num_cand % len(parties) != 0: raise ValueError( "To uniformly distribute candidates " + "between parties the number of candidates" + " needs to be divisible by the number of" + " parties." ) party_cands = {} party_size = int(num_cand / len(parties)) cands = set(range(num_cand)) for i, party in enumerate(parties): appr = random.sample(tuple(cands), party_size) party_cands[party] = appr cands = cands - set(appr) return party_cands else: num_parties = len(parties) party_cands = {} num_random_cands = num_cand - num_parties for i, party in enumerate(parties): party_cands[party] = [num_random_cands + i] for cand in range(num_random_cands): party = random.choice(parties) party_cands[party].append(cand) return party_cands
MIT License
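Since __generate_2d_points is a private helper, this sketch exercises it through the public random_2d_points_profile generator shown in the context; all parameter values are illustrative:

import random

from abcvoting.genprofiles import random_2d_points_profile

random.seed(42)  # reproducible sample
# voters and candidates are both drawn from a 2D normal cloud ("normal" mode);
# each voter approves the candidates that end up close enough to their own point
profile = random_2d_points_profile(num_cand=10, num_voters=20,
                                   candpointmode="normal",
                                   voterpointmode="normal",
                                   sigma=0.5, approval_threshold=1.5)
print(profile)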
jbronn/django-forge
forge/models.py
ModuleManager.get_for_full_name
python
def get_for_full_name(self, full_name):
    parsed = self.parse_full_name(full_name)
    if parsed:
        author, name = parsed
        return self.get(author__name__iexact=author, name=name)
    else:
        raise self.model.DoesNotExist
Returns Module for the given full name, e.g., 'puppetlabs/stdlib'.
https://github.com/jbronn/django-forge/blob/16e1c981745581ebad336eb87e673ef2de35a02d/forge/models.py#L42-L51
import hashlib
import json
import os
import tarfile
import warnings

from django.db import models

from semantic_version.django_fields import VersionField

from .constants import MODULE_REGEX
from .storage import ForgeStorage


class AuthorManager(models.Manager):

    def get_by_natural_key(self, name):
        return self.get(name__iexact=name)


class Author(models.Model):

    name = models.CharField(max_length=64, unique=True)

    objects = AuthorManager()

    def __unicode__(self):
        return self.name

    def natural_key(self):
        return (self.name,)

    @property
    def v3(self):
        return {
            'username': self.name.lower(),
        }


class ModuleManager(models.Manager):

    def get_by_natural_key(self, author, name):
        return self.get(author=Author.objects.get_by_natural_key(author), name__iexact=name)
Apache License 2.0
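A hedged sketch of the manager method, assuming a configured Django project with this forge app installed and a Module model whose default manager is ModuleManager (the model name is an assumption; only get_for_full_name and the example full name come from the record):

from forge.models import Module  # assumption: Module uses ModuleManager as `objects`

try:
    # 'puppetlabs/stdlib' is the example full name from the docstring above
    module = Module.objects.get_for_full_name('puppetlabs/stdlib')
    print(module)
except Module.DoesNotExist:
    print('No such module in the local forge database')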
erykoff/redmapper
redmapper/randoms.py
RandomWeigher.weight_randoms
python
def weight_randoms(self, minlambda, zrange=None, lambdabin=None):
    if zrange is None:
        zrange = np.array([self.config.zrange[0], self.config.zrange[1]])

    zname = 'z%03d-%03d' % (int(self.config.zrange[0]*100), int(self.config.zrange[1]*100))
    vlimname = 'vl%02d' % (int(self.vlim_lstar*10))

    if lambdabin is None:
        lamname = 'lgt%03d' % (int(minlambda))
        lambdabin = np.array([0.0, 1000.0])
    else:
        lamname = 'lgt%03d_l%03d-%03d' % (int(minlambda), int(lambdabin[0]), int(lambdabin[1]))

    zuse, = np.where((self.randcat.z > zrange[0]) & (self.randcat.z < zrange[1]))
    if zuse.size == 0:
        raise RuntimeError("No random points in specified redshift range %.2f < z < %.2f" % (zrange[0], zrange[1]))

    st = np.argsort(self.randcat.id_input[zuse])
    uid = np.unique(self.randcat.id_input[zuse[st]])

    a, b = esutil.numpy_util.match(self.redmapper_cat.mem_match_id, uid)
    if b.size < uid.size:
        raise RuntimeError("IDs in randcat do not match those of corresponding redmapper catalog.")

    a, b = esutil.numpy_util.match(self.redmapper_cat.mem_match_id, self.randcat.id_input[zuse])

    if self.config.select_scaleval:
        luse, = np.where((self.redmapper_cat.Lambda[a]/self.redmapper_cat.scaleval[a] > minlambda) &
                         (self.redmapper_cat.Lambda[a] > lambdabin[0]) &
                         (self.redmapper_cat.Lambda[a] <= lambdabin[1]))
    else:
        luse, = np.where((self.redmapper_cat.Lambda[a] > minlambda) &
                         (self.redmapper_cat.Lambda[a] > lambdabin[0]) &
                         (self.redmapper_cat.Lambda[a] <= lambdabin[1]))
    if luse.size == 0:
        raise RuntimeError("No random points in specified richness range %0.2f < lambda < %0.2f and lambda > %.2f" % (lambdabin[0], lambdabin[1], minlambda))

    alluse = zuse[b[luse]]

    randpoints = Catalog.zeros(luse.size, dtype=[('ra', 'f8'),
                                                 ('dec', 'f8'),
                                                 ('ztrue', 'f4'),
                                                 ('lambda_in', 'f4'),
                                                 ('avg_lambdaout', 'f4'),
                                                 ('weight', 'f4')])
    randpoints.ra = self.randcat.ra[alluse]
    randpoints.dec = self.randcat.dec[alluse]
    randpoints.ztrue = self.randcat.z[alluse]
    randpoints.lambda_in = self.randcat.lambda_in[alluse]
    randpoints.avg_lambdaout = self.randcat.lambda_in[alluse]

    h, rev = esutil.stat.histogram(self.randcat.id_input[alluse], rev=True)
    ok, = np.where(h > 0)
    for i in ok:
        i1a = rev[rev[i]: rev[i + 1]]
        if self.config.select_scaleval:
            gd, = np.where((self.randcat.lambda_in[alluse[i1a]]/self.randcat.scaleval[alluse[i1a]] > minlambda) &
                           (self.randcat.maskfrac[alluse[i1a]] < self.config.max_maskfrac) &
                           (self.randcat.lambda_in[alluse[i1a]] > lambdabin[0]) &
                           (self.randcat.lambda_in[alluse[i1a]] <= lambdabin[1]))
        else:
            gd, = np.where((self.randcat.lambda_in[alluse[i1a]] > minlambda) &
                           (self.randcat.maskfrac[alluse[i1a]] < self.config.max_maskfrac) &
                           (self.randcat.lambda_in[alluse[i1a]] > lambdabin[0]) &
                           (self.randcat.lambda_in[alluse[i1a]] <= lambdabin[1]))
        if gd.size > 0:
            randpoints.weight[i1a[gd]] = float(i1a.size)/float(gd.size)

    use, = np.where(randpoints.weight > 0.0)

    fname_base = 'weighted_randoms_%s_%s_%s' % (zname, lamname, vlimname)
    randfile_out = self.config.redmapper_filename(fname_base, withversion=True)
    randpoints.to_fits_file(randfile_out, indices=use)

    astr = self.vlim_mask.get_areas()

    nodes = make_nodes(self.config.zrange, self.config.area_nodesize)
    zbinsize = self.config.area_coarsebin
    zbins = np.arange(self.config.zrange[0], self.config.zrange[1], zbinsize)

    st = np.argsort(self.randcat.z[alluse])
    ind1 = np.searchsorted(self.randcat.z[alluse[st]], zbins)
    if self.config.select_scaleval:
        gd, = np.where((self.randcat.lambda_in[alluse[st]]/self.randcat.scaleval[alluse[st]] > minlambda) &
                       (self.randcat.maskfrac[alluse[st]] < self.config.max_maskfrac) &
                       (self.randcat.lambda_in[alluse[st]] > lambdabin[0]) &
                       (self.randcat.lambda_in[alluse[st]] < lambdabin[1]))
    else:
        gd, = np.where((self.randcat.lambda_in[alluse[st]] > minlambda) &
                       (self.randcat.maskfrac[alluse[st]] < self.config.max_maskfrac) &
                       (self.randcat.lambda_in[alluse[st]] > lambdabin[0]) &
                       (self.randcat.lambda_in[alluse[st]] < lambdabin[1]))
    ind2 = np.searchsorted(self.randcat.z[alluse[st[gd]]], zbins)

    xvals = (zbins[0: -2] + zbins[1: -1])/2.
    with np.warnings.catch_warnings():
        np.warnings.simplefilter("ignore")
        yvals = np.nan_to_num(ind2[1: -1].astype(np.float64) / ind1[1: -1].astype(np.float64))

    fitter = MedZFitter(nodes, xvals, yvals)
    p0 = np.ones(nodes.size)
    pars0 = fitter.fit(p0)
    pars = fitter.fit(pars0)

    spl = CubicSpline(nodes, pars)
    corrs = np.clip(spl(astr.z), 0.0, 1.0)

    astr.area = corrs*astr.area

    areafile_out = self.config.redmapper_filename(fname_base + '_area', withversion=True)
    astr.to_fits_file(areafile_out)

    return (randfile_out, areafile_out)
Compute random weights.

Parameters
----------
minlambda : `float`
    Minimum lambda to use in computations
zrange : `np.ndarray`, optional
    2-element list of redshift range.  Default is full range.
lambdabin : `np.ndarray`, optional
    2-element list of lambda range.  Default is full range.
https://github.com/erykoff/redmapper/blob/23fb66c7369de784c67ce6c41ada2f1f51a84acb/redmapper/randoms.py#L264-L409
import fitsio import esutil import re import copy import numpy as np import healsparse from .catalog import Catalog, Entry from .galaxy import GalaxyCatalog, GalaxyCatalogMaker from .cluster import ClusterCatalog from .utilities import make_nodes, CubicSpline from .fitters import MedZFitter from .volumelimit import VolumeLimitMask class GenerateRandoms(object): def __init__(self, config, vlim_mask=None, vlim_lstar=None, redmapper_cat=None): self.config = config if self.config.randfile is None: raise RuntimeError("Must set randfile in config to run GenerateRandoms.") if vlim_lstar is None: self.vlim_lstar = self.config.vlim_lstar else: self.vlim_lstar = vlim_lstar if vlim_mask is None: self.vlim_mask = VolumeLimitMask(self.config, self.vlim_lstar) else: self.vlim_mask = vlim_mask if redmapper_cat is None: self.redmapper_cat = ClusterCatalog.from_fits_file(self.config.catfile) else: self.redmapper_cat = redmapper_cat def generate_randoms(self, nrandoms, rng=None): if rng is None: rng = np.random.RandomState() min_gen = 10000 max_gen = 1000000 n_left = copy.copy(nrandoms) ctr = 0 dtype = [('id', 'i4'), ('ra', 'f8'), ('dec', 'f8'), ('z', 'f4'), ('lambda', 'f4'), ('id_input', 'i4')] info_dict = {} m = re.search(r'(.*)\_master\_table.fit$', self.config.randfile) if m is None: raise RuntimeError("Config has randfile of incorrect format. Must end in _master_table.fit") outbase = m.groups()[0] maker = RandomCatalogMaker(outbase, info_dict, nside=self.config.galfile_nside) self.config.logger.info("Generating %d randoms to %s" % (n_left, outbase)) while (n_left > 0): n_gen = np.clip(n_left * 3, min_gen, max_gen) ra_rand, dec_rand = healsparse.make_uniform_randoms(self.vlim_mask.sparse_vlimmap, n_gen, rng=rng) zmax, fracgood = self.vlim_mask.calc_zmax(ra_rand, dec_rand, get_fracgood=True) r = rng.uniform(size=n_gen) gd, = np.where(r < fracgood) if gd.size == 0: continue tempcat = Catalog(np.zeros(gd.size, dtype=dtype)) tempcat.ra = ra_rand[gd] tempcat.dec = dec_rand[gd] tempcat.z = -1.0 r = rng.choice(np.arange(self.redmapper_cat.size), size=gd.size, replace=True) zz = self.redmapper_cat.z_lambda[r] ll = self.redmapper_cat.Lambda[r] ii = self.redmapper_cat.mem_match_id[r] zctr = 0 for i in range(tempcat.size): if (zz[zctr] < zmax[i]): tempcat.z[i] = zz[zctr] tempcat.Lambda[i] = ll[zctr] tempcat.id_input[i] = ii[zctr] zctr += 1 gd, = np.where(tempcat.z > 0.0) n_good = gd.size if n_good == 0: continue if n_good > n_left: n_good = n_left gd = gd[0: n_good] tempcat = tempcat[gd] tempcat.id = np.arange(ctr + 1, ctr + n_good + 1) maker.append_randoms(tempcat._ndarray[: n_good]) ctr += n_good n_left -= n_good self.config.logger.info("There are %d randoms remaining..." 
% (n_left)) maker.finalize_catalog() class RandomCatalog(GalaxyCatalog): @classmethod def from_randfile(cls, filename, nside=0, hpix=[], border=0.0): return super(RandomCatalog, cls).from_galfile(filename, nside=nside, hpix=hpix, border=border) @classmethod def from_galfile(cls, filename, zredfile=None, nside=0, hpix=[], border=0.0, truth=False): raise NotImplementedError("Cannot call from_galfile on a RandomCatalog") @property def galcol(self): raise NotImplementedError("Cannot call galcol on a RandomCatalog") @property def galcol_err(self): raise NotImplementedError("Cannot call galcol_err on a RandomCatalog") @property def add_zred_fields(self): raise NotImplementedError("Cannot call add_zred_fields on a RandomCatalog") class RandomCatalogMaker(GalaxyCatalogMaker): def __init__(self, outbase, info_dict, nside=32, maskfile=None, mask_mode=0, parallel=False): if 'LIM_REF' not in info_dict: info_dict['LIM_REF'] = 0.0 if 'REF_IND' not in info_dict: info_dict['REF_IND'] = 0 if 'AREA' not in info_dict: info_dict['AREA'] = 0.0 if 'NMAG' not in info_dict: info_dict['NMAG'] = 0 if 'MODE' not in info_dict: info_dict['MODE'] = 'NONE' if 'ZP' not in info_dict: info_dict['ZP'] = 0.0 super(RandomCatalogMaker, self).__init__(outbase, info_dict, nside=nside, maskfile=maskfile, mask_mode=mask_mode, parallel=parallel) def split_randoms(self, rands): if self.is_finalized: raise RuntimeError("Cannot split randoms for an already finalized catalog.") if os.path.isfile(self.filename): raise RuntimeError("Cannot split randoms when final file %s already exists." % (self.filename)) self.append_randoms(rands) self.finalize_catalog() def append_randoms(self, rands): self.append_galaxies(rands) def _check_galaxies(self, rands): return True class RandomWeigher(object): def __init__(self, config, randcatfile, vlim_mask=None, vlim_lstar=None, redmapper_cat=None): self.config = config self.randcat = Catalog.from_fits_file(randcatfile) if vlim_lstar is None: self.vlim_lstar = self.config.vlim_lstar else: self.vlim_lstar = vlim_lstar if vlim_mask is None: self.vlim_mask = VolumeLimitMask(self.config, self.vlim_lstar) else: self.vlim_mask = vlim_mask if redmapper_cat is None: self.redmapper_cat = ClusterCatalog.from_fits_file(self.config.catfile) else: self.redmapper_cat = redmapper_cat
Apache License 2.0
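A hedged sketch of driving weight_randoms, assuming config is an already-loaded redmapper configuration object and that a random-point catalog file already exists (the file name is a placeholder):

from redmapper.randoms import RandomWeigher

# `config` is assumed to be an already-loaded redmapper configuration object,
# and 'zmask_randoms.fit' a previously generated random catalog (placeholder name)
weigher = RandomWeigher(config, 'zmask_randoms.fit')

# weight randoms with lambda > 20 over the catalog's full redshift range
randfile, areafile = weigher.weight_randoms(minlambda=20.0)
print(randfile, areafile)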
darkdarkfruit/python-weed
weed/operation.py
WeedOperation.cp
python
def cp(self, src_fid, dst_fid, src_file_name='') -> None or WeedOperationResponse:
    try:
        src_file_rsp = self.crud_read(src_fid, file_name=src_file_name)
        fp = io.BytesIO(src_file_rsp.content)
        g_logger.debug(
            'Updating file: dst_fid: %s, src_fid: %s, src_file_name: %s, fp: %s' % (
                dst_fid, src_fid, src_file_name, fp))
        return self.crud_update(fp, dst_fid, src_file_name)
    except Exception as e:
        err_msg = 'Could not Updating file: dst_fid: %s, src_fid: %s, src_file_name: %s. e: %s' % (
            dst_fid, src_fid, src_file_name, e)
        g_logger.error(err_msg)
        return None
cp src_fid dst_fid

replace file@dst_fid with file@src_fid
https://github.com/darkdarkfruit/python-weed/blob/32722b9aa3143116970a993dad690835c9cd415b/weed/operation.py#L329-L345
__all__ = ['WeedOperation'] import io import random from weed.master import * from weed.util import * class WeedOperation(object): def __init__(self, master_url_base='http://localhost:9333', prefetch_volume_ids=False): self.master_url_base = master_url_base self.master = WeedMaster(url_base=master_url_base, prefetch_volume_ids=prefetch_volume_ids) def acquire_new_fids(self, count=1) -> [str]: wak = self.master.acquire_new_assign_key(count=count) fid = wak.fid if count == 1: return [fid] else: fids = [fid] + [fid + ('_%d' % (i + 1)) for i in range(count)] return fids def get_fid_full_url(self, fid, use_public_url=False) -> None or str: volume_id = fid.split(',')[0] full_url = None try: r = self.master.lookup(volume_id) locations = r['locations'] location = locations[random.randint(0, len(locations) - 1)] if not use_public_url: full_url = 'http://%s/%s' % (location['url'], fid) else: full_url = 'http://%s/%s' % (location['publicUrl'], fid) except Exception as e: g_logger.error('Could not get volume location of this fid: %s. Exception is: %s' % (fid, e)) return full_url def get(self, fid, file_name='') -> WeedOperationResponse: g_logger.debug('|--> Getting file. fid: %s, file_name:%s' % (fid, file_name)) fid_full_url = 'wrong_url' wor = WeedOperationResponse() try: fid_full_url = self.get_fid_full_url(fid) g_logger.debug('Reading file fid: %s, file_name: %s, fid_full_url: %s' % (fid, file_name, fid_full_url)) rsp = self.get_http_response(fid_full_url) wor.status = Status.SUCCESS wor.fid = fid wor.url = fid_full_url wor.name = file_name wor.content = rsp.content wor.content_type = rsp.headers.get('content-type') except Exception as e: err_msg = 'Could not read file fid: %s, file_name: %s, fid_full_url: %s, e: %s' % ( fid, file_name, fid_full_url, e) g_logger.error(err_msg) wor.status = Status.FAILED wor.message = err_msg return wor def get_url(self, fid) -> None or str: return self.get_fid_full_url(fid) @staticmethod def get_http_response(fid_full_url) -> requests.Response: return requests.get(fid_full_url) def get_content(self, fid, file_name='') -> bytes: return self.get(fid, file_name).content def put(self, fp, fid=None, file_name='') -> None or WeedOperationResponse: g_logger.info('|--> Putting file@fid:%s, file_name:%s' % (fid, file_name)) fid_full_url = 'wrong_url' _fid = fid try: if not fid: wak = self.master.acquire_new_assign_key() _fid = wak.fid g_logger.debug('no fid. accquired new one: "%s"' % _fid) fid_full_url = wak.fid_full_url else: fid_full_url = self.get_fid_full_url(fid) except Exception as e: err_msg = 'Could not put file. fp: "%s", file_name: "%s", fid_full_url: "%s", e: %s' % ( fp, file_name, fid_full_url, e) g_logger.error(err_msg) return None wor = WeedOperationResponse() is_our_responsibility_to_close_file = False if isinstance(fp, str): _fp = open(fp, 'rb') is_our_responsibility_to_close_file = True else: _fp = fp try: g_logger.info('Putting file with fid: %s, fid_full_url:%s for file: fp: %s, file_name: %s' % (_fid, fid_full_url, fp, file_name)) wor = put_file(_fp, fid_full_url, file_name) g_logger.info('%s' % wor) wor.fid = _fid except Exception as e: err_msg = 'Could not put file. fp: "%s", file_name: "%s", fid_full_url: "%s", e: %s' % ( fp, file_name, fid_full_url, e) g_logger.error(err_msg) wor.status = Status.FAILED wor.message = err_msg if is_our_responsibility_to_close_file: try: _fp.close() except Exception as e: g_logger.warning('Could not close fp: %s. 
e: %s' % (_fp, e)) return wor def delete(self, fid, file_name='') -> WeedOperationResponse: g_logger.debug('|--> Deleting file@%s, file_name: %s' % (fid, file_name)) wor = WeedOperationResponse() fid_full_url = 'wrong_url' try: fid_full_url = self.get_fid_full_url(fid) g_logger.debug('Deleting file: fid: %s, file_name: %s, fid_full_url: %s' % (fid, file_name, fid_full_url)) r = requests.delete(fid_full_url) rsp_json = r.json() wor.status = Status.SUCCESS wor.fid = fid wor.url = fid_full_url wor.name = file_name if 'size' in rsp_json: wor.storage_size = rsp_json['size'] if wor.storage_size == 0: err_msg = ('Error: fid@%s is not exist.' % fid) wor.status = Status.FAILED wor.message = err_msg g_logger.error(err_msg) except Exception as e: err_msg = 'Deleting file: fid: %s, file_name: %s, fid_full_url: %s, e: %s' % ( fid, file_name, fid_full_url, e) g_logger.error(err_msg) wor.status = Status.FAILED wor.message = err_msg g_logger.error(err_msg) return wor def exists(self, fid) -> bool: if ',' not in fid: return False try: volume_id = fid.split(',')[0] except Exception as e: g_logger.error('Invalid fid:"%s". e: %s' % (fid, e)) return False if not self.master.lookup(volume_id): return False fid_full_url = self.get_fid_full_url(fid) try: rsp = requests.head(fid_full_url, allow_redirects=True) if not rsp.ok: return False else: return True except Exception as e: g_logger.error('Error occurs while requests.head. e: %s' % e) return False def crud_create(self, fp, file_name='') -> None or WeedOperationResponse: g_logger.debug('--> Trying to create a file. fp:%s, file_name:%s' % (fp, file_name)) return self.put(fp, file_name=file_name) def crud_read(self, fid, file_name='') -> WeedOperationResponse: g_logger.debug('--> Trying to read a file. fid: %s, file_name:%s' % (fid, file_name)) return self.get(fid, file_name=file_name) def crud_update(self, fp, fid, file_name='') -> None or WeedOperationResponse: g_logger.info('--> Trying to update a file@fid:%s, file_name: %s' % (fid, file_name)) return self.put(fp, fid=fid, file_name=file_name) def crud_delete(self, fid, file_name='') -> WeedOperationResponse: g_logger.info('--> Trying to delete a file@fid:%s, file_name: %s' % (fid, file_name)) return self.delete(fid=fid, file_name=file_name)
MIT License
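A hedged sketch of cp, assuming a seaweedfs master is reachable at the default localhost:9333; the file contents and names are made up:

import io

from weed.operation import WeedOperation

op = WeedOperation('http://localhost:9333')  # assumes a running weed master

# create two files, then overwrite the second with the contents of the first
src = op.crud_create(io.BytesIO(b'hello weed'), file_name='a.txt')
dst = op.crud_create(io.BytesIO(b'old content'), file_name='b.txt')

result = op.cp(src.fid, dst.fid, src_file_name='a.txt')
print(result)  # a WeedOperationResponse on success, None on failure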
ylongqi/openrec
openrec/tf1/legacy/utils/dataset.py
Dataset.shuffle
python
def shuffle(self):
    if self.data is None:
        self.data = self.raw_data.copy()
    np.random.shuffle(self.data)
Shuffle the dataset entries.
https://github.com/ylongqi/openrec/blob/a00de2345844858194ef43ab6845342114a5be93/openrec/tf1/legacy/utils/dataset.py#L65-L71
import numpy as np


class Dataset(object):

    def __init__(self, raw_data, max_user, max_item, name='dataset'):
        self.name = name
        if type(raw_data) == np.ndarray:
            self.raw_data = raw_data
        else:
            raise TypeError("Unsupported data input schema. Please use structured numpy array.")
        self.data = None
        self._max_user = max_user
        self._max_item = max_item

    def max_user(self):
        return self._max_user

    def max_item(self):
        return self._max_item
Apache License 2.0
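Dataset only needs a structured numpy array, so shuffle can be demonstrated end to end (the field names are illustrative, and the import assumes the openrec package with its TensorFlow dependency is installed):

import numpy as np

from openrec.tf1.legacy.utils.dataset import Dataset

# structured array of (user, item) interactions
raw = np.array([(0, 1), (1, 2), (2, 0), (3, 4)],
               dtype=[('user_id', np.int32), ('item_id', np.int32)])

d = Dataset(raw, max_user=4, max_item=5, name='toy')
d.shuffle()         # copies raw_data into data on first call, then shuffles in place
print(d.data)       # shuffled copy
print(d.raw_data)   # original order is untouched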
missionpinball/mpf
mpf/core/events.py
EventManager.remove_all_handlers_for_event
python
def remove_all_handlers_for_event(self, event: str) -> None:
    if event in self.registered_handlers:
        del self.registered_handlers[event]
Remove all handlers for event. Use carefully. This is currently used to remove handlers for all init events which only occur once.
https://github.com/missionpinball/mpf/blob/1eda6ba6892b8f7cc6dedf6cb6472ff92293b8ef/mpf/core/events.py#L292-L298
import inspect from collections import deque, namedtuple, defaultdict import uuid import asyncio from functools import partial, lru_cache from unittest.mock import MagicMock from typing import Dict, Any, Tuple, Optional, Callable, List from mpf.core.mpf_controller import MpfController MYPY = False if MYPY: from mpf.core.machine import MachineController from mpf.core.placeholder_manager import BaseTemplate from typing import Deque EventHandlerKey = namedtuple("EventHandlerKey", ["key", "event"]) RegisteredHandler = namedtuple("RegisteredHandler", ["callback", "priority", "kwargs", "key", "condition", "blocking_facility"]) PostedEvent = namedtuple("PostedEvent", ["event", "type", "callback", "kwargs"]) class EventHandlerException(Exception): class EventManager(MpfController): config_name = "event_manager" __slots__ = ["registered_handlers", "event_queue", "callback_queue", "monitor_events", "_queue_tasks", "_stopped"] def __init__(self, machine: "MachineController") -> None: super().__init__(machine) self.registered_handlers = defaultdict(list) self.event_queue = deque([]) self.callback_queue = deque([]) self.monitor_events = False self._queue_tasks = [] self._stopped = False self.add_handler("debug_dump_stats", self._debug_dump_events) def _debug_dump_events(self, **kwargs): del kwargs self.log.info("--- DEBUG DUMP EVENTS ---") self.log.info("Total registered_handlers: %s. Total event_queue: %s. Total callback_queue: %s. " "Total _queue_tasks: %s", len(self.registered_handlers), len(self.event_queue), len(self.callback_queue), len(self._queue_tasks)) self.log.info("Registered Handlers:") handlers = sorted(self.registered_handlers.items(), key=lambda x: -len(x[1])) for event_name, event_list in handlers: self.log.info(" Total handlers: %s (for %s)", len(event_list), event_name) self.log.info("Queue events:") for event_task in self._queue_tasks: self.log.info(" %s:", event_task) self.log.info("--- DEBUG DUMP EVENTS END ---") @lru_cache() def get_event_and_condition_from_string(self, event_string: str) -> Tuple[str, Optional["BaseTemplate"], int]: placeholder = None additional_priority = 0 if event_string[-1:] == "}": first_bracket_pos = event_string.find("{") if first_bracket_pos < 0: raise ValueError('Failed to parse condition in event name, ' 'please remedy "{}"'.format(event_string)) if " " in event_string[0:first_bracket_pos]: raise ValueError('Cannot handle events with spaces in the event name, ' 'please remedy "{}"'.format(event_string)) placeholder = self.machine.placeholder_manager.build_bool_template(event_string[first_bracket_pos + 1:-1]) event_string = event_string[0:first_bracket_pos] else: if " " in event_string: raise ValueError('Cannot handle events with spaces in the event name, ' 'please remedy "{}"'.format(event_string)) if "{" in event_string: raise ValueError('Failed to parse condition in event name, ' 'please remedy "{}"'.format(event_string)) priority_start = event_string.find(".") if priority_start > 0: additional_priority = int(event_string[priority_start + 1:]) event_string = event_string[:priority_start] return event_string, placeholder, additional_priority def add_async_handler(self, event: str, handler: Any, priority: int = 1, blocking_facility: Any = None, **kwargs) -> EventHandlerKey: return self.add_handler(event, partial(self._async_handler_coroutine, handler), priority, blocking_facility, **kwargs) def _async_handler_coroutine(self, _coroutine, queue, **kwargs): queue.wait() task = self.machine.clock.loop.create_task(_coroutine(**kwargs)) 
task.add_done_callback(partial(self._async_handler_done, queue)) @staticmethod def _async_handler_done(queue, future): try: future.result() except asyncio.CancelledError: pass queue.clear() def add_handler(self, event: str, handler: Any, priority: int = 1, blocking_facility: Any = None, **kwargs) -> EventHandlerKey: if event is None: raise AssertionError("Cannot pass event None.") if not self.machine.options['production']: if hasattr(self.machine, "switches") and event in self.machine.switches: self.raise_config_error('Switch name "{name}" name used as event handler for {handler}. ' 'Did you mean "{name}_active"?'.format(name=event, handler=handler), 1) if not callable(handler): raise AssertionError('Cannot add handler "{}" for event "{}". Did you ' 'accidentally add parenthesis to the end of the ' 'handler you passed?'.format(handler, event)) sig = inspect.signature(handler) if 'kwargs' not in sig.parameters: raise AssertionError("Handler {} for event '{}' is missing **kwargs. Actual signature: {}".format( handler, event, sig)) if sig.parameters['kwargs'].kind != inspect.Parameter.VAR_KEYWORD: raise AssertionError("Handler {} for event '{}' param kwargs is missing '**'. " "Actual signature: {}".format(handler, event, sig)) event, condition, additional_priority = self.get_event_and_condition_from_string(event) priority += additional_priority key = uuid.uuid4() if hasattr(handler, "relative_priority") and not isinstance(handler, MagicMock): priority += handler.relative_priority self.registered_handlers[event].append(RegisteredHandler(handler, priority, kwargs, key, condition, blocking_facility)) if self._debug: self.debug_log("Registered %s as a handler for '%s', priority: %s, " "kwargs: %s", self._pretty_format_handler(handler), event, priority, kwargs) if len(self.registered_handlers[event]) > 1: self.registered_handlers[event].sort(key=lambda x: x.priority, reverse=True) if self._info: self._verify_handlers(event, self.registered_handlers[event]) return EventHandlerKey(key, event) def _get_handler_signature(self, handler): cls = handler.callback.__self__ if hasattr(self.machine, "device_manager") and cls == self.machine.device_manager and handler.callback == self.machine.device_manager._control_event_handler: cls = (handler.kwargs["callback"].__self__, handler.kwargs["ms_delay"]) handler_signature = (cls, handler.priority, handler.condition) return handler_signature def _verify_handlers(self, event, sorted_handlers): if not sorted_handlers or len(sorted_handlers) <= 1 or event.startswith("init_phase_"): return seen = set() collisions = [] for handler in sorted_handlers: if not inspect.ismethod(handler.callback): continue handler_signature = self._get_handler_signature(handler) if handler_signature not in seen: seen.add(handler_signature) else: collisions.append(handler_signature) for collision in collisions: handlers = [x for x in sorted_handlers if inspect.ismethod(x.callback) and self._get_handler_signature(x) == collision] self.info_log( "Unordered handler for class {} on event {} with priority {}. Handlers: {}. The order of those " "handlers is not defined and they will be executed in random order. 
This might lead to race " "conditions and potential bugs.".format(collision[0], event, collision[1], handlers) ) def replace_handler(self, event: str, handler: Any, priority: int = 1, **kwargs: dict) -> EventHandlerKey: if event in self.registered_handlers: if kwargs: for rh in self.registered_handlers[event][:]: if rh[0] == handler and rh[2] == kwargs: self.registered_handlers[event].remove(rh) else: for rh in self.registered_handlers[event][:]: if rh[0] == handler: self.registered_handlers[event].remove(rh) return self.add_handler(event, handler, priority, **kwargs)
MIT License
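A hedged sketch from custom MPF game code, where machine is assumed to be the running MachineController and the event name is made up:

# `machine` is assumed to be the running MachineController (e.g. inside custom game code)
def on_my_event(**kwargs):
    print("my_custom_event fired")

machine.events.add_handler('my_custom_event', on_my_event)
machine.events.post('my_custom_event')   # queued; runs when the event queue is processed
machine.events.remove_all_handlers_for_event('my_custom_event')
machine.events.post('my_custom_event')   # no handlers are registered any more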
invinst/responsebot
responsebot/responsebot_client.py
ResponseBotClient.get_list
python
def get_list(self, list_id):
    return List(tweepy_list_to_json(self._client.get_list(list_id=list_id)))
Get info of specified list

:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
https://github.com/invinst/responsebot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L249-L256
from __future__ import absolute_import from decorator import decorate from tweepy.error import TweepError, RateLimitError from responsebot.common.constants import TWITTER_PAGE_DOES_NOT_EXISTS_ERROR, TWITTER_TWEET_NOT_FOUND_ERROR, TWITTER_USER_NOT_FOUND_ERROR, TWITTER_DELETE_OTHER_USER_TWEET, TWITTER_ACCOUNT_SUSPENDED_ERROR, TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER, TWITTER_AUTOMATED_REQUEST_ERROR, TWITTER_OVER_CAPACITY_ERROR, TWITTER_DAILY_STATUS_UPDATE_LIMIT_ERROR, TWITTER_CHARACTER_LIMIT_ERROR_1, TWITTER_CHARACTER_LIMIT_ERROR_2, TWITTER_STATUS_DUPLICATE_ERROR from responsebot.common.exceptions import APIQuotaError, AutomatedRequestError, OverCapacityError, DailyStatusUpdateError, CharacterLimitError, StatusDuplicateError from responsebot.models import Tweet, User, List from responsebot.utils.tweepy import tweepy_list_to_json def api_error_handle(func): def func_wrapper(f, *args, **kwargs): try: return f(*args, **kwargs) except RateLimitError as e: raise APIQuotaError(str(e)) except TweepError as e: if e.api_code == TWITTER_AUTOMATED_REQUEST_ERROR: raise AutomatedRequestError elif e.api_code == TWITTER_OVER_CAPACITY_ERROR: raise OverCapacityError elif e.api_code in [TWITTER_CHARACTER_LIMIT_ERROR_1, TWITTER_CHARACTER_LIMIT_ERROR_2]: raise CharacterLimitError elif e.api_code == TWITTER_DAILY_STATUS_UPDATE_LIMIT_ERROR: raise DailyStatusUpdateError elif e.api_code == TWITTER_STATUS_DUPLICATE_ERROR: raise StatusDuplicateError else: raise return decorate(func, func_wrapper) class ResponseBotClient(object): def __init__(self, client, config): self._client = client self._current_user = None self.config = config @property def tweepy_api(self): return self._client def get_current_user(self): if self._current_user is None: self._current_user = User(self._client.me()._json) return self._current_user @api_error_handle def tweet(self, text, in_reply_to=None, filename=None, file=None): if filename is None: return Tweet(self._client.update_status(status=text, in_reply_to_status_id=in_reply_to)._json) else: return Tweet(self._client.update_with_media(filename=filename, file=file, status=text, in_reply_to_status_id=in_reply_to)._json) def retweet(self, id): try: self._client.retweet(id=id) return True except TweepError as e: if e.api_code == TWITTER_PAGE_DOES_NOT_EXISTS_ERROR: return False raise def get_tweet(self, id): try: return Tweet(self._client.get_status(id=id)._json) except TweepError as e: if e.api_code == TWITTER_TWEET_NOT_FOUND_ERROR: return None raise def get_user(self, id): try: return User(self._client.get_user(user_id=id)._json) except TweepError as e: if e.api_code == TWITTER_USER_NOT_FOUND_ERROR: return None raise def remove_tweet(self, id): try: self._client.destroy_status(id=id) return True except TweepError as e: if e.api_code in [TWITTER_PAGE_DOES_NOT_EXISTS_ERROR, TWITTER_DELETE_OTHER_USER_TWEET]: return False raise def follow(self, user_id, notify=False): try: return User(self._client.create_friendship(user_id=user_id, follow=notify)._json) except TweepError as e: if e.api_code in [TWITTER_ACCOUNT_SUSPENDED_ERROR]: return self.get_user(user_id) raise def unfollow(self, user_id): return User(self._client.destroy_friendship(user_id=user_id)._json) @api_error_handle def create_list(self, name, mode='public', description=None): return List(tweepy_list_to_json(self._client.create_list(name=name, mode=mode, description=description))) @api_error_handle def destroy_list(self, list_id): return List(tweepy_list_to_json(self._client.destroy_list(list_id=list_id))) @api_error_handle def 
update_list(self, list_id, name=None, mode=None, description=None): return List(tweepy_list_to_json( self._client.update_list(list_id=list_id, name=name, mode=mode, description=description)) ) @api_error_handle def lists(self): return [List(tweepy_list_to_json(list)) for list in self._client.lists_all()] @api_error_handle def lists_memberships(self): return [List(tweepy_list_to_json(list)) for list in self._client.lists_memberships()] @api_error_handle def lists_subscriptions(self): return [List(tweepy_list_to_json(list)) for list in self._client.lists_subscriptions()] @api_error_handle def list_timeline(self, list_id, since_id=None, max_id=None, count=20): statuses = self._client.list_timeline(list_id=list_id, since_id=since_id, max_id=max_id, count=count) return [Tweet(tweet._json) for tweet in statuses] @api_error_handle
Apache License 2.0
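A minimal usage sketch of the error mapping shown in the responsebot record above, assuming a ResponseBotClient instance named client has already been built from an authenticated tweepy API object (both the client and the tweet text are hypothetical here):

# Sketch only: tweet() is wrapped by @api_error_handle, so raw TweepError
# codes surface as ResponseBot-specific exceptions. `client` is assumed to be
# a ResponseBotClient constructed elsewhere; the text is made up.
from responsebot.common.exceptions import APIQuotaError, StatusDuplicateError

def post_safely(client, text):
    try:
        return client.tweet(text)
    except StatusDuplicateError:
        print("skipped: identical status already posted")
    except APIQuotaError as exc:
        print("rate limited: %s" % exc)
    return None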
ethereum/beaconrunner
notebooks/thunderdome/eth2spec/altair/spec.py
get_beacon_committee
python
def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]:
    epoch = compute_epoch_at_slot(slot)
    committees_per_slot = get_committee_count_per_slot(state, epoch)
    return compute_committee(
        indices=get_active_validator_indices(state, epoch),
        seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
        index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index,
        count=committees_per_slot * SLOTS_PER_EPOCH,
    )
Return the beacon committee at ``slot`` for ``index``.
https://github.com/ethereum/beaconrunner/blob/aac61ed7b2d8b9875a117b24b86f58d6a93b1dd2/notebooks/thunderdome/eth2spec/altair/spec.py#L802-L813
from eth2spec.phase0 import spec as phase0 from eth2spec.config.config_util import apply_constants_config from typing import ( Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable, Optional, Union ) from dataclasses import ( dataclass, field, ) from lru import LRU from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes from eth2spec.utils.ssz.ssz_typing import ( View, boolean, Container, List, Vector, uint8, uint32, uint64, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector, Path, ) from eth2spec.utils import bls from eth2spec.utils.hash_function import hash from importlib import reload reload(phase0) SSZVariableName = str GeneralizedIndex = NewType('GeneralizedIndex', int) SSZObject = TypeVar('SSZObject', bound=View) CONFIG_NAME = 'mainnet' fork = 'altair' class Slot(uint64): pass class Epoch(uint64): pass class CommitteeIndex(uint64): pass class ValidatorIndex(uint64): pass class Gwei(uint64): pass class Root(Bytes32): pass class Version(Bytes4): pass class DomainType(Bytes4): pass class ForkDigest(Bytes4): pass class Domain(Bytes32): pass class BLSPubkey(Bytes48): pass class BLSSignature(Bytes96): pass class Ether(uint64): pass class ParticipationFlags(uint8): pass def ceillog2(x: int) -> uint64: if x < 1: raise ValueError(f"ceillog2 accepts only positive values, x={x}") return uint64((x - 1).bit_length()) def floorlog2(x: int) -> uint64: if x < 1: raise ValueError(f"floorlog2 accepts only positive values, x={x}") return uint64(x.bit_length() - 1) FINALIZED_ROOT_INDEX = GeneralizedIndex(105) NEXT_SYNC_COMMITTEE_INDEX = GeneralizedIndex(55) GENESIS_SLOT = Slot(0) GENESIS_EPOCH = Epoch(0) FAR_FUTURE_EPOCH = Epoch(2**64 - 1) BASE_REWARDS_PER_EPOCH = uint64(4) DEPOSIT_CONTRACT_TREE_DEPTH = uint64(2**5) JUSTIFICATION_BITS_LENGTH = uint64(4) ENDIANNESS = 'little' ETH1_FOLLOW_DISTANCE = uint64(2**11) MAX_COMMITTEES_PER_SLOT = uint64(2**6) TARGET_COMMITTEE_SIZE = uint64(2**7) MAX_VALIDATORS_PER_COMMITTEE = uint64(2**11) MIN_PER_EPOCH_CHURN_LIMIT = uint64(2**2) CHURN_LIMIT_QUOTIENT = uint64(2**16) SHUFFLE_ROUND_COUNT = uint64(90) MIN_GENESIS_ACTIVE_VALIDATOR_COUNT = uint64(2**14) MIN_GENESIS_TIME = uint64(1606824000) HYSTERESIS_QUOTIENT = uint64(4) HYSTERESIS_DOWNWARD_MULTIPLIER = uint64(1) HYSTERESIS_UPWARD_MULTIPLIER = uint64(5) MIN_DEPOSIT_AMOUNT = Gwei(2**0 * 10**9) MAX_EFFECTIVE_BALANCE = Gwei(2**5 * 10**9) EJECTION_BALANCE = Gwei(2**4 * 10**9) EFFECTIVE_BALANCE_INCREMENT = Gwei(2**0 * 10**9) GENESIS_FORK_VERSION = Version('0x00000000') BLS_WITHDRAWAL_PREFIX = Bytes1('0x00') ETH1_ADDRESS_WITHDRAWAL_PREFIX = Bytes1('0x01') GENESIS_DELAY = uint64(604800) SECONDS_PER_SLOT = uint64(12) SECONDS_PER_ETH1_BLOCK = uint64(14) MIN_ATTESTATION_INCLUSION_DELAY = uint64(2**0) SLOTS_PER_EPOCH = uint64(2**5) MIN_SEED_LOOKAHEAD = uint64(2**0) MAX_SEED_LOOKAHEAD = uint64(2**2) MIN_EPOCHS_TO_INACTIVITY_PENALTY = uint64(2**2) EPOCHS_PER_ETH1_VOTING_PERIOD = uint64(2**6) SLOTS_PER_HISTORICAL_ROOT = uint64(2**13) MIN_VALIDATOR_WITHDRAWABILITY_DELAY = uint64(2**8) SHARD_COMMITTEE_PERIOD = uint64(2**8) EPOCHS_PER_HISTORICAL_VECTOR = uint64(2**16) EPOCHS_PER_SLASHINGS_VECTOR = uint64(2**13) HISTORICAL_ROOTS_LIMIT = uint64(2**24) VALIDATOR_REGISTRY_LIMIT = uint64(2**40) BASE_REWARD_FACTOR = uint64(2**6) WHISTLEBLOWER_REWARD_QUOTIENT = uint64(2**9) PROPOSER_REWARD_QUOTIENT = uint64(2**3) INACTIVITY_PENALTY_QUOTIENT = uint64(2**26) MIN_SLASHING_PENALTY_QUOTIENT = uint64(2**7) PROPORTIONAL_SLASHING_MULTIPLIER = uint64(1) MAX_PROPOSER_SLASHINGS = 2**4 MAX_ATTESTER_SLASHINGS = 2**1 
MAX_ATTESTATIONS = 2**7 MAX_DEPOSITS = 2**4 MAX_VOLUNTARY_EXITS = 2**4 DOMAIN_BEACON_PROPOSER = DomainType('0x00000000') DOMAIN_BEACON_ATTESTER = DomainType('0x01000000') DOMAIN_RANDAO = DomainType('0x02000000') DOMAIN_DEPOSIT = DomainType('0x03000000') DOMAIN_VOLUNTARY_EXIT = DomainType('0x04000000') DOMAIN_SELECTION_PROOF = DomainType('0x05000000') DOMAIN_AGGREGATE_AND_PROOF = DomainType('0x06000000') SAFE_SLOTS_TO_UPDATE_JUSTIFIED = 2**3 TARGET_AGGREGATORS_PER_COMMITTEE = 2**4 RANDOM_SUBNETS_PER_VALIDATOR = 2**0 EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 2**8 ATTESTATION_SUBNET_COUNT = 64 ETH_TO_GWEI = uint64(10**9) SAFETY_DECAY = uint64(10) TIMELY_HEAD_FLAG_INDEX = 0 TIMELY_SOURCE_FLAG_INDEX = 1 TIMELY_TARGET_FLAG_INDEX = 2 TIMELY_HEAD_WEIGHT = 12 TIMELY_SOURCE_WEIGHT = 12 TIMELY_TARGET_WEIGHT = 24 SYNC_REWARD_WEIGHT = 8 PROPOSER_WEIGHT = 8 WEIGHT_DENOMINATOR = 64 G2_POINT_AT_INFINITY = BLSSignature(b'\xc0' + b'\x00' * 95) INACTIVITY_PENALTY_QUOTIENT_ALTAIR = uint64(3 * 2**24) MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR = uint64(2**6) PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR = uint64(2) SYNC_COMMITTEE_SIZE = uint64(2**10) SYNC_PUBKEYS_PER_AGGREGATE = uint64(2**6) INACTIVITY_SCORE_BIAS = uint64(4) EPOCHS_PER_SYNC_COMMITTEE_PERIOD = Epoch(2**8) DOMAIN_SYNC_COMMITTEE = DomainType('0x07000000') DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF = DomainType('0x08000000') DOMAIN_CONTRIBUTION_AND_PROOF = DomainType('0x09000000') ALTAIR_FORK_VERSION = Version('0x01000000') ALTAIR_FORK_SLOT = Slot(0) TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE = 2**2 SYNC_COMMITTEE_SUBNET_COUNT = 8 MIN_SYNC_COMMITTEE_PARTICIPANTS = 1 MAX_VALID_LIGHT_CLIENT_UPDATES = uint64(2**64 - 1) LIGHT_CLIENT_UPDATE_TIMEOUT = Slot(2**13) apply_constants_config(globals()) class Fork(Container): previous_version: Version current_version: Version epoch: Epoch class ForkData(Container): current_version: Version genesis_validators_root: Root class Checkpoint(Container): epoch: Epoch root: Root class Validator(Container): pubkey: BLSPubkey withdrawal_credentials: Bytes32 effective_balance: Gwei slashed: boolean activation_eligibility_epoch: Epoch activation_epoch: Epoch exit_epoch: Epoch withdrawable_epoch: Epoch class AttestationData(Container): slot: Slot index: CommitteeIndex beacon_block_root: Root source: Checkpoint target: Checkpoint class IndexedAttestation(Container): attesting_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE] data: AttestationData signature: BLSSignature class PendingAttestation(Container): aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] data: AttestationData inclusion_delay: Slot proposer_index: ValidatorIndex class Eth1Data(Container): deposit_root: Root deposit_count: uint64 block_hash: Bytes32 class HistoricalBatch(Container): block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] class DepositMessage(Container): pubkey: BLSPubkey withdrawal_credentials: Bytes32 amount: Gwei class DepositData(Container): pubkey: BLSPubkey withdrawal_credentials: Bytes32 amount: Gwei signature: BLSSignature class BeaconBlockHeader(Container): slot: Slot proposer_index: ValidatorIndex parent_root: Root state_root: Root body_root: Root class SigningData(Container): object_root: Root domain: Domain class AttesterSlashing(Container): attestation_1: IndexedAttestation attestation_2: IndexedAttestation class Attestation(Container): aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] data: AttestationData signature: BLSSignature class Deposit(Container): proof: Vector[Bytes32, 
DEPOSIT_CONTRACT_TREE_DEPTH + 1] data: DepositData class VoluntaryExit(Container): epoch: Epoch validator_index: ValidatorIndex class SignedVoluntaryExit(Container): message: VoluntaryExit signature: BLSSignature class SignedBeaconBlockHeader(Container): message: BeaconBlockHeader signature: BLSSignature class ProposerSlashing(Container): signed_header_1: SignedBeaconBlockHeader signed_header_2: SignedBeaconBlockHeader class Eth1Block(Container): timestamp: uint64 deposit_root: Root deposit_count: uint64 class AggregateAndProof(Container): aggregator_index: ValidatorIndex aggregate: Attestation selection_proof: BLSSignature class SignedAggregateAndProof(Container): message: AggregateAndProof signature: BLSSignature class SyncAggregate(Container): sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE] sync_committee_signature: BLSSignature class BeaconBlockBody(Container): randao_reveal: BLSSignature eth1_data: Eth1Data graffiti: Bytes32 proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS] attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS] attestations: List[Attestation, MAX_ATTESTATIONS] deposits: List[Deposit, MAX_DEPOSITS] voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] sync_aggregate: SyncAggregate class BeaconBlock(Container): slot: Slot proposer_index: ValidatorIndex parent_root: Root state_root: Root body: BeaconBlockBody class SignedBeaconBlock(Container): message: BeaconBlock signature: BLSSignature class SyncCommittee(Container): pubkeys: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE] pubkey_aggregates: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE // SYNC_PUBKEYS_PER_AGGREGATE] class BeaconState(Container): genesis_time: uint64 genesis_validators_root: Root slot: Slot fork: Fork latest_block_header: BeaconBlockHeader block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] eth1_data: Eth1Data eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] eth1_deposit_index: uint64 validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] previous_justified_checkpoint: Checkpoint current_justified_checkpoint: Checkpoint finalized_checkpoint: Checkpoint inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] current_sync_committee: SyncCommittee next_sync_committee: SyncCommittee class SyncCommitteeSignature(Container): slot: Slot beacon_block_root: Root validator_index: ValidatorIndex signature: BLSSignature class SyncCommitteeContribution(Container): slot: Slot beacon_block_root: Root subcommittee_index: uint64 aggregation_bits: Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT] signature: BLSSignature class ContributionAndProof(Container): aggregator_index: ValidatorIndex contribution: SyncCommitteeContribution selection_proof: BLSSignature class SignedContributionAndProof(Container): message: ContributionAndProof signature: BLSSignature class SyncCommitteeSigningData(Container): slot: Slot subcommittee_index: uint64 class LightClientSnapshot(Container): header: BeaconBlockHeader current_sync_committee: SyncCommittee next_sync_committee: SyncCommittee 
class LightClientUpdate(Container): header: BeaconBlockHeader next_sync_committee: SyncCommittee next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)] finality_header: BeaconBlockHeader finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)] sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE] sync_committee_signature: BLSSignature fork_version: Version class LightClientStore(Container): snapshot: LightClientSnapshot valid_updates: List[LightClientUpdate, MAX_VALID_LIGHT_CLIENT_UPDATES] @dataclass(eq=True, frozen=True) class LatestMessage(object): epoch: Epoch root: Root @dataclass class Store(object): time: uint64 genesis_time: uint64 justified_checkpoint: Checkpoint finalized_checkpoint: Checkpoint best_justified_checkpoint: Checkpoint blocks: Dict[Root, BeaconBlock] = field(default_factory=dict) block_states: Dict[Root, BeaconState] = field(default_factory=dict) checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict) latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict) def integer_squareroot(n: uint64) -> uint64: x = n y = (x + 1) // 2 while y < x: x = y y = (x + n // x) // 2 return x def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32: return Bytes32(a ^ b for a, b in zip(bytes_1, bytes_2)) def bytes_to_uint64(data: bytes) -> uint64: return uint64(int.from_bytes(data, ENDIANNESS)) def is_active_validator(validator: Validator, epoch: Epoch) -> bool: return validator.activation_epoch <= epoch < validator.exit_epoch def is_eligible_for_activation_queue(validator: Validator) -> bool: return ( validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and validator.effective_balance == MAX_EFFECTIVE_BALANCE ) def is_eligible_for_activation(state: BeaconState, validator: Validator) -> bool: return ( validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch and validator.activation_epoch == FAR_FUTURE_EPOCH ) def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch) def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool: return ( (data_1 != data_2 and data_1.target.epoch == data_2.target.epoch) or (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch) ) def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool: indices = indexed_attestation.attesting_indices if len(indices) == 0 or not indices == sorted(set(indices)): return False pubkeys = [state.validators[i].pubkey for i in indices] domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch) signing_root = compute_signing_root(indexed_attestation.data, domain) return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature) def is_valid_merkle_branch(leaf: Bytes32, branch: Sequence[Bytes32], depth: uint64, index: uint64, root: Root) -> bool: value = leaf for i in range(depth): if index // (2**i) % 2: value = hash(branch[i] + value) else: value = hash(value + branch[i]) return value == root def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64: assert index < index_count for current_round in range(SHUFFLE_ROUND_COUNT): pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(current_round)))[0:8]) % index_count flip = (pivot + index_count - index) % index_count position = max(index, flip) source = hash( seed + 
uint_to_bytes(uint8(current_round)) + uint_to_bytes(uint32(position // 256)) ) byte = uint8(source[(position % 256) // 8]) bit = (byte >> (position % 8)) % 2 index = flip if bit else index return index def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex: assert len(indices) > 0 MAX_RANDOM_BYTE = 2**8 - 1 i = uint64(0) total = uint64(len(indices)) while True: candidate_index = indices[compute_shuffled_index(i % total, total, seed)] random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] effective_balance = state.validators[candidate_index].effective_balance if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: return candidate_index i += 1 def compute_committee(indices: Sequence[ValidatorIndex], seed: Bytes32, index: uint64, count: uint64) -> Sequence[ValidatorIndex]: start = (len(indices) * index) // count end = (len(indices) * uint64(index + 1)) // count return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)] def compute_epoch_at_slot(slot: Slot) -> Epoch: return Epoch(slot // SLOTS_PER_EPOCH) def compute_start_slot_at_epoch(epoch: Epoch) -> Slot: return Slot(epoch * SLOTS_PER_EPOCH) def compute_activation_exit_epoch(epoch: Epoch) -> Epoch: return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD) def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root: return hash_tree_root(ForkData( current_version=current_version, genesis_validators_root=genesis_validators_root, )) def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest: return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4]) def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_validators_root: Root=None) -> Domain: if fork_version is None: fork_version = GENESIS_FORK_VERSION if genesis_validators_root is None: genesis_validators_root = Root() fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root) return Domain(domain_type + fork_data_root[:28]) def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root: return hash_tree_root(SigningData( object_root=hash_tree_root(ssz_object), domain=domain, )) def get_current_epoch(state: BeaconState) -> Epoch: return compute_epoch_at_slot(state.slot) def get_previous_epoch(state: BeaconState) -> Epoch: current_epoch = get_current_epoch(state) return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else Epoch(current_epoch - 1) def get_block_root(state: BeaconState, epoch: Epoch) -> Root: return get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch)) def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root: assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT return state.block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32: return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]: return [ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch)] def get_validator_churn_limit(state: BeaconState) -> uint64: active_validator_indices = get_active_validator_indices(state, get_current_epoch(state)) return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT) def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32: mix = 
get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)) return hash(domain_type + uint_to_bytes(epoch) + mix) def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64: return max(uint64(1), min( MAX_COMMITTEES_PER_SLOT, uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, ))
MIT License
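A small sketch of how the committee lookup in the beaconrunner record above is typically iterated for a single slot; state is assumed to be a populated BeaconState from a simulation (hypothetical here), and every helper used is defined in the surrounding spec module:

# Sketch only: enumerate every committee for one slot by calling
# get_beacon_committee once per committee index. Inside that function the
# flat shuffle index is (slot % SLOTS_PER_EPOCH) * committees_per_slot + index.
def committees_at_slot(state: BeaconState, slot: Slot) -> Sequence[Sequence[ValidatorIndex]]:
    epoch = compute_epoch_at_slot(slot)
    count = get_committee_count_per_slot(state, epoch)
    return [
        get_beacon_committee(state, slot, CommitteeIndex(i))
        for i in range(count)
    ]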
a2i2/surround
surround_cli/surround_cli/remote/cli.py
parse_remote_args
python
def parse_remote_args(remote_parser, parsed_args):
    global_ = parsed_args.glob
    add = parsed_args.add
    remote_name = parsed_args.name
    remote_url = parsed_args.url

    if add:
        add_remote(remote_parser, parsed_args)
    elif remote_name or remote_url:
        print("error: unknown switch [-n NAME] [-u URL]")
    else:
        if global_:
            remotes = BASE_REMOTE.read_all_from_global_config("remote")
            print_remote_info(parsed_args, remotes)
        else:
            if is_surround_project():
                actual_current_dir = os.getcwd()
                os.chdir(get_project_root_from_current_dir())
                remotes = BASE_REMOTE.read_all_from_local_config("remote")
                print_remote_info(parsed_args, remotes)
                os.chdir(actual_current_dir)
            else:
                print("error: not a surround project")
Executes the "remote" sub-command, which will either add a new remote or list the remotes in the current project, depending on the arguments.

:param remote_parser: argument parser used for the "remote" sub-command
:type remote_parser: <class 'argparse.ArgumentParser'>
:param parsed_args: the arguments parsed from the user
:type parsed_args: <class 'argparse.Namespace'>
https://github.com/a2i2/surround/blob/6aa6f7939bb06856e20bba12d5f7329129dd61de/surround_cli/surround_cli/remote/cli.py#L230-L262
import os from pathlib import Path from . import base from . import local __author__ = 'Akshat Bajaj' __date__ = '2019/02/26' BASE_REMOTE = base.BaseRemote() LOCAL = local.Local() def is_surround_project(): dir_ = get_project_root_from_current_dir() if dir_ is None: return False return True def get_project_root_from_current_dir(): return get_project_root(os.getcwd()) def get_project_root(current_directory): home = str(Path.home()) while True: list_ = os.listdir(current_directory) parent_directory = os.path.dirname(current_directory) if current_directory in (home, parent_directory): break if ".surround" in list_: return current_directory current_directory = parent_directory def add_store_parser(sub_parser): store_parser = sub_parser.add_parser('store', help="Data remote storage tool") sub_parser = store_parser.add_subparsers(dest='sub_command', description="Must be called with one of the following commands") add_remote_parser(sub_parser) add_pull_parser(sub_parser) add_push_parser(sub_parser) add_list_parser(sub_parser) return store_parser def add_remote_parser(sub_parser): remote_parser = sub_parser.add_parser('remote', help="Initialise a new remote") remote_parser.add_argument('-n', '--name', help="Name of the remote") remote_parser.add_argument('-u', '--url', help="Url of the remote") remote_parser.add_argument('-a', '--add', help="Used to add a remote", action='store_true') remote_parser.add_argument('-v', '--verbose', help="verbose remote", action='store_true') remote_parser.add_argument('--global', help="Used to specify a global remote", action='store_true', dest='glob') return remote_parser def add_pull_parser(sub_parser): pull_parser = sub_parser.add_parser('pull', help="Pull file from remote") pull_parser.add_argument('remote', help="remote to pull") pull_parser.add_argument('-k', '--key', help="key of file to pull (from .surround/config.yaml)") def add_push_parser(sub_parser): push_parser = sub_parser.add_parser('push', help="Push file to remote") push_parser.add_argument('remote', help="remote to push") push_parser.add_argument('-k', '--key', help="key of file to push (from .surround/config.yaml)") def add_list_parser(sub_parser): list_parser = sub_parser.add_parser('list', help="List file in remote") list_parser.add_argument('remote', help="remote to list") def write_remote_config(parsed_args, remote_parser, file_to_write): remote_name = parsed_args.name remote_url = parsed_args.url if remote_name and remote_url: BASE_REMOTE.write_config("remote", file_to_write, remote_name, remote_url) else: print("error: supply remote name and url") remote_parser.print_usage() print("error: [-a ADD] [-n NAME] [-u URL] are mutually inclusive") def add_remote(remote_parser, parsed_args): verbose = parsed_args.verbose global_ = parsed_args.glob if verbose: print("error: unknown switch [-v VERBOSE]") remote_parser.print_usage() print("[-a ADD] and [-v VERBOSE] are mutually exclusive") else: if global_: home = str(Path.home()) os.makedirs(os.path.dirname(os.path.join(home, ".surround/config.yaml")), exist_ok=True) write_remote_config(parsed_args, remote_parser, os.path.join(home, ".surround/config.yaml")) else: if is_surround_project(): actual_current_dir = os.getcwd() os.chdir(get_project_root_from_current_dir()) write_remote_config(parsed_args, remote_parser, ".surround/config.yaml") os.makedirs(parsed_args.name, exist_ok=True) os.chdir(actual_current_dir) else: print("error: not a surround project") def print_remote_info(parsed_args, remotes): verbose = parsed_args.verbose if remotes: for key, 
value in remotes.items(): if key: if verbose: print(key + ": " + value) else: print(key) else: print("info: no remote found") def parse_store_args(remote_parser, parsed_args, extra_args): if parsed_args.sub_command == "remote": parse_remote_args(remote_parser, parsed_args) elif parsed_args.sub_command == "pull": parse_pull_args(parsed_args) elif parsed_args.sub_command == "push": parse_push_args(remote_parser, parsed_args, extra_args) elif parsed_args.sub_command == "list": parse_list_args(remote_parser, parsed_args, extra_args) else: remote_parser.print_help()
BSD 3-Clause New or Revised License
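A hedged sketch of how the surround "store remote" sub-command above is driven through argparse, reusing the parser helpers from the same module; the command-line flags and the remote name/url are invented, and with --global this would actually write to ~/.surround/config.yaml:

# Sketch only: build the "remote" sub-parser with add_remote_parser and
# dispatch the parsed flags to parse_remote_args shown above.
import argparse

sub = argparse.ArgumentParser(prog="surround store").add_subparsers(dest="sub_command")
remote_parser = add_remote_parser(sub)

# --add with --name/--url registers a remote; --global targets the user-wide config.
args = remote_parser.parse_args(
    ["-a", "-n", "data", "-u", "https://example.com/bucket", "--global"]
)
parse_remote_args(remote_parser, args)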
huxiaoling/imageseg-2.5d_topo
TopologyForceV1/venv/lib64/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/tarfile.py
copyfileobj
python
def copyfileobj(src, dst, length=None):
    if length == 0:
        return
    if length is None:
        while True:
            buf = src.read(16*1024)
            if not buf:
                break
            dst.write(buf)
        return

    BUFSIZE = 16 * 1024
    blocks, remainder = divmod(length, BUFSIZE)
    for b in range(blocks):
        buf = src.read(BUFSIZE)
        if len(buf) < BUFSIZE:
            raise IOError("end of file reached")
        dst.write(buf)

    if remainder != 0:
        buf = src.read(remainder)
        if len(buf) < remainder:
            raise IOError("end of file reached")
        dst.write(buf)
    return
Copy length bytes from fileobj src to fileobj dst. If length is None, copy the entire content.
https://github.com/huxiaoling/imageseg-2.5d_topo/blob/86ca52e53f838309132a67f2a3e58cf69d314770/TopologyForceV1/venv/lib64/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/distlib/_backport/tarfile.py#L256-L283
from __future__ import print_function __version__ = "$Revision$" version = "0.9.0" __author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" __date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" __cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" __credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." import sys import os import stat import errno import time import struct import copy import re try: import grp, pwd except ImportError: grp = pwd = None symlink_exception = (AttributeError, NotImplementedError) try: symlink_exception += (WindowsError,) except NameError: pass __all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins _open = builtins.open NUL = b"\0" BLOCKSIZE = 512 RECORDSIZE = BLOCKSIZE * 20 GNU_MAGIC = b"ustar \0" POSIX_MAGIC = b"ustar\x0000" LENGTH_NAME = 100 LENGTH_LINK = 100 LENGTH_PREFIX = 155 REGTYPE = b"0" AREGTYPE = b"\0" LNKTYPE = b"1" SYMTYPE = b"2" CHRTYPE = b"3" BLKTYPE = b"4" DIRTYPE = b"5" FIFOTYPE = b"6" CONTTYPE = b"7" GNUTYPE_LONGNAME = b"L" GNUTYPE_LONGLINK = b"K" GNUTYPE_SPARSE = b"S" XHDTYPE = b"x" XGLTYPE = b"g" SOLARIS_XHDTYPE = b"X" USTAR_FORMAT = 0 GNU_FORMAT = 1 PAX_FORMAT = 2 DEFAULT_FORMAT = GNU_FORMAT SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) REGULAR_TYPES = (REGTYPE, AREGTYPE, CONTTYPE, GNUTYPE_SPARSE) GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) PAX_FIELDS = ("path", "linkpath", "size", "mtime", "uid", "gid", "uname", "gname") PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) PAX_NUMBER_FIELDS = { "atime": float, "ctime": float, "mtime": float, "uid": int, "gid": int, "size": int } S_IFLNK = 0o120000 S_IFREG = 0o100000 S_IFBLK = 0o060000 S_IFDIR = 0o040000 S_IFCHR = 0o020000 S_IFIFO = 0o010000 TSUID = 0o4000 TSGID = 0o2000 TSVTX = 0o1000 TUREAD = 0o400 TUWRITE = 0o200 TUEXEC = 0o100 TGREAD = 0o040 TGWRITE = 0o020 TGEXEC = 0o010 TOREAD = 0o004 TOWRITE = 0o002 TOEXEC = 0o001 if os.name in ("nt", "ce"): ENCODING = "utf-8" else: ENCODING = sys.getfilesystemencoding() def stn(s, length, encoding, errors): s = s.encode(encoding, errors) return s[:length] + (length - len(s)) * NUL def nts(s, encoding, errors): p = s.find(b"\0") if p != -1: s = s[:p] return s.decode(encoding, errors) def nti(s): if s[0] != chr(0o200): try: n = int(nts(s, "ascii", "strict") or "0", 8) except ValueError: raise InvalidHeaderError("invalid header") else: n = 0 for i in range(len(s) - 1): n <<= 8 n += ord(s[i + 1]) return n def itn(n, digits=8, format=DEFAULT_FORMAT): if 0 <= n < 8 ** (digits - 1): s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL else: if format != GNU_FORMAT or n >= 256 ** (digits - 1): raise ValueError("overflow in number field") if n < 0: n = struct.unpack("L", struct.pack("l", n))[0] s = bytearray() for i in range(digits - 1): s.insert(0, n & 0o377) n >>= 8 s.insert(0, 0o200) return s def calc_chksums(buf): unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) return unsigned_chksum, signed_chksum
MIT License
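A quick sketch exercising the copyfileobj helper above with in-memory streams, so it runs without touching the filesystem; the sizes are arbitrary:

# Sketch only: copy a bounded number of bytes, then the whole stream.
import io

src = io.BytesIO(b"x" * (40 * 1024))           # 40 KiB of dummy data
dst = io.BytesIO()
copyfileobj(src, dst, length=33 * 1024)        # two 16 KiB blocks + 1 KiB remainder
assert dst.tell() == 33 * 1024

src.seek(0)
whole = io.BytesIO()
copyfileobj(src, whole)                        # length=None copies until EOF
assert whole.getvalue() == src.getvalue()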
smartsheet-platform/smartsheet-python-sdk
smartsheet/attachments.py
Attachments.get_attachment
python
def get_attachment(self, sheet_id, attachment_id):
    _op = fresh_operation('get_attachment')
    _op['method'] = 'GET'
    _op['path'] = '/sheets/' + str(sheet_id) + '/attachments/' + str(
        attachment_id)

    expected = 'Attachment'
    prepped_request = self._base.prepare_request(_op)
    response = self._base.request(prepped_request, expected, _op)

    return response
Fetch the specified Attachment.

Args:
    sheet_id (int): Sheet ID
    attachment_id (int): Attachment ID

Returns:
    Attachment
https://github.com/smartsheet-platform/smartsheet-python-sdk/blob/ef256b7421a65a56a7138dc2b3eb5d69a1a06590/smartsheet/attachments.py#L342-L361
from __future__ import absolute_import from .models import DownloadedFile from .models import Error, ErrorResult import requests import logging import os.path from . import fresh_operation class Attachments(object): def __init__(self, smartsheet_obj): self._base = smartsheet_obj self._log = logging.getLogger(__name__) def attach_file_to_comment(self, sheet_id, comment_id, _file): if not all(val is not None for val in ['sheet_id', 'comment_id', '_file']): raise ValueError( ('One or more required values ' 'are missing from call to ' + __name__)) _op = fresh_operation('attach_file_to_comment') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/comments/' + str( comment_id) + '/attachments' _op['files'] = {} _op['files']['file'] = _file expected = ['Result', 'Attachment'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def attach_file_to_row(self, sheet_id, row_id, _file): if not all(val is not None for val in ['sheet_id', 'row_id', '_file']): raise ValueError( ('One or more required values ' 'are missing from call to ' + __name__)) _op = fresh_operation('attach_file_to_row') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/rows/' + str( row_id) + '/attachments' _op['files'] = {} _op['files']['file'] = _file expected = ['Result', 'Attachment'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def attach_file_to_sheet(self, sheet_id, _file): _op = fresh_operation('attach_file_to_sheet') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/attachments' _op['files'] = {} _op['files']['file'] = _file expected = ['Result', 'Attachment'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def attach_new_version(self, sheet_id, attachment_id, _file): if not all(val is not None for val in ['sheet_id', 'attachment_id', '_file']): raise ValueError( ('One or more required values ' 'are missing from call to ' + __name__)) _op = fresh_operation('attach_new_version') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/attachments/' + str( attachment_id) + '/versions' _op['files'] = {} _op['files']['file'] = _file expected = ['Result', 'Attachment'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def attach_url_to_comment(self, sheet_id, comment_id, attachment_obj): if not all(val is not None for val in ['sheet_id', 'comment_id', 'attachment_obj']): raise ValueError( ('One or more required values ' 'are missing from call to ' + __name__)) _op = fresh_operation('attach_url_to_comment') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/comments/' + str( comment_id) + '/attachments' _op['json'] = attachment_obj expected = ['Result', 'Attachment'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def attach_url_to_row(self, sheet_id, row_id, attachment_obj): if not all(val is not None for val in ['sheet_id', 'row_id', 'attachment_obj']): raise ValueError( ('One or more required values ' 'are missing from call to ' + __name__)) _op = fresh_operation('attach_url_to_row') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/rows/' + str( row_id) + '/attachments' _op['json'] = attachment_obj expected = ['Result', 'Attachment'] 
prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def attach_url_to_sheet(self, sheet_id, attachment_obj): _op = fresh_operation('attach_url_to_sheet') _op['method'] = 'POST' _op['path'] = '/sheets/' + str(sheet_id) + '/attachments' _op['json'] = attachment_obj expected = ['Result', 'Attachment'] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def delete_attachment(self, sheet_id, attachment_id): _op = fresh_operation('delete_attachment') _op['method'] = 'DELETE' _op['path'] = '/sheets/' + str(sheet_id) + '/attachments/' + str( attachment_id) expected = ['Result', None] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response def delete_attachment_versions(self, sheet_id, attachment_id): _op = fresh_operation('delete_attachment_versions') _op['method'] = 'DELETE' _op['path'] = '/sheets/' + str(sheet_id) + '/attachments/' + str( attachment_id) + '/versions' expected = ['Result', None] prepped_request = self._base.prepare_request(_op) response = self._base.request(prepped_request, expected, _op) return response
Apache License 2.0
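A hedged usage sketch for the attachment fetch in the smartsheet record above, assuming an initialized Smartsheet SDK client; the access token, sheet id and attachment id are placeholders:

# Sketch only: fetch one attachment's metadata through the SDK wrapper above.
# The token and ids are made up; real values come from your account and sheet.
import smartsheet

client = smartsheet.Smartsheet("YOUR_ACCESS_TOKEN")
attachment = client.Attachments.get_attachment(4583173393803140, 7169584837988228)

# The returned Attachment model carries the file name and a temporary download URL.
print(attachment.name, attachment.url)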