Dataset schema (column name, type, and the viewer's length/class summary):

  repository_name      string   lengths 7 - 107
  function_path        string   lengths 4 - 190
  function_identifier  string   lengths 1 - 236
  language             string   1 class
  function             string   lengths 9 - 647k
  docstring            string   lengths 5 - 488k
  function_url         string   lengths 71 - 285
  context              string   lengths 0 - 2.51M
  license              string   5 classes
soaxelbrooke/datastreams
datastreams/datastreams.py
DataSet.apply
python
def apply(self, function):
    return self.Set(function(self))
Apply a function to the whole dataset

:param function function: function to be called on the whole dataset
:rtype: DataSet
https://github.com/soaxelbrooke/datastreams/blob/a04a02ed2ee2fc4097df3af228de90ae0a6f26a0/datastreams/datastreams.py#L902-L908
from itertools import islice, chain import csv from copy import copy import random import os try: from collections import defaultdict, deque, Counter, namedtuple except ImportError: from backport_collections import defaultdict, deque, Counter, namedtuple import sys try: reduce except NameError: from functools import reduce class Nothing(object): pass class Datum(object): def __init__(self, attributes): if isinstance(attributes, dict): for name, value in attributes.items(): setattr(self, name, value) else: for name, value in attributes: setattr(self, name, value) def __repr__(self): return "Datum({})".format(self.__dict__) class DataStream(object): @staticmethod def Stream(iterable, transform=lambda row: row, predicate=lambda row: True): return DataStream(iterable, transform=transform, predicate=predicate) @staticmethod def Set(iterable): return DataSet(iterable) def __init__(self, source, transform=lambda row: row, predicate=lambda row: True): self._source = iter(source) self._transform = transform self._predicate = predicate def __iter__(self): return (self._transform(row) for row in self._source if self._predicate(row)) def __repr__(self): return "{}({})".format(self.__class__.__name__, str(self._source)) def __str__(self): return self.__repr__() def __next__(self): while True: src_next = next(self._source) if self._predicate(src_next): return self._transform(src_next) def next(self): return self.__next__() def reduce(self, function, initial=None): if initial is None: initial = next(self) return reduce(function, self, initial) def reduce_to_dataset(self, function, initial=None): return DataSet(self.reduce(function, initial)) def map(self, function): return self.Stream(self, transform=function) def map_method(self, method, *args, **kwargs): return self.map(lambda row: self.getattr(row, method)(*args, **kwargs)) def concat(self): return self.chain() def concat_map(self, function): return self.map(function).concat() def chain(self): return self.Stream(chain.from_iterable(self)) def filter(self, filter_fn): return self.Stream(self, predicate=filter_fn) def filters(self, filter_fns): predicate = lambda row: all([pred(row) for pred in filter_fns]) return self.Stream(self, predicate=predicate) def filter_method(self, method, *args, **kwargs): return self.filter(lambda row: self.getattr(row, method)(*args, **kwargs)) def set(self, name, transfer_func=None, value=None): if transfer_func is not None: def row_setattr(row): new_row = copy(row) self.setattr(new_row, name, transfer_func(row)) return new_row else: def row_setattr(row): new_row = copy(row) self.setattr(new_row, name, value) return new_row return self.map(row_setattr) def get(self, name, default=None): def row_getattr(row): return self.getattr(row, name) if self.hasattr(row, name) else default return self.map(row_getattr) def delete(self, attr): def obj_del(row): new_row = copy(row) delattr(new_row, attr) return new_row return self.map(obj_del) def for_each(self, function): def apply_fn(row): function(row) return row return self.map(apply_fn) def print_each(self): def printer(row): print(row) return self.for_each(printer) def take(self, n): return self.Stream(islice(self, 0, n)) def take_now(self, n): return self.Set([next(self) for _ in range(n)]) def drop(self, n): return self.Stream(islice(self, n, None)) def collect(self): return self.Set(self) def collect_as(self, constructor): return self.map(constructor).collect() def execute(self): list(self) def count(self): count = 0 for _ in self: count += 1 return count def batch(self, 
batch_size): return self.window(batch_size, batch_size) def window(self, length, interval): queue = deque(maxlen=length) def window_iter(): queue.extend(self.take_now(length)) yield self.Set(queue) while True: for _ in range(interval): queue.popleft() try: for _ in range(interval): queue.append(next(self)) yield self.Set(queue) except StopIteration: if len(queue) != 0: yield self.Set(queue) break return self.Stream(window_iter()) def dedupe(self, key_fn=lambda a: a): seen = set() def unique(): for row in self: if key_fn(row) not in seen: seen.add(key_fn(row)) yield row return self.Stream(unique()) def sample(self, probability, n): return self.filter(lambda row: random.random() > probability).take(n) def group_by(self, key): return self.group_by_fn(lambda row: self.getattr(row, key)) def group_by_fn(self, key_fn): grouper = defaultdict(list) for ele in self: grouper[key_fn(ele)].append(ele) return self.Set(grouper.items()) def to(self, constructor): return constructor(self) def to_dict(self): return dict(self.collect()) def to_list(self): return list(self.collect()) def to_set(self): return set(self.collect()) def pipe_to_stdout(self): map(sys.stdout.write, self) def count_frequency(self): return self.Set(Counter(self).items()) @staticmethod def join_objects(left, right): joined_class = type(left.__class__.__name__ + right.__class__.__name__, (Datum,), {}) attrs = {} attrs.update(get_object_attrs(right)) attrs.update(get_object_attrs(left)) attrs['left'] = left attrs['right'] = right return joined_class(attrs) def join(self, how, key, right): if how == 'left': return self.left_join(key, right) elif how == 'right': return self.right_join(key, right) elif how == 'inner': return self.inner_join(key, right) elif how == 'outer': return self.outer_join(key, right) else: raise ValueError("Invalid value for how: {}, must be left, right, " "inner, or outer.".format(str(how))) def join_by(self, how, left_key_fn, right_key_fn, right): if how == 'left': return self.left_join_by(left_key_fn, right_key_fn, right) elif how == 'right': return self.right_join_by(left_key_fn, right_key_fn, right) elif how == 'inner': return self.inner_join_by(left_key_fn, right_key_fn, right) elif how == 'outer': return self.outer_join_by(left_key_fn, right_key_fn, right) else: raise ValueError("Invalid value for how: {}, must be left, right, " "inner, or outer.".format(str(how))) def left_join(self, key, right): key_fn = lambda ele: self.getattr(ele, key) return self.left_join_by(key_fn, key_fn, right) def left_join_by(self, left_key_fn, right_key_fn, right): joiner = defaultdict(list) for ele in right: joiner[right_key_fn(ele)].append(ele) joined = [] for ele in self: for other in joiner.get(left_key_fn(ele), [None]): joined.append(self.join_objects(ele, other)) return self.Set(joined) def right_join(self, key, right): key_fn = lambda ele: self.getattr(ele, key) return self.right_join_by(key_fn, key_fn, right) def right_join_by(self, left_key_fn, right_key_fn, right): joiner = defaultdict(list) for ele in self: joiner[left_key_fn(ele)].append(ele) joined = [] for ele in right: for other in joiner.get(right_key_fn(ele), [None]): joined.append(self.join_objects(ele, other)) return self.Set(joined) def inner_join(self, key, right): key_fn = lambda ele: self.getattr(ele, key) return self.inner_join_by(key_fn, key_fn, right) def inner_join_by(self, left_key_fn, right_key_fn, right): joiner = defaultdict(list) for ele in right: joiner[right_key_fn(ele)].append(ele) joined = [] for ele in self: for other in 
joiner[left_key_fn(ele)]: joined.append(self.join_objects(ele, other)) return self.Set(joined) def outer_join(self, key, right): key_fn = lambda ele: self.getattr(ele, key) return self.outer_join_by(key_fn, key_fn, right) def outer_join_by(self, left_key_fn, right_key_fn, right): left_joiner = defaultdict(list) for ele in self: left_joiner[left_key_fn(ele)].append(ele) right_joiner = defaultdict(list) for ele in right: right_joiner[right_key_fn(ele)].append(ele) keys = set(left_joiner.keys()).union(set(right_joiner.keys())) def iter_join(l, r, join_keys): for join_key in join_keys: for ele in l.get(join_key, [None]): for other in r.get(join_key, [None]): yield self.join_objects(ele, other) return self.Set(iter_join(left_joiner, right_joiner, keys)) def pick_attrs(self, attr_names): def attr_filter(row): return Datum(dict((name, self.getattr(row, name)) for name in attr_names)) return self.map(attr_filter) def where(self, name=Nothing): return FilterRadix(self, name) @staticmethod def getattr(row, name): if name is Nothing: return row return getattr(row, name) @staticmethod def hasattr(row, name): return hasattr(row, name) @staticmethod def setattr(row, name, value): setattr(row, name, value) @classmethod def from_file(cls, path): return cls.Stream(cls.iter_file(path)) @classmethod def from_files(cls, paths): return cls.Stream(cls.iter_files(paths)) @staticmethod def iter_files(paths): for path in paths: source_file = open(path) for line in source_file: yield line source_file.close() raise StopIteration @staticmethod def iter_file(path): source_file = open(path) for line in source_file: yield line source_file.close() raise StopIteration @classmethod def from_csv(cls, path, headers=None, constructor=Datum): source_file = open(path) if headers is None: headers = [h.strip() for h in source_file.readline().split(",")] reader = cls.iter_csv(source_file) return cls.Stream(constructor(zip(headers, row)) for row in reader) @staticmethod def iter_csv(source_file): reader = csv.reader(source_file) for row in reader: yield row source_file.close() raise StopIteration @classmethod def from_stdin(cls): return cls.Stream(sys.stdin) def write_to_file(self, path): with open(path, 'w') as outfile: for row in self: outfile.write(row + os.linesep) def append_to_file(self, path): with open(path, 'a') as outfile: for row in self: outfile.write(row + os.linesep) class FilterRadix(object): def __init__(self, stream, attr_name): self._source = stream self.attr_name = attr_name def eq(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) == value) def neq(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) != value) def gt(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) > value) def gteq(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) >= value) def lt(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) < value) def lteq(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) <= value) def is_in(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) in value) def not_in(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) not in value) def has_length(self, value): name = self.attr_name 
return self._source.filter(lambda row: len(self._source.getattr(row, name)) == value) def shorter_than(self, value): name = self.attr_name return self._source.filter(lambda row: len(self._source.getattr(row, name)) < value) def longer_than(self, value): name = self.attr_name return self._source.filter(lambda row: len(self._source.getattr(row, name)) > value) def truthy(self): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name)) def falsey(self): name = self.attr_name return self._source.filter(lambda row: not self._source.getattr(row, name)) def isinstance(self, value): name = self.attr_name return self._source.filter( lambda row: isinstance(self._source.getattr(row, name), value)) def notinstance(self, value): name = self.attr_name return self._source.filter( lambda row: not isinstance(self._source.getattr(row, name), value)) def is_(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) is value) def is_not(self, value): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name) is not value) def contains(self, value): name = self.attr_name return self._source.filter(lambda row: value in self._source.getattr(row, name)) def doesnt_contain(self, value): name = self.attr_name return self._source.filter(lambda row: value not in self._source.getattr(row, name)) def startswith(self, substring): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name).startswith(substring)) def endswith(self, substring): name = self.attr_name return self._source.filter(lambda row: self._source.getattr(row, name).endswith(substring)) def len_eq(self, value): name = self.attr_name return self._source.filter(lambda row: len(self._source.getattr(row, name)) == value) def len_gt(self, value): name = self.attr_name return self._source.filter(lambda row: len(self._source.getattr(row, name)) > value) def len_lt(self, value): name = self.attr_name return self._source.filter(lambda row: len(self._source.getattr(row, name)) < value) def len_gteq(self, value): name = self.attr_name return self._source.filter(lambda row: len(self._source.getattr(row, name)) >= value) def len_lteq(self, value): name = self.attr_name return self._source.filter(lambda row: len(self._source.getattr(row, name)) <= value) class DataSet(DataStream): def __init__(self, source): super(DataSet, self).__init__(source) self._source = list(source) def __len__(self): return len(self._source) def __getitem__(self, item): return self._source[item] def __repr__(self): head, tail = ', '.join(map(str, self[:5])), ', '.join(map(str, self[-5:])) return "{}([{}, ... {}])".format(self.__class__.__name__, head, tail) def __str__(self): return self.__repr__() def take_now(self, n): return self.Set([self._source[i] for i in range(n)])
MIT License
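A usage sketch for the DataSet.apply entry above, assuming the module is importable as `datastreams.datastreams`; the sample data and lambdas are illustrative, not from the repository.

```python
from datastreams.datastreams import DataStream

stream = DataStream.Stream(range(10))        # wrap any iterable lazily
evens = stream.filter(lambda n: n % 2 == 0)  # keep even numbers
dataset = evens.collect()                    # materialize into a DataSet

# apply() hands the *whole* DataSet to the function and wraps the (iterable)
# result back into a new DataSet.
total = dataset.apply(lambda ds: [sum(ds)])
print(list(total))                           # -> [20]
```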
brandondube/prysm
prysm/io.py
read_any_trioptics_mht
python
def read_any_trioptics_mht(file, metadata=False):
    type_, data = identify_trioptics_measurement_type(file)
    return type_, TRIOPTICS_SWITCHBOARD[type_](data, metadata=metadata)
Read any Trioptics .mht certificate (MTF vs Field, Distortion, etc).

Parameters
----------
file : `str` or path_like or file_like
    contents of a file, path_like to the file, or file object
metadata : `bool`
    whether to also extract and return metadata

Returns
-------
`dict`
    dictionary with appropriate keys.  If metadata=True, also has keys in
    the return of `io.parse_trioptics_metadata`.
https://github.com/brandondube/prysm/blob/174c8f952fced12c1ed204e4632cc0d16c693d27/prysm/io.py#L524-L542
from io import StringIO, IOBase import re import struct import codecs import datetime import calendar import shutil import warnings import numpy as truenp from .conf import config from .mathops import np def read_file_stream_or_path(path_or_file): try: with codecs.open(path_or_file, mode='r', encoding='cp1252') as fid: data = codecs.encode(fid.read(), 'utf-8').decode('utf-8') except (FileNotFoundError, TypeError): try: path_or_file.seek(0) raw = path_or_file.read() data = codecs.encode(raw, 'utf-8').decode('utf-8') except TypeError: data = raw.decode('cp1252') except AttributeError: data = path_or_file except (AttributeError, UnicodeDecodeError): data = path_or_file return data def is_mtfvfvf_file(file): data = read_file_stream_or_path(file) if data.startswith('ImgHeight'): return True, data else: return False, data def read_trioptics_mtfvfvf(file, filename=None): if filename is None: with open(file, 'r') as fid: lines = fid.readlines() else: lines = file.splitlines() file = filename if str(file)[-7:-4] == 'Tan': azimuth = 'Tan' else: azimuth = 'Sag' imghts, objangs, focusposes, mtfs = [], [], [], [] for meta, data in zip(lines[0::2], lines[1::2]): metavalues = meta.split() imght, objang, focuspos, freqpitch = metavalues[1::2] mtf_raw = data.split()[1:] mtf = np.asarray(mtf_raw, dtype=config.precision) imghts.append(imght) objangs.append(objang) focusposes.append(focuspos) mtfs.append(mtf) focuses = np.unique(np.asarray(focusposes, dtype=config.precision)) focuses = (focuses - np.mean(focuses)) * 1e3 imghts = np.unique(np.asarray(imghts, dtype=config.precision)) freqs = np.arange(len(mtfs[0]), dtype=config.precision) * float(freqpitch) data = np.swapaxes(np.asarray(mtfs).reshape(len(focuses), len(imghts), len(freqs)), 0, 1) return { 'data': data, 'focus': focuses, 'field': imghts, 'freq': freqs, 'azimuth': azimuth } def read_trioptics_mtf_vs_field(file, metadata=False): warnings.warn('this function will dispatch to either read_trioptics_mtf_vs_field_mtflab_v4, or _v5 in v0.20. In v0.19, it always uses _v4.') return read_trioptics_mtf_vs_field_mtflab_v4(file, metadata=metadata) def read_trioptics_mtf_vs_field_mtflab_v4(file, metadata=False): warnings.warn('this function will dispatch to either read_trioptics_mtf_vs_field_mtflab_v4, or _v5 in v0.20. 
In v0.19, it always uses _v4.') data = read_file_stream_or_path(file) data = data[:len(data)//10] fields_pattern = re.compile('MTF=09(.*?)Legend=09', flags=re.DOTALL) fields = fields_pattern.findall(data)[0] tan_pattern = re.compile(r'Tan(.*?)=97', flags=re.DOTALL) sag_pattern = re.compile(r'Sag(.*?)=97', flags=re.DOTALL) tan, sag = tan_pattern.findall(data), sag_pattern.findall(data) endpt = len(tan) // 2 tan, sag = tan[:endpt], sag[:endpt] freqs = np.asarray([float(s.split('(')[0][1:]) for s in tan]) tan = np.asarray([s.split('=09')[1:-1] for s in tan], dtype=config.precision) sag = np.asarray([s.split('=09')[1:-1] for s in sag], dtype=config.precision) fields = np.asarray(fields.split('=09')[0:-1], dtype=config.precision).round(4) res = { 'freq': freqs, 'field': fields, 'tan': tan, 'sag': sag, } if metadata is True: return {**res, **parse_trioptics_metadata(data)} else: return res def read_trioptics_mtf_vs_field_mtflab_v5(file_contents, metadata=False): if metadata: mdata = parse_trioptics_metadata_mtflab_v5(file_contents) end = file_contents.find('<!-- close certificate table -->') file_contents = file_contents[:end] start = file_contents.find('<!-- begin table caption -->') end = file_contents.find('<!-- end table caption -->') image_heights = [] body = file_contents[start+29:end] body = body.splitlines()[8:-2] for row in body: value = row.split('>', 1)[1].split('<')[0] image_heights.append(float(value)) file_contents = file_contents[end:] start = file_contents.find('<!-- begin measurement data -->') end = file_contents.find('<!-- end measurement data -->') file_contents = file_contents[start+31:end] tan = [] sag = [] freqs = [] rows = file_contents.split('<tr ')[1:] for row in rows: cells = row.split('<td')[1:-1] az, freq = cells[0].split('>', 1)[1].split('<')[0].split() freq = float(freq.split('(')[0]) if az == 'Sag': target = sag else: target = tan tmp = [] for cell in cells[1:]: value = cell.split('>', 1)[1].split('<')[0] tmp.append(float(value)) target.append(tmp) if freq not in freqs: freqs.append(freq) data = { 'tan': np.asarray(tan, dtype=config.precision), 'sag': np.asarray(sag, dtype=config.precision), 'field': np.asarray(image_heights, dtype=config.precision), 'freq': np.asarray(freqs, dtype=config.precision), } if metadata: return {**data, **mdata} else: return data def read_trioptics_mtf(file, metadata=False): data = read_file_stream_or_path(file) data = data[:len(data)//10] focus_scanner = re.compile(r'Focus Position : (\-?\d+\.\d+) mm') data_scanner = re.compile(r'\r\n(\d+\.?\d?)=09\r\n(\d+\.\d+)=09') sag_scanner = re.compile(r'Measurement Table: MTF vs. Frequency \( Sagittal \)') blockend_scanner = re.compile(r' _____ =20') sagpos, cutoff = sag_scanner.search(data).end(), None for blockend in blockend_scanner.finditer(data): if blockend.end() > sagpos and cutoff is None: cutoff = blockend.end() focus_pos = float(focus_scanner.search(data).group(1)) result = data_scanner.findall(data[:cutoff]) freqs, mtfs = [], [] for dat in result: freqs.append(float(dat[0])) mtfs.append(dat[1]) breakpt = len(mtfs) // 2 t = np.asarray(mtfs[:breakpt], dtype=config.precision) s = np.asarray(mtfs[breakpt:], dtype=config.precision) freqs = tuple(freqs[:breakpt]) res = { 'focus': focus_pos, 'freq': freqs, 'tan': t, 'sag': s, } if metadata is True: return {**res, **parse_trioptics_metadata(data)} else: return res def parse_trioptics_metadata(file_contents): warnings.warn('this function will dispatch to either parse_trioptics_metadata_mtflab_v4, or _v5 in v0.20. 
In v0.19, it always uses _v4.') return parse_trioptics_metadata_mtflab_v4(file_contents) def parse_trioptics_metadata_mtflab_v4(file_contents): data = file_contents[750:1500] operator_scanner = re.compile(r'Operator : (\S*)') time_scanner = re.compile(r'Time/Date : (\d{2}:\d{2}:\d{2}\s*\w*\s*\d*,\s*\d*)') sampleid_scanner = re.compile(r'Sample ID : (.*)') instrument_sn_scanner = re.compile(r'Instrument S/N : (\S*)') collimatorefl_scanner = re.compile(r'EFL \(Collimator\): (\d*) mm') wavelength_scanner = re.compile(r'Wavelength : (\d+) nm') sampleefl_scanner = re.compile(r'EFL \(Sample\) : (\d*\.\d*) mm') objangle_scanner = re.compile(r'Object Angle : (-?\d*\.\d*) =B0') focuspos_scanner = re.compile(r'Focus Position : (\d*\.\d*) mm') azimuth_scanner = re.compile(r'Sample Azimuth : (-?\d*\.\d*) =B0') operator = operator_scanner.search(data).group(1) time = time_scanner.search(data).group(1) hms, month, day, year = time.split() year, day = int(year), int(day[:-1]) month_num = list(calendar.month_name).index(month) h, m, s = hms.split(':') h, m, s = (int(str_) for str_ in [h, m, s]) timestamp = datetime.datetime(year=year, month=month_num, day=day, hour=h, minute=m, second=s) sampleid = sampleid_scanner.search(data).group(1).strip() instrument_sn = instrument_sn_scanner.search(data).group(1) collimator_efl = float(collimatorefl_scanner.search(data).group(1)) wavelength = float(wavelength_scanner.search(data).group(1)) / 1e3 sample_efl = float(sampleefl_scanner.search(data).group(1)) obj_angle = float(objangle_scanner.search(data).group(1)) focus_pos = float(focuspos_scanner.search(data).group(1)) azimuth = float(azimuth_scanner.search(data).group(1)) return { 'operator': operator, 'time': timestamp, 'sample_id': sampleid, 'instrument': 'Trioptics ImageMaster HR', 'instrument_sn': instrument_sn, 'collimator': collimator_efl, 'wavelength': wavelength, 'efl': sample_efl, 'fno': None, 'obj_angle': obj_angle, 'focus_pos': focus_pos, 'azimuth': azimuth, } def parse_trioptics_metadata_mtflab_v5(file_contents): top = file_contents.find('<pre>') bottom = file_contents.find('</pre>', top) body = file_contents[top+5:bottom].splitlines() sep = ': ' company = body[0].split(sep)[-1].strip() operator = body[1].split(sep)[-1].strip() timestamp = body[2].split(sep)[-1].strip() timestamp = datetime.datetime.strptime(timestamp, '%H:%M:%S %B %d, %Y') sampleid = body[3].split(sep)[-1].strip() instrument_sn = body[8].split(sep)[-1].strip() top = file_contents.find('<pre>', bottom) bottom = file_contents.find('</pre>', top) body = file_contents[top+5:bottom].splitlines() collimator_efl = float(body[1].split(sep)[-1].strip().split(' ')[0]) wavelength = body[2].split(sep)[-1].strip() efl = float(body[3].split(sep)[-1].split()[0].strip()) fno = float(body[4].split(sep)[-1].split('=')[0]) obj_angle = float(body[5].split(sep)[-1].split()[0]) focus_pos = float(body[6].split(sep)[-1].split()[0]) azimuth = float(body[7].split(sep)[-1].split()[0]) efl, fno, obj_angle, focus_pos, azimuth meta = { 'company': company, 'operator': operator, 'timestamp': timestamp, 'sample_id': sampleid, 'instrument': 'Trioptics ImageMaster', 'instrument_sn': instrument_sn, 'collimator': collimator_efl, 'wavelength': wavelength, 'efl': efl, 'fno': fno, 'obj_angle': obj_angle, 'focus_pos': focus_pos, 'azimuth': azimuth, } return meta def identify_trioptics_measurement_type(file): data = read_file_stream_or_path(file) data_parse = data[750:1500] measurement_type_scanner = re.compile(r'Measure Program : (.*)') program = 
measurement_type_scanner.search(data_parse).group(1).strip() return program, data TRIOPTICS_SWITCHBOARD = { 'MTF vs. Field': read_trioptics_mtf_vs_field, 'Distortion': NotImplemented, 'Axial Color': NotImplemented, 'Lateral Color': NotImplemented, }
MIT License
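A usage sketch for the read_any_trioptics_mht entry above; the filename is a placeholder, and only the 'MTF vs. Field' branch of TRIOPTICS_SWITCHBOARD is implemented in the excerpt shown.

```python
from prysm.io import read_any_trioptics_mht

# Dispatch on the measurement type recorded inside the certificate.
kind, result = read_any_trioptics_mht('mtf_vs_field_certificate.mht', metadata=True)
if kind == 'MTF vs. Field':
    # tangential/sagittal MTF arrays keyed by frequency and field height
    print(result['freq'], result['field'])
```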
districtdatalabs/yellowbrick-docs-zh
yellowbrick/cluster/elbow.py
KElbowVisualizer.draw
python
def draw(self):
    self.ax.plot(self.k_values_, self.k_scores_, marker="D", label="score")

    if self.timings:
        self.axes = [self.ax, self.ax.twinx()]
        self.axes[1].plot(
            self.k_values_, self.k_timers_, label="fit time",
            c='g', marker="o", linestyle="--", alpha=0.75,
        )

    return self.ax
Draw the elbow curve for the specified scores and values of K.
https://github.com/districtdatalabs/yellowbrick-docs-zh/blob/3118e67f2bed561a00885e6edb2cabb3520ad66b/yellowbrick/cluster/elbow.py#L252-L267
import time from .base import ClusteringScoreVisualizer from ..exceptions import YellowbrickValueError from sklearn.metrics import silhouette_score from sklearn.metrics import calinski_harabaz_score from sklearn.metrics.pairwise import pairwise_distances from sklearn.preprocessing import LabelEncoder __all__ = [ "KElbowVisualizer", "distortion_score" ] def distortion_score(X, labels, metric='euclidean'): le = LabelEncoder() labels = le.fit_transform(labels) unique_labels = le.classes_ distortion = 0 for current_label in unique_labels: mask = labels == current_label instances = X[mask] center = instances.mean(axis=0) distances = pairwise_distances(instances, [center], metric=metric) distances = distances ** 2 distortion += distances.mean() return distortion KELBOW_SCOREMAP = { "distortion": distortion_score, "silhouette": silhouette_score, "calinski_harabaz": calinski_harabaz_score, } class KElbowVisualizer(ClusteringScoreVisualizer): def __init__(self, model, ax=None, k=10, metric="distortion", timings=True, **kwargs): super(KElbowVisualizer, self).__init__(model, ax=ax, **kwargs) if metric not in KELBOW_SCOREMAP: raise YellowbrickValueError( "'{}' is not a defined metric " "use one of distortion, silhouette, or calinski_harabaz" ) self.scoring_metric = KELBOW_SCOREMAP[metric] self.timings = timings if isinstance(k, int): k = (2, k+1) try: k = tuple(k) self.k_values_ = list(range(*k)) except: raise YellowbrickValueError(( "Specify a range or maximal K value, the value '{}' " "is not a valid argument for K.".format(k) )) self.k_scores_ = None def fit(self, X, y=None, **kwargs): self.k_scores_ = [] self.k_timers_ = [] for k in self.k_values_: start = time.time() self.estimator.set_params(n_clusters=k) self.estimator.fit(X) self.k_timers_.append(time.time() - start) self.k_scores_.append( self.scoring_metric(X, self.estimator.labels_) ) self.draw() return self
Apache License 2.0
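A usage sketch for the KElbowVisualizer.draw entry above; the data, model, and K range are illustrative choices, and the final rendering call name depends on the yellowbrick release.

```python
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from yellowbrick.cluster import KElbowVisualizer

X, _ = make_blobs(n_samples=500, centers=4, n_features=8, random_state=42)

viz = KElbowVisualizer(KMeans(random_state=42), k=(2, 10),
                       metric="distortion", timings=True)
viz.fit(X)   # scores each K and calls draw(), adding a twin axis for fit times
viz.poof()   # render the figure (newer yellowbrick releases call this show())
```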
scikit-multiflow/scikit-multiflow
src/skmultiflow/meta/oza_bagging.py
OzaBaggingClassifier.predict
python
def predict(self, X):
    r, c = get_dimensions(X)
    proba = self.predict_proba(X)
    predictions = []
    if proba is None:
        return None
    for i in range(r):
        predictions.append(np.argmax(proba[i]))
    return np.asarray(predictions)
Predict classes for the passed data.

Parameters
----------
X : numpy.ndarray of shape (n_samples, n_features)
    The set of data samples to predict the class labels for.

Returns
-------
A numpy.ndarray with all the predictions for the samples in X.

Notes
-----
The predict function will average the predictions from all its learners
to find the most likely prediction for the sample matrix X.
https://github.com/scikit-multiflow/scikit-multiflow/blob/d073a706b5006cba2584761286b7fa17e74e87be/src/skmultiflow/meta/oza_bagging.py#L196-L221
import copy as cp import warnings import numpy as np from skmultiflow.core import BaseSKMObject, ClassifierMixin, MetaEstimatorMixin from skmultiflow.lazy import KNNADWINClassifier from skmultiflow.utils import check_random_state, get_dimensions def OzaBagging(base_estimator=KNNADWINClassifier(), n_estimators=10, random_state=None): warnings.warn("'OzaBagging' has been renamed to 'OzaBaggingClassifier' in v0.5.0.\n" "The old name will be removed in v0.7.0", category=FutureWarning) return OzaBaggingClassifier(base_estimator=base_estimator, n_estimators=n_estimators, random_state=random_state) class OzaBaggingClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin): def __init__(self, base_estimator=KNNADWINClassifier(), n_estimators=10, random_state=None): super().__init__() self.ensemble = None self.actual_n_estimators = None self.classes = None self._random_state = None self.base_estimator = base_estimator self.n_estimators = n_estimators self.random_state = random_state self.__configure() def __configure(self): if hasattr(self.base_estimator, "reset"): self.base_estimator.reset() self.actual_n_estimators = self.n_estimators self.ensemble = [cp.deepcopy(self.base_estimator) for _ in range(self.actual_n_estimators)] self._random_state = check_random_state(self.random_state) def reset(self): self.__configure() return self def partial_fit(self, X, y, classes=None, sample_weight=None): if self.classes is None: if classes is None: raise ValueError("The first partial_fit call should pass all the classes.") else: self.classes = classes if self.classes is not None and classes is not None: if set(self.classes) == set(classes): pass else: raise ValueError("The classes passed to the partial_fit function" "differ from those passed earlier.") self.__adjust_ensemble_size() r, _ = get_dimensions(X) for j in range(r): for i in range(self.actual_n_estimators): k = self._random_state.poisson() if k > 0: for b in range(k): self.ensemble[i].partial_fit([X[j]], [y[j]], classes, sample_weight) return self def __adjust_ensemble_size(self): if len(self.classes) != len(self.ensemble): if len(self.classes) > len(self.ensemble): for i in range(len(self.ensemble), len(self.classes)): self.ensemble.append(cp.deepcopy(self.base_estimator)) self.actual_n_estimators += 1
BSD 3-Clause New or Revised License
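A usage sketch for the OzaBaggingClassifier.predict entry above; the SEA generator, ensemble size, and sample counts are illustrative choices, not from the source.

```python
import numpy as np
from skmultiflow.data import SEAGenerator
from skmultiflow.lazy import KNNADWINClassifier
from skmultiflow.meta import OzaBaggingClassifier

stream = SEAGenerator(random_state=1)
clf = OzaBaggingClassifier(base_estimator=KNNADWINClassifier(), n_estimators=3)

# The first partial_fit call must be given the complete list of classes.
X, y = stream.next_sample(200)
clf.partial_fit(X, y, classes=stream.target_values)

X_test, y_test = stream.next_sample(50)
y_pred = clf.predict(X_test)   # argmax over the averaged per-learner probabilities
print("accuracy:", np.mean(y_pred == y_test))
```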
rggibson/authtopus
authtopus/cron.py
CleanupUsersHandler.get
python
def get( self ):
    q = User.query( User.email_verified_lower == None )
    users = q.order( User.updated ).fetch( UNVERIFIED_USER_DELETE_LIMIT )
    for user in users:
        if( user.updated + timedelta( hours=UNVERIFIED_USER_LIFE_HOURS )
                <= datetime.now( ) ):
            logging.debug( 'Deleting user [' + str( user.key.id() ) + ']' )
            user.cleanup_and_delete( )
        else:
            logging.debug( 'User [' + str( user.key.id() ) + '] still good' )
            break
Deletes users that have no verified email address and have not been updated recently
https://github.com/rggibson/authtopus/blob/de32d50b9cbd0e424aad4abd999f5333ddddbb2f/authtopus/cron.py#L80-L94
import logging from datetime import datetime, timedelta from webapp2 import RequestHandler from .config import TOKEN_LIFE_HOURS, TOKEN_DELETE_LIMIT from .config import UNVERIFIED_USER_DELETE_LIMIT, UNVERIFIED_USER_LIFE_HOURS from .models import UserToken, VerifyEmailUserToken, User from .api import Auth class CleanupTokensHandler( RequestHandler ): def get( self ): for subject in TOKEN_LIFE_HOURS.keys( ): if subject == 'verify_email': q = VerifyEmailUserToken.query( ).order( VerifyEmailUserToken.created ) tokens = q.fetch( TOKEN_DELETE_LIMIT, projection=[ VerifyEmailUserToken.created, VerifyEmailUserToken.user, VerifyEmailUserToken.email ] ) info_deleted = set( ) for token in tokens: if( token.created + timedelta( hours=TOKEN_LIFE_HOURS.get( subject ) ) <= datetime.now( ) ): logging.debug( 'Deleting token [' + str( token.key.id( ) ) + ']' ) info_deleted.add( ( token.user, token.email ) ) token.key.delete( ) else: logging.debug( 'Token [' + str( token.key.id( ) ) + '] still valid' ) break for ( user_id, email ) in info_deleted: if( VerifyEmailUserToken.count_by_email( user_id, email ) <= 0 ): user = User.get_by_id( int( user_id ) ) if user is None: logging.error( 'No user found with id [' + str( user_id ) + ']' ) elif( user.email_pending_lower == email.lower( ) and user.email_verified ): Auth.update_user_internal( user, user.email_verified, user.username ) else: q = UserToken.query( UserToken.subject == subject ) tokens = q.order( UserToken.created ).fetch( TOKEN_DELETE_LIMIT, projection=[ UserToken.created ] ) for token in tokens: if( token.created + timedelta( hours=TOKEN_LIFE_HOURS.get( subject ) ) <= datetime.now( ) ): logging.debug( 'Deleting token [' + str( token.key.id( ) ) + ']' ) token.key.delete( ) else: logging.debug( 'Token [' + str( token.key.id( ) ) + '] still valid' ) break class CleanupUsersHandler( RequestHandler ):
MIT License
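A wiring sketch for the CleanupUsersHandler entry above: both cleanup handlers are plain webapp2 RequestHandlers, so in an App Engine runtime they would typically be mounted on cron-only routes. The URL paths below are assumptions and would be paired with matching entries in cron.yaml.

```python
import webapp2
from authtopus.cron import CleanupTokensHandler, CleanupUsersHandler

# Hypothetical route layout; the real paths are defined by the deploying app.
app = webapp2.WSGIApplication([
    ('/cron/cleanup-tokens', CleanupTokensHandler),
    ('/cron/cleanup-users', CleanupUsersHandler),
], debug=False)
```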
googleapis/python-spanner-django
django_spanner/functions.py
log
python
def log(self, compiler, connection, **extra_context):
    clone = self.copy()
    clone.set_source_expressions(self.get_source_expressions()[::-1])
    return clone.as_sql(compiler, connection, **extra_context)
A method to extend Django Log class. Returns a SQL query of calculated
logarithm.

:type self: :class:`~django.db.models.functions.Log`
:param self: the instance of the class that owns this method.

:type compiler: :class:`~django_spanner.compiler.SQLCompiler`
:param compiler: The query compiler responsible for generating the query.
    Must have a compile method, returning a (sql, [params]) tuple. Calling
    compiler(value) will return a quoted `value`.

:type connection: :class:`~google.cloud.spanner_dbapi.connection.Connection`
:param connection: The Spanner database connection used for the current
    query.

:rtype: tuple(str, list)
:returns: A tuple where `str` is a string containing ordered SQL parameters
    to be replaced with the elements of the `list`.
https://github.com/googleapis/python-spanner-django/blob/1ec07849787dfcfeda7206e038e0a63c0b45d74c/django_spanner/functions.py#L214-L240
import math from django.db.models.expressions import Func, Value from django.db.models.functions import ( Cast, Chr, ConcatPair, Cot, Degrees, Left, Log, Ord, Pi, Radians, Right, StrIndex, Substr, ) class IfNull(Func): function = "IFNULL" arity = 2 def cast(self, compiler, connection, **extra_context): max_length = getattr(self.output_field, "max_length", None) if max_length is not None: template = "SUBSTR(" + self.template + ", 0, %s)" % max_length else: template = self.template return self.as_sql( compiler, connection, template=template, **extra_context ) def chr_(self, compiler, connection, **extra_context): return self.as_sql( compiler, connection, template="CODE_POINTS_TO_STRING([%(expressions)s])", **extra_context ) def concatpair(self, compiler, connection, **extra_context): clone = self.copy() clone.set_source_expressions( IfNull(e, Value("")) for e in self.get_source_expressions() ) return clone.as_sql(compiler, connection, **extra_context) def cot(self, compiler, connection, **extra_context): return self.as_sql( compiler, connection, template="(1 / TAN(%(expressions)s))", **extra_context ) def degrees(self, compiler, connection, **extra_context): return self.as_sql( compiler, connection, template="((%%(expressions)s) * 180 / %s)" % math.pi, **extra_context ) def left_and_right(self, compiler, connection, **extra_context): return self.get_substr().as_spanner(compiler, connection, **extra_context)
BSD 3-Clause New or Revised License
treasure-data/td-client-python
tdclient/client.py
Client.job_result_format_each
python
def job_result_format_each(self, job_id, format):
    for row in self.api.job_result_format_each(job_id, format):
        yield row
Args:
    job_id (str): job id
    format (str): output format of result set

Returns:
    an iterator of rows in result set
https://github.com/treasure-data/td-client-python/blob/af0acaa04b13004c63795331625eed0647ad1edc/tdclient/client.py#L337-L347
import json from tdclient import api, models class Client: def __init__(self, *args, **kwargs): self._api = api.API(*args, **kwargs) def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() @property def api(self): return self._api @property def apikey(self): return self._api.apikey def server_status(self): return self.api.server_status() def create_database(self, db_name, **kwargs): return self.api.create_database(db_name, **kwargs) def delete_database(self, db_name): return self.api.delete_database(db_name) def databases(self): databases = self.api.list_databases() return [ models.Database(self, db_name, **kwargs) for (db_name, kwargs) in databases.items() ] def database(self, db_name): databases = self.api.list_databases() for (name, kwargs) in databases.items(): if name == db_name: return models.Database(self, name, **kwargs) raise api.NotFoundError("Database '%s' does not exist" % (db_name)) def create_log_table(self, db_name, table_name): return self.api.create_log_table(db_name, table_name) def swap_table(self, db_name, table_name1, table_name2): return self.api.swap_table(db_name, table_name1, table_name2) def update_schema(self, db_name, table_name, schema): return self.api.update_schema(db_name, table_name, json.dumps(schema)) def update_expire(self, db_name, table_name, expire_days): return self.api.update_expire(db_name, table_name, expire_days) def delete_table(self, db_name, table_name): return self.api.delete_table(db_name, table_name) def tables(self, db_name): m = self.api.list_tables(db_name) return [ models.Table(self, db_name, table_name, **kwargs) for (table_name, kwargs) in m.items() ] def table(self, db_name, table_name): tables = self.tables(db_name) for table in tables: if table.table_name == table_name: return table raise api.NotFoundError("Table '%s.%s' does not exist" % (db_name, table_name)) def tail(self, db_name, table_name, count, to=None, _from=None, block=None): return self.api.tail(db_name, table_name, count, to, _from, block) def change_database(self, db_name, table_name, new_db_name): return self.api.change_database(db_name, table_name, new_db_name) def query( self, db_name, q, result_url=None, priority=None, retry_limit=None, type="hive", **kwargs ): if type not in ["hive", "pig", "impala", "presto"]: raise ValueError("The specified query type is not supported: %s" % (type)) job_id = self.api.query( q, type=type, db=db_name, result_url=result_url, priority=priority, retry_limit=retry_limit, **kwargs ) return models.Job(self, job_id, type, q) def jobs(self, _from=None, to=None, status=None, conditions=None): results = self.api.list_jobs(_from, to, status, conditions) return [job_from_dict(self, d) for d in results] def job(self, job_id): d = self.api.show_job(str(job_id)) return job_from_dict(self, d, job_id=job_id) def job_status(self, job_id): return self.api.job_status(job_id) def job_result(self, job_id): return self.api.job_result(job_id) def job_result_each(self, job_id): for row in self.api.job_result_each(job_id): yield row def job_result_format(self, job_id, format): return self.api.job_result_format(job_id, format)
Apache License 2.0
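A usage sketch for the Client.job_result_format_each entry above; the API key, database, and query are placeholders, and job.wait() / job.job_id are assumed from tdclient's Job model rather than shown in this excerpt.

```python
import tdclient

with tdclient.Client(apikey="YOUR_API_KEY") as client:
    job = client.query("sample_datasets",
                       "SELECT COUNT(1) FROM www_access", type="presto")
    job.wait()   # block until the job finishes
    # Stream rows in the requested result format ("json" here).
    for row in client.job_result_format_each(job.job_id, format="json"):
        print(row)
```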
pydata/xarray
xarray/convert.py
from_cdms2
python
def from_cdms2(variable):
    values = np.asarray(variable)
    name = variable.id
    dims = variable.getAxisIds()
    coords = {}
    for axis in variable.getAxisList():
        coords[axis.id] = DataArray(
            np.asarray(axis),
            dims=[axis.id],
            attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs),
        )
    grid = variable.getGrid()
    if grid is not None:
        ids = [a.id for a in grid.getAxisList()]
        for axis in grid.getLongitude(), grid.getLatitude():
            if axis.id not in variable.getAxisIds():
                coords[axis.id] = DataArray(
                    np.asarray(axis[:]),
                    dims=ids,
                    attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs),
                )
    attrs = _filter_attrs(variable.attributes, cdms2_ignored_attrs)
    dataarray = DataArray(values, dims=dims, coords=coords, name=name, attrs=attrs)
    return decode_cf(dataarray.to_dataset())[dataarray.name]
Convert a cdms2 variable into a DataArray
https://github.com/pydata/xarray/blob/07de257c5884df49335496ee6347fb633a7c302c/xarray/convert.py#L63-L87
from collections import Counter import numpy as np import pandas as pd from .coding.times import CFDatetimeCoder, CFTimedeltaCoder from .conventions import decode_cf from .core import duck_array_ops from .core.dataarray import DataArray from .core.dtypes import get_fill_value from .core.pycompat import dask_array_type cdms2_ignored_attrs = {"name", "tileIndex"} iris_forbidden_keys = { "standard_name", "long_name", "units", "bounds", "axis", "calendar", "leap_month", "leap_year", "month_lengths", "coordinates", "grid_mapping", "climatology", "cell_methods", "formula_terms", "compress", "missing_value", "add_offset", "scale_factor", "valid_max", "valid_min", "valid_range", "_FillValue", } cell_methods_strings = { "point", "sum", "maximum", "median", "mid_range", "minimum", "mean", "mode", "standard_deviation", "variance", } def encode(var): return CFTimedeltaCoder().encode(CFDatetimeCoder().encode(var.variable)) def _filter_attrs(attrs, ignored_attrs): return {k: v for k, v in attrs.items() if k not in ignored_attrs}
Apache License 2.0
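A usage sketch for the from_cdms2 entry above; it requires the legacy cdms2 package, and the file and variable names are placeholders.

```python
import cdms2
from xarray.convert import from_cdms2

f = cdms2.open("tas_sample.nc")
variable = f("tas")                  # cdms2 returns a TransientVariable
data_array = from_cdms2(variable)    # axes become coordinates, attrs are filtered
f.close()
print(data_array.dims, list(data_array.coords))
```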
airtestproject/airtest
airtest/core/android/touch_methods/base_touch.py
BaseTouch.safe_send
python
def safe_send(self, data):
    if isinstance(data, six.text_type):
        data = data.encode('utf-8')
    try:
        self.client.send(data)
    except Exception as err:
        raise err
Send data to client

Args:
    data: data to send

Raises:
    Exception: when data cannot be sent

Returns:
    None
https://github.com/airtestproject/airtest/blob/c29d0462fe29db5c04cda31de1c05bcae5991061/airtest/core/android/touch_methods/base_touch.py#L78-L98
import threading import time import six from six.moves import queue from airtest.utils.logger import get_logger from airtest.utils.snippet import (on_method_ready, ready_method, reg_cleanup, kill_proc) LOGGING = get_logger(__name__) class BaseTouch(object): def __init__(self, adb, backend=False, size_info=None, input_event=None, *args, **kwargs): self.adb = adb self.backend = backend self.server_proc = None self.client = None self.size_info = None self.input_event = input_event self.handle = None self.size_info = size_info or self.adb.get_display_info() self.default_pressure = 50 self.path_in_android = "" reg_cleanup(self.teardown) @ready_method def install_and_setup(self): self.install() self.setup_server() if self.backend: self.setup_client_backend() else: self.setup_client() def uninstall(self): raise NotImplemented def install(self): raise NotImplemented def setup_server(self): raise NotImplemented
Apache License 2.0
floriankempenich/appdaemon-test-framework
appdaemontestframework/appdaemon_mock/scheduler.py
MockScheduler.sim_elapsed_seconds
python
def sim_elapsed_seconds(self):
    return (self._now - self._start_time).total_seconds()
Returns number of seconds elapsed since the start of the simulation
https://github.com/floriankempenich/appdaemon-test-framework/blob/da6111c6cc2d8cf4bc63f0fe78dc3a2542675a1d/appdaemontestframework/appdaemon_mock/scheduler.py#L80-L82
import datetime import uuid import pytz from appdaemontestframework.appdaemon_mock.appdaemon import MockAppDaemon class MockScheduler: def __init__(self, AD: MockAppDaemon): self.AD = AD self._registered_callbacks = [] self.sim_set_start_time(datetime.datetime(2000, 1, 1, 0, 0)) async def get_now(self): return self.get_now_sync() def get_now_sync(self): return pytz.utc.localize(self._now) async def get_now_ts(self): return (await self.get_now()).timestamp() async def get_now_naive(self): return self.make_naive(await self.get_now()) async def insert_schedule(self, name, aware_dt, callback, repeat, type_, **kwargs): naive_dt = self.make_naive(aware_dt) return self._queue_calllback(callback, kwargs, naive_dt) async def cancel_timer(self, name, handle): for callback in self._registered_callbacks: if callback.handle == handle: self._registered_callbacks.remove(callback) def convert_naive(self, dt): result = None if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None: result = self.AD.tz.localize(dt) else: result = dt return result def make_naive(self, dt): local = dt.astimezone(self.AD.tz) return datetime.datetime( local.year, local.month, local.day, local.hour, local.minute, local.second, local.microsecond, ) def sim_set_start_time(self, time): if len(self._registered_callbacks) > 0: raise RuntimeError("You can not set start time while callbacks are scheduled") if type(time) == datetime.time: time = datetime.datetime.combine(self._now.date(), time) self._start_time = self._now = time def sim_get_start_time(self): return pytz.utc.localize(self._start_time)
MIT License
voxel51/eta
eta/core/annotations.py
ManualColormap.__init__
python
def __init__(self, config):
    self.validate(config)
    self.config = config
Creates a ManualColormap instance.

Args:
    config: a ManualColormapConfig instance
https://github.com/voxel51/eta/blob/e51510fda0722ac7cadb17b109bad413a6602ed3/eta/core/annotations.py#L621-L628
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import * from copy import deepcopy import logging import random import cv2 import numpy as np from PIL import Image, ImageDraw, ImageFont import eta import eta.constants as etac from eta.core.config import Config, Configurable import eta.core.data as etad import eta.core.frameutils as etaf import eta.core.image as etai import eta.core.logo as etal import eta.core.utils as etau import eta.core.video as etav logger = logging.getLogger(__name__) class AnnotationConfig(Config): def __init__(self, d): self.show_frame_attr_names = self.parse_bool( d, "show_frame_attr_names", default=True ) self.show_frame_attr_confidences = self.parse_bool( d, "show_frame_attr_confidences", default=False ) self.frame_attrs_box_gap = self.parse_string( d, "frame_attrs_box_gap", default="1%" ) self.show_object_boxes = self.parse_bool( d, "show_object_boxes", default=True ) self.show_object_names = self.parse_bool( d, "show_object_names", default=True ) self.show_object_labels = self.parse_bool( d, "show_object_labels", default=True ) self.show_object_attrs = self.parse_bool( d, "show_object_attrs", default=True ) self.show_object_confidences = self.parse_bool( d, "show_object_confidences", default=False ) self.per_object_name_colors = self.parse_bool( d, "per_object_name_colors", default=True ) self.per_object_label_colors = self.parse_bool( d, "per_object_label_colors", default=True ) self.per_object_index_colors = self.parse_bool( d, "per_object_index_colors", default=True ) self.show_object_attr_names = self.parse_bool( d, "show_object_attr_names", default=True ) self.show_object_attr_confidences = self.parse_bool( d, "show_object_attr_confidences", default=False ) self.show_object_indices = self.parse_bool( d, "show_object_indices", default=True ) self.show_object_masks = self.parse_bool( d, "show_object_masks", default=True ) self.occluded_object_attr = self.parse_string( d, "occluded_object_attr", default="occluded" ) self.hide_occluded_objects = self.parse_bool( d, "hide_occluded_objects", default=False ) self.show_event_boxes = self.parse_bool( d, "show_event_boxes", default=True ) self.show_event_labels = self.parse_bool( d, "show_event_labels", default=True ) self.show_event_attrs = self.parse_bool( d, "show_event_attrs", default=True ) self.show_event_names = self.parse_bool( d, "show_event_names", default=True ) self.show_event_confidences = self.parse_bool( d, "show_event_confidences", default=False ) self.per_event_name_colors = self.parse_bool( d, "per_event_name_colors", default=True ) self.per_event_label_colors = self.parse_bool( d, "per_event_label_colors", default=True ) self.per_event_index_colors = self.parse_bool( d, "per_event_index_colors", default=True ) self.show_event_attr_names = self.parse_bool( d, "show_event_attr_names", default=True ) self.show_event_attr_confidences = self.parse_bool( d, "show_event_attr_confidences", default=False ) self.show_event_indices = self.parse_bool( d, "show_event_indices", default=True ) self.show_event_masks = self.parse_bool( d, "show_event_masks", default=True ) self.show_event_label_on_objects = self.parse_bool( d, "show_event_label_on_objects", default=True ) self.show_event_objects_in_same_color = self.parse_bool( d, "show_event_objects_in_same_color", default=True ) self.occluded_event_attr = self.parse_string( d, "occluded_event_attr", default="occluded" ) self.hide_occluded_events = 
self.parse_bool( d, "hide_occluded_events", default=False ) self.bbox_alpha = self.parse_number(d, "bbox_alpha", default=0.75) self.bbox_label_text_pad_pixels = self.parse_number( d, "bbox_label_text_pad_pixels", default=2 ) self.bbox_linewidth = self.parse_number(d, "bbox_linewidth", default=3) self.mask_border_thickness = self.parse_number( d, "mask_border_thickness", default=2 ) self.mask_fill_alpha = self.parse_number( d, "mask_fill_alpha", default=0.7 ) self.show_frame_mask_semantics = self.parse_bool( d, "show_frame_mask_semantics", default=True ) self.attrs_box_render_method = self.parse_categorical( d, "attrs_box_render_method", ["list", "panel"], default="panel" ) self.attrs_box_bg_color = self.parse_string( d, "attrs_box_bg_color", default="#000000" ) self.attrs_box_bg_alpha = self.parse_number( d, "attrs_box_bg_alpha", default=0.5 ) self.attrs_box_text_pad_pixels = self.parse_number( d, "attrs_box_text_pad_pixels", default=5 ) self.attrs_box_text_line_spacing_pixels = self.parse_number( d, "attrs_box_text_line_spacing_pixels", default=1 ) self.show_keypoints_names = self.parse_bool( d, "show_keypoints_names", default=True ) self.show_keypoints_labels = self.parse_bool( d, "show_keypoints_labels", default=True ) self.show_keypoints_attrs = self.parse_bool( d, "show_keypoints_attrs", default=True ) self.show_keypoints_attr_names = self.parse_bool( d, "show_keypoints_attr_names", default=True ) self.show_keypoints_attr_confidences = self.parse_bool( d, "show_keypoints_attr_confidences", default=True ) self.per_keypoints_name_colors = self.parse_bool( d, "per_keypoints_name_colors", default=True ) self.per_keypoints_label_colors = self.parse_bool( d, "per_keypoints_label_colors", default=True ) self.keypoints_size = self.parse_number(d, "keypoints_size", default=4) self.keypoints_alpha = self.parse_number( d, "keypoints_alpha", default=0.75 ) self.show_polyline_names = self.parse_bool( d, "show_polyline_names", default=True ) self.show_polyline_labels = self.parse_bool( d, "show_polyline_labels", default=True ) self.show_polyline_attrs = self.parse_bool( d, "show_polyline_attrs", default=True ) self.show_polyline_attr_names = self.parse_bool( d, "show_polyline_attr_names", default=True ) self.show_polyline_attr_confidences = self.parse_bool( d, "show_polyline_attr_confidences", default=True ) self.hide_non_filled_polyline_annos = self.parse_bool( d, "hide_non_filled_polyline_annos", default=False ) self.per_polyline_name_colors = self.parse_bool( d, "per_polyline_name_colors", default=True ) self.per_polyline_label_colors = self.parse_bool( d, "per_polyline_label_colors", default=True ) self.polyline_alpha = self.parse_number( d, "polyline_alpha", default=0.75 ) self.polyline_linewidth = self.parse_number( d, "polyline_linewidth", default=3 ) self.fill_polylines = self.parse_bool( d, "fill_polylines", default=True ) self.show_all_names = self.parse_bool( d, "show_all_names", default=False ) self.hide_all_names = self.parse_bool( d, "hide_all_names", default=False ) self.show_name_only_titles = self.parse_bool( d, "show_name_only_titles", default=False ) self.show_all_confidences = self.parse_bool( d, "show_all_confidences", default=False ) self.hide_all_confidences = self.parse_bool( d, "hide_all_confidences", default=False ) self.labels_whitelist = self.parse_array( d, "labels_whitelist", default=None ) self.labels_blacklist = self.parse_array( d, "labels_blacklist", default=None ) self.attr_names_blacklist = self.parse_array( d, "attr_names_blacklist", default=None ) 
self.attr_values_blacklist = self.parse_array( d, "attr_values_blacklist", default=None ) self.hide_false_boolean_attrs = self.parse_bool( d, "hide_false_boolean_attrs", default=False ) self.confidence_scaled_alpha = self.parse_bool( d, "confidence_scaled_alpha", default=False ) self.colormap_config = self.parse_object( d, "colormap_config", ColormapConfig, default=None ) self.text_color = self.parse_string(d, "text_color", default="#FFFFFF") self.font_path = self.parse_string( d, "font_path", default=etac.DEFAULT_FONT_PATH ) self.font_size = self.parse_number(d, "font_size", default=16) self.scale_by_media_height = self.parse_bool( d, "scale_by_media_height", default=True ) self.add_logo = self.parse_bool(d, "add_logo", default=False) self.logo_config = self.parse_object( d, "logo_config", etal.LogoConfig, default=None ) self._media_height = None self._scale_factor = None self._logo = None self._font = None self._linewidth = None self.set_media_size(frame_size=(1280, 720)) if self.logo_config is not None: self._logo = etal.Logo(self.logo_config) elif self.add_logo: self._logo = etal.Logo.load_default() if self.colormap_config is not None: self._colormap = self.colormap_config.build() else: self._colormap = Colormap.load_default() @property def scale_factor(self): return self._scale_factor @property def colormap(self): return self._colormap @property def media_height(self): return self._media_height @property def font(self): return self._font @property def linewidth(self): return self._linewidth @property def logo(self): return self._logo def set_media_size(self, frame_size=None, shape=None, img=None): frame_size = etai.to_frame_size( frame_size=frame_size, shape=shape, img=img ) self._media_height = frame_size[1] self._scale_factor = self._get_media_scale_factor() if self.add_logo and self.logo is not None: self._logo.render_for(frame_size=frame_size) font_size = int(round(self.scale_factor * self.font_size)) self._font = ImageFont.truetype(self.font_path, font_size) self._linewidth = int(round(self.scale_factor * self.bbox_linewidth)) def _get_media_scale_factor(self): if self.scale_by_media_height: return self.media_height / 720.0 return 1.0 class ColormapConfig(Config): def __init__(self, d): self.type = self.parse_string(d, "type") self._colormap_cls, config_cls = Configurable.parse(self.type) self.config = self.parse_object(d, "config", config_cls) def build(self): return self._colormap_cls(self.config) class Colormap(Configurable): @property def colors(self): raise NotImplementedError("subclass must implement colors") def get_color(self, index): return self.colors[index % len(self.colors)] @classmethod def load_default(cls): config = ShuffledHLSColormapConfig.builder().set(num_colors=36).build() return ShuffledHLSColormap(config) class ManualColormapConfig(Config): def __init__(self, d): self.colors = self.parse_array(d, "colors") class ManualColormap(Colormap):
Apache License 2.0
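A usage sketch for the ManualColormap entry above; the hex values are arbitrary, and it is assumed (based on the Colormap base class shown in the context) that the manual colormap exposes its configured colors through the inherited get_color() cycling logic.

```python
from eta.core.annotations import ManualColormap, ManualColormapConfig

config = ManualColormapConfig({"colors": ["#FF0000", "#00FF00", "#0000FF"]})
colormap = ManualColormap(config)

# get_color() wraps around the configured list: index 4 of 3 colors -> index 1.
print(colormap.get_color(4))
```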
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_custom_resource_definition_status.py
V1CustomResourceDefinitionStatus.conditions
python
def conditions(self):
    return self._conditions
Gets the conditions of this V1CustomResourceDefinitionStatus.  # noqa: E501

conditions indicate state for particular aspects of a CustomResourceDefinition  # noqa: E501

:return: The conditions of this V1CustomResourceDefinitionStatus.  # noqa: E501
:rtype: list[V1CustomResourceDefinitionCondition]
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_custom_resource_definition_status.py#L87-L95
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1CustomResourceDefinitionStatus(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'accepted_names': 'V1CustomResourceDefinitionNames', 'conditions': 'list[V1CustomResourceDefinitionCondition]', 'stored_versions': 'list[str]' } attribute_map = { 'accepted_names': 'acceptedNames', 'conditions': 'conditions', 'stored_versions': 'storedVersions' } def __init__(self, accepted_names=None, conditions=None, stored_versions=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._accepted_names = None self._conditions = None self._stored_versions = None self.discriminator = None if accepted_names is not None: self.accepted_names = accepted_names if conditions is not None: self.conditions = conditions if stored_versions is not None: self.stored_versions = stored_versions @property def accepted_names(self): return self._accepted_names @accepted_names.setter def accepted_names(self, accepted_names): self._accepted_names = accepted_names @property
Apache License 2.0
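A usage sketch for the conditions property above; in practice the API client populates this generated model when deserializing a CustomResourceDefinition status, so the hand-built values here are purely illustrative.

```python
from kubernetes_asyncio.client import V1CustomResourceDefinitionStatus

status = V1CustomResourceDefinitionStatus(conditions=[], stored_versions=["v1"])
print(status.conditions)   # -> [] (plain getter over the stored list)
```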
operasoftware/tlsprober
probedb/resultdb2/templatetags/tags.py
get_count_value
python
def get_count_value(context, fieldname): return {"value":fieldname}
{% get_count_value fieldname %}
https://github.com/operasoftware/tlsprober/blob/927f6177939470235bf336bca27096369932fc66/probedb/resultdb2/templatetags/tags.py#L6-L9
from django import template register = template.Library()
Apache License 2.0
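The registration decorator is not included in the record, but a tag that takes (context, fieldname) and returns a template context dict is typically wired up as a Django inclusion tag with takes_context=True. A hedged sketch of that wiring (the template name is a placeholder, not taken from the record):

# Hedged sketch of how such a tag is usually registered; "count_value.html"
# is a placeholder template name.
from django import template

register = template.Library()

@register.inclusion_tag("count_value.html", takes_context=True)
def get_count_value(context, fieldname):
    # The returned dict becomes the context for rendering the small template.
    return {"value": fieldname}

In a template it is then invoked exactly as the docstring shows: {% get_count_value fieldname %}.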
openapi-generators/openapi-python-client
end_to_end_tests/custom-templates-golden-record/my_test_api_client/api/tests/__init__.py
TestsEndpoints.get_basic_list_of_integers
python
def get_basic_list_of_integers(cls) -> types.ModuleType: return get_basic_list_of_integers
Get a list of integers
https://github.com/openapi-generators/openapi-python-client/blob/2c157aa15682c695f2aa2ff197c88d9cc2ed2598/end_to_end_tests/custom-templates-golden-record/my_test_api_client/api/tests/__init__.py#L41-L45
import types from . import ( defaults_tests_defaults_post, get_basic_list_of_booleans, get_basic_list_of_floats, get_basic_list_of_integers, get_basic_list_of_strings, get_user_list, int_enum_tests_int_enum_post, json_body_tests_json_body_post, no_response_tests_no_response_get, octet_stream_tests_octet_stream_get, post_form_data, test_inline_objects, token_with_cookie_auth_token_with_cookie_get, unsupported_content_tests_unsupported_content_get, upload_file_tests_upload_post, upload_multiple_files_tests_upload_post, ) class TestsEndpoints: @classmethod def get_user_list(cls) -> types.ModuleType: return get_user_list @classmethod def get_basic_list_of_strings(cls) -> types.ModuleType: return get_basic_list_of_strings @classmethod
MIT License
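Since the classmethod just returns the imported endpoint module, callers can reach the generated request functions through the class without importing the module directly. A usage sketch, assuming the generated package is importable under the my_test_api_client name shown in the path:

# Usage sketch: fetch the endpoint module through the class.
from my_test_api_client.api.tests import TestsEndpoints

module = TestsEndpoints.get_basic_list_of_integers()
print(module.__name__)  # the get_basic_list_of_integers endpoint module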
hewlettpackard/oneview-redfish-toolkit
oneview_redfish_toolkit/blueprints/subscription.py
add_subscription
python
def add_subscription(): try: body = request.get_json() destination = body["Destination"] if not validators.url(destination): abort(status.HTTP_400_BAD_REQUEST, "Destination must be an URI.") is_duplicate = _is_duplicate_subscription(destination) if is_duplicate: abort(status.HTTP_400_BAD_REQUEST, "Destination is duplicate") event_types = body["EventTypes"] if not event_types: abort(status.HTTP_400_BAD_REQUEST, "EventTypes cannot be empty.") context = body.get("Context") except KeyError: error_message = "Invalid JSON key. The JSON request body " "must have the keys Destination and EventTypes. " "The Context is optional." abort(status.HTTP_400_BAD_REQUEST, error_message) subscription_id = str(uuid.uuid1()) try: sc = Subscription(subscription_id, destination, event_types, context) except ValidationError: error_message = "Invalid EventType. The EventTypes are " "StatusChange, ResourceUpdated, ResourceAdded, " "ResourceRemoved and Alert." abort(status.HTTP_400_BAD_REQUEST, error_message) lock = _get_file_lock() lock.acquire() try: is_duplicate = _is_duplicate_subscription(destination) if not is_duplicate: _add_subscription_to_file(sc.redfish) for event_type in sc.get_event_types(): util.get_subscriptions_by_type( )[event_type][subscription_id] = sc util.get_all_subscriptions()[subscription_id] = sc except Exception as e: logging.exception("Error while adding subscription: " + str(e)) abort(status.HTTP_500_INTERNAL_SERVER_ERROR, "Error while adding subscription to the file") finally: lock.release() json_str = sc.serialize() response = Response( response=json_str, status=status.HTTP_201_CREATED, mimetype="application/json") response.headers.add( "Location", "/redfish/v1/EventService/EventSubscriptions/" "{}".format(subscription_id)) return response
Add the Redfish Subscription. Add a new subscription when this POST operation is requested. The body of the request must have the Destination and an array of EventTypes; Context is optional. EventTypes: - ResourceUpdated - ResourceAdded - ResourceRemoved - Alert Returns: JSON: JSON with Subscription information. Exception: KeyError: When a key mapping error occurs. return abort(400)
https://github.com/hewlettpackard/oneview-redfish-toolkit/blob/258fb8e23973445842bb317230f34ed34fdd7ec2/oneview_redfish_toolkit/blueprints/subscription.py#L109-L204
import inspect import json import logging import os import uuid from filelock import FileLock from flask import abort from flask import Blueprint from flask import request from flask import Response from flask_api import status from jsonschema.exceptions import ValidationError import validators from oneview_redfish_toolkit.api import scmb from oneview_redfish_toolkit.api.subscription import Subscription from oneview_redfish_toolkit import config from oneview_redfish_toolkit import util subscription = Blueprint("subscription", __name__) REDFISH_TOOLKIT_BASE_DIR = 'oneview_redfish_toolkit' ALL_SUBSCRIPTION_FILE = 'all_subscription.json' def _all_subscription_file(): base_dir = os.path.abspath(os.path.join( os.path.dirname(inspect.getfile(inspect.currentframe())), '..')) logging.info(base_dir) return os.path.join(base_dir, ALL_SUBSCRIPTION_FILE) def get_file_content(): file_content = None try: with open(_all_subscription_file()) as f: file_content = json.load(f) except Exception as e: logging.exception("Error while reading File: " + str(e)) return file_content def _get_file_lock(): lock_path = _all_subscription_file() + '.lock' lock = FileLock(lock_path) return lock def _update_all_subscription(file_content): try: with open(_all_subscription_file(), 'w+') as f: f.write(json.dumps(file_content, indent=4)) except Exception as e: raise e def _add_subscription_to_file(subscription): file_content = get_file_content() if file_content: if not file_content.get('members'): if config.auth_mode_is_session(): token = request.headers.get('x-auth-token') scmb.init_event_service(token) else: scmb.init_event_service() file_content['members'].append(subscription) _update_all_subscription(file_content) def _delete_subscription_from_file(subscription_id): file_content = get_file_content() if file_content and file_content.get('members'): sc = [x for x in file_content.get('members') if x['Id'] == subscription_id] file_content["members"].remove(sc[0]) _update_all_subscription(file_content) def _is_duplicate_subscription(destination): file_content = get_file_content() if file_content and file_content.get('members'): for i in file_content.get('members'): if i.get('Destination') == destination: return True return False @subscription.route( "/redfish/v1/EventService/EventSubscriptions/", methods=["POST"])
Apache License 2.0
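From a client's perspective, the endpoint above is exercised with a plain HTTP POST. A sketch using the requests library (host, port and the destination URL are placeholders; the toolkit's real base URL depends on its deployment):

# Client-side sketch for the subscription endpoint; host/port are placeholders.
import requests

payload = {
    "Destination": "https://listener.example.com/events",  # must be a valid URL
    "EventTypes": ["Alert", "ResourceAdded"],               # must be non-empty
    "Context": "example-context",                           # optional
}
resp = requests.post(
    "http://localhost:5000/redfish/v1/EventService/EventSubscriptions/",
    json=payload,
)
print(resp.status_code)              # 201 on success
print(resp.headers.get("Location"))  # .../EventSubscriptions/<new subscription id>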
azure/azure-devops-cli-extension
azure-devops/azext_devops/devops_sdk/released/client_factory.py
ClientFactory.get_work_client
python
def get_work_client(self): return self._connection.get_client('azure.devops.released.work.work_client.WorkClient')
get_work_client. Gets the 5.1 version of the WorkClient :rtype: :class:`<WorkClient> <azure.devops.released.work.work_client.WorkClient>`
https://github.com/azure/azure-devops-cli-extension/blob/5f33f7d81a9c2d2990044fbd9ffa6b535cbda528/azure-devops/azext_devops/devops_sdk/released/client_factory.py#L158-L163
 class ClientFactory(object): def __init__(self, connection): self._connection = connection def get_accounts_client(self): return self._connection.get_client('azure.devops.released.accounts.accounts_client.AccountsClient') def get_build_client(self): return self._connection.get_client('azure.devops.released.build.build_client.BuildClient') def get_cloud_load_test_client(self): return self._connection.get_client('azure.devops.released.cloud_load_test.cloud_load_test_client.CloudLoadTestClient') def get_core_client(self): return self._connection.get_client('azure.devops.released.core.core_client.CoreClient') def get_git_client(self): return self._connection.get_client('azure.devops.released.git.git_client.GitClient') def get_identity_client(self): return self._connection.get_client('azure.devops.released.identity.identity_client.IdentityClient') def get_notification_client(self): return self._connection.get_client('azure.devops.released.notification.notification_client.NotificationClient') def get_operations_client(self): return self._connection.get_client('azure.devops.released.operations.operations_client.OperationsClient') def get_policy_client(self): return self._connection.get_client('azure.devops.released.policy.policy_client.PolicyClient') def get_profile_client(self): return self._connection.get_client('azure.devops.released.profile.profile_client.ProfileClient') def get_release_client(self): return self._connection.get_client('azure.devops.released.release.release_client.ReleaseClient') def get_security_client(self): return self._connection.get_client('azure.devops.released.security.security_client.SecurityClient') def get_service_hooks_client(self): return self._connection.get_client('azure.devops.released.service_hooks.service_hooks_client.ServiceHooksClient') def get_task_client(self): return self._connection.get_client('azure.devops.released.task.task_client.TaskClient') def get_task_agent_client(self): return self._connection.get_client('azure.devops.released.task_agent.task_agent_client.TaskAgentClient') def get_test_client(self): return self._connection.get_client('azure.devops.released.test.test_client.TestClient') def get_test_plan_client(self): return self._connection.get_client('azure.devops.released.test_plan.test_plan_client.TestPlanClient') def get_test_results_client(self): return self._connection.get_client('azure.devops.released.test_results.test_results_client.TestResultsClient') def get_tfvc_client(self): return self._connection.get_client('azure.devops.released.tfvc.tfvc_client.TfvcClient') def get_wiki_client(self): return self._connection.get_client('azure.devops.released.wiki.wiki_client.WikiClient')
MIT License
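A typical call site builds a Connection and hands it to the factory. A sketch under the assumption that the vendored SDK mirrors the public azure-devops package layout (the Connection and BasicAuthentication imports, organization URL and token are assumptions or placeholders, not taken from this record):

# Usage sketch: obtain the released WorkClient through the factory.
from msrest.authentication import BasicAuthentication
from azext_devops.devops_sdk.connection import Connection
from azext_devops.devops_sdk.released.client_factory import ClientFactory

credentials = BasicAuthentication("", "<personal-access-token>")  # placeholder PAT
connection = Connection(base_url="https://dev.azure.com/your-org", creds=credentials)

# The factory wraps the connection and resolves the 5.1 Work client by path.
work_client = ClientFactory(connection).get_work_client()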
collerek/ormar
ormar/models/newbasemodel.py
NewBaseModel.__setattr__
python
def __setattr__(self, name: str, value: Any) -> None: if hasattr(self, name): object.__setattr__(self, name, value) else: super().__setattr__(name, value)
Overrides __setattr__ from the pydantic parent class, as otherwise descriptors are not called. :param name: name of the attribute to set :type name: str :param value: value of the attribute to set :type value: Any :return: None :rtype: None
https://github.com/collerek/ormar/blob/5e946f514a3506c1702eb47e59dc42e5eb37f139/ormar/models/newbasemodel.py#L168-L183
import base64 import sys import warnings from typing import ( AbstractSet, Any, Callable, Dict, List, Mapping, MutableSequence, Optional, Set, TYPE_CHECKING, Tuple, Type, TypeVar, Union, cast, ) import databases import pydantic import sqlalchemy from ormar.models.utils import Extra from pydantic import BaseModel try: import orjson as json except ImportError: import json import ormar from ormar.exceptions import ModelError, ModelPersistenceError from ormar.fields import BaseField from ormar.fields.foreign_key import ForeignKeyField from ormar.models.helpers import register_relation_in_alias_manager from ormar.models.helpers.relations import expand_reverse_relationship from ormar.models.helpers.sqlalchemy import ( populate_meta_sqlalchemy_table_if_required, update_column_definition, ) from ormar.models.metaclass import ModelMeta, ModelMetaclass from ormar.models.modelproxy import ModelTableProxy from ormar.queryset.utils import translate_list_to_dict from ormar.relations.alias_manager import AliasManager from ormar.relations.relation_manager import RelationsManager if TYPE_CHECKING: from ormar.models import Model from ormar.signals import SignalEmitter T = TypeVar("T", bound="NewBaseModel") IntStr = Union[int, str] DictStrAny = Dict[str, Any] SetStr = Set[str] AbstractSetIntStr = AbstractSet[IntStr] MappingIntStrAny = Mapping[IntStr, Any] class NewBaseModel(pydantic.BaseModel, ModelTableProxy, metaclass=ModelMetaclass): __slots__ = ("_orm_id", "_orm_saved", "_orm", "_pk_column", "__pk_only__") if TYPE_CHECKING: pk: Any __model_fields__: Dict[str, BaseField] __table__: sqlalchemy.Table __fields__: Dict[str, pydantic.fields.ModelField] __pydantic_model__: Type[BaseModel] __pkname__: str __tablename__: str __metadata__: sqlalchemy.MetaData __database__: databases.Database __relation_map__: Optional[List[str]] _orm_relationship_manager: AliasManager _orm: RelationsManager _orm_id: int _orm_saved: bool _related_names: Optional[Set] _through_names: Optional[Set] _related_names_hash: str _choices_fields: Set _pydantic_fields: Set _quick_access_fields: Set _json_fields: Set _bytes_fields: Set Meta: ModelMeta def __init__(self, *args: Any, **kwargs: Any) -> None: self._verify_model_can_be_initialized() self._initialize_internal_attributes() pk_only = kwargs.pop("__pk_only__", False) object.__setattr__(self, "__pk_only__", pk_only) new_kwargs, through_tmp_dict = self._process_kwargs(kwargs) if not pk_only: values, fields_set, validation_error = pydantic.validate_model( self, new_kwargs ) if validation_error: raise validation_error else: fields_set = {self.Meta.pkname} values = new_kwargs object.__setattr__(self, "__dict__", values) object.__setattr__(self, "__fields_set__", fields_set) new_kwargs.update(through_tmp_dict) model_fields = object.__getattribute__(self, "Meta").model_fields for related in self.extract_related_names().union(self.extract_through_names()): model_fields[related].expand_relationship( new_kwargs.get(related), self, to_register=True ) if hasattr(self, "_init_private_attributes"): self._init_private_attributes()
MIT License
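The point of routing through object.__setattr__ is that data descriptors defined on the model class get a chance to intercept the assignment, instead of pydantic's own __setattr__ taking over. A standalone illustration of that mechanism (plain Python, not ormar itself):

# Standalone illustration: object.__setattr__ honours data descriptors,
# which is what the override above relies on for already-known attributes.
class Tracked:
    def __set_name__(self, owner, name):
        self.storage = "_" + name

    def __get__(self, obj, objtype=None):
        return getattr(obj, self.storage, None)

    def __set__(self, obj, value):
        print(f"descriptor handling {self.storage[1:]} = {value!r}")
        object.__setattr__(obj, self.storage, value)


class Row:
    name = Tracked()


row = Row()
object.__setattr__(row, "name", "widget")  # routed through Tracked.__set__
print(row.name)                            # widget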
continualai/avalanche
avalanche/models/pnn.py
PNNLayer.forward_single_task
python
def forward_single_task(self, x, task_label): col_idx = self.task_to_module_idx[task_label] hs = [] for ii in range(col_idx + 1): hs.append(self.columns[ii](x[:ii+1])) return hs
Forward. :param x: list of inputs, one per column seen so far. :param task_label: task label used to select the target column. :return: list of hidden activations, one per column up to the task's column.
https://github.com/continualai/avalanche/blob/9d72ee638d10af989455df8d062e8e86a4399c1d/avalanche/models/pnn.py#L181-L192
import torch import torch.nn.functional as F from torch import nn from avalanche.benchmarks.utils import AvalancheDataset from avalanche.benchmarks.utils.dataset_utils import ConstantSequence from avalanche.models import MultiTaskModule, DynamicModule from avalanche.models import MultiHeadClassifier class LinearAdapter(nn.Module): def __init__(self, in_features, out_features_per_column, num_prev_modules): super().__init__() self.lat_layers = nn.ModuleList([]) for _ in range(num_prev_modules): m = nn.Linear(in_features, out_features_per_column) self.lat_layers.append(m) def forward(self, x): assert len(x) == self.num_prev_modules hs = [] for ii, lat in enumerate(self.lat_layers): hs.append(lat(x[ii])) return sum(hs) class MLPAdapter(nn.Module): def __init__(self, in_features, out_features_per_column, num_prev_modules, activation=F.relu): super().__init__() self.num_prev_modules = num_prev_modules self.activation = activation if num_prev_modules == 0: return self.V = nn.Linear(in_features * num_prev_modules, out_features_per_column) self.alphas = nn.Parameter(torch.randn(num_prev_modules)) self.U = nn.Linear(out_features_per_column, out_features_per_column) def forward(self, x): if self.num_prev_modules == 0: return 0 assert len(x) == self.num_prev_modules assert len(x[0].shape) == 2, "Inputs to MLPAdapter should have two dimensions: " "<batch_size, num_features>." for i, el in enumerate(x): x[i] = self.alphas[i] * el x = torch.cat(x, dim=1) x = self.U(self.activation(self.V(x))) return x class PNNColumn(nn.Module): def __init__(self, in_features, out_features_per_column, num_prev_modules, adapter='mlp'): super().__init__() self.in_features = in_features self.out_features_per_column = out_features_per_column self.num_prev_modules = num_prev_modules self.itoh = nn.Linear(in_features, out_features_per_column) if adapter == 'linear': self.adapter = LinearAdapter(in_features, out_features_per_column, num_prev_modules) elif adapter == 'mlp': self.adapter = MLPAdapter(in_features, out_features_per_column, num_prev_modules) else: raise ValueError("`adapter` must be one of: {'mlp', `linear'}.") def freeze(self): for param in self.parameters(): param.requires_grad = False def forward(self, x): prev_xs, last_x = x[:-1], x[-1] hs = self.adapter(prev_xs) hs += self.itoh(last_x) return hs class PNNLayer(MultiTaskModule): def __init__(self, in_features, out_features_per_column, adapter='mlp'): super().__init__() self.in_features = in_features self.out_features_per_column = out_features_per_column self.adapter = adapter self.task_to_module_idx = {} first_col = PNNColumn(in_features, out_features_per_column, 0, adapter=adapter) self.columns = nn.ModuleList([first_col]) @property def num_columns(self): return len(self.columns) def train_adaptation(self, dataset: AvalancheDataset): super().train_adaptation(dataset) task_labels = dataset.targets_task_labels if isinstance(task_labels, ConstantSequence): task_labels = [task_labels[0]] else: task_labels = set(task_labels) assert len(task_labels) == 1, "PNN assumes a single task for each experience. Please use a " "compatible benchmark." task_label = next(iter(task_labels)) assert task_label not in self.task_to_module_idx, "A new experience is using a previously seen task label. This is " "not compatible with PNN, which assumes different task labels for" " each training experience." 
if len(self.task_to_module_idx) == 0: self.task_to_module_idx[task_label] = 0 else: self.task_to_module_idx[task_label] = self.num_columns self._add_column() def _add_column(self): for param in self.parameters(): param.requires_grad = False self.columns.append(PNNColumn(self.in_features, self.out_features_per_column, self.num_columns, adapter=self.adapter))
MIT License
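A small sketch of calling the layer directly. In normal use train_adaptation() fills task_to_module_idx from a benchmark experience; here task 0 is registered by hand purely to make the call runnable (the import path follows the record's module path):

# Hedged sketch: exercising forward_single_task on a freshly built layer.
import torch
from avalanche.models.pnn import PNNLayer

layer = PNNLayer(in_features=8, out_features_per_column=4)
layer.task_to_module_idx[0] = 0        # normally done by train_adaptation()

x = torch.randn(2, 8)                  # batch of 2 inputs
hs = layer.forward_single_task([x], task_label=0)
print(len(hs), hs[0].shape)            # 1 column -> torch.Size([2, 4])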
duncaneddy/brahe
brahe/access/tessellation.py
create_tile
python
def create_tile(request: bdm.Request, direction: np.ndarray, center_point: np.ndarray, spacecraft_id: int = None): tiles = [] strip_width = request.tessellation.tile_width strip_angle = strip_width / R_EARTH max_length = utils.circumscription_length(request) max_angle = max_length / R_EARTH ct_pnt_l1 = utils.rodrigues_rotation(center_point, direction, -strip_angle / 2.0) ct_geod_l1 = sECEFtoGEOD(ct_pnt_l1 * R_EARTH, use_degrees=True) N = fcross(ct_pnt_l1, direction) N = N / np.linalg.norm(N) l1_fd = utils.rodrigues_rotation(ct_pnt_l1, N, max_angle) l1_bk = utils.rodrigues_rotation(ct_pnt_l1, N, -max_angle) ct_pnt_l2 = utils.rodrigues_rotation(center_point, direction, strip_angle / 2.0) ct_geod_l2 = sECEFtoGEOD(ct_pnt_l2 * R_EARTH, use_degrees=True) N = fcross(ct_pnt_l2, direction) N = N / np.linalg.norm(N) l2_fd = utils.rodrigues_rotation(ct_pnt_l2, N, max_angle) l2_bk = utils.rodrigues_rotation(ct_pnt_l2, N, -max_angle) l1_geod_fd = sECEFtoGEOD(l1_fd * R_EARTH, use_degrees=True) l2_geod_fd = sECEFtoGEOD(l2_fd * R_EARTH, use_degrees=True) lmax = find_max_alongtrack_distance(request, (ct_geod_l1, l1_geod_fd), (ct_geod_l2, l2_geod_fd)) N = fcross(ct_pnt_l1, direction) N = N / np.linalg.norm(N) l1_fd = utils.rodrigues_rotation(ct_pnt_l1, N, lmax / R_EARTH) N = fcross(ct_pnt_l2, direction) N = N / np.linalg.norm(N) l2_fd = utils.rodrigues_rotation(ct_pnt_l2, N, lmax / R_EARTH) l1_geod_bk = sECEFtoGEOD(l1_bk * R_EARTH, use_degrees=True) l2_geod_bk = sECEFtoGEOD(l2_bk * R_EARTH, use_degrees=True) lmin = find_max_alongtrack_distance(request, (ct_geod_l1, l1_geod_bk), (ct_geod_l2, l2_geod_bk)) N = fcross(ct_pnt_l1, direction) N = N / np.linalg.norm(N) l1_bk = utils.rodrigues_rotation(ct_pnt_l1, N, -lmin / R_EARTH) N = fcross(ct_pnt_l2, direction) N = N / np.linalg.norm(N) l2_bk = utils.rodrigues_rotation(ct_pnt_l2, N, -lmin / R_EARTH) tile = create_tile_from_sphere_points([l1_fd, l1_bk, l2_bk, l2_fd, l1_fd], request, spacecraft_id=spacecraft_id, tile_direction=direction) tiles.append(tile) return tiles
Create a rectangular tile that covers the request polygon. Starts with the center point, rotates outward across-track to form the tile edges, then extends them along-track until the strip covers the polygon. Returns: List[Tile]: Array of tiles that tessellate the along-track direction.
https://github.com/duncaneddy/brahe/blob/d08e50a77b2785d4ec6e7c38c4e552ff6755e32a/brahe/access/tessellation.py#L280-L355
import copy import logging import typing import uuid import math import numpy as np import spherical_geometry.great_circle_arc as sggca import spherical_geometry.vector as sgv from . import utils as utils from brahe.utils import fcross import brahe.data_models as bdm from brahe.constants import R_EARTH from brahe.coordinates import sECEFtoGEOD, sGEODtoECEF logger = logging.getLogger(__name__) def create_tile_from_sphere_points(points: typing.List[np.ndarray], request: bdm.Request, spacecraft_id: int = None, tile_direction: np.ndarray = None) -> bdm.Tile: if not spacecraft_id: raise RuntimeError('Spacecraft ID required') points = [utils.sphere_point_to_latlon(pnt).tolist() for pnt in points] tile_json = { 'type': 'Feature', 'geometry': { 'type': 'Polygon', 'coordinates': [points] }, 'properties': { 'request_id': request.id, 'spacecraft_ids': [spacecraft_id], 'tile_direction': tile_direction.tolist(), } } return bdm.Tile(**tile_json) def tessellate(spacecraft: bdm.Spacecraft, request: bdm.Request): try: tiles = globals()[f'tessellate_{request.geotype.lower()}'](spacecraft, request) except RuntimeError: logger.error(f'Unable to tessellate {type(request)} - {request.id}') return tiles def tessellate_point(spacecraft: bdm.Spacecraft, request: bdm.Request): tiles = [] at_dirs = utils.compute_along_track_directions(spacecraft.tle, request.center) for at_dir in at_dirs: sgcp = request.center_ecef sgcp = np.asarray(sgcp) sgcp = sgcp / np.linalg.norm(sgcp) ct_ang = request.tessellation.tile_width / R_EARTH th = ct_ang / 2.0 v = sgcp k = at_dir ct_max = v * math.cos(th) + np.array(fcross(k, v)) * math.sin(th) + k * np.dot(k, v) * (1 - math.cos(th)) ct_min = v * math.cos(-th) + np.array(fcross(k, v)) * math.sin(-th) + k * np.dot(k, v) * (1 - math.cos(-th)) at_ang = request.tessellation.tile_length / R_EARTH angle = at_ang / 2.0 vec = ct_max axis = fcross(ct_min, at_dir) pnt1 = utils.rodrigues_rotation(vec, axis, angle) vec = ct_min axis = fcross(ct_max, at_dir) pnt2 = utils.rodrigues_rotation(vec, axis, angle) angle = -at_ang / 2.0 vec = ct_min axis = fcross(ct_min, at_dir) pnt3 = utils.rodrigues_rotation(vec, axis, angle) vec = ct_max axis = fcross(ct_max, at_dir) pnt4 = utils.rodrigues_rotation(vec, axis, angle) tiles.append( create_tile_from_sphere_points( [pnt1, pnt2, pnt3, pnt4, pnt1], request, spacecraft_id=spacecraft.id, tile_direction=at_dir ) ) return tiles def tessellate_polygon(spacecraft: bdm.Spacecraft, request: bdm.Request): tiles = [] at_dirs = utils.compute_along_track_directions(spacecraft.tle, request.center) for at_dir in at_dirs: tiles.extend(tile_polygon_direction(request, at_dir, spacecraft_id=spacecraft.spacecraft_id)) return tiles def find_polygon_intersection(request: bdm.Request, pnt1: np.ndarray, pnt2: np.ndarray): intersection = None pnt1 = np.asarray(pnt1) pnt1 = sGEODtoECEF(pnt1, use_degrees=True) pnt1 = pnt1 / np.linalg.norm(pnt1) pnt2 = np.asarray(pnt2) pnt2 = sGEODtoECEF(pnt2, use_degrees=True) pnt2 = pnt2 / np.linalg.norm(pnt2) for idx in range(0, request.num_points): ply1 = request.geometry.coordinates[0][idx] ply2 = request.geometry.coordinates[0][idx + 1] ply1 = np.asarray([ply1[0], ply1[1], 0.0]) ply1 = sGEODtoECEF(ply1, use_degrees=True) ply1 = ply1 / np.linalg.norm(ply1) ply2 = np.asarray([ply2[0], ply2[1], 0.0]) ply2 = sGEODtoECEF(ply2, use_degrees=True) ply2 = ply2 / np.linalg.norm(ply2) if sggca.intersects(pnt1, pnt2, ply1, ply2): N1 = fcross(pnt1, pnt2) N2 = fcross(ply1, ply2) N1 = N1 / np.linalg.norm(N1) N2 = N2 / np.linalg.norm(N2) N3 = fcross(N1, N2) N3 = 
N3 / np.linalg.norm(N3) cnt = np.asarray(request.center_ecef) cnt = cnt / np.linalg.norm(cnt) if np.sign(np.dot(cnt, N3)) > 0: return N3 else: return -N3 return intersection def find_max_alongtrack_distance(request: bdm.Request, seg1: typing.Tuple[np.ndarray, np.ndarray], seg2: typing.Tuple[np.ndarray, np.ndarray]): int1 = find_polygon_intersection(request, *seg1) l1 = 0.0 if np.any(int1): p = np.asarray(seg1[0]) p = sGEODtoECEF(p, use_degrees=True) p = p / np.linalg.norm(p) l1 = math.acos(np.dot(p, int1)) * R_EARTH int2 = find_polygon_intersection(request, *seg2) l2 = 0.0 if np.any(int2): p = np.asarray(seg2[0]) p = sGEODtoECEF(p, use_degrees=True) p = p / np.linalg.norm(p) l2 = math.acos(np.dot(p, int2)) * R_EARTH return max(l1, l2)
MIT License
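create_tile leans heavily on utils.rodrigues_rotation to swing points about great-circle axes; the formula it implements is spelled out inline in tessellate_point above. A standalone numpy sketch of that rotation (not the brahe helper itself):

# Standalone sketch of Rodrigues' rotation formula as used by the
# tessellation code: rotate vector v about unit axis k by angle theta.
import math
import numpy as np

def rodrigues_rotation(v, k, theta):
    v = np.asarray(v, dtype=float)
    k = np.asarray(k, dtype=float)
    k = k / np.linalg.norm(k)
    return (v * math.cos(theta)
            + np.cross(k, v) * math.sin(theta)
            + k * np.dot(k, v) * (1.0 - math.cos(theta)))

# Rotating the x-axis by 90 degrees about z yields the y-axis.
print(np.round(rodrigues_rotation([1.0, 0.0, 0.0], [0.0, 0.0, 1.0], math.pi / 2), 6))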
demille/emailhooks
django_nonrel/django/contrib/gis/gdal/prototypes/errcheck.py
check_pointer
python
def check_pointer(result, func, cargs): if isinstance(result, six.integer_types): result = c_void_p(result) if bool(result): return result else: raise OGRException('Invalid pointer returned from "%s"' % func.__name__)
Makes sure the result pointer is valid.
https://github.com/demille/emailhooks/blob/16dc3b295ac9d35a20e8d0db52760db2b7e8a822/django_nonrel/django/contrib/gis/gdal/prototypes/errcheck.py#L111-L118
from ctypes import c_void_p, string_at from django.contrib.gis.gdal.error import check_err, OGRException, SRSException from django.contrib.gis.gdal.libgdal import lgdal from django.utils import six def arg_byref(args, offset=-1): return args[offset]._obj.value def ptr_byref(args, offset=-1): return args[offset]._obj def check_bool(result, func, cargs): if bool(result): return True else: return False def check_const_string(result, func, cargs, offset=None): if offset: check_err(result) ptr = ptr_byref(cargs, offset) return ptr.value else: return result def check_string(result, func, cargs, offset=-1, str_result=False): if str_result: ptr = result if not ptr: s = None else: s = string_at(result) else: check_err(result) ptr = ptr_byref(cargs, offset) s = ptr.value if ptr: lgdal.VSIFree(ptr) return s def check_envelope(result, func, cargs, offset=-1): env = ptr_byref(cargs, offset) return env def check_geom(result, func, cargs): if isinstance(result, six.integer_types): result = c_void_p(result) if not result: raise OGRException('Invalid geometry pointer returned from "%s".' % func.__name__) return result def check_geom_offset(result, func, cargs, offset=-1): check_err(result) geom = ptr_byref(cargs, offset=offset) return check_geom(geom, func, cargs) def check_srs(result, func, cargs): if isinstance(result, six.integer_types): result = c_void_p(result) if not result: raise SRSException('Invalid spatial reference pointer returned from "%s".' % func.__name__) return result def check_arg_errcode(result, func, cargs): check_err(arg_byref(cargs)) return result def check_errcode(result, func, cargs): check_err(result) return
MIT License
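check_pointer is meant to be attached as the errcheck of a ctypes prototype: ctypes calls errcheck(result, func, args) after every foreign call and lets it replace the result or raise. A standalone sketch of that protocol against the C runtime (POSIX only; not the GDAL wiring itself):

# Standalone sketch of the ctypes errcheck protocol that check_pointer plugs into.
import ctypes

libc = ctypes.CDLL(None)               # the process's C runtime (POSIX)
malloc = libc.malloc
malloc.restype = ctypes.c_void_p
malloc.argtypes = [ctypes.c_size_t]

def ensure_pointer(result, func, cargs):
    # Same shape as check_pointer: wrap ints, reject NULL results.
    if isinstance(result, int):
        result = ctypes.c_void_p(result)
    if bool(result):
        return result
    raise RuntimeError('Invalid pointer returned from "%s"' % func.__name__)

malloc.errcheck = ensure_pointer
ptr = malloc(16)                       # a valid c_void_p, or RuntimeError on NULL
libc.free(ptr)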
reliaqualassociates/ramstk
src/ramstk/views/gtk3/widgets/panel.py
RAMSTKTreePanel.do_load_treerow
python
def do_load_treerow(self, node: treelib.Node, row: Gtk.TreeIter) -> Gtk.TreeIter: _new_row = None _data: List[Any] = [] try: [[__, _entity]] = node.data.items() _attributes = _entity.get_attributes() _model = self.tvwTreeView.get_model() for _key, _pos in self.tvwTreeView.position.items(): _data.insert(_pos, _attributes[_key]) _new_row = _model.append(row, _data) except (AttributeError, TypeError, ValueError) as _error: _method_name: str = inspect.currentframe().f_code.co_name _error_msg = ( f"{_method_name}: An error occurred when loading " f"{self._tag} {node.identifier}. This might indicate it was missing " f"it's data package, some of the data in the package was missing, or " f"some of the data was the wrong type. Row data was: {_data}. Error " f"was: {_error}." ) pub.sendMessage( "do_log_warning_msg", logger_name="WARNING", message=_error_msg, ) _new_row = None return _new_row
Load a row into the RAMSTKTreeView(). :param node: the treelib Node() with the data to load. :param row: the parent row of the row to load. :return: _new_row; the row that was just populated with data. :rtype: :class:`Gtk.TreeIter`
https://github.com/reliaqualassociates/ramstk/blob/ffec5a107424914cf0026c6dfe26369c221f79f9/src/ramstk/views/gtk3/widgets/panel.py#L790-L826
import inspect from typing import Any, Callable, Dict, List, Union import treelib from pandas.plotting import register_matplotlib_converters from pubsub import pub from ramstk.utilities import boolean_to_integer from ramstk.views.gtk3 import Gtk, _ from .button import RAMSTKCheckButton from .combo import RAMSTKComboBox from .entry import RAMSTKEntry, RAMSTKTextView from .frame import RAMSTKFrame from .label import RAMSTKLabel, do_make_label_group from .plot import RAMSTKPlot from .scrolledwindow import RAMSTKScrolledWindow from .treeview import RAMSTKTreeView register_matplotlib_converters() class RAMSTKPanel(RAMSTKFrame): _record_field: str = "revision_id" _select_msg: str = "selected_revision" _tag: str = "" _title: str = "" def __init__(self) -> None: super().__init__() self._lst_labels: List[str] = [] self._lst_widgets: List[object] = [] self._parent_id: int = -1 self._record_id: int = -1 self._tree_loaded: bool = False self.dic_attribute_widget_map: Dict[str, List[Any]] = {} self.fmt: str = "{0:0.6}" self.tree: treelib.Tree = treelib.Tree() class RAMSTKFixedPanel(RAMSTKPanel): def __init__(self) -> None: super().__init__() self.on_edit_callback: str = f"wvw_editing_{self._tag}" pub.subscribe( self.do_clear_panel, "request_clear_views", ) pub.subscribe( self.do_load_panel, self._select_msg, ) pub.subscribe( self.on_edit, self.on_edit_callback, ) try: pub.subscribe(self._do_set_sensitive, f"succeed_get_{self._tag}_attributes") except AttributeError: pass try: pub.subscribe(self._do_load_entries, f"succeed_get_{self._tag}_attributes") except AttributeError: pass def do_clear_panel(self) -> None: for ( __, _value, ) in self.dic_attribute_widget_map.items(): _value[1].do_update(_value[4], signal=_value[2]) def do_load_panel( self, attributes: Dict[str, Any], ) -> None: self._record_id = attributes[self._record_field] for _key, _value in self.dic_attribute_widget_map.items(): _value[1].do_update( attributes.get(_key, _value[5]), signal=_value[2], ) pub.sendMessage("request_set_cursor_active") def do_make_panel(self, **kwargs: Dict[str, Any]) -> None: _justify = kwargs.get("justify", Gtk.Justification.RIGHT) _lst_labels = [x[1][7] for x in self.dic_attribute_widget_map.items()] _lst_widgets = [x[1][1] for x in self.dic_attribute_widget_map.items()] _fixed: Gtk.Fixed = Gtk.Fixed() _y_pos: int = 5 (_x_pos, _labels) = do_make_label_group( _lst_labels, bold=False, justify=_justify, x_pos=5, y_pos=5, ) for _idx, _label in enumerate(_labels): _fixed.put(_label, 5, _y_pos) _minimum: Gtk.Requisition = _lst_widgets[ _idx ].get_preferred_size()[0] if _minimum.height <= 0: _minimum.height = _lst_widgets[_idx].height if isinstance(_lst_widgets[_idx], RAMSTKTextView): _fixed.put( _lst_widgets[_idx].scrollwindow, _x_pos + 10, _y_pos, ) _y_pos += _minimum.height + 30 elif isinstance(_lst_widgets[_idx], RAMSTKCheckButton): _fixed.put(_lst_widgets[_idx], _x_pos + 10, _y_pos) _y_pos += _minimum.height + 30 else: _fixed.put(_lst_widgets[_idx], _x_pos + 10, _y_pos) _y_pos += _minimum.height + 5 _scrollwindow: RAMSTKScrolledWindow = RAMSTKScrolledWindow(_fixed) self.add(_scrollwindow) def do_set_callbacks(self) -> None: for ( _key, _value, ) in self.dic_attribute_widget_map.items(): _value[1].dic_handler_id[_value[2]] = _value[1].connect( _value[2], _value[3], _key, _value[4], ) def do_set_properties(self, **kwargs: Any) -> None: super().do_set_properties(**{"bold": True, "title": self._title}) for ( __, _value, ) in self.dic_attribute_widget_map.items(): _value[1].do_set_properties(**_value[6]) def 
on_changed_combo( self, combo: RAMSTKComboBox, key: str, message: str ) -> Dict[Union[str, Any], Any]: _key: str = "" _new_text: int = -1 combo.handler_block(combo.dic_handler_id["changed"]) try: _new_text = int(combo.get_active()) if _new_text > -1: pub.sendMessage( message, node_id=self._record_id, package={key: _new_text}, ) except (KeyError, ValueError): _method_name: str = inspect.currentframe().f_code.co_name _error_msg = _( f"{_method_name}: An error occurred while editing {self._tag} data " f"for record ID {self._record_id} in the view. Key {key} does not " f"exist in attribute dictionary." ) pub.sendMessage( "do_log_debug", logger_name="DEBUG", message=_error_msg, ) combo.handler_unblock(combo.dic_handler_id["changed"]) return {_key: _new_text} def on_changed_entry( self, entry: RAMSTKEntry, key: str, message: str ) -> Dict[Union[str, Any], Any]: try: _handler_id = entry.dic_handler_id["changed"] except KeyError: _handler_id = entry.dic_handler_id["value-changed"] entry.handler_block(_handler_id) _package: Dict[str, Any] = self.__do_read_text( entry, key, self.dic_attribute_widget_map[key][8] ) entry.handler_unblock(_handler_id) pub.sendMessage(message, node_id=self._record_id, package=_package) return _package def on_changed_textview( self, buffer: Gtk.TextBuffer, key: str, message: str, textview: RAMSTKTextView ) -> Dict[Union[str, Any], Any]: textview.handler_block(textview.dic_handler_id["changed"]) _package: Dict[str, Any] = self.__do_read_text( textview, key, self.dic_attribute_widget_map[key][8] ) textview.handler_unblock(textview.dic_handler_id["changed"]) pub.sendMessage(message, node_id=[self._record_id, -1], package=_package) return _package def on_edit(self, node_id: List[int], package: Dict[str, Any]) -> None: _method_name: str = inspect.currentframe().f_code.co_name [[_key, _value]] = package.items() try: _signal = self.dic_attribute_widget_map[_key][2] _function = self.dic_attribute_widget_map[_key][3] _function(_value, _signal) except KeyError: _error_msg = _( "{2}: An error occurred while updating {1} data for record " "ID {0} in the view. No key {3} in dic_attribute_widget_map." ).format(self._record_id, self._tag, _method_name, _key) pub.sendMessage( "do_log_debug", logger_name="DEBUG", message=_error_msg, ) except TypeError: _error_msg = _( "{2}: An error occurred while updating {1} data for record " "ID {0} in the view. Data for key {3} is the wrong " "type." ).format(self._record_id, self._tag, _method_name, _key) pub.sendMessage( "do_log_debug", logger_name="DEBUG", message=_error_msg, ) def on_toggled( self, checkbutton: RAMSTKCheckButton, key: str, message: str ) -> Dict[Union[str, Any], Any]: _new_text: int = -1 try: _new_text = int(checkbutton.get_active()) checkbutton.do_update(_new_text, signal="toggled") pub.sendMessage( message, node_id=[self._record_id, -1, ""], package={key: _new_text}, ) except KeyError: _method_name: str = inspect.currentframe().f_code.co_name _error_msg = _( f"{_method_name}: An error occurred while updating {self._tag} data " f"for record ID {self._record_id} in the view. Key {key} does not " f"exist in attribute dictionary." 
) pub.sendMessage( "do_log_debug", logger_name="DEBUG", message=_error_msg, ) return {key: _new_text} def __do_read_text( self, entry: RAMSTKEntry, key: str, datatype: str ) -> Dict[str, Any]: _new_text: Any = "" try: if str(datatype) == "gfloat": _new_text = float(entry.do_get_text()) elif str(datatype) == "gint": _new_text = int(entry.do_get_text()) elif str(datatype) == "gchararray": _new_text = str(entry.do_get_text()) except (KeyError, ValueError): _method_name: str = inspect.currentframe().f_code.co_name _error_msg = _( f"{_method_name}: An error occurred while reading {self._tag} data for " f"record ID {self._record_id} in the view. Key {key} does not exist " f"in attribute dictionary." ) pub.sendMessage( "do_log_debug", logger_name="DEBUG", message=_error_msg, ) return {key: _new_text} class RAMSTKPlotPanel(RAMSTKPanel): def __init__(self) -> None: super().__init__() self.pltPlot: RAMSTKPlot = RAMSTKPlot() self.lst_axis_labels: List[str] = [_("abscissa"), _("ordinate")] self.lst_legend: List[str] = [] self.plot_title: str = "" pub.subscribe(self.do_clear_panel, "request_clear_views") def do_clear_panel(self) -> None: self.pltPlot.axis.cla() self.pltPlot.figure.clf() self.pltPlot.plot.draw() def do_load_panel(self) -> None: self.pltPlot.do_make_title(self.plot_title) self.pltPlot.do_make_labels( self.lst_axis_labels[1], x_pos=-0.5, y_pos=0, set_x=False ) self.pltPlot.do_make_legend(self.lst_legend) self.pltPlot.figure.canvas.draw() def do_make_panel(self) -> None: _scrollwindow: Gtk.ScrolledWindow = Gtk.ScrolledWindow() _scrollwindow.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC) _scrollwindow.add(self.pltPlot.canvas) self.add(_scrollwindow) def do_set_callbacks(self) -> None: def do_set_properties(self, **kwargs: Any) -> None: super().do_set_properties(**{"bold": True, "title": self._title}) class RAMSTKTreePanel(RAMSTKPanel): def __init__(self) -> None: super().__init__() self.tvwTreeView: RAMSTKTreeView = RAMSTKTreeView() self._dic_row_loader: Dict[str, Callable] = {} pub.subscribe(self.do_clear_panel, "request_clear_views") pub.subscribe(self.do_refresh_tree, f"lvw_editing_{self._tag}") pub.subscribe(self.do_refresh_tree, f"mvw_editing_{self._tag}") pub.subscribe(self.do_refresh_tree, f"wvw_editing_{self._tag}") pub.subscribe(self.on_delete_treerow, f"succeed_delete_{self._tag}") if self._select_msg is not None: pub.subscribe(self.do_load_panel, self._select_msg) def do_clear_panel(self) -> None: _model = self.tvwTreeView.get_model() try: _model.clear() except AttributeError: pass def do_load_panel(self, tree: treelib.Tree) -> None: _model = self.tvwTreeView.get_model() _model.clear() try: _row = None for _node in tree.all_nodes()[1:]: _row = self._dic_row_loader[_node.tag](_node, _row) self.tvwTreeView.expand_all() _row = _model.get_iter_first() if _row is not None: self.tvwTreeView.selection.select_iter(_row) self.show_all() except TypeError: _method_name: str = inspect.currentframe().f_code.co_name _error_msg = _( "{2}: An error occurred while loading {1} data for Record " "ID {0} into the view. One or more values from the " "database was the wrong type for the column it was trying to " "load." ).format(self._record_id, self._tag, _method_name) pub.sendMessage( "do_log_debug", logger_name="DEBUG", message=_error_msg, ) except ValueError: _method_name = inspect.currentframe().f_code.co_name _error_msg = _( "{2}: An error occurred while loading {1:s} data for Record " "ID {0:d} into the view. One or more values from the " "database was missing." 
).format(self._record_id, self._tag, _method_name) pub.sendMessage( "do_log_debug", logger_name="DEBUG", message=_error_msg, )
BSD 3-Clause New or Revised License
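GTK plumbing aside, the core of do_load_treerow is marshalling the node's single {tag: entity} data package into a positional row ordered by the tree view's position map. A standalone sketch of just that step (FakeRecord and the position map are stand-ins; no GTK required):

# Standalone sketch of the row-marshalling step inside do_load_treerow.
import treelib

class FakeRecord:
    def get_attributes(self):
        return {"revision_id": 1, "name": "Allocation", "remarks": ""}

tree = treelib.Tree()
tree.create_node(tag="revision", identifier=1, data={"revision": FakeRecord()})

node = tree.get_node(1)
[[_tag, entity]] = node.data.items()      # same unpacking as in the method
attributes = entity.get_attributes()

position = {"revision_id": 0, "name": 1, "remarks": 2}   # stand-in for tvwTreeView.position
row = [None] * len(position)
for key, pos in position.items():
    row[pos] = attributes[key]

print(row)   # [1, 'Allocation', ''] -- the values appended to the Gtk model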
stevezheng23/sequence_labeling_tf
sequence_labeling/layer/attention.py
GatedAttention.__init__
python
def __init__(self, src_dim, trg_dim, att_dim, score_type, dropout, att_dropout=0.0, layer_dropout=0.0, layer_norm=False, residual_connect=False, is_self=False, external_matrix=None, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope="gated_att"): self.src_dim = src_dim self.trg_dim = trg_dim self.att_dim = att_dim self.score_type = score_type self.dropout = dropout self.att_dropout = att_dropout self.layer_dropout = layer_dropout self.layer_norm = layer_norm self.residual_connect = residual_connect self.is_self = is_self self.regularizer = regularizer self.random_seed = random_seed self.trainable = trainable self.scope = scope self.device_spec = get_device_spec(default_gpu_id, num_gpus) with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec): if external_matrix == None: self.attention_matrix = _create_attention_matrix(self.src_dim, self.trg_dim, self.att_dim, self.score_type, self.regularizer, self.random_seed, self.trainable, "att_matrix") else: self.attention_matrix = external_matrix self.dropout_layer = Dropout(rate=self.dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed) self.att_dropout_layer = Dropout(rate=self.att_dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed, scope="att_dropout") if self.layer_norm == True: self.src_norm_layer = LayerNorm(layer_dim=self.src_dim, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=self.regularizer, trainable=self.trainable, scope="src_layer_norm") if self.is_self == True: self.trg_norm_layer = self.src_norm_layer else: self.trg_norm_layer = LayerNorm(layer_dim=self.trg_dim, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=self.regularizer, trainable=self.trainable, scope="trg_layer_norm") weight_initializer = create_variable_initializer("glorot_uniform") gate_activation = create_activation_function("sigmoid") if self.residual_connect == True and self.is_self == True: self.gate_layer = tf.layers.Dense(units=self.trg_dim, activation=gate_activation, kernel_initializer=weight_initializer, kernel_regularizer=self.regularizer, trainable=self.trainable) else: self.gate_layer = tf.layers.Dense(units=self.src_dim+self.trg_dim, activation=gate_activation, kernel_initializer=weight_initializer, kernel_regularizer=self.regularizer, trainable=self.trainable)
initialize gated-attention layer
https://github.com/stevezheng23/sequence_labeling_tf/blob/05fcbec15e359e3db86af6c3798c13be8a6c58ee/sequence_labeling/layer/attention.py#L823-L888
import numpy as np import tensorflow as tf from util.default_util import * from util.sequence_labeling_util import * from layer.basic import * __all__ = ["Attention", "MaxAttention", "CoAttention", "GatedAttention", "MultiHeadAttention"] def _create_attention_matrix(src_unit_dim, trg_unit_dim, attention_unit_dim, attention_score_type, regularizer, random_seed, trainable, scope="att_matrix"): scope = "{0}/{1}".format(scope, attention_score_type) if attention_score_type == "dot": attention_matrix = [] elif attention_score_type == "scaled_dot": attention_matrix = [] elif attention_score_type == "linear": attention_matrix = _create_linear_attention_matrix(src_unit_dim, trg_unit_dim, regularizer, random_seed, trainable, scope) elif attention_score_type == "bilinear": attention_matrix = _create_bilinear_attention_matrix(src_unit_dim, trg_unit_dim, regularizer, random_seed, trainable, scope) elif attention_score_type == "nonlinear": attention_matrix = _create_nonlinear_attention_matrix(src_unit_dim, trg_unit_dim, attention_unit_dim, regularizer, random_seed, trainable, scope) elif attention_score_type == "linear_plus": attention_matrix = _create_linear_plus_attention_matrix(src_unit_dim, trg_unit_dim, regularizer, random_seed, trainable, scope) elif attention_score_type == "nonlinear_plus": attention_matrix = _create_nonlinear_plus_attention_matrix(src_unit_dim, trg_unit_dim, attention_unit_dim, regularizer, random_seed, trainable, scope) elif attention_score_type == "trilinear": attention_matrix = _create_trilinear_attention_matrix(src_unit_dim, trg_unit_dim, regularizer, random_seed, trainable, scope) else: raise ValueError("unsupported attention score type {0}".format(attention_score_type)) return attention_matrix def _create_linear_attention_matrix(src_unit_dim, trg_unit_dim, regularizer, random_seed, trainable, scope="linear"): weight_initializer = create_variable_initializer("glorot_uniform", random_seed) linear_src_weight = tf.get_variable("{0}/src_weight".format(scope), shape=[1, src_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) linear_trg_weight = tf.get_variable("{0}/trg_weight".format(scope), shape=[1, trg_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) attention_matrix = [linear_src_weight, linear_trg_weight] return attention_matrix def _create_bilinear_attention_matrix(src_unit_dim, trg_unit_dim, regularizer, random_seed, trainable, scope="bilinear"): weight_initializer = create_variable_initializer("glorot_uniform", random_seed) bilinear_weight = tf.get_variable("{0}/weight".format(scope), shape=[src_unit_dim, trg_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) attention_matrix = [bilinear_weight] return attention_matrix def _create_nonlinear_attention_matrix(src_unit_dim, trg_unit_dim, attention_unit_dim, regularizer, random_seed, trainable, scope="nonlinear"): weight_initializer = create_variable_initializer("glorot_uniform", random_seed) bias_initializer = create_variable_initializer("zero") pre_nonlinear_src_weight = tf.get_variable("{0}/pre/src_weight".format(scope), shape=[attention_unit_dim, src_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) pre_nonlinear_trg_weight = tf.get_variable("{0}/pre/trg_weight".format(scope), shape=[attention_unit_dim, trg_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, 
dtype=tf.float32) pre_nonlinear_bias = tf.get_variable("{0}/pre/bias".format(scope), shape=[attention_unit_dim], initializer=bias_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) post_nonlinear_weight = tf.get_variable("{0}/post/weight".format(scope), shape=[1, attention_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) attention_matrix = [pre_nonlinear_src_weight, pre_nonlinear_trg_weight, pre_nonlinear_bias, post_nonlinear_weight] return attention_matrix def _create_linear_plus_attention_matrix(src_unit_dim, trg_unit_dim, regularizer, random_seed, trainable, scope="linear_plus"): weight_initializer = create_variable_initializer("glorot_uniform", random_seed) if src_unit_dim != trg_unit_dim: raise ValueError("src dim {0} and trg dim must be the same for linear plus attention".format(src_unit_dim, trg_unit_dim)) else: mul_unit_dim = src_unit_dim linear_plus_src_weight = tf.get_variable("{0}/src_weight".format(scope), shape=[1, src_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) linear_plus_trg_weight = tf.get_variable("{0}/trg_weight".format(scope), shape=[1, trg_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) linear_plus_mul_weight = tf.get_variable("{0}/mul_weight".format(scope), shape=[1, mul_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) attention_matrix = [linear_plus_src_weight, linear_plus_trg_weight, linear_plus_mul_weight] return attention_matrix def _create_nonlinear_plus_attention_matrix(src_unit_dim, trg_unit_dim, attention_unit_dim, regularizer, random_seed, trainable, scope="nonlinear_plus"): weight_initializer = create_variable_initializer("glorot_uniform", random_seed) bias_initializer = create_variable_initializer("zero") if src_unit_dim != trg_unit_dim: raise ValueError("src dim {0} and trg dim must be the same for nonlinear plus attention".format(src_unit_dim, trg_unit_dim)) else: mul_unit_dim = src_unit_dim pre_nonlinear_plus_src_weight = tf.get_variable("{0}/pre/src_weight".format(scope), shape=[attention_unit_dim, src_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) pre_nonlinear_plus_trg_weight = tf.get_variable("{0}/pre/trg_weight".format(scope), shape=[attention_unit_dim, trg_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) pre_nonlinear_plus_mul_weight = tf.get_variable("{0}/pre/mul_weight".format(scope), shape=[attention_unit_dim, mul_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) pre_nonlinear_plus_bias = tf.get_variable("{0}/pre/bias".format(scope), shape=[attention_unit_dim], initializer=bias_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) post_nonlinear_plus_weight = tf.get_variable("{0}/post/weight".format(scope), shape=[1, attention_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) attention_matrix = [pre_nonlinear_plus_src_weight, pre_nonlinear_plus_trg_weight, pre_nonlinear_plus_mul_weight, pre_nonlinear_plus_bias, post_nonlinear_plus_weight] return attention_matrix def _create_trilinear_attention_matrix(src_unit_dim, trg_unit_dim, regularizer, random_seed, trainable, scope="trilinear"): weight_initializer = 
create_variable_initializer("glorot_uniform", random_seed) if src_unit_dim != trg_unit_dim: raise ValueError("src dim {0} and trg dim must be the same for trilinear attention".format(src_unit_dim, trg_unit_dim)) else: mul_unit_dim = src_unit_dim trilinear_src_weight = tf.get_variable("{0}/src_weight".format(scope), shape=[src_unit_dim, 1], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) trilinear_trg_weight = tf.get_variable("{0}/trg_weight".format(scope), shape=[trg_unit_dim, 1], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) trilinear_mul_weight = tf.get_variable("{0}/mul_weight".format(scope), shape=[1, 1, mul_unit_dim], initializer=weight_initializer, regularizer=regularizer, trainable=trainable, dtype=tf.float32) attention_matrix = [trilinear_src_weight, trilinear_trg_weight, trilinear_mul_weight] return attention_matrix def _generate_attention_score(input_src_data, input_trg_data, attention_matrix, attention_score_type): if attention_score_type == "dot": input_attention_score = _generate_dot_attention_score(input_src_data, input_trg_data) elif attention_score_type == "scaled_dot": input_attention_score = _generate_scaled_dot_attention_score(input_src_data, input_trg_data) elif attention_score_type == "linear": input_attention_score = _generate_linear_attention_score(input_src_data, input_trg_data, attention_matrix) elif attention_score_type == "bilinear": input_attention_score = _generate_bilinear_attention_score(input_src_data, input_trg_data, attention_matrix) elif attention_score_type == "nonlinear": input_attention_score = _generate_nonlinear_attention_score(input_src_data, input_trg_data, attention_matrix) elif attention_score_type == "linear_plus": input_attention_score = _generate_linear_plus_attention_score(input_src_data, input_trg_data, attention_matrix) elif attention_score_type == "nonlinear_plus": input_attention_score = _generate_nonlinear_plus_attention_score(input_src_data, input_trg_data, attention_matrix) elif attention_score_type == "trilinear": input_attention_score = _generate_trilinear_attention_score(input_src_data, input_trg_data, attention_matrix) else: raise ValueError("unsupported attention score type {0}".format(attention_score_type)) return input_attention_score def _generate_dot_attention_score(input_src_data, input_trg_data): input_attention = tf.matmul(input_src_data, input_trg_data, transpose_b=True) return input_attention def _generate_scaled_dot_attention_score(input_src_data, input_trg_data): src_unit_dim = tf.shape(input_src_data)[2] input_attention = tf.matmul(input_src_data, input_trg_data, transpose_b=True) input_attention = input_attention / tf.sqrt(tf.cast(src_unit_dim, dtype=tf.float32)) return input_attention def _generate_linear_attention_score(input_src_data, input_trg_data, attention_matrix): input_src_shape = tf.shape(input_src_data) input_trg_shape = tf.shape(input_trg_data) batch_size = input_src_shape[0] src_max_length = input_src_shape[1] trg_max_length = input_trg_shape[1] src_unit_dim = input_src_shape[2] trg_unit_dim = input_trg_shape[2] linear_src_weight = attention_matrix[0] linear_trg_weight = attention_matrix[1] input_src_data = tf.reshape(input_src_data, shape=[-1, src_unit_dim]) input_src_data = tf.matmul(input_src_data, linear_src_weight, transpose_b=True) input_src_data = tf.reshape(input_src_data, shape=[batch_size, src_max_length, 1, -1]) input_trg_data = tf.reshape(input_trg_data, shape=[-1, trg_unit_dim]) input_trg_data = 
tf.matmul(input_trg_data, linear_trg_weight, transpose_b=True) input_trg_data = tf.reshape(input_trg_data, shape=[batch_size, 1, trg_max_length, -1]) input_src_data = tf.tile(input_src_data, multiples=[1, 1, trg_max_length, 1]) input_trg_data = tf.tile(input_trg_data, multiples=[1, src_max_length, 1, 1]) input_attention = input_src_data + input_trg_data input_attention = tf.reshape(input_attention, shape=[batch_size, src_max_length, trg_max_length]) return input_attention def _generate_bilinear_attention_score(input_src_data, input_trg_data, attention_matrix): input_src_shape = tf.shape(input_src_data) batch_size = input_src_shape[0] src_max_length = input_src_shape[1] src_unit_dim = input_src_shape[2] bilinear_weight = attention_matrix[0] input_src_data = tf.reshape(input_src_data, shape=[-1, src_unit_dim]) input_src_data = tf.matmul(input_src_data, bilinear_weight) input_src_data = tf.reshape(input_src_data, shape=[batch_size, src_max_length, -1]) input_attention = tf.matmul(input_src_data, input_trg_data, transpose_b=True) return input_attention def _generate_nonlinear_attention_score(input_src_data, input_trg_data, attention_matrix): input_src_shape = tf.shape(input_src_data) input_trg_shape = tf.shape(input_trg_data) batch_size = input_src_shape[0] src_max_length = input_src_shape[1] trg_max_length = input_trg_shape[1] src_unit_dim = input_src_shape[2] trg_unit_dim = input_trg_shape[2] pre_nonlinear_src_weight = attention_matrix[0] pre_nonlinear_trg_weight = attention_matrix[1] pre_nonlinear_bias = tf.reshape(attention_matrix[2], shape=[1, 1, 1, -1]) post_nonlinear_weight = attention_matrix[3] input_src_data = tf.reshape(input_src_data, shape=[-1, src_unit_dim]) input_src_data = tf.matmul(input_src_data, pre_nonlinear_src_weight, transpose_b=True) input_src_data = tf.reshape(input_src_data, shape=[batch_size, src_max_length, 1, -1]) input_trg_data = tf.reshape(input_trg_data, shape=[-1, trg_unit_dim]) input_trg_data = tf.matmul(input_trg_data, pre_nonlinear_trg_weight, transpose_b=True) input_trg_data = tf.reshape(input_trg_data, shape=[batch_size, 1, trg_max_length, -1]) input_src_data = tf.tile(input_src_data, multiples=[1, 1, trg_max_length, 1]) input_trg_data = tf.tile(input_trg_data, multiples=[1, src_max_length, 1, 1]) input_attention = input_src_data + input_trg_data input_attention = tf.nn.tanh(input_attention + pre_nonlinear_bias) attention_dim = tf.shape(input_attention)[-1] input_attention = tf.reshape(input_attention, shape=[-1, attention_dim]) input_attention = tf.matmul(input_attention, post_nonlinear_weight, transpose_b=True) input_attention = tf.reshape(input_attention, shape=[batch_size, src_max_length, trg_max_length]) return input_attention def _generate_linear_plus_attention_score(input_src_data, input_trg_data, attention_matrix): input_src_shape = tf.shape(input_src_data) input_trg_shape = tf.shape(input_trg_data) batch_size = input_src_shape[0] src_max_length = input_src_shape[1] trg_max_length = input_trg_shape[1] src_unit_dim = input_src_shape[2] trg_unit_dim = input_trg_shape[2] mul_unit_dim = src_unit_dim linear_plus_src_weight = attention_matrix[0] linear_plus_trg_weight = attention_matrix[1] linear_plus_mul_weight = attention_matrix[2] input_src_data = tf.expand_dims(input_src_data, axis=2) input_trg_data = tf.expand_dims(input_trg_data, axis=1) input_src_data = tf.tile(input_src_data, multiples=[1, 1, trg_max_length, 1]) input_trg_data = tf.tile(input_trg_data, multiples=[1, src_max_length, 1, 1]) input_mul_data = input_src_data * input_trg_data 
input_src_data = tf.reshape(input_src_data, shape=[-1, src_unit_dim]) input_src_data = tf.matmul(input_src_data, linear_plus_src_weight, transpose_b=True) input_trg_data = tf.reshape(input_trg_data, shape=[-1, trg_unit_dim]) input_trg_data = tf.matmul(input_trg_data, linear_plus_trg_weight, transpose_b=True) input_mul_data = tf.reshape(input_mul_data, shape=[-1, mul_unit_dim]) input_mul_data = tf.matmul(input_mul_data, linear_plus_mul_weight, transpose_b=True) input_attention = input_src_data + input_trg_data + input_mul_data input_attention = tf.reshape(input_attention, shape=[batch_size, src_max_length, trg_max_length]) return input_attention def _generate_nonlinear_plus_attention_score(input_src_data, input_trg_data, attention_matrix): input_src_shape = tf.shape(input_src_data) input_trg_shape = tf.shape(input_trg_data) batch_size = input_src_shape[0] src_max_length = input_src_shape[1] trg_max_length = input_trg_shape[1] src_unit_dim = input_src_shape[2] trg_unit_dim = input_trg_shape[2] mul_unit_dim = src_unit_dim pre_nonlinear_plus_src_weight = attention_matrix[0] pre_nonlinear_plus_trg_weight = attention_matrix[1] pre_nonlinear_plus_mul_weight = attention_matrix[2] pre_nonlinear_plus_bias = tf.reshape(attention_matrix[3], shape=[1, 1, 1, -1]) post_nonlinear_plus_weight = attention_matrix[4] input_src_data = tf.reshape(input_src_data, shape=[batch_size, src_max_length, 1, -1]) input_trg_data = tf.reshape(input_trg_data, shape=[batch_size, 1, trg_max_length, -1]) input_src_data = tf.tile(input_src_data, multiples=[1, 1, trg_max_length, 1]) input_trg_data = tf.tile(input_trg_data, multiples=[1, src_max_length, 1, 1]) input_mul_data = input_src_data * input_trg_data input_src_data = tf.reshape(input_src_data, shape=[-1, src_unit_dim]) input_src_data = tf.matmul(input_src_data, pre_nonlinear_plus_src_weight, transpose_b=True) input_trg_data = tf.reshape(input_trg_data, shape=[-1, trg_unit_dim]) input_trg_data = tf.matmul(input_trg_data, pre_nonlinear_plus_trg_weight, transpose_b=True) input_mul_data = tf.reshape(input_mul_data, shape=[-1, mul_unit_dim]) input_mul_data = tf.matmul(input_mul_data, pre_nonlinear_plus_mul_weight, transpose_b=True) input_attention = input_src_data + input_trg_data + input_mul_data input_attention = tf.nn.tanh(input_attention + pre_nonlinear_plus_bias) input_attention = tf.matmul(input_attention, post_nonlinear_plus_weight, transpose_b=True) input_attention = tf.reshape(input_attention, shape=[batch_size, src_max_length, trg_max_length]) return input_attention def _generate_trilinear_attention_score(input_src_data, input_trg_data, attention_matrix): input_src_shape = tf.shape(input_src_data) input_trg_shape = tf.shape(input_trg_data) batch_size = input_src_shape[0] src_max_length = input_src_shape[1] trg_max_length = input_trg_shape[1] src_unit_dim = input_src_shape[2] trg_unit_dim = input_trg_shape[2] mul_unit_dim = src_unit_dim trilinear_src_weight = attention_matrix[0] trilinear_trg_weight = attention_matrix[1] trilinear_mul_weight = attention_matrix[2] input_src_part = tf.reshape(input_src_data, shape=[-1, src_unit_dim]) input_trg_part = tf.reshape(input_trg_data, shape=[-1, trg_unit_dim]) input_src_part = tf.matmul(input_src_part, trilinear_src_weight) input_trg_part = tf.matmul(input_trg_part, trilinear_trg_weight) input_src_part = tf.reshape(input_src_part, shape=[batch_size, src_max_length, 1]) input_trg_part = tf.reshape(input_trg_part, shape=[batch_size, 1, trg_max_length]) input_src_score = tf.tile(input_src_part, multiples=[1, 1, trg_max_length]) 
input_trg_score = tf.tile(input_trg_part, multiples=[1, src_max_length, 1]) input_src_part = input_src_data * trilinear_mul_weight input_trg_part = tf.transpose(input_trg_data, perm=[0, 2, 1]) input_mul_score = tf.matmul(input_src_part, input_trg_part) input_attention = input_src_score + input_trg_score + input_mul_score return input_attention def _generate_attention_mask(input_src_mask, input_trg_mask, remove_diag=False): input_mask = tf.matmul(input_src_mask, input_trg_mask, transpose_b=True) if remove_diag == True: src_max_length = tf.shape(input_src_mask)[1] trg_max_length = tf.shape(input_trg_mask)[1] input_mask = input_mask * (1 - tf.eye(src_max_length, trg_max_length)) return input_mask def _create_projection_layer(unit_dim, hidden_activation, use_bias, regularizer, random_seed, trainable, name): weight_initializer = create_variable_initializer("glorot_uniform", random_seed) bias_initializer = create_variable_initializer("zero") projection_layer = tf.layers.Dense(units=unit_dim, activation=hidden_activation, use_bias=use_bias, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=regularizer, bias_regularizer=regularizer, trainable=trainable, name=name) return projection_layer class Attention(object): def __init__(self, src_dim, trg_dim, att_dim, score_type, dropout, att_dropout=0.0, layer_dropout=0.0, layer_norm=False, residual_connect=False, is_self=False, external_matrix=None, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope="attention"): self.src_dim = src_dim self.trg_dim = trg_dim self.att_dim = att_dim self.score_type = score_type self.dropout = dropout self.att_dropout = att_dropout self.layer_dropout = layer_dropout self.layer_norm = layer_norm self.residual_connect = residual_connect self.is_self = is_self self.regularizer = regularizer self.random_seed = random_seed self.trainable = trainable self.scope = scope self.device_spec = get_device_spec(default_gpu_id, num_gpus) with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec): if external_matrix == None: self.attention_matrix = _create_attention_matrix(self.src_dim, self.trg_dim, self.att_dim, self.score_type, self.regularizer, self.random_seed, self.trainable, "att_matrix") else: self.attention_matrix = external_matrix self.dropout_layer = Dropout(rate=self.dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed) self.att_dropout_layer = Dropout(rate=self.att_dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed, scope="att_dropout") if self.layer_norm == True: self.src_norm_layer = LayerNorm(layer_dim=self.src_dim, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=self.regularizer, trainable=self.trainable, scope="src_layer_norm") if self.is_self == True: self.trg_norm_layer = self.src_norm_layer else: self.trg_norm_layer = LayerNorm(layer_dim=self.trg_dim, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=self.regularizer, trainable=self.trainable, scope="trg_layer_norm") def __call__(self, input_src_data, input_trg_data, input_src_mask, input_trg_mask): with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec): input_src_shape = tf.shape(input_src_data) input_trg_shape = tf.shape(input_trg_data) input_src_mask_shape = tf.shape(input_src_mask) input_trg_mask_shape = tf.shape(input_trg_mask) src_shape_size = len(input_src_data.get_shape().as_list()) trg_shape_size = 
len(input_trg_data.get_shape().as_list()) if src_shape_size > 3: input_src_data = tf.reshape(input_src_data, shape=tf.concat([[-1], input_src_shape[-2:]], axis=0)) input_src_mask = tf.reshape(input_src_mask, shape=tf.concat([[-1], input_src_mask_shape[-2:]], axis=0)) if trg_shape_size > 3: input_trg_data = tf.reshape(input_trg_data, shape=tf.concat([[-1], input_trg_shape[-2:]], axis=0)) input_trg_mask = tf.reshape(input_trg_mask, shape=tf.concat([[-1], input_trg_mask_shape[-2:]], axis=0)) input_src_attention = input_src_data input_trg_attention = input_trg_data input_src_attention_mask = input_src_mask input_trg_attention_mask = input_trg_mask if self.layer_norm == True: input_src_attention, input_src_attention_mask = self.src_norm_layer(input_src_attention, input_src_attention_mask) input_trg_attention, input_trg_attention_mask = self.trg_norm_layer(input_trg_attention, input_trg_attention_mask) input_attention_score = _generate_attention_score(input_src_attention, input_trg_attention, self.attention_matrix, self.score_type) input_attention_mask = _generate_attention_mask(input_src_attention_mask, input_trg_attention_mask, self.is_self) input_attention_score = input_attention_score * input_attention_mask input_attention_weight = softmax_with_mask(input_attention_score, input_attention_mask, axis=-1) * input_attention_mask input_attention_weight, _ = self.att_dropout_layer(input_attention_weight, input_attention_mask) input_attention = tf.matmul(input_attention_weight, input_trg_attention) input_attention, _ = self.dropout_layer(input_attention, input_src_mask) if self.residual_connect == True and self.is_self == True: output_attention, output_mask = tf.cond(tf.random_uniform([]) < self.layer_dropout, lambda: (input_src_data, input_src_mask), lambda: (input_attention + input_src_data, input_src_mask)) output_attention = output_attention * output_mask else: output_attention = input_attention * input_src_mask output_mask = input_src_mask if src_shape_size > 3: output_attention = tf.reshape(output_attention, shape=tf.concat([input_src_shape[:-2], input_trg_shape[-2:]], axis=0)) output_mask = tf.reshape(output_mask, shape=tf.concat([input_src_mask_shape[:-2], input_trg_mask_shape[-2:]], axis=0)) return output_attention, output_mask, output_attention_score, output_score_mask def get_attention_matrix(self): return self.attention_matrix class MaxAttention(object): def __init__(self, src_dim, trg_dim, att_dim, score_type, dropout, att_dropout=0.0, layer_dropout=0.0, layer_norm=False, residual_connect=False, is_self=False, external_matrix=None, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope="max_att"): self.src_dim = src_dim self.trg_dim = trg_dim self.att_dim = att_dim self.score_type = score_type self.dropout = dropout self.att_dropout = att_dropout self.layer_dropout = layer_dropout self.layer_norm = layer_norm self.residual_connect = residual_connect self.is_self = is_self self.regularizer = regularizer self.random_seed = random_seed self.trainable = trainable self.scope = scope self.device_spec = get_device_spec(default_gpu_id, num_gpus) with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec): if external_matrix == None: self.attention_matrix = _create_attention_matrix(self.src_dim, self.trg_dim, self.att_dim, self.score_type, self.regularizer, self.random_seed, self.trainable, "att_matrix") else: self.attention_matrix = external_matrix self.dropout_layer = Dropout(rate=self.dropout, num_gpus=num_gpus, 
default_gpu_id=default_gpu_id, random_seed=self.random_seed) self.att_dropout_layer = Dropout(rate=self.att_dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed, scope="att_dropout") if self.layer_norm == True: self.src_norm_layer = LayerNorm(layer_dim=self.src_dim, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=self.regularizer, trainable=self.trainable, scope="src_layer_norm") if self.is_self == True: self.trg_norm_layer = self.src_norm_layer else: self.trg_norm_layer = LayerNorm(layer_dim=self.trg_dim, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=self.regularizer, trainable=self.trainable, scope="trg_layer_norm") def __call__(self, input_src_data, input_trg_data, input_src_mask, input_trg_mask): with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec): input_src_shape = tf.shape(input_src_data) input_trg_shape = tf.shape(input_trg_data) input_src_mask_shape = tf.shape(input_src_mask) input_trg_mask_shape = tf.shape(input_trg_mask) src_shape_size = len(input_src_data.get_shape().as_list()) trg_shape_size = len(input_trg_data.get_shape().as_list()) if src_shape_size > 3: input_src_data = tf.reshape(input_src_data, shape=tf.concat([[-1], input_src_shape[-2:]], axis=0)) input_src_mask = tf.reshape(input_src_mask, shape=tf.concat([[-1], input_src_mask_shape[-2:]], axis=0)) if trg_shape_size > 3: input_trg_data = tf.reshape(input_trg_data, shape=tf.concat([[-1], input_trg_shape[-2:]], axis=0)) input_trg_mask = tf.reshape(input_trg_mask, shape=tf.concat([[-1], input_trg_mask_shape[-2:]], axis=0)) input_src_attention = input_src_data input_src_attention_mask = input_src_mask input_trg_attention = input_trg_data input_trg_attention_mask = input_trg_mask if self.layer_norm == True: input_src_attention, input_src_attention_mask = self.src_norm_layer(input_src_attention, input_src_attention_mask) input_trg_attention, input_trg_attention_mask = self.trg_norm_layer(input_trg_attention, input_trg_attention_mask) input_attention_score = _generate_attention_score(input_src_attention, input_trg_attention, self.attention_matrix, self.score_type) input_attention_mask = _generate_attention_mask(input_src_attention_mask, input_trg_attention_mask, self.is_self) input_attention_score = tf.transpose(tf.reduce_max(input_attention_score, axis=-1, keepdims=True), perm=[0, 2, 1]) input_attention_mask = tf.transpose(tf.reduce_max(input_attention_mask, axis=-1, keepdims=True), perm=[0, 2, 1]) input_attention_score = input_attention_score * input_attention_mask input_attention_weight = softmax_with_mask(input_attention_score, input_attention_mask, axis=-1) * input_attention_mask input_attention_weight, _ = self.att_dropout_layer(input_attention_weight, input_attention_mask) input_attention = tf.matmul(input_attention_weight, input_src_attention) input_attention, _ = self.dropout_layer(input_attention, input_src_mask) src_max_length = tf.shape(input_src_attention)[1] input_attention = tf.tile(input_attention, multiples=[1, src_max_length, 1]) if self.residual_connect == True and self.is_self == True: output_attention, output_mask = tf.cond(tf.random_uniform([]) < self.layer_dropout, lambda: (input_src_data, input_src_mask), lambda: (input_attention + input_src_data, input_src_mask)) output_attention = output_attention * output_mask else: output_attention = input_attention * input_src_mask output_mask = input_src_mask if src_shape_size > 3: output_attention = tf.reshape(output_attention, shape=tf.concat([input_src_shape[:-2], 
input_trg_shape[-2:]], axis=0)) output_mask = tf.reshape(output_mask, shape=tf.concat([input_src_mask_shape[:-2], input_trg_mask_shape[-2:]], axis=0)) return output_attention, output_mask def get_attention_matrix(self): return self.attention_matrix class CoAttention(object): def __init__(self, src_dim, trg_dim, att_dim, score_type, dropout, att_dropout=0.0, layer_dropout=0.0, layer_norm=False, residual_connect=False, is_self=False, external_matrix=None, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope="co_att"): self.src_dim = src_dim self.trg_dim = trg_dim self.att_dim = att_dim self.score_type = score_type self.dropout = dropout self.att_dropout = att_dropout self.layer_dropout = layer_dropout self.layer_norm = layer_norm self.residual_connect = residual_connect self.is_self = is_self self.regularizer = regularizer self.random_seed = random_seed self.trainable = trainable self.scope = scope self.device_spec = get_device_spec(default_gpu_id, num_gpus) with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec): if external_matrix == None: self.attention_matrix = _create_attention_matrix(self.src_dim, self.trg_dim, self.att_dim, self.score_type, self.regularizer, self.random_seed, self.trainable, "att_matrix") else: self.attention_matrix = external_matrix self.dropout_layer = Dropout(rate=self.dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed) self.s2t_att_dropout_layer = Dropout(rate=self.att_dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed, scope="s2t_att_dropout") self.t2s_att_dropout_layer = Dropout(rate=self.att_dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed, scope="t2s_att_dropout") if self.layer_norm == True: self.src_norm_layer = LayerNorm(layer_dim=self.src_dim, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=self.regularizer, trainable=self.trainable, scope="src_layer_norm") if self.is_self == True: self.trg_norm_layer = self.src_norm_layer else: self.trg_norm_layer = LayerNorm(layer_dim=self.trg_dim, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=self.regularizer, trainable=self.trainable, scope="trg_layer_norm") def __call__(self, input_src_data, input_trg_data, input_src_mask, input_trg_mask): with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec): input_src_shape = tf.shape(input_src_data) input_trg_shape = tf.shape(input_trg_data) input_src_mask_shape = tf.shape(input_src_mask) input_trg_mask_shape = tf.shape(input_trg_mask) src_shape_size = len(input_src_data.get_shape().as_list()) trg_shape_size = len(input_trg_data.get_shape().as_list()) if src_shape_size > 3: input_src_data = tf.reshape(input_src_data, shape=tf.concat([[-1], input_src_shape[-2:]], axis=0)) input_src_mask = tf.reshape(input_src_mask, shape=tf.concat([[-1], input_src_mask_shape[-2:]], axis=0)) if trg_shape_size > 3: input_trg_data = tf.reshape(input_trg_data, shape=tf.concat([[-1], input_trg_shape[-2:]], axis=0)) input_trg_mask = tf.reshape(input_trg_mask, shape=tf.concat([[-1], input_trg_mask_shape[-2:]], axis=0)) input_src_attention = input_src_data input_src_attention_mask = input_src_mask input_trg_attention = input_trg_data input_trg_attention_mask = input_trg_mask if self.layer_norm == True: input_src_attention, input_src_attention_mask = self.src_norm_layer(input_src_attention, input_src_attention_mask) input_trg_attention, input_trg_attention_mask = 
self.trg_norm_layer(input_trg_attention, input_trg_attention_mask) input_attention_score = _generate_attention_score(input_src_attention, input_trg_attention, self.attention_matrix, self.score_type) input_attention_mask = _generate_attention_mask(input_src_attention_mask, input_trg_attention_mask, self.is_self) input_s2t_att_score = input_attention_score input_s2t_att_mask = input_attention_mask input_s2t_att_score = input_s2t_att_score * input_s2t_att_mask input_t2s_att_score = tf.transpose(input_attention_score, perm=[0, 2, 1]) input_t2s_att_mask = tf.transpose(input_attention_mask, perm=[0, 2, 1]) input_t2s_att_score = input_t2s_att_score * input_t2s_att_mask input_s2t_att_weight = softmax_with_mask(input_s2t_att_score, input_s2t_att_mask, axis=-1) * input_s2t_att_mask input_s2t_att_weight, _ = self.s2t_att_dropout_layer(input_s2t_att_weight, input_s2t_att_mask) input_t2s_att_weight = softmax_with_mask(input_t2s_att_score, input_t2s_att_mask, axis=-1) * input_t2s_att_mask input_t2s_att_weight, _ = self.t2s_att_dropout_layer(input_t2s_att_weight, input_t2s_att_mask) input_attention_weight = tf.matmul(input_s2t_att_weight, input_t2s_att_weight) input_attention = tf.matmul(input_attention_weight, input_src_attention) input_attention, _ = self.dropout_layer(input_attention, input_src_mask) if self.residual_connect == True and self.is_self == True: output_attention, output_mask = tf.cond(tf.random_uniform([]) < self.layer_dropout, lambda: (input_src_data, input_src_mask), lambda: (input_attention + input_src_data, input_src_mask)) output_attention = output_attention * output_mask else: output_attention = input_attention * input_src_mask output_mask = input_src_mask if src_shape_size > 3: output_attention = tf.reshape(output_attention, shape=tf.concat([input_src_shape[:-2], input_trg_shape[-2:]], axis=0)) output_mask = tf.reshape(output_mask, shape=tf.concat([input_src_mask_shape[:-2], input_trg_mask_shape[-2:]], axis=0)) return output_attention, output_mask def get_attention_matrix(self): return self.attention_matrix class GatedAttention(object):
Apache License 2.0
intel/dffml
dffml/util/data.py
traverse_set
python
def traverse_set(target, *args, value):
    if len(args) == 1:
        args = split_dot_seperated(args[0])
    if len(args) == 1:
        target[args[0]] = value
        return

    current = target
    for level in args[:-1]:
        if level not in current:
            current[level] = {}
        current = current[level]
    current[args[-1]] = value
    return
Examples
--------

>>> from dffml import traverse_set
>>>
>>> d = {"one": {"two": 3}}
>>> traverse_set(d,"one.two", value = "Three")
>>> d["one"]["two"]
'Three'
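For illustration only, a minimal standalone sketch of the same dotted-path behaviour (the helper name _set_by_path is invented for this example and is not part of dffml); it shows that intermediate dictionaries are created on demand:

def _set_by_path(target: dict, path: str, value) -> None:
    # Split "a.b.c" into keys and walk/create nested dicts down to the parent key.
    keys = path.split(".")
    current = target
    for key in keys[:-1]:
        current = current.setdefault(key, {})
    current[keys[-1]] = value

d = {}
_set_by_path(d, "feed.many.send", 42)
assert d == {"feed": {"many": {"send": 42}}}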
https://github.com/intel/dffml/blob/e7a356dfe8fd6fdf3cac7f8c218abc7d650fd93c/dffml/util/data.py#L156-L181
import ast import uuid import types import pydoc import inspect import dataclasses import collections from functools import wraps import pathlib from typing import Callable from .log import LOGGER try: from typing import get_origin, get_args except ImportError: def get_origin(t): return getattr(t, "__origin__", None) def get_args(t): return getattr(t, "__args__", None) def merge(one, two, list_append: bool = True): for key, value in two.items(): if key in one: if isinstance(value, dict): merge(one[key], two[key], list_append=list_append) elif list_append and isinstance(value, list): one[key] += two[key] else: one[key] = two[key] def traverse_config_set(target, *args): path, value = args[:-1], args[-1] current = target last = target for level in path: if not level in current: current[level] = {"plugin": None, "config": {}} last = current[level] current = last["config"] last["plugin"] = value return target def traverse_config_get(target, *args): current = target last = target for level in args: last = current[level] current = last["config"] return last["plugin"] def split_dot_seperated(val: str) -> "List[str]": raw_split = val.split(".") vals = [] i = 0 tl = [] trig = False for x in raw_split: if "'" in x or '"' in x: trig = not trig if not trig: tl.append(x) k = ".".join(tl).replace("'", "").replace('"', "") vals.append(k) tl = [] continue if trig: tl.append(x) continue vals.append(x) return vals def traverse_get(target, *args): if len(args) == 1: args = split_dot_seperated(args[0]) current = target for level in args: try: current = current[level] except TypeError: LOGGER.getChild("traverse_get").error( "args %r, target: %r", args, target ) raise return current
MIT License
edinburghnlp/nematus
nematus/server_translator.py
Translator._init_queues
python
def _init_queues(self):
    self._input_queue = Queue()
    self._output_queue = Queue()
Sets up shared queues for inter-process communication.
https://github.com/edinburghnlp/nematus/blob/d55074a2e342a33a4d5b0288cbad6269bd47271d/nematus/server_translator.py#L76-L81
import logging import sys import time from multiprocessing import Process, Queue from collections import defaultdict from queue import Empty import numpy from beam_search_sampler import BeamSearchSampler from config import load_config_from_json_file import exception import model_loader import rnn_model from transformer import Transformer as TransformerModel import translate_utils import util class Translation(object): def __init__(self, source_words, target_words, sentence_id=None, score=0, hypothesis_id=None): self.source_words = source_words self.target_words = target_words self.sentence_id = sentence_id self.score = score self.hypothesis_id = hypothesis_id class QueueItem(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) class Translator(object): def __init__(self, settings): self._models = settings.models self._num_processes = settings.num_processes self._verbose = settings.verbose self._retrieved_translations = defaultdict(dict) self._batch_size = settings.minibatch_size self._load_model_options() self._init_queues() self._init_processes() def _load_model_options(self): self._options = [] for model in self._models: config = load_config_from_json_file(model) setattr(config, 'reload', model) self._options.append(config) _, _, _, self._num_to_target = util.load_dictionaries(self._options[0])
BSD 3-Clause New or Revised License
ncbray/pystream
lib/PADS/LCA.py
RangeMin.__init__
python
def __init__(self,X):
    self._data = list(X)
    if len(X) > 1:
        big = map(max, self._ansv(False), self._ansv(True))
        parents = dict([(i,big[i][1]) for i in range(len(X)) if big[i]])
        self._lca = LCA(parents)
Set up structure with sequence X as data. Uses an LCA structure on a Cartesian tree for the input.
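A hedged sketch of the idea behind this constructor, not the PADS implementation itself: in the Cartesian tree of X, the lowest common ancestor of two positions is the position of the minimum value between them, so an LCA structure answers range-minimum queries. The names below (cartesian_tree_parents, lca) are invented for the illustration:

def cartesian_tree_parents(X):
    # parent[i] is the parent position of i in the Cartesian tree; the overall
    # argmin becomes the root and keeps parent None.
    parent = [None] * len(X)

    def build(lo, hi, par):  # half-open range [lo, hi)
        if lo >= hi:
            return
        m = min(range(lo, hi), key=X.__getitem__)
        parent[m] = par
        build(lo, m, m)
        build(m + 1, hi, m)

    build(0, len(X), None)
    return parent

def lca(parent, i, j):
    # Brute-force LCA: record i's ancestor chain, then climb from j until we hit it.
    ancestors = set()
    while i is not None:
        ancestors.add(i)
        i = parent[i]
    while j not in ancestors:
        j = parent[j]
    return j

X = [7, 2, 9, 4, 1, 8]
parents = cartesian_tree_parents(X)
assert X[lca(parents, 2, 5)] == min(X[2:6]) == 1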
https://github.com/ncbray/pystream/blob/70bba5646d6512adb6803564c22268d3424c66d8/lib/PADS/LCA.py#L32-L39
import unittest,random
from UnionFind import UnionFind
from sets import Set

if 'True' not in globals():
    globals()['True'] = not None
    globals()['False'] = not True

class RangeMin:
Apache License 2.0
anhaidgroup/deepmatcher
deepmatcher/models/core.py
MatchingModel.initialize
python
def initialize(self, train_dataset, init_batch=None):
    if self._initialized:
        return

    self.meta = Bunch(**train_dataset.__dict__)
    if hasattr(self.meta, 'fields'):
        del self.meta.fields
        del self.meta.examples
    self._register_train_buffer('state_meta', Bunch(**self.meta.__dict__))
    del self.state_meta.metadata

    self.attr_summarizers = dm.modules.ModuleMap()
    if isinstance(self.attr_summarizer, Mapping):
        for name, summarizer in self.attr_summarizer.items():
            self.attr_summarizers[name] = AttrSummarizer._create(
                summarizer, hidden_size=self.hidden_size)
        assert len(
            set(self.attr_summarizers.keys()) ^ set(self.meta.canonical_text_fields)
        ) == 0
    else:
        self.attr_summarizer = AttrSummarizer._create(
            self.attr_summarizer, hidden_size=self.hidden_size)
        for name in self.meta.canonical_text_fields:
            self.attr_summarizers[name] = copy.deepcopy(self.attr_summarizer)

    if self.attr_condense_factor == 'auto':
        self.attr_condense_factor = min(len(self.meta.canonical_text_fields), 6)
        if self.attr_condense_factor == 1:
            self.attr_condense_factor = None

    if not self.attr_condense_factor:
        self.attr_condensors = None
    else:
        self.attr_condensors = dm.modules.ModuleMap()
        for name in self.meta.canonical_text_fields:
            self.attr_condensors[name] = dm.modules.Transform(
                '1-layer-highway',
                non_linearity=None,
                output_size=self.hidden_size // self.attr_condense_factor)

    self.attr_comparators = dm.modules.ModuleMap()
    if isinstance(self.attr_comparator, Mapping):
        for name, comparator in self.attr_comparator.items():
            self.attr_comparators[name] = _create_attr_comparator(comparator)
        assert len(
            set(self.attr_comparators.keys()) ^ set(self.meta.canonical_text_fields)
        ) == 0
    else:
        if isinstance(self.attr_summarizer, AttrSummarizer):
            self.attr_comparator = self._get_attr_comparator(
                self.attr_comparator, self.attr_summarizer)
        else:
            if self.attr_comparator is None:
                raise ValueError('"attr_comparator" must be specified if '
                                 '"attr_summarizer" is custom.')
            self.attr_comparator = _create_attr_comparator(self.attr_comparator)
        for name in self.meta.canonical_text_fields:
            self.attr_comparators[name] = copy.deepcopy(self.attr_comparator)

    self.attr_merge = dm.modules._merge_module(self.attr_merge)
    self.classifier = _utils.get_module(
        Classifier, self.classifier, hidden_size=self.hidden_size)

    self._reset_embeddings(train_dataset.vocabs)

    if not init_batch:
        run_iter = MatchingIterator(
            train_dataset,
            train_dataset,
            train=False,
            batch_size=4,
            device='cpu',
            sort_in_buckets=False)
        init_batch = next(run_iter.__iter__())
    self.forward(init_batch)

    self.state_meta.init_batch = init_batch

    self._initialized = True
    logger.info('Successfully initialized MatchingModel with {:d} trainable '
                'parameters.'.format(tally_parameters(self)))
Initialize (not lazily) the matching model given the actual training data.

Instantiates all sub-components and their trainable parameters.

Args:
    train_dataset (:class:`~deepmatcher.data.MatchingDataset`):
        The training dataset obtained using :func:`deepmatcher.data.process`.
    init_batch (:class:`~deepmatcher.batch.MatchingBatch`):
        A batch of data to forward propagate through the model. If None, a batch
        is drawn from the training dataset.
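A hedged usage sketch: initialize() is normally called for you by run_train(), but the docstring above implies it can also be invoked directly once a processed training set exists. The data directory and CSV names below are placeholders, not files shipped with deepmatcher:

import deepmatcher as dm

# Placeholder paths; dm.data.process returns MatchingDataset objects.
train, validation, test = dm.data.process(
    path='data_dir', train='train.csv', validation='validation.csv', test='test.csv')

model = dm.MatchingModel(attr_summarizer='hybrid')
model.initialize(train)  # instantiates attribute summarizers, comparators, classifier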
https://github.com/anhaidgroup/deepmatcher/blob/a89ffe6cd246f690afd2772e47eb071741160f16/deepmatcher/models/core.py#L264-L362
import copy import logging from collections import Mapping import dill import six import deepmatcher as dm import torch import torch.nn as nn from . import _utils from ..data import MatchingDataset, MatchingIterator from ..runner import Runner from ..utils import Bunch, tally_parameters logger = logging.getLogger('deepmatcher.core') class MatchingModel(nn.Module): def __init__(self, attr_summarizer='hybrid', attr_condense_factor='auto', attr_comparator=None, attr_merge='concat', classifier='2-layer-highway', hidden_size=300): super(MatchingModel, self).__init__() self.attr_summarizer = attr_summarizer self.attr_condense_factor = attr_condense_factor self.attr_comparator = attr_comparator self.attr_merge = attr_merge self.classifier = classifier self.hidden_size = hidden_size self._train_buffers = set() self._initialized = False def run_train(self, *args, **kwargs): return Runner.train(self, *args, **kwargs) def run_eval(self, *args, **kwargs): return Runner.eval(self, *args, **kwargs) def run_prediction(self, *args, **kwargs): return Runner.predict(self, *args, **kwargs)
BSD 3-Clause New or Revised License
djpugh/fastapi_aad_auth
src/fastapi_aad_auth/_base/authenticators/session.py
SessionAuthenticator.get_access_token
python
def get_access_token(self, user, scopes=None, app_scopes=True):
    raise NotImplementedError('Implement in subclass')
Get the access token for the user.
https://github.com/djpugh/fastapi_aad_auth/blob/4089ca00abb56d613e40be23e700a645e2ce264b/src/fastapi_aad_auth/_base/authenticators/session.py#L74-L76
from starlette.requests import Request from starlette.responses import RedirectResponse from fastapi_aad_auth._base.state import AuthenticationState from fastapi_aad_auth.errors import ConfigurationError from fastapi_aad_auth.mixins import LoggingMixin class SessionAuthenticator(LoggingMixin): def __init__(self, session_validator, token_validator): self._session_validator = session_validator self._token_validator = token_validator super().__init__() def redirect_if_authenticated(self, auth_state, redirect='/'): if auth_state.is_authenticated(): self.logger.info(f'Logged in, redirecting to {redirect}') else: redirect = '/login' return RedirectResponse(redirect) def redirect_to_provider_login(self, auth_state, request): self.logger.debug(f'state {auth_state}') auth_state.save_to_session(self._session_validator._session_serializer, request.session) authorization_url = self._get_authorization_url(request, auth_state.session_state) return RedirectResponse(authorization_url) def _get_authorization_url(self, request, session_state): raise NotImplementedError('Implement in specific subclass') def process_login_request(self, request, force=False, redirect='/'): self.logger.debug(f'Logging in - request url {request.url}') auth_state = self._session_validator.get_state_from_session(request) force = request.query_params.get('force', force) if auth_state.is_authenticated() and not force: self.logger.debug(f'Authenticated - redirecting {auth_state}') response = self.redirect_if_authenticated(auth_state) else: self._session_validator.set_post_auth_redirect(request, request.query_params.get('redirect', redirect)) self.logger.debug(f'No Auth state - redirecting to provider login {auth_state}') response = self.redirect_to_provider_login(auth_state, request) return response def process_login_callback(self, request): if 'error' in request.query_params: error_args = [request.query_params['error'], ] if 'error_description' in request.query_params: error_args.append(request.query_params['error_description']) raise ConfigurationError(*error_args) code = request.query_params.get('code', None) state = request.query_params.get('state', None) if state is None or code is None: return auth_state = self._session_validator.get_state_from_session(request) auth_state.check_session_state(state) token = self._process_code(request, auth_state, code) user = self._get_user_from_token(token) authenticated_state = AuthenticationState.authenticate_as(user, self._session_validator._session_serializer, request.session) redirect = self._session_validator.pop_post_auth_redirect(request) return self.redirect_if_authenticated(authenticated_state, redirect=redirect) def _process_code(self, request, auth_state, code): raise NotImplementedError('Implement in subclass')
MIT License
c-w/gutenberg
gutenberg/acquire/metadata.py
MetadataCache._metadata_is_invalid
python
def _metadata_is_invalid(cls, fact):
    return any(isinstance(token, URIRef) and ' ' in token
               for token in fact)
Determines if the fact is not well formed.
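As a rough standalone illustration of the rule (reject a triple when any URI term contains a space), with plain strings standing in for rdflib's URIRef; the names here are invented for the example:

def fact_is_invalid(fact, is_uri=lambda t: isinstance(t, str) and t.startswith("http")):
    # Mirrors the check above: any URI-like term containing a space is malformed.
    return any(is_uri(token) and " " in token for token in fact)

good = ("http://www.gutenberg.org/ebooks/11", "title", "Alice in Wonderland")
bad = ("http://www.gutenberg.org/ebooks/11 extra", "title", "Alice in Wonderland")
assert not fact_is_invalid(good)
assert fact_is_invalid(bad)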
https://github.com/c-w/gutenberg/blob/2c9aace973a9031c4c0234f174b8f135eaadcf70/gutenberg/acquire/metadata.py#L151-L156
from __future__ import absolute_import, unicode_literals import abc import codecs import logging import os import re import shutil import tarfile import tempfile from contextlib import closing from contextlib import contextmanager from rdflib import plugin from rdflib.graph import Graph from rdflib.query import ResultException from rdflib.store import Store from rdflib.term import BNode from rdflib.term import URIRef from rdflib_sqlalchemy import registerplugins from six import text_type from six import with_metaclass from gutenberg._domain_model.exceptions import CacheAlreadyExistsException from gutenberg._domain_model.exceptions import InvalidCacheException from gutenberg._domain_model.persistence import local_path from gutenberg._domain_model.vocabulary import DCTERMS from gutenberg._domain_model.vocabulary import PGTERMS from gutenberg._util.logging import disable_logging from gutenberg._util.os import makedirs from gutenberg._util.os import remove from gutenberg._util.url import urlopen _GUTENBERG_CATALOG_URL = r'http://www.gutenberg.org/cache/epub/feeds/rdf-files.tar.bz2' _DB_IDENTIFIER = 'urn:gutenberg:metadata' _DB_PATH = local_path(os.path.join('metadata', 'metadata.db')) class MetadataCache(with_metaclass(abc.ABCMeta, object)): def __init__(self, store, cache_uri): self.store = store self.cache_uri = cache_uri self.graph = Graph(store=self.store, identifier=_DB_IDENTIFIER) self.is_open = False self.catalog_source = _GUTENBERG_CATALOG_URL @property def exists(self): return os.path.exists(self._local_storage_path) def open(self): try: self.graph.open(self.cache_uri, create=False) self._add_namespaces(self.graph) self.is_open = True except Exception: raise InvalidCacheException('The cache is invalid or not created') def close(self): self.graph.close() self.is_open = False def delete(self): self.close() remove(self._local_storage_path) def populate(self): if self.exists: raise CacheAlreadyExistsException('location: %s' % self.cache_uri) self._populate_setup() with closing(self.graph): with self._download_metadata_archive() as metadata_archive: for fact in self._iter_metadata_triples(metadata_archive): self._add_to_graph(fact) def _add_to_graph(self, fact): self.graph.add(fact) def _populate_setup(self): pass def refresh(self): if self.exists: self.delete() self.populate() self.open() @property def _local_storage_path(self): return self.cache_uri @staticmethod def _add_namespaces(graph): graph.bind('pgterms', PGTERMS) graph.bind('dcterms', DCTERMS) @contextmanager def _download_metadata_archive(self): with tempfile.NamedTemporaryFile(delete=False) as metadata_archive: shutil.copyfileobj(urlopen(self.catalog_source), metadata_archive) yield metadata_archive.name remove(metadata_archive.name) @classmethod
Apache License 2.0
kmill/textadv
textadv/gamesystem/utilities.py
StringEvaluator.__eval_parse
python
def __eval_parse(self, input, i=0, in_code=False) :
    parsed = []
    j = i
    while i < len(input) :
        if input[i] == "[" :
            if i > j : parsed.append(input[j:i])
            parsed2, i2 = self.__eval_parse(input, i+1, in_code=True)
            parsed.append(("code", parsed2))
            i = i2
            j = i
        elif input[i] == "]" :
            if i > j : parsed.append(input[j:i])
            return (parsed, i+1)
        elif input[i] == "{" :
            if i > j : parsed.append(input[j:i])
            start = i + 1
            i += 1
            while input[i] != "}" :
                i += 1
            parsed.append(["reword"] + [("lit", f) for f in input[start:i].split("|")])
            i += 1
            j = i
        elif input[i] == "}" :
            raise Exception("Unmatched '}' in "+input)
        elif input[i] == "<" and in_code :
            if i > j : parsed.append(input[j:i])
            start = i+1
            while input[i] != ">" :
                i += 1
            parsed.append(" ".join(input[start:i].split()))
            i += 1
            j = i
        elif in_code and input[i] in string.whitespace :
            if i > j : parsed.append(input[j:i])
            i += 1
            j = i
        else :
            i += 1
    if j < i : parsed.append(input[j:i])
    return (parsed, i)
Pulls out [] and {} expressions, labeling them as such. Also makes it so [] expressions are split by whitespace. The characters < and > delimit strings when in_code. Note that all whitespace is collapsed into a single space for < and >.
https://github.com/kmill/textadv/blob/b153b7f3f990f6f6f218a86c499382ad50982a8a/textadv/gamesystem/utilities.py#L207-L254
import string import itertools import re def list_append(xs) : return [a for x in xs for a in x] def join_with_spaces(xs) : return " ".join(xs) def join(xs) : return "".join(xs) def serial_comma(nouns, conj="and", comma=",", force_comma=False) : conj = " " + conj + " " if len(nouns) == 0 : return "nothing" elif len(nouns) == 1 : return nouns[0] elif len(nouns) == 2 : if force_comma : return nouns[0] + comma + conj + nouns[1] else : return nouns[0] + conj + nouns[1] else : comma_sp = comma + " " return comma_sp.join(nouns[:-1]) + comma + conj + nouns[-1] def is_are_list(nouns) : if len(nouns) == 0 : return "is nothing" elif len(nouns) == 1 : return "is "+nouns[0] else : return "are "+serial_comma(nouns) DIRECTION_INVERSES = {"north" : "south", "south" : "north", "east" : "west", "west" : "east", "northwest" : "southeast", "northeast" : "southwest", "southwest" : "northeast", "southeast" : "northwest", "up" : "down", "down" : "up", "in" : "out", "out" : "in"} def inverse_direction(direction) : return DIRECTION_INVERSES[direction] def add_direction_pair(dir, opp) : DIRECTION_INVERSES[dir] = opp DIRECTION_INVERSES[opp] = dir def docstring(s) : def _docstring(f) : f.__doc__ = s return f return _docstring html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;", ">": "&gt;", "<": "&lt;", } def html_escape(text): if text : return "".join(html_escape_table.get(c,c) for c in text) else : return None def make_action_link(text, action, tag=None) : action = action.replace("'", "\\'") if tag : return '<a class="action" href="" onclick="return run_action(\'%s\');">%s&nbsp;%s</a>' % (action, tag, text) else : return '<a class="action" href="" onclick="return run_action(\'%s\');">%s</a>' % (action, text) def wrap_examine(eval, act, obj, text, ctxt) : return make_action_link(eval.eval_str(text, ctxt, actor=act), "examine "+eval.eval_str(ctxt.world.get_property("Name", obj), ctxt, actor=act)) def str_with_objs(input, **kwarg) : newkwarg = dict() for key, value in kwarg.iteritems() : if " " in value : newkwarg[key] = "<%s>"%value else : newkwarg[key] = value return string.Template(input).safe_substitute(newkwarg) def as_actor(input, actor) : if " " in actor : repla = "<%s>"%actor else : repla = actor return "[as %s]%s[endas]" % (repla, input) def _escape_str(match) : return "[char %r]" % ord(match.group()) def escape_str(input) : return re.sub(r"\[|\]|\$|{|}", _escape_str, input) class MalformedException(Exception) : pass class StringEvaluator(object) : def __init__(self) : self.eval_functions = dict() def copy(self) : newse = StringEvaluator() newse.eval_functions = self.eval_functions.copy() return newse def add_eval_func(self, name) : def _add_eval_func(f) : if name in self.eval_functions : print "Warning: adding another StringEvaluator function named",name self.eval_functions[name] = f return f return _add_eval_func def eval_str(self, input, context, actor=None) : if not actor : actor = context.actor parsed, i = self.__eval_parse(input) code = ["append"] try : i = 0 while i < len(parsed) : i, val = self.__collect_structures(parsed, i) code.append(val) except MalformedException as x : print "eval_str: Offending input is" print input raise x evaled = self.__eval(code, context, actor) return "".join([str(o) for o in evaled])
MIT License
neuralmagic/sparsezoo
src/sparsezoo/objects/base.py
BaseObject.modified
python
def modified(self) -> Union[str, None]:
    return self._modified
:return: the date modified
https://github.com/neuralmagic/sparsezoo/blob/ff43990c3c492e6bbc876158a6a33907d2b4b667/src/sparsezoo/objects/base.py#L50-L54
from typing import Dict, List, Union

__all__ = ["BaseObject"]


class BaseObject:
    def __init__(
        self,
        created: Union[str, None] = None,
        modified: Union[str, None] = None,
        **kwargs,
    ):
        self._created = created
        self._modified = modified

    @property
    def created(self) -> Union[str, None]:
        return self._created

    @property
Apache License 2.0
microsoft/qdk-python
azure-quantum/azure/quantum/cirq/targets/honeywell.py
HoneywellTarget._to_cirq_job
python
def _to_cirq_job(self, azure_job: "AzureJob", program: "cirq.Circuit" = None):
    if "measurement_dict" not in azure_job.details.metadata and program is None:
        raise ValueError("Parameter 'measurement_dict' not found in job metadata.")
    measurement_dict = azure_job.details.metadata.get("measurement_dict")
    return CirqJob(azure_job=azure_job, program=program,
                   measurement_dict=measurement_dict)
Convert Azure job to Cirq job
https://github.com/microsoft/qdk-python/blob/d0a87fda57dc360c96d9ce9772b71406d9b29ebe/azure-quantum/azure/quantum/cirq/targets/honeywell.py#L59-L64
import numpy as np from typing import TYPE_CHECKING, Any, Dict, Sequence from azure.quantum.target import Honeywell from azure.quantum.cirq.targets.target import Target as CirqTarget from azure.quantum.cirq.job import Job as CirqJob if TYPE_CHECKING: import cirq from azure.quantum import Workspace from azure.quantum import Job as AzureJob class HoneywellTarget(Honeywell, CirqTarget): def __init__( self, workspace: "Workspace", name: str, input_data_format: str = "honeywell.openqasm.v1", output_data_format: str = "honeywell.quantum-results.v1", provider_id: str = "honeywell", content_type: str = "application/qasm", encoding: str = "", **kwargs ): super().__init__( workspace=workspace, name=name, input_data_format=input_data_format, output_data_format=output_data_format, provider_id=provider_id, content_type=content_type, encoding=encoding, **kwargs ) @staticmethod def _translate_cirq_circuit(circuit) -> str: return circuit.to_qasm() @staticmethod def _to_cirq_result(result: Dict[str, Any], param_resolver, **kwargs): from cirq import Result measurements = { key.lstrip("m_"): np.array([[int(_v)] for _v in value]) for key, value in result.items() if key.startswith("m_") } return Result(params=param_resolver, measurements=measurements)
MIT License
olavolav/uniplot
uniplot/plot_elements.py
plot_title
python
def plot_title(title: str, width: int) -> str:
    return _center_if_possible(title, width=width + 2)
Returns the centered title string. Note that this assumes that `title` is not `None`.
https://github.com/olavolav/uniplot/blob/3bc32f4618c47f677002070d5b18a23b726fd776/uniplot/plot_elements.py#L88-L94
import sys import re import numpy as np from typing import List, Optional from uniplot.discretizer import compute_y_at_middle_of_row UNICODE_SQUARES = { 0: "", 1: "▘", 2: "▝", 3: "▀", 4: "▖", 5: "▌", 6: "▞", 7: "▛", 8: "▗", 9: "▚", 10: "▐", 11: "▜", 12: "▄", 13: "▙", 14: "▟", 15: "█", } BINARY_ENCODING_MATRIX = np.array([[1, 2], [4, 8]]) CURSOR_UP_ONE = "\x1b[1A" ERASE_LINE = "\x1b[2K" COLOR_CODES = { "blue": "\033[34m", "magenta": "\033[35m", "green": "\033[32m", "yellow": "\033[33m", "cyan": "\033[36m", "red": "\033[31m", } COLOR_RESET_CODE = "\033[0m" COLOR_CODE_REGEX = re.compile(r"\033\[\d+m") def character_for_2by2_pixels(square: np.array, color_mode: bool = False) -> str: assert square.shape == (2, 2) assert square.min() >= 0 max_color = square.max() if max_color <= 1: binary_square = np.clip(square, a_min=0, a_max=1) else: binary_square = np.clip(square, a_min=max_color - 1, a_max=max_color) - ( max_color - 1 ) integer_encoding = np.multiply(binary_square, BINARY_ENCODING_MATRIX).sum() char = UNICODE_SQUARES[integer_encoding] if char == "" or not color_mode: return char color_code = list(COLOR_CODES.values())[(square.max() - 1) % len(COLOR_CODES)] return f"{color_code}{char}{COLOR_RESET_CODE}" def legend(legend_labels: List[str], width: int) -> str: label_strings: List[str] = [] for i in range(len(legend_labels)): color_code = list(COLOR_CODES.values())[i % len(COLOR_CODES)] label_string = ( f"{color_code}██{COLOR_RESET_CODE} {str(legend_labels[i]).strip()}" ) label_strings.append(label_string) full_label_string = "\n".join(label_strings) return _center_if_possible(full_label_string, width=width + 2)
MIT License
janluke/cloup
cloup/formatting/sep.py
multiline_rows_are_at_least
python
def multiline_rows_are_at_least(
    count_or_percentage: Union[int, float]
) -> RowSepCondition:
    if count_or_percentage <= 0:
        raise ValueError('count_or_percentage should be > 0')

    if isinstance(count_or_percentage, int):
        count_threshold = count_or_percentage

        def condition(rows, col_widths, col_spacing):
            num_multiline = count_multiline_rows(rows, col_widths)
            return num_multiline >= count_threshold

    elif isinstance(count_or_percentage, float):
        percent_threshold = count_or_percentage
        if percent_threshold > 1.0:
            raise ValueError(
                "count_or_percentage must be either an integer or a float in the "
                f"interval ]0, 1[. You passed a float >= 1.0 ({percent_threshold}).")

        def condition(rows, col_widths, col_spacing):
            num_multiline = count_multiline_rows(rows, col_widths)
            percent_multiline = num_multiline / len(rows)
            return percent_multiline >= percent_threshold

    else:
        raise TypeError('count_or_percentage must be an int or a float')

    return condition
Returns a ``RowSepStrategy`` that returns a row separator between all rows
of a definition list, only if the number of rows taking multiple lines is
greater than or equal to a certain threshold.

:param count_or_percentage:
    a threshold for multiline rows above which the returned strategy will
    insert a row separator. It can be either an absolute count (`int`) or a
    percentage relative to the total number of rows expressed as a `float`
    between 0 and 1 (0 and 1 excluded).
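To make the threshold concrete, a small self-contained sketch reusing the count_multiline_rows helper from this module (the example rows and widths are made up): a row counts as multiline when any of its cells is wider than its column, and the separator condition fires once enough rows are.

from itertools import zip_longest

def count_multiline_rows(rows, col_widths):
    return sum(
        any(len(cell) > width for cell, width in zip_longest(row, col_widths))
        for row in rows
    )

rows = [("-v", "verbose output"),
        ("--very-long-option-name", "wraps because it exceeds its column width")]
col_widths = (10, 30)
assert count_multiline_rows(rows, col_widths) == 1
# multiline_rows_are_at_least(1) would insert a separator here (1 >= 1),
# while multiline_rows_are_at_least(0.75) would not (1/2 = 0.5 < 0.75).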
https://github.com/janluke/cloup/blob/a496e3595e2e8df12808e40a4c7f6a60ca52d506/cloup/formatting/sep.py#L136-L174
import abc import sys from itertools import zip_longest from typing import Optional, Sequence, Union if sys.version_info[:2] >= (3, 8): from typing import Protocol else: from typing_extensions import Protocol SepType = Union[str, 'SepGenerator'] class SepGenerator(Protocol): def __call__(self, width: int) -> str: ... class RowSepPolicy(metaclass=abc.ABCMeta): @abc.abstractmethod def __call__( self, rows: Sequence[Sequence[str]], col_widths: Sequence[int], col_spacing: int, ) -> Optional[str]: class RowSepCondition(Protocol): def __call__( self, rows: Sequence[Sequence[str]], col_widths: Sequence[int], col_spacing: int, ) -> bool: class RowSepIf(RowSepPolicy): def __init__(self, condition: RowSepCondition, sep: Union[str, SepGenerator] = ''): if isinstance(sep, str) and sep.endswith('\n'): raise ValueError( "sep must not end with '\\n'. The formatter writes a '\\n' after it; " "no other newline is allowed.") self.condition = condition self.sep = sep def __call__( self, rows: Sequence[Sequence[str]], col_widths: Sequence[int], col_spacing: int ) -> Optional[str]: if self.condition(rows, col_widths, col_spacing): if callable(self.sep): total_width = get_total_width(col_widths, col_spacing) return self.sep(total_width) return self.sep return None def get_total_width(col_widths: Sequence[int], col_spacing: int) -> int: return sum(col_widths) + col_spacing * (len(col_widths) - 1) def count_multiline_rows(rows: Sequence[str], col_widths: Sequence[int]) -> int: return sum( any(len(col_text) > col_width for col_text, col_width in zip_longest(row, col_widths)) for row in rows )
BSD 3-Clause New or Revised License
vladmunteanu/starlette-jsonapi
starlette_jsonapi/utils.py
parse_sparse_fields_params
python
def parse_sparse_fields_params(request: Request) -> Dict[str, List[str]]:
    sparse_fields = dict()
    for qp_name, qp_value in request.query_params.items():
        if qp_name.startswith('fields[') and qp_name.endswith(']'):
            resource_name_start = qp_name.index('[') + 1
            resource_name_end = qp_name.index(']')
            resource_name = qp_name[resource_name_start:resource_name_end]
            if not resource_name or not qp_value or not all(qp_value.split(',')):
                raise JSONAPIException(status_code=400, detail='Incorrect sparse fields request.')
            sparse_fields[resource_name] = qp_value.split(',')
    return sparse_fields
Parses a request's ``fields`` query parameter, if present,
and returns a dictionary of resource type -> sparse fields.

Example:

.. code-block:: python

    # request URL: /articles/?fields[articles]=title,content
    assert parse_sparse_fields_params(request) == {'articles': ['title', 'content']}
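A standalone sketch of the same parsing rule, with a plain dict standing in for starlette's request.query_params (the function name below is invented for the illustration):

def parse_sparse_fields(query_params: dict) -> dict:
    sparse = {}
    for name, value in query_params.items():
        if name.startswith('fields[') and name.endswith(']'):
            resource = name[len('fields['):-1]
            if not resource or not value or not all(value.split(',')):
                raise ValueError('Incorrect sparse fields request.')
            sparse[resource] = value.split(',')
    return sparse

assert parse_sparse_fields({'fields[articles]': 'title,content'}) == \
    {'articles': ['title', 'content']}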
https://github.com/vladmunteanu/starlette-jsonapi/blob/0e2a0a40284af291ffefe21fe77de1cd90cbdc93/starlette_jsonapi/utils.py#L65-L87
import copy from typing import Optional, Set, Dict, List, Union from starlette.applications import Starlette from starlette.exceptions import HTTPException from starlette.requests import Request from starlette.responses import Response from starlette_jsonapi.exceptions import JSONAPIException from starlette_jsonapi.responses import JSONAPIResponse def serialize_error(exc: Exception) -> JSONAPIResponse: if isinstance(exc, JSONAPIException): status_code = exc.status_code errors = exc.errors elif isinstance(exc, HTTPException): status_code = exc.status_code errors = [{'detail': exc.detail}] else: status_code = 500 errors = [{'detail': 'Internal server error'}] error_body = { 'errors': errors } return JSONAPIResponse(status_code=status_code, content=error_body) def register_jsonapi_exception_handlers(app: Starlette): async def _serialize_error(request: Request, exc: Exception) -> Response: return serialize_error(exc) app.add_exception_handler(Exception, _serialize_error) app.add_exception_handler(HTTPException, _serialize_error) def parse_included_params(request: Request) -> Optional[Set[str]]: include = request.query_params.get('include') if include: include = set(include.split(',')) return include return None
MIT License
keflavich/agpy
agpy/UCHIIfitter.py
HIIregion.refit
python
def refit(self,**kwargs):
    self.em,self.nutau,self.normfac,self.chi2 = emtau(self.nu,self.flux,self.fluxerr,Te=self.Te,**kwargs)
refit, presumably using different inputs to emtau
https://github.com/keflavich/agpy/blob/fb3a42d9909b7cd1ba74247530bcc8742f5aaeb1/agpy/UCHIIfitter.py#L191-L193
import pylab as pl import numpy as np try: from scipy import optimize except ImportError: print("scipy not installed: UCHIIfitter may fail") from pyspeckit import mpfit from astropy import units as u from astropy import constants unitfactor={'mJy':1e-26,'Jy':1e-23,'cgs':1.0} freqfactor={'GHz':1e9,'Hz':1.0} muh = 2.8 def tnu(Te, nu, EM): nu0 = Te**1.5 / 1000 answer_highnu = (nu > nu0) * 3.014e-2 * Te**-1.5 * nu**-2 * EM gff_lownu = (np.log(4.955e-2 * nu**-1) + 1.5 * np.log(Te)) answer_lownu = (nu < nu0) * 3.014e-2 * Te**-1.5 * nu**-2 * EM * gff_lownu tau = answer_lownu+answer_highnu return tau def Inu(nu, tau, Te, I0=0): if I0==0 and isinstance(nu,np.ndarray): whtau1 = np.argmin(np.abs(tau-1)) nutau1 = nu[whtau1] taufactor = 1 else: nutau1 = nu taufactor = tau I0 = 2 * constants.k_B * Te * nutau1**2 / constants.c**2 * taufactor thin = (tau < 1) * np.exp(1-tau) * I0 thick = 2 * constants.k_B * Te * (nu * (tau > 1))**2 / constants.c**2 return thin+thick def inufit(nu, em, normfac, Te=8500, unit='mJy', frequnit='GHz'): _nu = nu*freqfactor[frequnit] I0 = 2 * constants.k_B * Te * _nu[0]**2 / constants.c**2 model_intensity = Inu(_nu,tnu(Te,_nu/1e9,em),Te,I0=I0) model_norm = normfac * model_intensity / unitfactor[unit] return model_norm def inufit_dust(nu, em, normfac, alpha, normfac2, Te=8500): I0 = 2 * constants.k_B * Te * nu[0]**2 / constants.c**2 model_intensity = Inu(nu,tnu(Te,nu,em),Te,I0=I0) model_norm = normfac * model_intensity + normfac2*nu**alpha return model_norm def inufit_dustT(nu, em, normfac, beta, normfac2, dustT, Te=8500): I0 = 2 * constants.k_B * Te * nu[0]**2 / constants.c**2 model_intensity = Inu(nu,tnu(Te,nu,em),Te,I0=I0) dustem = 2*constants.h*(nu)**(3+beta) / constants.c**2 * (np.exp(constants.h*nu*1e9/(constants.k_B*np.abs(dustT))) - 1)**-1 model_norm = normfac * model_intensity + normfac2/np.abs(dustT)*dustem return model_norm def mpfitfun(freq,flux,err=None,dust=False,dustT=False): if dust: if err is None: def f(p,fjac=None): return [0,(flux-inufit_dust(freq,*p))] else: def f(p,fjac=None): return [0,(flux-inufit_dust(freq,*p))/err] return f elif dustT: if err is None: def f(p,fjac=None): return [0,(flux-inufit_dustT(freq,*p))] else: def f(p,fjac=None): return [0,(flux-inufit_dustT(freq,*p))/err] return f else: if err is None: def f(p,fjac=None): return [0,(flux-inufit(freq,*p))] else: def f(p,fjac=None): return [0,(flux-inufit(freq,*p))/err] return f def emtau(freq,flux,err=None,EMguess=1e7,Te=8500,normfac=5e-6,quiet=1): mp = mpfit(mpfitfun(freq,flux,err),xall=[EMguess,normfac],quiet=quiet) mpp = mp.params mpperr = mp.perror chi2 = mp.fnorm bestEM = mpp[0] normfac = mpp[1] nu_tau = (Te**1.35 / bestEM / 8.235e-2)**(-1/2.1) return bestEM,nu_tau,normfac,chi2 class HIIregion(object): def __init__(self, nu, flux, fluxerr, fluxunit='mJy', frequnit='GHz', beamsize_as2=0.25, dist_kpc=1.0, resolved=False, Te=8500, **kwargs): order = np.argsort(np.asarray(nu)) self.nu = np.asarray(nu)[order] self.flux = np.asarray(flux)[order] self.fluxerr = np.asarray(fluxerr)[order] self.frequnit = frequnit self.fluxunit = fluxunit self.beamsize_as2 = beamsize_as2 self.dist_kpc = dist_kpc self.resolved = resolved self.Te = Te self.em, self.nutau, self.normfac, self.chi2 = emtau(self.nu, self.flux, self.fluxerr, Te=self.Te, **kwargs)
MIT License
sanderslab/magellanmapper
magmap/io/sqlite.py
_update_experiments
python
def _update_experiments(db_dir):
    ext = ".czi"
    db_paths = sorted(glob.glob(os.path.join(db_dir, "*.db")))
    for db_path in db_paths:
        db = ClrDB()
        db.load_db(db_path, False)
        rows = db.select_experiment()
        for row in rows:
            name = row["name"]
            if not name.endswith(ext):
                name_updated = name.replace(ext, "_") + ext
                print("...replaced experiment {} with {}".format(name, name_updated))
                db.cur.execute("UPDATE experiments SET name = ? WHERE name = ?",
                               (name_updated, name))
            else:
                print("...no update")
        db.conn.commit()
Updates experiment names by shifting the old .czi extension from midway
through the name to its end.

Args:
    db_dir: Directory that contains databases to update.
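The rename rule itself is pure string manipulation; a hedged sketch isolating it from the database code (the helper name is invented for this example):

def shift_czi_extension(name: str, ext: str = ".czi") -> str:
    # Matches the update above: leave names that already end with .czi alone,
    # otherwise replace the embedded .czi with "_" and append .czi at the end.
    if name.endswith(ext):
        return name
    return name.replace(ext, "_") + ext

assert shift_czi_extension("exp1.czi(1)") == "exp1_(1).czi"
assert shift_czi_extension("exp1.czi") == "exp1.czi"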
https://github.com/sanderslab/magellanmapper/blob/35e910035217edab799d4fbaa61e39931527a354/magmap/io/sqlite.py#L212-L235
import os import glob import datetime import sqlite3 from typing import List, Optional, Tuple import numpy as np from magmap.settings import config from magmap.cv import colocalizer, detector, verifier from magmap.io import df_io, importer, libmag DB_NAME_BASE = "magmap" DB_NAME_VERIFIED = "{}_verified.db".format(DB_NAME_BASE) DB_NAME_MERGED = "{}_merged.db".format(DB_NAME_BASE) DB_SUFFIX_TRUTH = "_truth.db" DB_VERSION = 4 _COLS_BLOBS = "roi_id, z, y, x, radius, confirmed, truth, channel" _COLS_BLOB_MATCHES = "roi_id, blob1, blob2, dist" def _create_db(path): if os.path.exists(path): libmag.backup_file(path) conn = sqlite3.connect(path) conn.row_factory = sqlite3.Row cur = conn.cursor() _create_table_about(cur) _create_table_experiments(cur) _create_table_rois(cur) _create_table_blobs(cur) _create_table_blob_matches(cur) insert_about(conn, cur, DB_VERSION, datetime.datetime.now()) conn.commit() print("created db at {}".format(path)) return conn, cur def _create_table_about(cur): cur.execute("CREATE TABLE about (version INTEGER PRIMARY KEY, date DATE)") def _create_table_experiments(cur): cur.execute("CREATE TABLE experiments (id INTEGER PRIMARY KEY AUTOINCREMENT, " "name TEXT, date DATE)") def _create_table_rois(cur): cur.execute("CREATE TABLE rois (id INTEGER PRIMARY KEY AUTOINCREMENT, " "experiment_id INTEGER, series INTEGER, " "offset_x INTEGER, offset_y INTEGER, " "offset_z INTEGER, size_x INTEGER, " "size_y INTEGER, size_z INTEGER, " "UNIQUE (experiment_id, series, offset_x, offset_y, offset_z))") def _create_table_blobs(cur): cur.execute("CREATE TABLE blobs (id INTEGER PRIMARY KEY AUTOINCREMENT, " "roi_id INTEGER, x INTEGER, y INTEGER, " "z INTEGER, radius REAL, " "confirmed INTEGER, truth INTEGER, " "channel INTEGER, " "UNIQUE (roi_id, x, y, z, truth, channel))") def _create_table_blob_matches(cur): cur.execute("CREATE TABLE blob_matches (" "id INTEGER PRIMARY KEY AUTOINCREMENT, " "roi_id INTEGER, blob1 INTEGER, blob2 INTEGER, dist REAL, " "FOREIGN KEY (roi_id) REFERENCES rois (id) " "ON UPDATE CASCADE ON DELETE CASCADE, " "FOREIGN KEY (blob1) REFERENCES blobs (id) " "ON UPDATE CASCADE ON DELETE CASCADE," "FOREIGN KEY (blob2) REFERENCES blobs (id) " "ON UPDATE CASCADE ON DELETE CASCADE)") def upgrade_db(conn, cur): db_ver = 0 try: abouts = select_about(conn, cur) db_ver = abouts[len(abouts) - 1]["version"] except sqlite3.OperationalError as e: print(e) print("defaulting to upgrade from DB version {}".format(db_ver)) if db_ver >= DB_VERSION: return print("Starting database upgrade...") if db_ver < 2: print("upgrading DB version from {} to 2".format(db_ver)) print("inserting new about table") _create_table_about(cur) print("upgrading blobs table") cur.execute("ALTER TABLE blobs RENAME TO tmp_blobs") _create_table_blobs(cur) cols = _COLS_BLOBS.rsplit(",", 2)[0] cur.execute("INSERT INTO blobs (" + cols + ", truth) SELECT " + cols + ", -1 FROM tmp_blobs") cur.execute("DROP TABLE tmp_blobs") if db_ver < 3: print("upgrading DB version from {} to 3".format(db_ver)) print("upgrading blobs table") cur.execute("ALTER TABLE blobs RENAME TO tmp_blobs") _create_table_blobs(cur) cols = _COLS_BLOBS.rsplit(",", 1)[0] cur.execute("INSERT INTO blobs (" + cols + ", channel) SELECT " + cols + ", 0 FROM tmp_blobs") cur.execute("DROP TABLE tmp_blobs") if db_ver < 4: print("upgrading DB version from {} to 4".format(db_ver)) _create_table_blob_matches(cur) insert_about(conn, cur, DB_VERSION, datetime.datetime.now()) print("...finished database upgrade.") conn.commit() def insert_about(conn, cur, version, 
date): cur.execute("INSERT INTO about (version, date) VALUES (?, ?)", (version, date)) print("about table entry entered with version {}".format(version)) conn.commit() return cur.lastrowid def select_about(conn, cur): cur.execute("SELECT * FROM about") rows = cur.fetchall() return rows def get_exp_name(path): path_decon = importer.deconstruct_img_name( path, keep_subimg=True)[0] if path_decon: path_decon = os.path.splitext(os.path.basename(path_decon))[0] return path_decon def insert_experiment(conn, cur, name, date=None): if date is None: date = datetime.datetime.now() cur.execute("INSERT INTO experiments (name, date) VALUES (?, ?)", (name, date)) print("{} experiment inserted".format(name)) conn.commit() return cur.lastrowid
BSD 3-Clause New or Revised License
openstack/ironic
ironic/common/pxe_utils.py
get_path_relative_to_tftp_root
python
def get_path_relative_to_tftp_root(file_path):
    return os.path.relpath(file_path, get_tftp_path_prefix())
Return file relative path to CONF.pxe.tftp_root

:param file_path: full file path to be made relative path.
:returns: The path relative to CONF.pxe.tftp_root
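In effect this is os.path.relpath against the configured TFTP root; a quick illustration with a made-up root path:

import os.path

# Assuming the TFTP root were "/tftpboot" (illustrative value only):
assert os.path.relpath("/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config",
                       "/tftpboot") == "1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config"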
https://github.com/openstack/ironic/blob/a4a6f26333be31b84a9b1a874dde506e61d407d3/ironic/common/pxe_utils.py#L611-L617
import copy import os import shutil import tempfile from ironic_lib import utils as ironic_utils import jinja2 from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import fileutils from ironic.common import dhcp_factory from ironic.common import exception from ironic.common.glance_service import service_utils from ironic.common.i18n import _ from ironic.common import image_service as service from ironic.common import images from ironic.common import kickstart_utils as ks_utils from ironic.common import states from ironic.common import utils from ironic.conductor import utils as manager_utils from ironic.conf import CONF from ironic.drivers.modules import boot_mode_utils from ironic.drivers.modules import deploy_utils from ironic.drivers.modules import image_cache from ironic.drivers import utils as driver_utils from ironic import objects LOG = logging.getLogger(__name__) PXE_CFG_DIR_NAME = CONF.pxe.pxe_config_subdir DHCP_CLIENT_ID = '61' DHCP_TFTP_SERVER_NAME = '66' DHCP_BOOTFILE_NAME = '67' DHCPV6_BOOTFILE_NAME = '59' DHCP_TFTP_SERVER_ADDRESS = '150' DHCP_IPXE_ENCAP_OPTS = '175' DHCP_TFTP_PATH_PREFIX = '210' DEPLOY_KERNEL_RAMDISK_LABELS = ['deploy_kernel', 'deploy_ramdisk'] RESCUE_KERNEL_RAMDISK_LABELS = ['rescue_kernel', 'rescue_ramdisk'] KERNEL_RAMDISK_LABELS = {'deploy': DEPLOY_KERNEL_RAMDISK_LABELS, 'rescue': RESCUE_KERNEL_RAMDISK_LABELS} def get_root_dir(): return CONF.pxe.tftp_root def get_ipxe_root_dir(): return CONF.deploy.http_root def _ensure_config_dirs_exist(task, ipxe_enabled=False): if ipxe_enabled: root_dir = get_ipxe_root_dir() else: root_dir = get_root_dir() node_dir = os.path.join(root_dir, task.node.uuid) pxe_dir = os.path.join(root_dir, PXE_CFG_DIR_NAME) for directory in (node_dir, pxe_dir): if not os.path.isdir(directory): fileutils.ensure_tree(directory) if CONF.pxe.dir_permission: os.chmod(directory, CONF.pxe.dir_permission) def _link_mac_pxe_configs(task, ipxe_enabled=False): def create_link(mac_path): ironic_utils.unlink_without_raise(mac_path) relative_source_path = os.path.relpath( pxe_config_file_path, os.path.dirname(mac_path)) utils.create_link_without_raise(relative_source_path, mac_path) pxe_config_file_path = get_pxe_config_file_path( task.node.uuid, ipxe_enabled=ipxe_enabled) for port in task.ports: client_id = port.extra.get('client-id') create_link(_get_pxe_mac_path(port.address, client_id=client_id, ipxe_enabled=ipxe_enabled)) for path in _get_pxe_grub_mac_path(port.address, ipxe_enabled=ipxe_enabled): create_link(path) def _link_ip_address_pxe_configs(task, ipxe_enabled=False): pxe_config_file_path = get_pxe_config_file_path( task.node.uuid, ipxe_enabled=ipxe_enabled) api = dhcp_factory.DHCPFactory().provider ip_addrs = api.get_ip_addresses(task) if not ip_addrs: if ip_addrs == []: LOG.warning("No IP addresses assigned for node %(node)s.", {'node': task.node.uuid}) else: LOG.warning( "DHCP address management is not available for node " "%(node)s. 
Operators without Neutron can ignore this " "warning.", {'node': task.node.uuid}) ip_addrs = [] for port_ip_address in ip_addrs: ip_address_path = _get_pxe_ip_address_path(port_ip_address) ironic_utils.unlink_without_raise(ip_address_path) relative_source_path = os.path.relpath( pxe_config_file_path, os.path.dirname(ip_address_path)) utils.create_link_without_raise(relative_source_path, ip_address_path) def _get_pxe_grub_mac_path(mac, ipxe_enabled=False): root_dir = get_ipxe_root_dir() if ipxe_enabled else get_root_dir() yield os.path.join(root_dir, "%s-%s-%s" % ("grub.cfg", "01", mac.replace(':', "-").lower())) yield os.path.join(root_dir, mac + '.conf') def _get_pxe_mac_path(mac, delimiter='-', client_id=None, ipxe_enabled=False): mac_file_name = mac.replace(':', delimiter).lower() if not ipxe_enabled: hw_type = '01-' if client_id: hw_type = '20-' mac_file_name = hw_type + mac_file_name return os.path.join(get_root_dir(), PXE_CFG_DIR_NAME, mac_file_name) return os.path.join(get_ipxe_root_dir(), PXE_CFG_DIR_NAME, mac_file_name) def _get_pxe_ip_address_path(ip_address): return os.path.join( CONF.pxe.tftp_root, ip_address + ".conf" ) def get_kernel_ramdisk_info(node_uuid, driver_info, mode='deploy', ipxe_enabled=False): if ipxe_enabled: root_dir = get_ipxe_root_dir() else: root_dir = get_root_dir() image_info = {} labels = KERNEL_RAMDISK_LABELS[mode] for label in labels: image_info[label] = ( str(driver_info[label]), os.path.join(root_dir, node_uuid, label) ) return image_info def get_pxe_config_file_path(node_uuid, ipxe_enabled=False): if ipxe_enabled: return os.path.join(get_ipxe_root_dir(), node_uuid, 'config') else: return os.path.join(get_root_dir(), node_uuid, 'config') def get_file_path_from_label(node_uuid, root_dir, label): if label == 'ks_template': return os.path.join(get_ipxe_root_dir(), node_uuid, 'ks.cfg.template') elif label == 'ks_cfg': return os.path.join(get_ipxe_root_dir(), node_uuid, 'ks.cfg') elif label == 'stage2': return os.path.join(get_ipxe_root_dir(), node_uuid, 'LiveOS', 'squashfs.img') else: return os.path.join(root_dir, node_uuid, label) def get_http_url_path_from_label(http_url, node_uuid, label): if label == 'ks_template': return '/'.join([http_url, node_uuid, 'ks.cfg.template']) elif label == 'ks_cfg': return '/'.join([http_url, node_uuid, 'ks.cfg']) elif label == 'stage2': return '/'.join([http_url, node_uuid]) else: return '/'.join([http_url, node_uuid, label]) def create_pxe_config(task, pxe_options, template=None, ipxe_enabled=False): LOG.debug("Building PXE config for node %s", task.node.uuid) if template is None: if ipxe_enabled: template = deploy_utils.get_ipxe_config_template(task.node) else: template = deploy_utils.get_pxe_config_template(task.node) _ensure_config_dirs_exist(task, ipxe_enabled) pxe_config_file_path = get_pxe_config_file_path( task.node.uuid, ipxe_enabled=ipxe_enabled) is_uefi_boot_mode = (boot_mode_utils.get_boot_mode(task.node) == 'uefi') uefi_with_grub = is_uefi_boot_mode and not ipxe_enabled if uefi_with_grub: pxe_config_root_tag = '(( ROOT ))' pxe_config_disk_ident = '(( DISK_IDENTIFIER ))' else: pxe_config_root_tag = '{{ ROOT }}' pxe_config_disk_ident = '{{ DISK_IDENTIFIER }}' params = {'pxe_options': pxe_options, 'ROOT': pxe_config_root_tag, 'DISK_IDENTIFIER': pxe_config_disk_ident} pxe_config = utils.render_template(template, params) utils.write_to_file(pxe_config_file_path, pxe_config) _link_mac_pxe_configs(task, ipxe_enabled=ipxe_enabled) if uefi_with_grub: try: _link_ip_address_pxe_configs(task, ipxe_enabled) except 
exception.FailedToGetIPAddressOnPort as e: if CONF.dhcp.dhcp_provider != 'none': with excutils.save_and_reraise_exception(): LOG.error('Unable to create boot config, IP address ' 'was unable to be retrieved. %(error)s', {'error': e}) def create_ipxe_boot_script(): boot_script = utils.render_template( CONF.pxe.ipxe_boot_script, {'ipxe_for_mac_uri': PXE_CFG_DIR_NAME + '/'}) bootfile_path = os.path.join( CONF.deploy.http_root, os.path.basename(CONF.pxe.ipxe_boot_script)) if (not os.path.isfile(bootfile_path) or not utils.file_has_content(bootfile_path, boot_script)): utils.write_to_file(bootfile_path, boot_script) def clean_up_pxe_config(task, ipxe_enabled=False): LOG.debug("Cleaning up PXE config for node %s", task.node.uuid) is_uefi_boot_mode = (boot_mode_utils.get_boot_mode(task.node) == 'uefi') if is_uefi_boot_mode and not ipxe_enabled: api = dhcp_factory.DHCPFactory().provider ip_addresses = api.get_ip_addresses(task) for port_ip_address in ip_addresses: try: ip_address_path = _get_pxe_ip_address_path(port_ip_address) except exception.InvalidIPv4Address: continue except exception.FailedToGetIPAddressOnPort: continue ironic_utils.unlink_without_raise(ip_address_path) for port in task.ports: client_id = port.extra.get('client-id') ironic_utils.unlink_without_raise( _get_pxe_mac_path(port.address, client_id=client_id, ipxe_enabled=ipxe_enabled)) for path in _get_pxe_grub_mac_path(port.address, ipxe_enabled=ipxe_enabled): ironic_utils.unlink_without_raise(path) if ipxe_enabled: utils.rmtree_without_raise(os.path.join(get_ipxe_root_dir(), task.node.uuid)) else: utils.rmtree_without_raise(os.path.join(get_root_dir(), task.node.uuid)) def _dhcp_option_file_or_url(task, urlboot=False, ip_version=None): try: if task.driver.boot.ipxe_enabled: boot_file = deploy_utils.get_ipxe_boot_file(task.node) else: boot_file = deploy_utils.get_pxe_boot_file(task.node) except AttributeError: boot_file = deploy_utils.get_pxe_boot_file(task.node) if not urlboot: return boot_file elif urlboot: if CONF.my_ipv6 and ip_version == 6: host = utils.wrap_ipv6(CONF.my_ipv6) else: host = utils.wrap_ipv6(CONF.pxe.tftp_server) return "tftp://{host}/{boot_file}".format(host=host, boot_file=boot_file) def dhcp_options_for_instance(task, ipxe_enabled=False, url_boot=False, ip_version=None): if ip_version: use_ip_version = ip_version else: use_ip_version = int(CONF.pxe.ip_version) dhcp_opts = [] dhcp_provider_name = CONF.dhcp.dhcp_provider if use_ip_version == 4: boot_file_param = DHCP_BOOTFILE_NAME else: boot_file_param = DHCPV6_BOOTFILE_NAME url_boot = True boot_file = _dhcp_option_file_or_url(task, url_boot, use_ip_version) if ipxe_enabled: script_name = os.path.basename(CONF.pxe.ipxe_boot_script) ipxe_script_url = '/'.join([CONF.deploy.http_url, script_name]) if dhcp_provider_name == 'neutron': if use_ip_version != 6: dhcp_opts.append( {'opt_name': "tag:!ipxe,%s" % boot_file_param, 'opt_value': boot_file} ) dhcp_opts.append( {'opt_name': "tag:ipxe,%s" % boot_file_param, 'opt_value': ipxe_script_url} ) else: dhcp_opts.append( {'opt_name': "tag:!ipxe6,%s" % boot_file_param, 'opt_value': boot_file}) dhcp_opts.append( {'opt_name': "tag:ipxe6,%s" % boot_file_param, 'opt_value': ipxe_script_url}) else: if use_ip_version == 6: LOG.warning('IPv6 is enabled and the DHCP driver appears set ' 'to a plugin aside from "neutron". 
Node %(name)s ' 'may not receive proper DHCPv6 provided ' 'boot parameters.', {'name': task.node.uuid}) dhcp_opts.append({'opt_name': "!%s,%s" % (DHCP_IPXE_ENCAP_OPTS, boot_file_param), 'opt_value': boot_file}) dhcp_opts.append({'opt_name': boot_file_param, 'opt_value': ipxe_script_url}) else: dhcp_opts.append({'opt_name': boot_file_param, 'opt_value': boot_file}) if not url_boot: dhcp_opts.append( {'opt_name': DHCP_TFTP_PATH_PREFIX, 'opt_value': get_tftp_path_prefix()}) if not url_boot: dhcp_opts.append({'opt_name': DHCP_TFTP_SERVER_NAME, 'opt_value': CONF.pxe.tftp_server}) dhcp_opts.append({'opt_name': DHCP_TFTP_SERVER_ADDRESS, 'opt_value': CONF.pxe.tftp_server}) if not url_boot: dhcp_opts.append({'opt_name': 'server-ip-address', 'opt_value': CONF.pxe.tftp_server}) for opt in dhcp_opts: opt.update({'ip_version': use_ip_version}) return dhcp_opts def get_tftp_path_prefix(): return os.path.join(CONF.pxe.tftp_root, '')
Apache License 2.0
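get_path_relative_to_tftp_root is a thin wrapper over os.path.relpath against the configured TFTP root. A minimal sketch of the same computation, with a hypothetical tftp_root value passed in explicitly instead of read from ironic's CONF.pxe.tftp_root:

import os

def path_relative_to_root(file_path, tftp_root="/tftpboot"):
    # Same idea as get_path_relative_to_tftp_root; os.path.join(tftp_root, "")
    # mirrors get_tftp_path_prefix(), which returns the root with a trailing slash.
    return os.path.relpath(file_path, os.path.join(tftp_root, ""))

print(path_relative_to_root("/tftpboot/1be26c0b/deploy_kernel"))  # -> "1be26c0b/deploy_kernel"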
xilinx/pyxir
python/pyxir/contrib/target/components/DPUCADF8H/dpu_op_support.py
conv2d_transpose_op_support
python
def conv2d_transpose_op_support(X, bXs, tXs):
    data_layout = X.attrs["data_layout"]
    kernel_h, kernel_w = X.attrs["kernel_size"]
    stride_h, stride_w = X.attrs["strides"]
    dilation_h, dilation_w = X.attrs["dilation"]
    padding_h, padding_w = (
        X.attrs["padding"][data_layout.index("H")],
        X.attrs["padding"][data_layout.index("W")],
    )
    padding_h_top, padding_h_bot = padding_h
    padding_w_left, padding_w_right = padding_w
    padding = X.attrs["padding"]
    ch_in, ch_out = X.attrs["channels"]
    groups = X.attrs["groups"]
    return (
        groups == 1
        and kernel_h >= 1
        and kernel_h <= 15
        and kernel_w >= 1
        and kernel_w <= 15
        and stride_h in [1, 2, 4, 8]
        and stride_w in [1, 2, 4, 8]
        and ch_in >= 1
        and ch_in <= 4096
        and ch_out >= 1
        and ch_out <= 4096
        and dilation_h in [1, 2, 4]
        and dilation_w in [1, 2, 4]
    )
Check whether we can execute the provided Conv2DTranspose operator on the DPUCADF8H target
https://github.com/xilinx/pyxir/blob/bef661d6d77adcdbd2cf4163f2cf3a1d31d40406/python/pyxir/contrib/target/components/DPUCADF8H/dpu_op_support.py#L118-L158
import math import pyxir import logging logger = logging.getLogger("pyxir") @pyxir.register_op_support_check("DPUCADF8H", "BatchNorm") def batchnorm_op_support(X, bXs, tXs): axis = X.attrs["axis"] channels = X.shapes[axis] return channels >= 1 and channels <= 4096 @pyxir.register_op_support_check("DPUCADF8H", "BiasAdd") def biasadd_op_support(X, bXs, tXs): axis = X.attrs["axis"] channels = X.shapes[axis] return channels >= 1 and channels <= 4096 @pyxir.register_op_support_check("DPUCADF8H", "Cast") def cast_op_support(X, bXs, tXs): dtype = X.attrs["dtype"] return dtype == "float32" @pyxir.register_op_support_check("DPUCADF8H", "Concat") def concat_op_support(X, bXs, tXs): axis = X.attrs["axis"] channels = X.shapes[axis] return channels >= 1 and channels <= 4096 @pyxir.register_op_support_check("DPUCADF8H", "Convolution") def conv2d_op_support(X, bXs, tXs): data_layout = X.attrs["data_layout"] kernel_h, kernel_w = X.attrs["kernel_size"] stride_h, stride_w = X.attrs["strides"] dilation_h, dilation_w = X.attrs["dilation"] padding_h, padding_w = ( X.attrs["padding"][data_layout.index("H")], X.attrs["padding"][data_layout.index("H")], ) padding_h_top, padding_h_bot = padding_h padding_w_left, padding_w_right = padding_w ch_in, ch_out = X.attrs["channels"] groups = X.attrs["groups"] return ( groups == 1 and kernel_h >= 1 and kernel_h <= 15 and kernel_w >= 1 and kernel_w <= 15 and stride_h in [1, 2, 4, 8] and stride_w in [1, 2, 4, 8] and ch_in >= 1 and ch_in <= 4096 and ch_out >= 1 and ch_out <= 4096 and dilation_h in [1, 2, 4] and dilation_w in [1, 2, 4] ) @pyxir.register_op_support_check("DPUCADF8H", "Conv2DTranspose")
Apache License 2.0
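The op-support checks above are pure range and membership tests over layer attributes. A minimal standalone sketch of the same constraint logic for a conv-style layer, using plain values instead of pyxir's X.attrs (the function and parameter names here are illustrative, not part of the pyxir API):

def conv_attrs_supported(kernel, strides, dilation, channels, groups):
    # Same bounds as the DPUCADF8H checks: kernel 1-15, stride/dilation from a
    # fixed set, channels 1-4096, and no grouped convolution.
    kernel_h, kernel_w = kernel
    stride_h, stride_w = strides
    dilation_h, dilation_w = dilation
    ch_in, ch_out = channels
    return (
        groups == 1
        and 1 <= kernel_h <= 15 and 1 <= kernel_w <= 15
        and stride_h in [1, 2, 4, 8] and stride_w in [1, 2, 4, 8]
        and 1 <= ch_in <= 4096 and 1 <= ch_out <= 4096
        and dilation_h in [1, 2, 4] and dilation_w in [1, 2, 4]
    )

print(conv_attrs_supported((3, 3), (2, 2), (1, 1), (64, 128), 1))  # True
print(conv_attrs_supported((3, 3), (3, 3), (1, 1), (64, 128), 1))  # False: stride 3 unsupported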
convexengineering/gpkit
gpkit/constraints/set.py
sort_constraints_dict
python
def sort_constraints_dict(iterable):
    if sys.version_info >= (3, 7) or isinstance(iterable, OrderedDict):
        return iterable.keys(), iterable.values()
    items = sorted(list(iterable.items()), key=_sort_constraints)
    return (item[0] for item in items), (item[1] for item in items)
Sort a dictionary of {k: constraint} and return its keys and values
https://github.com/convexengineering/gpkit/blob/6f3b1e431722832c312ac8dead6e9bdcebd7c6f9/gpkit/constraints/set.py#L38-L43
import sys from collections import defaultdict, OrderedDict from itertools import chain import numpy as np from ..keydict import KeySet, KeyDict from ..small_scripts import try_str_without from ..repr_conventions import ReprMixin from .single_equation import SingleEquationConstraint def add_meq_bounds(bounded, meq_bounded): still_alive = True while still_alive: still_alive = False for bound in list(meq_bounded): if bound in bounded: del meq_bounded[bound] continue for condition in meq_bounded[bound]: if condition.issubset(bounded): del meq_bounded[bound] bounded.add(bound) still_alive = True break def _sort_by_name_and_idx(var): return (var.key.str_without(["units", "idx"]), var.key.idx or ()) def _sort_constraints(item): label, constraint = item return (not isinstance(constraint, SingleEquationConstraint), bool(getattr(constraint, "lineage", None)), label)
MIT License
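On Python 3.7+ (or with an OrderedDict), sort_constraints_dict simply returns the dict's keys and values in insertion order; only older, unordered dicts are sorted via _sort_constraints. A small usage sketch, where the constraint values are plain stand-in strings rather than real gpkit constraints:

from collections import OrderedDict
# sort_constraints_dict as defined above (gpkit.constraints.set)

constraints = OrderedDict([("mass", "m >= 1"), ("drag", "D >= 0.5*rho*v**2*S*CD")])
keys, values = sort_constraints_dict(constraints)
print(list(keys))    # ['mass', 'drag'] -- insertion order preserved
print(list(values))  # the corresponding constraint objects, in the same order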
napari/napari
napari/layers/utils/color_manager_utils.py
map_property
python
def map_property(
    prop: np.ndarray,
    colormap: Colormap,
    contrast_limits: Union[None, Tuple[float, float]] = None,
) -> Tuple[np.ndarray, Tuple[float, float]]:
    if contrast_limits is None:
        contrast_limits = (prop.min(), prop.max())
    normalized_properties = np.interp(prop, contrast_limits, (0, 1))
    mapped_properties = colormap.map(normalized_properties)
    return mapped_properties, contrast_limits
Apply a colormap to a property Parameters ---------- prop : np.ndarray The property to be colormapped colormap : napari.utils.Colormap The colormap object to apply to the property contrast_limits : Union[None, Tuple[float, float]] The contrast limits for applying the colormap to the property. If a 2-tuple is provided, it should be provided as (lower_bound, upper_bound). If None is provided, the contrast limits will be set to (property.min(), property.max()). Default value is None.
https://github.com/napari/napari/blob/c4c987c880fe125da608edf427767eafe7f2b3f4/napari/layers/utils/color_manager_utils.py#L54-L79
from typing import Any, Dict, Tuple, Union import numpy as np from ...utils.colormaps import Colormap from ...utils.translations import trans def guess_continuous(property: np.ndarray) -> bool: if ( issubclass(property.dtype.type, np.floating) or len(np.unique(property)) > 16 ): return True else: return False def is_color_mapped(color, properties): if isinstance(color, str): if color in properties: return True else: return False elif isinstance(color, dict): return True elif isinstance(color, (list, np.ndarray)): return False else: raise ValueError( trans._( 'face_color should be the name of a color, an array of colors, or the name of an property', deferred=True, ) )
BSD 3-Clause New or Revised License
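The core of map_property is the np.interp rescale to [0, 1] before the colormap lookup. A minimal sketch of just that normalization step with NumPy, leaving the napari Colormap out of it:

import numpy as np

prop = np.array([2.0, 5.0, 8.0, 11.0])
contrast_limits = (prop.min(), prop.max())            # (2.0, 11.0), the map_property default
normalized = np.interp(prop, contrast_limits, (0, 1))  # rescale into [0, 1]
print(normalized)  # [0.         0.33333333 0.66666667 1.        ]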
tensorflow/tensor2tensor
tensor2tensor/layers/common_video.py
decode_to_shape
python
def decode_to_shape(inputs, shape, scope):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        x = inputs
        x = tfl.flatten(x)
        x = tfl.dense(x, shape[2], activation=None, name="dec_dense")
        x = tf.expand_dims(x, axis=1)
        return x
Decode the given tensor to the given image shape.
https://github.com/tensorflow/tensor2tensor/blob/c22a226704e5887862bf9edd9f269892c9016ad4/tensor2tensor/layers/common_video.py#L57-L64
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensor2tensor.layers import common_layers from tensor2tensor.utils import contrib import tensorflow.compat.v1 as tf from tensorflow.python.ops import summary_op_util try: from tensorflow.python.distribute import summary_op_util as distribute_summary_op_util except ImportError: distribute_summary_op_util = summary_op_util tfl = common_layers.layers() def swap_time_and_batch_axes(inputs): transposed_axes = tf.concat([[1, 0], tf.range(2, tf.rank(inputs))], axis=0) return tf.transpose(inputs, transposed_axes) def encode_to_shape(inputs, shape, scope): with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): w, h = shape[1], shape[2] x = inputs x = tfl.flatten(x) x = tfl.dense(x, w * h, activation=None, name="enc_dense") x = tf.reshape(x, (-1, w, h, 1)) return x
Apache License 2.0
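decode_to_shape flattens its input, projects it to shape[2] units with a dense layer, and re-adds a length-1 time axis, so an input of shape (batch, d) becomes (batch, 1, shape[2]). A NumPy sketch of that shape flow under an assumed random weight matrix (this mirrors the transformation only; it is not the actual tf.layers call):

import numpy as np

batch, d, target_units = 4, 32, 16      # target_units plays the role of shape[2]
x = np.random.randn(batch, d)
w = np.random.randn(d, target_units)    # stand-in for the "dec_dense" kernel
x = x.reshape(batch, -1) @ w            # flatten + dense projection
x = x[:, np.newaxis, :]                 # expand_dims(axis=1)
print(x.shape)                          # (4, 1, 16)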
googleapis/python-analytics-data
samples/snippets/run_pivot_report.py
run_pivot_report
python
def run_pivot_report(property_id="YOUR-GA4-PROPERTY-ID"):
    client = BetaAnalyticsDataClient()
    request = RunPivotReportRequest(
        property=f"properties/{property_id}",
        date_ranges=[
            DateRange(start_date="2021-01-01", end_date="2021-01-30"),
        ],
        pivots=[
            Pivot(
                field_names=["country"],
                limit=250,
                order_bys=[
                    OrderBy(
                        dimension=OrderBy.DimensionOrderBy(dimension_name="country")
                    )
                ],
            ),
            Pivot(
                field_names=["browser"],
                offset=3,
                limit=3,
                order_bys=[
                    OrderBy(
                        metric=OrderBy.MetricOrderBy(metric_name="sessions"), desc=True
                    )
                ],
            ),
        ],
        metrics=[Metric(name="sessions")],
        dimensions=[
            Dimension(name="country"),
            Dimension(name="browser"),
        ],
    )
    response = client.run_pivot_report(request)
    print_run_pivot_report_response(response)
Runs a pivot query to build a report of session counts by country, pivoted by the browser dimension.
https://github.com/googleapis/python-analytics-data/blob/f42abbe31d386bd6ee03159dbc04b4c50bc91e4c/samples/snippets/run_pivot_report.py#L41-L79
from google.analytics.data_v1beta import BetaAnalyticsDataClient from google.analytics.data_v1beta.types import DateRange from google.analytics.data_v1beta.types import Dimension from google.analytics.data_v1beta.types import Metric from google.analytics.data_v1beta.types import OrderBy from google.analytics.data_v1beta.types import Pivot from google.analytics.data_v1beta.types import RunPivotReportRequest def run_sample(): property_id = "YOUR-GA4-PROPERTY-ID" run_pivot_report(property_id)
Apache License 2.0
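A hedged sketch of consuming the response returned by run_pivot_report. It assumes the standard GA4 Data API row structure (rows carrying dimension_values and metric_values); print_run_pivot_report_response is defined elsewhere in the sample, presumably along these lines:

def print_rows(response):
    # Each row pairs the ordered dimension values (country, browser)
    # with the requested metric values (sessions).
    for row in response.rows:
        dims = [dv.value for dv in row.dimension_values]
        mets = [mv.value for mv in row.metric_values]
        print(dims, mets)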
rossumai/nvprof-tools
nvprof/__init__.py
slice_events
python
def slice_events(conn, start_ns, end_ns):
    base_time, _ = time_range(conn)
    abs_start_ns, abs_end_ns = base_time + start_ns, base_time + end_ns
    c = conn.cursor()
    for table in tables_with_prefix(interval_tables):
        c.execute('DELETE FROM {} WHERE end < {} OR start > {}'.format(table, abs_start_ns, abs_end_ns))
    conn.isolation_level = None
    conn.execute('VACUUM')
    conn.isolation_level = ''
    conn.commit()
Delete events outside the given range; keep events inside the range and events overlapping it.
https://github.com/rossumai/nvprof-tools/blob/50a64bd078f65b7f77ac92b502799716ed49453c/nvprof/__init__.py#L117-L131
import sqlite3 import sys table_prefix = 'CUPTI_ACTIVITY_KIND_' interval_tables = [ 'CDP_KERNEL', 'CONCURRENT_KERNEL', 'DRIVER', 'KERNEL', 'MEMCPY', 'MEMCPY2', 'MEMSET', 'OPENACC_DATA', 'OPENACC_LAUNCH', 'OPENACC_OTHER', 'OVERHEAD', 'RUNTIME', 'SYNCHRONIZATION', 'UNIFIED_MEMORY_COUNTER'] unnecessary_tables = ['RUNTIME', 'DRIVER'] def tables_with_prefix(short_tables): return ['%s%s' % (table_prefix, table) for table in short_tables] def time_range(conn): tables = tables_with_prefix([ t for t in interval_tables if t not in unnecessary_tables]) times = [] c = conn.cursor() for table in tables: c.execute('SELECT MIN(start), MAX(end) FROM {}'.format(table)) start, end = c.fetchone() if start is not None and end is not None: times.append((start, end)) if len(times) > 0: starts, ends = zip(*times) start_ns, end_ns = min(starts), max(ends) return start_ns, end_ns else: return 0, 0 def total_time(conn): start_ns, end_ns = time_range(conn) total_time_sec = (end_ns - start_ns) * 1e-9 return total_time_sec def list_tables(conn): c = conn.cursor() c.execute('select name from sqlite_master where type="table" and name like "CUPTI%" order by name') return [row[0] for row in c.fetchall()] def table_sizes(conn): table_sizes = {} c = conn.cursor() for table in list_tables(conn): c.execute('select count(*) from %s' % table) table_sizes[table] = c.fetchone()[0] return table_sizes def total_event_count(conn): ts = table_sizes(conn) return sum(ts.values()) def biggest_tables(conn): ts = table_sizes(conn) return sorted([(n,s) for (n,s) in ts.items() if s > 0], key=lambda item: item[1], reverse=True) def compute_utilization(conn): c = conn.cursor() c.execute("SELECT deviceId, 100. * SUM(1.0 * end - 1.0 * start) / (MAX(end) - MIN(start)) FROM CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL GROUP BY deviceId") return dict(c.fetchall()) def gpu_count(conn): c = conn.cursor() c.execute("SELECT COUNT(*) FROM CUPTI_ACTIVITY_KIND_DEVICE") return c.fetchone()[0] def print_info(conn): print("Number of GPUs: %d" % gpu_count(conn)) utilization_per_device = compute_utilization(conn) mean_utilization = sum(utilization_per_device.values()) / len(utilization_per_device) print("Compute utilization (mean): %0.2f %%" % mean_utilization) for dev, util in utilization_per_device.items(): print(' GPU %d: %0.2f %%' % (dev, util)) print('Total time: %.03f sec' % total_time(conn)) ts = biggest_tables(conn) print('Total number of events:', sum(s for (n, s) in ts)) print('Events by table:') for (name, size) in ts: print(name, ':', size) def truncate_tables(conn, tables): c = conn.cursor() for table in tables: c.execute('DELETE FROM %s' % table) c.execute('VACUUM') conn.commit() def delete_unnecessary_events(conn): tables = ['%s%s' % (table_prefix, table) for table in unnecessary_tables] truncate_tables(conn, tables)
MIT License
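slice_events mutates the profile database in place, so a typical workflow copies the profile file first and then opens it with sqlite3. A small usage sketch (file names are examples):

import shutil
import sqlite3

from nvprof import slice_events

shutil.copyfile("profile.nvprof", "profile_sliced.nvprof")
conn = sqlite3.connect("profile_sliced.nvprof")
# Keep only events in the first 2 seconds of the timeline (offsets are in ns,
# relative to the base time that slice_events computes via time_range).
slice_events(conn, 0, int(2e9))
conn.close()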
frank-qlu/recruit
招聘爬虫/zlzpView/static/zlzpView/venv/Lib/site-packages/numpy/linalg/linalg.py
tensorsolve
python
def tensorsolve(a, b, axes=None):
    a, wrap = _makearray(a)
    b = asarray(b)
    an = a.ndim
    if axes is not None:
        allaxes = list(range(0, an))
        for k in axes:
            allaxes.remove(k)
            allaxes.insert(an, k)
        a = a.transpose(allaxes)
    oldshape = a.shape[-(an - b.ndim):]
    prod = 1
    for k in oldshape:
        prod *= k
    a = a.reshape(-1, prod)
    b = b.ravel()
    res = wrap(solve(a, b))
    res.shape = oldshape
    return res
Solve the tensor equation ``a x = b`` for x. It is assumed that all indices of `x` are summed over in the product, together with the rightmost indices of `a`, as is done in, for example, ``tensordot(a, x, axes=b.ndim)``. Parameters ---------- a : array_like Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals the shape of that sub-tensor of `a` consisting of the appropriate number of its rightmost indices, and must be such that ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be 'square'). b : array_like Right-hand tensor, which can be of any shape. axes : tuple of ints, optional Axes in `a` to reorder to the right, before inversion. If None (default), no reordering is done. Returns ------- x : ndarray, shape Q Raises ------ LinAlgError If `a` is singular or not 'square' (in the above sense). See Also -------- numpy.tensordot, tensorinv, numpy.einsum Examples -------- >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) >>> b = np.random.randn(2*3, 4) >>> x = np.linalg.tensorsolve(a, b) >>> x.shape (2, 3, 4) >>> np.allclose(np.tensordot(a, x, axes=3), b) True
https://github.com/frank-qlu/recruit/blob/0875fb1d2cfb581aaa8abc7a97880c0ce5bf6147/招聘爬虫/zlzpView/static/zlzpView/venv/Lib/site-packages/numpy/linalg/linalg.py#L253-L320
from __future__ import division, absolute_import, print_function __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', 'LinAlgError', 'multi_dot'] import functools import operator import warnings from numpy.core import ( array, asarray, zeros, empty, empty_like, intc, single, double, csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot, add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite, finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs, atleast_2d, intp, asanyarray, object_, matmul, swapaxes, divide, count_nonzero, isnan ) from numpy.core.multiarray import normalize_axis_index from numpy.core.overrides import set_module from numpy.core import overrides from numpy.lib.twodim_base import triu, eye from numpy.linalg import lapack_lite, _umath_linalg array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy.linalg') _N = b'N' _V = b'V' _A = b'A' _S = b'S' _L = b'L' fortran_int = intc @set_module('numpy.linalg') class LinAlgError(Exception): def _determine_error_states(): errobj = geterrobj() bufsize = errobj[0] with errstate(invalid='call', over='ignore', divide='ignore', under='ignore'): invalid_call_errmask = geterrobj()[1] return [bufsize, invalid_call_errmask, None] _linalg_error_extobj = _determine_error_states() del _determine_error_states def _raise_linalgerror_singular(err, flag): raise LinAlgError("Singular matrix") def _raise_linalgerror_nonposdef(err, flag): raise LinAlgError("Matrix is not positive definite") def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): raise LinAlgError("Eigenvalues did not converge") def _raise_linalgerror_svd_nonconvergence(err, flag): raise LinAlgError("SVD did not converge") def _raise_linalgerror_lstsq(err, flag): raise LinAlgError("SVD did not converge in Linear Least Squares") def get_linalg_error_extobj(callback): extobj = list(_linalg_error_extobj) extobj[2] = callback return extobj def _makearray(a): new = asarray(a) wrap = getattr(a, "__array_prepare__", new.__array_wrap__) return new, wrap def isComplexType(t): return issubclass(t, complexfloating) _real_types_map = {single : single, double : double, csingle : single, cdouble : double} _complex_types_map = {single : csingle, double : cdouble, csingle : csingle, cdouble : cdouble} def _realType(t, default=double): return _real_types_map.get(t, default) def _complexType(t, default=cdouble): return _complex_types_map.get(t, default) def _linalgRealType(t): return double def _commonType(*arrays): result_type = single is_complex = False for a in arrays: if issubclass(a.dtype.type, inexact): if isComplexType(a.dtype.type): is_complex = True rt = _realType(a.dtype.type, default=None) if rt is None: raise TypeError("array type %s is unsupported in linalg" % (a.dtype.name,)) else: rt = double if rt is double: result_type = double if is_complex: t = cdouble result_type = _complex_types_map[result_type] else: t = double return t, result_type _fastCT = fastCopyAndTranspose def _to_native_byte_order(*arrays): ret = [] for arr in arrays: if arr.dtype.byteorder not in ('=', '|'): ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) else: ret.append(arr) if len(ret) == 1: return ret[0] else: return ret def _fastCopyAndTranspose(type, *arrays): cast_arrays = () for a in arrays: if a.dtype.type is type: cast_arrays = cast_arrays + (_fastCT(a),) else: cast_arrays = cast_arrays + 
(_fastCT(a.astype(type)),) if len(cast_arrays) == 1: return cast_arrays[0] else: return cast_arrays def _assertRank2(*arrays): for a in arrays: if a.ndim != 2: raise LinAlgError('%d-dimensional array given. Array must be ' 'two-dimensional' % a.ndim) def _assertRankAtLeast2(*arrays): for a in arrays: if a.ndim < 2: raise LinAlgError('%d-dimensional array given. Array must be ' 'at least two-dimensional' % a.ndim) def _assertNdSquareness(*arrays): for a in arrays: m, n = a.shape[-2:] if m != n: raise LinAlgError('Last 2 dimensions of the array must be square') def _assertFinite(*arrays): for a in arrays: if not (isfinite(a).all()): raise LinAlgError("Array must not contain infs or NaNs") def _isEmpty2d(arr): return arr.size == 0 and product(arr.shape[-2:]) == 0 def _assertNoEmpty2d(*arrays): for a in arrays: if _isEmpty2d(a): raise LinAlgError("Arrays cannot be empty") def transpose(a): return swapaxes(a, -1, -2) def _tensorsolve_dispatcher(a, b, axes=None): return (a, b) @array_function_dispatch(_tensorsolve_dispatcher)
Apache License 2.0
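The key invariant tensorsolve relies on is that the product of the trailing dimensions Q of `a` equals prod(b.shape); the `axes` argument only reorders axes of `a` before the solve. A short check of that invariant, with the shapes taken from the docstring example:

import numpy as np

a = np.eye(2 * 3 * 4).reshape(2 * 3, 4, 2, 3, 4)
b = np.random.randn(2 * 3, 4)
Q = a.shape[b.ndim:]                   # trailing dims of a -> (2, 3, 4)
assert np.prod(Q) == np.prod(b.shape)  # 24 == 24, so `a` is "square" in tensorsolve's sense
x = np.linalg.tensorsolve(a, b)
print(x.shape)                         # (2, 3, 4)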
azure/autorest.python
test/vanilla/legacy/Expected/AcceptanceTests/Validation/validation/operations/_auto_rest_validation_test_operations.py
AutoRestValidationTestOperationsMixin.validation_of_method_parameters
python
def validation_of_method_parameters(
    self, resource_group_name, id, **kwargs
):
    cls = kwargs.pop("cls", None)
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop("error_map", {}))

    request = build_validation_of_method_parameters_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        id=id,
        template_url=self.validation_of_method_parameters.metadata["url"],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize("Product", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
Validates input parameters on the method. See swagger for details. :param resource_group_name: Required string between 3 and 10 chars with pattern [a-zA-Z0-9]+. :type resource_group_name: str :param id: Required int multiple of 10 from 100 to 1000. :type id: int :keyword callable cls: A custom type or function that will be passed the direct response :return: Product, or the result of cls(response) :rtype: ~validation.models.Product :raises: ~azure.core.exceptions.HttpResponseError
https://github.com/azure/autorest.python/blob/90d60a965788e3b4c0809e6686bdc3525acac89c/test/vanilla/legacy/Expected/AcceptanceTests/Validation/validation/operations/_auto_rest_validation_test_operations.py#L166-L210
import functools from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error, ) from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from msrest import Serializer from .. import models as _models from .._vendor import _convert_request, _format_url_section if TYPE_CHECKING: from typing import Any, Callable, Dict, Generic, Optional, TypeVar T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() def build_validation_of_method_parameters_request( subscription_id, resource_group_name, id, **kwargs ): api_version = "1.0.0" accept = "application/json" url = kwargs.pop("template_url", '/fakepath/{subscriptionId}/{resourceGroupName}/{id}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=10, min_length=3, pattern=r'[a-zA-Z0-9\']+'), "id": _SERIALIZER.url("id", id, 'int', maximum=1000, minimum=100, multiple=10), } url = _format_url_section(url, **path_format_arguments) query_parameters = kwargs.pop("params", {}) query_parameters['apiVersion'] = _SERIALIZER.query("api_version", api_version, 'str') header_parameters = kwargs.pop("headers", {}) header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_validation_of_body_request( subscription_id, resource_group_name, id, **kwargs ): content_type = kwargs.pop('content_type', None) api_version = "1.0.0" accept = "application/json" url = kwargs.pop("template_url", '/fakepath/{subscriptionId}/{resourceGroupName}/{id}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=10, min_length=3, pattern=r'[a-zA-Z0-9]+'), "id": _SERIALIZER.url("id", id, 'int', maximum=1000, minimum=100, multiple=10), } url = _format_url_section(url, **path_format_arguments) query_parameters = kwargs.pop("params", {}) query_parameters['apiVersion'] = _SERIALIZER.query("api_version", api_version, 'str') header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_with_constant_in_path_request( **kwargs ): constant_param = "constant" url = kwargs.pop("template_url", '/validation/constantsInPath/{constantParam}/value') path_format_arguments = { "constantParam": _SERIALIZER.url("constant_param", constant_param, 'str'), } url = _format_url_section(url, **path_format_arguments) return HttpRequest( method="GET", url=url, **kwargs ) def build_post_with_constant_in_body_request( **kwargs ): content_type = kwargs.pop('content_type', None) constant_param = "constant" accept = "application/json" url = kwargs.pop("template_url", '/validation/constantsInPath/{constantParam}/value') path_format_arguments = { 
"constantParam": _SERIALIZER.url("constant_param", constant_param, 'str'), } url = _format_url_section(url, **path_format_arguments) header_parameters = kwargs.pop("headers", {}) if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="POST", url=url, headers=header_parameters, **kwargs ) class AutoRestValidationTestOperationsMixin(object): @distributed_trace
MIT License
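A hedged sketch of what the request builder shown in the context produces before it is run through the pipeline. The import path is an assumption based on the generated module layout above, and the builder is an internal helper, so treat this as illustrative only:

# Assumed import path for the generated (private) builder shown in the context.
from validation.operations._auto_rest_validation_test_operations import (
    build_validation_of_method_parameters_request,
)

request = build_validation_of_method_parameters_request(
    subscription_id="sub-id",
    resource_group_name="group123",  # must match [a-zA-Z0-9]+ and be 3-10 chars long
    id=150,                          # must be a multiple of 10 between 100 and 1000
)
print(request.method)  # "GET"
print(request.url)     # path built from /fakepath/{subscriptionId}/{resourceGroupName}/{id}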
ueg1990/imgur-cli
imgur_cli/cli_api.py
cmd_gallery_report
python
def cmd_gallery_report(client, args):
    report_gallery_item = client.report_gallery_item(args.item_id)
    generate_output({'report_gallery_item': report_gallery_item})
Report an item in the gallery
https://github.com/ueg1990/imgur-cli/blob/359508a8806d9e583849f5de9115a5bbcd5e04b4/imgur_cli/cli_api.py#L703-L706
import os import imgurpython from imgur_cli import exceptions from imgur_cli.utils import (cli_arg, cli_subparser, data_fields, generate_output, format_comment_tree) from imgur_cli.utils import cli_subparser from imgur_cli.utils import data_fields from imgur_cli.utils import generate_output SUBPARSERS = {'gallery': 'Gallery subparser', 'album': 'Album subparser', 'image': 'Image subparser', 'comment': 'Comment subparser', 'memegen': 'Memegen subparser', 'account': 'Account subparser', 'conversation': 'Conversation subparser', 'notification': 'Notification subparser', 'auth': 'Authentication subparser'} @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_user(client, args): account_user = client.get_account(args.username) data = account_user.__dict__ generate_output({'account_user': data}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_gallery_favorites(client, args): gallery_favorites = client.get_gallery_favorites(args.username) data = [item.__dict__ for item in gallery_favorites] generate_output({'gallery_favorites': data}, args.output_file) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_favorites(client, args): account_favorites = client.get_account_favorites(args.username) data = [item.__dict__ for item in account_favorites] generate_output({'account_favorites': data}, args.output_file) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='The data paging number (defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_submissions(client, args): account_submissions = client.get_account_submissions(args.username, args.page) data = [item.__dict__ for item in account_submissions] generate_output({'account_submissions': data}, args.output_file) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_settings(client, args): account_settings = client.get_account_settings(args.username) data = account_settings.__dict__ generate_output({'account_settings': data}) @cli_subparser('account') @cli_arg('user', help='Username of Account') @cli_arg('--bio', metavar='<bio>', help='The biography of the user, ' 'is displayed in the gallery profile page') @cli_arg('--public-images', metavar='<public-images>', choices=['true', 'false'], help='Set the users images to private ' 'or public by default') @cli_arg('--messaging-enabled', metavar='<messaging-enabled>', choices=['true', 'false'], help='Allows the user to enable or ' 'disable private messages') @cli_arg('--album-privacy', metavar='<album-privacy>', choices=['public', 'hidden', 'secret'], help='public | hidden | secret - ' 'Sets the default privacy level of albums the users creates') @cli_arg('--accepted-gallery-terms', metavar='<accepted-gallery-terms>', choices=['true', 'false'], help='The user agreement to the Imgur ' 'Gallery terms') @cli_arg('--username', metavar='<username>', help='A valid Imgur username (between 4 and 63 alphanumeric characters)') def cmd_account_change_settings(client, args): fields = data_fields(args, client.allowed_account_fields) account_settings = client.change_account_settings(args.user, fields) 
generate_output({'account_settings': account_settings}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_verification_status(client, args): email_verification_status = client.get_email_verification_status(args.username) generate_output({'email_verification_status': email_verification_status}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_send_verification(client, args): verification_email = client.send_verification_email(args.username) generate_output({'verification_email': verification_email}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_albums(client, args): account_albums = client.get_account_albums(args.username, args.page) data = [item.__dict__ for item in account_albums] generate_output({'account_albums': data}, args.output_file) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') def cmd_account_album_ids(client, args): account_album_ids = client.get_account_album_ids(args.username, args.page) generate_output({'account_album_ids': account_album_ids}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_album_count(client, args): account_album_count = client.get_account_album_count(args.username) generate_output({'account_album_count': account_album_count}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--sort', default='newest', metavar='<sort>', choices=['best', 'worst', 'oldest', 'newest'], help='best | worst | oldest | newest - defaults to %(default)s') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') def cmd_account_comments(client, args): account_comments = client.get_account_comments(args.username, args.sort, args.page) data = format_comment_tree(account_comments) generate_output({'account_comments': data}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--sort', default='newest', metavar='<sort>', choices=['best', 'worst', 'oldest', 'newest'], help='best | worst | oldest | newest - defaults to %(default)s') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') def cmd_account_comment_ids(client, args): account_comment_ids = client.get_account_comment_ids(args.username, args.sort, args.page) generate_output({'account_comment_ids': account_comment_ids}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_comment_count(client, args): account_comment_count = client.get_account_comment_count(args.username) generate_output({'account_comment_count': account_comment_count}) @cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_account_images(client, args): account_images = client.get_account_images(args.username, args.page) data = [item.__dict__ for item in account_images] generate_output({'account_images': data}, args.output_file) 
@cli_subparser('account') @cli_arg('username', help='Username of Account') @cli_arg('--page', default=0, metavar='<page>', type=int, help='Page number (defaults to %(default)s)') def cmd_account_image_ids(client, args): account_image_ids = client.get_account_image_ids(args.username, args.page) generate_output({'account_image_ids': account_image_ids}) @cli_subparser('account') @cli_arg('username', help='Username of Account') def cmd_account_image_count(client, args): account_image_count = client.get_account_images_count(args.username) generate_output({'account_image_count': account_image_count}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') def cmd_album_id(client, args): album = client.get_album(args.album_id) data = album.__dict__ generate_output({'album': data}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_album_images(client, args): album_images = client.get_album_images(args.album_id) data = [item.__dict__ for item in album_images] generate_output({'album_images': data}, args.output_file) @cli_subparser('album') @cli_arg('--ids', metavar='<ids>', help='Comma separated list of image ids that you ' 'want to be included in the album; you have to be logged in as the user ' 'for adding the image ids') @cli_arg('--title', metavar='<title>', help='The title of the album') @cli_arg('--description', metavar='<description>', help='The description of the album') @cli_arg('--privacy', metavar='<privacy>', choices=['public', 'hidden', 'secret'], help="Sets the privacy level of the album." "Values are : public | hidden | secret." "Defaults to user's privacy settings for logged in users") @cli_arg('--layout', metavar='<layout>', choices=['blog', 'grid', 'horizontal', 'vertical'], help='Sets the layout to display the album. ' 'Values are : blog | grid | horizontal | vertical') @cli_arg('--cover', metavar='<cover>', help='The ID of an image that you want to be the cover of the album; ' 'you have to be logged in as the user') def cmd_album_create(client, args): fields = data_fields(args, client.allowed_album_fields) album = client.create_album(fields) generate_output({'album': album}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') @cli_arg('--ids', metavar='<ids>', help='Comma separated list of image ids that you ' 'want to be included in the album; you have to be logged in as the user ' 'for adding the image ids') @cli_arg('--title', metavar='<title>', help='The title of the album') @cli_arg('--description', metavar='<description>', help='The description of the album') @cli_arg('--privacy', metavar='<privacy>', choices=['public', 'hidden', 'secret'], help="Sets the privacy level of the album." "Values are : public | hidden | secret." "Defaults to user's privacy settings for logged in users") @cli_arg('--layout', metavar='<layout>', choices=['blog', 'grid', 'horizontal', 'vertical'], help='Sets the layout to display the album. 
' 'Values are : blog | grid | horizontal | vertical') @cli_arg('--cover', metavar='<cover>', help='The ID of an image that you want to be the cover of the album; ' 'you have to be logged in as the user') def cmd_album_update(client, args): fields = data_fields(args, client.allowed_album_fields) album = client.update_album(args.album_id, fields) generate_output({'album': album}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') def cmd_album_delete(client, args): delete_album = client.album_delete(args.album_id) generate_output({'delete_album': delete_album}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') def cmd_album_favorite(client, args): favorite_album = client.album_favorite(args.album_id) generate_output({'favorite_album': favorite_album}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') @cli_arg('ids', help='Comma separated list of image ids that you want to be added ' 'to the album') def cmd_album_set_images(client, args): set_images = client.album_set_images(args.album_id, args.ids) generate_output({'set_images': set_images}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') @cli_arg('ids', help='Comma separated list of image ids that you want to be added ' 'to the album') def cmd_album_add_images(client, args): add_images = client.album_add_images(args.album_id, args.ids) generate_output({'add_images': add_images}) @cli_subparser('album') @cli_arg('album_id', help='Album ID') @cli_arg('ids', help='Comma separated list of image ids that you want to be removed ' 'to the album') def cmd_album_remove_images(client, args): remove_images = client.album_remove_images(args.album_id, args.ids) generate_output({'remove_images': remove_images}) @cli_subparser('comment') @cli_arg('comment_id', type=int, help='Comment ID') def cmd_comment_id(client, args): comment = client.get_comment(args.comment_id) data = comment.__dict__ generate_output({'comment': data}) @cli_subparser('comment') @cli_arg('comment_id', type=int, help='Comment ID') def cmd_comment_delete(client, args): delete_comment = client.delete_comment(args.comment_id) generate_output({'delete_comment': delete_comment}) @cli_subparser('comment') @cli_arg('comment_id', type=int, help='Comment ID') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_comment_replies(client, args): comment_replies = client.get_comment_replies(args.comment_id) data = format_comment_tree(comment_replies) generate_output({'comment_replies': data}, args.output_file) @cli_subparser('comment') @cli_arg('comment_id', type=int, help='Comment ID') @cli_arg('image_id', help='Image ID') @cli_arg('comment', help='The comment text, this is what will be displayed') def cmd_comment_reply(client, args): comment_reply = client.post_comment_reply(args.comment_id, args.image_id, args.comment) generate_output({'comment_reply': comment_reply}) @cli_subparser('comment') @cli_arg('comment_id', type=int, help='Comment ID') @cli_arg('--vote', default='up', metavar='<vote>', choices=['up', 'down'], help="'up' or 'down'") def cmd_comment_vote(client, args): comment_vote = client.comment_vote(args.comment_id, args.vote) generate_output({'comment_vote': comment_vote}) @cli_subparser('comment') @cli_arg('comment_id', type=int, help='Comment ID') def cmd_comment_report(client, args): comment_report = client.comment_report(args.comment_id) generate_output({'comment_report': comment_report}) @cli_subparser('conversation') @cli_arg('--output-file', default=None, 
metavar='<output_file>', help='Save output to a JSON file') def cmd_conversation_list(client, args): conversation_list = client.conversation_list() data = [item.__dict__ for item in conversation_list] generate_output({'conversation_list': data}, args.output_file) @cli_subparser('conversation') @cli_arg('conversation_id', type=int, help='Conversation ID') @cli_arg('--page', default=1, metavar='<page>', type=int, help='Page of message thread. Starting at 1 for the most recent 25 ' 'messages and counting upwards (defaults to %(default)s)') @cli_arg('--offset', default=0, metavar='<offset>', type=int, help='Additional offset in current page (defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_conversation_id(client, args): conversation = client.get_conversation(args.conversation_id, args.page, args.offset) data = conversation.__dict__ try: data['messages'] = [item.__dict__ for item in data['messages']] except KeyError: pass generate_output({'conversation': data}) @cli_subparser('conversation') @cli_arg('recipient', help='The recipient username, this person will receive ' 'the message') @cli_arg('body', help='The message body') def cmd_conversation_create(client, args): create_message = client.create_message(args.recipient, args.body) generate_output({'create_message': create_message}) @cli_subparser('conversation') @cli_arg('conversation_id', type=int, help='Conversation ID') def cmd_conversation_delete(client, args): delete_conversation = client.delete_conversation(args.conversation_id) generate_output({'delete_conversation': delete_conversation}) @cli_subparser('conversation') @cli_arg('username', help='Username of sender to report') def cmd_conversation_report(client, args): report_sender = client.report_sender(args.username) generate_output({'report_sender': report_sender}) @cli_subparser('conversation') @cli_arg('username', help='Username of sender to block') def cmd_conversation_block(client, args): block_sender = client.block_sender(args.username) generate_output({'block_sender': block_sender}) @cli_subparser('gallery') @cli_arg('--section', default='hot', metavar='<section>', choices=['hot', 'top', 'user'], help='hot | top | user - defaults to hot') @cli_arg('--sort', default='viral', metavar='<sort>', choices=['viral', 'top', 'time', 'rising'], help='viral | top | time | rising (only available with user section) - ' 'defaults to viral') @cli_arg('--page', default=0, metavar='<page>', type=int, help='The data paging number (defaults to %(default)s)') @cli_arg('--window', default='day', metavar='<window>', choices=['day', 'week', 'month', 'year', 'all'], help='Change the date range of the request if the section is "top", ' 'day | week | month | year | all (Defaults to %(default)s)') @cli_arg('--show-viral', action='store_true', help='Show or hide viral images ' 'from the "user" section (Defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_gallery_items(client, args): gallery = client.gallery(args.section, args.sort, args.page, args.window, args.show_viral) data = [item.__dict__ for item in gallery] generate_output({'gallery': data}, args.output_file) @cli_subparser('gallery') @cli_arg('--sort', default='viral', metavar='<sort>', choices=['viral', 'top', 'time'], help='viral | top | time - defaults to %(default)s') @cli_arg('--page', default=0, metavar='<page>', type=int, help='The data paging number (defaults to 
%(default)s)') @cli_arg('--window', default='week', metavar='<window>', choices=['day', 'week', 'month', 'year', 'all'], help='Change the date range of the request if the sort is "top", ' 'day | week | month | year | all (Defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_gallery_memes_subgallery(client, args): memes_subgallery = client.memes_subgallery(args.sort, args.page, args.window) data = [item.__dict__ for item in memes_subgallery] generate_output({'memes_subgallery': data}, args.output_file) @cli_subparser('gallery') @cli_arg('subreddit', help='A valid subreddit name') @cli_arg('--sort', default='time', metavar='<sort>', choices=['top', 'time'], help='top | time - defaults to %(default)s') @cli_arg('--page', default=0, metavar='<page>', type=int, help='The data paging number (defaults to %(default)s)') @cli_arg('--window', default='week', metavar='<window>', choices=['day', 'week', 'month', 'year', 'all'], help='Change the date range of the request if the sort is "top", ' 'day | week | month | year | all (Defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_gallery_subreddit_gallery(client, args): subreddit_gallery = client.subreddit_gallery(args.subreddit, args.sort, args.window, args.page) data = [item.__dict__ for item in subreddit_gallery] generate_output({'subreddit_gallery': data}, args.output_file) @cli_subparser('gallery') @cli_arg('subreddit', help='A valid subreddit name') @cli_arg('image_id', help='Image ID') def cmd_gallery_subreddit_image(client, args): subreddit_image = client.subreddit_image(args.subreddit, args.image_id) data = subreddit_image.__dict__ generate_output({'subreddit_image': data}) @cli_subparser('gallery') @cli_arg('tag', help='The name of the tag') @cli_arg('--sort', default='viral', metavar='<sort>', choices=['viral', 'top', 'time'], help='viral | top | time - defaults to %(default)s') @cli_arg('--page', default=0, metavar='<page>', type=int, help='The data paging number (defaults to %(default)s)') @cli_arg('--window', default='week', metavar='<window>', choices=['day', 'week', 'month', 'year', 'all'], help='Change the date range of the request if the sort is "top", ' 'day | week | month | year | all (Defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_gallery_tag(client, args): gallery_tag = client.gallery_tag(args.tag, args.sort, args.page, args.window) data = gallery_tag.__dict__ data['items'] = [item.__dict__ for item in data['items']] generate_output({'gallery_tag': data}) @cli_subparser('gallery') @cli_arg('tag', help='The name of the tag') @cli_arg('image_id', help='Image ID') def cmd_gallery_tag_image(client, args): gallery_tag_image = client.gallery_tag_image(args.tag, args.image_id) data = gallery_tag_image.__dict__ generate_output({'gallery_tag_image': data}) @cli_subparser('gallery') @cli_arg('item_id', help='Gallery item ID') def cmd_gallery_item_tags(client, args): gallery_item_tags = client.gallery_item_tags(args.item_id) data = [item.__dict__ for item in gallery_item_tags] generate_output({'gallery_item_tags': data}) @cli_subparser('gallery') @cli_arg('item_id', help='Gallery item ID') @cli_arg('tag', help='The name of the tag') @cli_arg('vote', choices=['up', 'down'], help="'up' or 'down'") def cmd_gallery_tag_vote(client, args): gallery_tag_vote = client.gallery_tag_vote(args.item_id, 
args.tag, args.vote) generate_output({'gallery_tag_vote': gallery_tag_vote}) @cli_subparser('gallery') @cli_arg('q', help="Query string (note: if advanced search parameters are set," "this query string is ignored). This parameter also supports boolean " "operators (AND, OR, NOT) and indices (tag: user: title: ext: subreddit: " "album: meme:). An example compound query would be 'title: cats AND dogs " "ext: gif'") @cli_arg('--advanced', default=None, help='Advanced Search Query Parameters') @cli_arg('--sort', default='time', metavar='<sort>', choices=['viral', 'top', 'time'], help='viral | top | time - defaults to %(default)s') @cli_arg('--page', default=0, metavar='<page>', type=int, help='The data paging number (defaults to %(default)s)') @cli_arg('--window', default='all', metavar='<window>', choices=['day', 'week', 'month', 'year', 'all'], help='Change the date range of the request if the sort is "top", ' 'day | week | month | year | all (Defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_gallery_search(client, args): if args.advanced: config = data_fields(args.advanced, client.allowed_advanced_search_fields) else: config = None gallery_search = client.gallery_search(args.q, config, args.sort, args.window, args.page) data = [item.__dict__ for item in gallery_search] generate_output({'gallery_search': data}, args.output_file) @cli_subparser('gallery') @cli_arg('--page', default=0, metavar='<page>', type=int, help='A page of random gallery images, from 0-50. ' 'Pages are regenerated every hour (defaults to %(default)s)') @cli_arg('--output-file', default=None, metavar='<output_file>', help='Save output to a JSON file') def cmd_gallery_random(client, args): gallery_random = client.gallery_random(args.page) data = [item.__dict__ for item in gallery_random] generate_output({'gallery_random': data}, args.output_file) @cli_subparser('gallery') @cli_arg('item_id', help='Gallery item ID') @cli_arg('title', help='The title of the image') @cli_arg('--terms', default=0, type=int, metavar='<terms>', help='If the user has not accepted our terms yet, this endpoint will ' 'return an error. To by-pass the terms in general simply set this ' 'value to 1') def cmd_gallery_publish(client, args): publish_to_imgur = client.share_on_imgur(args.item_id, args.title, args.terms) generate_output({'publish_to_imgur': publish_to_imgur}) @cli_subparser('gallery') @cli_arg('item_id', help='Gallery item ID') def cmd_gallery_remove(client, args): gallery_remove = client.remove_from_gallery(args.item_id) generate_output({'gallery_remove': gallery_remove}) @cli_subparser('gallery') @cli_arg('item_id', help='Gallery item ID') def cmd_gallery_item(client, args): gallery_item = client.gallery_item(args.item_id) data = gallery_item.__dict__ generate_output({'gallery_item': data}) @cli_subparser('gallery') @cli_arg('item_id', help='Gallery item ID')
MIT License
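Commands in this module are wired entirely through the cli_subparser/cli_arg decorators, as cmd_gallery_report shows. A hedged sketch of adding another command in the same style; the client method named here is a placeholder, not a confirmed imgurpython API:

@cli_subparser('gallery')
@cli_arg('item_id', help='Gallery item ID')
def cmd_gallery_votes(client, args):
    # 'gallery_item_vote_info' is a placeholder name used only to show the
    # decorator + generate_output pattern from the context above.
    votes = client.gallery_item_vote_info(args.item_id)
    generate_output({'gallery_item_votes': votes})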
hdmifish/petal
petal/commands/template.py
CommandsTEMPLATE.cmd_example
python
async def cmd_example(self, args: list, msg: str, src: discord.Message, **_: dict): pass
This FIRST segment of the Docstring is the SUMMARY. It is shown first in the output of the `{p}help` Command. Any non-first segments lacking qualification are assumed by `{p}help` to be part of the DETAILS section. The next segment begins with `Syntax:`, which indicates to `{p}help` that it makes up the SYNTAX section of the output. It can be on one line or multiple. Syntax: `{p}example` - Specific function can be described here. The final segment, Parameters, is used by `{p}help` to automatically generate, as needed, the text shown for the OPTIONS section. Parameters ---------- _ : dict Dict of additional Keyword Args. args : list List of Positional Arguments supplied after Command. msg : str The TEXT of the Message that invoked this Command, minus the Prefix. src : discord.Message The Discord Message that invoked this Command. Also, take note of the fact that the source components of the DETAILS section are spread throughout the Docstring.
https://github.com/hdmifish/petal/blob/b972e1cbb48b971e5c05839e5525b7227caa51b4/petal/commands/template.py#L12-L40
import discord from petal.commands import core class CommandsTEMPLATE(core.Commands): auth_fail = "This command is implemented incorrectly."
MIT License
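A minimal sketch of a cog following the Docstring convention documented above; the `cmd_ping` command, its reply text, and the assumption that a returned string is sent back as the response are illustrative, not part of the template module.

import discord

from petal.commands import core


class CommandsCustom(core.Commands):
    auth_fail = "This command is implemented incorrectly."

    async def cmd_ping(self, args: list, msg: str, src: discord.Message, **_: dict):
        """Reply with a short acknowledgement.

        Syntax: `{p}ping`

        Parameters
        ----------
        _ : dict
            Dict of additional Keyword Args.
        args : list
            List of Positional Arguments supplied after Command.
        msg : str
            The TEXT of the Message that invoked this Command, minus the Prefix.
        src : discord.Message
            The Discord Message that invoked this Command.
        """
        # The SUMMARY, SYNTAX, and OPTIONS segments above are what `{p}help ping` would display.
        return "Pong."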
naparuba/opsbro
data/core-configuration/packs/core-functions/module/fstrings.py
string_join
python
def string_join(list, join_character): return join_character.join(list)
**string_join(list, join_character)** -> return a string with the elements of the list joined by the join_character * list: (list of strings) list of strings to join * join_character: (string) character to use between strings <code> Example: string_join(['linux', 'windows'], ',') Returns: 'linux,windows' </code>
https://github.com/naparuba/opsbro/blob/98618a002cd47250d21e7b877a24448fc95fec80/data/core-configuration/packs/core-functions/module/fstrings.py#L61-L75
from opsbro.evaluater import export_evaluater_function FUNCTION_GROUP = 'string' @export_evaluater_function(function_group=FUNCTION_GROUP) def string_upper(string): return string.upper() @export_evaluater_function(function_group=FUNCTION_GROUP) def string_lower(string): return string.lower() @export_evaluater_function(function_group=FUNCTION_GROUP) def string_split(string, split_character): return string.split(split_character) @export_evaluater_function(function_group=FUNCTION_GROUP)
MIT License
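A minimal usage sketch of string_join together with string_split from the same module; the direct `fstrings` import is hypothetical and assumes opsbro (which provides opsbro.evaluater) is installed.

# hypothetical direct import of the pack module; in opsbro these helpers are normally
# exposed to the evaluater rather than imported by hand
from fstrings import string_join, string_split

parts = string_split('linux,windows', ',')   # ['linux', 'windows']
print(string_join(parts, ','))               # 'linux,windows'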
napalm-automation/napalm-yang
napalm_yang/models/openconfig/interfaces/interface/routed_vlan/ipv4/addresses/address/vrrp/vrrp_group/state/__init__.py
state._set_accept_mode
python
def _set_accept_mode(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """accept_mode must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=False)""", } ) self.__accept_mode = t if hasattr(self, "_set"): self._set()
Setter method for accept_mode, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/addresses/address/vrrp/vrrp_group/state/accept_mode (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_accept_mode is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_accept_mode() directly. YANG Description: Configure whether packets destined for virtual addresses are accepted even when the virtual address is not owned by the router interface
https://github.com/napalm-automation/napalm-yang/blob/9148e015b086ebe311c07deb92e168ea36fd7771/napalm_yang/models/openconfig/interfaces/interface/routed_vlan/ipv4/addresses/address/vrrp/vrrp_group/state/__init__.py#L679-L720
from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class state(PybindBase): __slots__ = ( "_path_helper", "_extmethods", "__virtual_router_id", "__virtual_address", "__priority", "__preempt", "__preempt_delay", "__accept_mode", "__advertisement_interval", "__current_priority", ) _yang_name = "state" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__virtual_router_id = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..255"]}, ), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) self.__virtual_address = YANGDynClass( base=TypedListType( allowed_type=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" 
}, ), ] ), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) self.__priority = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..254"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 )( 100 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) self.__preempt = YANGDynClass( base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) self.__preempt_delay = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["0..3600"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 0 ), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) self.__accept_mode = YANGDynClass( base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="accept-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) self.__advertisement_interval = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["1..4095"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 100 ), is_leaf=True, yang_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) self.__current_priority = YANGDynClass( base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), is_leaf=True, yang_name="current-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, 
"_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "interfaces", "interface", "routed-vlan", "ipv4", "addresses", "address", "vrrp", "vrrp-group", "state", ] def _get_virtual_router_id(self): return self.__virtual_router_id def _set_virtual_router_id(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8, ), restriction_dict={"range": ["1..255"]}, ), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """virtual_router_id must be of a type compatible with uint8""", "defined-type": "uint8", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..255']}), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""", } ) self.__virtual_router_id = t if hasattr(self, "_set"): self._set() def _unset_virtual_router_id(self): self.__virtual_router_id = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..255"]}, ), is_leaf=True, yang_name="virtual-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) def _get_virtual_address(self): return self.__virtual_address def _set_virtual_address(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=TypedListType( allowed_type=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" 
}, ), ] ), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """virtual_address must be of a type compatible with inet:ip-address""", "defined-type": "inet:ip-address", "generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),]), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='inet:ip-address', is_config=False)""", } ) self.__virtual_address = t if hasattr(self, "_set"): self._set() def _unset_virtual_address(self): self.__virtual_address = YANGDynClass( base=TypedListType( allowed_type=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?" 
}, ), ] ), is_leaf=False, yang_name="virtual-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="inet:ip-address", is_config=False, ) def _get_priority(self): return self.__priority def _set_priority(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8, ), restriction_dict={"range": ["1..254"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 )( 100 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """priority must be of a type compatible with uint8""", "defined-type": "uint8", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['1..254']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8)(100), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint8', is_config=False)""", } ) self.__priority = t if hasattr(self, "_set"): self._set() def _unset_priority(self): self.__priority = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["1..254"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 )( 100 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint8", is_config=False, ) def _get_preempt(self): return self.__preempt def _set_preempt(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """preempt must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='boolean', is_config=False)""", } ) self.__preempt = t if hasattr(self, "_set"): self._set() def _unset_preempt(self): self.__preempt = YANGDynClass( base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="preempt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, 
register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="boolean", is_config=False, ) def _get_preempt_delay(self): return self.__preempt_delay def _set_preempt_delay(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16, ), restriction_dict={"range": ["0..3600"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 0 ), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """preempt_delay must be of a type compatible with uint16""", "defined-type": "uint16", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['0..3600']}), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(0), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='uint16', is_config=False)""", } ) self.__preempt_delay = t if hasattr(self, "_set"): self._set() def _unset_preempt_delay(self): self.__preempt_delay = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), restriction_dict={"range": ["0..3600"]}, ), default=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 )( 0 ), is_leaf=True, yang_name="preempt-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="uint16", is_config=False, ) def _get_accept_mode(self): return self.__accept_mode
Apache License 2.0
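A minimal sketch, assuming napalm-yang and pyangbind are installed; the module path follows the function_path above, and the class exposing _set_accept_mode is the generated `state` container.

from napalm_yang.models.openconfig.interfaces.interface.routed_vlan.ipv4.addresses.address.vrrp.vrrp_group.state import (
    state,
)

st = state()
# accept-mode is config false, so backends populate it via the private setter
st._set_accept_mode(True)
print(st._get_accept_mode())   # True (a YANGBool); the YANG default is "false"
st._unset_accept_mode()        # restore the default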
mkenworthy/exorings
exorings.py
draw_rings
python
def draw_rings(r, tau, hjd_central, i, phi, p): xcen = hjd_central ycen = 0.0 xrang = np.array(50.) yrang = np.array(50.) resol = 0.05 nx = xrang / resol ny = yrang / resol xc = (nx - 1) / 2. yc = (ny - 1) / 2. agr = np.mgrid[:ny, :nx] agr[0] = agr[0] - yc agr[1] = agr[1] - xc agr = agr * resol tau_disk, grad = ellipse_nest(agr, i, phi, r, tau) ext = [np.min(agr[1])+xcen, np.max(agr[1])+xcen, np.min(agr[0])+ycen, np.max(agr[0])+ycen] p.imshow(tau_disk, extent=ext) p.scatter(hjd_central, 0) p.set_xlabel("Time [days]") p.ticklabel_format(axis='x', scilimits=(-2, 9)) p.set_ylabel("Time [days]")
Pixel-based ring image. Can be slow for large images.
https://github.com/mkenworthy/exorings/blob/20e16aff07323a20b7585068c082931ed1b9de11/exorings.py#L531-L560
import numpy as np from astropy.io import fits import matplotlib as mpl from matplotlib.path import Path from matplotlib.patches import PathPatch import matplotlib.pyplot as plt from scipy.ndimage import convolve def ring_to_sky(Xr, Yr, i_deg, phi_deg): i = np.pi * i_deg / 180. phi = np.pi * phi_deg / 180. Xs = (np.cos(phi) * Xr) + (np.sin(phi) * np.sin(i) * Yr) Ys = (np.sin(phi) * Xr) - (np.cos(phi) * np.sin(i) * Yr) Rs = np.sqrt((Xs*Xs) + (Ys*Ys)) return(Xs, Ys, Rs) def sky_to_ring(Xs, Ys, i_deg, phi_deg): i = np.pi * i_deg / 180. phi = np.pi * phi_deg / 180. Xr = ((np.cos(phi) * Xs) + (np.sin(phi) * Ys)) Yr = ((np.sin(phi) * Xs) - (np.cos(phi) * Ys)) / np.sin(i) Rr = np.sqrt((Xr*Xr) + (Yr*Yr)) return(Xr, Yr, Rr) def ring_tilt(i_deg, phi_deg): i_rad = np.pi * i_deg / 180. phi_rad = np.pi * phi_deg / 180. ct = np.cos(i_rad) * np.cos(phi_rad) tilt = np.arccos(ct) * 180. / np.pi return tilt def ellipse(im, i_deg, phi_deg): Ys = im[0] Xs = im[1] (x11, y11, ellipser) = sky_to_ring(Xs, Ys, 90.-i_deg, phi_deg) return ellipser def ellipse_nest(im, i_deg, phi_deg, r=None, tau=None): Ys = im[0] Xs = im[1] (Xr, Yr, ellipse) = sky_to_ring(Xs, Ys, 90.-i_deg, phi_deg) (Xs_tang, Ys_tang, ellipse2) = ring_to_sky(Yr, -Xr, 90.-i_deg, phi_deg) tan_out = np.arctan2(Ys_tang, Xs_tang) im_out = ellipse if r is not None: im_out = np.zeros_like(Xs) + 0.0 r_inner = 0.0 for r_outer, curr_tau in zip(r, tau): sel = (ellipse >= r_inner) * (ellipse < r_outer) im_out[np.where(sel)] = curr_tau r_inner = r_outer return(im_out, tan_out) def ellipse_para(a, b, phi, t): ct = np.cos(t) st = np.sin(t) cp = np.cos(phi) sp = np.sin(phi) X = (a*ct*cp) - (b*st*sp) Y = (a*ct*sp) + (b*st*cp) dY = -a*st*sp + b*ct*cp dX = -a*st*cp - b*ct*sp tang = np.arctan2(dY, dX) return(X, Y, tang) def ellipse_tangents(a, b, phi): print( phi) cosi = b/a iang = np.arccos(cosi) dX0 = np.arctan(-(cosi)*np.tan(phi)) dY0 = np.arctan((cosi)/np.tan(phi)) denom = np.sin(2*phi) * np.sin(iang) * np.sin(iang) numer = 2 * (np.sin(phi)*np.sin(phi) + cosi*cosi * np.cos(phi)*np.cos(phi) ) tang = np.arctan(numer/denom) print ('phi is %f rad' % phi) print ('tang is %f rad' % tang) return(dX0, dY0, tang) def ellipse_strip(r, tau, y, hjd_central, i, phi, star, width): xrang = 200. yrang = width star_x, star_y = np.shape(star) ny = star_y dt = yrang / (ny - 1) nx = np.floor(xrang/dt) yc = (ny - 1) / 2. xc = (nx - 1) / 2. agr = np.mgrid[:ny, :nx] agr[0] = agr[0] - yc agr[1] = agr[1] - xc agr = agr * dt agr[0] = agr[0] + y tau_disk, grad = ellipse_nest(agr, i, phi, r, tau) tau_disk_convolved = convolve(tau_disk, star, mode='constant', cval=0.0) x_tdc = agr[1, yc, :] + hjd_central tdc = tau_disk_convolved[yc, :] td = tau_disk[yc, :] g = grad[yc, :] return(tau_disk, tau_disk_convolved, np.vstack((x_tdc, tdc, td, g))) def ring_grad_line(xt, yt, dt, i_deg, phi_deg): yy = np.ones_like(xt) * yt xx = xt - dt aa = np.vstack((yy, xx)) (ellipse, gradient) = ellipse_nest(aa, i_deg, phi_deg) grad_disk = np.abs(np.sin(gradient)) return(ellipse, grad_disk) def ring_mask_no_photometry(star, width, xt, yt, dt, i_deg, phi_deg): ring_radii = np.arange(0, 100, 0.001) ring_valid = np.zeros_like(ring_radii) star_x, star_y = np.shape(star) xc = (star_x - 1) / 2. yc = (star_y - 1) / 2. 
sgr = np.mgrid[:star_y, :star_x] tsize = width / (star_x - 1) sgr[0] = sgr[0] - yc sgr[1] = sgr[1] - xc sgr = sgr * tsize masked = sgr[:, (star > 0)] for x in xt: agr2 = np.copy(masked) agr2[0] = agr2[0] + yt agr2[1] = agr2[1] + (x - dt) rell = ellipse(agr2, i_deg, phi_deg) minr = np.min(rell) maxr = np.max(rell) ring_valid[(ring_radii > minr)*(ring_radii < maxr)] = 1.0 drv = ring_valid[1:] - ring_valid[:-1] print(drv) ringstart = ring_radii[(drv < -0.5)] ringends = ring_radii[(drv > 0.5)] if ring_valid[0] == 0: ringstart = np.insert(ringstart, 0, 0.0) if ring_valid[-1] == 0: ringends = np.append(ringends, ring_radii[-1]) return(ringstart, ringends) def y_to_tau(y): return (np.arctan(y)/np.pi)+0.5 def tau_to_y(tau): return np.tan(np.pi*(tau+0.5)) def ring_patch(r1, r2, i_deg, phi_deg, dr=([0, 0])): from matplotlib import patches i = np.cos(i_deg * np.pi / 180.) e1 = patches.Ellipse((1, 1), 1, 1, 0) e1p = e1.get_path() c = np.cos(phi_deg * np.pi / 180.) s = np.sin(phi_deg * np.pi / 180.) rotm = np.array([[c, s], [s, -c]]) a1 = e1p.vertices * ([1., i]) a2 = e1p.vertices * ([-1., i]) e1r = np.dot(a1 * r2, rotm) + dr e2r = np.dot(a2 * r1, rotm) + dr new_verts = np.vstack((e1r, e2r)) new_cmds = np.hstack((e1p.codes, e1p.codes)) newp = Path(new_verts, new_cmds) return newp def make_star_limbd(dstar_pix, u=0.0): ke = np.mgrid[:dstar_pix, :dstar_pix] dp2 = (dstar_pix - 1) / 2 ke[0] = ke[0] - dp2 ke[1] = ke[1] - dp2 re = np.sqrt(ke[0]*ke[0] + ke[1]*ke[1]) ren = re / dp2 mask = np.zeros_like(ren) mask[(ren > 1.0000001)] = 1. ren[(ren > 1.0000001)] = 1. I = 1. - u * (1 - np.sqrt(1 - ren * ren)) I[(mask > 0)] = 0. I /= np.sum(I) return I def write_ring_fits(fitsname, res, taun_rings, radii, dstar): col1 = fits.Column(name='taun', format='E', array=taun_rings) col2 = fits.Column(name='radius', format='E', array=radii) cols = fits.ColDefs([col1, col2]) tbhdu = fits.new_table(cols) prihdr = fits.Header() prihdr['TIMPACT'] = (res[0], 'Impact parameter (days)') prihdr['TMINR'] = (res[1], 'Time of minimum disk radius (days)') prihdr['DINCL'] = (res[2], 'Disk inclination (degrees)') prihdr['DTILT'] = (res[3], 'Disk tilt to orbital motion (degrees)') prihdr['DSTAR'] = (dstar, 'Diameter of star (days)') prihdr['HN'] = (0.907, 'Henweigh parameter') prihdu = fits.PrimaryHDU(header=prihdr) thdulist = fits.HDUList([prihdu, tbhdu]) thdulist.writeto(fitsname, clobber=True) print ('write_ring_fits: wrote FITS file to %s' % fitsname) def read_ring_fits(fitsname): hdulist = fits.open(fitsname) prihdr = hdulist[0].header re = np.array((prihdr['TIMPACT'], prihdr['TMINR'], prihdr['DINCL'], prihdr['DTILT'])) tbdata = hdulist[1].data return(re, tbdata['taun'], tbdata['radius'], prihdr['DSTAR']) def print_ring_tau(rad, tau): n = 0 for (r, t) in zip(rad, tau): print ('Ring %3d: tau = %5.3f out to radius %7.3f days' % (n, t, r)) n = n + 1 def print_ring_tau_latex(rad,tau): n = 0 from astropy.io import ascii for (r, t) in zip(rad, tau): print ('Ring %3d: tau = %5.3f out to radius %7.3f days' % (n, t, r)) n = n + 1 from astropy.table import Table exptau = -np.log(tau) t = Table([rad, exptau], names=['Radius', 'Tau']) t['Radius'].format = '%.1f' t['Tau'].format = '%4.2f' ascii.write(t, output='ring_table1.tex', Writer=ascii.latex.AASTex, col_align='ll', latexdict = {'caption' : r'Table of ring parameters \label{tab:ring}', 'preamble':r'\tablewidth{0pt} \tabletypesize{\scriptsize}' }) def print_disk_parameters(res, minr_t, samp_r): print ('') print ('Disk parameters fitting to gradients') print 
('------------------------------------') print ('') print (' impact parameter b = %8.2f days' % res[0]) print (' HJD min approach t_b = %8.2f days' % res[1]) print (' disk inclination i = %7.1f deg' % res[2]) print (' disk tilt phi = %7.1f deg' % res[3]) print (' HJD min gradient = %8.2f days' % minr_t) print (' rmin = %8.2f days' % np.min(samp_r)) ss = r'\begin{eqnarray*}b =& %8.2f \rm{d} \\ t_b =& %8.2f \rm{d} \\ i_{disk} =& %5.1f^o \\ \phi =& %5.1f^o \\ t_\parallel =& %8.2f \rm{d}\end{eqnarray*}' % (res[0], res[1], res[2], res[3], minr_t) return ss def make_ring_grad_line(xt, yt, dt, i_deg, phi_deg): yy = np.ones_like(xt) * yt xx = xt - dt aa = np.vstack((yy, xx)) (ellipse, gradient) = ellipse_nest(aa, i_deg, phi_deg) grad_disk = np.abs(np.sin(gradient)) return(ellipse, grad_disk) def draw_badrings(rstart, rend, xcen, incl, phi, p): for (bs, be) in zip(rstart, rend): path = ring_patch(bs, be, incl, phi, ([xcen, 0])) pnew = PathPatch(path, facecolor='#DDDDDD', edgecolor='none', zorder=-9) p.add_patch(pnew) def draw_rings_vector(r, tau, xcen, incl, phi, p, ringcol='red'): ycen = 0.0 xrang = 50. yrang = 50. p.set_xlim(xcen-xrang, xcen+xrang) p.set_ylim(ycen-yrang, ycen+yrang) for i in np.arange(0, r.size): if i == 0: rin = 0 rout = r[0] else: rin = r[i-1] rout = r[i] ttau = 1 - tau[i] path = ring_patch(rin, rout, incl, phi, ([xcen, 0])) pnew = PathPatch(path, facecolor=ringcol, ec='none', alpha=ttau, zorder=-10) p.add_patch(pnew) p.set_xlabel("Time [days]") p.ticklabel_format(axis='x', scilimits=(-2, 9)) p.set_ylabel("Time [days]")
ISC License
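A minimal usage sketch of draw_rings, assuming exorings.py and its dependencies (numpy, matplotlib, astropy, scipy) are importable; the ring radii and transmissions below are illustrative.

import numpy as np
import matplotlib.pyplot as plt

import exorings

r = np.array([5.0, 10.0, 20.0])    # outer radius of each ring [days]
tau = np.array([0.9, 0.4, 0.7])    # transmission of each ring
fig, ax = plt.subplots()
exorings.draw_rings(r, tau, hjd_central=100.0, i=70.0, phi=30.0, p=ax)
plt.show()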
openstack/horizon
horizon/forms/views.py
ModalFormView.get_form
python
def get_form(self, form_class=None): if form_class is None: form_class = self.get_form_class() return form_class(self.request, **self.get_form_kwargs())
Returns an instance of the form to be used in this view.
https://github.com/openstack/horizon/blob/5e405d71926764b8aa60c75794b62f668f4e8122/horizon/forms/views.py#L174-L178
import json import os from django.conf import settings from django import http from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import views ADD_TO_FIELD_HEADER = "HTTP_X_HORIZON_ADD_TO_FIELD" class ModalBackdropMixin(object): modal_backdrop = 'static' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) config = settings.HORIZON_CONFIG if 'modal_backdrop' in config: self.modal_backdrop = config['modal_backdrop'] def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['modal_backdrop'] = self.modal_backdrop return context class ModalFormMixin(ModalBackdropMixin): def get_template_names(self): if self.request.is_ajax(): if not hasattr(self, "ajax_template_name"): bits = list(os.path.split(self.template_name)) bits[1] = "".join(("_", bits[1])) self.ajax_template_name = os.path.join(*bits) template = self.ajax_template_name else: template = self.template_name return template def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) if self.request.is_ajax(): context['hide'] = True if ADD_TO_FIELD_HEADER in self.request.META: context['add_to_field'] = self.request.META[ADD_TO_FIELD_HEADER] return context class ModalFormView(ModalFormMixin, views.HorizonFormView): modal_id = None modal_header = "" form_id = None submit_url = None submit_label = _("Submit") cancel_label = _("Cancel") cancel_url = None def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['modal_id'] = self.modal_id context['modal_header'] = self.modal_header context['form_id'] = self.form_id context['submit_url'] = self.submit_url context['submit_label'] = self.submit_label context['cancel_label'] = self.cancel_label context['cancel_url'] = self.get_cancel_url() return context def get_cancel_url(self): return self.cancel_url or self.success_url def get_object_id(self, obj): return obj.id def get_object_display(self, obj): return obj.name
Apache License 2.0
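A minimal sketch of a view that relies on get_form() indirectly; the form, template, and URLs are illustrative. Note that get_form() passes self.request as the form's first positional argument, which is what horizon's SelfHandlingForm expects.

from django.urls import reverse_lazy

from horizon import forms


class SetNameForm(forms.SelfHandlingForm):
    name = forms.CharField(label="Name")

    def handle(self, request, data):
        # a real form would call a service API here
        return True


class SetNameView(forms.ModalFormView):
    form_class = SetNameForm
    template_name = "project/example/set_name.html"
    submit_url = success_url = reverse_lazy("horizon:project:overview:index")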
mar10/wsgidav
wsgidav/xml_tools.py
element_content_as_string
python
def element_content_as_string(element): if len(element) == 0: return element.text or "" stream = compat.StringIO() for childnode in element: stream.write(xml_to_bytes(childnode, pretty_print=False) + "\n") s = stream.getvalue() stream.close() return s
Serialize etree.Element. Note: element may contain more than one child or only text (i.e. no child at all). Therefore the resulting string may raise an exception when passed back to etree.XML().
https://github.com/mar10/wsgidav/blob/443058d3b05550006270f30f2f3d2329e6a82813/wsgidav/xml_tools.py#L112-L127
import logging from wsgidav import compat __docformat__ = "reStructuredText" _logger = logging.getLogger("wsgidav") use_lxml = False try: from defusedxml.lxml import _etree as etree from lxml import _elementpath as _dummy_elementpath use_lxml = True _ElementType = etree._Element except ImportError: from xml.etree.ElementTree import Element, SubElement, tostring from defusedxml import ElementTree as etree etree.Element = _ElementType = Element etree.SubElement = SubElement etree.tostring = tostring def is_etree_element(obj): return isinstance(obj, _ElementType) def string_to_xml(text): try: return etree.XML(text) except Exception: _logger.error( "Error parsing XML string. " "If lxml is not available, and unicode is involved, then " "installing lxml _may_ solve this issue." ) _logger.error("XML source: {}".format(text)) raise def xml_to_bytes(element, pretty_print=False): if use_lxml: xml = etree.tostring( element, encoding="UTF-8", xml_declaration=True, pretty_print=pretty_print ) else: xml = etree.tostring(element, encoding="UTF-8") if not xml.startswith(b"<?xml "): xml = b'<?xml version="1.0" encoding="utf-8" ?>\n' + xml assert xml.startswith(b"<?xml ") return xml def make_multistatus_el(): if use_lxml: return etree.Element("{DAV:}multistatus", nsmap={"D": "DAV:"}) return etree.Element("{DAV:}multistatus") def make_prop_el(): if use_lxml: return etree.Element("{DAV:}prop", nsmap={"D": "DAV:"}) return etree.Element("{DAV:}prop") def make_sub_element(parent, tag, nsmap=None): if use_lxml: return etree.SubElement(parent, tag, nsmap=nsmap) return etree.SubElement(parent, tag)
MIT License
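A minimal usage sketch, assuming wsgidav and defusedxml are importable under Python 2 (matching the compat.StringIO usage in the function above, which mixes xml_to_bytes output with a str newline).

from wsgidav import xml_tools

# no children: the element text is returned as-is
leaf = xml_tools.string_to_xml("<displayname>My File</displayname>")
print(xml_tools.element_content_as_string(leaf))    # My File

# with children: each child element is serialized on its own line
prop = xml_tools.string_to_xml("<propstat><status>HTTP/1.1 200 OK</status><prop/></propstat>")
print(xml_tools.element_content_as_string(prop))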
trevor/calendarserver
txweb2/stream.py
connectStream
python
def connectStream(inputStream, factory): p = factory.buildProtocol(None) out = ProducerStream() out.disconnecting = False p.makeConnection(out) readStream(inputStream, lambda _: p.dataReceived(_)).addCallbacks( lambda _: p.connectionLost(ti_error.ConnectionDone()), lambda _: p.connectionLost(_)) return out
Connect a protocol constructed from a factory to a stream. Returns an output stream from the protocol. The protocol's transport will have a finish() method it should call when done writing.
https://github.com/trevor/calendarserver/blob/c9970b06a70445ca75b62e3d170c26bc897a035e/txweb2/stream.py#L484-L499
from __future__ import generators import copy, os, types, sys from zope.interface import Interface, Attribute, implements from twisted.internet.defer import Deferred from twisted.internet import interfaces as ti_interfaces, defer, reactor, protocol, error as ti_error from twisted.python import components from twisted.python.failure import Failure from hashlib import md5 from twext.python.log import Logger log = Logger() if sys.version_info[0:3] != (2,4,2): try: import mmap except ImportError: mmap = None else: mmap = None class IStream(Interface): def read(): def close(): class IByteStream(IStream): length = Attribute("""How much data is in this stream. Can be None if unknown.""") def read(): def split(point): def close(): class ISendfileableStream(Interface): def read(sendfile=False): class SimpleStream(object): implements(IByteStream) length = None start = None def read(self): return None def close(self): self.length = 0 def split(self, point): if self.length is not None: if point > self.length: raise ValueError("split point (%d) > length (%d)" % (point, self.length)) b = copy.copy(self) self.length = point if b.length is not None: b.length -= point b.start += point return (self, b) MMAP_LIMIT = 4*1024*1024 MMAP_THRESHOLD = 8*1024 SENDFILE_LIMIT = 16777216 SENDFILE_THRESHOLD = 256 def mmapwrapper(*args, **kwargs): offset = kwargs.get('offset', None) if offset in [None, 0]: if 'offset' in kwargs: del kwargs['offset'] else: raise mmap.error("mmap: Python sucks and does not support offset.") return mmap.mmap(*args, **kwargs) class FileStream(SimpleStream): implements(ISendfileableStream) CHUNK_SIZE = 2 ** 2 ** 2 ** 2 - 32 f = None def __init__(self, f, start=0, length=None, useMMap=bool(mmap)): self.f = f self.start = start if length is None: self.length = os.fstat(f.fileno()).st_size else: self.length = length self.useMMap = useMMap def read(self, sendfile=False): if self.f is None: return None length = self.length if length == 0: self.f = None return None if self.useMMap and length > MMAP_THRESHOLD: readSize = min(length, MMAP_LIMIT) try: res = mmapwrapper(self.f.fileno(), readSize, access=mmap.ACCESS_READ, offset=self.start) self.length -= readSize self.start += readSize return res except mmap.error: pass readSize = min(length, self.CHUNK_SIZE) self.f.seek(self.start) b = self.f.read(readSize) bytesRead = len(b) if not bytesRead: raise RuntimeError("Ran out of data reading file %r, expected %d more bytes" % (self.f, length)) else: self.length -= bytesRead self.start += bytesRead return b def close(self): self.f = None SimpleStream.close(self) components.registerAdapter(FileStream, file, IByteStream) class MemoryStream(SimpleStream): def __init__(self, mem, start=0, length=None): self.mem = mem self.start = start if length is None: self.length = len(mem) - start else: if len(mem) < length: raise ValueError("len(mem) < start + length") self.length = length def read(self): if self.mem is None: return None if self.length == 0: result = None else: result = buffer(self.mem, self.start, self.length) self.mem = None self.length = 0 return result def close(self): self.mem = None SimpleStream.close(self) components.registerAdapter(MemoryStream, str, IByteStream) components.registerAdapter(MemoryStream, types.BufferType, IByteStream) class CompoundStream(object): implements(IByteStream, ISendfileableStream) deferred = None length = 0 def __init__(self, buckets=()): self.buckets = [IByteStream(s) for s in buckets] def addStream(self, bucket): bucket = IByteStream(bucket) self.buckets.append(bucket) 
if self.length is not None: if bucket.length is None: self.length = None else: self.length += bucket.length def read(self, sendfile=False): if self.deferred is not None: raise RuntimeError("Call to read while read is already outstanding") if not self.buckets: return None if sendfile and ISendfileableStream.providedBy(self.buckets[0]): try: result = self.buckets[0].read(sendfile) except: return self._gotFailure(Failure()) else: try: result = self.buckets[0].read() except: return self._gotFailure(Failure()) if isinstance(result, Deferred): self.deferred = result result.addCallbacks(self._gotRead, self._gotFailure, (sendfile,)) return result return self._gotRead(result, sendfile) def _gotFailure(self, f): self.deferred = None del self.buckets[0] self.close() return f def _gotRead(self, result, sendfile): self.deferred = None if result is None: del self.buckets[0] return self.read(sendfile) if self.length is not None: self.length -= len(result) return result def split(self, point): num = 0 origPoint = point for bucket in self.buckets: num+=1 if point == 0: b = CompoundStream() b.buckets = self.buckets[num:] del self.buckets[num:] return self,b if bucket.length is None: return fallbackSplit(self, origPoint) if point < bucket.length: before,after = bucket.split(point) b = CompoundStream() b.buckets = self.buckets[num:] b.buckets[0] = after del self.buckets[num+1:] self.buckets[num] = before return self,b point -= bucket.length def close(self): for bucket in self.buckets: bucket.close() self.buckets = [] self.length = 0 class _StreamReader(object): def __init__(self, stream, gotDataCallback): self.stream = stream self.gotDataCallback = gotDataCallback self.result = Deferred() def run(self): result = self.result self._read() return result def _read(self): try: result = self.stream.read() except: self._gotError(Failure()) return if isinstance(result, Deferred): result.addCallbacks(self._gotData, self._gotError) else: self._gotData(result) def _gotError(self, failure): result = self.result del self.result, self.gotDataCallback, self.stream result.errback(failure) def _gotData(self, data): if data is None: result = self.result del self.result, self.gotDataCallback, self.stream result.callback(None) return try: self.gotDataCallback(data) except: self._gotError(Failure()) return reactor.callLater(0, self._read) def readStream(stream, gotDataCallback): return _StreamReader(stream, gotDataCallback).run() def readAndDiscard(stream): return readStream(stream, lambda _: None) def readIntoFile(stream, outFile): def done(_): outFile.close() return _ return readStream(stream, outFile.write).addBoth(done)
Apache License 2.0
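A minimal sketch (Python 2, matching txweb2) with an illustrative protocol that upper-cases its input; the factory wiring follows the docstring above.

from twisted.internet import protocol

from txweb2.stream import MemoryStream, connectStream


class Shout(protocol.Protocol):
    def dataReceived(self, data):
        # MemoryStream hands out buffer objects, so coerce to str before transforming
        self.transport.write(str(data).upper())

    def connectionLost(self, reason):
        self.transport.finish()   # signal end-of-stream on the output side


factory = protocol.Factory()
factory.protocol = Shout
out = connectStream(MemoryStream("hello"), factory)
# 'out' is a ProducerStream; once the reactor runs, it yields "HELLO" and is then finished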
olitheolix/aiokubernetes
aiokubernetes/models/v1_storage_os_volume_source.py
V1StorageOSVolumeSource.secret_ref
python
def secret_ref(self): return self._secret_ref
Gets the secret_ref of this V1StorageOSVolumeSource. # noqa: E501 SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. # noqa: E501 :return: The secret_ref of this V1StorageOSVolumeSource. # noqa: E501 :rtype: V1LocalObjectReference
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1_storage_os_volume_source.py#L118-L126
import pprint import re from aiokubernetes.models.v1_local_object_reference import V1LocalObjectReference class V1StorageOSVolumeSource(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'fs_type': 'str', 'read_only': 'bool', 'secret_ref': 'V1LocalObjectReference', 'volume_name': 'str', 'volume_namespace': 'str' } attribute_map = { 'fs_type': 'fsType', 'read_only': 'readOnly', 'secret_ref': 'secretRef', 'volume_name': 'volumeName', 'volume_namespace': 'volumeNamespace' } def __init__(self, fs_type=None, read_only=None, secret_ref=None, volume_name=None, volume_namespace=None): self._fs_type = None self._read_only = None self._secret_ref = None self._volume_name = None self._volume_namespace = None self.discriminator = None if fs_type is not None: self.fs_type = fs_type if read_only is not None: self.read_only = read_only if secret_ref is not None: self.secret_ref = secret_ref if volume_name is not None: self.volume_name = volume_name if volume_namespace is not None: self.volume_namespace = volume_namespace @property def fs_type(self): return self._fs_type @fs_type.setter def fs_type(self, fs_type): self._fs_type = fs_type @property def read_only(self): return self._read_only @read_only.setter def read_only(self, read_only): self._read_only = read_only @property
Apache License 2.0
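A minimal usage sketch, assuming aiokubernetes is installed; the volume and secret names are illustrative.

from aiokubernetes.models.v1_local_object_reference import V1LocalObjectReference
from aiokubernetes.models.v1_storage_os_volume_source import V1StorageOSVolumeSource

source = V1StorageOSVolumeSource(
    volume_name="redis-vol01",
    fs_type="ext4",
    secret_ref=V1LocalObjectReference(name="storageos-api"),  # StorageOS API credentials
)
print(source.secret_ref)   # the property simply returns the stored V1LocalObjectReference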
wavefronthq/python-client
wavefront_api_client/models/field.py
Field.__ne__
python
def __ne__(self, other): if not isinstance(other, Field): return True return self.to_dict() != other.to_dict()
Returns true if both objects are not equal
https://github.com/wavefronthq/python-client/blob/e410ce0dd8a2334e995456f4f3d44e0f04664a3a/wavefront_api_client/models/field.py#L118-L123
import pprint import re import six from wavefront_api_client.configuration import Configuration class Field(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'object_props': 'dict(str, object)' } attribute_map = { 'object_props': 'objectProps' } def __init__(self, object_props=None, _configuration=None): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._object_props = None self.discriminator = None if object_props is not None: self.object_props = object_props @property def object_props(self): return self._object_props @object_props.setter def object_props(self, object_props): self._object_props = object_props def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Field, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, Field): return False return self.to_dict() == other.to_dict()
Apache License 2.0
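A minimal usage sketch, assuming wavefront_api_client is installed; the object_props values are illustrative.

from wavefront_api_client.models.field import Field

a = Field(object_props={"name": "source"})
b = Field(object_props={"name": "source"})
c = Field(object_props={"name": "metric"})

print(a != b)    # False: identical to_dict() representations
print(a != c)    # True: object_props differ
print(a != "x")  # True: not a Field instance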
globocom/globonetworkapi-client-python
networkapiclient/ApiEnvironmentVip.py
ApiEnvironmentVip.delete
python
def delete(self, ids): url = build_uri_with_ids('api/v3/environment-vip/%s/', ids) return super(ApiEnvironmentVip, self).delete(url)
Method to delete environments vip by their ids. :param ids: Identifiers of environments vip :return: None
https://github.com/globocom/globonetworkapi-client-python/blob/08dc24c54ee3cd6cdcca1fb33fb4796db8118e6f/networkapiclient/ApiEnvironmentVip.py#L96-L104
from networkapiclient.ApiGenericClient import ApiGenericClient from networkapiclient.utils import build_uri_with_ids class ApiEnvironmentVip(ApiGenericClient): def __init__(self, networkapi_url, user, password, user_ldap=None): super(ApiEnvironmentVip, self).__init__( networkapi_url, user, password, user_ldap ) def get_environment_vip(self, environment_vip_id, fields=None): uri = 'api/v3/environment-vip/%s/' % environment_vip_id if fields: uri += '?fields={}'.format(','.join(fields)) return super(ApiEnvironmentVip, self).get( uri) def environmentvip_step(self, finality='', client='', environmentp44=''): uri = 'api/v3/environment-vip/step/?finality=%s&client=%s&environmentp44=%s' % ( finality, client, environmentp44) return super(ApiEnvironmentVip, self).get( uri) def search(self, **kwargs): return super(ApiEnvironmentVip, self).get( self.prepare_url('api/v3/environment-vip/', kwargs)) def get(self, ids, **kwargs): uri = build_uri_with_ids('api/v3/environment-vip/%s/', ids) return super(ApiEnvironmentVip, self).get( self.prepare_url(uri, kwargs))
Apache License 2.0
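A minimal usage sketch, assuming a reachable GloboNetworkAPI endpoint; the URL, credentials, and ids are placeholders.

from networkapiclient.ApiEnvironmentVip import ApiEnvironmentVip

client = ApiEnvironmentVip("http://networkapi.example.com/", "user", "password")
client.delete([13, 14])   # one DELETE request covering both environments vip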
obi-wan3/ob13-cogs
publicrooms/publicrooms.py
PublicRooms._edit_bitrate
python
async def _edit_bitrate(self, ctx: commands.Context, system_name: str, bitrate_in_kbps: int): async with self.config.guild(ctx.guild).systems() as systems: if system_name not in systems.keys(): return await ctx.send("There was no PublicRooms system found with that name!") systems[system_name]["bitrate"] = bitrate_in_kbps return await ctx.tick()
Edit the new VC bitrate (in kbps) for a PublicRooms system in this server.
https://github.com/obi-wan3/ob13-cogs/blob/716527f8581e0345802ea2626d43324f87edf941/publicrooms/publicrooms.py#L370-L379
from datetime import datetime import discord from redbot.core import commands, Config class PublicRooms(commands.Cog): def __init__(self, bot): self.bot = bot self.config = Config.get_conf(self, identifier=14000605, force_registration=True) default_guild = { "toggle": False, "systems": {}, } self.config.register_guild(**default_guild) self.bot.loop.create_task(self.initialize()) async def initialize(self) -> None: await self.bot.wait_until_red_ready() all_guilds = await self.config.all_guilds() for g in all_guilds.keys(): if guild := self.bot.get_guild(g): async with self.config.guild_from_id(g).all() as guild_settings: for sys in guild_settings['systems'].values(): for a in sys['active']: vc = guild.get_channel(a[0]) if not vc or not vc.members: sys['active'].remove(a) if vc and not vc.members and vc.permissions_for(guild.me).manage_channels: await vc.delete(reason="PublicRooms: unused VC found on cog load") @commands.Cog.listener("on_voice_state_update") async def _voice_listener(self, member: discord.Member, before, after): if ( not await self.config.guild(member.guild).toggle() or member.bot or await self.bot.cog_disabled_in_guild(self, member.guild) ): return leftroom = False joinedroom = False if before.channel and after.channel: async with self.config.guild(member.guild).systems() as systems: for sys in systems.values(): if not sys['toggle']: continue active = [x[0] for x in sys['active']] log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild) if log_channel and before.channel.id not in active and after.channel.id in active: await self._send_log( channel=log_channel, text=f"{member.mention} joined `{after.channel.name}`", color=discord.Color.magenta(), embed_links=embed_links ) if before.channel.id in active and before.channel.id != after.channel.id: leftroom = True if sys['origin'] == after.channel.id != before.channel.id: joinedroom = True if leftroom and joinedroom: break if (before.channel and not after.channel) or leftroom: async with self.config.guild(member.guild).systems() as systems: for sys in systems.values(): if not sys['toggle']: continue for a in sys['active']: if not a[0] == before.channel.id: continue if not before.channel.members: sys['active'].remove(a) if before.channel.permissions_for(member.guild.me).manage_channels: await before.channel.delete(reason="PublicRooms: all users have left") else: return log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild) if log_channel: await self._send_log( channel=log_channel, text=f"{member.mention} left `{before.channel.name}`, channel removed", color=discord.Color.dark_teal(), embed_links=embed_links, ) break if sys['overrides'].get(str(member.id)) == before.channel.name : no_created = False no_missing = False all_nums = [x[1] for x in sys['active'] if x[1] != 0] try: num = list(set(range(1, max(all_nums) + 1)) - set(all_nums))[0] for i in sorted(sys['active'], key=lambda x: x[1]): if i[1] > num: ch = i[0] break position = member.guild.get_channel(ch).position - 1 except IndexError: num = max(all_nums) + 1 no_missing = True except ValueError: no_created = True if before.channel.permissions_for(member.guild.me).manage_channels: if no_created or no_missing: if no_created: num = 1 public_vc = await before.channel.edit( name=sys['channel_name'].replace("{num}", str(num)), reason=f"PublicRooms: {member.display_name} left room with custom name", ) else: public_vc = await before.channel.edit( name=sys['channel_name'].replace("{num}", str(num)), position=position, 
reason=f"PublicRooms: {member.display_name} left room with custom name", ) else: return log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild) if log_channel: await self._send_log( channel=log_channel, text=f"{member.mention} left `{before.channel.name}`, renamed to {public_vc.name}", color=discord.Color.teal(), embed_links=embed_links, ) break log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild) if log_channel: await self._send_log( channel=log_channel, text=f"{member.mention} left `{before.channel.name}`", color=discord.Color.magenta(), embed_links=embed_links, ) break if (not before.channel and after.channel) or joinedroom: async with self.config.guild(member.guild).systems() as systems: for sys in systems.values(): if sys['toggle'] and sys['origin'] == after.channel.id: if not after.channel.category.permissions_for(member.guild.me).manage_channels: return channel_name = sys['overrides'].get(str(member.id)) if channel_name: num = 0 public_vc = await member.guild.create_voice_channel( name=channel_name, category=after.channel.category, position=after.channel.position+1, bitrate=min(sys['bitrate'] * 1000, member.guild.bitrate_limit), reason=f"PublicRooms: created by {member.display_name}", ) else: no_created = False no_missing = False all_nums = [x[1] for x in sys['active']] try: num = list(set(range(1, max(all_nums) + 1)) - set(all_nums))[0] for i in sorted(sys['active'], key=lambda x: x[1]): if i[1] > num: ch = i[0] break position = member.guild.get_channel(ch).position - 1 except IndexError: num = max(all_nums) + 1 no_missing = True except ValueError: no_created = True if no_created or no_missing: if no_created: num = 1 public_vc = await member.guild.create_voice_channel( name=sys['channel_name'].replace("{num}", str(num)), category=after.channel.category, bitrate=min(sys['bitrate']*1000, member.guild.bitrate_limit), reason=f"PublicRooms: created by {member.display_name}", ) else: public_vc = await member.guild.create_voice_channel( name=sys['channel_name'].replace("{num}", str(num)), category=after.channel.category, position=position, bitrate=min(sys['bitrate'] * 1000, member.guild.bitrate_limit), reason=f"PublicRooms: created by {member.display_name}", ) if not (after.channel.permissions_for(member.guild.me).move_members and public_vc.permissions_for(member.guild.me).move_members): return await member.move_to(public_vc, reason="PublicRooms: is VC creator") log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild) if log_channel: await self._send_log( channel=log_channel, text=f"{member.mention} created `{public_vc.name}`", color=discord.Color.teal(), embed_links=embed_links, ) sys['active'].append((public_vc.id, num)) break elif sys['toggle'] and sys['log_channel'] and after.channel.id in [x[0] for x in sys['active']]: log_channel, embed_links = await self._get_log(sys['log_channel'], member.guild) if log_channel: await self._send_log( channel=log_channel, text=f"{member.mention} joined `{after.channel.name}`", color=discord.Color.magenta(), embed_links=embed_links, ) @staticmethod async def _get_log(channel_id, guild: discord.Guild): log_channel, embed_links = None, False if channel_id: log_channel = guild.get_channel(channel_id) if not log_channel or not log_channel.permissions_for(guild.me).send_messages: log_channel = None if log_channel and log_channel.permissions_for(guild.me).embed_links: embed_links = True return log_channel, embed_links @staticmethod async def _send_log(channel: discord.TextChannel, 
text: str, color: discord.Color, embed_links: bool): if embed_links: return await channel.send(embed=discord.Embed( timestamp=datetime.utcnow(), color=color, description=text )) else: return await channel.send( text, allowed_mentions=discord.AllowedMentions.none() ) @commands.guild_only() @commands.admin_or_permissions(administrator=True) @commands.group(name="publicrooms") async def _publicrooms(self, ctx: commands.Context): @_publicrooms.command(name="toggle") async def _toggle(self, ctx: commands.Context, true_or_false: bool): await self.config.guild(ctx.guild).toggle.set(true_or_false) return await ctx.tick() @_publicrooms.command(name="add") async def _add(self, ctx: commands.Context, system_name: str, origin_channel: discord.VoiceChannel, default_bitrate_in_kbps: int, *, channel_name_template: str): if origin_channel.category and not origin_channel.category.permissions_for(ctx.guild.me).manage_channels: return await ctx.send("I don't have the `Manage Channels` permission in that category!") elif not origin_channel.category and not ctx.guild.me.guild_permissions.manage_channels: return await ctx.send("I don't have the `Manage Channels` permission in this server!") async with self.config.guild(ctx.guild).systems() as systems: if system_name in systems.keys(): return await ctx.send("There is already a PublicRooms system with that name!") systems[system_name] = { "toggle": True, "origin": origin_channel.id, "bitrate": default_bitrate_in_kbps, "channel_name": channel_name_template, "log_channel": None, "active": [], "overrides": {} } return await ctx.send(f'A new PublicRooms system with origin channel `{origin_channel.name}` has been created and toggled on. If you would like to toggle it or set a log channel, please use `{ctx.clean_prefix}publicrooms edit logchannel {system_name}`.') @_publicrooms.group(name="edit") async def _edit(self, ctx: commands.Context): @_edit.command(name="toggle") async def _edit_toggle(self, ctx: commands.Context, system_name: str, true_or_false: bool): async with self.config.guild(ctx.guild).systems() as systems: systems[system_name]["toggle"] = true_or_false return await ctx.tick() @_edit.command(name="origin") async def _edit_origin(self, ctx: commands.Context, system_name: str, origin_channel: discord.VoiceChannel): async with self.config.guild(ctx.guild).systems() as systems: if system_name not in systems.keys(): return await ctx.send("There was no PublicRooms system found with that name!") systems[system_name]["origin"] = origin_channel.id return await ctx.tick() @_edit.command(name="bitrate")
MIT License
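The `_get_log` / `_send_log` pair in the context above follows a capability-check-then-fallback pattern: resolve the log channel, confirm the bot may send there, then prefer an embed only when `embed_links` is available. Below is a minimal, library-free sketch of that pattern; `FakeChannel`, `resolve_log_channel`, and `send_log` are hypothetical stand-ins, not part of the cog.

from dataclasses import dataclass

@dataclass
class FakeChannel:
    # Hypothetical stand-in for a Discord text channel's permission surface.
    can_send: bool
    can_embed: bool

    def send(self, text, as_embed=False):
        print(("EMBED: " if as_embed else "TEXT: ") + text)

def resolve_log_channel(channel):
    # Mirrors _get_log: drop the channel if we cannot send, note embed support.
    if channel is None or not channel.can_send:
        return None, False
    return channel, channel.can_embed

def send_log(channel, text):
    # Mirrors _send_log: prefer an embed, fall back to plain text.
    ch, embed_links = resolve_log_channel(channel)
    if ch:
        ch.send(text, as_embed=embed_links)

send_log(FakeChannel(can_send=True, can_embed=False), "member joined a room")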
napari/napari
napari/_qt/qt_viewer.py
QtViewer._reorder_layers
python
def _reorder_layers(self, event):
    for i, layer in enumerate(self.viewer.layers):
        vispy_layer = self.layer_to_visual[layer]
        vispy_layer.order = i
    self.canvas._draw_order.clear()
    self.canvas.update()
When the list is reordered, propagate changes to draw order.

Parameters
----------
event : napari.utils.event.Event
    The napari event that triggered this method.
https://github.com/napari/napari/blob/c4c987c880fe125da608edf427767eafe7f2b3f4/napari/_qt/qt_viewer.py#L446-L458
from __future__ import annotations import warnings from typing import TYPE_CHECKING, Optional import numpy as np from qtpy.QtCore import QCoreApplication, QObject, Qt from qtpy.QtGui import QCursor, QGuiApplication from qtpy.QtWidgets import QFileDialog, QSplitter, QVBoxLayout, QWidget from ..components.camera import Camera from ..components.layerlist import LayerList from ..utils import config, perf from ..utils.action_manager import action_manager from ..utils.colormaps.standardize_color import transform_color from ..utils.history import ( get_open_history, get_save_history, update_open_history, update_save_history, ) from ..utils.interactions import ( ReadOnlyWrapper, mouse_double_click_callbacks, mouse_move_callbacks, mouse_press_callbacks, mouse_release_callbacks, mouse_wheel_callbacks, ) from ..utils.io import imsave from ..utils.key_bindings import KeymapHandler from ..utils.misc import in_ipython from ..utils.theme import get_theme from ..utils.translations import trans from .containers import QtLayerList from .dialogs.screenshot_dialog import ScreenshotDialog from .perf.qt_performance import QtPerformance from .utils import QImg2array, circle_pixmap, square_pixmap from .widgets.qt_dims import QtDims from .widgets.qt_viewer_buttons import QtLayerButtons, QtViewerButtons from .widgets.qt_viewer_dock_widget import QtViewerDockWidget from .widgets.qt_welcome import QtWidgetOverlay from .._vispy import ( VispyAxesOverlay, VispyCamera, VispyCanvas, VispyScaleBarOverlay, VispyTextOverlay, create_vispy_visual, ) if TYPE_CHECKING: from ..viewer import Viewer from ..settings import get_settings from ..utils.io import imsave_extensions class QtViewer(QSplitter): def __init__(self, viewer: Viewer, show_welcome_screen: bool = False): from .layer_controls import QtLayerControlsContainer super().__init__() self.setAttribute(Qt.WA_DeleteOnClose) self._show_welcome_screen = show_welcome_screen QCoreApplication.setAttribute( Qt.AA_UseStyleSheetPropagationInWidgetStyles, True ) self.viewer = viewer self.dims = QtDims(self.viewer.dims) self.controls = QtLayerControlsContainer(self.viewer) self.layers = QtLayerList(self.viewer.layers) self.layerButtons = QtLayerButtons(self.viewer) self.viewerButtons = QtViewerButtons(self.viewer) self._key_map_handler = KeymapHandler() self._key_map_handler.keymap_providers = [self.viewer] self._console = None layerList = QWidget() layerList.setObjectName('layerList') layerListLayout = QVBoxLayout() layerListLayout.addWidget(self.layerButtons) layerListLayout.addWidget(self.layers) layerListLayout.addWidget(self.viewerButtons) layerListLayout.setContentsMargins(8, 4, 8, 6) layerList.setLayout(layerListLayout) self.dockLayerList = QtViewerDockWidget( self, layerList, name=trans._('layer list'), area='left', allowed_areas=['left', 'right'], object_name='layer list', ) self.dockLayerControls = QtViewerDockWidget( self, self.controls, name=trans._('layer controls'), area='left', allowed_areas=['left', 'right'], object_name='layer controls', ) self.dockConsole = QtViewerDockWidget( self, QWidget(), name=trans._('console'), area='bottom', allowed_areas=['top', 'bottom'], object_name='console', ) self.dockConsole.setVisible(False) self.dockConsole.visibilityChanged.connect(self._ensure_connect) self.dockLayerControls.visibilityChanged.connect(self._constrain_width) self.dockLayerList.setMaximumWidth(258) self.dockLayerList.setMinimumWidth(258) self.dockPerformance = self._create_performance_dock_widget() self.layer_to_visual = {} action_manager.register_action( 
"napari:toggle_console_visibility", self.toggle_console_visibility, trans._("Show/Hide IPython console"), self.viewer, ) action_manager.bind_button( 'napari:toggle_console_visibility', self.viewerButtons.consoleButton, ) self._create_canvas() self._canvas_overlay = QtWidgetOverlay(self, self.canvas.native) self._canvas_overlay.set_welcome_visible(show_welcome_screen) self._canvas_overlay.sig_dropped.connect(self.dropEvent) main_widget = QWidget() main_layout = QVBoxLayout() main_layout.setContentsMargins(10, 22, 10, 2) main_layout.addWidget(self._canvas_overlay) main_layout.addWidget(self.dims) main_layout.setSpacing(10) main_widget.setLayout(main_layout) self.setOrientation(Qt.Vertical) self.addWidget(main_widget) self._cursors = { 'cross': Qt.CrossCursor, 'forbidden': Qt.ForbiddenCursor, 'pointing': Qt.PointingHandCursor, 'standard': QCursor(), } self._on_active_change() self.viewer.layers.events.inserted.connect(self._update_welcome_screen) self.viewer.layers.events.removed.connect(self._update_welcome_screen) self.viewer.layers.selection.events.active.connect( self._on_active_change ) self.viewer.camera.events.interactive.connect(self._on_interactive) self.viewer.cursor.events.style.connect(self._on_cursor) self.viewer.cursor.events.size.connect(self._on_cursor) self.viewer.layers.events.reordered.connect(self._reorder_layers) self.viewer.layers.events.inserted.connect(self._on_add_layer_change) self.viewer.layers.events.removed.connect(self._remove_layer) self.setAcceptDrops(True) for layer in self.viewer.layers: self._add_layer(layer) self.view = self.canvas.central_widget.add_view() self.camera = VispyCamera( self.view, self.viewer.camera, self.viewer.dims ) self.canvas.connect(self.camera.on_draw) self._add_visuals() self._qt_poll = _create_qt_poll(self, self.viewer.camera) self._remote_manager = _create_remote_manager( self.viewer.layers, self._qt_poll ) if config.async_loading: from .experimental.qt_chunk_receiver import QtChunkReceiver self.chunk_receiver = QtChunkReceiver(self.layers) else: self.chunk_receiver = None self._bind_shortcuts() def _ensure_connect(self): id(self.console) def _bind_shortcuts(self): for action, shortcuts in get_settings().shortcuts.shortcuts.items(): action_manager.unbind_shortcut(action) for shortcut in shortcuts: action_manager.bind_shortcut(action, shortcut) def _create_canvas(self) -> None: self.canvas = VispyCanvas( keys=None, vsync=True, parent=self, size=self.viewer._canvas_size[::-1], ) self.canvas.events.draw.connect(self.dims.enable_play) self.canvas.connect(self.on_mouse_double_click) self.canvas.connect(self.on_mouse_move) self.canvas.connect(self.on_mouse_press) self.canvas.connect(self.on_mouse_release) self.canvas.connect(self._key_map_handler.on_key_press) self.canvas.connect(self._key_map_handler.on_key_release) self.canvas.connect(self.on_mouse_wheel) self.canvas.connect(self.on_draw) self.canvas.connect(self.on_resize) self.canvas.bgcolor = transform_color( get_theme(self.viewer.theme, False).canvas.as_hex() )[0] theme = self.viewer.events.theme on_theme_change = self.canvas._on_theme_change theme.connect(on_theme_change) self.canvas.destroyed.connect(self._diconnect_theme) def _diconnect_theme(self): self.viewer.events.theme.disconnect(self.canvas._on_theme_change) def _add_visuals(self) -> None: self.axes = VispyAxesOverlay( self.viewer, parent=self.view.scene, order=1e6, ) self.scale_bar = VispyScaleBarOverlay( self.viewer, parent=self.view, order=1e6 + 1, ) self.canvas.events.resize.connect(self.scale_bar._on_position_change) 
self.text_overlay = VispyTextOverlay( self.viewer, parent=self.view, order=1e6 + 2, ) self.canvas.events.resize.connect( self.text_overlay._on_position_change ) def _create_performance_dock_widget(self): if perf.USE_PERFMON: return QtViewerDockWidget( self, QtPerformance(), name=trans._('performance'), area='bottom', ) return None @property def console(self): if self._console is None: try: from napari_console import QtConsole import napari with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.console = QtConsole(self.viewer) self.console.push( {'napari': napari, 'action_manager': action_manager} ) except ImportError: warnings.warn( trans._( 'napari-console not found. It can be installed with' ' "pip install napari_console"' ) ) self._console = None return self._console @console.setter def console(self, console): self._console = console if console is not None: self.dockConsole.setWidget(console) console.setParent(self.dockConsole) def _constrain_width(self, event): if self.dockLayerControls.isFloating(): self.controls.setMaximumWidth(700) else: self.controls.setMaximumWidth(220) def _on_active_change(self, event=None): self._key_map_handler.keymap_providers = ( [self.viewer] if self.viewer.layers.selection.active is None else [self.viewer.layers.selection.active, self.viewer] ) def _on_add_layer_change(self, event): layer = event.value self._add_layer(layer) def _add_layer(self, layer): vispy_layer = create_vispy_visual(layer) if self._qt_poll is not None: self._qt_poll.events.poll.connect(vispy_layer._on_poll) if vispy_layer.events is not None: vispy_layer.events.loaded.connect(self._qt_poll.wake_up) vispy_layer.node.parent = self.view.scene vispy_layer.order = len(self.viewer.layers) - 1 self.layer_to_visual[layer] = vispy_layer def _remove_layer(self, event): layer = event.value vispy_layer = self.layer_to_visual[layer] vispy_layer.close() del vispy_layer del self.layer_to_visual[layer] self._reorder_layers(None)
BSD 3-Clause New or Revised License
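`_reorder_layers` simply re-derives each visual's draw order from the layer's index in the model list. A minimal sketch of the same idea with plain objects (no Qt or vispy), where `Visual` is a hypothetical stand-in for a vispy layer visual:

class Visual:
    # Hypothetical stand-in for a vispy layer visual with an `order` attribute.
    def __init__(self):
        self.order = 0

layers = ["image", "points", "labels"]           # model order
layer_to_visual = {name: Visual() for name in layers}

def reorder(layers, layer_to_visual):
    # Same loop as _reorder_layers: index in the list becomes the draw order.
    for i, layer in enumerate(layers):
        layer_to_visual[layer].order = i

layers.reverse()                                 # simulate a reordered LayerList
reorder(layers, layer_to_visual)
assert layer_to_visual["labels"].order == 0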
cybertronai/ncluster
ncluster/aws_util.py
get_vpc
python
def get_vpc() -> Vpc:
    return get_vpc_dict()[get_prefix()]
Returns current VPC (ec2.Vpc object).

https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#vpc
https://github.com/cybertronai/ncluster/blob/9c2a7fb9677dba9afe48c94f35bde7c41e4cc75f/ncluster/aws_util.py#L52-L58
import os import re import subprocess import sys import time from collections import OrderedDict import botocore.exceptions import paramiko from operator import itemgetter from typing import Iterable, List, Dict, Optional from boto3_type_annotations.ec2 import SecurityGroup, Vpc, Subnet, InternetGateway, PlacementGroup, Image, Instance, KeyPairInfo from boto3.resources.collection import ResourceCollection from boto3_type_annotations.ec2 import ServiceResource as EC2_ServiceResource from boto3_type_annotations.ec2 import Client as EC2_Client from boto3_type_annotations.sts import Client as STS_Client import boto3 from . import util from .util import VALID_REGIONS EMPTY_NAME = "noname" RETRY_INTERVAL_SEC = 1 RETRY_TIMEOUT_SEC = 60 DEFAULT_PREFIX = 'ncluster' PRIVATE_KEY_LOCATION = os.environ['HOME'] + '/.ncluster' DUPLICATE_CHECKING = False u = sys.modules[__name__] def validate_states(states: Iterable[str]): valid_states = {'running', 'stopped', 'initializing', 'terminated'} invalid_states = set(states).difference(valid_states) assert not invalid_states, f"Found invalid states {invalid_states}, valid states are {valid_states}"
MIT License
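`get_vpc` resolves the current VPC by indexing a name-keyed dictionary with the configured prefix. A rough sketch of how such a lookup could be built with boto3 follows; this is an assumption-based illustration of the pattern, not the library's actual `get_vpc_dict` implementation, and `NCLUSTER_PREFIX` is a hypothetical environment variable.

import os
import boto3

def get_prefix_sketch() -> str:
    # Hypothetical: read the ncluster prefix from the environment.
    return os.environ.get("NCLUSTER_PREFIX", "ncluster")

def get_vpc_dict_sketch() -> dict:
    # Build a {Name-tag: Vpc} mapping, analogous to what get_vpc_dict returns.
    ec2 = boto3.resource("ec2")
    vpcs = {}
    for vpc in ec2.vpcs.all():
        for tag in vpc.tags or []:
            if tag["Key"] == "Name":
                vpcs[tag["Value"]] = vpc
    return vpcs

# Usage mirroring get_vpc(): index the dict with the prefix.
# current_vpc = get_vpc_dict_sketch()[get_prefix_sketch()]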
baidu/cup
cup/shell/oper.py
execshell_withpipe_str
python
def execshell_withpipe_str(cmd, b_printcmd=True):
    return ''.join(execshell_withpipe_ex(cmd, b_printcmd))
Deprecated. Recommend using ShellExec instead.
https://github.com/baidu/cup/blob/79ab2f3ad6eaab1461aa3b4cca37d3262240194a/cup/shell/oper.py#L650-L654
from __future__ import print_function import os import sys import time import uuid import tempfile import shutil import signal import random import hashlib import platform import warnings import datetime import threading import subprocess import cup from cup import err from cup import log from cup import platforms from cup import decorators if platform.system() == 'Linux': from cup.res import linux __all__ = [ 'rm', 'rmrf', 'kill', 'is_process_used_port', 'is_port_used', 'is_proc_exist', 'is_proc_exist', 'is_process_running', 'contains_file', 'backup_file', 'ShellExec' ] else: __all__ = [ 'contains_file', 'backup_file' ] def rm(name): try: os.remove(name) except OSError as error: cup.log.warn("rm oserror: %s" % error) def rmrf(fpath, safemode=True): @decorators.needlinux def _real_rmrf(fpath, safemode): if safemode: if os.path.normpath(os.path.abspath(fpath)) == '/': raise err.ShellException('cannot rmtree root / under safemode') if os.path.isfile(fpath): os.unlink(fpath) else: shutil.rmtree(fpath) return _real_rmrf(fpath, safemode) def is_process_running(path, name): @decorators.needlinux def _real_is_proc_exist(path, name): path = os.path.realpath(os.path.abspath(path)) cmd = 'ps -ef|grep %s|grep -v "^grep "|grep -v "^vim "|grep -v "^less "|\ grep -v "^vi "|grep -v "^cat "|grep -v "^more "|grep -v "^tail "|\ awk \'{print $2}\'' % (name) ret = cup.shell.ShellExec().run(cmd, 10) pids = ret['stdout'].strip().split('\n') if len(pids) == 0 or len(pids) == 1 and len(pids[0]) == 0: return False for pid in pids: for sel_path in ["cwd", "exe"]: cmd = 'ls -l /proc/%s/%s|awk \'{print $11}\' ' % (pid, sel_path) ret = cup.shell.ShellExec().run(cmd, 10) pid_path = ret['stdout'].strip().strip() if pid_path.find(path) == 0: return True return False return _real_is_proc_exist(path, name) is_proc_exist = is_process_running def _kill_child(pid, sign): cmd = 'ps -ef|grep %s|grep -v grep|awk \'{print $2,$3}\'' % (pid) ret = cup.shell.ShellExec().run(cmd, 10) pids = ret['stdout'].strip().split('\n') for proc in pids: if len(proc) == 0: continue p_id = proc.split() if p_id[1] == pid: _kill_child(p_id[0], sign) if p_id[0] == pid: if len(sign) == 0: cup.shell.execshell('kill %s' % pid) elif sign == '9' or sign == '-9': cup.shell.execshell('kill -9 %s' % pid) elif sign == 'SIGSTOP' or sign == '19' or sign == '-19': cup.shell.execshell('kill -19 %s' % pid) elif sign == 'SIGCONT' or sign == '18' or sign == '-18': cup.shell.execshell('kill -18 %s' % pid) else: cup.log.error('sign error') def kill(path, name, sign='', b_kill_child=False): path = os.path.realpath(os.path.abspath(path)) cmd = 'ps -ef|grep %s|grep -v grep|awk \'{print $2}\'' % (name) ret = cup.shell.ShellExec().run(cmd, 10) pids = ret['stdout'].strip().split('\n') for pid in pids: cmd = 'ls -l /proc/%s/cwd|awk \'{print $11}\' ' % (pid) ret = cup.shell.ShellExec().run(cmd, 10) if ret['returncode'] != 0: return False pid_path = ret['stdout'].strip() if pid_path.find(path) == 0 or path.find(pid_path) == 0: if b_kill_child is True: _kill_child(pid, sign) if len(sign) == 0: cup.shell.execshell('kill %s' % pid) elif sign == '9' or sign == '-9': cup.shell.execshell('kill -9 %s' % pid) elif sign == 'SIGSTOP' or sign == '19' or sign == '-19': cup.shell.execshell('kill -19 %s' % pid) elif sign == 'SIGCONT' or sign == '18' or sign == '-18': cup.shell.execshell('kill -18 %s' % pid) else: cup.log.error('sign error') return True def backup_file(srcpath, filename, dstpath, label=None): if label is None: label = time.strftime('%H:%M:%S') if not os.path.exists(dstpath): 
os.makedirs(dstpath) shutil.copyfile( srcpath + '/' + filename, dstpath + '/' + filename + '.' + label ) def backup_folder(srcpath, foldername, dstpath, label=None): if label is None: label = time.strftime('%H:%M:%S') if not os.path.exists(dstpath): os.makedirs(dstpath) os.rename( '%s/%s' % (srcpath, foldername), '%s/%s' % (dstpath, foldername + '.' + label) ) def is_path_contain_file(dstpath, dstfile, recursive=False, follow_link=False): return contains_file(dstpath, dstfile, recursive, follow_link) def contains_file(dstpath, expected_name, recursive=False, follow_link=False): path = os.path.normpath(dstpath) fpath = os.path.normpath(expected_name.strip()) fullpath = '{0}/{1}'.format(path, expected_name.strip()) fullpath = os.path.normpath(fullpath) if recursive: for (_, __, fnames) in os.walk(path, followlinks=follow_link): for filename in fnames: if filename == fpath: return True return False else: if os.path.exists(fullpath): return True else: return False def is_port_used(port): @decorators.needlinux def __is_port_used(port): cmd = "netstat -nl | grep ':%s '" % (port) ret = cup.shell.ShellExec().run(cmd, 10) if 0 != ret['returncode']: return False stdout = ret['stdout'].strip() if 0 == len(stdout): return False else: return True return __is_port_used(port) def is_process_used_port(process_path, port): cmd = "netstat -nlp | grep ':%s '|awk -F ' ' '{print $7}'|\ cut -d \"/\" -f1" % (port) ret = cup.shell.ShellExec().run(cmd, 10) if 0 != ret['returncode']: return False stdout = ret['stdout'].strip() if 0 == len(stdout): return False dst_pid = stdout.strip() path = os.path.abspath(process_path) for sel_path in ['exe', 'cwd']: cmd = 'ls -l /proc/%s/%s|awk \'{print $11}\' ' % (dst_pid, sel_path) ret = cup.shell.ShellExec().run(cmd, 10) pid_path = ret['stdout'].strip().strip() if 0 == pid_path.find(path): return True return False class Asynccontent(object): def __init__(self): self.cmd = None self.timeout = None self.pid = None self.ret = { 'stdout': None, 'stderr': None, 'returncode': 0 } self.child_list = [] self.cmdthd = None self.monitorthd = None self.subproc = None self.tempscript = None class ShellExec(object): def __init__(self, tmpdir='/tmp/'): self._subpro = None self._subpro_data = None self._tmpdir = tmpdir self._tmpprefix = 'cup.shell.{0}'.format(uuid.uuid4()) @classmethod def kill_all_process(cls, async_content): for pid in async_content.child_list: os.kill(pid, signal.SIGKILL) @classmethod def which(cls, pgm): if os.path.exists(pgm) and os.access(pgm, os.X_OK): return pgm path = os.getenv('PATH') for fpath in path.split(os.path.pathsep): fpath = os.path.join(fpath, pgm) if os.path.exists(fpath) and os.access(fpath, os.X_OK): return fpath @classmethod def get_async_run_status(cls, async_content): try: async_process = linux.Process(async_content.pid) res = async_process.get_process_status() except err.NoSuchProcess: res = None return res @classmethod def get_async_run_res(cls, async_content): return async_content.ret def async_run(self, cmd, timeout): def _signal_handle(): signal.signal(signal.SIGPIPE, signal.SIG_DFL) def _target(argcontent, proc_cond): argcontent.tempscript = tempfile.NamedTemporaryFile( dir=self._tmpdir, prefix=self._tmpprefix, delete=True ) with open(argcontent.tempscript.name, 'w+b') as fhandle: fhandle.write('cd {0};\n'.format(os.getcwd())) fhandle.write(argcontent.cmd) shexe = self.which('sh') cmds = [shexe, argcontent.tempscript.name] log.info( 'to async execute {0} with script {1}'.format( argcontent.cmd, cmds) ) try: proc_cond.acquire() 
argcontent.subproc = subprocess.Popen( cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=_signal_handle) proc_cond.notify() proc_cond.release() except OSError: proc_cond.notify() proc_cond.release() argcontent.ret['returncode'] = -1 argcontent.ret['stderr'] = ( 'failed to execute the cmd, plz check it out\'s' ) def _monitor(start_time, argcontent): while(int(time.mktime(datetime.datetime.now().timetuple())) - int(start_time) < int(argcontent.timeout)): time.sleep(1) if argcontent.subproc.poll() is not None: self._subpro_data = argcontent.subproc.communicate() argcontent.ret['returncode'] = argcontent.subproc.returncode argcontent.ret['stdout'] = self._subpro_data[0] argcontent.ret['stderr'] = self._subpro_data[1] return parent = linux.Process(argcontent.subproc.pid) children = parent.children(True) ret_dict = [] for process in children: ret_dict.append(process) argcontent.child_list = ret_dict str_warn = ( 'Shell "{0}"execution timout:{1}. To kill it'.format( argcontent.cmd, argcontent.timeout) ) self.kill_all_process(argcontent) argcontent.ret['returncode'] = 999 argcontent.ret['stderr'] = str_warn argcontent.subproc.terminate() argcontent = Asynccontent() argcontent.cmd = cmd argcontent.timeout = timeout argcontent.ret = { 'stdout': None, 'stderr': None, 'returncode': -999 } proc_cond = threading.Condition(threading.Lock()) argcontent.cmdthd = threading.Thread( target=_target, args=(argcontent, proc_cond)) argcontent.cmdthd.daemon = True proc_cond.acquire() argcontent.cmdthd.start() start_time = int(time.mktime(datetime.datetime.now().timetuple())) argcontent.cmdthd.join(0.1) proc_cond.wait() proc_cond.release() if argcontent.subproc is not None: argcontent.pid = argcontent.subproc.pid argcontent.monitorthd = threading.Thread(target=_monitor, args=(start_time, argcontent)) argcontent.monitorthd.daemon = True argcontent.monitorthd.start() argcontent.cmdthd.join(0.5) return argcontent def run(self, cmd, timeout): def _signal_handle(): signal.signal(signal.SIGPIPE, signal.SIG_DFL) def _trans_bytes(data): if platforms.is_py2(): return data if isinstance(data, bytes): try: data = bytes.decode(data) except Exception: data = 'Error to decode result' return data def _pipe_asshell(cmd): tempscript = tempfile.NamedTemporaryFile( dir=self._tmpdir, prefix=self._tmpprefix, delete=True ) with open(tempscript.name, 'w+') as fhandle: fhandle.write('cd {0};\n'.format(os.getcwd())) fhandle.write(cmd) shexe = self.which('sh') cmds = [shexe, tempscript.name] log.info( 'cup shell execute {0} with script {1}'.format( cmd, cmds) ) self._subpro = subprocess.Popen( cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=_signal_handle ) self._subpro_data = self._subpro.communicate() ret = { 'stdout': None, 'stderr': None, 'returncode': 0 } cmdthd = threading.Thread( target=_pipe_asshell, args=(cmd, ) ) cmdthd.start() cmdthd.join(timeout) if cmdthd.isAlive(): str_warn = ( 'Shell "%s"execution timout:%d. 
Killed it' % (cmd, timeout) ) warnings.warn(str_warn, RuntimeWarning) parent = linux.Process(self._subpro.pid) for child in parent.children(True): os.kill(child, signal.SIGKILL) ret['returncode'] = 999 ret['stderr'] = str_warn self._subpro.terminate() else: self._subpro.wait() times = 0 while self._subpro.returncode is None and times < 10: time.sleep(1) times += 1 ret['returncode'] = self._subpro.returncode assert type(self._subpro_data) == tuple, 'self._subpro_data should be a tuple' ret['stdout'] = _trans_bytes(self._subpro_data[0]) ret['stderr'] = _trans_bytes(self._subpro_data[1]) return ret def _do_execshell(cmd, b_printcmd=True, timeout=None): if timeout is not None and timeout < 0: raise cup.err.ShellException( 'timeout should be None or >= 0' ) if b_printcmd is True: print('To exec cmd:{0}'.format(cmd)) shellexec = ShellExec() return shellexec.run(cmd, timeout) def execshell(cmd, b_printcmd=True, timeout=None): return _do_execshell( cmd, b_printcmd=b_printcmd, timeout=timeout)['returncode'] def execshell_withpipe(cmd): res = os.popen(cmd) return res def execshell_withpipe_ex(cmd, b_printcmd=True): strfile = '/tmp/%s.%d.%d' % ( 'shell_env.py', int(os.getpid()), random.randint(100000, 999999) ) os.mknod(strfile) cmd = cmd + ' 1>' + strfile + ' 2>/dev/null' os.system(cmd) if True == b_printcmd: print(cmd) fphandle = open(strfile, 'r') lines = fphandle.readlines() fphandle.close() os.unlink(strfile) return lines
Apache License 2.0
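Since `execshell_withpipe_str` is deprecated in favor of `ShellExec`, the replacement usage would look roughly like the sketch below. It is grounded in the `ShellExec.run(cmd, timeout)` signature shown in the context above, which returns a dict with `stdout`, `stderr`, and `returncode`; the command string is only illustrative.

from cup.shell import ShellExec

executor = ShellExec()
result = executor.run('echo hello', 10)     # dict: stdout / stderr / returncode
if result['returncode'] == 0:
    output_str = result['stdout']           # replaces execshell_withpipe_str(cmd)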
chandler37/immaculater
pyatdllib/core/auditable_object.py
AuditableObject.MergeCommonFrom
python
def MergeCommonFrom(self, pb: common.TypeHavingCommon) -> None:
    assert pb.common.uid == self.uid, (pb.common.uid, self.uid)
    ts = pb.common.timestamp
    for field in ['ctime', 'mtime']:
        if not ts.HasField(field):
            setattr(ts, field,
                    max(ts.ctime, ts.mtime, ts.dtime) or _Int64Timestamp(time.time()))
    self.SetFieldsBasedOnProtobuf(pb.common)
NB: You must handle pb.metadata (name and note) yourself.
https://github.com/chandler37/immaculater/blob/13bfe8c949a16945d2195920375ad6d522664208/pyatdllib/core/auditable_object.py#L141-L149
from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function import os import six import time from absl import flags from google.protobuf import message from typing import Any, Optional from . import common from . import errors from . import pyatdl_pb2 from . import uid flags.DEFINE_bool('pyatdl_show_uid', True, 'When displaying objects, include unique identifiers?') def _Int64Timestamp(float_time: Optional[float]) -> int: epsilon = 1e-5 if float_time is None or -1.0 - epsilon <= float_time <= -1.0 + epsilon: return -1 if float_time < 0.0: raise errors.DataError(f"A timestamp was negative: {float_time}") assert float_time >= 0.0, float_time return int(float_time * 1e6) class Error(Exception): class IllegalNameError(Error): class AuditableObject(object): def __init__(self, the_uid: int = None) -> None: self.ctime = time.time() self.mtime = self.ctime self.dtime: Optional[float] = None self.is_deleted = False if the_uid is None: try: self.uid = uid.singleton_factory.NextUID() except errors.DataError as e: raise errors.DataError(f"In the process of instantiating an object of type {type(e)}: {e}") else: uid.singleton_factory.NoteExistingUID(the_uid) self.uid = the_uid def NoteModification(self) -> None: self.__dict__['mtime'] = time.time() if os.environ.get('DJANGO_DEBUG') == "True": assert self.__dict__['mtime'] >= self.__dict__['ctime'], str(self.__dict__) def __setattr__(self, name: str, value: Any) -> None: if name == 'name': if value is not None and value.startswith('uid='): raise IllegalNameError('Names starting with "uid=" are prohibited.') self.__dict__[name] = value if name == 'is_deleted' and value: self.__dict__['dtime'] = time.time() if name != 'mtime': self.NoteModification() def AsProto(self, pb: message.Message) -> message.Message: if not isinstance(pb, pyatdl_pb2.Common): raise TypeError pb.is_deleted = self.is_deleted did_set = False for attr in ('ctime', 'dtime', 'mtime'): val = getattr(self, attr) if val is not None: did_set = True setattr(pb.timestamp, attr, _Int64Timestamp(val)) if did_set or self.dtime is not None: pb.timestamp.dtime = _Int64Timestamp(self.dtime) pb.uid = self.uid return pb
Apache License 2.0
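The timestamp-defaulting step in `MergeCommonFrom` fills a missing `ctime` or `mtime` with the largest timestamp already present, falling back to the current time. Below is a library-free sketch of that rule, using a plain dict in place of the protobuf timestamp message and treating 0 as "unset" where the original uses `HasField`.

import time

def fill_missing_timestamps(ts: dict) -> dict:
    # ts holds microsecond timestamps; 0 plays the role of an unset field here.
    now_usec = int(time.time() * 1e6)
    for field in ('ctime', 'mtime'):
        if not ts.get(field):
            ts[field] = max(ts.get('ctime', 0), ts.get('mtime', 0),
                            ts.get('dtime', 0)) or now_usec
    return ts

print(fill_missing_timestamps({'mtime': 1_600_000_000_000_000}))
# -> ctime copied from mtime rather than set to "now"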
geoscienceaustralia/pyrate
pyrate/core/algorithm.py
unit_vector
python
def unit_vector(incidence, azimuth):
    vertical = cos(incidence)
    north_south = sin(incidence) * cos(azimuth)
    east_west = sin(incidence) * sin(azimuth)
    return east_west, north_south, vertical
Returns unit vector tuple (east_west, north_south, vertical).

:param float incidence: incidence angle w.r.t. nadir
:param float azimuth: azimuth of looking vector
:return: Unit vector (EW, NS, vertical).
:rtype: tuple
https://github.com/geoscienceaustralia/pyrate/blob/c2260b9fddaa86d2e561dca24fc422ac19faf64f/pyrate/core/algorithm.py#L109-L123
from typing import Union, Iterable, Dict, Tuple from numpy import sin, cos, unique, histogram, diag, dot from scipy.linalg import qr, solve, lstsq from pyrate.core.shared import EpochList, IfgException, PrereadIfg from pyrate.core.ifgconstants import DAYS_PER_YEAR def is_square(arr): shape = arr.shape if len(shape) == 2 and (shape[0] == shape[1]): return True return False def least_squares_covariance(A, b, v): if len(A.shape) != 2: raise ValueError('') m, n = A.shape if m <= n: raise ValueError('Problem must be over-determined') V = diag(1.0 / v.squeeze()) q, r = qr(A) efg = dot(q.T, dot(V, q)) g = efg[n:, n:] cd = dot(q.T, b) f = efg[:n, n:] c = cd[:n] d = cd[n:] r = r[:n, :n] func = solve if is_square(g) else lstsq tmp = func(g, d) func = solve if is_square(r) else lstsq return func(r, (c-f * tmp)) def los_conversion(phase_data, unit_vec): return phase_data * unit_vec
Apache License 2.0
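Because `unit_vector` applies `sin`/`cos` directly, the angles are expected in radians, and the resulting (EW, NS, vertical) triple has unit length. A quick usage sketch (the function is restated locally so the snippet is self-contained; the angle values are illustrative only):

import numpy as np
from numpy import sin, cos

def unit_vector(incidence, azimuth):
    vertical = cos(incidence)
    north_south = sin(incidence) * cos(azimuth)
    east_west = sin(incidence) * sin(azimuth)
    return east_west, north_south, vertical

# ~34 degree incidence, looking ~78 degrees from north
ew, ns, up = unit_vector(np.deg2rad(34.0), np.deg2rad(78.0))
assert np.isclose(ew**2 + ns**2 + up**2, 1.0)   # components form a unit vector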
google/shoptimizer
shoptimizer_api/optimizers_builtin/invalid_chars_optimizer.py
InvalidCharsOptimizer._optimize
python
def _optimize(
        self, product_batch: Dict[str, Any], language: str, country: str,
        currency: str) -> optimization_result_counts.OptimizationResultCounts:
    num_of_products_optimized = 0
    num_of_products_excluded = 0
    for entry in product_batch['entries']:
        if (optimization_util.optimization_exclusion_specified(
                entry, self._OPTIMIZER_PARAMETER)):
            num_of_products_excluded += 1
            continue
        product = entry['product']
        product_was_sanitized = _sanitize_fields(product, _FIELDS_TO_SANITIZE)
        if product_was_sanitized:
            num_of_products_optimized += 1
            base_optimizer.set_optimization_tracking(product,
                                                     base_optimizer.SANITIZED)
    return optimization_result_counts.OptimizationResultCounts(
        num_of_products_optimized, num_of_products_excluded)
Runs the optimization.

Removes invalid chars from the title and description. See above for the
definition of an invalid char.

Args:
    product_batch: A batch of product data.
    language: The language to use for this optimizer.
    country: The country to use for this optimizer.
    currency: The currency to use for this optimizer.

Returns:
    The counts of products optimized and excluded by this optimization.
https://github.com/google/shoptimizer/blob/3c3bfc200c4b1cd6b03c3e408bbbfcbda9c38ed4/shoptimizer_api/optimizers_builtin/invalid_chars_optimizer.py#L50-L86
import logging from typing import Any, Dict, List from models import optimization_result_counts from optimizers_abstract import base_optimizer from util import optimization_util _FIELDS_TO_SANITIZE = ['description', 'title'] _REPLACEMENT_CHAR: str = '' _UNICODE_PRIVATE_USE_AREA_START = 0xE000 _UNICODE_PRIVATE_USE_AREA_END = 0xF8FF class InvalidCharsOptimizer(base_optimizer.BaseOptimizer): _OPTIMIZER_PARAMETER = 'invalid-chars-optimizer'
Apache License 2.0
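The optimizer above walks a batch, skips entries that opted out, and counts how many products were actually changed. Below is a minimal, library-free sketch of that counting loop; `sanitize` and the batch shape are simplified stand-ins for `_sanitize_fields` and the real product schema.

def sanitize(product: dict) -> bool:
    # Stand-in for _sanitize_fields: strip leading/trailing spaces from the title.
    cleaned = product.get('title', '').strip()
    changed = cleaned != product.get('title', '')
    product['title'] = cleaned
    return changed

def optimize_batch(batch: dict) -> tuple:
    optimized = excluded = 0
    for entry in batch['entries']:
        if entry.get('excluded'):          # stand-in for the exclusion check
            excluded += 1
            continue
        if sanitize(entry['product']):
            optimized += 1
    return optimized, excluded

print(optimize_batch({'entries': [
    {'product': {'title': ' widget '}},
    {'product': {'title': 'ok'}, 'excluded': True},
]}))  # -> (1, 1)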
hyperledger/aries-cloudagent-python
aries_cloudagent/protocols/present_proof/dif/pres_exch_handler.py
DIFPresExchHandler.filter_by_field
python
async def filter_by_field(self, field: DIFField, credential: VCRecord) -> bool:
    credential_dict = credential.cred_value
    for path in field.paths:
        if "$.proof." in path:
            raise DIFPresExchError(
                "JSON Path expression matching on proof object "
                "is not currently supported"
            )
        jsonpath = parse(path)
        match = jsonpath.find(credential_dict)
        if len(match) == 0:
            continue
        for match_item in match:
            if not field._filter:
                return True
            if self.validate_patch(match_item.value, field._filter):
                return True
    return False
Apply filter on VCRecord.

Checks if a credential is applicable.

Args:
    field: Field containing the filtering spec
    credential: credential to apply filtering on

Returns:
    bool
https://github.com/hyperledger/aries-cloudagent-python/blob/fec69f1a2301e4745fc9d40cea190050e3f595fa/aries_cloudagent/protocols/present_proof/dif/pres_exch_handler.py#L565-L595
import pytz import re from datetime import datetime from dateutil.parser import parse as dateutil_parser from dateutil.parser import ParserError from jsonpath_ng import parse from pyld import jsonld from pyld.jsonld import JsonLdProcessor from typing import Sequence, Optional, Tuple from unflatten import unflatten from uuid import uuid4 from ....core.error import BaseError from ....core.profile import Profile from ....did.did_key import DIDKey from ....storage.vc_holder.vc_record import VCRecord from ....vc.ld_proofs import ( Ed25519Signature2018, BbsBlsSignature2020, BbsBlsSignatureProof2020, WalletKeyPair, DocumentLoader, ) from ....vc.ld_proofs.constants import ( SECURITY_CONTEXT_BBS_URL, EXPANDED_TYPE_CREDENTIALS_CONTEXT_V1_VC_TYPE, ) from ....vc.vc_ld.prove import sign_presentation, create_presentation, derive_credential from ....wallet.base import BaseWallet, DIDInfo from ....wallet.error import WalletError, WalletNotFoundError from ....wallet.key_type import KeyType from .pres_exch import ( PresentationDefinition, InputDescriptors, DIFField, Filter, Constraints, SubmissionRequirements, Requirement, SchemaInputDescriptor, InputDescriptorMapping, PresentationSubmission, ) PRESENTATION_SUBMISSION_JSONLD_CONTEXT = ( "https://identity.foundation/presentation-exchange/submission/v1" ) PRESENTATION_SUBMISSION_JSONLD_TYPE = "PresentationSubmission" class DIFPresExchError(BaseError): class DIFPresExchHandler: ISSUE_SIGNATURE_SUITE_KEY_TYPE_MAPPING = { Ed25519Signature2018: KeyType.ED25519, } if BbsBlsSignature2020.BBS_SUPPORTED: ISSUE_SIGNATURE_SUITE_KEY_TYPE_MAPPING[BbsBlsSignature2020] = KeyType.BLS12381G2 DERIVE_SIGNATURE_SUITE_KEY_TYPE_MAPPING = { BbsBlsSignatureProof2020: KeyType.BLS12381G2, } PROOF_TYPE_SIGNATURE_SUITE_MAPPING = { suite.signature_type: suite for suite, key_type in ISSUE_SIGNATURE_SUITE_KEY_TYPE_MAPPING.items() } DERIVED_PROOF_TYPE_SIGNATURE_SUITE_MAPPING = { suite.signature_type: suite for suite, key_type in DERIVE_SIGNATURE_SUITE_KEY_TYPE_MAPPING.items() } def __init__( self, profile: Profile, pres_signing_did: str = None, proof_type: str = None, ): super().__init__() self.profile = profile self.pres_signing_did = pres_signing_did if not proof_type: self.proof_type = Ed25519Signature2018.signature_type else: self.proof_type = proof_type self.is_holder = False async def _get_issue_suite( self, *, wallet: BaseWallet, issuer_id: str, ): did_info = await self._did_info_for_did(issuer_id) verification_method = self._get_verification_method(issuer_id) SignatureClass = self.PROOF_TYPE_SIGNATURE_SUITE_MAPPING[self.proof_type] return SignatureClass( verification_method=verification_method, key_pair=WalletKeyPair( wallet=wallet, key_type=self.ISSUE_SIGNATURE_SUITE_KEY_TYPE_MAPPING[SignatureClass], public_key_base58=did_info.verkey if did_info else None, ), ) async def _get_derive_suite( self, *, wallet: BaseWallet, ): SignatureClass = self.DERIVED_PROOF_TYPE_SIGNATURE_SUITE_MAPPING[ "BbsBlsSignatureProof2020" ] return SignatureClass( key_pair=WalletKeyPair( wallet=wallet, key_type=self.DERIVE_SIGNATURE_SUITE_KEY_TYPE_MAPPING[SignatureClass], ), ) def _get_verification_method(self, did: str): if did.startswith("did:key:"): return DIDKey.from_did(did).key_id elif did.startswith("did:sov:"): return did + "#key-1" else: raise DIFPresExchError( f"Unable to get retrieve verification method for did {did}" ) async def _did_info_for_did(self, did: str) -> DIDInfo: async with self.profile.session() as session: wallet = session.inject(BaseWallet) if did.startswith("did:sov:"): return 
await wallet.get_local_did(did.replace("did:sov:", "")) return await wallet.get_local_did(did) async def get_sign_key_credential_subject_id( self, applicable_creds: Sequence[VCRecord] ) -> Tuple[Optional[str], Sequence[dict]]: issuer_id = None filtered_creds_list = [] if self.proof_type == BbsBlsSignature2020.signature_type: reqd_key_type = KeyType.BLS12381G2 else: reqd_key_type = KeyType.ED25519 for cred in applicable_creds: if cred.subject_ids and len(cred.subject_ids) > 0: if not issuer_id: for cred_subject_id in cred.subject_ids: if not cred_subject_id.startswith("urn:"): did_info = await self._did_info_for_did(cred_subject_id) if did_info.key_type == reqd_key_type: issuer_id = cred_subject_id filtered_creds_list.append(cred.cred_value) break else: if issuer_id in cred.subject_ids: filtered_creds_list.append(cred.cred_value) else: raise DIFPresExchError( "Applicable credentials have different credentialSubject.id, " "multiple proofs are not supported currently" ) return (issuer_id, filtered_creds_list) async def to_requirement( self, sr: SubmissionRequirements, descriptors: Sequence[InputDescriptors] ) -> Requirement: input_descriptors = [] nested = [] total_count = 0 if sr._from: if sr._from != "": for descriptor in descriptors: if self.contains(descriptor.groups, sr._from): input_descriptors.append(descriptor) total_count = len(input_descriptors) if total_count == 0: raise DIFPresExchError(f"No descriptors for from: {sr._from}") else: for submission_requirement in sr.from_nested: try: requirement = await self.to_requirement( submission_requirement, descriptors ) nested.append(requirement) except Exception as err: raise DIFPresExchError( ( "Error creating requirement from " f"nested submission_requirements, {err}" ) ) total_count = len(nested) count = sr.count if sr.rule == "all": count = total_count requirement = Requirement( count=count, maximum=sr.maximum, minimum=sr.minimum, input_descriptors=input_descriptors, nested_req=nested, ) return requirement async def make_requirement( self, srs: Sequence[SubmissionRequirements] = None, descriptors: Sequence[InputDescriptors] = None, ) -> Requirement: if not srs: srs = [] if not descriptors: descriptors = [] if len(srs) == 0: requirement = Requirement( count=len(descriptors), input_descriptors=descriptors, ) return requirement requirement = Requirement( count=len(srs), nested_req=[], ) for submission_requirement in srs: try: requirement.nested_req.append( await self.to_requirement(submission_requirement, descriptors) ) except Exception as err: raise DIFPresExchError( f"Error creating requirement inside to_requirement function, {err}" ) return requirement def is_len_applicable(self, req: Requirement, val: int) -> bool: if req.count: if req.count > 0 and val != req.count: return False if req.minimum: if req.minimum > 0 and req.minimum > val: return False if req.maximum: if req.maximum > 0 and req.maximum < val: return False return True def contains(self, data: Sequence[str], e: str) -> bool: data_list = list(data) if data else [] for k in data_list: if e == k: return True return False async def filter_constraints( self, constraints: Constraints, credentials: Sequence[VCRecord], ) -> Sequence[VCRecord]: document_loader = self.profile.inject(DocumentLoader) result = [] for credential in credentials: if constraints.subject_issuer == "required" and not self.subject_is_issuer( credential=credential ): continue applicable = False is_holder_field_ids = self.field_ids_for_is_holder(constraints) for field in constraints._fields: applicable = await 
self.filter_by_field(field, credential) if not applicable: break if applicable and field.id and field.id in is_holder_field_ids: if not credential.subject_ids or len(credential.subject_ids) == 0: applicable = False break if not await self.process_constraint_holders( subject_ids=credential.subject_ids ): applicable = False break if not applicable: continue if constraints.limit_disclosure == "required": credential_dict = credential.cred_value new_credential_dict = self.reveal_doc( credential_dict=credential_dict, constraints=constraints ) async with self.profile.session() as session: wallet = session.inject(BaseWallet) derive_suite = await self._get_derive_suite( wallet=wallet, ) signed_new_credential_dict = await derive_credential( credential=credential_dict, reveal_document=new_credential_dict, suite=derive_suite, document_loader=document_loader, ) credential = self.create_vcrecord(signed_new_credential_dict) result.append(credential) return result def field_ids_for_is_holder(self, constraints: Constraints) -> Sequence[str]: reqd_field_ids = set() if not constraints.holders: reqd_field_ids = [] return reqd_field_ids for holder in constraints.holders: if holder.directive == "required": reqd_field_ids = set.union(reqd_field_ids, set(holder.field_ids)) return list(reqd_field_ids) async def process_constraint_holders( self, subject_ids: Sequence[str], ) -> bool: async with self.profile.session() as session: wallet = session.inject(BaseWallet) try: for subject_id in subject_ids: await wallet.get_local_did(subject_id.replace("did:sov:", "")) self.is_holder = True return True except (WalletError, WalletNotFoundError): return False def create_vcrecord(self, cred_dict: dict) -> VCRecord: proofs = cred_dict.get("proof") or [] proof_types = None if type(proofs) is dict: proofs = [proofs] if proofs: proof_types = [proof.get("type") for proof in proofs] contexts = [ctx for ctx in cred_dict.get("@context") if type(ctx) is str] if "@graph" in cred_dict: for enclosed_data in cred_dict.get("@graph"): if ( enclosed_data["id"].startswith("urn:") and "credentialSubject" in enclosed_data ): cred_dict.update(enclosed_data) del cred_dict["@graph"] break given_id = cred_dict.get("id") if given_id and self.check_if_cred_id_derived(given_id): given_id = str(uuid4()) issuer = cred_dict.get("issuer") if type(issuer) is dict: issuer = issuer.get("id") subject_ids = None subjects = cred_dict.get("credentialSubject") if subjects: if type(subjects) is dict: subjects = [subjects] subject_ids = [ subject.get("id") for subject in subjects if ("id" in subject) ] else: cred_dict["credentialSubject"] = {} schemas = cred_dict.get("credentialSchema", []) if type(schemas) is dict: schemas = [schemas] schema_ids = [schema.get("id") for schema in schemas] expanded = jsonld.expand(cred_dict) types = JsonLdProcessor.get_values( expanded[0], "@type", ) return VCRecord( contexts=contexts, expanded_types=types, issuer_id=issuer, subject_ids=subject_ids, proof_types=proof_types, given_id=given_id, cred_value=cred_dict, schema_ids=schema_ids, ) def reveal_doc(self, credential_dict: dict, constraints: Constraints): derived = { "@context": credential_dict.get("@context"), "type": credential_dict.get("type"), "@explicit": True, "@requireAll": True, "issuanceDate": {}, "issuer": {}, } unflatten_dict = {} for field in constraints._fields: for path in field.paths: jsonpath = parse(path) match = jsonpath.find(credential_dict) if len(match) == 0: continue for match_item in match: full_path = str(match_item.full_path) if 
bool(re.search(pattern=r"\[[0-9]+\]", string=full_path)): full_path = full_path.replace(".[", "[") unflatten_dict[full_path] = {} explicit_key_path = None key_list = full_path.split(".")[:-1] for key in key_list: if not explicit_key_path: explicit_key_path = key else: explicit_key_path = explicit_key_path + "." + key unflatten_dict[explicit_key_path + ".@explicit"] = True unflatten_dict[explicit_key_path + ".@requireAll"] = True derived = self.new_credential_builder(derived, unflatten_dict) if "credentialSubject" in derived.keys(): if "type" in credential_dict.get("credentialSubject"): derived["credentialSubject"]["type"] = credential_dict.get( "credentialSubject" ).get("type") if "credentialSubject" not in derived.keys(): if isinstance(credential_dict.get("credentialSubject"), list): derived["credentialSubject"] = [] elif isinstance(credential_dict.get("credentialSubject"), dict): derived["credentialSubject"] = {} return derived def new_credential_builder( self, new_credential: dict, unflatten_dict: dict ) -> dict: new_credential.update(unflatten(unflatten_dict)) return new_credential
Apache License 2.0
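`filter_by_field` boils down to "does any JSONPath on the field match the credential, and does the matched value pass the filter". A small standalone sketch of that check using `jsonpath_ng` directly (the same parser the handler imports); the credential dict, path, and predicate below are made up for illustration.

from jsonpath_ng import parse

def any_path_matches(credential_dict: dict, paths: list, predicate=None) -> bool:
    # True if any path yields a match and (when given) the predicate accepts it.
    for path in paths:
        for match in parse(path).find(credential_dict):
            if predicate is None or predicate(match.value):
                return True
    return False

cred = {"credentialSubject": {"degree": {"type": "BachelorDegree"}}}
print(any_path_matches(
    cred,
    ["$.credentialSubject.degree.type"],
    predicate=lambda v: v == "BachelorDegree",
))  # -> True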
red-hat-storage/ocs-ci
ocs_ci/helpers/helpers.py
get_all_storageclass_names
python
def get_all_storageclass_names():
    sc_obj = ocp.OCP(
        kind=constants.STORAGECLASS, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    result = sc_obj.get()
    sample = result["items"]
    storageclass = [
        item.get("metadata").get("name")
        for item in sample
        if (
            (item.get("metadata").get("name") not in constants.IGNORE_SC_GP2)
            and (item.get("metadata").get("name") not in constants.IGNORE_SC_FLEX)
        )
    ]
    return storageclass
Function for getting all storageclass names.

Returns:
    list: list of storageclass names
https://github.com/red-hat-storage/ocs-ci/blob/81bc3dd3c2bccbf875ffa8fa5fa2eb0ac9d52b7e/ocs_ci/helpers/helpers.py#L916-L937
import base64 import random import datetime import hashlib import json import logging import os import re import statistics import tempfile import threading import time import inspect from concurrent.futures import ThreadPoolExecutor from itertools import cycle from subprocess import PIPE, TimeoutExpired, run from uuid import uuid4 import yaml from ocs_ci.framework import config from ocs_ci.helpers.proxy import ( get_cluster_proxies, update_container_with_proxy_env, ) from ocs_ci.ocs.utils import mirror_image from ocs_ci.ocs import constants, defaults, node, ocp from ocs_ci.ocs.exceptions import ( CommandFailed, ResourceWrongStatusException, TimeoutExpiredError, UnavailableBuildException, UnexpectedBehaviour, ) from ocs_ci.ocs.ocp import OCP from ocs_ci.ocs.resources import pod, pvc from ocs_ci.ocs.resources.ocs import OCS from ocs_ci.utility import templating from ocs_ci.utility.retry import retry from ocs_ci.utility.utils import ( TimeoutSampler, ocsci_log_path, run_cmd, update_container_with_mirrored_image, ) logger = logging.getLogger(__name__) DATE_TIME_FORMAT = "%Y I%m%d %H:%M:%S.%f" def create_unique_resource_name(resource_description, resource_type): name = f"{resource_type}-{resource_description[:23]}-{uuid4().hex}" return name if len(name) < 40 else name[:40] def create_resource(do_reload=True, **kwargs): ocs_obj = OCS(**kwargs) resource_name = kwargs.get("metadata").get("name") created_resource = ocs_obj.create(do_reload=do_reload) assert created_resource, f"Failed to create resource {resource_name}" return ocs_obj def wait_for_resource_state(resource, state, timeout=60): if ( resource.name == constants.DEFAULT_STORAGECLASS_CEPHFS or resource.name == constants.DEFAULT_STORAGECLASS_RBD ): logger.info("Attempt to default default Secret or StorageClass") return try: resource.ocp.wait_for_resource( condition=state, resource_name=resource.name, timeout=timeout ) except TimeoutExpiredError: logger.error(f"{resource.kind} {resource.name} failed to reach {state}") resource.reload() raise ResourceWrongStatusException(resource.name, resource.describe()) logger.info(f"{resource.kind} {resource.name} reached state {state}") def create_pod( interface_type=None, pvc_name=None, do_reload=True, namespace=defaults.ROOK_CLUSTER_NAMESPACE, node_name=None, pod_dict_path=None, sa_name=None, dc_deployment=False, raw_block_pv=False, raw_block_device=constants.RAW_BLOCK_DEVICE, replica_count=1, pod_name=None, node_selector=None, command=None, command_args=None, deploy_pod_status=constants.STATUS_COMPLETED, subpath=None, ): if ( interface_type == constants.CEPHBLOCKPOOL or interface_type == constants.CEPHBLOCKPOOL_THICK ): pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML interface = constants.RBD_INTERFACE else: pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML interface = constants.CEPHFS_INTERFACE if dc_deployment: pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML pod_data = templating.load_yaml(pod_dict) if not pod_name: pod_name = create_unique_resource_name(f"test-{interface}", "pod") pod_data["metadata"]["name"] = pod_name pod_data["metadata"]["namespace"] = namespace if dc_deployment: pod_data["metadata"]["labels"]["app"] = pod_name pod_data["spec"]["template"]["metadata"]["labels"]["name"] = pod_name pod_data["spec"]["replicas"] = replica_count if pvc_name: if dc_deployment: pod_data["spec"]["template"]["spec"]["volumes"][0]["persistentVolumeClaim"][ "claimName" ] = pvc_name else: 
pod_data["spec"]["volumes"][0]["persistentVolumeClaim"][ "claimName" ] = pvc_name if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv: if pod_dict_path in [constants.FEDORA_DC_YAML, constants.FIO_DC_YAML]: temp_dict = [ { "devicePath": raw_block_device, "name": pod_data.get("spec") .get("template") .get("spec") .get("volumes")[0] .get("name"), } ] if pod_dict_path == constants.FEDORA_DC_YAML: del pod_data["spec"]["template"]["spec"]["containers"][0][ "volumeMounts" ] security_context = {"capabilities": {"add": ["SYS_ADMIN"]}} pod_data["spec"]["template"]["spec"]["containers"][0][ "securityContext" ] = security_context pod_data["spec"]["template"]["spec"]["containers"][0][ "volumeDevices" ] = temp_dict elif ( pod_dict_path == constants.NGINX_POD_YAML or pod_dict == constants.CSI_RBD_POD_YAML ): temp_dict = [ { "devicePath": raw_block_device, "name": pod_data.get("spec") .get("containers")[0] .get("volumeMounts")[0] .get("name"), } ] del pod_data["spec"]["containers"][0]["volumeMounts"] pod_data["spec"]["containers"][0]["volumeDevices"] = temp_dict else: pod_data["spec"]["containers"][0]["volumeDevices"][0][ "devicePath" ] = raw_block_device pod_data["spec"]["containers"][0]["volumeDevices"][0]["name"] = ( pod_data.get("spec").get("volumes")[0].get("name") ) if command: if dc_deployment: pod_data["spec"]["template"]["spec"]["containers"][0]["command"] = command else: pod_data["spec"]["containers"][0]["command"] = command if command_args: if dc_deployment: pod_data["spec"]["template"]["spec"]["containers"][0]["args"] = command_args else: pod_data["spec"]["containers"][0]["args"] = command_args if node_name: if dc_deployment: pod_data["spec"]["template"]["spec"]["nodeName"] = node_name else: pod_data["spec"]["nodeName"] = node_name if node_selector: if dc_deployment: pod_data["spec"]["template"]["spec"]["nodeSelector"] = node_selector else: pod_data["spec"]["nodeSelector"] = node_selector if sa_name and dc_deployment: pod_data["spec"]["template"]["spec"]["serviceAccountName"] = sa_name if subpath: if dc_deployment: pod_data["spec"]["template"]["spec"]["containers"][0]["volumeMounts"][0][ "subPath" ] = subpath else: pod_data["spec"]["containers"][0]["volumeMounts"][0]["subPath"] = subpath update_container_with_mirrored_image(pod_data) update_container_with_proxy_env(pod_data) if dc_deployment: ocs_obj = create_resource(**pod_data) logger.info(ocs_obj.name) assert (ocp.OCP(kind="pod", namespace=namespace)).wait_for_resource( condition=deploy_pod_status, resource_name=pod_name + "-1-deploy", resource_count=0, timeout=360, sleep=3, ) dpod_list = pod.get_all_pods(namespace=namespace) for dpod in dpod_list: if "-1-deploy" not in dpod.name: if pod_name in dpod.name: return dpod else: pod_obj = pod.Pod(**pod_data) pod_name = pod_data.get("metadata").get("name") logger.info(f"Creating new Pod {pod_name} for test") created_resource = pod_obj.create(do_reload=do_reload) assert created_resource, f"Failed to create Pod {pod_name}" return pod_obj def create_project(project_name=None): namespace = project_name or create_unique_resource_name("test", "namespace") project_obj = ocp.OCP(kind="Project", namespace=namespace) assert project_obj.new_project(namespace), f"Failed to create namespace {namespace}" return project_obj def create_multilpe_projects(number_of_project): project_objs = [create_project() for _ in range(number_of_project)] return project_objs def create_secret(interface_type): secret_data = dict() if interface_type == constants.CEPHBLOCKPOOL: secret_data = 
templating.load_yaml(constants.CSI_RBD_SECRET_YAML) secret_data["stringData"]["userID"] = constants.ADMIN_USER secret_data["stringData"]["userKey"] = get_admin_key() interface = constants.RBD_INTERFACE elif interface_type == constants.CEPHFILESYSTEM: secret_data = templating.load_yaml(constants.CSI_CEPHFS_SECRET_YAML) del secret_data["stringData"]["userID"] del secret_data["stringData"]["userKey"] secret_data["stringData"]["adminID"] = constants.ADMIN_USER secret_data["stringData"]["adminKey"] = get_admin_key() interface = constants.CEPHFS_INTERFACE secret_data["metadata"]["name"] = create_unique_resource_name( f"test-{interface}", "secret" ) secret_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE return create_resource(**secret_data) def default_ceph_block_pool(): sc_obj = default_storage_class(constants.CEPHBLOCKPOOL) cbp_name = sc_obj.get().get("parameters").get("pool") return cbp_name if cbp_name else constants.DEFAULT_BLOCKPOOL def create_ceph_block_pool( pool_name=None, replica=3, compression=None, failure_domain=None, verify=True ): cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML) cbp_data["metadata"]["name"] = ( pool_name if pool_name else create_unique_resource_name("test", "cbp") ) cbp_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE cbp_data["spec"]["replicated"]["size"] = replica cbp_data["spec"]["failureDomain"] = failure_domain or get_failure_domin() if compression: cbp_data["spec"]["compressionMode"] = compression cbp_data["spec"]["parameters"]["compression_mode"] = compression cbp_obj = create_resource(**cbp_data) cbp_obj.reload() if verify: assert verify_block_pool_exists( cbp_obj.name ), f"Block pool {cbp_obj.name} does not exist" return cbp_obj def create_ceph_file_system(pool_name=None): cfs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML) cfs_data["metadata"]["name"] = ( pool_name if pool_name else create_unique_resource_name("test", "cfs") ) cfs_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE cfs_data = create_resource(**cfs_data) cfs_data.reload() assert validate_cephfilesystem( cfs_data.name ), f"File system {cfs_data.name} does not exist" return cfs_data def default_storage_class( interface_type, ): external = config.DEPLOYMENT["external_mode"] if interface_type == constants.CEPHBLOCKPOOL: if external: resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD else: resource_name = constants.DEFAULT_STORAGECLASS_RBD base_sc = OCP(kind="storageclass", resource_name=resource_name) elif interface_type == constants.CEPHFILESYSTEM: if external: resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_CEPHFS else: resource_name = constants.DEFAULT_STORAGECLASS_CEPHFS base_sc = OCP(kind="storageclass", resource_name=resource_name) base_sc.wait_for_resource( condition=resource_name, column="NAME", timeout=240, ) sc = OCS(**base_sc.data) return sc def default_thick_storage_class(): external = config.DEPLOYMENT["external_mode"] if external: resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD_THICK else: resource_name = constants.DEFAULT_STORAGECLASS_RBD_THICK base_sc = OCP(kind="storageclass", resource_name=resource_name) sc = OCS(**base_sc.data) return sc def create_storage_class( interface_type, interface_name, secret_name, reclaim_policy=constants.RECLAIM_POLICY_DELETE, sc_name=None, provisioner=None, rbd_thick_provision=False, encrypted=False, encryption_kms_id=None, ): sc_data = dict() if interface_type == constants.CEPHBLOCKPOOL: sc_data = 
templating.load_yaml(constants.CSI_RBD_STORAGECLASS_YAML) sc_data["parameters"]["csi.storage.k8s.io/node-stage-secret-name"] = secret_name sc_data["parameters"][ "csi.storage.k8s.io/node-stage-secret-namespace" ] = defaults.ROOK_CLUSTER_NAMESPACE interface = constants.RBD_INTERFACE sc_data["provisioner"] = ( provisioner if provisioner else defaults.RBD_PROVISIONER ) if rbd_thick_provision: sc_data["parameters"]["thickProvision"] = "true" if encrypted: from ocs_ci.utility.kms import get_encryption_kmsid sc_data["parameters"]["encrypted"] = "true" sc_data["parameters"]["encryptionKMSID"] = ( encryption_kms_id if encryption_kms_id else get_encryption_kmsid()[0] ) elif interface_type == constants.CEPHFILESYSTEM: sc_data = templating.load_yaml(constants.CSI_CEPHFS_STORAGECLASS_YAML) sc_data["parameters"]["csi.storage.k8s.io/node-stage-secret-name"] = secret_name sc_data["parameters"][ "csi.storage.k8s.io/node-stage-secret-namespace" ] = defaults.ROOK_CLUSTER_NAMESPACE interface = constants.CEPHFS_INTERFACE sc_data["parameters"]["fsName"] = get_cephfs_name() sc_data["provisioner"] = ( provisioner if provisioner else defaults.CEPHFS_PROVISIONER ) sc_data["parameters"]["pool"] = interface_name sc_data["metadata"]["name"] = ( sc_name if sc_name else create_unique_resource_name(f"test-{interface}", "storageclass") ) sc_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE sc_data["parameters"]["csi.storage.k8s.io/provisioner-secret-name"] = secret_name sc_data["parameters"][ "csi.storage.k8s.io/provisioner-secret-namespace" ] = defaults.ROOK_CLUSTER_NAMESPACE sc_data["parameters"][ "csi.storage.k8s.io/controller-expand-secret-name" ] = secret_name sc_data["parameters"][ "csi.storage.k8s.io/controller-expand-secret-namespace" ] = defaults.ROOK_CLUSTER_NAMESPACE sc_data["parameters"]["clusterID"] = defaults.ROOK_CLUSTER_NAMESPACE sc_data["reclaimPolicy"] = reclaim_policy try: del sc_data["parameters"]["userid"] except KeyError: pass return create_resource(**sc_data) def create_pvc( sc_name, pvc_name=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE, size=None, do_reload=True, access_mode=constants.ACCESS_MODE_RWO, volume_mode=None, ): pvc_data = templating.load_yaml(constants.CSI_PVC_YAML) pvc_data["metadata"]["name"] = ( pvc_name if pvc_name else create_unique_resource_name("test", "pvc") ) pvc_data["metadata"]["namespace"] = namespace pvc_data["spec"]["accessModes"] = [access_mode] pvc_data["spec"]["storageClassName"] = sc_name if size: pvc_data["spec"]["resources"]["requests"]["storage"] = size if volume_mode: pvc_data["spec"]["volumeMode"] = volume_mode ocs_obj = pvc.PVC(**pvc_data) created_pvc = ocs_obj.create(do_reload=do_reload) assert created_pvc, f"Failed to create resource {pvc_name}" return ocs_obj def create_multiple_pvcs( sc_name, namespace, number_of_pvc=1, size=None, do_reload=False, access_mode=constants.ACCESS_MODE_RWO, burst=False, ): if not burst: if access_mode == "ReadWriteMany" and "rbd" in sc_name: volume_mode = "Block" else: volume_mode = None return [ create_pvc( sc_name=sc_name, size=size, namespace=namespace, do_reload=do_reload, access_mode=access_mode, volume_mode=volume_mode, ) for _ in range(number_of_pvc) ] pvc_data = templating.load_yaml(constants.CSI_PVC_YAML) pvc_data["metadata"]["namespace"] = namespace pvc_data["spec"]["accessModes"] = [access_mode] pvc_data["spec"]["storageClassName"] = sc_name if size: pvc_data["spec"]["resources"]["requests"]["storage"] = size if access_mode == "ReadWriteMany" and "rbd" in sc_name: pvc_data["spec"]["volumeMode"] = 
"Block" else: pvc_data["spec"]["volumeMode"] = None tmpdir = tempfile.mkdtemp() logger.info("Creating the PVC yaml files for creation in bulk") ocs_objs = [] for _ in range(number_of_pvc): name = create_unique_resource_name("test", "pvc") logger.info(f"Adding PVC with name {name}") pvc_data["metadata"]["name"] = name templating.dump_data_to_temp_yaml(pvc_data, f"{tmpdir}/{name}.yaml") ocs_objs.append(pvc.PVC(**pvc_data)) logger.info("Creating all PVCs as bulk") oc = OCP(kind="pod", namespace=namespace) cmd = f"create -f {tmpdir}/" oc.exec_oc_cmd(command=cmd, out_yaml_format=False) logger.info( f"Going to sleep for {number_of_pvc} sec. " "until starting verify that PVCs was created." ) time.sleep(number_of_pvc) return ocs_objs, tmpdir def delete_bulk_pvcs(pvc_yaml_dir, pv_names_list): oc = OCP(kind="pod", namespace=defaults.ROOK_CLUSTER_NAMESPACE) cmd = f"delete -f {pvc_yaml_dir}/" oc.exec_oc_cmd(command=cmd, out_yaml_format=False) time.sleep(len(pv_names_list) / 2) for pv_name in pv_names_list: validate_pv_delete(pv_name) def verify_block_pool_exists(pool_name): logger.info(f"Verifying that block pool {pool_name} exists") ct_pod = pod.get_ceph_tools_pod() try: for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph osd lspools"): logger.info(f"POOLS are {pools}") for pool in pools: if pool_name in pool.get("poolname"): return True except TimeoutExpiredError: return False def get_pool_cr(pool_name): logger.info(f"Checking if pool {pool_name} is kind of {constants.CEPHBLOCKPOOL}") ocp_kind_cephblockpool = ocp.OCP( kind=constants.CEPHBLOCKPOOL, namespace=config.ENV_DATA["cluster_namespace"] ) pool_cr = ocp_kind_cephblockpool.get(resource_name=pool_name, dont_raise=True) if pool_cr is not None: return pool_cr else: logger.info( f"Pool {pool_name} is not kind={constants.CEPHBLOCKPOOL}" f", checkging if it is kind={constants.CEPHFILESYSTEM}" ) ocp_kind_cephfilesystem = ocp.OCP( kind="CephFilesystem", namespace=config.ENV_DATA["cluster_namespace"], ) pool_cr = ocp_kind_cephfilesystem.get(resource_name=pool_name, dont_raise=True) return pool_cr def get_admin_key(): ct_pod = pod.get_ceph_tools_pod() out = ct_pod.exec_ceph_cmd("ceph auth get-key client.admin") return out["key"] def get_cephfs_data_pool_name(): ct_pod = pod.get_ceph_tools_pod() out = ct_pod.exec_ceph_cmd("ceph fs ls") return out[0]["data_pools"][0] def validate_cephfilesystem(fs_name): cfs = ocp.OCP( kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE ) ct_pod = pod.get_ceph_tools_pod() ceph_validate = False ocp_validate = False result = cfs.get(resource_name=fs_name) if result.get("metadata").get("name"): logger.info("Filesystem %s got created from Openshift Side", fs_name) ocp_validate = True else: logger.info("Filesystem %s was not create at Openshift Side", fs_name) return False try: for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph fs ls"): for out in pools: result = out.get("name") if result == fs_name: logger.info("FileSystem %s got created from Ceph Side", fs_name) ceph_validate = True break else: logger.error("FileSystem %s was not present at Ceph Side", fs_name) ceph_validate = False if ceph_validate: break except TimeoutExpiredError: pass return True if (ceph_validate and ocp_validate) else False def create_ocs_object_from_kind_and_name( kind, resource_name, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE ): ocp_object = OCP(kind=kind, resource_name=resource_name, namespace=namespace).get() return OCS(**ocp_object) def remove_ocs_object_from_list(kind, resource_name, object_list): 
for obj in object_list: if obj.name == resource_name and obj.kind == kind: object_list.remove(obj) return object_list
MIT License
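A minimal usage sketch for the create_pvc / create_multiple_pvcs helpers above. The import path, storage-class name and namespace are placeholders (assumptions), not values taken from the record itself.

# Hypothetical import path; the record does not show the module name.
from ocs_ci.helpers.helpers import create_pvc, create_multiple_pvcs
from ocs_ci.ocs import constants

# Single PVC against a placeholder storage class.
pvc_obj = create_pvc(
    sc_name="ocs-storagecluster-ceph-rbd",   # placeholder SC name
    namespace="my-test-namespace",           # placeholder namespace
    size="5Gi",
    access_mode=constants.ACCESS_MODE_RWO,
)

# Bulk creation: with burst=True the helper dumps one YAML per PVC into a
# temp dir, applies the whole directory at once, and returns (objects, dir).
pvc_objs, yaml_dir = create_multiple_pvcs(
    sc_name="ocs-storagecluster-ceph-rbd",
    namespace="my-test-namespace",
    number_of_pvc=10,
    size="1Gi",
    burst=True,
)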
mne-tools/mne-bids
mne_bids/path.py
BIDSPath.acquisition
python
def acquisition(self) -> Optional[str]: return self._acquisition
The acquisition parameters.
https://github.com/mne-tools/mne-bids/blob/c6a3f84ad210ffee7d069ab1062c3b9e3897546d/mne_bids/path.py#L413-L415
import glob import os import re from io import StringIO import shutil as sh from collections import OrderedDict from copy import deepcopy from os import path as op from pathlib import Path from datetime import datetime import json from typing import Optional import numpy as np from mne.utils import warn, logger, _validate_type, verbose from mne_bids.config import ( ALLOWED_PATH_ENTITIES, ALLOWED_FILENAME_EXTENSIONS, ALLOWED_FILENAME_SUFFIX, ALLOWED_PATH_ENTITIES_SHORT, ALLOWED_DATATYPES, SUFFIX_TO_DATATYPE, ALLOWED_DATATYPE_EXTENSIONS, ALLOWED_SPACES, reader, ENTITY_VALUE_TYPE) from mne_bids.utils import (_check_key_val, _check_empty_room_basename, param_regex, _ensure_tuple) def _find_matched_empty_room(bids_path): bids_root = bids_path.root if bids_root is None: raise ValueError('The root of the "bids_path" must be set. ' 'Please use `bids_path.update(root="<root>")` ' 'to set the root of the BIDS folder to read.') from mne_bids import read_raw_bids bids_path = bids_path.copy() datatype = 'meg' bids_fname = bids_path.update(suffix=datatype, root=bids_root).fpath _, ext = _parse_ext(bids_fname) raw = read_raw_bids(bids_path=bids_path) if raw.info['meas_date'] is None: raise ValueError('The provided recording does not have a measurement ' 'date set. Cannot get matching empty-room file.') ref_date = raw.info['meas_date'] if not isinstance(ref_date, datetime): ref_date = datetime.fromtimestamp(raw.info['meas_date'][0]) emptyroom_dir = BIDSPath(root=bids_root, subject='emptyroom').directory if not emptyroom_dir.exists(): return None emptyroom_session_dirs = [x for x in emptyroom_dir.iterdir() if x.is_dir() and str(x.name).startswith('ses-')] if not emptyroom_session_dirs: emptyroom_session_dirs = [emptyroom_dir] allowed_extensions = list(reader.keys()) del allowed_extensions[allowed_extensions.index('.pdf')] candidate_er_fnames = [] for session_dir in emptyroom_session_dirs: dir_contents = glob.glob(op.join(session_dir, datatype, f'sub-emptyroom_*_{datatype}*')) for item in dir_contents: item = Path(item) if ((item.suffix in allowed_extensions) or (not item.suffix and item.is_dir())): candidate_er_fnames.append(item.name) best_er_bids_path = None min_delta_t = np.inf date_tie = False failed_to_get_er_date_count = 0 for er_fname in candidate_er_fnames: er_bids_path = get_bids_path_from_fname(er_fname, check=False) er_bids_path.subject = 'emptyroom' er_bids_path.root = bids_root er_meas_date = None if er_bids_path.session is not None: try: er_meas_date = datetime.strptime( er_bids_path.session, '%Y%m%d') except (ValueError, TypeError): pass if er_meas_date is None: _, ext = _parse_ext(er_fname) extra_params = None if ext == '.fif': extra_params = dict(allow_maxshield=True) er_raw = read_raw_bids(bids_path=er_bids_path, extra_params=extra_params) er_meas_date = er_raw.info['meas_date'] if er_meas_date is None: failed_to_get_er_date_count += 1 continue er_meas_date = er_meas_date.replace(tzinfo=ref_date.tzinfo) delta_t = er_meas_date - ref_date if abs(delta_t.total_seconds()) == min_delta_t: date_tie = True elif abs(delta_t.total_seconds()) < min_delta_t: min_delta_t = abs(delta_t.total_seconds()) best_er_bids_path = er_bids_path date_tie = False if failed_to_get_er_date_count > 0: msg = (f'Could not retrieve the empty-room measurement date from ' f'a total of {failed_to_get_er_date_count} recording(s).') warn(msg) if date_tie: msg = ('Found more than one matching empty-room measurement with the ' 'same recording date. 
Selecting the first match.') warn(msg) return best_er_bids_path class BIDSPath(object): def __init__(self, subject=None, session=None, task=None, acquisition=None, run=None, processing=None, recording=None, space=None, split=None, root=None, suffix=None, extension=None, datatype=None, check=True): if all(ii is None for ii in [subject, session, task, acquisition, run, processing, recording, space, root, suffix, extension]): raise ValueError("At least one parameter must be given.") self.check = check self.update(subject=subject, session=session, task=task, acquisition=acquisition, run=run, processing=processing, recording=recording, space=space, split=split, root=root, datatype=datatype, suffix=suffix, extension=extension) @property def entities(self): return OrderedDict([ ('subject', self.subject), ('session', self.session), ('task', self.task), ('acquisition', self.acquisition), ('run', self.run), ('processing', self.processing), ('space', self.space), ('recording', self.recording), ('split', self.split) ]) @property def basename(self): basename = [] for key, val in self.entities.items(): if val is not None and key != 'datatype': long_to_short_entity = { val: key for key, val in ALLOWED_PATH_ENTITIES_SHORT.items() } key = long_to_short_entity[key] basename.append(f'{key}-{val}') if self.suffix is not None: if self.extension is not None: basename.append(f'{self.suffix}{self.extension}') else: basename.append(self.suffix) basename = '_'.join(basename) return basename @property def directory(self): data_path = '' if self.root is None else self.root if self.subject is not None: data_path = op.join(data_path, f'sub-{self.subject}') if self.session is not None: data_path = op.join(data_path, f'ses-{self.session}') if self.datatype is not None: data_path = op.join(data_path, self.datatype) return Path(data_path) @property def subject(self) -> Optional[str]: return self._subject @subject.setter def subject(self, value): self.update(subject=value) @property def session(self) -> Optional[str]: return self._session @session.setter def session(self, value): self.update(session=value) @property def task(self) -> Optional[str]: return self._task @task.setter def task(self, value): self.update(task=value) @property def run(self) -> Optional[str]: return self._run @run.setter def run(self, value): self.update(run=value) @property
BSD 3-Clause New or Revised License
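A short, hedged sketch of the BIDSPath.acquisition property above; the entity values and root are made up, and check=False skips validation so the example stands alone.

from mne_bids import BIDSPath   # assumes mne-bids is installed

bp = BIDSPath(subject="01", session="01", task="rest",
              acquisition="highres",        # made-up acquisition label
              root="/tmp/bids", check=False)
print(bp.acquisition)   # 'highres', returned by the getter shown above
print(bp.basename)      # roughly 'sub-01_ses-01_task-rest_acq-highres'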
meteorshowers/stereonet-activestereonet
disparity/eval/kitti-object-eval-python/eval.py
get_official_eval_result
python
def get_official_eval_result(gt_annos, dt_annos, current_classes, difficultys=[0, 1, 2], z_axis=1, z_center=1.0): overlap_mod = np.array([[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7]]) overlap_easy = np.array([[0.5, 0.5, 0.5, 0.7, 0.5, 0.5, 0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5]]) min_overlaps = np.stack([overlap_mod, overlap_easy], axis=0) class_to_name = { 0: 'Car', 1: 'Pedestrian', 2: 'Cyclist', 3: 'Van', 4: 'Person_sitting', 5: 'car', 6: 'tractor', 7: 'trailer', } name_to_class = {v: n for n, v in class_to_name.items()} if not isinstance(current_classes, (list, tuple)): current_classes = [current_classes] current_classes_int = [] for curcls in current_classes: if isinstance(curcls, str): current_classes_int.append(name_to_class[curcls]) else: current_classes_int.append(curcls) current_classes = current_classes_int min_overlaps = min_overlaps[:, :, current_classes] result = '' compute_aos = False for anno in dt_annos: if anno['alpha'].shape[0] != 0: if anno['alpha'][0] != -10: compute_aos = True break metrics = do_eval_v3( gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, difficultys, z_axis=z_axis, z_center=z_center) for j, curcls in enumerate(current_classes): for i in range(min_overlaps.shape[0]): mAPbbox = get_mAP_v2(metrics["bbox"]["precision"][j, :, i]) mAPbbox = ", ".join(f"{v:.2f}" for v in mAPbbox) mAPbev = get_mAP_v2(metrics["bev"]["precision"][j, :, i]) mAPbev = ", ".join(f"{v:.2f}" for v in mAPbev) mAP3d = get_mAP_v2(metrics["3d"]["precision"][j, :, i]) mAP3d = ", ".join(f"{v:.2f}" for v in mAP3d) result += print_str( (f"{class_to_name[curcls]} " "AP(Average Precision)@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j]))) result += print_str(f"bbox AP:{mAPbbox}") result += print_str(f"bev AP:{mAPbev}") result += print_str(f"3d AP:{mAP3d}") if compute_aos: mAPaos = get_mAP_v2(metrics["bbox"]["orientation"][j, :, i]) mAPaos = ", ".join(f"{v:.2f}" for v in mAPaos) result += print_str(f"aos AP:{mAPaos}") return result
gt_annos and dt_annos must contain the following keys: [bbox, location, dimensions, rotation_y, score]
https://github.com/meteorshowers/stereonet-activestereonet/blob/ff8ec7e3c96625cb85b91bcc657829970e316e1f/disparity/eval/kitti-object-eval-python/eval.py#L719-L796
import io as sysio import time import numba import numpy as np from scipy.interpolate import interp1d from rotate_iou import rotate_iou_gpu_eval def get_mAP(prec): sums = 0 for i in range(0, len(prec), 4): sums += prec[i] return sums / 11 * 100 @numba.jit def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41): scores.sort() scores = scores[::-1] current_recall = 0 thresholds = [] for i, score in enumerate(scores): l_recall = (i + 1) / num_gt if i < (len(scores) - 1): r_recall = (i + 2) / num_gt else: r_recall = l_recall if (((r_recall - current_recall) < (current_recall - l_recall)) and (i < (len(scores) - 1))): continue thresholds.append(score) current_recall += 1 / (num_sample_pts - 1.0) return thresholds def clean_data(gt_anno, dt_anno, current_class, difficulty): CLASS_NAMES = [ 'car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'car', 'tractor', 'trailer' ] MIN_HEIGHT = [40, 25, 25] MAX_OCCLUSION = [0, 1, 2] MAX_TRUNCATION = [0.15, 0.3, 0.5] dc_bboxes, ignored_gt, ignored_dt = [], [], [] current_cls_name = CLASS_NAMES[current_class].lower() num_gt = len(gt_anno["name"]) num_dt = len(dt_anno["name"]) num_valid_gt = 0 for i in range(num_gt): bbox = gt_anno["bbox"][i] gt_name = gt_anno["name"][i].lower() height = bbox[3] - bbox[1] valid_class = -1 if (gt_name == current_cls_name): valid_class = 1 elif (current_cls_name == "Pedestrian".lower() and "Person_sitting".lower() == gt_name): valid_class = 0 elif (current_cls_name == "Car".lower() and "Van".lower() == gt_name): valid_class = 0 else: valid_class = -1 ignore = False if ((gt_anno["occluded"][i] > MAX_OCCLUSION[difficulty]) or (gt_anno["truncated"][i] > MAX_TRUNCATION[difficulty]) or (height <= MIN_HEIGHT[difficulty])): ignore = True if valid_class == 1 and not ignore: ignored_gt.append(0) num_valid_gt += 1 elif (valid_class == 0 or (ignore and (valid_class == 1))): ignored_gt.append(1) else: ignored_gt.append(-1) if gt_anno["name"][i] == "DontCare": dc_bboxes.append(gt_anno["bbox"][i]) for i in range(num_dt): if (dt_anno["name"][i].lower() == current_cls_name): valid_class = 1 else: valid_class = -1 height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1]) if height < MIN_HEIGHT[difficulty]: ignored_dt.append(1) elif valid_class == 1: ignored_dt.append(0) else: ignored_dt.append(-1) return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes @numba.jit(nopython=True) def image_box_overlap(boxes, query_boxes, criterion=-1): N = boxes.shape[0] K = query_boxes.shape[0] overlaps = np.zeros((N, K), dtype=boxes.dtype) for k in range(K): qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) * (query_boxes[k, 3] - query_boxes[k, 1])) for n in range(N): iw = (min(boxes[n, 2], query_boxes[k, 2]) - max( boxes[n, 0], query_boxes[k, 0])) if iw > 0: ih = (min(boxes[n, 3], query_boxes[k, 3]) - max( boxes[n, 1], query_boxes[k, 1])) if ih > 0: if criterion == -1: ua = ( (boxes[n, 2] - boxes[n, 0]) * (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih) elif criterion == 0: ua = ((boxes[n, 2] - boxes[n, 0]) * (boxes[n, 3] - boxes[n, 1])) elif criterion == 1: ua = qbox_area else: ua = 1.0 overlaps[n, k] = iw * ih / ua return overlaps def bev_box_overlap(boxes, qboxes, criterion=-1): riou = rotate_iou_gpu_eval(boxes, qboxes, criterion) return riou @numba.jit(nopython=True, parallel=True) def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1, z_axis=1, z_center=1.0): N, K = boxes.shape[0], qboxes.shape[0] for i in range(N): for j in range(K): if rinc[i, j] > 0: min_z = min( boxes[i, z_axis] + boxes[i, z_axis + 3] * (1 - z_center), 
qboxes[j, z_axis] + qboxes[j, z_axis + 3] * (1 - z_center)) max_z = max( boxes[i, z_axis] - boxes[i, z_axis + 3] * z_center, qboxes[j, z_axis] - qboxes[j, z_axis + 3] * z_center) iw = min_z - max_z if iw > 0: area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5] area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5] inc = iw * rinc[i, j] if criterion == -1: ua = (area1 + area2 - inc) elif criterion == 0: ua = area1 elif criterion == 1: ua = area2 else: ua = 1.0 rinc[i, j] = inc / ua else: rinc[i, j] = 0.0 def d3_box_overlap(boxes, qboxes, criterion=-1, z_axis=1, z_center=1.0): bev_axes = list(range(7)) bev_axes.pop(z_axis + 3) bev_axes.pop(z_axis) rinc = rotate_iou_gpu_eval(boxes[:, bev_axes], qboxes[:, bev_axes], 2) d3_box_overlap_kernel(boxes, qboxes, rinc, criterion, z_axis, z_center) return rinc @numba.jit(nopython=True) def compute_statistics_jit(overlaps, gt_datas, dt_datas, ignored_gt, ignored_det, dc_bboxes, metric, min_overlap, thresh=0, compute_fp=False, compute_aos=False): det_size = dt_datas.shape[0] gt_size = gt_datas.shape[0] dt_scores = dt_datas[:, -1] dt_alphas = dt_datas[:, 4] gt_alphas = gt_datas[:, 4] dt_bboxes = dt_datas[:, :4] assigned_detection = [False] * det_size ignored_threshold = [False] * det_size if compute_fp: for i in range(det_size): if (dt_scores[i] < thresh): ignored_threshold[i] = True NO_DETECTION = -10000000 tp, fp, fn, similarity = 0, 0, 0, 0 thresholds = np.zeros((gt_size, )) thresh_idx = 0 delta = np.zeros((gt_size, )) delta_idx = 0 for i in range(gt_size): if ignored_gt[i] == -1: continue det_idx = -1 valid_detection = NO_DETECTION max_overlap = 0 assigned_ignored_det = False for j in range(det_size): if (ignored_det[j] == -1): continue if (assigned_detection[j]): continue if (ignored_threshold[j]): continue overlap = overlaps[j, i] dt_score = dt_scores[j] if (not compute_fp and (overlap > min_overlap) and dt_score > valid_detection): det_idx = j valid_detection = dt_score elif (compute_fp and (overlap > min_overlap) and (overlap > max_overlap or assigned_ignored_det) and ignored_det[j] == 0): max_overlap = overlap det_idx = j valid_detection = 1 assigned_ignored_det = False elif (compute_fp and (overlap > min_overlap) and (valid_detection == NO_DETECTION) and ignored_det[j] == 1): det_idx = j valid_detection = 1 assigned_ignored_det = True if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0: fn += 1 elif ((valid_detection != NO_DETECTION) and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)): assigned_detection[det_idx] = True elif valid_detection != NO_DETECTION: tp += 1 thresholds[thresh_idx] = dt_scores[det_idx] thresh_idx += 1 if compute_aos: delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx] delta_idx += 1 assigned_detection[det_idx] = True if compute_fp: for i in range(det_size): if (not (assigned_detection[i] or ignored_det[i] == -1 or ignored_det[i] == 1 or ignored_threshold[i])): fp += 1 nstuff = 0 if metric == 0: overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0) for i in range(dc_bboxes.shape[0]): for j in range(det_size): if (assigned_detection[j]): continue if (ignored_det[j] == -1 or ignored_det[j] == 1): continue if (ignored_threshold[j]): continue if overlaps_dt_dc[j, i] > min_overlap: assigned_detection[j] = True nstuff += 1 fp -= nstuff if compute_aos: tmp = np.zeros((fp + delta_idx, )) for i in range(delta_idx): tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0 if tp > 0 or fp > 0: similarity = np.sum(tmp) else: similarity = -1 return tp, fp, fn, similarity, thresholds[:thresh_idx] def get_split_parts(num, num_part): 
same_part = num // num_part remain_num = num % num_part if remain_num == 0: return [same_part] * num_part else: return [same_part] * num_part + [remain_num] @numba.jit(nopython=True) def fused_compute_statistics(overlaps, pr, gt_nums, dt_nums, dc_nums, gt_datas, dt_datas, dontcares, ignored_gts, ignored_dets, metric, min_overlap, thresholds, compute_aos=False): gt_num = 0 dt_num = 0 dc_num = 0 for i in range(gt_nums.shape[0]): for t, thresh in enumerate(thresholds): overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:gt_num + gt_nums[i]] gt_data = gt_datas[gt_num:gt_num + gt_nums[i]] dt_data = dt_datas[dt_num:dt_num + dt_nums[i]] ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]] ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]] dontcare = dontcares[dc_num:dc_num + dc_nums[i]] tp, fp, fn, similarity, _ = compute_statistics_jit( overlap, gt_data, dt_data, ignored_gt, ignored_det, dontcare, metric, min_overlap=min_overlap, thresh=thresh, compute_fp=True, compute_aos=compute_aos) pr[t, 0] += tp pr[t, 1] += fp pr[t, 2] += fn if similarity != -1: pr[t, 3] += similarity gt_num += gt_nums[i] dt_num += dt_nums[i] dc_num += dc_nums[i] def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50, z_axis=1, z_center=1.0): assert len(gt_annos) == len(dt_annos) total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0) total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0) num_examples = len(gt_annos) split_parts = get_split_parts(num_examples, num_parts) parted_overlaps = [] example_idx = 0 bev_axes = list(range(3)) bev_axes.pop(z_axis) for num_part in split_parts: gt_annos_part = gt_annos[example_idx:example_idx + num_part] dt_annos_part = dt_annos[example_idx:example_idx + num_part] if metric == 0: gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0) dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0) overlap_part = image_box_overlap(gt_boxes, dt_boxes) elif metric == 1: loc = np.concatenate( [a["location"][:, bev_axes] for a in gt_annos_part], 0) dims = np.concatenate( [a["dimensions"][:, bev_axes] for a in gt_annos_part], 0) rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0) gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1) loc = np.concatenate( [a["location"][:, bev_axes] for a in dt_annos_part], 0) dims = np.concatenate( [a["dimensions"][:, bev_axes] for a in dt_annos_part], 0) rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0) dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1) overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(np.float64) elif metric == 2: loc = np.concatenate([a["location"] for a in gt_annos_part], 0) dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0) rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0) gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1) loc = np.concatenate([a["location"] for a in dt_annos_part], 0) dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0) rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0) dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1) overlap_part = d3_box_overlap( gt_boxes, dt_boxes, z_axis=z_axis, z_center=z_center).astype(np.float64) else: raise ValueError("unknown metric") parted_overlaps.append(overlap_part) example_idx += num_part overlaps = [] example_idx = 0 for j, num_part in enumerate(split_parts): gt_annos_part = gt_annos[example_idx:example_idx + num_part] dt_annos_part = dt_annos[example_idx:example_idx + 
num_part] gt_num_idx, dt_num_idx = 0, 0 for i in range(num_part): gt_box_num = total_gt_num[example_idx + i] dt_box_num = total_dt_num[example_idx + i] overlaps.append( parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num, dt_num_idx:dt_num_idx + dt_box_num]) gt_num_idx += gt_box_num dt_num_idx += dt_box_num example_idx += num_part return overlaps, parted_overlaps, total_gt_num, total_dt_num def _prepare_data(gt_annos, dt_annos, current_class, difficulty): gt_datas_list = [] dt_datas_list = [] total_dc_num = [] ignored_gts, ignored_dets, dontcares = [], [], [] total_num_valid_gt = 0 for i in range(len(gt_annos)): rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty) num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets ignored_gts.append(np.array(ignored_gt, dtype=np.int64)) ignored_dets.append(np.array(ignored_det, dtype=np.int64)) if len(dc_bboxes) == 0: dc_bboxes = np.zeros((0, 4)).astype(np.float64) else: dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64) total_dc_num.append(dc_bboxes.shape[0]) dontcares.append(dc_bboxes) total_num_valid_gt += num_valid_gt gt_datas = np.concatenate( [gt_annos[i]["bbox"], gt_annos[i]["alpha"][..., np.newaxis]], 1) dt_datas = np.concatenate([ dt_annos[i]["bbox"], dt_annos[i]["alpha"][..., np.newaxis], dt_annos[i]["score"][..., np.newaxis] ], 1) gt_datas_list.append(gt_datas) dt_datas_list.append(dt_datas) total_dc_num = np.stack(total_dc_num, axis=0) return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt) def eval_class(gt_annos, dt_annos, current_classes, difficultys, metric, min_overlaps, compute_aos=False, z_axis=1, z_center=1.0, num_parts=50): assert len(gt_annos) == len(dt_annos) num_examples = len(gt_annos) split_parts = get_split_parts(num_examples, num_parts) rets = calculate_iou_partly( dt_annos, gt_annos, metric, num_parts, z_axis=z_axis, z_center=z_center) overlaps, parted_overlaps, total_dt_num, total_gt_num = rets N_SAMPLE_PTS = 41 num_minoverlap = len(min_overlaps) num_class = len(current_classes) num_difficulty = len(difficultys) precision = np.zeros( [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) recall = np.zeros( [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) all_thresholds = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) for m, current_class in enumerate(current_classes): for l, difficulty in enumerate(difficultys): rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty) (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt) = rets for k, min_overlap in enumerate(min_overlaps[:, metric, m]): thresholdss = [] for i in range(len(gt_annos)): rets = compute_statistics_jit( overlaps[i], gt_datas_list[i], dt_datas_list[i], ignored_gts[i], ignored_dets[i], dontcares[i], metric, min_overlap=min_overlap, thresh=0.0, compute_fp=False) tp, fp, fn, similarity, thresholds = rets thresholdss += thresholds.tolist() thresholdss = np.array(thresholdss) thresholds = get_thresholds(thresholdss, total_num_valid_gt) thresholds = np.array(thresholds) all_thresholds[m, l, k, :len(thresholds)] = thresholds pr = np.zeros([len(thresholds), 4]) idx = 0 for j, num_part in enumerate(split_parts): gt_datas_part = np.concatenate( gt_datas_list[idx:idx + num_part], 0) dt_datas_part = np.concatenate( dt_datas_list[idx:idx + num_part], 0) dc_datas_part = np.concatenate( dontcares[idx:idx + num_part], 0) 
ignored_dets_part = np.concatenate( ignored_dets[idx:idx + num_part], 0) ignored_gts_part = np.concatenate( ignored_gts[idx:idx + num_part], 0) fused_compute_statistics( parted_overlaps[j], pr, total_gt_num[idx:idx + num_part], total_dt_num[idx:idx + num_part], total_dc_num[idx:idx + num_part], gt_datas_part, dt_datas_part, dc_datas_part, ignored_gts_part, ignored_dets_part, metric, min_overlap=min_overlap, thresholds=thresholds, compute_aos=compute_aos) idx += num_part for i in range(len(thresholds)): precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1]) if compute_aos: aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1]) for i in range(len(thresholds)): precision[m, l, k, i] = np.max( precision[m, l, k, i:], axis=-1) if compute_aos: aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1) ret_dict = { "precision": precision, "orientation": aos, "thresholds": all_thresholds, "min_overlaps": min_overlaps, } return ret_dict def get_mAP_v2(prec): sums = 0 for i in range(0, prec.shape[-1], 4): sums = sums + prec[..., i] return sums / 11 * 100 def do_eval_v2(gt_annos, dt_annos, current_classes, min_overlaps, compute_aos=False, difficultys=(0, 1, 2), z_axis=1, z_center=1.0): ret = eval_class( gt_annos, dt_annos, current_classes, difficultys, 0, min_overlaps, compute_aos, z_axis=z_axis, z_center=z_center) mAP_bbox = get_mAP_v2(ret["precision"]) mAP_aos = None if compute_aos: mAP_aos = get_mAP_v2(ret["orientation"]) ret = eval_class( gt_annos, dt_annos, current_classes, difficultys, 1, min_overlaps, z_axis=z_axis, z_center=z_center) mAP_bev = get_mAP_v2(ret["precision"]) ret = eval_class( gt_annos, dt_annos, current_classes, difficultys, 2, min_overlaps, z_axis=z_axis, z_center=z_center) mAP_3d = get_mAP_v2(ret["precision"]) return mAP_bbox, mAP_bev, mAP_3d, mAP_aos def do_eval_v3(gt_annos, dt_annos, current_classes, min_overlaps, compute_aos=False, difficultys=(0, 1, 2), z_axis=1, z_center=1.0): types = ["bbox", "bev", "3d"] metrics = {} for i in range(3): ret = eval_class( gt_annos, dt_annos, current_classes, difficultys, i, min_overlaps, compute_aos, z_axis=z_axis, z_center=z_center) metrics[types[i]] = ret return metrics def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos, z_axis=1, z_center=1.0): min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]]) for i in range(overlap_ranges.shape[1]): for j in range(overlap_ranges.shape[2]): min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j]) mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval_v2( gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, z_axis=z_axis, z_center=z_center) mAP_bbox = mAP_bbox.mean(-1) mAP_bev = mAP_bev.mean(-1) mAP_3d = mAP_3d.mean(-1) if mAP_aos is not None: mAP_aos = mAP_aos.mean(-1) return mAP_bbox, mAP_bev, mAP_3d, mAP_aos def print_str(value, *arg, sstream=None): if sstream is None: sstream = sysio.StringIO() sstream.truncate(0) sstream.seek(0) print(value, *arg, file=sstream) return sstream.getvalue()
MIT License
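A hedged call sketch for get_official_eval_result above. gt_annos and dt_annos are assumed to be lists of per-frame KITTI-style annotation dicts (bbox, alpha, name, occluded, truncated, location, dimensions, rotation_y and, for detections, score) loaded elsewhere; they are not constructed here.

# gt_annos / dt_annos: per-frame annotation dicts, loaded elsewhere (assumption).
result_str = get_official_eval_result(
    gt_annos, dt_annos,
    current_classes=["Car", "Pedestrian"],  # names are mapped to class ids internally
    difficultys=[0, 1, 2],                  # easy / moderate / hard
    z_axis=1, z_center=1.0)                 # camera coordinates: y is the vertical axis
print(result_str)                           # bbox / bev / 3d (and optionally aos) AP per class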
brython-dev/brython
www/src/Lib/pydoc.py
_getowndoc
python
def _getowndoc(obj): try: doc = object.__getattribute__(obj, '__doc__') if doc is None: return None if obj is not type: typedoc = type(obj).__doc__ if isinstance(typedoc, str) and typedoc == doc: return None return doc except AttributeError: return None
Get the documentation string for an object if it is not inherited from its class.
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/pydoc.py#L154-L167
__all__ = ['help'] __author__ = "Ka-Ping Yee <ping@lfw.org>" __date__ = "26 February 2001" __credits__ = """Guido van Rossum, for an excellent programming language. Tommy Burnette, the original creator of manpy. Paul Prescod, for all his work on onlinehelp. Richard Chamberlain, for the first implementation of textdoc. """ import builtins import importlib._bootstrap import importlib._bootstrap_external import importlib.machinery import importlib.util import inspect import io import os import pkgutil import platform import re import sys import sysconfig import time import tokenize import urllib.parse import warnings from collections import deque from reprlib import Repr from traceback import format_exception_only def pathdirs(): dirs = [] normdirs = [] for dir in sys.path: dir = os.path.abspath(dir or '.') normdir = os.path.normcase(dir) if normdir not in normdirs and os.path.isdir(dir): dirs.append(dir) normdirs.append(normdir) return dirs def _findclass(func): cls = sys.modules.get(func.__module__) if cls is None: return None for name in func.__qualname__.split('.')[:-1]: cls = getattr(cls, name) if not inspect.isclass(cls): return None return cls def _finddoc(obj): if inspect.ismethod(obj): name = obj.__func__.__name__ self = obj.__self__ if (inspect.isclass(self) and getattr(getattr(self, name, None), '__func__') is obj.__func__): cls = self else: cls = self.__class__ elif inspect.isfunction(obj): name = obj.__name__ cls = _findclass(obj) if cls is None or getattr(cls, name) is not obj: return None elif inspect.isbuiltin(obj): name = obj.__name__ self = obj.__self__ if (inspect.isclass(self) and self.__qualname__ + '.' + name == obj.__qualname__): cls = self else: cls = self.__class__ elif isinstance(obj, property): func = obj.fget name = func.__name__ cls = _findclass(func) if cls is None or getattr(cls, name) is not obj: return None elif inspect.ismethoddescriptor(obj) or inspect.isdatadescriptor(obj): name = obj.__name__ cls = obj.__objclass__ if getattr(cls, name) is not obj: return None if inspect.ismemberdescriptor(obj): slots = getattr(cls, '__slots__', None) if isinstance(slots, dict) and name in slots: return slots[name] else: return None for base in cls.__mro__: try: doc = _getowndoc(getattr(base, name)) except AttributeError: continue if doc is not None: return doc return None
BSD 3-Clause New or Revised License
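A small illustration of what _getowndoc returns for classes versus instances; Widget is a throwaway example class.

class Widget:
    """A widget."""

print(_getowndoc(Widget))    # 'A widget.'  -- the class owns its docstring
print(_getowndoc(Widget()))  # None -- the instance merely inherits Widget.__doc__
print(_getowndoc(42))        # None -- 42 inherits int.__doc__ rather than owning one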
ceph/ceph-ansible
library/ceph_ec_profile.py
get_profile
python
def get_profile(module, name, cluster='ceph', container_image=None): args = ['get', name, '--format=json'] cmd = generate_ceph_cmd(sub_cmd=['osd', 'erasure-code-profile'], args=args, cluster=cluster, container_image=container_image) return cmd
Get an existing erasure-code profile
https://github.com/ceph/ceph-ansible/blob/ae6be71b081b379e19035d6abc05475ed8a00e5d/library/ceph_ec_profile.py#L109-L121
from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.ca_common import is_containerized, generate_ceph_cmd, exec_command, exit_module except ImportError: from module_utils.ca_common import is_containerized, generate_ceph_cmd, exec_command, exit_module import datetime import json ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: ceph_ec_profile short_description: Manage Ceph Erasure Code profile version_added: "2.8" description: - Manage Ceph Erasure Code profile options: cluster: description: - The ceph cluster name. required: false default: ceph name: description: - name of the profile. required: true state: description: If 'present' is used, the module creates a profile. If 'absent' is used, the module will delete the profile. required: false choices: ['present', 'absent', 'info'] default: present stripe_unit: description: - The amount of data in a data chunk, per stripe. required: false k: description: - Number of data-chunks the object will be split in required: true m: description: - Compute coding chunks for each object and store them on different OSDs. required: true crush_root: description: - The name of the crush bucket used for the first step of the CRUSH rule. required: false crush_device_class: description: - Restrict placement to devices of a specific class (hdd/ssd) required: false author: - Guillaume Abrioux <gabrioux@redhat.com> ''' EXAMPLES = ''' - name: create an erasure code profile ceph_ec_profile: name: foo k: 4 m: 2 - name: delete an erassure code profile ceph_ec_profile: name: foo state: absent ''' RETURN = '''# '''
Apache License 2.0
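A hedged sketch of calling get_profile from the module's run logic; the profile name and container image are placeholders, and the exact argv produced depends on generate_ceph_cmd in ceph-ansible's module_utils (not shown in the record).

# Placeholders: 'k4m2' profile name, container image tag.
cmd = get_profile(module=None, name='k4m2', cluster='ceph',
                  container_image='quay.io/ceph/ceph:v16')
# `cmd` is the argv list that exec_command() would run; conceptually it wraps:
#   ceph --cluster ceph osd erasure-code-profile get k4m2 --format=json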
arp242/download-npo
download_npo/__init__.py
replace_vars
python
def replace_vars(path, meta): if sys.version_info[0] <= 2: path = path.decode() path = path.format(**{ 'episode_id': meta.get('prid', ''), 'datum': meta.get('gidsdatum', ''), 'titel': meta.get('titel', None) or meta.get('title', ''), 'aflevering_titel': meta.get('aflevering_titel', ''), 'tijdsduur': meta.get('tijdsduur', ''), 'serie_id': meta.get('serie', {}).get('srid', ''), 'serie_titel': meta.get('serie', {}).get('serie_titel', ''), }) if locale.getpreferredencoding() != 'UTF-8': path = unicodedata.normalize('NFKD', path).encode('ascii', 'ignore') if sys.version_info[0] > 2: path = path.decode() return path
Replace variables in the path with format(); we need to play some games to make sure it works with both Python 2 and 3 and unicode.
https://github.com/arp242/download-npo/blob/01ca50d6da3aead3e32783b2ac3057344f3918a7/download_npo/__init__.py#L96-L119
from __future__ import print_function import locale import os import re import sys import unicodedata import download_npo.sites if sys.version_info[0] < 3: import urllib2 else: import urllib.request as urllib2 __all__ = ['version', 'check_update', 'human_size', 'human_time'] verbose = 0 class Error(Exception): pass def version(): return ('2.9.1', '2018-12-29') def check_update(): try: page = urllib2.urlopen( 'https://github.com/Carpetsmoker/download-npo/releases').read().decode('utf-8') latest = re.findall('releases/tag/version-([0-9.]+)', page)[0] if latest > version()[0]: return latest except: if verbose: print('check_update() failed: {}'.format(sys.exc_info()[1])) return None def human_size(bytesize, p=1): i = 0 while bytesize > 1024: bytesize /= 1024.0 i += 1 bytesize = (('%.' + str(p) + 'f') % bytesize).replace('.', ',') return '%s %s' % (bytesize, ('b', 'KiB', 'MiB', 'GiB')[i]) def human_time(s): if s > 3600: return '%ih%02im%02is' % (s / 3600, s / 60 % 60, s % 60) if s > 60: return '%im%02is' % (s / 60, s % 60) return '%02is' % s
MIT License
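An illustrative call to replace_vars with a made-up metadata dict; real metadata comes from the NPO API, and the keys below follow the mapping inside the function.

meta = {
    'prid': 'VPWON_123',                     # made-up episode id
    'gidsdatum': '2018-12-24',
    'titel': 'Journaal',
    'serie': {'srid': 'S_1', 'serie_titel': 'NOS Journaal'},
}
path = replace_vars('{serie_titel}/{datum}-{titel}-{episode_id}.mp4', meta)
# On Python 3 with a UTF-8 locale this yields:
#   'NOS Journaal/2018-12-24-Journaal-VPWON_123.mp4'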
thalesgroup/pycryptoki
pycryptoki/token_management.py
c_get_mechanism_list
python
def c_get_mechanism_list(slot): slot_id = CK_ULONG(slot) mech = AutoCArray(ctype=CK_MECHANISM_TYPE) @refresh_c_arrays(1) def _c_get_mech_list(): return C_GetMechanismList(slot_id, mech.array, mech.size) ret = _c_get_mech_list() return ret, [x for x in mech]
Gets the list of mechanisms from the HSM :param slot: The slot number to get the mechanism list on :returns: The result code and a list of the mechanism types supported by the slot
https://github.com/thalesgroup/pycryptoki/blob/b1c97389b9db11c8bd96722db5347cc54a051602/pycryptoki/token_management.py#L101-L117
import logging from ctypes import byref from six import b from .cryptoki import CK_ULONG, CK_BBOOL, CK_MECHANISM_TYPE, CK_MECHANISM_INFO from .defaults import ADMIN_PARTITION_LABEL, ADMIN_SLOT from .defines import CKR_OK from .cryptoki import ( C_InitToken, C_GetSlotList, C_GetMechanismList, C_GetMechanismInfo, CA_GetTokenPolicies, ) from .session_management import c_get_token_info from .exceptions import make_error_handle_function from .common_utils import AutoCArray from .common_utils import refresh_c_arrays LOG = logging.getLogger(__name__) def c_init_token(slot_num, password, token_label="Main Token"): LOG.info( "C_InitToken: Initializing token (slot=%s, label='%s', password='%s')", slot_num, token_label, password, ) if password == b"": password = None password = AutoCArray(data=password) slot_id = CK_ULONG(slot_num) label = AutoCArray(data=token_label) return C_InitToken(slot_id, password.array, password.size.contents, label.array) c_init_token_ex = make_error_handle_function(c_init_token) def get_token_by_label(label): if label == ADMIN_PARTITION_LABEL: return CKR_OK, ADMIN_SLOT slot_list = AutoCArray() @refresh_c_arrays(1) def _get_slot_list(): return C_GetSlotList(CK_BBOOL(1), slot_list.array, slot_list.size) ret = _get_slot_list() if ret != CKR_OK: return ret, None for slot in slot_list: ret, token_info = c_get_token_info(slot) if token_info["label"] == b(label): return ret, slot raise Exception("Slot with label " + str(label) + " not found.") get_token_by_label_ex = make_error_handle_function(get_token_by_label)
Apache License 2.0
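A hedged usage sketch for c_get_mechanism_list; it assumes the PKCS#11 library has already been initialized elsewhere and that slot 0 holds a token.

from pycryptoki.defines import CKR_OK
from pycryptoki.token_management import c_get_mechanism_list

ret, mechanism_list = c_get_mechanism_list(0)   # slot 0 is an assumption
if ret == CKR_OK:
    print("Slot 0 supports %d mechanisms" % len(mechanism_list))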
square/connect-python-sdk
squareconnect/models/product.py
Product.__init__
python
def __init__(self): self.swagger_types = { } self.attribute_map = { }
Product - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/product.py#L30-L45
from pprint import pformat from six import iteritems import re class Product(object):
Apache License 2.0
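The generated Product model defines no Swagger attributes, so instantiation is trivial; shown only to make the empty swagger_types / attribute_map mappings concrete (import path taken from the record above).

from squareconnect.models.product import Product

p = Product()
print(p.swagger_types)   # {}
print(p.attribute_map)   # {}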
matheushent/covid-19-detector
explain/core/grad_cam.py
GradCAM.generate_ponderated_output
python
def generate_ponderated_output(outputs, grads): maps = [ GradCAM.ponderate_output(output, grad) for output, grad in zip(outputs, grads) ] return maps
Apply the Grad-CAM weighting scheme. Inputs are the convolutional outputs (shape WxHxN) and gradients (shape WxHxN). From there: - we compute the spatial average of the gradients - we build a ponderated (weighted) sum of the convolutional outputs based on those averaged weights Args: outputs (tf.Tensor): Target layer outputs, with shape (batch_size, Hl, Wl, Nf), where Hl and Wl are the target layer output height and width, and Nf the number of filters. grads (tf.Tensor): Guided gradients with shape (batch_size, Hl, Wl, Nf) Returns: List[tf.Tensor]: List of ponderated outputs of shape (batch_size, Hl, Wl, 1)
https://github.com/matheushent/covid-19-detector/blob/1661c6e7237fdf4155dbd9df09c650615fa00511/explain/core/grad_cam.py#L130-L154
import tensorflow as tf import numpy as np import cv2 from explain.utils.display import grid_display, heatmap_display from explain.utils.saver import save_rgb class GradCAM: def explain( self, validation_data, model, class_index, layer_name=None, colormap=cv2.COLORMAP_VIRIDIS, image_weight=0.7, _grid=True ): images, _ = validation_data if layer_name is None: layer_name = self.infer_grad_cam_target_layer(model) outputs, guided_grads = GradCAM.get_gradients_and_filters( model, images, layer_name, class_index ) cams = GradCAM.generate_ponderated_output(outputs, guided_grads) heatmaps = np.array( [ heatmap_display(cam.numpy(), image, colormap, image_weight) for cam, image in zip(cams, images) ] ) if _grid: return grid_display(heatmaps) else: return heatmaps @staticmethod def infer_grad_cam_target_layer(model): for layer in reversed(model.layers): if len(layer.output_shape) == 4 and layer.name.count('conv') > 0: return layer.name raise ValueError( "Model does not seem to contain 4D layer. Grad CAM cannot be applied." ) @staticmethod @tf.function def get_gradients_and_filters(model, images, layer_name, class_index): grad_model = tf.keras.models.Model( [model.inputs], [model.get_layer(layer_name).output, model.output] ) dtype = model.get_layer(layer_name).output.dtype with tf.GradientTape() as tape: inputs = tf.cast(images, tf.float32) conv_outputs, predictions = grad_model(inputs) loss = predictions[:, class_index] grads = tape.gradient(loss, conv_outputs) guided_grads = ( tf.cast(conv_outputs > 0, dtype) * tf.cast(grads > 0, dtype) * grads ) return conv_outputs, guided_grads @staticmethod
Apache License 2.0
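A minimal shape-level sketch of generate_ponderated_output on random tensors; it assumes GradCAM.ponderate_output (referenced above but not included in the record) is available, and the 7x7x64 feature-map shape is arbitrary.

import tensorflow as tf

# Dummy convolutional outputs and guided gradients: batch of 2, 7x7 maps, 64 filters.
outputs = tf.random.uniform((2, 7, 7, 64))
grads = tf.random.uniform((2, 7, 7, 64))

cams = GradCAM.generate_ponderated_output(outputs, grads)
print(len(cams))   # 2 -- one ponderated activation map per image in the batch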
opendp/smartnoise-sdk
synth/snsynth/mwem.py
MWEMSynthesizer._exponential_mechanism
python
def _exponential_mechanism(self, hist, synth_hist, queries, eps): errors = [ abs(self._evaluate(queries[i], hist) - self._evaluate(queries[i], synth_hist)) * (eps / 2.0) for i in range(len(queries)) ] maxi = max(errors) errors = [math.exp(errors[i] - maxi) for i in range(len(errors))] r = random.random() e_s = sum(errors) c = 0 for i in range(len(errors)): c += errors[i] if c > r * e_s: return i return len(errors) - 1
Refer to the paper for an in-depth description of the Exponential Mechanism. Parametrized with epsilon value epsilon/(2 * iterations). :param hist: Basis histogram :type hist: np.ndarray :param synth_hist: Synthetic histogram :type synth_hist: np.ndarray :param queries: Queries to draw from :type queries: list :param eps: Budget :type eps: float :return: index of the query selected by the mechanism :rtype: int
https://github.com/opendp/smartnoise-sdk/blob/3d6823417aa4fe27e3ed035845a052199e717c6a/synth/snsynth/mwem.py#L334-L364
import math import random import warnings from functools import wraps import numpy as np import pandas as pd from snsynth.base import SDGYMBaseSynthesizer class MWEMSynthesizer(SDGYMBaseSynthesizer): def __init__( self, epsilon, q_count=400, iterations=30, mult_weights_iterations=20, splits=[], split_factor=None, max_bin_count=500, custom_bin_count={}, ): self.epsilon = epsilon self.q_count = q_count self.iterations = iterations self.mult_weights_iterations = mult_weights_iterations self.synthetic_data = None self.data_bins = None self.real_data = None self.splits = splits self.split_factor = split_factor self.max_bin_count = max_bin_count self.mins_maxes = {} self.scale = {} self.custom_bin_count = custom_bin_count self.pandas = False self.pd_cols = None self.pd_index = None self.q_values = None self.max_retries_exp_mechanism = 50 @wraps(SDGYMBaseSynthesizer.fit) def fit(self, data, categorical_columns=None, ordinal_columns=None): if isinstance(data, np.ndarray): self.data = data.copy() elif isinstance(data, pd.DataFrame): self.pandas = True for col in data.columns: data[col] = pd.to_numeric(data[col], errors="ignore") self.data = data.to_numpy().copy() self.pd_cols = data.columns self.pd_index = data.index else: raise ValueError("Data must be a numpy array or pandas dataframe.") if self.split_factor is not None and self.splits == []: self.splits = self._generate_splits(data.T.shape[0], self.split_factor) self.splits = np.array(self.splits) if self.splits.size == 0: self.histograms = self._histogram_from_data_attributes( self.data, [np.arange(self.data.shape[1])] ) else: self.histograms = self._histogram_from_data_attributes(self.data, self.splits) self.q_values = [] for h in self.histograms: self.q_values.append(self._compose_arbitrary_slices(self.q_count, h[1])) self.synthetic_histograms = self.mwem() @wraps(SDGYMBaseSynthesizer.sample) def sample(self, samples): synthesized_columns = () first = True for fake, _, split in self.synthetic_histograms: s = [] fake_indices = np.arange(len(np.ravel(fake))) fake_distribution = np.ravel(fake) norm = np.sum(fake) for _ in range(samples): s.append(np.random.choice(fake_indices, p=(fake_distribution / norm))) s_unraveled = [] for ind in s: s_unraveled.append(np.unravel_index(ind, fake.shape)) np_unraveled = np.array(s_unraveled) for i in range(np_unraveled.shape[-1]): min_c, max_c = self.mins_maxes[str(split[i])] np_unraveled[:, i] = np_unraveled[:, i] * self.scale[str(split[i])] np_unraveled[:, i] = np_unraveled[:, i] + min_c if first: synthesized_columns = np_unraveled first = False else: synthesized_columns = np.hstack((synthesized_columns, np_unraveled)) combined = synthesized_columns r = self._reorder(self.splits) if self.pandas: df = pd.DataFrame(combined[:, r], index=self.pd_index, columns=self.pd_cols) return df else: return combined[:, r] def mwem(self): a_values = [] for i, h in enumerate(self.histograms): hist = h[0] dimensions = h[1] split = h[3] queries = self.q_values[i] synth_hist = self._initialize_a(hist, dimensions) measurements = {} flat_dim = 1 for j in dimensions: flat_dim *= j if 2 * flat_dim <= self.iterations: warnings.warn( "Flattened dimensionality of synthetic histogram is less than" + " the number of iterations. This is a privacy risk." + " Consider increasing your split_factor (especially if it is 1), " + "or decreasing the number of iterations. 
" + "Dim: " + str(flat_dim) + " Split: " + str(split), Warning, ) for i in range(self.iterations): qi = self._exponential_mechanism( hist, synth_hist, queries, ((self.epsilon / (2 * self.iterations)) / len(self.histograms)) ) while qi in measurements: qi = self._exponential_mechanism( hist, synth_hist, queries, ((self.epsilon / (2 * self.iterations)) / len(self.histograms)) ) evals = self._evaluate(queries[qi], hist) lap = self._laplace( (2 * self.iterations * len(self.histograms)) / (self.epsilon * len(dimensions)) ) measurements[qi] = evals + lap synth_hist = self._multiplicative_weights( synth_hist, queries, measurements, hist, self.mult_weights_iterations ) a_values.append((synth_hist, hist, split)) return a_values def _initialize_a(self, histogram, dimensions): n = np.sum(histogram) value = n / np.prod(dimensions) synth_hist = np.zeros_like(histogram) synth_hist += value return synth_hist def _histogram_from_data_attributes(self, data, splits=[]): histograms = [] for split in splits: split_data = data[:, split] mins_data = [] maxs_data = [] dims_sizes = [] for i, column in enumerate(split_data.T): min_c = min(column) max_c = max(column) mins_data.append(min_c) maxs_data.append(max_c) bin_count = int(max_c - min_c + 1) self.mins_maxes[str(split[i])] = (min_c, max_c) if bin_count > self.max_bin_count: warnings.warn( "Bin count " + str(bin_count) + " in column: " + str(split[i]) + " exceeds max_bin_count, defaulting to: " + str(self.max_bin_count) + ". Is this a continuous variable?", Warning, ) bin_count = self.max_bin_count self.scale[str(split[i])] = (max_c - min_c + 1) / self.max_bin_count else: self.scale[str(split[i])] = 1 if str(split[i]) in self.custom_bin_count: bin_count = int(self.custom_bin_count[str(split[i])]) self.scale[str(split[i])] = 1 dims_sizes.append(bin_count) histogram, bins = np.histogramdd(split_data, bins=dims_sizes) histograms.append((histogram, dims_sizes, bins, split)) return histograms
MIT License
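A toy end-to-end run of MWEMSynthesizer, whose fit loop drives the _exponential_mechanism shown above; the import path is inferred from the file location (synth/snsynth/mwem.py) and the data is random low-cardinality integers.

import numpy as np
from snsynth.mwem import MWEMSynthesizer   # assumed import path

# Two integer columns with 5 categories each keeps the histograms tiny.
data = np.random.randint(0, 5, size=(1000, 2))

synth = MWEMSynthesizer(epsilon=1.0, split_factor=2, iterations=10, q_count=100)
synth.fit(data)
synthetic = synth.sample(100)   # 100 differentially private synthetic rows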
austerweillab/jarjar
jarjar/jarjar.py
jarjar.text
python
def text(self, message=None, **kwargs): kwargs = self._infer_kwargs(message=message, **kwargs) return self._post(**kwargs)
Send a text message. This method is largely identical to :func:`~jarjar.jarjar.attach`, only differing in the first argument (``message``), which is expected to be a string. Parameters ---------- message : str Text to send. Optional *but weird if you don't provide one*. If attach is None and there is no default *and* you don't provide one here, jarjar just wings it. attach : dict Attachment data. Optional. All values are converted to string for the slack payload so don't sweat it. channel : str, list Optional. Name of the channel to post within. Can also be a list of channel names; jarjar will post to each. webhook : str Optional. Webhook URL for the slack team. Returns ------- response : requests.models.Response Requests response object for the POST request to slack.
https://github.com/austerweillab/jarjar/blob/ee95995c78e4e74a8203063d1dbc3763e49b7d7a/jarjar/jarjar.py#L308-L337
import copy import datetime import functools import imp import json import os import requests import time import traceback import warnings _EXPECTED_CONFIG = ['channel', 'webhook', 'message'] _EXPECTED_KWARGS = _EXPECTED_CONFIG + ['attach'] _NO_MESSAGE_WARN = ( ''' Slow down cowboy! You didn't provide a message and there is no default in your .jarjar, so I'll just wing it. ''' .strip() .replace('\n', ' ') .replace('\t', ' ') .replace(' ', ' ') ) def read_config_file(filename): if isinstance(filename, (list, tuple)): return [read_config_file(i) for i in filename] if not os.path.exists(filename): return dict((i, None) for i in _EXPECTED_CONFIG) cfg = imp.load_source('_tmp', filename) try: os.remove(filename + 'c', ) except OSError: pass return dict((i, getattr(cfg, i, None)) for i in _EXPECTED_CONFIG) class jarjar(object): _final_default_message = 'Meesa Jarjar Binks!' headers = {'Content-Type': 'application/json'} def __init__(self, config=None, **defaults): for k in defaults.keys(): if k in _EXPECTED_CONFIG: continue warnings.warn('Received unexpected kwarg: `%s`.' % k) expected_config_files = [ os.path.join(os.getcwd(), '.jarjar'), os.path.join(os.path.expanduser('~'), '.jarjar'), ] if config is not None: if isinstance(config, list): expected_config_files = config + expected_config_files else: expected_config_files.insert(0, config) configs = read_config_file(expected_config_files) configs.insert(0, defaults) for val in _EXPECTED_CONFIG: setattr( self, 'default_%s' % val, next((i[val] for i in configs if i.get(val, None)), None) ) self.attachment_args = dict( fallback="New attachments are ready!", color="#36a64f", fields=[] ) self.payload_args = dict() def _set_defaults(self, channel=None, webhook=None, message=None): if channel in (None, ''): self.default_channel = self.cfg_channel else: self.default_channel = channel if webhook in (None, ''): self.default_webhook = self.cfg_webhook else: self.default_webhook = webhook if message in (None, ''): self.default_message = self.cfg_message else: self.default_message = message def _read_config(self): filename = os.path.join(os.path.expanduser('~'), '.jarjar') if not os.path.exists(filename): open(filename, 'a').close() cfg = imp.load_source('_jarjar', filename) for field in ['channel', 'webhook', 'message']: if hasattr(cfg, field): data = getattr(cfg, field) else: data = None setattr(self, 'cfg_%s' % field, data) def _infer_kwargs(self, **kwargs): def _get(arg): if arg in kwargs and kwargs[arg] not in ('', None): return kwargs[arg] if arg == 'attach': return None default = getattr(self, 'default_{}'.format(arg)) if arg in ['channel', 'webhook']: if not default: raise NameError('No {} provided!'.format(arg)) else: return default if self.default_message is not None: return self.default_message if 'attach' in kwargs and kwargs['attach']: return None warnings.warn(_NO_MESSAGE_WARN) return self._final_default_message for k, _ in kwargs.items(): if k in _EXPECTED_KWARGS: continue warnings.warn('Recieved unexpected kwarg: `%s`.' 
% k) result = dict() for arg in ['message', 'attach', 'channel', 'webhook']: result[arg] = _get(arg) return result def _attachment_formatter(self, attach): attachments = copy.deepcopy(self.attachment_args) attachments['ts'] = time.time() for key in attach: if isinstance(attach[key], str): outval = attach[key] else: try: outval = str(attach[key]) except UnicodeEncodeError: outval = unicode(attach[key]) except Exception: raise attachments['fields'].append(dict( title=key, value=outval, short=len(outval) < 20 )) return [attachments] def attach(self, attach=None, **kwargs): if attach is None: warnings.warn( 'You called `attach` but there is no attachment? Weird.' ) kwargs = self._infer_kwargs(attach=attach, **kwargs) return self.text(**kwargs)
MIT License
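A hedged example of jarjar.text; the webhook URL is a placeholder, and the attach dict merely exercises the attachment-formatting path described in the docstring.

from jarjar import jarjar   # assumed to resolve to the class defined above

jj = jarjar(webhook='https://hooks.slack.com/services/T000/B000/XXXXXXXX',  # placeholder
            channel='#general')
response = jj.text('Meesa back!', attach={'status': 'ok', 'runtime_sec': 42})
print(response.status_code)   # 200 if Slack accepted the payload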
pekrau/couchdb2
couchdb2.py
Database.compact
python
def compact(self, finish=False, callback=None): self.server._POST(self.name, "_compact", headers={"Content-Type": JSON_MIME}) if finish: response = self.server._GET(self.name) seconds = 0 while response.json().get("compact_running"): time.sleep(1) seconds += 1 if callback: callback(seconds) response = self.server._GET(self.name)
Compacts the CouchDB database by rewriting the disk database file and removing old revisions of documents. - If `finish` is True, then return only when compaction is done. - In addition, if defined, the function `callback(seconds)` is called every second until compaction is done.
https://github.com/pekrau/couchdb2/blob/8e6f4031b39029a69f560c7c9418270144cce33d/couchdb2.py#L374-L391
__version__ = "1.11.0" import argparse import collections import getpass import gzip import io import json import mimetypes import os import os.path import tarfile import sys import time import uuid if sys.version_info[:2] < (3, 7): from collections import OrderedDict as dict import requests JSON_MIME = "application/json" BIN_MIME = "application/octet-stream" CHUNK_SIZE = 100 class Server: def __init__(self, href="http://localhost:5984/", username=None, password=None, use_session=True, ca_file=None): self.href = href.rstrip("/") + "/" self._session = requests.Session() self._session.headers.update({"Accept": JSON_MIME}) if ca_file is not None: self._session.verify = ca_file if username and password: if use_session: self._POST("_session", data={"name": username, "password": password}) else: self._session.auth = (username, password) @property def version(self): try: return self._version except AttributeError: self._version = self._GET().json()["version"] return self._version @property def user_context(self): response = self._GET("_session") return response.json() def __str__(self): return f"CouchDB {self.version} {self.href}" def __len__(self): data = self._GET("_all_dbs").json() return len([n for n in data if not n.startswith("_")]) def __iter__(self): data = self._GET("_all_dbs").json() return iter([Database(self, n, check=False) for n in data if not n.startswith("_")]) def __getitem__(self, name): return Database(self, name, check=True) def __contains__(self, name): response = self._HEAD(name, errors={404: None}) return response.status_code == 200 def __call__(self): response = self._GET() return response.json() def __del__(self): self._session.close() def up(self): assert self.version >= "2.0" response = self._session.get(self.href + "_up") return response.status_code == 200 def get(self, name, check=True): return Database(self, name, check=check) def create(self, name, n=3, q=8, partitioned=False): db = Database(self, name, check=False) return db.create(n=n, q=q, partitioned=partitioned) def get_config(self, nodename="_local"): response = self._GET("_node", nodename, "_config") return response.json() def get_active_tasks(self): response = self._GET("_active_tasks") return response.json() def get_cluster_setup(self, ensure_dbs_exists=None): assert self.version >= "2.0" if ensure_dbs_exists is None: params = {} else: params = {"ensure_dbs_exists": ensure_dbs_exists} response = self._GET("_cluster_setup", params=params) return response.json() def set_cluster_setup(self, doc): assert self.version >= "2.0" self._POST("_cluster_setup", json=doc) def get_membership(self): assert self.version >= "2.0" response = self._GET("_membership") return response.json() def set_replicate(self, doc): response = self._POST("_replicate", json=doc) return response.json() def get_scheduler_jobs(self, limit=None, skip=None): params = {} if limit is not None: params["limit"] = _jsons(limit) if skip is not None: params["skip"] = _jsons(skip) response = self._GET("_scheduler/jobs", params=params) return response.json() def get_scheduler_docs(self, limit=None, skip=None): params = {} if limit is not None: params["limit"] = _jsons(limit) if skip is not None: params["skip"] = _jsons(skip) response = self._GET("_scheduler/docs", params=params) return response.json() def get_node_stats(self, nodename="_local"): response = self._GET("_node", nodename, "_stats") return response.json() def get_node_system(self, nodename="_local"): response = self._GET("_node", nodename, "_system") return response.json() def _HEAD(self, 
*segments, **kwargs): response = self._session.head(self._href(segments)) self._check(response, errors=kwargs.get("errors", {})) return response def _GET(self, *segments, **kwargs): kw = self._kwargs(kwargs, "headers", "params") response = self._session.get(self._href(segments), **kw) self._check(response, errors=kwargs.get("errors", {})) return response def _PUT(self, *segments, **kwargs): kw = self._kwargs(kwargs, "json", "data", "headers") response = self._session.put(self._href(segments), **kw) self._check(response, errors=kwargs.get("errors", {})) return response def _POST(self, *segments, **kwargs): kw = self._kwargs(kwargs, "json", "data", "headers", "params") response = self._session.post(self._href(segments), **kw) self._check(response, errors=kwargs.get("errors", {})) return response def _DELETE(self, *segments, **kwargs): kw = self._kwargs(kwargs, "headers") response = self._session.delete(self._href(segments), **kw) self._check(response, errors=kwargs.get("errors", {})) return response def _href(self, segments): return self.href + "/".join(segments) def _kwargs(self, kwargs, *keys): result = {} for key in keys: try: result[key] = kwargs[key] except KeyError: pass return result def _check(self, response, errors={}): try: error = errors[response.status_code] except KeyError: try: error = _ERRORS[response.status_code] except KeyError: raise IOError(f"{response.status_code} {response.reason}") if error is not None: raise error(response.reason) class Database: def __init__(self, server, name, check=True): self.server = server self.name = name if check: self.check() def __str__(self): return self.name def __len__(self): return self.server._GET(self.name).json()["doc_count"] def __contains__(self, id): response = self.server._HEAD(self.name, id, errors={404: None}) return response.status_code in (200, 304) def __iter__(self): return _DatabaseIterator(self) def __getitem__(self, id): result = self.get(id) if result is None: raise NotFoundError("no such document") else: return result def exists(self): response = self.server._HEAD(self.name, errors={404: None}) return response.status_code == 200 def check(self): if not self.exists(): raise NotFoundError(f"Database '{self}' does not exist.") def create(self, n=3, q=8, partitioned=False): self.server._PUT(self.name, data={"n": n, "q": q, "partitioned": partitioned}) return self def destroy(self): self.server._DELETE(self.name) def get_info(self): response = self.server._GET(self.name) return response.json() def get_security(self): response = self.server._GET(self.name, "_security") return response.json() def set_security(self, doc): self.server._PUT(self.name, "_security", json=doc)
MIT License
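A short sketch of Database.compact in context; the server URL and credentials are placeholders.

import couchdb2   # the module shown above

server = couchdb2.Server('http://localhost:5984/',
                         username='admin', password='secret')   # placeholders
db = server.get('mydb') if 'mydb' in server else server.create('mydb')

# Block until compaction finishes, reporting elapsed seconds along the way.
db.compact(finish=True, callback=lambda seconds: print(f'compacting... {seconds}s'))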
blackholll/loonflow
service/ticket/ticket_base_service.py
TicketBaseService.ticket_handle_permission_check
python
def ticket_handle_permission_check(cls, ticket_id: int, username: str, by_timer: bool=False, by_task: bool=False, by_hook: bool=False)->tuple: ticket_obj = TicketRecord.objects.filter(id=ticket_id, is_deleted=0).first() if not ticket_obj: return False, '工单不存在或已被删除' ticket_state_id = ticket_obj.state_id flag, transition_queryset = workflow_transition_service_ins.get_state_transition_queryset(ticket_state_id) if flag is False: return False, transition_queryset if not transition_queryset: return True, dict(permission=False, msg='工单当前状态无需操作') flag, state_obj = workflow_state_service_ins.get_workflow_state_by_id(ticket_state_id) if not state_obj: return True, dict(permission=False, msg='工单当前状态id不存在或已被删除') if by_timer and username == 'loonrobot': return True, dict(permission=True, need_accept=False, in_add_node=False, msg='by timer,release permissions') if by_task and username == 'loonrobot': return True, dict(permission=True, need_accept=False, in_add_node=False, msg='by script,release permissions') if by_hook and username == 'loonrobot': return True, dict(permission=True, need_accept=False, in_add_node=False, msg='by hook,release permissions') participant_type_id = ticket_obj.participant_type_id participant = ticket_obj.participant current_participant_count = 1 if participant_type_id == constant_service_ins.PARTICIPANT_TYPE_PERSONAL: if username != participant: return True, dict(permission=False, need_accept=False, in_add_node=False, msg='not current participant, no permission') elif participant_type_id == constant_service_ins.PARTICIPANT_TYPE_MULTI: if username not in participant.split(','): return True, dict(permission=False, need_accept=False, in_add_node=False, msg='not crrent participant, no permission') current_participant_count = len(participant.split(',')) elif participant_type_id == constant_service_ins.PARTICIPANT_TYPE_DEPT: flag, dept_user_list = account_base_service_ins.get_dept_username_list(participant) if flag is False: return flag, dept_user_list if username not in dept_user_list: return True, dict(permission=False, need_accept=False, in_add_node=False, msg='not current participant, no permission') current_participant_count = len(dept_user_list) elif participant_type_id == constant_service_ins.PARTICIPANT_TYPE_ROLE: flag, role_user_list = account_base_service_ins.get_role_username_list(int(participant)) if not flag: return False, role_user_list if username not in role_user_list: return True, dict(permission=False, need_accept=False, in_add_node=False, msg='not crrent participant, no permission') current_participant_count = len(role_user_list) else: return True, dict(permission=False, need_accept=False, in_add_node=False, msg='not crrent participant, no permission') if current_participant_count > 1 and state_obj.distribute_type_id == constant_service_ins.STATE_DISTRIBUTE_TYPE_ACTIVE: need_accept = True else: need_accept = False if ticket_obj.in_add_node: in_add_node = True else: in_add_node = False return True, dict(permission=True, need_accept=need_accept, in_add_node=in_add_node)
handle permission check :param ticket_id: :param username: :param by_timer: is timer or not :param by_task: is by script or not :param by_hook: is by hook or not :return:
https://github.com/blackholll/loonflow/blob/9992f9bc712e844a5c357c33ce148bf9e2bd854d/service/ticket/ticket_base_service.py#L885-L974
import copy import json import datetime import random import redis from django.db.models import Q from django.conf import settings from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from apps.workflow.models import CustomField from apps.ticket.models import TicketRecord, TicketCustomField, TicketFlowLog, TicketUser from service.redis_pool import POOL from service.base_service import BaseService from service.common.log_service import auto_log from service.common.common_service import common_service_ins from service.common.constant_service import constant_service_ins from service.account.account_base_service import account_base_service_ins from service.workflow.workflow_base_service import workflow_base_service_ins from service.workflow.workflow_state_service import workflow_state_service_ins from service.workflow.workflow_transition_service import workflow_transition_service_ins from service.workflow.workflow_custom_field_service import workflow_custom_field_service_ins class TicketBaseService(BaseService): def __init__(self): pass @classmethod @auto_log def get_ticket_by_id(cls, ticket_id: int)->tuple: ticket_obj = TicketRecord.objects.filter(id=ticket_id, is_deleted=0).first() if ticket_obj: return True, ticket_obj else: return False, 'ticket is not existed or has been deleted' @classmethod @auto_log def get_ticket_list(cls, sn: str='', title: str='', username: str='', create_start: str='', create_end: str='', workflow_ids: str='', state_ids: str='', ticket_ids: str='', category: str='', reverse: int=1, per_page: int=10, page: int=1, app_name: str='', **kwargs): category_list = ['all', 'owner', 'duty', 'relation', 'worked', 'view', 'intervene'] if category not in category_list: return False, 'category value is invalid, it should be in all, owner, duty, relation' query_params = Q(is_deleted=False) from service.workflow.workflow_permission_service import workflow_permission_service_ins flag, result = workflow_permission_service_ins.get_workflow_id_list_by_permission('api', 'app', app_name) if not flag or not result.get('workflow_id_list'): return True, dict(ticket_result_restful_list=[], paginator_info=dict(per_page=per_page, page=page, total=0)) else: app_workflow_id_list = result.get('workflow_id_list') if kwargs.get('act_state_id') != '': query_params &= Q(act_state_id=int(kwargs.get('act_state_id'))) if kwargs.get('from_admin') != '': flag, result = workflow_base_service_ins.get_workflow_manage_list(username ) if flag is False: return False, result workflow_list = result.get('workflow_list') workflow_admin_id_list = [workflow['id'] for workflow in workflow_list] if kwargs.get('creator') != '': query_params &= Q(creator=kwargs.get('creator')) if sn: query_params &= Q(sn__startswith=sn) if title: query_params &= Q(title__contains=title) if create_start: query_params &= Q(gmt_created__gte=create_start) if create_end: query_params &= Q(gmt_created__lte=create_end) if workflow_ids: workflow_id_str_list = workflow_ids.split(',') query_workflow_id_list = [int(workflow_id_str) for workflow_id_str in workflow_id_str_list] else: query_workflow_id_list = [] if state_ids: state_id_str_list = state_ids.split(',') state_id_list = [int(state_id_str) for state_id_str in state_id_str_list] query_params &= Q(state_id__in=state_id_list) if ticket_ids: ticket_id_str_list = ticket_ids.split(',') ticket_id_list = [int(ticket_id_str) for ticket_id_str in ticket_id_str_list] query_params &= Q(id__in=ticket_id_list) if kwargs.get('from_admin'): permission_workflow_id_set = 
set(workflow_admin_id_list) - (set(workflow_admin_id_list) - set(app_workflow_id_list)) if query_workflow_id_list: ending_workflow_id_list = list(permission_workflow_id_set - (permission_workflow_id_set - set(query_workflow_id_list))) else: ending_workflow_id_list = list(permission_workflow_id_set) else: if query_workflow_id_list: ending_workflow_id_list = list(set(app_workflow_id_list) - (set(app_workflow_id_list) - set(query_workflow_id_list))) else: ending_workflow_id_list = app_workflow_id_list query_params &= Q(workflow_id__in=ending_workflow_id_list) if reverse: order_by_str = '-gmt_created' else: order_by_str = 'gmt_created' if category == 'owner': query_params &= Q(creator=username) ticket_objects = TicketRecord.objects.filter(query_params).order_by(order_by_str).distinct() elif category == 'duty': duty_query_expression = Q(ticketuser__in_process=True, ticketuser__username=username) query_params &= duty_query_expression act_state_expression = ~Q(act_state_id__in=[ constant_service_ins.TICKET_ACT_STATE_FINISH, constant_service_ins.TICKET_ACT_STATE_CLOSED ]) query_params &= act_state_expression ticket_objects = TicketRecord.objects.filter(query_params).order_by(order_by_str).distinct() elif category == 'relation': relation_query_expression = Q(ticketuser__username=username) query_params &= relation_query_expression ticket_objects = TicketRecord.objects.filter(query_params).order_by(order_by_str).distinct() elif category == 'worked': worked_query_expression = Q(ticketuser__username=username, ticketuser__worked=True) query_params &= worked_query_expression ticket_objects = TicketRecord.objects.filter(query_params).order_by(order_by_str).distinct() else: ticket_objects = TicketRecord.objects.filter(query_params).order_by(order_by_str).distinct() paginator = Paginator(ticket_objects, per_page) try: ticket_result_paginator = paginator.page(page) except PageNotAnInteger: ticket_result_paginator = paginator.page(1) except EmptyPage: ticket_result_paginator = paginator.page(paginator.num_pages) ticket_result_object_list = ticket_result_paginator.object_list ticket_result_restful_list = [] for ticket_result_object in ticket_result_object_list: flag, state_obj = workflow_state_service_ins.get_workflow_state_by_id(ticket_result_object.state_id) state_name = state_obj.name flag, participant_info = cls.get_ticket_format_participant_info(ticket_result_object.id) flag, workflow_obj = workflow_base_service_ins.get_by_id(ticket_result_object.workflow_id) workflow_info_dict = dict(workflow_id=workflow_obj.id, workflow_name=workflow_obj.name) flag, creator_obj = account_base_service_ins.get_user_by_username(ticket_result_object.creator) if flag: flag, dept_dict_info = account_base_service_ins.get_user_dept_info(user_id=creator_obj.id) creator_info = dict(username=creator_obj.username, alias=creator_obj.alias, is_active=creator_obj.is_active, email=creator_obj.email, phone=creator_obj.phone, dept_info=dept_dict_info) else: creator_info = dict(username=ticket_result_object.creator, alias='', is_active=False, email='', phone='', dept_info={}) ticket_format_obj = ticket_result_object.get_dict() ticket_format_obj.update(dict(state=dict(state_id=ticket_result_object.state_id, state_name=state_name, state_label=json.loads(state_obj.label)), participant_info=participant_info, creator_info=creator_info, workflow_info=workflow_info_dict)) ticket_result_restful_list.append(ticket_format_obj) return True, dict(ticket_result_restful_list=ticket_result_restful_list, paginator_info=dict(per_page=per_page, page=page, 
total=paginator.count)) @classmethod @auto_log def new_ticket(cls, request_data_dict: dict, app_name: str='')->tuple: workflow_id = request_data_dict.get('workflow_id') transition_id = request_data_dict.get('transition_id') username = request_data_dict.get('username') parent_ticket_id = request_data_dict.get('parent_ticket_id', 0) parent_ticket_state_id = request_data_dict.get('parent_ticket_state_id', 0) suggestion = request_data_dict.get('suggestion', '') if not (workflow_id and transition_id and username): return False, u'参数不合法,请提供workflow_id,username,transition_id' request_field_arg_list = [key for key, value in request_data_dict.items() if (key not in ['workflow_id', 'suggestion', 'username'])] has_permission, msg = workflow_base_service_ins.check_new_permission(username, workflow_id) if not has_permission: return False, msg flag, start_state = workflow_state_service_ins.get_workflow_start_state(workflow_id) if flag is False: return False, start_state flag, state_info_dict = cls.get_state_field_info(start_state.id) require_field_list = state_info_dict.get('require_field_list', []) update_field_list = state_info_dict.get('update_field_list', []) flag, req_transition_obj = workflow_transition_service_ins.get_workflow_transition_by_id(transition_id) if req_transition_obj.field_require_check: for require_field in require_field_list: if require_field not in request_field_arg_list: return False, '此工单的必填字段为:{}'.format(','.join(require_field_list)) flag, msg = cls.get_next_state_id_by_transition_and_ticket_info(0, request_data_dict) if flag: destination_state_id = msg.get('destination_state_id') else: return False, msg flag, destination_state = workflow_state_service_ins.get_workflow_state_by_id(destination_state_id) flag, participant_info = cls.get_ticket_state_participant_info(destination_state_id, ticket_req_dict=request_data_dict) if not flag: return False, participant_info destination_participant_type_id = participant_info.get('destination_participant_type_id', 0) destination_participant = participant_info.get('destination_participant', '') multi_all_person = participant_info.get('multi_all_person', '{}') flag, result = cls.gen_ticket_sn(app_name) if not flag: return False, result ticket_sn = result.get('ticket_sn') if destination_state.type_id == constant_service_ins.STATE_TYPE_END: act_state_id = constant_service_ins.TICKET_ACT_STATE_FINISH elif destination_state.type_id == constant_service_ins.STATE_TYPE_START: act_state_id = constant_service_ins.TICKET_ACT_STATE_DRAFT else: act_state_id = constant_service_ins.TICKET_ACT_STATE_ONGOING flag, workflow_base_obj = workflow_base_service_ins.get_by_id(workflow_id) title_template = workflow_base_obj.title_template title = request_data_dict.get('title', '') if title_template: title = title_template.format(**request_data_dict) new_ticket_obj = TicketRecord(sn=ticket_sn, title=title, workflow_id=workflow_id, state_id=destination_state_id, parent_ticket_id=parent_ticket_id, parent_ticket_state_id=parent_ticket_state_id, participant=destination_participant, participant_type_id=destination_participant_type_id, relation=username, creator=username, act_state_id=act_state_id, multi_all_person=multi_all_person) new_ticket_obj.save() flag, result = cls.get_ticket_dest_relation(destination_participant_type_id, destination_participant) if flag is True: cls.update_ticket_relation(new_ticket_obj.id, result.get('add_relation'), ticket_creator=username) request_data_dict_allow = {} for key, value in request_data_dict.items(): if key in update_field_list: 
request_data_dict_allow[key] = value update_ticket_custom_field_result, msg = cls.update_ticket_custom_field(new_ticket_obj.id, request_data_dict_allow) if not update_ticket_custom_field_result: return False, msg flag, result = cls.get_ticket_all_field_value_json(new_ticket_obj.id) if flag is False: return False, result all_ticket_data_json = result.get('all_field_value_json') new_ticket_flow_log_dict = dict(ticket_id=new_ticket_obj.id, transition_id=transition_id, suggestion=suggestion, participant_type_id=constant_service_ins.PARTICIPANT_TYPE_PERSONAL, participant=username, state_id=start_state.id, ticket_data=all_ticket_data_json) add_ticket_flow_log_result, msg = cls.add_ticket_flow_log(new_ticket_flow_log_dict) if not add_ticket_flow_log_result: return False, msg from tasks import send_ticket_notice send_ticket_notice.apply_async(args=[new_ticket_obj.id], queue='loonflow') if destination_participant_type_id == constant_service_ins.PARTICIPANT_TYPE_ROBOT: from tasks import run_flow_task run_flow_task.apply_async(args=[new_ticket_obj.id, destination_participant, destination_state_id], queue='loonflow') if destination_participant_type_id == constant_service_ins.PARTICIPANT_TYPE_HOOK: from tasks import flow_hook_task flow_hook_task.apply_async(args=[new_ticket_obj.id], queue='loonflow') cls.handle_timer_transition(new_ticket_obj.id, destination_state_id) if destination_state.type_id == constant_service_ins.STATE_TYPE_END and new_ticket_obj.parent_ticket_id and new_ticket_obj.parent_ticket_state_id: filter_params = dict( parent_ticket_id=new_ticket_obj.parent_ticket_id, parent_ticket_state_id=new_ticket_obj.parent_ticket_state_id, is_deleted=0 ) other_sub_ticket_queryset = TicketRecord.objects.filter(**filter_params).all() other_sub_ticket_state_id_list = [other_sub_ticket.state_id for other_sub_ticket in other_sub_ticket_queryset] flag, result = workflow_state_service_ins.get_states_info_by_state_id_list(other_sub_ticket_state_id_list) if flag: sub_ticket_state_type_list = [] for key, value in result.items(): sub_ticket_state_type_list.append(value.get('type_id')) list_set = set(sub_ticket_state_type_list) if list_set == {constant_service_ins.STATE_TYPE_END}: parent_ticket_obj = TicketRecord.objects.filter(id=new_ticket_obj.parent_ticket_id, is_deleted=0) .first() parent_ticket_state_id = parent_ticket_obj.state_id flag, parent_ticket_transition_queryset = workflow_transition_service_ins .get_state_transition_queryset(parent_ticket_state_id) parent_ticket_transition_id = parent_ticket_transition_queryset[0].id cls.handle_ticket(parent_ticket_obj.id, dict(transition_id=parent_ticket_transition_id, username='loonrobot', suggestion='所有子工单处理完毕,自动流转')) return True, dict(new_ticket_id=new_ticket_obj.id) @classmethod @auto_log def gen_ticket_sn(cls, app_name: str='')->tuple: redis_conn = redis.Redis(connection_pool=POOL) ticket_day_count_key = 'ticket_day_count_{}'.format(str(datetime.datetime.now())[:10]) try: ticket_day_count = redis_conn.get(ticket_day_count_key) except redis.exceptions.ConnectionError: return False, 'Redis连接失败,请确认Redis已启动并配置正确' except Exception as e: raise Exception(e.__str__()) if ticket_day_count is not None: new_ticket_day_count = redis_conn.incr(ticket_day_count_key) else: today = str(datetime.datetime.now())[:10] + " 00:00:00" next_day = str(datetime.datetime.now() + datetime.timedelta(days=1))[:10] + " 00:00:00" ticket_day_count = TicketRecord.objects.filter(gmt_created__gte=today, gmt_created__lt=next_day).count() new_ticket_day_count = int(ticket_day_count) + 1 
redis_conn.set(ticket_day_count_key, new_ticket_day_count, 86400) now_day = datetime.datetime.now() if not app_name: sn_prefix = 'loonflow' else: if app_name == 'loonflow': sn_prefix = 'loonflow' else: flag, result = account_base_service_ins.get_token_by_app_name(app_name) if flag is False: return False, result sn_prefix = result.ticket_sn_prefix zone_info = '' if settings.DEPLOY_ZONE: zone_info = '{}_'.format(settings.DEPLOY_ZONE) return True, dict(ticket_sn='%s_%s%04d%02d%02d%04d' % (sn_prefix, zone_info, now_day.year, now_day.month, now_day.day, new_ticket_day_count)) @classmethod @auto_log def get_ticket_field_value(cls, ticket_id: int, field_key: str)->tuple: if field_key in constant_service_ins.TICKET_BASE_FIELD_LIST: ticket_obj = TicketRecord.objects.filter(id=ticket_id, is_deleted=0).first() ticket_obj_dict = ticket_obj.get_dict() value = ticket_obj_dict.get(field_key) else: flag, result = cls.get_ticket_custom_field_value(ticket_id, field_key) if flag is False: return False, result value = result.get('value') return True, dict(value=value) @classmethod @auto_log def get_ticket_format_custom_field_key_dict(cls, ticket_id: int)->tuple: ticket_obj = TicketRecord.objects.filter(id=ticket_id, is_deleted=0).first() custom_field_queryset = CustomField.objects.filter(is_deleted=0, workflow_id=ticket_obj.workflow_id).all() format_field_key_dict = {} for custom_field in custom_field_queryset: format_field_key_dict[custom_field.field_key] = dict(field_type_id=custom_field.field_type_id, name=custom_field.field_name, bool_field_display=custom_field.boolean_field_display, field_choice=custom_field.field_choice, field_from='custom') return True, format_field_key_dict @classmethod @auto_log def get_ticket_custom_field_value(cls, ticket_id: int, field_key: str)->tuple: flag, result = cls.get_ticket_format_custom_field_key_dict(ticket_id) if flag is False: return False, result field_type_id = result[field_key]['field_type_id'] ticket_custom_field_obj = TicketCustomField.objects.filter(field_key=field_key, ticket_id=ticket_id, is_deleted=0).first() if not ticket_custom_field_obj: value = None else: value_dict = ticket_custom_field_obj.get_dict() value_enum = constant_service_ins.FIELD_VALUE_ENUM value = value_dict.get(value_enum[field_type_id]) return True, dict(value=value) @classmethod @auto_log def get_ticket_field_name(cls, ticket_id: int, field_key: str)->tuple: if field_key in constant_service_ins.TICKET_BASE_FIELD_LIST: return True, dict(field_name=field_key) else: flag, result = cls.get_ticket_custom_field_name(ticket_id, field_key) if flag is False: return False, result return True, dict(field_name=result.get('field_name')) @classmethod @auto_log def get_ticket_custom_field_name(cls, ticket_id: int, field_key: str)->tuple: flag, result = cls.get_ticket_format_custom_field_key_dict(ticket_id) if flag is False: return False, result field_name = result[field_key]['field_name'] return True, dict(field_name=field_name) @classmethod @auto_log def update_ticket_custom_field(cls, ticket_id: int, update_dict: dict)->tuple: ticket_obj = TicketRecord.objects.filter(id=ticket_id, is_deleted=0).first() flag, format_custom_field_dict = workflow_custom_field_service_ins .get_workflow_custom_field(ticket_obj.workflow_id) if flag is False: return False, format_custom_field_dict format_custom_field_dict = format_custom_field_dict custom_field_key_list = [key for key, value in format_custom_field_dict.items()] for key, value in update_dict.items(): if key in custom_field_key_list: 
ticket_custom_field_queryset = TicketCustomField.objects.filter( ticket_id=ticket_id, field_key=key, is_deleted=0) field_type_id = format_custom_field_dict[key]['field_type_id'] if update_dict.get(key) is None: if ticket_custom_field_queryset: ticket_custom_field_queryset.update(is_deleted=1) else: pass else: value_enum = constant_service_ins.FIELD_VALUE_ENUM if ticket_custom_field_queryset: ticket_custom_field_queryset.update(**{value_enum.get(field_type_id): update_dict.get(key)}) elif not ticket_custom_field_queryset: new_dict = { 'name': format_custom_field_dict[key]['field_name'], 'ticket_id': ticket_id, 'field_key': key, 'field_type_id': field_type_id, value_enum[field_type_id]: update_dict.get(key) } new_ticket_custom_field_record = TicketCustomField(**new_dict) new_ticket_custom_field_record.save() return True, '' @classmethod @auto_log def update_ticket_field_value(cls, ticket_id: int, update_dict: dict)-> tuple: base_field_dict = {} for key, value in update_dict.items(): if key in constant_service_ins.TICKET_BASE_FIELD_LIST: base_field_dict[key] = value if base_field_dict: TicketRecord.objects.filter(id=ticket_id, is_deleted=0).update(**base_field_dict) cls.update_ticket_custom_field(ticket_id, update_dict) return True, '' @classmethod @auto_log def add_ticket_flow_log(cls, kwargs: dict)->tuple: suggestion = kwargs.get('suggestion', '') if kwargs.get('suggestion', '') else '' if len(suggestion) > 1000: kwargs['suggestion'] = '{}...(be truncated because More than 1000)' .format(kwargs.get('suggestion', '')[:960]) kwargs['suggestion'] = suggestion if not kwargs.get('creator'): kwargs['creator'] = kwargs.get('participant', '') new_ticket_flow_log = TicketFlowLog(**kwargs) new_ticket_flow_log.save() return True, dict(new_ticket_flow_log_id=new_ticket_flow_log.id) @classmethod @auto_log def get_ticket_detail(cls, ticket_id: int, username: str)-> tuple: flag, result = cls.ticket_handle_permission_check(ticket_id, username) if flag is False or not result.get('permission'): view_permission, msg = cls.ticket_view_permission_check(ticket_id, username) if not view_permission: return False, msg handle_permission = False else: handle_permission = True ticket_obj = TicketRecord.objects.filter(id=ticket_id, is_deleted=0).first() flag, result = cls.get_ticket_base_field_list(ticket_id) if flag is False: return False, result field_list = result.get('field_list') new_field_list = [] if handle_permission: flag, state_obj = workflow_state_service_ins.get_workflow_state_by_id(ticket_obj.state_id) if flag is False: return False, state_obj state_field_str = state_obj.state_field_str state_field_dict = json.loads(state_field_str) state_field_key_list = state_field_dict.keys() for field in field_list: if field['field_key'] in state_field_key_list: field['field_attribute'] = state_field_dict[field['field_key']] new_field_list.append(field) else: flag, workflow_obj = workflow_base_service_ins.get_by_id(workflow_id=ticket_obj.workflow_id) display_form_field_list = json.loads(workflow_obj.display_form_str) for field in field_list: if field['field_key'] in display_form_field_list: new_field_list.append(field) new_field_list = sorted(new_field_list, key=lambda r: r['order_id']) flag, creator_obj = account_base_service_ins.get_user_by_username(ticket_obj.creator) if flag: flag, dept_dict_info = account_base_service_ins.get_user_dept_info(user_id=creator_obj.id) creator_info = dict(username=creator_obj.username, alias=creator_obj.alias, is_active=creator_obj.is_active, email=creator_obj.email, 
phone=creator_obj.phone, dept_info=dept_dict_info) else: creator_info = dict(username=ticket_obj.creator, alias='', is_active=False, email='', phone='', dept_info={}) flag, result = workflow_state_service_ins.get_workflow_state_by_id(ticket_obj.state_id) if flag is False: return False, result state_info = result.get_dict() if state_info['participant_type_id'] == constant_service_ins.PARTICIPANT_TYPE_HOOK: state_info['participant'] = 'hook' state_info['state_field_str'] = json.loads(state_info['state_field_str']) state_info['label'] = json.loads(state_info['label']) ticket_result_dict = ticket_obj.get_dict() ticket_result_dict.update(dict(field_list=new_field_list, creator_info=creator_info, state_info=state_info)) return True, ticket_result_dict @classmethod @auto_log def get_ticket_base_field_list(cls, ticket_id: int)->tuple: ticket_obj = TicketRecord.objects.filter(id=ticket_id, is_deleted=0).first() flag, state_obj = workflow_state_service_ins.get_workflow_state_by_id(ticket_obj.state_id) if flag is False: return False, state_obj state_name = state_obj.name field_list = [] flag, participant_info_dict = cls.get_ticket_format_participant_info(ticket_id) if flag is False: return False, participant_info_dict flag, workflow_obj = workflow_base_service_ins.get_by_id(ticket_obj.workflow_id) if flag is False: return False, workflow_obj workflow_name = workflow_obj.name field_list.append(dict(field_key='sn', field_name=u'流水号', field_value=ticket_obj.sn, order_id=10, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, description='工单的流水号', field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}, placeholder='')) field_list.append(dict(field_key='title', field_name=u'标题', field_value=ticket_obj.title, order_id=20, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO,description='工单的标题', field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}, placeholder='')) field_list.append(dict(field_key='state_id', field_name=u'状态id', field_value=ticket_obj.state_id, order_id=40, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, description='工单当前状态的id', field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}, placeholder='')) field_list.append(dict(field_key='participant_info.participant_name', field_name=u'当前处理人', field_value=participant_info_dict['participant_name'], order_id=50, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, description='工单的当前处理人', field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}, placeholder='')) field_list.append(dict(field_key='participant_info.participant_alias', field_name=u'当前处理人', field_value=participant_info_dict['participant_alias'], order_id=55, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, description='工单当前处理人(alias)', field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}, placeholder='')) field_list.append(dict(field_key='workflow.workflow_name', field_name=u'工作流名称', field_value=workflow_name, order_id=60, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, description='工单所属工作流的名称', field_choice={}, boolean_field_display={}, default_value=None, 
field_template='', label={}, placeholder='')) field_list.append(dict(field_key='creator', field_name=u'创建人', field_value=ticket_obj.creator, order_id=80, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, description='工单的创建人', field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}, placeholder='')) field_list.append(dict(field_key='gmt_created', field_name=u'创建时间', field_value=str(ticket_obj.gmt_created)[:19], order_id=100, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, description='工单的创建时间', field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}, placeholder='')) field_list.append(dict(field_key='gmt_modified', field_name=u'更新时间', field_value=str(ticket_obj.gmt_modified)[:19], order_id=120, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, description='工单的更新时间', field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}, placeholder='')) field_list.append(dict(field_key='state.state_name', field_name=u'状态名', field_value=state_name, order_id=41, field_type_id=constant_service_ins.FIELD_TYPE_STR, field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, description='工单当前状态的名称', field_choice={}, boolean_field_display={}, default_value=None, field_template='', label={}, placeholder='')) flag, custom_field_dict = workflow_custom_field_service_ins.get_workflow_custom_field(ticket_obj.workflow_id) custom_field_key_list = [key for key, value in custom_field_dict.items()] ticket_custom_field_objs = TicketCustomField.objects.filter(ticket_id=ticket_id, field_key__in=custom_field_key_list, is_deleted=0).all() key_value_dict = {} for ticket_custom_field_obj in ticket_custom_field_objs: key_value_dict[ticket_custom_field_obj.field_key] = ticket_custom_field_obj for key, value in custom_field_dict.items(): field_type_id = value['field_type_id'] field_value_obj = key_value_dict.get(key) if not field_value_obj: field_value = None else: value_enum = constant_service_ins.FIELD_VALUE_ENUM field_value = field_value_obj.get_dict().get(value_enum.get(field_type_id)) boolean_field_display = json.loads( custom_field_dict[key]['boolean_field_display']) if custom_field_dict[key]['boolean_field_display'] else {} field_list.append(dict(field_key=key, field_name=custom_field_dict[key]['field_name'], field_value=field_value, order_id=custom_field_dict[key]['order_id'], field_type_id=custom_field_dict[key]['field_type_id'], field_attribute=constant_service_ins.FIELD_ATTRIBUTE_RO, default_value=custom_field_dict[key]['default_value'], description=custom_field_dict[key]['description'], field_template=custom_field_dict[key]['field_template'], boolean_field_display=boolean_field_display, field_choice=json.loads(custom_field_dict[key]['field_choice']), label=json.loads(custom_field_dict[key]['label']), placeholder=custom_field_dict[key]['placeholder'] )) return True, dict(field_list=field_list) @classmethod @auto_log def get_ticket_format_participant_info(cls, ticket_id: int)->tuple: ticket_obj = TicketRecord.objects.filter(id=ticket_id, is_deleted=0).first() participant = ticket_obj.participant participant_name = ticket_obj.participant participant_type_id = ticket_obj.participant_type_id participant_type_name = '' participant_alias = '' if participant_type_id == constant_service_ins.PARTICIPANT_TYPE_PERSONAL: participant_type_name = '个人' flag, 
participant_user_obj = account_base_service_ins.get_user_by_username(participant) if flag: participant_alias = participant_user_obj.alias else: participant_alias = participant elif participant_type_id == constant_service_ins.PARTICIPANT_TYPE_MULTI: participant_type_name = '多人' participant_name_list = participant_name.split(',') participant_map = {"name": set(), "alias": set()} flag, participant_user_objs = account_base_service_ins.get_user_list_by_usernames(participant_name_list) if flag: for participant_user in participant_user_objs: participant_map["name"].add(participant_user.username) participant_map["alias"].add(participant_user.alias) participant_map["alias"].update( set(participant_name_list) - participant_map["name"] ) participant_alias = ','.join(participant_map["alias"]) elif participant_type_id == constant_service_ins.PARTICIPANT_TYPE_DEPT: participant_type_name = '部门' flag, dept_obj = account_base_service_ins.get_dept_by_id(int(ticket_obj.participant)) if flag is False: return False, dept_obj if not dept_obj: return False, 'dept_id:{} is not existed or has been deleted'.format(ticket_obj.participant) participant_name = dept_obj.name participant_alias = participant_name elif participant_type_id == constant_service_ins.PARTICIPANT_TYPE_ROLE: participant_type_name = '角色' flag, role_obj = account_base_service_ins.get_role_by_id(int(ticket_obj.participant)) if flag is False or (not role_obj): return False, 'role is not existed or has been deleted' participant_name = role_obj.name participant_alias = participant_name elif participant_type_id == constant_service_ins.PARTICIPANT_TYPE_ROBOT: from apps.workflow.models import WorkflowScript script_obj = WorkflowScript.objects.filter(id=int(participant), is_deleted=0).first() if script_obj: participant_name = participant participant_alias = '脚本:{}'.format(script_obj.name) if json.loads(ticket_obj.multi_all_person): participant_type_name = '多人且全部处理' multi_all_person_dict = json.loads(ticket_obj.multi_all_person) participant_alias0_list = [] for key, value in multi_all_person_dict.items(): flag, participant_user_obj = account_base_service_ins.get_user_by_username(key) if not flag: participant_alias0 = key else: participant_alias0 = participant_user_obj.alias if value: participant_alias0_list.append( '{}({})已处理:{}'.format(participant_alias0, key, value.get('transition_name'))) else: participant_alias0_list.append( '{}({})未处理:{}'.format(participant_alias0, key, value.get('transition_name'))) participant_alias = ';'.join(participant_alias0_list) return True, dict(participant=participant, participant_name=participant_name, participant_type_id=participant_type_id, participant_type_name=participant_type_name, participant_alias=participant_alias) @classmethod @auto_log
MIT License
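A hypothetical caller-side sketch of the `(flag, result)` convention this method returns: the first element reports whether the lookup itself succeeded, the second is either an error string or the permission dict. The ticket id and username are made up; the import path comes from the record above, and the decorators on the real method are assumed to preserve this return shape.

from service.ticket.ticket_base_service import TicketBaseService

flag, result = TicketBaseService.ticket_handle_permission_check(ticket_id=42, username='alice')
if flag is False:
    # the ticket (or its workflow state) could not be resolved; result is a message
    print('lookup failed:', result)
elif not result.get('permission'):
    # resolved, but this user may not act on the ticket right now
    print('denied:', result.get('msg'))
else:
    # permitted; need_accept and in_add_node drive the handling flow
    print('allowed, need_accept=%s, in_add_node=%s' % (result['need_accept'], result['in_add_node']))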
mpes-kit/mpes
mpes/beamtime.py
nnmean
python
def nnmean(U, V, lsh, rsh, ush, dsh):
    a, x, y = V.shape
    for i in nb.prange(ush, x-dsh):
        for j in nb.prange(lsh, y-rsh):
            for ia in nb.prange(a):
                V[ia, i, j] = np.nanmean(U[ia, i-ush:i+dsh, j-lsh:j+rsh])
    return V
Nearest-neighbor mean averaging **Parameters**\n U, V: numpy.ndarray (float32) 3D matrices, U is the original, V is the modified version of U. lsh, rsh, ush, dsh: int Pixel shifts along the four primary directions (left, right, up, down). **Return**\n V: numpy.ndarray (float32) Modified 3D matrix after averaging.
https://github.com/mpes-kit/mpes/blob/c54cb756b9af9a640707bad9170e3437a44eebcb/mpes/beamtime.py#L85-L109
from __future__ import print_function, division import numpy as np import numba as nb from skimage.draw import circle, polygon @nb.njit(parallel=False) def _gridopt_landscape(u, v, shift_range, scale_range): lshift = shift_range.size lscale = scale_range.size vopt = np.zeros((lshift, lscale)) for si, s in enumerate(shift_range): for ai, a in enumerate(scale_range): vopt[si, ai] = np.nansum((u[:-s] - a*v[s:])**2) return vopt def planarfilter(U, axis, leftshift=0, rightshift=1, upshift=0, downshift=1, shifts=None): if shifts is not None: lsh, rsh, ush, dsh = shifts V = U.copy() V = np.moveaxis(V, axis, 0) V = nnmean(U, V, lsh, rsh, ush, dsh) V = np.moveaxis(V, 0, axis) return V @nb.njit('float32[:,:,:](float32[:,:,:], float32[:,:,:], int64, int64, int64, int64)', parallel=True)
MIT License
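For orientation, a hedged usage sketch of the NaN-aware neighborhood averaging above, driven through `planarfilter` from the same module: each interior output pixel becomes the `nanmean` of the `(ush+dsh) x (lsh+rsh)` window around it. The array shape, shift values and the float32 cast are illustrative; float32 is required by the explicit `@nb.njit` signature.

import numpy as np
from mpes.beamtime import planarfilter

data = np.random.rand(4, 64, 64).astype(np.float32)   # e.g. a (band, x, y) stack
data[0, 10, 10] = np.nan                               # NaNs are skipped by nanmean
smoothed = planarfilter(data, axis=0, shifts=(2, 2, 2, 2))
print(smoothed.shape)                                  # same shape; border pixels are left untouched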
autodesk/nanodesign
nanodesign/data/strand.py
DnaStrand.get_base_coords
python
def get_base_coords(self):
    num_bases = len(self.tour)
    base_coords = np.zeros((num_bases, 3), dtype=float)
    for i, base in enumerate(self.tour):
        helix_num = base.h
        helix_pos = base.p
        helix = self.helix_list[helix_num]
        base_coords[i] = base.coordinates
    return base_coords
Get the coordinates of bases along the dna helix axis. This is only used when writing a visualization file.
https://github.com/autodesk/nanodesign/blob/54c470a2c10011182b297dec969fadadfc0ba2b7/nanodesign/data/strand.py#L104-L115
import random import sys import os from sets import Set import numpy as np class DnaStrand(object): def __init__(self, id, dna_structure, is_scaffold, is_circular, tour): self.id = id self.is_scaffold = is_scaffold self.is_circular = is_circular self.tour = tour self.color = self.create_random_color() self.icolor = None self.helix_list = dict() self.base_id_list = dict() self.dna_structure = dna_structure self.domain_list = [] self.insert_seq = [] def create_random_color(self): n = 4 dc = 1.0 / (n-1) color_list = [i*dc for i in range(n)] if self.is_scaffold: rgb = [1.0, 1.0, 1.0] else: rgb = [random.choice(color_list) for i in range(3)] if (rgb[0] == 0.0) and (rgb[1] == 0.0): rgb[0] = random.choice(color_list[1:]) if rgb[2] == 0.0: rgb[2] = random.choice(color_list[1:]) return rgb def add_helix(self, helix): id = helix.lattice_num if (id not in self.helix_list): self.helix_list[id] = helix
Apache License 2.0
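A short hedged sketch of consuming the `(num_bases, 3)` array returned by `get_base_coords`; `strand` is assumed to be a `DnaStrand` instance produced elsewhere by the nanodesign readers, and the statistics below are purely illustrative.

import numpy as np

coords = strand.get_base_coords()            # shape (num_bases, 3), ordered along the tour
center = coords.mean(axis=0)                 # geometric centre of the strand
extent = coords.max(axis=0) - coords.min(axis=0)
print(len(coords), center, extent)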
cs3org/wopiserver
src/wopiserver.py
bridgeOpen
python
def bridgeOpen():
    try:
        wopisrc = url_unquote(flask.request.args['WOPISrc'])
        acctok = flask.request.args['access_token']
        Wopi.log.info('msg="BridgeOpen called" client="%s" user-agent="%s" token="%s"' %
                      (flask.request.remote_addr, flask.request.user_agent, acctok[-20:]))
        appurl, _ = bridge.appopen(wopisrc, acctok)
        return flask.redirect(appurl)
    except KeyError as e:
        Wopi.log.warning('msg="BridgeOpen: unable to open the file, missing WOPI context" error="%s"' % e)
        return _guireturn('Missing arguments'), http.client.BAD_REQUEST
    except bridge.FailedOpen as foe:
        return _guireturn(foe.msg), foe.statuscode
The WOPI bridge open call
https://github.com/cs3org/wopiserver/blob/00e8b2a0c69af6b037c8c90ad4067c14f782cf39/src/wopiserver.py#L481-L495
import sys import os import time import socket import configparser from platform import python_version import logging import logging.handlers from urllib.parse import unquote as url_unquote import http.client import json try: import flask from werkzeug.exceptions import NotFound as Flask_NotFound from werkzeug.exceptions import MethodNotAllowed as Flask_MethodNotAllowed import jwt from prometheus_flask_exporter import PrometheusMetrics except ImportError: print("Missing modules, please install dependencies with `pip3 install -f requirements.txt`") raise import core.wopi import core.discovery import core.ioplocks import core.wopiutils as utils import bridge WOPISERVERVERSION = 'git' UNAUTHORIZED = 'Client not authorized', http.client.UNAUTHORIZED storage = None def storage_layer_import(storagetype): global storage if storagetype in ['local', 'xroot', 'cs3']: storagetype += 'iface' else: raise ImportError('Unsupported/Unknown storage type %s' % storagetype) try: storage = __import__('core.' + storagetype, globals(), locals(), [storagetype]) except ImportError: print("Missing module when attempting to import %s.py. Please make sure dependencies are met." % storagetype) raise class Wopi: app = flask.Flask("wopiserver") metrics = PrometheusMetrics(app, group_by='endpoint') port = 0 lastConfigReadTime = time.time() loglevels = {"Critical": logging.CRITICAL, "Error": logging.ERROR, "Warning": logging.WARNING, "Info": logging.INFO, "Debug": logging.DEBUG } log = utils.JsonLogger(app.logger) openfiles = {} @classmethod def init(cls): try: hostname = os.environ.get('HOST_HOSTNAME') if not hostname: hostname = socket.gethostname() loghandler = logging.FileHandler('/var/log/wopi/wopiserver.log') loghandler.setFormatter(logging.Formatter( fmt='{"time": "%(asctime)s.%(msecs)03d", "host": "' + hostname + '", "level": "%(levelname)s", "process": "%(name)s", %(message)s}', datefmt='%Y-%m-%dT%H:%M:%S')) cls.app.logger.handlers = [loghandler] cls.config = configparser.ConfigParser() with open('/etc/wopi/wopiserver.defaults.conf') as fdef: cls.config.read_file(fdef) cls.config.read('/etc/wopi/wopiserver.conf') storage_layer_import(cls.config.get('general', 'storagetype')) cls.port = int(cls.config.get('general', 'port')) cls.log.setLevel(cls.loglevels[cls.config.get('general', 'loglevel')]) try: cls.nonofficetypes = cls.config.get('general', 'nonofficetypes').split() except (TypeError, configparser.NoOptionError) as e: cls.nonofficetypes = [] with open(cls.config.get('security', 'wopisecretfile')) as s: cls.wopisecret = s.read().strip('\n') with open(cls.config.get('security', 'iopsecretfile')) as s: cls.iopsecret = s.read().strip('\n') cls.tokenvalidity = cls.config.getint('general', 'tokenvalidity') storage.init(cls.config, cls.log) cls.useHttps = cls.config.get('security', 'usehttps').lower() == 'yes' if cls.useHttps: try: with open(cls.config.get('security', 'wopicert')) as _: pass with open(cls.config.get('security', 'wopikey')) as _: pass except OSError as e: cls.log.error('msg="Failed to open the provided certificate or key to start in https mode"') raise cls.wopiurl = cls.config.get('general', 'wopiurl') if cls.config.has_option('general', 'lockpath'): cls.lockpath = cls.config.get('general', 'lockpath') else: cls.lockpath = '' _ = cls.config.get('general', 'downloadurl') bridge.WB.init(cls.config, cls.log, cls.wopisecret) utils.srv = core.ioplocks.srv = core.wopi.srv = cls utils.log = core.ioplocks.log = core.wopi.log = core.discovery.log = cls.log utils.st = core.ioplocks.st = core.wopi.st = storage 
core.discovery.config = cls.config utils.endpoints = core.discovery.endpoints except (configparser.NoOptionError, OSError) as e: cls.log.fatal('msg="Failed to initialize the service, aborting" error="%s"' % e) print("Failed to initialize the service: %s\n" % e, file=sys.stderr) sys.exit(22) @classmethod def refreshconfig(cls): if time.time() > cls.lastConfigReadTime + 300: cls.lastConfigReadTime = time.time() cls.config.read('/etc/wopi/wopiserver.conf') cls.tokenvalidity = cls.config.getint('general', 'tokenvalidity') cls.log.setLevel(cls.loglevels[cls.config.get('general', 'loglevel')]) @classmethod def run(cls): if cls.useHttps: cls.log.info('msg="WOPI Server starting in standalone secure mode" port="%d" wopiurl="%s" version="%s"' % (cls.port, cls.wopiurl, WOPISERVERVERSION)) cls.app.run(host='0.0.0.0', port=cls.port, threaded=True, debug=(cls.config.get('general', 'loglevel') == 'Debug'), ssl_context=(cls.config.get('security', 'wopicert'), cls.config.get('security', 'wopikey'))) else: cls.log.info('msg="WOPI Server starting in unsecure/embedded mode" port="%d" wopiurl="%s" version="%s"' % (cls.port, cls.wopiurl, WOPISERVERVERSION)) cls.app.run(host='0.0.0.0', port=cls.port, threaded=True, debug=(cls.config.get('general', 'loglevel') == 'Debug')) @Wopi.app.errorhandler(Exception) def handleException(ex): if isinstance(ex, (Flask_NotFound, Flask_MethodNotAllowed)): return ex return utils.logGeneralExceptionAndReturn(ex, flask.request) @Wopi.app.route("/", methods=['GET']) def redir(): return flask.redirect("/wopi") @Wopi.app.route("/wopi", methods=['GET']) def index(): Wopi.log.debug('msg="Accessed index page" client="%s"' % flask.request.remote_addr) return """ <html><head><title>ScienceMesh WOPI Server</title></head> <body> <div align="center" style="color:#000080; padding-top:50px; font-family:Verdana; size:11"> This is the ScienceMesh IOP <a href=http://wopi.readthedocs.io>WOPI</a> server to support online office-like editors.<br> The service includes support for non-WOPI-native apps through a bridge extension.<br> To use this service, please log in to your EFSS Storage and click on a supported document.</div> <div style="position: absolute; bottom: 10px; left: 10px; width: 99%%;"><hr> <i>ScienceMesh WOPI Server %s at %s. Powered by Flask %s for Python %s</i>. 
</body> </html> """ % (WOPISERVERVERSION, socket.getfqdn(), flask.__version__, python_version()) @Wopi.app.route("/wopi/iop/openinapp", methods=['GET']) @Wopi.metrics.do_not_track() @Wopi.metrics.counter('open_by_app', 'Number of /open calls by appname', labels={ 'open_type': lambda: flask.request.args.get('appname') }) def iopOpenInApp(): Wopi.refreshconfig() req = flask.request if req.headers.get('Authorization') != 'Bearer ' + Wopi.iopsecret: Wopi.log.warning('msg="iopOpenInApp: unauthorized access attempt, missing authorization token" ' 'client="%s" clientAuth="%s"' % (req.remote_addr, req.headers.get('Authorization'))) return UNAUTHORIZED try: usertoken = req.headers['TokenHeader'] except KeyError: Wopi.log.warning('msg="iopOpenInApp: missing TokenHeader in request" client="%s"' % req.remote_addr) return UNAUTHORIZED fileid = req.args.get('fileid', '') if not fileid: Wopi.log.warning('msg="iopOpenInApp: fileid must be provided" client="%s"' % req.remote_addr) return 'Missing fileid argument', http.client.BAD_REQUEST try: viewmode = utils.ViewMode(req.args['viewmode']) except (KeyError, ValueError) as e: Wopi.log.warning('msg="iopOpenInApp: invalid viewmode parameter" client="%s" viewmode="%s" error="%s"' % (req.remote_addr, req.args.get('viewmode'), e)) return 'Missing or invalid viewmode argument', http.client.BAD_REQUEST username = req.args.get('username', '') wopiuser = req.args.get('userid', utils.randomString(10)) folderurl = url_unquote(req.args.get('folderurl', '%2F')) endpoint = req.args.get('endpoint', 'default') appname = url_unquote(req.args.get('appname', '')) appurl = url_unquote(req.args.get('appurl', '')).strip('/') appviewurl = url_unquote(req.args.get('appviewurl', appurl)).strip('/') if not appname or not appurl: Wopi.log.warning('msg="iopOpenInApp: app-related arguments must be provided" client="%s"' % req.remote_addr) return 'Missing appname or appurl arguments', http.client.BAD_REQUEST if bridge.issupported(appname): apikey = req.headers.get('ApiKey') appinturl = url_unquote(req.args.get('appinturl', appurl)) try: bridge.WB.loadplugin(appname, appurl, appinturl, apikey) except ValueError: return 'Failed to load WOPI bridge plugin for %s' % appname, http.client.INTERNAL_SERVER_ERROR appurl = appviewurl = Wopi.wopiurl + '/wopi/bridge/open' try: inode, acctok = utils.generateAccessToken(usertoken, fileid, viewmode, (username, wopiuser), folderurl, endpoint, (appname, appurl, appviewurl)) except IOError as e: Wopi.log.info('msg="iopOpenInApp: remote error on generating token" client="%s" user="%s" ' 'friendlyname="%s" mode="%s" endpoint="%s" reason="%s"' % (req.remote_addr, usertoken[-20:], username, viewmode, endpoint, e)) return 'Remote error, file not found or file is a directory', http.client.NOT_FOUND res = {} if bridge.issupported(appname): try: res['app-url'], res['form-parameters'] = bridge.appopen(url_unquote(utils.generateWopiSrc(inode)), acctok) except bridge.FailedOpen as foe: return foe.msg, foe.statuscode else: res['app-url'] = appurl if viewmode == utils.ViewMode.READ_WRITE else appviewurl res['app-url'] += '%sWOPISrc=%s' % ('&' if '?' 
in res['app-url'] else '?', utils.generateWopiSrc(inode)) res['form-parameters'] = {'access_token' : acctok} return flask.Response(json.dumps(res), mimetype='application/json') @Wopi.app.route("/wopi/iop/download", methods=['GET']) def iopDownload(): try: acctok = jwt.decode(flask.request.args['access_token'], Wopi.wopisecret, algorithms=['HS256']) if acctok['exp'] < time.time(): raise jwt.exceptions.ExpiredSignatureError resp = flask.Response(storage.readfile(acctok['endpoint'], acctok['filename'], acctok['userid']), mimetype='application/octet-stream') resp.headers['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(acctok['filename']) resp.status_code = http.client.OK Wopi.log.info('msg="cboxDownload: direct download succeeded" filename="%s" user="%s" token="%s"' % (acctok['filename'], acctok['userid'][-20:], flask.request.args['access_token'][-20:])) return resp except IOError as e: Wopi.log.info('msg="Requested file not found" filename="%s" token="%s" error="%s"' % (acctok['filename'], flask.request.args['access_token'][-20:], e)) return 'File not found', http.client.NOT_FOUND except (jwt.exceptions.DecodeError, jwt.exceptions.ExpiredSignatureError) as e: Wopi.log.warning('msg="Signature verification failed" client="%s" requestedUrl="%s" token="%s"' % (flask.request.remote_addr, flask.request.base_url, flask.request.args['access_token'])) return 'Invalid access token', http.client.UNAUTHORIZED except KeyError as e: Wopi.log.warning('msg="Invalid access token or request argument" error="%s" request="%s"' % (e, flask.request.__dict__)) return 'Invalid request', http.client.UNAUTHORIZED @Wopi.app.route("/wopi/iop/list", methods=['GET']) def iopGetOpenFiles(): req = flask.request if req.headers.get('Authorization') != 'Bearer ' + Wopi.iopsecret: Wopi.log.warning('msg="iopGetOpenFiles: unauthorized access attempt, missing authorization token" ' 'client="%s"' % req.remote_addr) return UNAUTHORIZED jlist = {} for f in list(Wopi.openfiles.keys()): jlist[f] = (Wopi.openfiles[f][0], tuple(Wopi.openfiles[f][1])) Wopi.log.info('msg="iopGetOpenFiles: returning list of open files" client="%s"' % req.remote_addr) return flask.Response(json.dumps(jlist), mimetype='application/json') @Wopi.app.route("/wopi/files/<fileid>", methods=['GET']) def wopiCheckFileInfo(fileid): return core.wopi.checkFileInfo(fileid) @Wopi.app.route("/wopi/files/<fileid>/contents", methods=['GET']) def wopiGetFile(fileid): return core.wopi.getFile(fileid) @Wopi.app.route("/wopi/files/<fileid>", methods=['POST']) def wopiFilesPost(fileid): Wopi.refreshconfig() try: acctok = jwt.decode(flask.request.args['access_token'], Wopi.wopisecret, algorithms=['HS256']) if acctok['exp'] < time.time(): raise jwt.exceptions.ExpiredSignatureError headers = flask.request.headers op = headers['X-WOPI-Override'] if op != 'GET_LOCK' and utils.ViewMode(acctok['viewmode']) != utils.ViewMode.READ_WRITE: return 'Attempting to perform a write operation using a read-only token', http.client.UNAUTHORIZED if op in ('LOCK', 'REFRESH_LOCK'): return core.wopi.setLock(fileid, headers, acctok) if op == 'UNLOCK': return core.wopi.unlock(fileid, headers, acctok) if op == 'GET_LOCK': return core.wopi.getLock(fileid, headers, acctok) if op == 'PUT_RELATIVE': return core.wopi.putRelative(fileid, headers, acctok) if op == 'DELETE': return core.wopi.deleteFile(fileid, headers, acctok) if op == 'RENAME_FILE': return core.wopi.renameFile(fileid, headers, acctok) Wopi.log.warning('msg="Unknown/unsupported operation" operation="%s"' % op) return 'Not 
supported operation found in header', http.client.NOT_IMPLEMENTED except (jwt.exceptions.DecodeError, jwt.exceptions.ExpiredSignatureError) as e: Wopi.log.warning('msg="Signature verification failed" client="%s" requestedUrl="%s" error="%s" token="%s"' % (flask.request.remote_addr, flask.request.base_url, e, flask.request.args['access_token'])) return 'Invalid access token', http.client.UNAUTHORIZED except KeyError as e: Wopi.log.warning('msg="Missing argument" client="%s" requestedUrl="%s" error="%s" token="%s"' % (flask.request.remote_addr, flask.request.base_url, e, flask.request.args.get('access_token'))) return 'Missing argument: %s' % e, http.client.BAD_REQUEST @Wopi.app.route("/wopi/files/<fileid>/contents", methods=['POST']) def wopiPutFile(fileid): return core.wopi.putFile(fileid) @Wopi.app.route("/wopi/cbox/lock", methods=['GET', 'POST']) @Wopi.metrics.counter('lock_by_ext', 'Number of /lock calls by file extension', labels={'open_type': lambda: (flask.request.args['filename'].split('.')[-1] if 'filename' in flask.request.args and '.' in flask.request.args['filename'] else 'noext') if flask.request.method == 'POST' else 'query' }) def cboxLock(): req = flask.request if req.headers.get('Authorization') != 'Bearer ' + Wopi.iopsecret: Wopi.log.warning('msg="cboxLock: unauthorized access attempt, missing authorization token" ' 'client="%s"' % req.remote_addr) return UNAUTHORIZED filename = req.args['filename'] userid = req.args['userid'] if 'userid' in req.args else '0:0' endpoint = req.args['endpoint'] if 'endpoint' in req.args else 'default' return core.ioplocks.ioplock(filename, userid, endpoint, req.method == 'GET') @Wopi.app.route("/wopi/cbox/unlock", methods=['POST']) def cboxUnlock(): req = flask.request if req.headers.get('Authorization') != 'Bearer ' + Wopi.iopsecret: Wopi.log.warning('msg="cboxUnlock: unauthorized access attempt, missing authorization token" ' 'client="%s"' % req.remote_addr) return UNAUTHORIZED filename = req.args['filename'] userid = req.args['userid'] if 'userid' in req.args else '0:0' endpoint = req.args['endpoint'] if 'endpoint' in req.args else 'default' return core.ioplocks.iopunlock(filename, userid, endpoint) def _guireturn(msg): return '<div align="center" style="color:#808080; padding-top:50px; font-family:Verdana">%s</div>' % msg @Wopi.app.route("/wopi/bridge/open", methods=["GET"])
Apache License 2.0
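A hedged client-side sketch of calling the `/wopi/bridge/open` route above: the handler expects a percent-encoded `WOPISrc` plus an `access_token` query parameter and answers with a redirect to the bridged app. The host, port and token value are made up; only the route and parameter names come from the handler.

import requests
from urllib.parse import quote

wopisrc = quote('https://wopi.example.org/wopi/files/abc123', safe='')
resp = requests.get(
    'http://localhost:8880/wopi/bridge/open',
    params={'WOPISrc': wopisrc, 'access_token': '<acctok>'},
    allow_redirects=False,
)
print(resp.status_code)                      # 302 on success
print(resp.headers.get('Location'))          # the app URL chosen by bridge.appopen()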
exopy/exopy
exopy/testing/util.py
run_pending_tasks
python
def run_pending_tasks(qtbot, timeout=1000):
    def check_pending_tasks():
        assert not qtbot.enaml_app.has_pending_tasks()
    qtbot.wait_until(check_pending_tasks)
Run all enaml pending tasks. WARNING: this may not run the Qt event loop if no task is pending. This will only deal with tasks scheduled through the schedule function (or Application method) Parameters ---------- timeout : int, optional Timeout after which the operation should fail in ms
https://github.com/exopy/exopy/blob/aeda9bcfad2d2f76903c7ad2800ea2110ff689b2/exopy/testing/util.py#L40-L58
import os import gc import weakref import inspect from contextlib import contextmanager from pprint import pformat import enaml from configobj import ConfigObj from atom.api import Atom, Bool from enaml.application import timed_call from enaml.widgets.api import Window, Dialog, PopupView with enaml.imports(): from enaml.stdlib.message_box import MessageBox APP_PREFERENCES = os.path.join('app', 'preferences') APP_DIR_CONFIG = 'app_directory.ini' def exopy_path(): from .fixtures import EXOPY assert EXOPY return EXOPY
BSD 3-Clause New or Revised License
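A hedged pytest sketch of the helper above: queue one task through enaml's `schedule()` and block until the application reports no pending tasks. The `exopy_qtbot` fixture name is an assumption; only `run_pending_tasks` and enaml's `schedule` come from the sources shown here.

from enaml.application import schedule
from exopy.testing.util import run_pending_tasks

def test_scheduled_task_completes(exopy_qtbot):
    seen = []
    schedule(seen.append, args=(1,))         # enqueue work on the enaml Application
    run_pending_tasks(exopy_qtbot)           # wait until has_pending_tasks() is False
    assert seen == [1]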
ruipgil/tracktotrip
tracktotrip/track.py
Track.to_json
python
def to_json(self):
    return {
        'name': self.name,
        'segments': [segment.to_json() for segment in self.segments],
        'meta': self.meta
    }
Converts track to a JSON serializable format Returns: Map with the name, and segments of the track.
https://github.com/ruipgil/tracktotrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/track.py#L231-L241
from copy import deepcopy from os.path import basename from datetime import timedelta import gpxpy import numpy as np from rtree import index from .segment import Segment from .similarity import segment_similarity DEFAULT_FILE_NAME_FORMAT = "%Y-%m-%d" class Track(object): def __init__(self, name, segments): self.name = name self.meta = [] self.segments = sorted(segments, key=lambda s: s.points[0].time) def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT): if len(self.segments) > 0: return self.segments[0].points[0].time.strftime(name_format) + ".gpx" else: return "EmptyTrack" def remove_noise(self): for segment in self.segments: segment.remove_noise() return self def smooth(self, strategy, noise): print noise for segment in self.segments: segment.smooth(noise, strategy) return self def segment(self, eps, min_time): new_segments = [] for segment in self.segments: segmented = segment.segment(eps, min_time) for seg in segmented: new_segments.append(Segment(seg)) self.segments = new_segments return self def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False): for segment in self.segments: segment.simplify(eps, max_dist_error, max_speed_error, topology_only) return self def infer_transportation_mode(self, clf, min_time): for segment in self.segments: segment.infer_transportation_mode(clf, min_time) return self def copy(self): return deepcopy(self) def to_trip( self, smooth, smooth_strategy, smooth_noise, seg, seg_eps, seg_min_time, simplify, simplify_max_dist_error, simplify_max_speed_error ): self.compute_metrics() self.remove_noise() print (smooth, seg, simplify) if smooth: self.compute_metrics() self.smooth(smooth_strategy, smooth_noise) if seg: self.compute_metrics() self.segment(seg_eps, seg_min_time) if simplify: self.compute_metrics() self.simplify(0, simplify_max_dist_error, simplify_max_speed_error) self.compute_metrics() return self def infer_transportation_modes(self, dt_threshold=10): self.segments = [ segment.infer_transportation_mode(dt_threshold=dt_threshold) for segment in self.segments ] return self def infer_location( self, location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit ): self.segments = [ segment.infer_location( location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit ) for segment in self.segments ] return self
MIT License
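A hedged sketch of persisting the dict returned by `to_json`; the output filename reuses `generate_name` from the same class, and it assumes `track` was built (and its segments made JSON-serializable) by tracktotrip's own loaders.

import json

out_name = track.generate_name().replace('.gpx', '.json')
with open(out_name, 'w') as fp:
    json.dump(track.to_json(), fp, indent=2)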
rycus86/prometheus_flask_exporter
prometheus_flask_exporter/__init__.py
PrometheusMetrics.export_defaults
python
def export_defaults(self, buckets=None, group_by='path', latency_as_histogram=True,
                    prefix='flask', app=None, **kwargs):
    if app is None:
        app = self.app or current_app

    if not prefix:
        prefix = self._defaults_prefix or 'flask'

    if kwargs.get('group_by_endpoint') is True:
        warnings.warn(
            'The `group_by_endpoint` argument of '
            '`PrometheusMetrics.export_defaults` is deprecated since 0.4.0, '
            'please use the new `group_by` argument.',
            DeprecationWarning
        )
        duration_group = 'endpoint'
    elif group_by:
        duration_group = group_by
    else:
        duration_group = 'path'

    if callable(duration_group):
        duration_group_name = duration_group.__name__
    else:
        duration_group_name = duration_group

    if prefix == NO_PREFIX:
        prefix = ""
    else:
        prefix = prefix + "_"

    try:
        self.info(
            '%sexporter_info' % prefix,
            'Information about the Prometheus Flask exporter',
            version=self.version
        )
    except ValueError:
        return

    labels = self._get_combined_labels(None)

    if latency_as_histogram:
        buckets_as_kwargs = {}
        if buckets is not None:
            buckets_as_kwargs['buckets'] = buckets

        request_duration_metric = Histogram(
            '%shttp_request_duration_seconds' % prefix,
            'Flask HTTP request duration in seconds',
            ('method', duration_group_name, 'status') + labels.keys(),
            registry=self.registry,
            **buckets_as_kwargs
        )
    else:
        request_duration_metric = Summary(
            '%shttp_request_duration_seconds' % prefix,
            'Flask HTTP request duration in seconds',
            ('method', duration_group_name, 'status') + labels.keys(),
            registry=self.registry
        )

    counter_labels = ('method', 'status') + labels.keys()
    request_total_metric = Counter(
        '%shttp_request_total' % prefix,
        'Total number of HTTP requests',
        counter_labels,
        registry=self.registry
    )

    request_exceptions_metric = Counter(
        '%shttp_request_exceptions_total' % prefix,
        'Total number of HTTP requests which resulted in an exception',
        counter_labels,
        registry=self.registry
    )

    def before_request():
        request.prom_start_time = default_timer()

    def after_request(response):
        if hasattr(request, 'prom_do_not_track') or hasattr(request, 'prom_exclude_all'):
            return response

        if self.excluded_paths:
            if any(pattern.match(request.path) for pattern in self.excluded_paths):
                return response

        if hasattr(request, 'prom_start_time'):
            total_time = max(default_timer() - request.prom_start_time, 0)

            if callable(duration_group):
                group = duration_group(request)
            else:
                group = getattr(request, duration_group)

            request_duration_labels = {
                'method': request.method,
                'status': _to_status_code(response.status_code),
                duration_group_name: group
            }
            request_duration_labels.update(labels.values_for(response))

            request_duration_metric.labels(**request_duration_labels).observe(total_time)

        request_total_metric.labels(
            method=request.method,
            status=_to_status_code(response.status_code),
            **labels.values_for(response)
        ).inc()

        return response

    def teardown_request(exception=None):
        if not exception or hasattr(request, 'prom_do_not_track') or hasattr(request, 'prom_exclude_all'):
            return

        if self.excluded_paths:
            if any(pattern.match(request.path) for pattern in self.excluded_paths):
                return

        response = make_response('Exception: %s' % exception, 500)

        if callable(duration_group):
            group = duration_group(request)
        else:
            group = getattr(request, duration_group)

        request_exceptions_metric.labels(
            method=request.method,
            status=500,
            **labels.values_for(response)
        ).inc()

        if hasattr(request, 'prom_start_time'):
            total_time = max(default_timer() - request.prom_start_time, 0)

            request_duration_labels = {
                'method': request.method,
                'status': 500,
                duration_group_name: group
            }
            request_duration_labels.update(labels.values_for(response))

            request_duration_metric.labels(**request_duration_labels).observe(total_time)

        request_total_metric.labels(
            method=request.method,
            status=500,
            **labels.values_for(response)
        ).inc()

        return

    app.before_request(before_request)
    app.after_request(after_request)
    app.teardown_request(teardown_request)
Export the default metrics:
    - HTTP request latencies
    - HTTP request exceptions
    - Number of HTTP requests

:param buckets: the time buckets for request latencies
    (will use the default when `None`)
:param group_by: group default HTTP metrics by this request property,
    like `path`, `endpoint`, `rule`, etc. (defaults to `path`)
:param latency_as_histogram: export request latencies as a Histogram,
    otherwise use a Summary instead
    (defaults to `True` to export as a Histogram)
:param prefix: prefix to start the default metrics names with
    or `NO_PREFIX` (to skip prefix)
:param app: the Flask application
https://github.com/rycus86/prometheus_flask_exporter/blob/73efe45b755d6113cbaf03373662b4ed0b4371cd/prometheus_flask_exporter/__init__.py#L317-L496
import functools import inspect import os import re import sys import threading import warnings from timeit import default_timer from flask import Flask, Response from flask import request, make_response, current_app from flask.views import MethodViewType from prometheus_client import Counter, Histogram, Gauge, Summary from prometheus_client.exposition import choose_encoder from werkzeug.serving import is_running_from_reloader if sys.version_info[0:2] >= (3, 4): wraps = functools.wraps else: def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper try: from http import HTTPStatus def _to_status_code(response_status): if isinstance(response_status, HTTPStatus): return response_status.value else: return response_status except ImportError: def _to_status_code(response_status): return response_status NO_PREFIX = '#no_prefix' class PrometheusMetrics(object): def __init__(self, app, path='/metrics', export_defaults=True, defaults_prefix='flask', group_by='path', buckets=None, default_latency_as_histogram=True, default_labels=None, response_converter=None, excluded_paths=None, exclude_user_defaults=True, metrics_decorator=None, registry=None, **kwargs): self.app = app self.path = path self._export_defaults = export_defaults self._defaults_prefix = defaults_prefix or 'flask' self._default_labels = default_labels or {} self._default_latency_as_histogram = default_latency_as_histogram self._response_converter = response_converter or make_response self._metrics_decorator = metrics_decorator self.buckets = buckets self.version = __version__ if registry: self.registry = registry else: from prometheus_client import REGISTRY as DEFAULT_REGISTRY self.registry = DEFAULT_REGISTRY if kwargs.get('static_labels'): warnings.warn( 'The `static_labels` argument of `PrometheusMetrics` is ' 'deprecated since 0.15.0, please use the ' 'new `default_labels` argument.', DeprecationWarning ) for key, value in kwargs.get('static_labels', dict()).items(): if key not in self._default_labels: self._default_labels[key] = value if kwargs.get('group_by_endpoint') is True: warnings.warn( 'The `group_by_endpoint` argument of `PrometheusMetrics` is ' 'deprecated since 0.4.0, please use the ' 'new `group_by` argument.', DeprecationWarning ) self.group_by = 'endpoint' elif group_by: self.group_by = group_by else: self.group_by = 'path' if excluded_paths: if PrometheusMetrics._is_string(excluded_paths): excluded_paths = [excluded_paths] self.excluded_paths = [ re.compile(p) for p in excluded_paths ] else: self.excluded_paths = None self.exclude_user_defaults = exclude_user_defaults if app is not None: self.init_app(app) @classmethod def for_app_factory(cls, **kwargs): return cls(app=None, **kwargs) def init_app(self, app): if self.path: self.register_endpoint(self.path, app) if self._export_defaults: self.export_defaults( buckets=self.buckets, group_by=self.group_by, latency_as_histogram=self._default_latency_as_histogram, prefix=self._defaults_prefix, app=app ) def register_endpoint(self, path, app=None): if is_running_from_reloader() and not os.environ.get('DEBUG_METRICS'): return if app is None: app = self.app or current_app @self.do_not_track() def prometheus_metrics(): from prometheus_client import multiprocess, CollectorRegistry if 'PROMETHEUS_MULTIPROC_DIR' in os.environ or 'prometheus_multiproc_dir' in os.environ: registry = CollectorRegistry() else: registry = self.registry 
if 'name[]' in request.args: registry = registry.restricted_registry(request.args.getlist('name[]')) if 'PROMETHEUS_MULTIPROC_DIR' in os.environ or 'prometheus_multiproc_dir' in os.environ: multiprocess.MultiProcessCollector(registry) generate_latest, content_type = choose_encoder(request.headers.get("Accept")) headers = {'Content-Type': content_type} return generate_latest(registry), 200, headers if self._metrics_decorator: prometheus_metrics = self._metrics_decorator(prometheus_metrics) app.route(path)(prometheus_metrics) def start_http_server(self, port, host='0.0.0.0', endpoint='/metrics'): if is_running_from_reloader(): return app = Flask('prometheus-flask-exporter-%d' % port) self.register_endpoint(endpoint, app) def run_app(): app.run(host=host, port=port) thread = threading.Thread(target=run_app) thread.daemon = True thread.start()
MIT License
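A minimal usage sketch for the export_defaults record above: in this library the defaults are normally configured through the PrometheusMetrics constructor, which forwards buckets and group_by to export_defaults during init_app. The Flask app, the /ping route, and the bucket values below are illustrative assumptions, not code from the repository.

from flask import Flask
from prometheus_flask_exporter import PrometheusMetrics

app = Flask(__name__)

# export_defaults() runs inside init_app() with these settings:
# group request duration by endpoint instead of path, and use
# custom histogram buckets (in seconds).
metrics = PrometheusMetrics(
    app,
    group_by='endpoint',
    buckets=(0.05, 0.1, 0.25, 0.5, 1.0, 2.5),  # illustrative values
)

@app.route('/ping')
def ping():
    return 'pong'

if __name__ == '__main__':
    # The default metrics are then served on /metrics.
    app.run(port=5000)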
robotools/compositor
Lib/compositor/textUtilities.py
_handleSpecialCasing
python
def _handleSpecialCasing(case, glyphs, index, uniValue, converted, cmap, reversedCMAP, language):
    if language not in specialCasing:
        return False
    languageMap = specialCasing[language]
    if uniValue in languageMap:
        contextMatch = True
        context = languageMap[uniValue]["context"]
        if context:
            contextMatch = False
            if context == "After_I":
                previous = None
                for otherUniValue in reversed(glyphs[:index]):
                    previous = otherUniValue
                    if isinstance(otherUniValue, basestring):
                        break
                    combining = unicodedata.combining(unichr(otherUniValue))
                    if combining == 230:
                        previous = None
                        break
                    if combining == 0:
                        break
                if previous == convertCodeToInt("0049"):
                    contextMatch = True
            elif context == "Not_After_I":
                raise NotImplementedError
            elif context == "After_Soft_Dotted":
                previous = None
                for otherUniValue in reversed(glyphs[:index]):
                    previous = otherUniValue
                    if isinstance(otherUniValue, basestring):
                        break
                    combining = unicodedata.combining(unichr(otherUniValue))
                    if combining == 230:
                        previous = None
                        break
                    if combining == 0:
                        break
                if previous in softDotted:
                    contextMatch = True
            elif context == "Not_After_Soft_Dotted":
                raise NotImplementedError
            elif context == "More_Above":
                next = None
                for otherUniValue in glyphs[index+1:]:
                    next = otherUniValue
                    if isinstance(otherUniValue, basestring):
                        break
                    combining = unicodedata.combining(unichr(otherUniValue))
                    if combining == 230:
                        contextMatch = True
                        break
                    else:
                        break
            elif context == "Not_More_Above":
                raise NotImplementedError
            elif context == "Before_Dot":
                raise NotImplementedError
            elif context == "Not_Before_Dot":
                next = None
                contextMatch = True
                for otherUniValue in glyphs[index+1:]:
                    if isinstance(otherUniValue, basestring):
                        break
                    if otherUniValue == convertCodeToInt("0307"):
                        contextMatch = False
                        break
                    else:
                        combining = unicodedata.combining(unichr(otherUniValue))
                        if combining == 0 or combining == 230:
                            break
            elif context == "Final_Sigma":
                glyphNames = [cmap.get(i, i) for i in glyphs]
                if isWordBreakAfter(glyphNames, index, reversedCMAP):
                    contextMatch = True
            else:
                raise NotImplementedError(context)
        if contextMatch:
            conversion = languageMap[uniValue][case]
            if conversion is None:
                return True
            if not isinstance(conversion, tuple):
                conversion = [conversion]
            for code in conversion:
                converted.append(code)
            return True
    return False
Handle a language specific lookup. Returns a boolean indicating if a change was made.
https://github.com/robotools/compositor/blob/226e6baf83c052dd35f9c3d5942a41d9879b00bc/Lib/compositor/textUtilities.py#L87-L208
from fontTools.misc.py23 import * import unicodedata from compositor.cmap import reverseCMAP from compositor.caseConversionMaps import lowerToSingleUpper, upperToSingleLower, specialCasing, softDotted from compositor.wordBreakProperties import wordBreakProperties def convertCase(case, glyphNames, cmap, reversedCMAP, language=None, fallbackGlyph=".notdef"): glyphs = [] for glyphName in glyphNames: uniValue = reversedCMAP.get(glyphName) if uniValue is None: glyphs.append(glyphName) else: glyphs.append(uniValue[0]) converted = [] for index, uniValue in enumerate(glyphs): if isinstance(uniValue, basestring): converted.append(uniValue) continue if language is not None: madeChange = _handleSpecialCasing(case, glyphs, index, uniValue, converted, cmap, reversedCMAP, language) if madeChange: continue madeChange = _handleSpecialCasing(case, glyphs, index, uniValue, converted, cmap, reversedCMAP, None) if madeChange: continue if case == "upper": d = lowerToSingleUpper else: d = upperToSingleLower if uniValue in d: converted.append(d[uniValue]) continue converted.append(uniValue) glyphNames = [] for uniValue in converted: if isinstance(uniValue, basestring): glyphNames.append(uniValue) continue glyphNames.append(cmap.get(uniValue, fallbackGlyph)) return glyphNames def convertCodeToInt(code): if not code: return None if " " in code: return tuple([convertCodeToInt(i) for i in code.split(" ")]) return int(code, 16)
MIT License
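A standalone sketch illustrating why the language-specific casing handled above exists. It uses only the standard library and does not call compositor; the turkish_lower mapping is an assumption standing in for what a Turkish specialCasing entry roughly encodes.

import unicodedata

# Turkish: the lowercase of U+0049 "I" is U+0131 (dotless i), not "i".
turkish_lower = {0x0049: 0x0131}  # stand-in for a language-specific entry
print(unicodedata.name(chr(turkish_lower[ord('I')])))
# LATIN SMALL LETTER DOTLESS I

# Greek: capital sigma U+03A3 lowercases to the final form U+03C2 at the
# end of a word, which is the "Final_Sigma" context handled above.
# CPython's own str.lower() applies this rule:
print(hex(ord('ΟΔΟΣ'.lower()[-1])))   # 0x3c2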
mgymrek/pybamview
pybamview/app.py
configure_template_filters
python
def configure_template_filters(app):
    @app.template_filter()
    def isnuc(x):
        return str(x).upper() in ["A", "C", "G", "T"]
Configure custom Jinja2 template filters.
https://github.com/mgymrek/pybamview/blob/719c4251769510260d29287074845650b399a3d0/pybamview/app.py#L42-L47
from flask import Flask from .settings import DefaultConfig from . import browser DEFAULT_BLUEPRINTS = (browser.blueprint,) def create_app(config_object=None, blueprints=None): if blueprints is None: blueprints = DEFAULT_BLUEPRINTS app = Flask(__name__, template_folder="browser/templates", static_folder="browser/static") configure_app(app, config_obj=config_object) register_blueprints(app, blueprints=blueprints) configure_template_filters(app) return app def configure_app(app, config_obj=None): app.config.from_object(config_obj or DefaultConfig) def register_blueprints(app, blueprints): for blueprint in blueprints: app.register_blueprint(blueprint)
MIT License
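A small sketch showing how a filter registered with app.template_filter(), as in the record above, is then used inside a Jinja2 template. The template string and the base value are made up for illustration.

from flask import Flask, render_template_string

app = Flask(__name__)

@app.template_filter()
def isnuc(x):
    # Same filter as in the record: True for single nucleotide characters.
    return str(x).upper() in ["A", "C", "G", "T"]

with app.app_context():
    # The filter is referenced by its function name inside templates.
    rendered = render_template_string(
        "{% if base|isnuc %}{{ base }} is a nucleotide{% endif %}",
        base="g",
    )
    print(rendered)  # -> "g is a nucleotide"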
state-of-the-art/blendnet
BlendNet/providers/azure/__init__.py
getStorageUrl
python
def getStorageUrl(session_id):
    default_account = 'blendnet{session_id}'.format(session_id=session_id.lower())
    default_name = 'blendnet-{session_id}'.format(session_id=session_id.lower())
    return 'https://%s.blob.core.windows.net/%s' % (
        (AZ_CONF.get('storage_account') or default_account).format(session_id=session_id.lower()),
        (AZ_CONF.get('storage_container') or default_name).format(session_id=session_id.lower()),
    )
Returns the Azure storage URL
https://github.com/state-of-the-art/blendnet/blob/a313333f109c8d7cae8615f747767a42576e3d21/BlendNet/providers/azure/__init__.py#L829-L836
__all__ = [ 'Processor', 'Manager', 'Agent', 'Instance', ] class AzToolException(Exception): pass import os import sys import json import platform import tempfile import ssl import site import urllib import subprocess import pathlib import time import datetime as dt METADATA_URL = 'http://169.254.169.254/metadata/instance/' LOCATION = None AZ_CONF = {} AZ_EXEC_OPTIONS = None AZ_CACHE_LOCATIONS = [] AZ_CACHE_SIZES = None def _requestMetadata(path, verbose = False): req = urllib.request.Request(METADATA_URL+path+'?api-version=2020-10-01&format=text"') req.add_header('Metadata','true') try: while True: with urllib.request.urlopen(req, timeout=1) as res: if res.getcode() == 503: print('WARN: Azure: Unable to reach metadata serivce') time.sleep(5) continue data = res.read() try: return data.decode('utf-8') except (LookupError, UnicodeDecodeError): return data.decode('iso-8859-1') except Exception as e: if verbose: print('WARN: Azure: Metadata is not available ' + path) return None def checkLocation(): global LOCATION if LOCATION is not None: return LOCATION LOCATION = _requestMetadata('compute/location', True) is not None return LOCATION def _executeAzTool(*args, data=None): cmd = (AZ_CONF.get('az_exec_path'),) + args + AZ_EXEC_OPTIONS result = subprocess.run(cmd, input=data, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if result.returncode != 0: raise AzToolException('Az tool returned %d during execution of "%s": %s' % ( result.returncode, cmd, result.stderr)) data = None try: data = json.loads(result.stdout) except UnicodeDecodeError as e: print('WARN: Azure: Found UnicodeDecodeError during parsing of the az output, switching to ISO-8859-1:', str(e)) data = json.loads(result.stdout.decode('iso-8859-1')) except json.decoder.JSONDecodeError: try: data = {'stdout': result.stdout.decode('utf-8')} except (LookupError, UnicodeDecodeError): data = {'stdout': result.stdout.decode('iso-8859-1')} return data def _initAzTool(): if checkLocation(): _executeAzTool('login', '--identity') global AZ_CACHE_SIZES, AZ_CACHE_LOCATIONS AZ_CACHE_SIZES = None locations = _executeAzTool('account', 'list-locations') AZ_CACHE_LOCATIONS = [('please select', 'please select', 'please select')] AZ_CACHE_LOCATIONS += sorted( (l['name'], l['name'], l['regionalDisplayName']) for l in locations ) return True def initProvider(settings = dict()): from .. 
import findPATHExec global AZ_CONF AZ_CONF = settings if not AZ_CONF.get('az_exec_path'): AZ_CONF['az_exec_path'] = findPATHExec('az') if not AZ_CONF.get('location'): AZ_CONF['location'] = 'westus' if checkLocation(): AZ_CONF['location'] = _requestMetadata('instance/location') if not AZ_CONF.get('resource_group'): AZ_CONF['resource_group'] = _requestMetadata('instance/resourceGroupName') if checkLocation() else 'blendnet' if not AZ_CONF.get('storage_account'): AZ_CONF['storage_account'] = 'blendnet{session_id}' if not AZ_CONF['az_exec_path']: return 'Unable to find "az" in PATH - check the provider documentation and install the requirements' if not os.path.isfile(AZ_CONF['az_exec_path']): path = AZ_CONF['az_exec_path'] AZ_CONF['az_exec_path'] = {} return 'The provided "az" exec path is invalid: %s' % (path,) global AZ_EXEC_OPTIONS AZ_EXEC_OPTIONS = ('--output', 'json') if not _initAzTool(): AZ_CONF['az_exec_path'] = None return 'Error during execution of "az" tool' print('INFO: Azure: Using az tool:', AZ_CONF['az_exec_path']) return True def checkDependencies(settings): if not AZ_CONF.get('az_exec_path'): return initProvider(settings) return True def _getLocationItems(scene = None, context = None): return AZ_CACHE_LOCATIONS def getSettings(): return { 'az_exec_path': { 'name': 'Path to az exec', 'description': 'Full path to the az or az.exe from Azure CLI, by default uses PATH env to find it', 'type': 'path', 'value': AZ_CONF.get('az_exec_path'), }, 'location': { 'name': 'Location of resources', 'description': 'Select the required location for resources provision', 'type': 'choice', 'values': _getLocationItems, 'value': AZ_CONF.get('location'), }, 'resource_group': { 'name': 'Resource group', 'description': 'Set the resource group name to organize your resources, will be created if not exists', 'type': 'string', 'value': AZ_CONF.get('resource_group'), }, 'storage_account': { 'name': 'Storage account', 'description': '''What kind of storage account to use - in case it's empty will create the new one as "blendnet{session_id}"''', 'type': 'string', 'value': AZ_CONF.get('storage_account'), }, 'storage_container': { 'name': 'Storage container', 'description': '''What the storage container to use - in case it's empty will create the new one as "blendnet-{session_id}"''', 'type': 'string', 'value': AZ_CONF.get('storage_container'), }, } def _getInstanceTypeInfo(name): try: getInstanceTypes() return { 'cpu': AZ_CACHE_SIZES[name][0], 'mem': AZ_CACHE_SIZES[name][1] } except: print('ERROR: Azure: Unable to get the Azure machine type info for:', name) return None def _verifyQuotas(avail): try: import bpy except: return [] errors = [] prefs = bpy.context.preferences.addons[__package__.split('.', 1)[0]].preferences manager_info = _getInstanceTypeInfo(prefs.manager_instance_type) agents_info = _getInstanceTypeInfo(prefs.manager_agent_instance_type) agents_num = prefs.manager_agents_max if manager_info: if avail.get('Total vCPUs', 0) < manager_info['cpu']: errors.append('Available "Total vCPUs" is too small to provision the Manager') if avail.get('IP Addresses', 0) < 1: errors.append('Available "Public IP Addresses" is too small to provision the Manager') else: errors.append('Unable to get the Manager type info to validate quotas') if agents_info: if avail.get('Total vCPUs', 0) < agents_info['cpu'] * agents_num: errors.append('Available "Total vCPUs" is too small to provision the Agents') else: errors.append('Unable to get the Agents type info to validate quotas') if manager_info and agents_info: if 
avail.get('Virtual Machines', 0) < 1 + agents_num: errors.append('Available "Virtual Machines" is too small to provision the Manager and Agents') else: errors.append('Unable to get the Manager and Agents type info to validate quotas') if errors: errors.append('You can request Azure project quotas increase to get better experience') return errors def getProviderInfo(): configs = dict() try: useful_quotas = { 'cores': 'Total vCPUs', 'virtualMachines': 'Virtual Machines', 'PublicIPAddresses': 'IP Addresses', } avail = {} data = _executeAzTool('vm', 'list-usage', '--location', AZ_CONF['location']) for q in data: if q['name']['value'] in useful_quotas: avail[useful_quotas[q['name']['value']]] = float(q['limit']) - float(q['currentValue']) configs['Quota: ' + useful_quotas[q['name']['value']]] = '%s, usage: %s' % (q['limit'], q['currentValue']) data = _executeAzTool('network', 'list-usages', '--location', AZ_CONF['location']) for q in data: if q['name']['value'] in useful_quotas: avail[useful_quotas[q['name']['value']]] = float(q['limit']) - float(q['currentValue']) configs['Quota: ' + useful_quotas[q['name']['value']]] = '%s, usage: %s' % (q['limit'], q['currentValue']) errors = _verifyQuotas(avail) if errors: configs['ERRORS'] = errors except AzToolException as e: configs['ERRORS'] = ['Looks like access to the API is restricted ' '- please check your permissions: %s' % e] return configs def getInstanceTypes(): global AZ_CACHE_SIZES try: if not AZ_CACHE_SIZES: data = _executeAzTool('vm', 'list-sizes', '--location', AZ_CONF['location']) AZ_CACHE_SIZES = dict([ (d['name'], (d['numberOfCores'], d['memoryInMb'])) for d in data ]) return dict([ (k, ('%s vCPUs %s GB RAM' % (v[0], v[1]/1024.0), v[1]/1024.0)) for k, v in AZ_CACHE_SIZES.items() ]) except AzToolException as e: return {'ERROR': 'Looks like access to the API is restricted ' '- please check your permissions: %s' % e} return {} def _createResourceGroup(): data = _executeAzTool('group', 'list', '--query', "[?location=='{}']".format(AZ_CONF['location'])) groups_list = ( res['name'] for res in data ) if AZ_CONF['resource_group'] not in groups_list: _executeAzTool('group', 'create', '--location', AZ_CONF['location'], '--name', AZ_CONF['resource_group']) def _createIdentities(): print('INFO: Azure: Creating the identity blendnet-agent') agent = _executeAzTool('identity', 'create', '--location', AZ_CONF['location'], '--resource-group', AZ_CONF['resource_group'], '--name', 'blendnet-agent') print('INFO: Azure: Creating the identity blendnet-manager') mngr = _executeAzTool('identity', 'create', '--location', AZ_CONF['location'], '--resource-group', AZ_CONF['resource_group'], '--name', 'blendnet-manager') _executeAzTool('role', 'assignment', 'create', '--role', 'Reader and Data Access', '--assignee-object-id', agent['principalId'], '--description', 'Allow to download from storage for BlendNet Agent', '--resource-group', AZ_CONF['resource_group']) _executeAzTool('role', 'assignment', 'create', '--role', 'Network Contributor', '--assignee-object-id', mngr['principalId'], '--description', 'Allow to create Agent VMs for BlendNet Manager', '--resource-group', AZ_CONF['resource_group']) _executeAzTool('role', 'assignment', 'create', '--role', 'Virtual Machine Contributor', '--assignee-object-id', mngr['principalId'], '--description', 'Allow to create Agent VMs for BlendNet Manager', '--resource-group', AZ_CONF['resource_group']) _executeAzTool('role', 'assignment', 'create', '--role', 'Managed Identity Operator', '--assignee-object-id', 
mngr['principalId'], '--description', 'Allow to create Agent VMs for BlendNet Manager', '--scope', agent['id']) print('INFO: Azure: Created identities') def createInstanceManager(cfg): _createResourceGroup() _createIdentities() image = 'Debian:debian-10:10:latest' account = urllib.parse.urlparse(cfg['storage_url']).hostname.split('.')[0] container = urllib.parse.urlparse(cfg['storage_url']).path.split('/')[-1] startup_script = '''#!/bin/sh echo '--> Check for blender dependencies' dpkg -l libxrender1 libxi6 libgl1 if [ $? -gt 0 ]; then apt update until apt install --no-install-recommends -y libxrender1 libxi6 libgl1; do echo "Unable to install blender dependencies, repeating..." sleep 5 done fi if [ ! -x /srv/blender/blender ]; then echo '--> Download & unpack blender' echo "{blender_sha256} -" > /tmp/blender.sha256 curl -fLs "{blender_url}" | tee /tmp/blender.tar.bz2 | sha256sum -c /tmp/blender.sha256 || (echo "ERROR: checksum of the blender binary is incorrect"; exit 1) mkdir -p /srv/blender tar -C /srv/blender --strip-components=1 --checkpoint=10000 --checkpoint-action=echo='Unpacked %{{r}}T' -xf /tmp/blender.tar.bz2 fi # Azure instances has no preinstalled az cli if ! which az; then echo '--> Install az CLI' curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash fi cat <<'EOF' > /usr/local/bin/blendnet_cloud_init.sh #!/bin/sh -e # Login to identity az login --identity echo '--> Update the BlendNet manager' az storage copy --recursive --source-account-name {storage_account} --source-container {storage_name} --source-blob 'work_manager/*' -d "$(getent passwd blendnet-user | cut -d: -f6)" # Remove not working due to exception: Exception: MSI auth not yet supported. # az storage remove --recursive --account-name {storage_account} --container-name {storage_name} --name work_manager az storage blob delete-batch --account-name {storage_account} --source {storage_name} --pattern 'work_manager/*' az storage copy --recursive --source-account-name {storage_account} --source-container {storage_name} --source-blob 'blendnet' -d /srv chown -R blendnet-user .azure .azcopy EOF chmod +x /usr/local/bin/blendnet_cloud_init.sh adduser --shell /bin/false --disabled-password blendnet-user cat <<'EOF' > /etc/systemd/system/blendnet-manager.service [Unit] Description=BlendNet Manager Service After=network-online.target google-network-daemon.service [Service] User=blendnet-user WorkingDirectory=~ Type=simple ExecStartPre=+/usr/local/bin/blendnet_cloud_init.sh ExecStart=/srv/blender/blender -b -noaudio -P /srv/blendnet/manager.py Restart=always TimeoutStopSec=60 StandardOutput=syslog StandardError=syslog [Install] WantedBy=multi-user.target EOF echo '--> Run the BlendNet manager' systemctl daemon-reload systemctl enable blendnet-manager.service systemctl start blendnet-manager.service '''.format( blender_url=cfg['dist_url'], blender_sha256=cfg['dist_checksum'], session_id=cfg['session_id'], storage_account=account, storage_name=container, ) options = [ 'vm', 'create', '--name', cfg['instance_name'], '--resource-group', AZ_CONF['resource_group'], '--image', image, '--size', cfg['instance_type'], '--boot-diagnostics-storage', 'https://'+urllib.parse.urlparse(cfg['storage_url']).hostname+'/', '--os-disk-size-gb', '200', '--generate-ssh-keys', '--assign-identity', 'blendnet-manager', '--nsg', 'blendnet-manager', '--custom-data', startup_script, '--tags', 'app=blendnet', 'session_id='+cfg['session_id'], 'type=manager', 'vm='+cfg['instance_name'], ] print('INFO: Azure: Creating manager', cfg['instance_name']) 
_executeAzTool(*options) return cfg['instance_name'] def createInstanceAgent(cfg): image = 'Debian:debian-10:10:latest' account = urllib.parse.urlparse(cfg['storage_url']).hostname.split('.')[0] container = urllib.parse.urlparse(cfg['storage_url']).path.split('/')[-1] startup_script = '''#!/bin/sh echo '--> Check for blender dependencies' dpkg -l libxrender1 libxi6 libgl1 if [ $? -gt 0 ]; then apt update until apt install --no-install-recommends -y libxrender1 libxi6 libgl1; do echo "Unable to install blender dependencies, repeating..." sleep 5 done fi if [ ! -x /srv/blender/blender ]; then echo '--> Download & unpack blender' echo "{blender_sha256} -" > /tmp/blender.sha256 curl -fLs "{blender_url}" | tee /tmp/blender.tar.bz2 | sha256sum -c /tmp/blender.sha256 || (echo "ERROR: checksum of the blender binary is incorrect"; exit 1) mkdir -p /srv/blender tar -C /srv/blender --strip-components=1 --checkpoint=10000 --checkpoint-action=echo='Unpacked %{{r}}T' -xf /tmp/blender.tar.bz2 fi # Azure instances has no preinstalled az cli if ! which az; then echo '--> Install az CLI' curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash fi cat <<'EOF' > /usr/local/bin/blendnet_cloud_init.sh #!/bin/sh -e # Login to identity az login --identity echo '--> Update the BlendNet agent' az storage copy --recursive --source-account-name {storage_account} --source-container {storage_name} --source-blob 'work_{instance_name}/*' -d "$(getent passwd blendnet-user | cut -d: -f6)" az storage copy --recursive --source-account-name {storage_account} --source-container {storage_name} --source-blob 'blendnet' -d /srv chown -R blendnet-user .azure .azcopy EOF chmod +x /usr/local/bin/blendnet_cloud_init.sh adduser --shell /bin/false --disabled-password blendnet-user cat <<'EOF' > /etc/systemd/system/blendnet-agent.service [Unit] Description=BlendNet Agent Service After=network-online.target google-network-daemon.service [Service] User=blendnet-user WorkingDirectory=~ Type=simple ExecStartPre=+/usr/local/bin/blendnet_cloud_init.sh ExecStart=/srv/blender/blender -b -noaudio -P /srv/blendnet/agent.py Restart=always TimeoutStopSec=20 StandardOutput=syslog StandardError=syslog [Install] WantedBy=multi-user.target EOF echo '--> Run the BlendNet agent' systemctl daemon-reload systemctl enable blendnet-agent.service systemctl start blendnet-agent.service '''.format( blender_url=cfg['dist_url'], blender_sha256=cfg['dist_checksum'], storage_account=account, storage_name=container, instance_name=cfg['instance_name'], ) options = [ 'vm', 'create', '--name', cfg['instance_name'], '--resource-group', AZ_CONF['resource_group'], '--image', image, '--size', cfg['instance_type'], '--os-disk-size-gb', '200', '--boot-diagnostics-storage', 'https://'+urllib.parse.urlparse(cfg['storage_url']).hostname+'/', '--generate-ssh-keys', '--assign-identity', 'blendnet-agent', '--nsg', 'blendnet-agent', '--public-ip-address', '', '--custom-data', startup_script, '--tags', 'app=blendnet', 'session_id='+cfg['session_id'], 'type=agent', 'vm='+cfg['instance_name'], ] if cfg['use_cheap_instance']: print('INFO: Azure: Running cheap agent instance with max price %f (min %f)' % ( cfg['instance_max_price'], getMinimalCheapPrice(cfg['instance_type']), )) options.append('--priority') options.append('Spot') options.append('--max-price') options.append(str(cfg['instance_max_price'])) options.append('--eviction-policy') options.append('Delete') print('INFO: Azure: Creating agent %s' % (cfg['instance_name'],)) _executeAzTool(*options) return cfg['instance_name'] def 
startInstance(instance_id): _executeAzTool('vm', 'start', '--resource-group', AZ_CONF['resource_group'], '--name', instance_id) def stopInstance(instance_id): _executeAzTool('vm', 'stop', '--resource-group', AZ_CONF['resource_group'], '--name', instance_id) def deleteInstance(instance_id): toremove_ids = _executeAzTool('resource', 'list', '--tag', 'vm=' + instance_id, '--query', '[].id') if len(toremove_ids) < 5: print('WARN: Azure: Not enough resources for VM to remove (5 needed): %s' % (toremove_ids,)) for i in range(5): try: _executeAzTool('resource', 'delete', '--resource-group', AZ_CONF['resource_group'], '--ids', *toremove_ids) except AzToolException as e: if 'Some resources failed to be deleted' not in str(e): print('ERROR: Unable to delete resources:', e) print('WARN: Repeat deleting of the resources to be sure', i, toremove_ids) time.sleep(1) def createFirewall(target_group, port): print('INFO: Azure: Creating security group for %s' % (target_group,)) _executeAzTool('network', 'nsg', 'create', '--name', target_group, '--location', AZ_CONF['location'], '--resource-group', AZ_CONF['resource_group']) _executeAzTool('network', 'nsg', 'rule', 'create', '--name', 'inbound-ssh', '--resource-group', AZ_CONF['resource_group'], '--nsg-name', target_group, '--priority', '1000', '--access', 'Deny', '--direction', 'Inbound', '--protocol', 'Tcp', '--destination-port-ranges', '22', '--destination-address-prefixes', '0.0.0.0/0') _executeAzTool('network', 'nsg', 'rule', 'create', '--name', 'inbound-https', '--resource-group', AZ_CONF['resource_group'], '--nsg-name', target_group, '--priority', '1001', '--access', 'Allow', '--direction', 'Inbound', '--protocol', 'Tcp', '--destination-port-ranges', str(port), '--destination-address-prefixes', '10.0.0.0/8' if target_group == 'blendnet-agent' else '0.0.0.0/0') def createStorage(storage_url): _createResourceGroup() print('INFO: Azure: Creating storage %s ...' % (storage_url,)) account = urllib.parse.urlparse(storage_url).hostname.split('.')[0] container = urllib.parse.urlparse(storage_url).path.split('/')[-1] _executeAzTool('storage', 'account', 'create', '--name', account, '--location', AZ_CONF['location'], '--resource-group', AZ_CONF['resource_group']) while _executeAzTool('storage', 'account', 'check-name', '--name', account).get('nameAvailable'): print('DEBUG: Azure: Waiting for account creation') _executeAzTool('storage', 'container', 'create', '--name', container, '--account-name', account) while not _executeAzTool('storage', 'container', 'exists', '--name', container, '--account-name', account).get('exists'): print('DEBUG: Azure: Waiting for container creation') return True def uploadFileToStorage(path, storage_url, dest_path = None): if dest_path: if platform.system() == 'Windows': dest_path = pathlib.PurePath(dest_path).as_posix() storage_url += '/' + dest_path if not os.path.isabs(path): path = os.path.join('.', path) print('INFO: Azure: Uploading file to %s ...' % (storage_url,)) _executeAzTool('storage', 'copy', '--source-local-path', path, '--destination', storage_url) return True def uploadRecursiveToStorage(path, storage_url, dest_path = None, include = None, exclude = None): if dest_path: if platform.system() == 'Windows': dest_path = pathlib.PurePath(dest_path).as_posix() storage_url += '/' + dest_path print('INFO: Azure: Uploading files from %s to "%s" ...' 
% (path, storage_url)) cmd = ['storage', 'copy', '--recursive', '--source-local-path', os.path.join(path, '*'), '--destination', storage_url] if include: cmd += ['--include-pattern', include] if exclude: cmd += ['--exclude-pattern', exclude] _executeAzTool(*cmd) print('INFO: Azure: Uploaded files to "%s"' % (storage_url,)) return True def uploadDataToStorage(data, storage_url, dest_path = None): with tempfile.TemporaryDirectory(prefix='blendnet') as temp_dir_name: temp_file = os.path.join(temp_dir_name, 'upload_file') with open(temp_file, 'wb') as fd: fd.write(data) fd.flush() if dest_path: if platform.system() == 'Windows': dest_path = pathlib.PurePath(dest_path).as_posix() storage_url += '/' + dest_path print('INFO: Azure: Uploading data to "%s" ...' % (storage_url,)) _executeAzTool('storage', 'copy', '--source-local-path', temp_file, '--destination', storage_url) return True def downloadDataFromStorage(storage_url, path = None): temp_file = tempfile.NamedTemporaryFile() if path: if platform.system() == 'Windows': path = pathlib.PurePath(path).as_posix() storage_url += '/' + path print('INFO: Azure: Downloading file from "%s" ...' % (path,)) try: _executeAzTool('storage', 'copy', '--source', storage_url, '--destination-local-path', temp_file.name) except AzToolException: print('WARN: Azure: Download operation failed') return None with open(temp_file.name, 'rb') as fh: return fh.read() def getResources(session_id): out = {'agents':{}} def parseInstanceInfo(it): try: return { 'id': it.get('name'), 'name': it.get('name'), 'ip': it.get('publicIps'), 'internal_ip': it['privateIps'], 'type': it['hardwareProfile']['vmSize'], 'started': it['powerState'] == 'VM running', 'stopped': it['powerState'] == 'VM stopped', 'created': 'unknown', } except: return None data = _executeAzTool('vm', 'list', '--show-details', '--resource-group', AZ_CONF['resource_group'], '--query', "[?tags.session_id == '%s']" % (session_id,)) disks = _executeAzTool('disk', 'list', '--resource-group', AZ_CONF['resource_group'], '--query', "[?tags.session_id == '%s']" % (session_id,)) vm_dates = dict() for disk in disks: vm_dates[disk.get('tags', {}).get('vm')] = disk.get('timeCreated') if disk.get('diskState') != 'Unattached': continue if (dt.datetime.now(dt.timezone.utc) - dt.datetime.fromisoformat(disk.get('timeCreated'))).seconds < 120: continue print('INFO: Azure: Destroying stale disk:', disk.get('name')) _executeAzTool('disk', 'delete', '--no-wait', '--yes', '--ids', disk.get('id')) for it in data: inst = parseInstanceInfo(it) if not inst: continue inst['created'] = vm_dates.get(inst['id'], 'unknown') it_type = it['tags'].get('type') if it_type == 'manager': out['manager'] = inst elif it_type == 'agent': out['agents'][inst['name']] = inst else: print('WARN: Azure: Unknown type resource instance', inst['name']) return out def getNodeLog(instance_id): data = _executeAzTool('vm', 'boot-diagnostics', 'get-boot-log', '--resource-group', AZ_CONF['resource_group'], '--name', instance_id) return data.get('stdout') def getManagerSizeDefault(): return 'Standard_A1_v2' def getAgentSizeDefault(): return 'Standard_A1_v2'
Apache License 2.0
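A simplified stand-in reproducing the URL shape getStorageUrl builds, assuming an empty AZ_CONF so both generated defaults apply; the session id is invented.

# Stand-in for illustration only; does not import BlendNet.
AZ_CONF = {}  # empty config -> fall back to the generated defaults

def storage_url(session_id):
    account = AZ_CONF.get('storage_account') or 'blendnet{session_id}'
    container = AZ_CONF.get('storage_container') or 'blendnet-{session_id}'
    return 'https://%s.blob.core.windows.net/%s' % (
        account.format(session_id=session_id.lower()),
        container.format(session_id=session_id.lower()),
    )

print(storage_url('ABC123'))
# https://blendnetabc123.blob.core.windows.net/blendnet-abc123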
kdrag0n/freqbench
rd/usr/lib/python3.8/compileall.py
compile_dir
python
def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None,
                quiet=0, legacy=False, optimize=-1, workers=1,
                invalidation_mode=None):
    ProcessPoolExecutor = None
    if workers < 0:
        raise ValueError('workers must be greater or equal to 0')
    if workers != 1:
        try:
            from concurrent.futures import ProcessPoolExecutor
        except ImportError:
            workers = 1
    files_and_ddirs = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels,
                                ddir=ddir)
    success = True
    if workers != 1 and ProcessPoolExecutor is not None:
        workers = workers or None
        with ProcessPoolExecutor(max_workers=workers) as executor:
            results = executor.map(
                    partial(_compile_file_tuple,
                            force=force, rx=rx, quiet=quiet,
                            legacy=legacy, optimize=optimize,
                            invalidation_mode=invalidation_mode,
                            ),
                    files_and_ddirs)
            success = min(results, default=True)
    else:
        for file, dfile in files_and_ddirs:
            if not compile_file(file, dfile, force, rx, quiet,
                                legacy, optimize, invalidation_mode):
                success = False
    return success
Byte-compile all modules in the given directory tree.

Arguments (only dir is required):

dir:       the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir:      the directory that will be prepended to the path to the
           file as it is compiled into each byte-code file.
force:     if True, force compilation, even if timestamps are up-to-date
quiet:     full output with False or 0, errors only with 1,
           no output with 2
legacy:    if True, produce legacy pyc paths instead of PEP 3147 paths
optimize:  optimization level or -1 for level of the interpreter
workers:   maximum number of parallel workers
invalidation_mode: how the up-to-dateness of the pyc will be checked
https://github.com/kdrag0n/freqbench/blob/44bd02980cf44fd9346458ed483f6e4058d8c0ac/rd/usr/lib/python3.8/compileall.py#L50-L99
import os import sys import importlib.util import py_compile import struct from functools import partial __all__ = ["compile_dir","compile_file","compile_path"] def _walk_dir(dir, ddir=None, maxlevels=10, quiet=0): if quiet < 2 and isinstance(dir, os.PathLike): dir = os.fspath(dir) if not quiet: print('Listing {!r}...'.format(dir)) try: names = os.listdir(dir) except OSError: if quiet < 2: print("Can't list {!r}".format(dir)) names = [] names.sort() for name in names: if name == '__pycache__': continue fullname = os.path.join(dir, name) if ddir is not None: dfile = os.path.join(ddir, name) else: dfile = None if not os.path.isdir(fullname): yield fullname, ddir elif (maxlevels > 0 and name != os.curdir and name != os.pardir and os.path.isdir(fullname) and not os.path.islink(fullname)): yield from _walk_dir(fullname, ddir=dfile, maxlevels=maxlevels - 1, quiet=quiet)
MIT License
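compile_dir is the standard-library compileall entry point bundled in this ramdisk; a typical invocation sketch, with an illustrative source path and worker count.

import compileall

# Byte-compile a source tree in parallel, forcing recompilation and
# keeping output quiet except for errors.
ok = compileall.compile_dir(
    'src/',          # illustrative path
    maxlevels=10,
    force=True,
    quiet=1,
    workers=4,       # 0 would mean "use one worker per available CPU"
)
print('all modules compiled' if ok else 'some modules failed to compile')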
peerplays-network/python-peerplays
peerplays/cli/bookie.py
rule
python
def rule(ctx, rule):
    rule = Rule(rule, peerplays_instance=ctx.peerplays)
    t = PrettyTable([
        "id",
        "name",
    ])
    t.align = "l"
    t.add_row([
        rule["id"],
        "\n".join(["{}: {}".format(v[0], v[1]) for v in rule["name"]]),
    ])
    click.echo(str(t))
    click.echo(
        "\n".join(["{}: {}".format(v[0], v[1]) for v in rule["description"]])
    )
[bookie] Show a specific rule

:param str rule: Rule id
https://github.com/peerplays-network/python-peerplays/blob/99730afc0017a660458a3e51228ac8874306d94a/peerplays/cli/bookie.py#L98-L116
import click from pprint import pprint from prettytable import PrettyTable from .decorators import onlineChain from .main import main from .ui import pretty_print from peerplays.asset import Asset from peerplays.sport import Sport, Sports from peerplays.eventgroup import EventGroup, EventGroups from peerplays.event import Event, Events from peerplays.bettingmarketgroup import BettingMarketGroup, BettingMarketGroups from peerplays.rule import Rule, Rules @main.group() def bookie(): pass @bookie.command() @click.pass_context @onlineChain def sports(ctx): sports = Sports(peerplays_instance=ctx.peerplays) click.echo(pretty_print(sports, ctx=ctx)) @bookie.command() @click.argument("sport") @click.pass_context @onlineChain def eventgroups(ctx, sport): sport = Sport(sport, peerplays_instance=ctx.peerplays) click.echo(pretty_print(sport.eventgroups, ctx=ctx)) @bookie.command() @click.argument("eventgroup") @click.pass_context @onlineChain def events(ctx, eventgroup): eg = EventGroup(eventgroup, peerplays_instance=ctx.peerplays) click.echo(pretty_print(eg.events, ctx=ctx)) @bookie.command() @click.argument("event") @click.pass_context @onlineChain def bmgs(ctx, event): eg = Event(event, peerplays_instance=ctx.peerplays) click.echo(pretty_print(eg.bettingmarketgroups, ctx=ctx)) @bookie.command() @click.argument("bmg") @click.pass_context @onlineChain def bettingmarkets(ctx, bmg): bmg = BettingMarketGroup(bmg, peerplays_instance=ctx.peerplays) click.echo(pretty_print(bmg.bettingmarkets, ctx=ctx)) @bookie.command() @click.pass_context @onlineChain def rules(ctx): rules = Rules(peerplays_instance=ctx.peerplays) click.echo(pretty_print(rules, ctx=ctx)) @bookie.command() @click.argument("rule") @click.pass_context @onlineChain
MIT License
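The command above renders its output with prettytable; a standalone sketch of the same table-building pattern, with invented rule data in place of a real Rule object.

from prettytable import PrettyTable

t = PrettyTable(["id", "name"])
t.align = "l"  # left-align all columns, as in the command

# rule["name"] is a list of (language, text) pairs in the record above;
# this mimics that shape with made-up data.
name_pairs = [("en", "Example rule"), ("de", "Beispielregel")]
t.add_row([
    "1.19.0",  # invented rule id
    "\n".join("{}: {}".format(lang, text) for lang, text in name_pairs),
])
print(t)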
ripple/dactyl
dactyl/dactyl_build.py
DactylBuilder.build
python
def build(self):
    logger.info("loading pages in target...")
    pages = self.target.load_pages()
    logger.info("... done loading pages in target")

    context = {
        "current_time": time.strftime(self.config["time_format"]),
        "config": self.config,
        "mode": self.mode,
        "target": self.target.data,
        "pages": [p.data for p in pages],
        "categories": self.target.categories(),
    }

    es_data = {}
    matched_only = False
    for page in pages:
        if page.is_virtual():
            logger.debug("skipping virtual page: %s" % page)
            continue

        if self.only_page:
            if self.match_only_page(page.data):
                matched_only = True
            else:
                logger.debug("only_page mode: skipping page %s" % page)
                continue

        logger.info("Building page: %s"%page)
        page_context = {"currentpage":page.data, **context}

        if self.mode == "es" or self.es_upload != NO_ES_UP:
            es_template = self.template_for_page(page, mode="es")
            es_json_s = page.es_json(es_template, page_context)
            es_page_id = self.target.name+"."+page.data["html"]
            es_data[es_page_id] = es_json_s

        if self.mode == "html" or self.mode == "pdf":
            use_template = self.template_for_page(page)
            logger.debug("use_template is: %s" % use_template)
            page_text = page.render(use_template, page_context)
        elif self.mode == "md":
            if "md" not in page.data and "__md_generator" not in page.data:
                logger.info("... md mode: Skipping page (no md): %s" % page)
                continue
            page_text = page.md_content(page_context)
        elif self.mode == "es":
            page_text = es_json_s
        else:
            exit("build() error: unknown mode: %s" % self.mode)

        if page_text:
            self.write_page(page_text, page.filepath(self.mode))
        else:
            logger.warning("not writing empty page '%s'"%page.data["name"])

    if self.only_page and not matched_only:
        exit("Didn't find requested 'only' page '%s'" % self.only_page)

    if self.es_upload != NO_ES_UP:
        self.upload_es(es_data)

    if self.mode == "pdf":
        self.assemble_pdf()
Build and write all pages in the target, according to the set mode, and upload their entries to ElasticSearch if requested.
https://github.com/ripple/dactyl/blob/91ae98cc448dd178a7129ed66e84888b9908b526/dactyl/dactyl_build.py#L113-L184
from dactyl.common import * from distutils.dir_util import copy_tree, remove_tree from shutil import copy as copy_file from copy import copy import subprocess from time import sleep import requests from urllib.parse import urlparse import jinja2 from markdown import markdown from bs4 import BeautifulSoup from watchdog.observers import Observer from dactyl.config import DactylConfig from dactyl.cli import DactylCLIParser from dactyl.target import DactylTarget from dactyl.page import DactylPage from dactyl.watch_handler import UpdaterHandler class DactylBuilder: def __init__(self, target, config, mode="html", only_page=None): assert isinstance(target, DactylTarget) self.target = target self.config = config self.mode = mode self.only_page = only_page self.nonce = str(time.time()).replace(".","") if mode == "html": self.copy_content_static = True self.copy_template_static = True elif mode == "md": self.copy_content_static = True self.copy_template_static = False else: self.copy_content_static = False self.copy_template_static = False self.out_path = self.config["out_path"] if mode == "pdf": self.staging_folder = self.temp_dir() self.pdf_filename = PDF_USE_DEFAULT self.leave_temp_files = False if mode == "es": self.es_upload = ES_USE_DEFAULT else: self.es_upload = NO_ES_UP if (self.config["template_allow_undefined"] == False and not self.config.bypass_errors): self.strict_undefined = True else: self.strict_undefined = False self.setup_html_env() self.default_pdf_template = self.get_template(self.config["default_pdf_template"]) self.default_html_template = self.get_template(self.config["default_template"]) self.default_es_template = self.get_es_template(self.config["default_es_template"]) self.http_port = DEFAULT_SERVER_PORT def temp_dir(self): run_dir = os.path.join(self.config["temporary_files_path"], "dactyl-"+self.nonce) if not os.path.isdir(run_dir): os.makedirs(run_dir) return run_dir def match_only_page(self, currentpage_data): if not self.only_page: return False if self.only_page[-5:] == ".html": if (currentpage_data["html"] == self.only_page or os.path.basename(currentpage_data["html"]) == self.only_page): return True elif self.only_page[-3:] == ".md": if "md" in currentpage_data and (currentpage_data["md"] == self.only_page or os.path.basename(currentpage_data["md"]) == self.only_page): return True return False
MIT License
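A simplified standalone mirror of the only_page matching that build() relies on (match_only_page in the context): accept either the configured filename or its basename for .html and .md pages. The helper below is a sketch under that assumption, not the library's exact implementation.

import os

def match_only_page(only_page, page_html, page_md=None):
    # Simplified: the real helper reads these fields from a page-data dict.
    if not only_page:
        return False
    if only_page.endswith(".html"):
        return page_html == only_page or os.path.basename(page_html) == only_page
    if only_page.endswith(".md") and page_md:
        return page_md == only_page or os.path.basename(page_md) == only_page
    return False

print(match_only_page("index.html", "en/index.html"))            # True (basename match)
print(match_only_page("en/index.html", "en/index.html"))          # True (exact match)
print(match_only_page("about.md", "en/about.html", "about.md"))   # True via the md filename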
cmdmnt/commandment
commandment/mdm/handlers.py
ack_available_os_updates
python
def ack_available_os_updates(request: DBCommand, device: Device, response: dict):
    if response.get('Status', None) == 'Error':
        pass
    else:
        for au in device.available_os_updates:
            db.session.delete(au)

        schema = AvailableOSUpdateListResponse()
        result = schema.load(response)

        for upd in result.data['AvailableOSUpdates']:
            upd.device = device
            db.session.add(upd)

        db.session.commit()
Acknowledge a response to AvailableOSUpdates
https://github.com/cmdmnt/commandment/blob/17c1dbe3f5301eab0f950f82608c231c15a3ff43/commandment/mdm/handlers.py#L234-L249
from binascii import hexlify from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from flask import current_app from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound from commandment.apps import ManagedAppStatus from commandment.apps.models import ManagedApplication from commandment.mdm import commands from commandment.mdm.app import command_router from .commands import ProfileList, DeviceInformation, SecurityInfo, InstalledApplicationList, CertificateList, InstallProfile, AvailableOSUpdates, InstallApplication, RemoveProfile, ManagedApplicationList from .response_schema import InstalledApplicationListResponse, DeviceInformationResponse, AvailableOSUpdateListResponse, ProfileListResponse, SecurityInfoResponse from ..models import db, Device, Command as DBCommand from commandment.inventory.models import InstalledCertificate, InstalledProfile, InstalledApplication Queries = DeviceInformation.Queries @command_router.route('DeviceInformation') def ack_device_information(request: DBCommand, device: Device, response: dict): schema = DeviceInformationResponse() result = schema.load(response) for k, v in result.data['QueryResponses'].items(): setattr(device, k, v) db.session.commit() @command_router.route('SecurityInfo') def ack_security_info(request: DBCommand, device: Device, response: dict): schema = SecurityInfoResponse() result = schema.load(response) db.session.commit() @command_router.route('ProfileList') def ack_profile_list(request: DBCommand, device: Device, response: dict): schema = ProfileListResponse() profile_list = schema.load(response) for pl in device.installed_payloads: db.session.delete(pl) for p in device.installed_profiles: db.session.delete(p) desired_profiles = {} for tag in device.tags: for p in tag.profiles: desired_profiles[p.uuid] = p remove_profiles = [] for profile in profile_list.data['ProfileList']: profile.device = device profile.device_udid = device.udid for payload in profile.payload_content: payload.device = device payload.profile_id = profile.id db.session.add(profile) if profile.payload_uuid in desired_profiles: del desired_profiles[profile.payload_uuid] else: if not profile.is_managed: current_app.logger.debug("Skipping removal of unmanaged profile: %s", profile.payload_display_name) else: current_app.logger.debug("Going to remove: %s", profile.payload_display_name) remove_profiles.append(profile) for puuid, p in desired_profiles.items(): c = commands.InstallProfile(None, profile=p) dbc = DBCommand.from_model(c) dbc.device = device db.session.add(dbc) for remove_profile in remove_profiles: c = commands.RemoveProfile(None, Identifier=remove_profile.payload_identifier) dbc = DBCommand.from_model(c) dbc.device = device db.session.add(dbc) db.session.commit() @command_router.route('CertificateList') def ack_certificate_list(request: DBCommand, device: Device, response: dict): for c in device.installed_certificates: db.session.delete(c) certificates = response['CertificateList'] current_app.logger.debug( 'Received CertificatesList response containing {} certificate(s)'.format(len(certificates))) for cert in certificates: ic = InstalledCertificate() ic.device = device ic.device_udid = device.udid ic.x509_cn = cert.get('CommonName', None) ic.is_identity = cert.get('IsIdentity', None) der_data = cert['Data'] certificate = x509.load_der_x509_certificate(der_data, default_backend()) ic.fingerprint_sha256 = hexlify(certificate.fingerprint(hashes.SHA256())) ic.der_data = 
der_data db.session.add(ic) db.session.commit() @command_router.route('InstalledApplicationList') def ack_installed_app_list(request: DBCommand, device: Device, response: dict): for a in device.installed_applications: db.session.delete(a) applications = response['InstalledApplicationList'] current_app.logger.debug( 'Received InstalledApplicationList response containing {} application(s)'.format(len(applications)) ) schema = InstalledApplicationListResponse() result, errors = schema.load(response) current_app.logger.debug(errors) ignored_app_bundle_ids = current_app.config['IGNORED_APPLICATION_BUNDLE_IDS'] for ia in result['InstalledApplicationList']: if isinstance(ia, db.Model): if ia.bundle_identifier in ignored_app_bundle_ids: current_app.logger.debug('Ignoring app with bundle id: %s', ia.bundle_identifier) continue ia.device = device ia.device_udid = device.udid db.session.add(ia) else: current_app.logger.debug('Not a model: %s', ia) db.session.commit() @command_router.route('InstallProfile') def ack_install_profile(request: DBCommand, device: Device, response: dict): if response.get('Status', None) == 'Error': pass @command_router.route('RemoveProfile') def ack_install_profile(request: DBCommand, device: Device, response: dict): if response.get('Status', None) == 'Error': pass @command_router.route('AvailableOSUpdates')
MIT License
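The handler above follows a replace-the-cached-inventory pattern: delete the device's previous rows, insert whatever the response reports, then commit. A generic SQLAlchemy sketch of that pattern with stand-in models and invented update keys, not commandment's schema.

from sqlalchemy import Column, Integer, String, ForeignKey, create_engine
from sqlalchemy.orm import declarative_base, relationship, Session

Base = declarative_base()

class Device(Base):
    __tablename__ = 'devices'
    id = Column(Integer, primary_key=True)
    updates = relationship('AvailableOSUpdate', back_populates='device')

class AvailableOSUpdate(Base):
    __tablename__ = 'available_os_updates'
    id = Column(Integer, primary_key=True)
    device_id = Column(Integer, ForeignKey('devices.id'))
    product_key = Column(String)
    device = relationship('Device', back_populates='updates')

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    device = Device()
    session.add(device)
    # Replace-all: drop stale rows for this device, then add fresh ones.
    for stale in device.updates:
        session.delete(stale)
    for key in ['MSU_UPDATE_A', 'MSU_UPDATE_B']:  # invented product keys
        session.add(AvailableOSUpdate(device=device, product_key=key))
    session.commit()
    print(session.query(AvailableOSUpdate).count())  # 2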
containers/python-podman
podman/libs/tunnel.py
Portal.reap
python
def reap(self):
    now = time.time()
    with self.lock:
        reaped_data = self.data.copy()
        for entry in reaped_data.items():
            if entry[1][1] < now:
                del self.data[entry[0]]
            else:
                break
        self._schedule_reaper()
Remove tunnels whose TTL has expired.
https://github.com/containers/python-podman/blob/838727e32e3582b03d3cb9ea314096c39d2da6da/podman/libs/tunnel.py#L84-L95
import collections import getpass import logging import os import subprocess import threading import time import weakref from contextlib import suppress import psutil Context = collections.namedtuple('Context', ( 'uri', 'interface', 'local_socket', 'remote_socket', 'username', 'hostname', 'port', 'identity_file', 'ignore_hosts', 'known_hosts', )) Context.__new__.__defaults__ = (None, ) * len(Context._fields) class Portal(collections.MutableMapping): def __init__(self, sweap=25): self.data = collections.OrderedDict() self.sweap = sweap self.ttl = sweap * 2 self.lock = threading.RLock() self._schedule_reaper() def __getitem__(self, key): with self.lock: value, _ = self.data[key] self.data[key] = (value, time.time() + self.ttl) self.data.move_to_end(key) return value def __setitem__(self, key, value): if not isinstance(value, Tunnel): raise ValueError('Portals only support Tunnels.') with self.lock: self.data[key] = (value, time.time() + self.ttl) self.data.move_to_end(key) def __delitem__(self, key): with self.lock: value, _ = self.data[key] del self.data[key] value.close() del value def __iter__(self): with self.lock: values = self.data.values() for tunnel, _ in values: yield tunnel def __len__(self): with self.lock: return len(self.data) def _schedule_reaper(self): timer = threading.Timer(interval=self.sweap, function=self.reap) timer.setName('PortalReaper') timer.setDaemon(True) timer.start()
Apache License 2.0
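Reaping works because entries are kept oldest-first with an absolute expiry timestamp, so the scan can stop at the first live entry. A standalone sketch of that idea, with made-up keys and no ssh tunnels involved.

import time
from collections import OrderedDict

data = OrderedDict()          # key -> (value, expiry), oldest first
ttl = 0.1

data['a'] = ('tunnel-a', time.time() + ttl)
data['b'] = ('tunnel-b', time.time() + ttl)
time.sleep(0.2)               # let both entries expire
data['c'] = ('tunnel-c', time.time() + ttl)

now = time.time()
for key, (_, expiry) in list(data.items()):
    if expiry < now:
        del data[key]         # expired: drop it
    else:
        break                 # ordered by age, so the rest are still live

print(list(data))             # ['c']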
python-openxml/python-docx
docx/styles/style.py
BaseStyle.priority
python
def priority(self): return self._element.uiPriority_val
The integer sort key governing display sequence of this style in the Word UI. |None| indicates no setting is defined, causing Word to use the default value of 0. Style name is used as a secondary sort key to resolve ordering of styles having the same priority value.
https://github.com/python-openxml/python-docx/blob/36cac78de080d412e9e50d56c2784e33655cad59/docx/styles/style.py#L107-L114
from __future__ import ( absolute_import, division, print_function, unicode_literals ) from . import BabelFish from ..enum.style import WD_STYLE_TYPE from ..shared import ElementProxy from ..text.font import Font from ..text.parfmt import ParagraphFormat def StyleFactory(style_elm): style_cls = { WD_STYLE_TYPE.PARAGRAPH: _ParagraphStyle, WD_STYLE_TYPE.CHARACTER: _CharacterStyle, WD_STYLE_TYPE.TABLE: _TableStyle, WD_STYLE_TYPE.LIST: _NumberingStyle }[style_elm.type] return style_cls(style_elm) class BaseStyle(ElementProxy): __slots__ = () @property def builtin(self): return not self._element.customStyle def delete(self): self._element.delete() self._element = None @property def hidden(self): return self._element.semiHidden_val @hidden.setter def hidden(self, value): self._element.semiHidden_val = value @property def locked(self): return self._element.locked_val @locked.setter def locked(self, value): self._element.locked_val = value @property def name(self): name = self._element.name_val if name is None: return None return BabelFish.internal2ui(name) @name.setter def name(self, value): self._element.name_val = value @property
MIT License
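A short sketch reading the priority sort key through python-docx's public style objects; it only needs a blank document created from the bundled default template, and the printed values depend on that template.

from docx import Document

document = Document()  # new document based on the bundled default template

# List each defined style with the sort key Word uses in its style gallery;
# None means the style falls back to Word's default priority of 0.
for style in document.styles:
    print(style.name, style.priority)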
gdatasoftwareag/robotframework-flaui
src/FlaUILibrary/flaui/module/tree.py
Tree._select
python
def _select(control: Any, location: str):
    obj = TreeItems(control)
    obj.execute_by_location(location, "Select")
Try to select a tree item described by the given location string.

Args:
    control (Object): Tree control UI object.
    location (String): Series of pointers identifying the item's location.
        Example: location = "N:nameofitem1->N:nameofitem2->I:indexofitem2"

Raises:
    FlaUiError: If the value cannot be found by the control.
https://github.com/gdatasoftwareag/robotframework-flaui/blob/1503253b0adb2a47ac0a75c51f1aba9f5bb6ed7a/src/FlaUILibrary/flaui/module/tree.py#L241-L255
from enum import Enum from typing import Optional, Any from FlaUILibrary.flaui.exception import FlaUiError from FlaUILibrary.flaui.interface import (ModuleInterface, ValueContainer) from FlaUILibrary.flaui.util.treeitems import TreeItems from FlaUILibrary.flaui.util.converter import Converter class Tree(ModuleInterface): class Container(ValueContainer): element: Optional[Any] item: Optional[str] class Action(Enum): GET_ROOT_ITEMS_COUNT = "GET_ROOT_ITEMS_COUNT" GET_VISIBLE_ITEMS_COUNT = "GET_VISIBLE_ITEMS_COUNT" GET_VISIBLE_ITEMS_NAMES = "GET_VISIBLE_ITEMS_NAMES" ITEM_SHOULD_BE_VISIBLE = "ITEM_SHOULD_BE_VISIBLE" EXPAND_ALL = "EXPAND_ALL" COLLAPSE_ALL = "COLLAPSE_ALL" SELECT_ITEM_BY_NAME = "SELECT_ITEM_BY_NAME" SELECT_ITEM = "SELECT_ITEM" EXPAND_ITEM = "EXPAND_ITEM" COLLAPSE_ITEM = "COLLAPSE_ITEM" SELECTED_ITEM_SHOULD_BE = "SELECTED_ITEM_SHOULD_BE" GET_SELECTED_ITEMS_NAME = "GET_SELECTED_ITEMS_NAME" @staticmethod def create_value_container(element=None, item=None): return Tree.Container(element=element, item=Converter.cast_to_string(item)) def execute_action(self, action: Action, values: Container): switcher = { self.Action.GET_ROOT_ITEMS_COUNT: lambda: values["element"].Items.Length, self.Action.EXPAND_ALL: lambda: self._expand_all_treetems(values["element"]), self.Action.COLLAPSE_ALL: lambda: self._collapse_all_treetems(values["element"]), self.Action.GET_VISIBLE_ITEMS_NAMES: lambda: self._get_every_visible_treeitems_name(values["element"]), self.Action.GET_VISIBLE_ITEMS_COUNT: lambda: self._get_every_visible_treeitems_count(values["element"]), self.Action.ITEM_SHOULD_BE_VISIBLE: lambda: self._should_be_visible(values["element"], values["item"]), self.Action.SELECT_ITEM_BY_NAME: lambda: self._select_by_name(values["element"], values["item"]), self.Action.SELECT_ITEM: lambda: self._select(values["element"], values["item"]), self.Action.EXPAND_ITEM: lambda: self._expand(values["element"], values["item"]), self.Action.COLLAPSE_ITEM: lambda: self._collapse(values["element"], values["item"]), self.Action.SELECTED_ITEM_SHOULD_BE: lambda: self._selected_item_should_be(values["element"], values["item"]), self.Action.GET_SELECTED_ITEMS_NAME: lambda: self._get_selected_items_name(values["element"]), } return switcher.get(action, lambda: FlaUiError.raise_fla_ui_error(FlaUiError.ActionNotSupported))() @staticmethod def _get_every_visible_treeitems_name(control: Any): obj = TreeItems(control) return obj.get_every_visible_treeitems_name() @staticmethod def _get_every_visible_treeitems_count(control: Any): obj = TreeItems(control) obj.get_every_visible_treeitems_name() return obj.treeitems_count @staticmethod def _get_selected_item(control: Any): obj = TreeItems(control) selected = obj.selected_treeitem if not selected: raise FlaUiError(FlaUiError.NoItemSelected) return selected @staticmethod def _should_be_visible(control: Any, name: str): names = Tree._get_every_visible_treeitems_name(control) if name not in names: raise FlaUiError(FlaUiError.ElementNotVisible.format(name)) @staticmethod def _expand_all_treetems(control: Any): obj = TreeItems(control) obj.expand_all_treeitems() @staticmethod def _collapse_all_treetems(control: Any): TreeItems(control).collapse() @staticmethod def _select_by_name(control: Any, name: str): obj = TreeItems(control) obj.select_visible_treeitem_by_name(name) @staticmethod
MIT License
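The location argument is a chain of 'N:<name>' and 'I:<index>' pointers separated by '->'. A standalone sketch parsing that format; it mirrors the syntax from the docstring, not FlaUILibrary's internal parser, and the item names are invented.

def parse_location(location):
    # "N:Animals->N:Mammals->I:2" -> [('name', 'Animals'), ('name', 'Mammals'), ('index', 2)]
    pointers = []
    for part in location.split("->"):
        kind, _, value = part.partition(":")
        if kind == "N":
            pointers.append(("name", value))
        elif kind == "I":
            pointers.append(("index", int(value)))
        else:
            raise ValueError("Unknown pointer type: " + part)
    return pointers

print(parse_location("N:Animals->N:Mammals->I:2"))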
kivy/python-for-android
pythonforandroid/recommendations.py
print_recommendations
python
def print_recommendations():
    print('Min supported NDK version: {}'.format(MIN_NDK_VERSION))
    print('Recommended NDK version: {}'.format(RECOMMENDED_NDK_VERSION))
    print('Min target API: {}'.format(MIN_TARGET_API))
    print('Recommended target API: {}'.format(RECOMMENDED_TARGET_API))
    print('Min NDK API: {}'.format(MIN_NDK_API))
    print('Recommended NDK API: {}'.format(RECOMMENDED_NDK_API))
Print the main recommended dependency versions as simple key-value pairs.
https://github.com/kivy/python-for-android/blob/3a9bcabd91aa498982ab42ef7e59846f90df25d7/pythonforandroid/recommendations.py#L224-L233
import sys from distutils.version import LooseVersion from os.path import join from pythonforandroid.logger import info, warning from pythonforandroid.util import BuildInterruptingException MIN_NDK_VERSION = 19 MAX_NDK_VERSION = 20 RECOMMENDED_NDK_VERSION = "19c" NDK_DOWNLOAD_URL = "https://developer.android.com/ndk/downloads/" NEW_NDK_MESSAGE = 'Newer NDKs may not be fully supported by p4a.' UNKNOWN_NDK_MESSAGE = ( 'Could not determine NDK version, no source.properties in the NDK dir.' ) PARSE_ERROR_NDK_MESSAGE = ( 'Could not parse $NDK_DIR/source.properties, not checking NDK version.' ) READ_ERROR_NDK_MESSAGE = ( 'Unable to read the NDK version from the given directory {ndk_dir}.' ) ENSURE_RIGHT_NDK_MESSAGE = ( 'Make sure your NDK version is greater than {min_supported}. If you get ' 'build errors, download the recommended NDK {rec_version} from {ndk_url}.' ) NDK_LOWER_THAN_SUPPORTED_MESSAGE = ( 'The minimum supported NDK version is {min_supported}. ' 'You can download it from {ndk_url}.' ) UNSUPPORTED_NDK_API_FOR_ARMEABI_MESSAGE = ( 'Asked to build for armeabi architecture with API ' '{req_ndk_api}, but API {max_ndk_api} or greater does not support armeabi.' ) CURRENT_NDK_VERSION_MESSAGE = ( 'Found NDK version {ndk_version}' ) RECOMMENDED_NDK_VERSION_MESSAGE = ( 'Maximum recommended NDK version is {recommended_ndk_version}, but newer versions may work.' ) def check_ndk_version(ndk_dir): version = read_ndk_version(ndk_dir) if version is None: warning(READ_ERROR_NDK_MESSAGE.format(ndk_dir=ndk_dir)) warning( ENSURE_RIGHT_NDK_MESSAGE.format( min_supported=MIN_NDK_VERSION, rec_version=RECOMMENDED_NDK_VERSION, ndk_url=NDK_DOWNLOAD_URL, ) ) return minor_to_letter = {0: ''} minor_to_letter.update( {n + 1: chr(i) for n, i in enumerate(range(ord('b'), ord('b') + 25))} ) major_version = version.version[0] letter_version = minor_to_letter[version.version[1]] string_version = '{major_version}{letter_version}'.format( major_version=major_version, letter_version=letter_version ) info(CURRENT_NDK_VERSION_MESSAGE.format(ndk_version=string_version)) if major_version < MIN_NDK_VERSION: raise BuildInterruptingException( NDK_LOWER_THAN_SUPPORTED_MESSAGE.format( min_supported=MIN_NDK_VERSION, ndk_url=NDK_DOWNLOAD_URL ), instructions=( 'Please, go to the android NDK page ({ndk_url}) and download a' ' supported version.\n*** The currently recommended NDK' ' version is {rec_version} ***'.format( ndk_url=NDK_DOWNLOAD_URL, rec_version=RECOMMENDED_NDK_VERSION, ) ), ) elif major_version > MAX_NDK_VERSION: warning( RECOMMENDED_NDK_VERSION_MESSAGE.format( recommended_ndk_version=RECOMMENDED_NDK_VERSION ) ) warning(NEW_NDK_MESSAGE) def read_ndk_version(ndk_dir): try: with open(join(ndk_dir, 'source.properties')) as fileh: ndk_data = fileh.read() except IOError: info(UNKNOWN_NDK_MESSAGE) return for line in ndk_data.split('\n'): if line.startswith('Pkg.Revision'): break else: info(PARSE_ERROR_NDK_MESSAGE) return ndk_version = LooseVersion(line.split('=')[-1].strip()) return ndk_version MIN_TARGET_API = 26 RECOMMENDED_TARGET_API = 27 ARMEABI_MAX_TARGET_API = 21 OLD_API_MESSAGE = ( 'Target APIs lower than 26 are no longer supported on Google Play, ' 'and are not recommended. 
Note that the Target API can be higher than ' 'your device Android version, and should usually be as high as possible.') def check_target_api(api, arch): if api >= ARMEABI_MAX_TARGET_API and arch == 'armeabi': raise BuildInterruptingException( UNSUPPORTED_NDK_API_FOR_ARMEABI_MESSAGE.format( req_ndk_api=api, max_ndk_api=ARMEABI_MAX_TARGET_API ), instructions='You probably want to build with --arch=armeabi-v7a instead') if api < MIN_TARGET_API: warning('Target API {} < {}'.format(api, MIN_TARGET_API)) warning(OLD_API_MESSAGE) MIN_NDK_API = 21 RECOMMENDED_NDK_API = 21 OLD_NDK_API_MESSAGE = ('NDK API less than {} is not supported'.format(MIN_NDK_API)) TARGET_NDK_API_GREATER_THAN_TARGET_API_MESSAGE = ( 'Target NDK API is {ndk_api}, ' 'higher than the target Android API {android_api}.' ) def check_ndk_api(ndk_api, android_api): if ndk_api > android_api: raise BuildInterruptingException( TARGET_NDK_API_GREATER_THAN_TARGET_API_MESSAGE.format( ndk_api=ndk_api, android_api=android_api ), instructions=('The NDK API is a minimum supported API number and must be lower ' 'than the target Android API')) if ndk_api < MIN_NDK_API: warning(OLD_NDK_API_MESSAGE) MIN_PYTHON_MAJOR_VERSION = 3 MIN_PYTHON_MINOR_VERSION = 6 MIN_PYTHON_VERSION = LooseVersion('{major}.{minor}'.format(major=MIN_PYTHON_MAJOR_VERSION, minor=MIN_PYTHON_MINOR_VERSION)) PY2_ERROR_TEXT = ( 'python-for-android no longer supports running under Python 2. Either upgrade to ' 'Python {min_version} or higher (recommended), or revert to python-for-android 2019.07.08.' ).format(min_version=MIN_PYTHON_VERSION) PY_VERSION_ERROR_TEXT = ( 'Your Python version {user_major}.{user_minor} is not supported by python-for-android, ' 'please upgrade to {min_version} or higher.' ).format( user_major=sys.version_info.major, user_minor=sys.version_info.minor, min_version=MIN_PYTHON_VERSION) def check_python_version(): if sys.version_info.major == 2: raise BuildInterruptingException(PY2_ERROR_TEXT) if ( sys.version_info.major < MIN_PYTHON_MAJOR_VERSION or sys.version_info.minor < MIN_PYTHON_MINOR_VERSION ): raise BuildInterruptingException(PY_VERSION_ERROR_TEXT)
MIT License
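A hedged usage sketch for the recommendations module above, assuming the module layout shown in its context; the concrete API levels and architecture passed in are illustrative only:

    from pythonforandroid.recommendations import (
        check_ndk_api, check_python_version, check_target_api, print_recommendations)
    from pythonforandroid.util import BuildInterruptingException

    try:
        check_python_version()             # rejects Python 2 and too-old Python 3
        check_target_api(27, 'arm64-v8a')  # warns when the target API is below the minimum
        check_ndk_api(21, 27)              # NDK API must not exceed the target Android API
    except BuildInterruptingException as exc:
        print('Build aborted: {}'.format(exc))
    else:
        print_recommendations()            # the key-value summary shown above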
datastax/python-driver
cassandra/cqlengine/columns.py
Map.__init__
python
def __init__(self, key_type, value_type, default=dict, **kwargs):
    super(Map, self).__init__((key_type, value_type), default=default, **kwargs)
    self.key_col = self.types[0]
    self.value_col = self.types[1]
    if not self.key_col._python_type_hashable:
        raise ValidationError("Cannot create a Map with unhashable key type (see PYTHON-494)")
    self.db_type = 'map<{0}, {1}>'.format(self.key_col.db_type, self.value_col.db_type)
:param key_type: a column class indicating the type of the map's keys
:param value_type: a column class indicating the type of the map's values
https://github.com/datastax/python-driver/blob/12a8adce943fe37a05ad6580e8bd302b65c2d93a/cassandra/cqlengine/columns.py#L941-L953
from copy import deepcopy, copy from datetime import date, datetime, timedelta import logging import six from uuid import UUID as _UUID from cassandra import util from cassandra.cqltypes import SimpleDateType, _cqltypes, UserType from cassandra.cqlengine import ValidationError from cassandra.cqlengine.functions import get_total_seconds from cassandra.util import Duration as _Duration log = logging.getLogger(__name__) class BaseValueManager(object): def __init__(self, instance, column, value): self.instance = instance self.column = column self.value = value self.previous_value = None self.explicit = False @property def deleted(self): return self.column._val_is_null(self.value) and (self.explicit or not self.column._val_is_null(self.previous_value)) @property def changed(self): if self.explicit: return self.value != self.previous_value if isinstance(self.column, BaseContainerColumn): default_value = self.column.get_default() if self.column._val_is_null(default_value): return not self.column._val_is_null(self.value) and self.value != self.previous_value elif self.previous_value is None: return self.value != default_value return self.value != self.previous_value return False def reset_previous_value(self): self.previous_value = deepcopy(self.value) def getval(self): return self.value def setval(self, val): self.value = val self.explicit = True def delval(self): self.value = None def get_property(self): _get = lambda slf: self.getval() _set = lambda slf, val: self.setval(val) _del = lambda slf: self.delval() if self.column.can_delete: return property(_get, _set, _del) else: return property(_get, _set) class Column(object): db_type = None value_manager = BaseValueManager instance_counter = 0 _python_type_hashable = True primary_key = False partition_key = False """ indicates that this column should be the partition key, defining more than one partition key column creates a compound partition key """ index = False custom_index = False db_field = None default = None required = False clustering_order = None discriminator_column = False static = False def __init__(self, primary_key=False, partition_key=False, index=False, db_field=None, default=None, required=False, clustering_order=None, discriminator_column=False, static=False, custom_index=False): self.partition_key = partition_key self.primary_key = partition_key or primary_key self.index = index self.custom_index = custom_index self.db_field = db_field self.default = default self.required = required self.clustering_order = clustering_order self.discriminator_column = discriminator_column self.column_name = None self._partition_key_index = None self.static = static self.value = None self.position = Column.instance_counter Column.instance_counter += 1 def __ne__(self, other): if isinstance(other, Column): return self.position != other.position return NotImplemented def __eq__(self, other): if isinstance(other, Column): return self.position == other.position return NotImplemented def __lt__(self, other): if isinstance(other, Column): return self.position < other.position return NotImplemented def __le__(self, other): if isinstance(other, Column): return self.position <= other.position return NotImplemented def __gt__(self, other): if isinstance(other, Column): return self.position > other.position return NotImplemented def __ge__(self, other): if isinstance(other, Column): return self.position >= other.position return NotImplemented def __hash__(self): return id(self) def validate(self, value): if value is None: if self.required: raise 
ValidationError('{0} - None values are not allowed'.format(self.column_name or self.db_field)) return value def to_python(self, value): return value def to_database(self, value): return value @property def has_default(self): return self.default is not None @property def is_primary_key(self): return self.primary_key @property def can_delete(self): return not self.primary_key def get_default(self): if self.has_default: if callable(self.default): return self.default() else: return self.default def get_column_def(self): static = "static" if self.static else "" return '{0} {1} {2}'.format(self.cql, self.db_type, static) def cql_parameterized_type(self): return self.db_type def set_column_name(self, name): self.column_name = name @property def db_field_name(self): return self.db_field if self.db_field is not None else self.column_name @property def db_index_name(self): return 'index_{0}'.format(self.db_field_name) @property def has_index(self): return self.index or self.custom_index @property def cql(self): return self.get_cql() def get_cql(self): return '"{0}"'.format(self.db_field_name) def _val_is_null(self, val): return val is None @property def sub_types(self): return [] @property def cql_type(self): return _cqltypes[self.db_type] class Blob(Column): db_type = 'blob' def to_database(self, value): if not isinstance(value, (six.binary_type, bytearray)): raise Exception("expecting a binary, got a %s" % type(value)) val = super(Bytes, self).to_database(value) return bytearray(val) Bytes = Blob class Inet(Column): db_type = 'inet' class Text(Column): db_type = 'text' def __init__(self, min_length=None, max_length=None, **kwargs): self.min_length = ( 1 if min_length is None and kwargs.get('required', False) else min_length) self.max_length = max_length if self.min_length is not None: if self.min_length < 0: raise ValueError( 'Minimum length is not allowed to be negative.') if self.max_length is not None: if self.max_length < 0: raise ValueError( 'Maximum length is not allowed to be negative.') if self.min_length is not None and self.max_length is not None: if self.max_length < self.min_length: raise ValueError( 'Maximum length must be greater or equal ' 'to minimum length.') super(Text, self).__init__(**kwargs) def validate(self, value): value = super(Text, self).validate(value) if not isinstance(value, (six.string_types, bytearray)) and value is not None: raise ValidationError('{0} {1} is not a string'.format(self.column_name, type(value))) if self.max_length is not None: if value and len(value) > self.max_length: raise ValidationError('{0} is longer than {1} characters'.format(self.column_name, self.max_length)) if self.min_length: if (self.min_length and not value) or len(value) < self.min_length: raise ValidationError('{0} is shorter than {1} characters'.format(self.column_name, self.min_length)) return value class Ascii(Text): db_type = 'ascii' def validate(self, value): value = super(Ascii, self).validate(value) if value: charset = value if isinstance( value, (bytearray, )) else map(ord, value) if not set(range(128)).issuperset(charset): raise ValidationError( '{!r} is not an ASCII string.'.format(value)) return value class Integer(Column): db_type = 'int' def validate(self, value): val = super(Integer, self).validate(value) if val is None: return try: return int(val) except (TypeError, ValueError): raise ValidationError("{0} {1} can't be converted to integral value".format(self.column_name, value)) def to_python(self, value): return self.validate(value) def to_database(self, value): return 
self.validate(value) class TinyInt(Integer): db_type = 'tinyint' class SmallInt(Integer): db_type = 'smallint' class BigInt(Integer): db_type = 'bigint' class VarInt(Column): db_type = 'varint' def validate(self, value): val = super(VarInt, self).validate(value) if val is None: return try: return int(val) except (TypeError, ValueError): raise ValidationError( "{0} {1} can't be converted to integral value".format(self.column_name, value)) def to_python(self, value): return self.validate(value) def to_database(self, value): return self.validate(value) class CounterValueManager(BaseValueManager): def __init__(self, instance, column, value): super(CounterValueManager, self).__init__(instance, column, value) self.value = self.value or 0 self.previous_value = self.previous_value or 0 class Counter(Integer): db_type = 'counter' value_manager = CounterValueManager def __init__(self, index=False, db_field=None, required=False): super(Counter, self).__init__( primary_key=False, partition_key=False, index=index, db_field=db_field, default=0, required=required, ) class DateTime(Column): db_type = 'timestamp' truncate_microseconds = False def to_python(self, value): if value is None: return if isinstance(value, datetime): if DateTime.truncate_microseconds: us = value.microsecond truncated_us = us // 1000 * 1000 return value - timedelta(microseconds=us - truncated_us) else: return value elif isinstance(value, date): return datetime(*(value.timetuple()[:6])) return datetime.utcfromtimestamp(value) def to_database(self, value): value = super(DateTime, self).to_database(value) if value is None: return if not isinstance(value, datetime): if isinstance(value, date): value = datetime(value.year, value.month, value.day) else: raise ValidationError("{0} '{1}' is not a datetime object".format(self.column_name, value)) epoch = datetime(1970, 1, 1, tzinfo=value.tzinfo) offset = get_total_seconds(epoch.tzinfo.utcoffset(epoch)) if epoch.tzinfo else 0 return int((get_total_seconds(value - epoch) - offset) * 1000) class Date(Column): db_type = 'date' def to_database(self, value): if value is None: return d = value if isinstance(value, util.Date) else util.Date(value) return d.days_from_epoch + SimpleDateType.EPOCH_OFFSET_DAYS def to_python(self, value): if value is None: return if isinstance(value, util.Date): return value if isinstance(value, datetime): value = value.date() return util.Date(value) class Time(Column): db_type = 'time' def to_database(self, value): value = super(Time, self).to_database(value) if value is None: return return value if isinstance(value, util.Time) else util.Time(value) def to_python(self, value): value = super(Time, self).to_database(value) if value is None: return if isinstance(value, util.Time): return value return util.Time(value) class Duration(Column): db_type = 'duration' def validate(self, value): val = super(Duration, self).validate(value) if val is None: return if not isinstance(val, _Duration): raise TypeError('{0} {1} is not a valid Duration.'.format(self.column_name, value)) return val class UUID(Column): db_type = 'uuid' def validate(self, value): val = super(UUID, self).validate(value) if val is None: return if isinstance(val, _UUID): return val if isinstance(val, six.string_types): try: return _UUID(val) except ValueError: pass raise ValidationError("{0} {1} is not a valid uuid".format( self.column_name, value)) def to_python(self, value): return self.validate(value) def to_database(self, value): return self.validate(value) class TimeUUID(UUID): db_type = 'timeuuid' class 
Boolean(Column): db_type = 'boolean' def validate(self, value): value = super(Boolean, self).validate(value) if value is not None: value = bool(value) return value def to_python(self, value): return self.validate(value) class BaseFloat(Column): def validate(self, value): value = super(BaseFloat, self).validate(value) if value is None: return try: return float(value) except (TypeError, ValueError): raise ValidationError("{0} {1} is not a valid float".format(self.column_name, value)) def to_python(self, value): return self.validate(value) def to_database(self, value): return self.validate(value) class Float(BaseFloat): db_type = 'float' class Double(BaseFloat): db_type = 'double' class Decimal(Column): db_type = 'decimal' def validate(self, value): from decimal import Decimal as _Decimal from decimal import InvalidOperation val = super(Decimal, self).validate(value) if val is None: return try: return _Decimal(repr(val)) if isinstance(val, float) else _Decimal(val) except InvalidOperation: raise ValidationError("{0} '{1}' can't be coerced to decimal".format(self.column_name, val)) def to_python(self, value): return self.validate(value) def to_database(self, value): return self.validate(value) class BaseCollectionColumn(Column): def __init__(self, types, **kwargs): instances = [] for t in types: inheritance_comparator = issubclass if isinstance(t, type) else isinstance if not inheritance_comparator(t, Column): raise ValidationError("%s is not a column class" % (t,)) if t.db_type is None: raise ValidationError("%s is an abstract type" % (t,)) inst = t() if isinstance(t, type) else t if isinstance(t, BaseCollectionColumn): inst._freeze_db_type() instances.append(inst) self.types = instances super(BaseCollectionColumn, self).__init__(**kwargs) def validate(self, value): value = super(BaseCollectionColumn, self).validate(value) if value is not None and len(value) > 65535: raise ValidationError("{0} Collection can't have more than 65535 elements.".format(self.column_name)) return value def _val_is_null(self, val): return not val def _freeze_db_type(self): if not self.db_type.startswith('frozen'): self.db_type = "frozen<%s>" % (self.db_type,) @property def sub_types(self): return self.types @property def cql_type(self): return _cqltypes[self.__class__.__name__.lower()].apply_parameters([c.cql_type for c in self.types]) class Tuple(BaseCollectionColumn): def __init__(self, *args, **kwargs): if not args: raise ValueError("Tuple must specify at least one inner type") super(Tuple, self).__init__(args, **kwargs) self.db_type = 'tuple<{0}>'.format(', '.join(typ.db_type for typ in self.types)) def validate(self, value): val = super(Tuple, self).validate(value) if val is None: return if len(val) > len(self.types): raise ValidationError("Value %r has more fields than tuple definition (%s)" % (val, ', '.join(t for t in self.types))) return tuple(t.validate(v) for t, v in zip(self.types, val)) def to_python(self, value): if value is None: return tuple() return tuple(t.to_python(v) for t, v in zip(self.types, value)) def to_database(self, value): if value is None: return return tuple(t.to_database(v) for t, v in zip(self.types, value)) class BaseContainerColumn(BaseCollectionColumn): pass class Set(BaseContainerColumn): _python_type_hashable = False def __init__(self, value_type, strict=True, default=set, **kwargs): self.strict = strict super(Set, self).__init__((value_type,), default=default, **kwargs) self.value_col = self.types[0] if not self.value_col._python_type_hashable: raise ValidationError("Cannot 
create a Set with unhashable value type (see PYTHON-494)") self.db_type = 'set<{0}>'.format(self.value_col.db_type) def validate(self, value): val = super(Set, self).validate(value) if val is None: return types = (set, util.SortedSet) if self.strict else (set, util.SortedSet, list, tuple) if not isinstance(val, types): if self.strict: raise ValidationError('{0} {1} is not a set object'.format(self.column_name, val)) else: raise ValidationError('{0} {1} cannot be coerced to a set object'.format(self.column_name, val)) if None in val: raise ValidationError("{0} None not allowed in a set".format(self.column_name)) return set(self.value_col.validate(v) for v in val) def to_python(self, value): if value is None: return set() return set(self.value_col.to_python(v) for v in value) def to_database(self, value): if value is None: return None return set(self.value_col.to_database(v) for v in value) class List(BaseContainerColumn): _python_type_hashable = False def __init__(self, value_type, default=list, **kwargs): super(List, self).__init__((value_type,), default=default, **kwargs) self.value_col = self.types[0] self.db_type = 'list<{0}>'.format(self.value_col.db_type) def validate(self, value): val = super(List, self).validate(value) if val is None: return if not isinstance(val, (set, list, tuple)): raise ValidationError('{0} {1} is not a list object'.format(self.column_name, val)) if None in val: raise ValidationError("{0} None is not allowed in a list".format(self.column_name)) return [self.value_col.validate(v) for v in val] def to_python(self, value): if value is None: return [] return [self.value_col.to_python(v) for v in value] def to_database(self, value): if value is None: return None return [self.value_col.to_database(v) for v in value] class Map(BaseContainerColumn): _python_type_hashable = False
Apache License 2.0
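The Map column above is used like any other cqlengine column type, passing a key column class and a value column class. A minimal model sketch, assuming a configured cqlengine connection; the UserPrefs model and its fields are illustrative:

    from cassandra.cqlengine import columns
    from cassandra.cqlengine.models import Model

    class UserPrefs(Model):
        user_id = columns.UUID(primary_key=True)
        # map<text, int> in CQL; the key type must be hashable in Python (see PYTHON-494)
        settings = columns.Map(columns.Text, columns.Integer)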
sangyi92/alphago_zero
AlphagoZero/models/nn_util.py
neuralnet
python
def neuralnet(cls):
    NeuralNetBase.subclasses[cls.__name__] = cls
    return cls
Class decorator for registering subclasses of NeuralNetBase
https://github.com/sangyi92/alphago_zero/blob/6acd092e9b904eb725da031f8c5f8b4e1bdae111/AlphagoZero/models/nn_util.py#L109-L113
from tensorflow.contrib.keras import backend as K from tensorflow.contrib.keras import models as M from AlphagoZero.preprocessing.preprocessing import Preprocess import json class NeuralNetBase(object): subclasses = {} def __init__(self, feature_list, **kwargs): self.preprocessor = Preprocess(feature_list) kwargs["input_dim"] = self.preprocessor.output_dim if kwargs.get('init_network', True): self.model = self.__class__.create_network(**kwargs) self.forward = self._model_forward() def _model_forward(self): if self.model.uses_learning_phase: forward_function = K.function(self.model.inputs + [K.learning_phase()], self.model.outputs) return lambda inpt: forward_function([inpt, 0]) else: forward_function = K.function(self.model.inputs, self.model.outputs) return lambda inpt: forward_function([inpt]) @staticmethod def load_model(json_file): with open(json_file, 'r') as f: object_specs = json.load(f) class_name = object_specs.get('class', 'PolicyValue') try: network_class = NeuralNetBase.subclasses[class_name] except KeyError: raise ValueError("Unknown neural network type in json file: {}\n" "(was it registered with the @neuralnet decorator?)" .format(class_name)) new_net = network_class(object_specs['feature_list'], init_network=False) new_net.model = M.model_from_json(object_specs['keras_model']) if 'weights_file' in object_specs: new_net.model.load_weights(object_specs['weights_file']) new_net.forward = new_net._model_forward() return new_net def save_model(self, json_file, weights_file=None): object_specs = { 'class': self.__class__.__name__, 'keras_model': self.model.to_json(), 'feature_list': self.preprocessor.feature_list } if weights_file is not None: self.model.save_weights(weights_file) object_specs['weights_file'] = weights_file with open(json_file, 'w') as f: json.dump(object_specs, f)
MIT License
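The decorator above is a plain class registry: load_model later resolves the class name stored in the JSON file through NeuralNetBase.subclasses. A stripped-down, Keras-free sketch of the same pattern:

    class NeuralNetBase(object):
        subclasses = {}

    def neuralnet(cls):
        """Class decorator for registering subclasses of NeuralNetBase."""
        NeuralNetBase.subclasses[cls.__name__] = cls
        return cls

    @neuralnet
    class PolicyValue(NeuralNetBase):
        pass

    # A saved model file can now store just the class name, and the loader
    # can turn that name back into the class object:
    network_class = NeuralNetBase.subclasses['PolicyValue']
    assert network_class is PolicyValue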
aoldoni/tetre
lib/openie_tools/interfaces.py
ExternalInterface.get_interface
python
def get_interface(self):
    class_name = self.argv.openie_run_others
    if class_name in self.classes:
        self.interface = globals()[class_name](self.argv)
    return self.interface
Returns the class instance for the interface with the selected external tool.
https://github.com/aoldoni/tetre/blob/a8b07aa47a9adf7dce46dff96e20be63a761e9f7/lib/openie_tools/interfaces.py#L111-L120
from directories import dirs import os class StanfordOpenIE: def __init__(self, argv): self.argv = argv self.output_dir = dirs['output_stanford_openie']['path'] @staticmethod def run(i, o): command = ''.join(['java -cp ', dirs['stanford_corenlp_path']['path'], '*:', dirs['stanford_corenlp_path']['path'], 'lib/* ', '-Xmx8g edu.stanford.nlp.naturalli.OpenIE -props ', dirs['config']['path'], 'pipeline-openie.properties ', i, ' 1>', o]) os.system(command) class AllenAIOpenIE: def __init__(self, argv): self.argv = argv self.output_dir = dirs['output_allenai_openie']['path'] @staticmethod def run(i, o): command = ''.join(['(cd ', dirs['allenai_root']['path'], '; ', 'sbt \'run-main edu.knowitall.openie.OpenIECli --input-file ', dirs['allenai_root']['root_distance'], i, ' --ouput-file ', dirs['allenai_root']['root_distance'], o, '\')']) os.system(command) class MPICluaseIE: def __init__(self, argv): self.argv = argv self.output_dir = dirs['output_mpi_clauseie']['path'] @staticmethod def run(i, o): command = ''.join(['./', dirs['clauseie_root']['path'], 'clausie.sh -f ', i, ' -o ', o]) os.system(command) class ExternalInterface: def __init__(self, argv): self.argv = argv self.classes = ["MPICluaseIE", "AllenAIOpenIE", "StanfordOpenIE"] self.interface = None def run(self, file, outfile): external = self.get_interface() external.run(file, outfile) return
MIT License
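get_interface above resolves the selected tool through globals(), so only the class names whitelisted in self.classes can be instantiated. A hypothetical driver; the Namespace stands in for the parsed command-line arguments and the file paths are made up:

    from argparse import Namespace

    # 'StanfordOpenIE' is one of the names in ExternalInterface.classes
    argv = Namespace(openie_run_others='StanfordOpenIE')
    external = ExternalInterface(argv)
    external.run('input/sentences.txt', 'output/stanford_openie.txt')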
haroldmills/vesper
vesper/util/sample_buffer.py
SampleBuffer.read
python
def read(self, num_samples=None, increment=None):
    if num_samples is None:
        num_samples = len(self)
    elif num_samples < 0:
        raise ValueError('Sample buffer read size cannot be negative.')
    elif num_samples > len(self):
        raise ValueError((
            'Attempt to read {} samples from sample buffer with '
            'only {}.').format(num_samples, len(self)))

    if increment is None:
        increment = num_samples
    elif increment < 0:
        raise ValueError(
            'Sample buffer read increment cannot be negative.')

    if num_samples == 0:
        result = np.array([], dtype=self._dtype)
    else:
        result_arrays = []
        array_num = 0
        start = self.read_index - self._stored_start_index
        n = num_samples
        while n != 0:
            array = self._arrays[array_num]
            length = len(array)
            available = length - start
            if n >= available:
                result_arrays.append(array[start:])
                array_num += 1
                start = 0
                n -= available
            else:
                result_arrays.append(array[start:start + n])
                n -= n
        result = np.concatenate(result_arrays)

    if increment != 0:
        self.increment(increment)

    return result
Reads samples from this buffer.

Parameters
----------
num_samples : int or None
    the number of samples to read, or `None` to read all available samples.
increment : int or None
    the number by which to increment the read index of this buffer, or `None` to increment it by `num_samples`.

Returns
-------
NumPy array of samples, of this buffer's `dtype`.

Raises
------
ValueError
    if `num_samples` or `increment` is negative, or if `num_samples` exceeds the current buffer length.
https://github.com/haroldmills/vesper/blob/2bde3447eeb34b75cc580fbdafe7b26195a31530/vesper/util/sample_buffer.py#L119-L199
from collections import deque import numpy as np class SampleBuffer: def __init__(self, dtype): self._dtype = np.dtype(dtype) self._arrays = deque() self._stored_start_index = 0 self._stored_length = 0 self._read_index = 0 @property def dtype(self): return self._dtype @property def _stored_end_index(self): return self._stored_start_index + self._stored_length @property def write_index(self): return self._stored_end_index @property def read_index(self): return self._read_index def __len__(self): return max(self._stored_end_index - self._read_index, 0) def write(self, samples): if samples.dtype != self._dtype: raise ValueError(( 'NumPy dtype "{}" of samples does not match sample ' 'buffer dtype "{}".').format( str(samples.dtype), str(self._dtype))) else: if len(samples) != 0: self._arrays.append(samples) self._stored_length += len(samples)
MIT License
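A usage sketch for the buffer above, assuming the increment behaviour described in the read docstring (advancing only the read index):

    import numpy as np

    buf = SampleBuffer('int16')
    buf.write(np.arange(10, dtype='int16'))
    buf.write(np.arange(10, 20, dtype='int16'))

    # Read a five-sample window but advance the read index by only two samples,
    # so successive reads produce overlapping windows.
    window = buf.read(5, increment=2)
    assert list(window) == [0, 1, 2, 3, 4]
    assert buf.read_index == 2
    assert len(buf) == 18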
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/analytics_max_concurrent_viewers_response.py
AnalyticsMaxConcurrentViewersResponse.rows
python
def rows(self): return self._rows
Gets the rows of this AnalyticsMaxConcurrentViewersResponse.

:return: The rows of this AnalyticsMaxConcurrentViewersResponse.
:rtype: list[float]
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/analytics_max_concurrent_viewers_response.py#L50-L58
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model import pprint import six class AnalyticsMaxConcurrentViewersResponse(object): @poscheck_model def __init__(self, rows=None, row_count=None, column_labels=None): self._rows = list() self._row_count = None self._column_labels = list() self.discriminator = None if rows is not None: self.rows = rows if row_count is not None: self.row_count = row_count if column_labels is not None: self.column_labels = column_labels @property def openapi_types(self): types = { 'rows': 'list[float]', 'row_count': 'int', 'column_labels': 'list[AnalyticsColumnLabel]' } return types @property def attribute_map(self): attributes = { 'rows': 'rows', 'row_count': 'rowCount', 'column_labels': 'columnLabels' } return attributes @property
MIT License
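Normally this model is populated by the Bitmovin SDK from an analytics query response; constructing it by hand, as sketched here, is only to show the accessor. The row_count accessor is assumed to mirror rows, since the excerpt above is truncated before it:

    from bitmovin_api_sdk.models.analytics_max_concurrent_viewers_response import \
        AnalyticsMaxConcurrentViewersResponse

    response = AnalyticsMaxConcurrentViewersResponse(rows=[42.0, 57.0], row_count=2)
    print(response.rows)       # [42.0, 57.0]
    print(response.row_count)  # 2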
karlgluck/heroes-of-the-storm-replay-parser
s2protocol/protocol23260.py
decode_replay_game_events
python
def decode_replay_game_events(contents):
    decoder = BitPackedDecoder(contents, typeinfos)
    for event in _decode_event_stream(decoder,
                                      game_eventid_typeid,
                                      game_event_types,
                                      decode_player_id=True):
        yield event
Decodes and yields each game event from the contents byte string.
https://github.com/karlgluck/heroes-of-the-storm-replay-parser/blob/5dd407e3ce2bd06d1acd279dd85c2a2a924c3c62/s2protocol/protocol23260.py#L345-L352
from decoders import * typeinfos = [ ('_int',[(0,7)]), ('_int',[(0,4)]), ('_int',[(0,6)]), ('_int',[(0,14)]), ('_int',[(0,22)]), ('_int',[(0,32)]), ('_choice',[(0,2),{0:('m_uint6',2),1:('m_uint14',3),2:('m_uint22',4),3:('m_uint32',5)}]), ('_int',[(0,5)]), ('_struct',[[('m_playerId',7,-1)]]), ('_blob',[(0,8)]), ('_int',[(0,8)]), ('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',5,4),('m_baseBuild',5,5)]]), ('_int',[(0,3)]), ('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',5,3)]]), ('_fourcc',[]), ('_blob',[(0,7)]), ('_int',[(0,64)]), ('_struct',[[('m_region',10,0),('m_programId',14,1),('m_realm',5,2),('m_name',15,3),('m_id',16,4)]]), ('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), ('_int',[(0,2)]), ('_struct',[[('m_name',9,0),('m_toon',17,1),('m_race',9,2),('m_color',18,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',19,7),('m_result',19,8)]]), ('_array',[(0,5),20]), ('_optional',[21]), ('_blob',[(0,10)]), ('_blob',[(0,11)]), ('_struct',[[('m_file',24,0)]]), ('_bool',[]), ('_int',[(-9223372036854775808,64)]), ('_blob',[(0,12)]), ('_blob',[(40,0)]), ('_array',[(0,6),29]), ('_optional',[30]), ('_array',[(0,6),24]), ('_optional',[32]), ('_struct',[[('m_playerList',22,0),('m_title',23,1),('m_difficulty',9,2),('m_thumbnail',25,3),('m_isBlizzardMap',26,4),('m_timeUTC',27,5),('m_timeLocalOffset',27,6),('m_description',28,7),('m_imageFilePath',24,8),('m_mapFileName',24,9),('m_cacheHandles',31,10),('m_miniSave',26,11),('m_gameSpeed',12,12),('m_defaultDifficulty',2,13),('m_modPaths',33,14)]]), ('_optional',[10]), ('_struct',[[('m_race',35,-1)]]), ('_struct',[[('m_team',35,-1)]]), ('_struct',[[('m_name',9,-8),('m_randomSeed',5,-7),('m_racePreference',36,-6),('m_teamPreference',37,-5),('m_testMap',26,-4),('m_testAuto',26,-3),('m_examine',26,-2),('m_observe',19,-1)]]), ('_array',[(0,5),38]), ('_struct',[[('m_lockTeams',26,-12),('m_teamsTogether',26,-11),('m_advancedSharedControl',26,-10),('m_randomRaces',26,-9),('m_battleNet',26,-8),('m_amm',26,-7),('m_ranked',26,-6),('m_noVictoryOrDefeat',26,-5),('m_fog',19,-4),('m_observers',19,-3),('m_userDifficulty',19,-2),('m_clientDebugFlags',16,-1)]]), ('_int',[(1,4)]), ('_int',[(1,8)]), ('_bitarray',[(0,6)]), ('_bitarray',[(0,8)]), ('_bitarray',[(0,2)]), ('_struct',[[('m_allowedColors',43,-5),('m_allowedRaces',44,-4),('m_allowedDifficulty',43,-3),('m_allowedControls',44,-2),('m_allowedObserveTypes',45,-1)]]), ('_array',[(0,5),46]), ('_struct',[[('m_randomValue',5,-23),('m_gameCacheName',23,-22),('m_gameOptions',40,-21),('m_gameSpeed',12,-20),('m_gameType',12,-19),('m_maxUsers',7,-18),('m_maxObservers',7,-17),('m_maxPlayers',7,-16),('m_maxTeams',41,-15),('m_maxColors',2,-14),('m_maxRaces',42,-13),('m_maxControls',42,-12),('m_mapSizeX',10,-11),('m_mapSizeY',10,-10),('m_mapFileSyncChecksum',5,-9),('m_mapFileName',24,-8),('m_mapAuthorName',9,-7),('m_modFileSyncChecksum',5,-6),('m_slotDescriptions',47,-5),('m_defaultDifficulty',2,-4),('m_cacheHandles',30,-3),('m_isBlizzardMap',26,-2),('m_isPremadeFFA',26,-1)]]), ('_optional',[1]), ('_optional',[7]), ('_struct',[[('m_color',50,-1)]]), ('_array',[(0,5),5]), ('_array',[(0,9),5]), ('_struct',[[('m_control',10,-11),('m_userId',49,-10),('m_teamId',1,-9),('m_colorPref',51,-8),('m_racePref',36,-7),('m_difficulty',2,-6),('m_handicap',0,-5),('m_observe',19,-4),('m_rewards',52,-3),('m_toonHandle',15,-2),('m_licenses',53,-1)]]), ('_array',[(0,5),54]), 
('_struct',[[('m_phase',12,-9),('m_maxUsers',7,-8),('m_maxObservers',7,-7),('m_slots',55,-6),('m_randomSeed',5,-5),('m_hostUserId',49,-4),('m_isSinglePlayer',26,-3),('m_gameDuration',5,-2),('m_defaultDifficulty',2,-1)]]), ('_struct',[[('m_userInitialData',39,-3),('m_gameDescription',48,-2),('m_lobbyState',56,-1)]]), ('_struct',[[('m_syncLobbyState',57,-1)]]), ('_struct',[[('m_name',15,-1)]]), ('_blob',[(0,6)]), ('_struct',[[('m_name',60,-1)]]), ('_struct',[[('m_name',60,-3),('m_type',5,-2),('m_data',15,-1)]]), ('_struct',[[('m_type',5,-3),('m_name',60,-2),('m_data',28,-1)]]), ('_array',[(0,5),10]), ('_struct',[[('m_signature',64,-1)]]), ('_struct',[[('m_gameFullyDownloaded',26,-6),('m_developmentCheatsEnabled',26,-5),('m_multiplayerCheatsEnabled',26,-4),('m_syncChecksummingEnabled',26,-3),('m_isMapToMapTransition',26,-2),('m_useAIBeacons',26,-1)]]), ('_struct',[[]]), ('_struct',[[('m_fileName',24,-5),('m_automatic',26,-4),('m_overwrite',26,-3),('m_name',9,-2),('m_description',23,-1)]]), ('_int',[(-2147483648,32)]), ('_struct',[[('x',69,-2),('y',69,-1)]]), ('_struct',[[('m_point',70,-4),('m_time',69,-3),('m_verb',23,-2),('m_arguments',23,-1)]]), ('_struct',[[('m_data',71,-1)]]), ('_int',[(0,20)]), ('_int',[(0,16)]), ('_struct',[[('m_abilLink',74,-3),('m_abilCmdIndex',7,-2),('m_abilCmdData',35,-1)]]), ('_optional',[75]), ('_null',[]), ('_struct',[[('x',73,-3),('y',73,-2),('z',69,-1)]]), ('_struct',[[('m_targetUnitFlags',10,-7),('m_timer',10,-6),('m_tag',5,-5),('m_snapshotUnitLink',74,-4),('m_snapshotControlPlayerId',49,-3),('m_snapshotUpkeepPlayerId',49,-2),('m_snapshotPoint',78,-1)]]), ('_choice',[(0,2),{0:('None',77),1:('TargetPoint',78),2:('TargetUnit',79),3:('Data',5)}]), ('_optional',[5]), ('_struct',[[('m_cmdFlags',73,-4),('m_abil',76,-3),('m_data',80,-2),('m_otherUnit',81,-1)]]), ('_int',[(0,9)]), ('_bitarray',[(0,9)]), ('_array',[(0,9),83]), ('_choice',[(0,2),{0:('None',77),1:('Mask',84),2:('OneIndices',85),3:('ZeroIndices',85)}]), ('_struct',[[('m_unitLink',74,-3),('m_intraSubgroupPriority',10,-2),('m_count',83,-1)]]), ('_array',[(0,9),87]), ('_struct',[[('m_subgroupIndex',83,-4),('m_removeMask',86,-3),('m_addSubgroups',88,-2),('m_addUnitTags',53,-1)]]), ('_struct',[[('m_controlGroupId',1,-2),('m_delta',89,-1)]]), ('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',19,-2),('m_mask',86,-1)]]), ('_struct',[[('m_count',83,-6),('m_subgroupCount',83,-5),('m_activeSubgroupIndex',83,-4),('m_unitTagsChecksum',5,-3),('m_subgroupIndicesChecksum',5,-2),('m_subgroupsChecksum',5,-1)]]), ('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',92,-1)]]), ('_array',[(0,3),69]), ('_struct',[[('m_recipientId',1,-2),('m_resources',94,-1)]]), ('_struct',[[('m_chatMessage',23,-1)]]), ('_int',[(-128,8)]), ('_struct',[[('x',69,-3),('y',69,-2),('z',69,-1)]]), ('_struct',[[('m_beacon',97,-9),('m_ally',97,-8),('m_flags',97,-7),('m_build',97,-6),('m_targetUnitTag',5,-5),('m_targetUnitSnapshotUnitLink',74,-4),('m_targetUnitSnapshotUpkeepPlayerId',97,-3),('m_targetUnitSnapshotControlPlayerId',97,-2),('m_targetPoint',98,-1)]]), ('_struct',[[('m_speed',12,-1)]]), ('_struct',[[('m_delta',97,-1)]]), ('_struct',[[('m_point',70,-3),('m_unit',5,-2),('m_pingedMinimap',26,-1)]]), ('_struct',[[('m_verb',23,-2),('m_arguments',23,-1)]]), ('_struct',[[('m_alliance',5,-2),('m_control',5,-1)]]), ('_struct',[[('m_unitTag',5,-1)]]), ('_struct',[[('m_unitTag',5,-2),('m_flags',10,-1)]]), ('_struct',[[('m_conversationId',69,-2),('m_replyId',69,-1)]]), ('_struct',[[('m_purchaseItemId',69,-1)]]), 
('_struct',[[('m_difficultyLevel',69,-1)]]), ('_choice',[(0,3),{0:('None',77),1:('Checked',26),2:('ValueChanged',5),3:('SelectionChanged',69),4:('TextChanged',24)}]), ('_struct',[[('m_controlId',69,-3),('m_eventType',69,-2),('m_eventData',110,-1)]]), ('_struct',[[('m_soundHash',5,-2),('m_length',5,-1)]]), ('_array',[(0,7),5]), ('_struct',[[('m_soundHash',113,-2),('m_length',113,-1)]]), ('_struct',[[('m_syncInfo',114,-1)]]), ('_struct',[[('m_sound',5,-1)]]), ('_struct',[[('m_transmissionId',69,-2),('m_thread',5,-1)]]), ('_struct',[[('m_transmissionId',69,-1)]]), ('_struct',[[('x',74,-2),('y',74,-1)]]), ('_optional',[74]), ('_struct',[[('m_target',119,-4),('m_distance',120,-3),('m_pitch',120,-2),('m_yaw',120,-1)]]), ('_int',[(0,1)]), ('_struct',[[('m_skipType',122,-1)]]), ('_int',[(0,11)]), ('_struct',[[('x',124,-2),('y',124,-1)]]), ('_struct',[[('m_button',5,-4),('m_down',26,-3),('m_posUI',125,-2),('m_posWorld',78,-1)]]), ('_struct',[[('m_posUI',125,-2),('m_posWorld',78,-1)]]), ('_struct',[[('m_achievementLink',74,-1)]]), ('_struct',[[('m_soundtrack',5,-1)]]), ('_struct',[[('m_planetId',69,-1)]]), ('_struct',[[('m_key',97,-2),('m_flags',97,-1)]]), ('_struct',[[('m_resources',94,-1)]]), ('_struct',[[('m_fulfillRequestId',69,-1)]]), ('_struct',[[('m_cancelRequestId',69,-1)]]), ('_struct',[[('m_researchItemId',69,-1)]]), ('_struct',[[('m_laggingPlayerId',1,-1)]]), ('_struct',[[('m_mercenaryId',69,-1)]]), ('_struct',[[('m_battleReportId',69,-2),('m_difficultyLevel',69,-1)]]), ('_struct',[[('m_battleReportId',69,-1)]]), ('_int',[(0,19)]), ('_struct',[[('m_decrementMs',140,-1)]]), ('_struct',[[('m_portraitId',69,-1)]]), ('_struct',[[('m_functionName',15,-1)]]), ('_struct',[[('m_result',69,-1)]]), ('_struct',[[('m_gameMenuItemIndex',69,-1)]]), ('_struct',[[('m_reason',97,-1)]]), ('_struct',[[('m_purchaseCategoryId',69,-1)]]), ('_struct',[[('m_button',74,-1)]]), ('_struct',[[('m_cutsceneId',69,-2),('m_bookmarkName',15,-1)]]), ('_struct',[[('m_cutsceneId',69,-1)]]), ('_struct',[[('m_cutsceneId',69,-3),('m_conversationLine',15,-2),('m_altConversationLine',15,-1)]]), ('_struct',[[('m_cutsceneId',69,-2),('m_conversationLine',15,-1)]]), ('_struct',[[('m_recipient',12,-2),('m_string',24,-1)]]), ('_struct',[[('m_recipient',12,-2),('m_point',70,-1)]]), ('_struct',[[('m_progress',69,-1)]]), ] game_event_types = { 5: (67, 'NNet.Game.SUserFinishedLoadingSyncEvent'), 7: (59, 'NNet.Game.SBankFileEvent'), 8: (61, 'NNet.Game.SBankSectionEvent'), 9: (62, 'NNet.Game.SBankKeyEvent'), 10: (63, 'NNet.Game.SBankValueEvent'), 11: (65, 'NNet.Game.SBankSignatureEvent'), 12: (66, 'NNet.Game.SUserOptionsEvent'), 22: (68, 'NNet.Game.SSaveGameEvent'), 23: (67, 'NNet.Game.SSaveGameDoneEvent'), 25: (67, 'NNet.Game.SPlayerLeaveEvent'), 26: (72, 'NNet.Game.SGameCheatEvent'), 27: (82, 'NNet.Game.SCmdEvent'), 28: (90, 'NNet.Game.SSelectionDeltaEvent'), 29: (91, 'NNet.Game.SControlGroupUpdateEvent'), 30: (93, 'NNet.Game.SSelectionSyncCheckEvent'), 31: (95, 'NNet.Game.SResourceTradeEvent'), 32: (96, 'NNet.Game.STriggerChatMessageEvent'), 33: (99, 'NNet.Game.SAICommunicateEvent'), 34: (100, 'NNet.Game.SSetAbsoluteGameSpeedEvent'), 35: (101, 'NNet.Game.SAddAbsoluteGameSpeedEvent'), 36: (102, 'NNet.Game.STriggerPingEvent'), 37: (103, 'NNet.Game.SBroadcastCheatEvent'), 38: (104, 'NNet.Game.SAllianceEvent'), 39: (105, 'NNet.Game.SUnitClickEvent'), 40: (106, 'NNet.Game.SUnitHighlightEvent'), 41: (107, 'NNet.Game.STriggerReplySelectedEvent'), 44: (67, 'NNet.Game.STriggerSkippedEvent'), 45: (112, 'NNet.Game.STriggerSoundLengthQueryEvent'), 
46: (116, 'NNet.Game.STriggerSoundOffsetEvent'), 47: (117, 'NNet.Game.STriggerTransmissionOffsetEvent'), 48: (118, 'NNet.Game.STriggerTransmissionCompleteEvent'), 49: (121, 'NNet.Game.SCameraUpdateEvent'), 50: (67, 'NNet.Game.STriggerAbortMissionEvent'), 51: (108, 'NNet.Game.STriggerPurchaseMadeEvent'), 52: (67, 'NNet.Game.STriggerPurchaseExitEvent'), 53: (109, 'NNet.Game.STriggerPlanetMissionLaunchedEvent'), 54: (67, 'NNet.Game.STriggerPlanetPanelCanceledEvent'), 55: (111, 'NNet.Game.STriggerDialogControlEvent'), 56: (115, 'NNet.Game.STriggerSoundLengthSyncEvent'), 57: (123, 'NNet.Game.STriggerConversationSkippedEvent'), 58: (126, 'NNet.Game.STriggerMouseClickedEvent'), 59: (127, 'NNet.Game.STriggerMouseMovedEvent'), 60: (128, 'NNet.Game.SAchievementAwardedEvent'), 63: (67, 'NNet.Game.STriggerPlanetPanelReplayEvent'), 64: (129, 'NNet.Game.STriggerSoundtrackDoneEvent'), 65: (130, 'NNet.Game.STriggerPlanetMissionSelectedEvent'), 66: (131, 'NNet.Game.STriggerKeyPressedEvent'), 67: (143, 'NNet.Game.STriggerMovieFunctionEvent'), 68: (67, 'NNet.Game.STriggerPlanetPanelBirthCompleteEvent'), 69: (67, 'NNet.Game.STriggerPlanetPanelDeathCompleteEvent'), 70: (132, 'NNet.Game.SResourceRequestEvent'), 71: (133, 'NNet.Game.SResourceRequestFulfillEvent'), 72: (134, 'NNet.Game.SResourceRequestCancelEvent'), 73: (67, 'NNet.Game.STriggerResearchPanelExitEvent'), 74: (67, 'NNet.Game.STriggerResearchPanelPurchaseEvent'), 75: (135, 'NNet.Game.STriggerResearchPanelSelectionChangedEvent'), 76: (136, 'NNet.Game.SLagMessageEvent'), 77: (67, 'NNet.Game.STriggerMercenaryPanelExitEvent'), 78: (67, 'NNet.Game.STriggerMercenaryPanelPurchaseEvent'), 79: (137, 'NNet.Game.STriggerMercenaryPanelSelectionChangedEvent'), 80: (67, 'NNet.Game.STriggerVictoryPanelExitEvent'), 81: (67, 'NNet.Game.STriggerBattleReportPanelExitEvent'), 82: (138, 'NNet.Game.STriggerBattleReportPanelPlayMissionEvent'), 83: (139, 'NNet.Game.STriggerBattleReportPanelPlaySceneEvent'), 84: (139, 'NNet.Game.STriggerBattleReportPanelSelectionChangedEvent'), 85: (109, 'NNet.Game.STriggerVictoryPanelPlayMissionAgainEvent'), 86: (67, 'NNet.Game.STriggerMovieStartedEvent'), 87: (67, 'NNet.Game.STriggerMovieFinishedEvent'), 88: (141, 'NNet.Game.SDecrementGameTimeRemainingEvent'), 89: (142, 'NNet.Game.STriggerPortraitLoadedEvent'), 90: (144, 'NNet.Game.STriggerCustomDialogDismissedEvent'), 91: (145, 'NNet.Game.STriggerGameMenuItemSelectedEvent'), 92: (146, 'NNet.Game.STriggerCameraMoveEvent'), 93: (108, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseItemChangedEvent'), 94: (147, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseCategoryChangedEvent'), 95: (148, 'NNet.Game.STriggerButtonPressedEvent'), 96: (67, 'NNet.Game.STriggerGameCreditsFinishedEvent'), 97: (149, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'), 98: (150, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'), 99: (151, 'NNet.Game.STriggerCutsceneConversationLineEvent'), 100: (152, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'), } game_eventid_typeid = 0 message_event_types = { 0: (153, 'NNet.Game.SChatMessage'), 1: (154, 'NNet.Game.SPingMessage'), 2: (155, 'NNet.Game.SLoadingProgressMessage'), 3: (67, 'NNet.Game.SServerPingMessage'), } message_eventid_typeid = 1 svaruint32_typeid = 6 replay_playerid_typeid = 8 replay_header_typeid = 13 game_details_typeid = 34 replay_initdata_typeid = 58 def _varuint32_value(value): for k,v in value.iteritems(): return v return 0 def _decode_event_stream(decoder, eventid_typeid, event_types, decode_player_id): gameloop = 0 while not decoder.done(): 
start_bits = decoder.used_bits() delta = _varuint32_value(decoder.instance(svaruint32_typeid)) gameloop += delta if decode_player_id: playerid = decoder.instance(replay_playerid_typeid) eventid = decoder.instance(eventid_typeid) typeid, typename = event_types.get(eventid, (None, None)) if typeid is None: raise CorruptedError('eventid(%d) at %s' % (eventid, decoder)) event = decoder.instance(typeid) event['_event'] = typename event['_eventid'] = eventid event['_gameloop'] = gameloop if decode_player_id: event['_playerid'] = playerid decoder.byte_align() event['_bits'] = decoder.used_bits() - start_bits yield event
MIT License
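A hedged usage sketch for the decoder above: in s2protocol-style tooling the game-events byte string is normally read out of the replay's MPQ archive. The mpyq usage, import path, and file names below are assumptions, not part of this module:

    from mpyq import MPQArchive
    from s2protocol import protocol23260 as protocol

    archive = MPQArchive('replay.StormReplay')
    contents = archive.read_file('replay.game.events')

    for event in protocol.decode_replay_game_events(contents):
        # Each yielded dict carries the decoded fields plus bookkeeping keys
        # such as '_event', '_eventid', '_gameloop' and '_playerid'.
        if event['_event'] == 'NNet.Game.SCmdEvent':
            print(event['_gameloop'], event['_playerid'])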