Dataset schema (one row per extracted function):
  repository_name      string, length 7 to 107
  function_path        string, length 4 to 190
  function_identifier  string, length 1 to 236
  language             string, 1 distinct value (python)
  function             string, length 9 to 647k
  docstring            string, length 5 to 488k
  function_url         string, length 71 to 285
  context              string, length 0 to 2.51M
  license              string, 5 distinct values
mseclab/pyjfuzz
gramfuzz/gramfuzz/utils.py
val
python
def val(val, pre=None, shortest=False):
    if pre is None:
        pre = []
    fields = gramfuzz.fields
    MF = fields.MetaField
    F = fields.Field
    if type(val) is MF:
        val = val()
    if isinstance(val, F):
        val = str(val.build(pre, shortest=shortest))
    return str(val)
Build the provided value, while properly handling native Python types, :any:`gramfuzz.fields.Field` instances, and :any:`gramfuzz.fields.Field` subclasses.

:param list pre: The prerequisites list
:returns: str
https://github.com/mseclab/pyjfuzz/blob/f777067076f62c9ab74ffea6e90fd54402b7a1b4/gramfuzz/gramfuzz/utils.py#L13-L33
import gramfuzz
MIT License
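A minimal usage sketch for the val() helper above, exercising only the native-value path (building Field instances needs the rest of the gramfuzz grammar machinery); it assumes gramfuzz is installed so that gramfuzz.utils and gramfuzz.fields are importable.

import gramfuzz.fields  # ensures the fields module referenced inside val() is loaded
from gramfuzz.utils import val

# Native Python values are simply converted to str.
assert val(42) == "42"
assert val("abc") == "abc"
# Field subclasses are instantiated and Field instances are rendered via
# build(pre, shortest=...) before the final str() conversion.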
christabor/moal
MOAL/data_structures/abstract/tree.py
Tree.total_height
python
def total_height(self):
    return self.node_height(None, use_root=True)
Similar to height, but height is always absolute, rather than relative from a specific node; also hides the details behind a simpler interface with no arguments.
https://github.com/christabor/moal/blob/7d3062f6a49e45147172b7d577472b1e2aa084fa/MOAL/data_structures/abstract/tree.py#L137-L141
__author__ = """Chris Tabor (dxdstudio@gmail.com)""" if __name__ == '__main__': from os import getcwd from os import sys sys.path.append(getcwd()) from MOAL.helpers.display import Section from MOAL.helpers.display import prnt from MOAL.helpers.display import divider from MOAL.helpers.display import cmd_title from MOAL.data_structures.graphs.graphs import Graph DEBUG = True if __name__ == '__main__' else False class InvalidGraph(Exception): pass class InvalidNode(Exception): pass class Tree(Graph): def __init__(self, vertices={}): super(Tree, self).__init__(vertices=vertices) for key, node in vertices.iteritems(): self.__setitem__(key, node) def path(self, start_node, end_node): return super(Tree, self).walk(start_node, end_node) def level(self, node_name): raise NotImplementedError def __delitem__(self, key): edges = self.__getitem__(key).get('edges') for edge in edges: del self.vertices[edge] self.node_count -= 1 del self.vertices[key] self.node_count -= 1 def __setitem__(self, key, node): node['edges'] = set(node['edges']) if key in node['edges']: node['edges'].remove(key) node.update({'is_root': node.get('is_root', False)}) node.update({'is_child': not node.get('is_root', True)}) node.update({'val': node.get('val', '')}) node.update({'edges': node.get('edges', [])}) is_leaf = len(node.get('edges')) == 0 node.update({'is_leaf': is_leaf}) node.update({'parent': node.get('parent', None)}) super(Tree, self).__setitem__(key, node) return node def build_tree(self, **kwargs): g = super(Tree, self).build_graph(**kwargs) for name, data in self.vertices.iteritems(): g.add_subgraph(data['edges']) return g def render_tree(self, filename, **kwargs): g = self.build_tree(**kwargs) g.layout(prog='dot') g.draw(filename) def node_depth(self, node_name, use_root=False): height = 1 if use_root: return height node = self.__getitem__(node_name) while not node.get('is_root'): height += 1 node = self.__getitem__(node.get('parent')) return height def is_internal(self, node_name): node = self.__getitem__(node_name) if node is None: return False return 'is_root' not in node and len(node['edges']) != 0 def is_leaf(self, node_name): node = self.__getitem__(node_name) if node is None: return False return 'is_root' not in node and len(node['edges']) == 0 def node_height(self, node_name, height=1, use_root=False): node = self.get_root() if use_root else self.__getitem__(node_name) if node.get('is_leaf'): return height for child_name in node.get('edges'): return self.node_height(child_name, height=height + 1) return height
Apache License 2.0
mrknow/filmkodi
plugin.video.mrknow/mylib/pydevd_attach_to_process/winappdbg/process.py
Process.__iter__
python
def __iter__(self):
    return self.__ThreadsAndModulesIterator(self)
@see: L{iter_threads}, L{iter_modules}
@rtype: iterator
@return: Iterator of L{Thread} and L{Module} objects in this snapshot. All threads are iterated first, then all modules.
https://github.com/mrknow/filmkodi/blob/0162cde9ae25ddbf4a69330948714833ff2f78c9/plugin.video.mrknow/mylib/pydevd_attach_to_process/winappdbg/process.py#L346-L353
from __future__ import with_statement __revision__ = "$Id$" __all__ = ['Process'] import sys from winappdbg import win32 from winappdbg import compat from winappdbg.textio import HexDump, HexInput from winappdbg.util import Regenerator, PathOperations, MemoryAddresses from winappdbg.module import Module, _ModuleContainer from winappdbg.thread import Thread, _ThreadContainer from winappdbg.window import Window from winappdbg.search import Search, Pattern, BytePattern, TextPattern, RegExpPattern, HexPattern from winappdbg.disasm import Disassembler import re import os import os.path import ctypes import struct import warnings import traceback System = None class Process (_ThreadContainer, _ModuleContainer): def __init__(self, dwProcessId, hProcess = None, fileName = None): _ThreadContainer.__init__(self) _ModuleContainer.__init__(self) self.dwProcessId = dwProcessId self.hProcess = hProcess self.fileName = fileName def get_pid(self): return self.dwProcessId def get_filename(self): if not self.fileName: self.fileName = self.get_image_name() return self.fileName def open_handle(self, dwDesiredAccess = win32.PROCESS_ALL_ACCESS): hProcess = win32.OpenProcess(dwDesiredAccess, win32.FALSE, self.dwProcessId) try: self.close_handle() except Exception: warnings.warn( "Failed to close process handle: %s" % traceback.format_exc()) self.hProcess = hProcess def close_handle(self): try: if hasattr(self.hProcess, 'close'): self.hProcess.close() elif self.hProcess not in (None, win32.INVALID_HANDLE_VALUE): win32.CloseHandle(self.hProcess) finally: self.hProcess = None def get_handle(self, dwDesiredAccess = win32.PROCESS_ALL_ACCESS): if self.hProcess in (None, win32.INVALID_HANDLE_VALUE): self.open_handle(dwDesiredAccess) else: dwAccess = self.hProcess.dwAccess if (dwAccess | dwDesiredAccess) != dwAccess: self.open_handle(dwAccess | dwDesiredAccess) return self.hProcess def __contains__(self, anObject): return _ThreadContainer.__contains__(self, anObject) or _ModuleContainer.__contains__(self, anObject) def __len__(self): return _ThreadContainer.__len__(self) + _ModuleContainer.__len__(self) class __ThreadsAndModulesIterator (object): def __init__(self, container): self.__container = container self.__iterator = None self.__state = 0 def __iter__(self): return self def next(self): if self.__state == 0: self.__iterator = self.__container.iter_threads() self.__state = 1 if self.__state == 1: try: return self.__iterator.next() except StopIteration: self.__iterator = self.__container.iter_modules() self.__state = 2 if self.__state == 2: try: return self.__iterator.next() except StopIteration: self.__iterator = None self.__state = 3 raise StopIteration
Apache License 2.0
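A hedged usage sketch of the iterator above; it assumes a Windows host with winappdbg installed, a valid process id, and that the snapshot has been populated via the container scan methods (scan_threads/scan_modules), which belong to winappdbg's thread/module containers but are not shown in this excerpt.

from winappdbg import Process

proc = Process(dwProcessId=1234)  # hypothetical PID
proc.scan_threads()               # populate the thread snapshot (assumed API)
proc.scan_modules()               # populate the module snapshot (assumed API)
for obj in proc:                  # Thread objects are yielded first, then Module objects
    print(obj)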
birkbeckctp/annotran
annotran/client.py
merge
python
def merge(d1, d2):
    result = d1.copy()
    result.update(d2)
    return result
Merge two dictionaries

:param d1: first dictionary
:param d2: second dictionary
:return: a merge of d1 and d2
https://github.com/birkbeckctp/annotran/blob/42678afaee6d4b57cfaddb402bc6f15b37fdd027/annotran/client.py#L111-L120
import json import os from urlparse import urlparse import annotran.views import h import h.client from h import __version__ from jinja2 import Environment, PackageLoader jinja_env = Environment(loader=PackageLoader(__package__, 'templates')) def angular_template_context(name): jinja_env_ext = Environment(loader=PackageLoader(__package__, 'templates')) jinja_env_hypothesis = h.client.jinja_env angular_template_path = 'client/{}.html'.format(name) base_directory = os.path.dirname(os.path.realpath(__file__)) if os.path.isfile('{0}/templates/{1}'.format(base_directory, angular_template_path)): content, _, _ = jinja_env_ext.loader.get_source(jinja_env_ext, angular_template_path) else: content, _, _ = jinja_env_hypothesis.loader.get_source(jinja_env_hypothesis, angular_template_path) return {'name': '{}.html'.format(name), 'content': content} def app_html_context(webassets_env, api_url, service_url, ga_tracking_id, sentry_public_dsn, websocket_url): if urlparse(service_url).hostname == 'localhost': ga_cookie_domain = 'none' else: ga_cookie_domain = 'auto' service_url = h.client.url_with_path(service_url) app_config = { 'apiUrl': api_url, 'serviceUrl': service_url, 'supportAddress': annotran.views.Shared.support_address } if websocket_url: app_config.update({ 'websocketUrl': websocket_url, }) if sentry_public_dsn: app_config.update({ 'raven': { 'dsn': sentry_public_dsn, 'release': __version__ } }) return { 'app_config': json.dumps(app_config), 'angular_templates': map(angular_template_context, h.client.ANGULAR_DIRECTIVE_TEMPLATES), 'app_css_urls': h.client.asset_urls(webassets_env, 'app_css'), 'app_js_urls': h.client.asset_urls(webassets_env, 'app_js'), 'ga_tracking_id': ga_tracking_id, 'ga_cookie_domain': ga_cookie_domain, 'register_url': service_url + 'register', }
MIT License
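A short usage example for merge(); on duplicate keys the value from d2 wins because result.update(d2) runs last, and neither input dictionary is modified.

defaults = {"color": "blue", "size": 10}
overrides = {"size": 12}
assert merge(defaults, overrides) == {"color": "blue", "size": 12}
assert defaults == {"color": "blue", "size": 10}  # inputs left untouched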
tmbdev/webdataset
webdataset/autodecode.py
Decoder.__call__
python
def __call__(self, sample):
    assert isinstance(sample, dict), (len(sample), sample)
    return self.decode(sample)
Decode an entire sample.

:param sample: the sample
https://github.com/tmbdev/webdataset/blob/3431eeada3cba81c19966604116c4b5d6d6994ae/webdataset/autodecode.py#L414-L420
import io import json import os import pickle import re import tempfile from functools import partial import numpy as np from .checks import checkmember, checknotnone image_extensions = "jpg jpeg png ppm pgm pbm pnm".split() def torch_loads(data): import io import torch stream = io.BytesIO(data) return torch.load(stream) def basichandlers(key, data): extension = re.sub(r".*[.]", "", key) if extension in "txt text transcript": return data.decode("utf-8") if extension in "cls cls2 class count index inx id".split(): try: return int(data) except ValueError: return None if extension in "json jsn": return json.loads(data) if extension in "pyd pickle".split(): return pickle.loads(data) if extension in "pth".split(): return torch_loads(data) if extension in "ten tb".split(): from . import tenbin return tenbin.decode_buffer(data) if extension in "mp msgpack msg".split(): import msgpack return msgpack.unpackb(data) if extension in "npy".split(): import numpy.lib.format stream = io.BytesIO(data) return numpy.lib.format.read_array(stream) if extension in "npz".split(): return np.load(io.BytesIO(data)) def call_extension_handler(key, data, f, extensions): extension = key.lower().split(".") for target in extensions: target = target.split(".") if len(target) > len(extension): continue if extension[-len(target):] == target: return f(data) return None def handle_extension(extensions, f): extensions = extensions.lower().split() return partial(call_extension_handler, f=f, extensions=extensions) imagespecs = { "l8": ("numpy", "uint8", "l"), "rgb8": ("numpy", "uint8", "rgb"), "rgba8": ("numpy", "uint8", "rgba"), "l": ("numpy", "float", "l"), "rgb": ("numpy", "float", "rgb"), "rgba": ("numpy", "float", "rgba"), "torchl8": ("torch", "uint8", "l"), "torchrgb8": ("torch", "uint8", "rgb"), "torchrgba8": ("torch", "uint8", "rgba"), "torchl": ("torch", "float", "l"), "torchrgb": ("torch", "float", "rgb"), "torch": ("torch", "float", "rgb"), "torchrgba": ("torch", "float", "rgba"), "pill": ("pil", None, "l"), "pil": ("pil", None, "rgb"), "pilrgb": ("pil", None, "rgb"), "pilrgba": ("pil", None, "rgba"), } class ImageHandler: def __init__(self, imagespec, extensions=image_extensions): checkmember(imagespec, list(imagespecs.keys()), "unknown image specification") self.imagespec = imagespec.lower() self.extensions = extensions def __call__(self, key, data): import PIL.Image extension = re.sub(r".*[.]", "", key) if extension.lower() not in self.extensions: return None imagespec = self.imagespec atype, etype, mode = imagespecs[imagespec] with io.BytesIO(data) as stream: img = PIL.Image.open(stream) img.load() img = img.convert(mode.upper()) if atype == "pil": return img elif atype == "numpy": result = np.asarray(img) checkmember(result.dtype, [np.uint8]) if etype == "uint8": return result else: return result.astype("f") / 255.0 elif atype == "torch": import torch result = np.asarray(img) checkmember(result.dtype, [np.uint8]) if etype == "uint8": result = np.array(result.transpose(2, 0, 1)) return torch.tensor(result) else: result = np.array(result.transpose(2, 0, 1)) return torch.tensor(result) / 255.0 return None def imagehandler(imagespec, extensions=image_extensions): return ImageHandler(imagespec, extensions) def torch_video(key, data): extension = re.sub(r".*[.]", "", key) if extension not in "mp4 ogv mjpeg avi mov h264 mpg webm wmv".split(): return None import torchvision.io with tempfile.TemporaryDirectory() as dirname: fname = os.path.join(dirname, f"file.{extension}") with open(fname, "wb") as stream: 
stream.write(data) return torchvision.io.read_video(fname, pts_unit="sec") def torch_audio(key, data): extension = re.sub(r".*[.]", "", key) if extension not in ["flac", "mp3", "sox", "wav", "m4a", "ogg", "wma"]: return None import torchaudio with tempfile.TemporaryDirectory() as dirname: fname = os.path.join(dirname, f"file.{extension}") with open(fname, "wb") as stream: stream.write(data) return torchaudio.load(fname) class Continue: def __init__(self, key, data): self.key, self.data = key, data def gzfilter(key, data): import gzip if not key.endswith(".gz"): return None decompressed = gzip.open(io.BytesIO(data)).read() return Continue(key[:-3], decompressed) default_pre_handlers = [gzfilter] default_post_handlers = [basichandlers] class Decoder: def __init__(self, handlers, pre=None, post=None, only=None): if isinstance(only, str): only = only.split() self.only = only if only is None else set(only) if pre is None: pre = default_pre_handlers if post is None: post = default_post_handlers assert all(callable(h) for h in handlers), f"one of {handlers} not callable" assert all(callable(h) for h in pre), f"one of {pre} not callable" assert all(callable(h) for h in post), f"one of {post} not callable" self.handlers = pre + handlers + post def decode1(self, key, data): key = "." + key for f in self.handlers: result = f(key, data) if isinstance(result, Continue): key, data = result.key, result.data continue if result is not None: return result return data def decode(self, sample): result = {} assert isinstance(sample, dict), sample for k, v in list(sample.items()): if k[0] == "_": if isinstance(v, bytes): v = v.decode("utf-8") result[k] = v continue if self.only is not None and k not in self.only: result[k] = v continue checknotnone(v) assert isinstance(v, bytes) result[k] = self.decode1(k, v) return result
BSD 3-Clause New or Revised License
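A minimal sketch of calling the Decoder shown above, relying only on the default gzfilter/basichandlers from this record's context (the current webdataset API has since changed, so treat this purely as an illustration of the call path).

decoder = Decoder(handlers=[])  # defaults: gzip pre-handler + basichandlers post-handler
sample = {"__key__": "sample000", "txt": b"hello world", "cls": b"7"}
decoded = decoder(sample)
# Keys starting with "_" pass through unchanged; ".txt" decodes to str, ".cls" to int.
assert decoded["txt"] == "hello world" and decoded["cls"] == 7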
oshlack/mintie
collate/make_supertranscript.py
write_canonical_genes
python
def write_canonical_genes(args, contigs, gtf):
    genes = contigs.overlapping_genes.apply(lambda x: x.split('|'))
    genes = [g.split(':') for gene in genes for g in gene]
    genes = [g for gene in genes for g in gene if g != '' and g not in canonical_genes_written]
    genes = np.unique(np.array(genes))
    logging.info('%d additional canonical genes to write...' % len(genes))
    for gene in genes:
        logging.info('Writing %s' % gene)
        blocks, block_seqs = bedtool_helper.get_merged_exons([gene], gtf, args.fasta, '')
        if len(blocks) == 0:
            continue
        if len(blocks.drop_duplicates()) != len(block_seqs):
            continue
        blocks = sh.sort_blocks(blocks)
        write_gene('', blocks, block_seqs, args, [gene], gtf)
append unmodified reference genes for competitive mapping
https://github.com/oshlack/mintie/blob/88b28687bd31a35df6cf686ea524c5b14d05f3a6/collate/make_supertranscript.py#L364-L382
import numpy as np import pandas as pd import re import sys import logging import os import tempfile import pickle import st_helper as sh import bedtool_helper from pybedtools import BedTool from Bio import SeqIO from argparse import ArgumentParser from utils import cached, init_logging, exit_with_error pd.set_option("mode.chained_assignment", None) EXIT_FILE_IO_ERROR = 1 GTF_COLS = ['chr', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute'] BED_EXT_COLS = ['chr', 'start', 'end', 'name', 'score', 'strand', 'thickStart', 'thickEnd', 'itemRgb'] VARS_TO_ANNOTATE = ['EE','NE','INS','RI','UN','FUS','DEL'] STRAND = r'\(([+-])\)' canonical_genes_written = [] def parse_args(): description = 'Make supertranscript reference' parser = ArgumentParser(description=description) parser.add_argument('--log', metavar='LOG_FILE', type=str, help='record program progress in LOG_FILE') parser.add_argument(dest='contig_info', metavar='CONTIG_INFO', type=str, help='''Contig information for novel contigs.''') parser.add_argument(dest='contig_vcf', metavar='CONTIG_VCF', type=str, help='''Novel variants in VCF format.''') parser.add_argument(dest='gtf_file', metavar='GTF_FILE', type=str, help='''GTF annotation file containing transcript annotations.''') parser.add_argument(dest='fasta', metavar='FASTA', type=str, help='''Genome reference in fasta format.''') parser.add_argument(dest='outdir', metavar='OUTDIR', type=str, help='''Output directory.''') parser.add_argument(dest='sample', metavar='SAMPLE', type=str, help='''Sample name. Used to name bed and supertranscript output files.''') return parser.parse_args() def reverse_complement(seq): lookup = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'} if seq == '': return '' if type(seq) == float and math.isnan(seq): return '' seq = seq[::-1] seq = ''.join([lookup[base.upper()] for base in list(seq)]) return(seq) def get_gene(attribute): try: return attribute['gene_name'] except KeyError: return '' def get_contig_genes(con_info): fus_genes = con_info[con_info.overlapping_genes.str.contains(':')] if len(fus_genes) > 0: fus_genes = np.unique(fus_genes.overlapping_genes) fus_genes = [fg.split(':') for fg in fus_genes][0] assert len(fus_genes) <= 2 return fus_genes[0], fus_genes[1] else: genes = np.unique(con_info.overlapping_genes.values) if len(genes) > 1: logging.info('WARNING: multiple overlapping genes found for contig %s' % con_info.contig_id.values[0]) return genes[0], '' def get_contig_strand(con_info, variant): strand = '.' 
if variant in con_info.variant_id.values: var_info = con_info[con_info.variant_id == variant] strand = re.search(STRAND, var_info.pos1.values[0]).group(1) if variant in con_info.partner_id.values: var_info = con_info[con_info.partner_id == variant] strand = re.search(STRAND, var_info.pos2.values[0]).group(1) return strand def get_gene_strands(gtf, genes): strands = [] for gene in genes: gene = gene.split('|')[0] strand = gtf[gtf.gene == gene].strand.values strand = strand[0] if gene != '' and len(strand) > 0 else '' strands.append(strand) return strands def get_strand_info(con_info, gstrands): if 'FUS' in con_info.variant_type.values: con_fus = con_info[con_info.variant_type == 'FUS'] cs1 = get_contig_strand(con_fus, con_fus.variant_id.values[0]) cs2 = get_contig_strand(con_fus, con_fus.partner_id.values[0]) gs1, gs2 = gstrands if (cs1 != gs1 and cs2 == gs2) or (cs1 == gs1 and cs2 != gs2): return [cs1, cs2] else: return gstrands else: return gstrands def get_output_files(sample, outdir): genome_bed = '%s/%s_genome.bed' % (outdir, sample) st_block_bed = '%s/%s_blocks_supertranscript.bed' % (outdir, sample) st_gene_bed = '%s/%s_genes_supertranscript.bed' % (outdir, sample) st_fasta = '%s/%s_supertranscript.fasta' % (outdir, sample) return genome_bed, st_block_bed, st_gene_bed, st_fasta @cached('gene_gtf.pickle') def load_gtf_file(gtf_file): logging.info('Processing GTF reference file...') gtf = BedTool(gtf_file).remove_invalid().saveas() gene_names = [get_gene(row.attrs) for row in gtf] with tempfile.NamedTemporaryFile(mode='r+') as temp_gtf: gtf.saveas(temp_gtf.name) gtf_pd = pd.read_csv(temp_gtf, header=None, sep='\t', names=GTF_COLS, comment='#', low_memory=False) gtf_pd['gene'] = gene_names alt_chrs = gtf_pd['chr'].str.contains('Un|alt|unknown|random|K') gtf_pd = gtf_pd[np.invert(alt_chrs.values)] gtf_pd_chrs = gtf_pd['chr'].str.contains('chr') if any(gtf_pd_chrs.values): gtf_pd['chr'] = gtf_pd['chr'].apply(lambda a: a.split('chr')[1]) gtf_pd.loc[gtf_pd['chr'] == 'M', 'chr'] = 'MT' if len(gtf_pd[gtf_pd.feature == 'gene']) == 0: aggregator = {'start': lambda x: min(x), 'end': lambda x: max(x)} gene_gtf_pd = gtf_pd.groupby(['chr', 'score', 'strand', 'frame', 'gene'], as_index=False, sort=False).agg(aggregator) gene_gtf_pd['source'] = 'ALL' gene_gtf_pd['attribute'] = '' gene_gtf_pd['feature'] = 'gene' gene_gtf_pd = gene_gtf_pd[GTF_COLS + ['gene']] gtf_pd = gtf_pd.append(gene_gtf_pd) return gtf_pd def load_vcf_file(contig_vcf): cvcf = pd.read_csv(contig_vcf, sep='\t', header=None, comment='#', low_memory=False) vcf_chrs = cvcf[0].map(str).str.contains('chr') if any(vcf_chrs.values): cvcf = cvcf[vcf_chrs] cvcf[0] = cvcf[0].apply(lambda a: a.split('chr')[1]) cvcf.loc[cvcf[0] == 'M', 0] = 'MT' return cvcf def write_supertranscript_genes(blocks, block_bed, gtf, genes, st_gene_bed): seg_starts, seg_ends = block_bed.start, block_bed.end seg_starts.index, seg_ends.index = blocks.index, blocks.index contig_name = block_bed['chr'].values[0] gene_gtf = gtf[gtf.feature == 'gene'] gene_names, gene_starts, gene_ends, gene_strands, gene_cols = [], [], [], [], [] block_strands = [] last_strand = blocks.strand.values[0] for gene in genes: for gn in gene: gn_match = blocks.name.str.contains(gn) if any(gn_match): last_strand = blocks[gn_match].strand.values[0] block_strands.append(last_strand) else: block_strands.append(last_strand) genes = [g for gn in genes for g in gn if g != ''] for gene, block_strand in zip(genes, block_strands): gn = gene_gtf[gene_gtf.gene == gene] if len(gn) == 0: 
logging.info('WARNING: gene %s not found in reference GTF.' % gene) continue start, end = gn.start.values[0] - 1, gn.end.values[0] start_block = blocks[np.logical_and(blocks.start <= start, blocks.end >= start)] end_block = blocks[np.logical_and(blocks.start <= end, blocks.end >= end)] if len(start_block) == 0 or len(end_block) == 0: logging.info('WARNING: part of gene %s sits outside the reference blocks; skipping.') continue start_offset = start - min(start_block.start) end_offset = max(end_block.end) - end ref_strand = gn.strand.values[0] gene_strand = '+' if block_strand == ref_strand else '-' gene_strands.append(gene_strand) antisense = block_strand == '-' tmp = start_block.copy() start_block = end_block if antisense else start_block end_block = tmp if antisense else end_block gene_start = seg_starts[start_block.index[0]] + start_offset gene_end = seg_ends[end_block.index[0]] - end_offset R, G, B = np.random.randint(0, 255, 3) gene_col = '%s,%s,%s' % (R, G, B) gene_starts.append(gene_start) gene_ends.append(gene_end) gene_names.append(gene) gene_cols.append(gene_col) if len(gene_starts) > 0: if len(genes) > 1 and genes[0] == genes[1]: gene_starts[1] = gene_ends[0] gene_ends[1] = gene_ends[1] * 2 bed = pd.DataFrame({'chr': contig_name, 'start': gene_starts, 'end': gene_ends, 'name': gene_names, 'score': '.', 'strand': gene_strands, 'thickStart': gene_starts, 'thickEnd': gene_ends, 'itemRgb': gene_cols}, columns=BED_EXT_COLS) bed.to_csv(st_gene_bed, mode='a', index=False, header=False, sep='\t') def write_gene(contig, blocks, block_seqs, args, genes, gtf): sample = args.sample genome_bed, st_block_bed, st_gene_bed, st_fasta = get_output_files(args.sample, args.outdir) seqs = [] for idx,x in blocks.iterrows(): seq = str(block_seqs['%s:%d-%d(%s)' % (x['chr'], x.start, x.end, x.strand)]) seqs.append(seq) seg_ends = np.cumsum([len(s) for s in seqs]) seg_starts = np.concatenate([[0], seg_ends[:-1]]) segs = ['%s-%s' % (s1+1, s2) for s1,s2 in zip(seg_starts, seg_ends)] gene_name = [gn.split('|')[0] for gn in genes if gn != ''] contig_name = '%s|%s|%s' % (sample, contig, '|'.join(gene_name)) if contig != '' else gene_name[0] names = blocks['name'].apply(lambda x: x.split('|')[-1]).values header = '>%s segs:%s names:%s\n' % (contig_name, ','.join(segs), ','.join(names)) sequence = ''.join(seqs) + '\n' with open(st_fasta, 'a') as st_fasta: st_fasta.writelines([header, sequence]) if len(genes) > 1 and '' not in genes: blocks = blocks.reset_index() gnames = blocks.name.apply(lambda x: x.split('|')[0]) gns, idxs = np.unique(gnames.values, return_index=True) if len(gns) == 1: half_idx = int((len(blocks)/2)) g1 = blocks.index < half_idx g2 = blocks.index >= half_idx else: gene1, gene2 = gns[:2] if idxs[0] < idxs[1] else gns[1::-1] g1 = gnames == gene1 g2 = gnames == gene2 c1 = sh.get_block_colours(blocks[g1], names[g1]) c2 = sh.get_block_colours(blocks[g2], names[g2], alt=True) colours = np.concatenate([c1, c2]) assert len(colours) == len(seg_starts) else: colours = sh.get_block_colours(blocks, names) bed = pd.DataFrame({'chr': contig_name, 'start': seg_starts, 'end': seg_ends, 'name': names, 'score': 0, 'strand': '.', 'thickStart': seg_starts, 'thickEnd': seg_ends, 'itemRgb': colours}, columns=BED_EXT_COLS) bed.to_csv(st_block_bed, mode='a', index=False, header=False, sep='\t') genes = [gn.split('|') for gn in genes if gn != ''] write_supertranscript_genes(blocks, bed, gtf, genes, st_gene_bed)
MIT License
neherlab/treetime
treetime/treetime.py
TreeTime.resolve_polytomies
python
def resolve_polytomies(self, merge_compressed=False):
    self.logger("TreeTime.resolve_polytomies: resolving multiple mergers...", 1)
    poly_found = 0
    for n in self.tree.find_clades():
        if len(n.clades) > 2:
            prior_n_clades = len(n.clades)
            self._poly(n, merge_compressed)
            poly_found += prior_n_clades - len(n.clades)
    obsolete_nodes = [n for n in self.tree.find_clades() if len(n.clades) == 1 and n.up is not None]
    for node in obsolete_nodes:
        self.logger('TreeTime.resolve_polytomies: remove obsolete node ' + node.name, 4)
        if node.up is not None:
            self.tree.collapse(node)
    if poly_found:
        self.logger('TreeTime.resolve_polytomies: introduces %d new nodes' % poly_found, 3)
    else:
        self.logger('TreeTime.resolve_polytomies: No more polytomies to resolve', 3)
    return poly_found
Resolve the polytomies on the tree. The function scans the tree, resolves polytomies if present, and re-optimizes the tree with new topology. Note that polytomies are only resolved if that would result in higher likelihood. Sometimes, stretching two or more branches that carry several mutations is less costly than an additional branch with zero mutations (long branches are not stiff, short branches are).

Parameters
----------
merge_compressed : bool
    If True, keep compressed branches as polytomies. If False, return a strictly binary tree.

Returns
-------
poly_found : int
    The number of polytomies found
https://github.com/neherlab/treetime/blob/a58e854f13cf05e222c93e2319476a53810a8af3/treetime/treetime.py#L504-L546
from __future__ import print_function, division, absolute_import import numpy as np from scipy import optimize as sciopt from Bio import Phylo from . import config as ttconf from . import MissingDataError,UnknownMethodError,NotReadyError from .utils import tree_layout from .clock_tree import ClockTree rerooting_mechanisms = ["min_dev", "best", "least-squares"] deprecated_rerooting_mechanisms = {"residual":"least-squares", "res":"least-squares", "min_dev_ML": "min_dev", "ML":"least-squares"} class TreeTime(ClockTree): def __init__(self, *args,**kwargs): super(TreeTime, self).__init__(*args, **kwargs) def run(self, root=None, infer_gtr=True, relaxed_clock=None, n_iqd = None, resolve_polytomies=True, max_iter=0, Tc=None, fixed_clock_rate=None, time_marginal=False, sequence_marginal=False, branch_length_mode='auto', vary_rate=False, use_covariation=False, tracelog_file=None, **kwargs): self.use_covariation = use_covariation or (vary_rate and (not type(vary_rate)==float)) if (self.tree is None) or (self.aln is None and self.data.full_length is None): raise MissingDataError("TreeTime.run: ERROR, alignment or tree are missing") if self.aln is None: branch_length_mode='input' self._set_branch_length_mode(branch_length_mode) seq_kwargs = {"marginal_sequences":sequence_marginal or (self.branch_length_mode=='marginal'), "branch_length_mode": self.branch_length_mode, "sample_from_profile":"root", "reconstruct_tip_states":kwargs.get("reconstruct_tip_states", False)} tt_kwargs = {'clock_rate':fixed_clock_rate, 'time_marginal':False} tt_kwargs.update(kwargs) seq_LH = 0 if "fixed_pi" in kwargs: seq_kwargs["fixed_pi"] = kwargs["fixed_pi"] if "do_marginal" in kwargs: time_marginal=kwargs["do_marginal"] if self.branch_length_mode=='input': if self.aln: self.infer_ancestral_sequences(infer_gtr=infer_gtr, marginal=seq_kwargs["marginal_sequences"], **seq_kwargs) self.prune_short_branches() else: self.optimize_tree(infer_gtr=infer_gtr, max_iter=1, prune_short=True, **seq_kwargs) avg_root_to_tip = np.mean([x.dist2root for x in self.tree.get_terminals()]) if n_iqd or root=='clock_filter': if "plot_rtt" in kwargs and kwargs["plot_rtt"]: plot_rtt=True else: plot_rtt=False reroot_mechanism = 'least-squares' if root=='clock_filter' else root self.clock_filter(reroot=reroot_mechanism, n_iqd=n_iqd, plot=plot_rtt, fixed_clock_rate=fixed_clock_rate) elif root is not None: self.reroot(root=root, clock_rate=fixed_clock_rate) if self.branch_length_mode=='input': if self.aln: self.infer_ancestral_sequences(**seq_kwargs) else: self.optimize_tree(max_iter=1, prune_short=False,**seq_kwargs) self.logger("###TreeTime.run: INITIAL ROUND",0) self.make_time_tree(**tt_kwargs) if self.aln: seq_LH = self.tree.sequence_marginal_LH if seq_kwargs['marginal_sequences'] else self.tree.sequence_joint_LH self.LH =[[seq_LH, self.tree.positional_joint_LH, 0]] if root is not None and max_iter: new_root = self.reroot(root='least-squares' if root=='clock_filter' else root, clock_rate=fixed_clock_rate) self.logger("###TreeTime.run: rerunning timetree after rerooting",0) self.make_time_tree(**tt_kwargs) niter = 0 ndiff = 0 self.trace_run = [] self.trace_run.append(self.tracelog_run(niter=0, ndiff=0, n_resolved=0, time_marginal = tt_kwargs['time_marginal'], sequence_marginal = seq_kwargs['marginal_sequences'], Tc=None, tracelog=tracelog_file)) need_new_time_tree=False while niter < max_iter: self.logger("###TreeTime.run: ITERATION %d out of %d iterations"%(niter+1,max_iter),0) tmpTc=None if Tc: if Tc=='skyline' and niter<max_iter-1: tmpTc='const' 
else: tmpTc=Tc self.add_coalescent_model(tmpTc, **kwargs) need_new_time_tree = True if relaxed_clock: print("relaxed_clock", relaxed_clock) self.relaxed_clock(**relaxed_clock) need_new_time_tree = True n_resolved=0 if resolve_polytomies: n_resolved = self.resolve_polytomies() if n_resolved: self.prepare_tree() if self.branch_length_mode!='input': self.optimize_tree(prune_short=False, max_iter=0, **seq_kwargs) need_new_time_tree = True if need_new_time_tree: self.make_time_tree(**tt_kwargs) if self.aln: ndiff = self.infer_ancestral_sequences('ml',**seq_kwargs) else: if self.aln: ndiff = self.infer_ancestral_sequences('ml',**seq_kwargs) self.make_time_tree(**tt_kwargs) self.tree.coalescent_joint_LH = self.merger_model.total_LH() if Tc else 0.0 if self.aln: seq_LH = self.tree.sequence_marginal_LH if seq_kwargs['marginal_sequences'] else self.tree.sequence_joint_LH self.LH.append([seq_LH, self.tree.positional_joint_LH, self.tree.coalescent_joint_LH]) self.trace_run.append(self.tracelog_run(niter=niter+1, ndiff=ndiff, n_resolved=n_resolved, time_marginal = tt_kwargs['time_marginal'], sequence_marginal = seq_kwargs['marginal_sequences'], Tc=tmpTc, tracelog=tracelog_file)) niter+=1 if ndiff==0 and n_resolved==0 and Tc!='skyline': self.logger("###TreeTime.run: CONVERGED",0) break if vary_rate: if type(vary_rate)==float: self.calc_rate_susceptibility(rate_std=vary_rate, params=tt_kwargs) elif self.clock_model['valid_confidence']: self.calc_rate_susceptibility(params=tt_kwargs) else: raise UnknownMethodError("TreeTime.run: rate variation for confidence estimation is not available. Either specify it explicitly, or estimate from root-to-tip regression.") if time_marginal: self.logger("###TreeTime.run: FINAL ROUND - confidence estimation via marginal reconstruction", 0) tt_kwargs['time_marginal']=time_marginal self.make_time_tree(**tt_kwargs) self.trace_run.append(self.tracelog_run(niter=niter+1, ndiff=0, n_resolved=0, time_marginal = tt_kwargs['time_marginal'], sequence_marginal = seq_kwargs['marginal_sequences'], Tc=Tc, tracelog=tracelog_file)) bad_branches =[n for n in self.tree.get_terminals() if n.bad_branch and n.raw_date_constraint] if bad_branches: self.logger("TreeTime: the following tips have been marked as outliers. Their date constraints were not used. " "Please remove them from the tree. 
Their dates have been reset:",0,warn=True) for n in bad_branches: self.logger("%s, input date: %s, apparent date: %1.2f"%(n.name, str(n.raw_date_constraint), n.numdate),0,warn=True) return ttconf.SUCCESS def _set_branch_length_mode(self, branch_length_mode): if branch_length_mode in ['joint', 'marginal', 'input']: self.branch_length_mode = branch_length_mode elif self.aln: bl_dis = [n.branch_length for n in self.tree.find_clades() if n.up] max_bl = np.max(bl_dis) if max_bl>0.1: bl_mode = 'input' else: bl_mode = 'joint' self.logger("TreeTime._set_branch_length_mode: maximum branch length is %1.3e, using branch length mode %s"%(max_bl, bl_mode),1) self.branch_length_mode = bl_mode else: self.branch_length_mode = 'input' def clock_filter(self, reroot='least-squares', n_iqd=None, plot=False, fixed_clock_rate=None): if n_iqd is None: n_iqd = ttconf.NIQD if type(reroot) is list and len(reroot)==1: reroot=str(reroot[0]) terminals = self.tree.get_terminals() if reroot: self.reroot(root='least-squares' if reroot=='best' else reroot, covariation=False, clock_rate=fixed_clock_rate) else: self.get_clock_model(covariation=False, slope=fixed_clock_rate) clock_rate = self.clock_model['slope'] icpt = self.clock_model['intercept'] res = {} for node in terminals: if hasattr(node, 'raw_date_constraint') and (node.raw_date_constraint is not None): res[node] = node.dist2root - clock_rate*np.mean(node.raw_date_constraint) - icpt residuals = np.array(list(res.values())) iqd = np.percentile(residuals,75) - np.percentile(residuals,25) bad_branch_count = 0 for node,r in res.items(): if abs(r)>n_iqd*iqd and node.up.up is not None: self.logger('TreeTime.ClockFilter: marking %s as outlier, residual %f interquartile distances'%(node.name,r/iqd), 3, warn=True) node.bad_branch=True bad_branch_count += 1 else: node.bad_branch=False if bad_branch_count>0.34*self.tree.count_terminals(): self.logger("TreeTime.clock_filter: More than a third of leaves have been excluded by the clock filter. 
Please check your input data.", 0, warn=True) self.prepare_tree() if reroot: self.reroot(root=reroot, clock_rate=fixed_clock_rate) if plot: self.plot_root_to_tip() return ttconf.SUCCESS def plot_root_to_tip(self, add_internal=False, label=True, ax=None): Treg = self.setup_TreeRegression() if self.clock_model and 'cov' in self.clock_model: cf = self.clock_model['valid_confidence'] else: cf = False Treg.clock_plot(ax=ax, add_internal=add_internal, confidence=cf, n_sigma=1, regression=self.clock_model) def reroot(self, root='least-squares', force_positive=True, covariation=None, clock_rate=None): if type(root) is list and len(root)==1: root=str(root[0]) if root=='best': root='least-squares' use_cov = self.use_covariation if covariation is None else covariation slope = 0.0 if type(root)==str and root.startswith('min_dev') else clock_rate old_root = self.tree.root self.logger("TreeTime.reroot: with method or node: %s"%root,0) for n in self.tree.find_clades(): n.branch_length=n.mutation_length if (type(root) is str) and (root in rerooting_mechanisms or root in deprecated_rerooting_mechanisms): if root in deprecated_rerooting_mechanisms: if "ML" in root: use_cov=True self.logger('TreeTime.reroot: rerooting mechanisms %s has been renamed to %s' %(root, deprecated_rerooting_mechanisms[root]), 1, warn=True) root = deprecated_rerooting_mechanisms[root] self.logger("TreeTime.reroot: rerooting will %s covariance and shared ancestry."%("account for" if use_cov else "ignore"),0) new_root = self._find_best_root(covariation=use_cov, slope = slope, force_positive=force_positive and (not root.startswith('min_dev'))) else: if isinstance(root,Phylo.BaseTree.Clade): new_root = root elif isinstance(root, list): new_root = self.tree.common_ancestor(root) elif root in self._leaves_lookup: new_root = self._leaves_lookup[root] elif root=='oldest': new_root = sorted([n for n in self.tree.get_terminals() if n.raw_date_constraint is not None], key=lambda x:np.mean(x.raw_date_constraint))[0] else: raise UnknownMethodError('TreeTime.reroot -- ERROR: unsupported rooting mechanisms or root not found') self.tree.root_with_outgroup(new_root, outgroup_branch_length=new_root.branch_length/2) self.get_clock_model(covariation=use_cov, slope = slope) self.logger("TreeTime.reroot: Tree was re-rooted to node " +('new_node' if new_root.name is None else new_root.name), 2) self.tree.root.branch_length = self.one_mutation self.tree.root.clock_length = self.one_mutation self.tree.root.raw_date_constraint = None if hasattr(new_root, 'time_before_present'): self.tree.root.time_before_present = new_root.time_before_present if hasattr(new_root, 'numdate'): self.tree.root.numdate = new_root.numdate if not hasattr(self.tree.root, 'gamma'): self.tree.root.gamma = 1.0 for n in self.tree.find_clades(): n.mutation_length = n.branch_length if not hasattr(n, 'clock_length'): n.clock_length = n.branch_length self.prepare_tree() self.get_clock_model(covariation=self.use_covariation, slope=slope) return new_root
MIT License
lttm/gmnet
Deeplab/research/deeplab/utils/general_utils.py
list_parts_names
python
def list_parts_names(num_parts):
    list_parts = []
    if num_parts == 108:
        list_parts = ['background', 'aeroplane_body', 'aeroplane_stern', 'aeroplane_rwing', 'aeroplane_engine', 'aeroplane_wheel',
                      'bicycle_fwheel', 'bicycle_saddle', 'bicycle_handlebar', 'bicycle_chainwheel',
                      'birds_head', 'birds_beak', 'birds_torso', 'birds_neck', 'birds_rwing', 'birds_rleg', 'birds_rfoot', 'birds_tail',
                      'boat', 'bottle_cap', 'bottle_body',
                      'bus_rightside', 'bus_roofside', 'bus_rightmirror', 'bus_fliplate', 'bus_door', 'bus_wheel', 'bus_headlight', 'bus_window',
                      'car_rightside', 'car_roofside', 'car_fliplate', 'car_door', 'car_wheel', 'car_headlight', 'car_window',
                      'cat_head', 'cat_reye', 'cat_rear', 'cat_nose', 'cat_torso', 'cat_neck', 'cat_rfleg', 'cat_rfpa', 'cat_tail',
                      'chair',
                      'cow_head', 'cow_rear', 'cow_muzzle', 'cow_rhorn', 'cow_torso', 'cow_neck', 'cow_rfuleg', 'cow_tail',
                      'dining_table',
                      'dog_head', 'dog_reye', 'dog_rear', 'dog_nose', 'dog_torso', 'dog_neck', 'dog_rfleg', 'dog_rfpa', 'dog_tail', 'dog_muzzle',
                      'horse_head', 'horse_rear', 'horse_muzzle', 'horse_torso', 'horse_neck', 'horse_rfuleg', 'horse_tail', 'horse_rfho',
                      'motorbike_fwheel', 'motorbike_handlebar', 'motorbike_saddle', 'motorbike_headlight',
                      'person_head', 'person_reye', 'person_rear', 'person_nose', 'person_mouth', 'person_hair', 'person_torso',
                      'person_neck', 'person_ruarm', 'person_rhand', 'person_ruleg', 'person_rfoot',
                      'pottedplant_pot', 'pottedplant_plant',
                      'sheep_head', 'sheep_rear', 'sheep_muzzle', 'sheep_rhorn', 'sheep_torso', 'sheep_neck', 'sheep_rfuleg', 'sheep_tail',
                      'sofa',
                      'train_head', 'train_hrightside', 'train_hroofside', 'train_headlight', 'train_coach', 'train_crightside', 'train_croofside',
                      'tvmonitor_screen']
    elif num_parts == 58:
        list_parts = ['background', 'aeroplane_body', 'aeroplane_engine', 'aeroplane_wing', 'aeroplane_stern', 'aeroplane_wheel',
                      'bycicle_wheel', 'bycicle_body',
                      'bird_head', 'bird_wing', 'bird_leg', 'bird_torso',
                      'boat', 'bottle_cap', 'bottle_body',
                      'bus_window', 'bus_wheel', 'bus_body',
                      'car_window', 'car_wheel', 'car_light', 'car_plate', 'car_body',
                      'cat_head', 'cat_leg', 'cat_tail', 'cat_torso',
                      'chair',
                      'cow_head', 'cow_tail', 'cow_leg', 'cow_torso',
                      'dining_table',
                      'dog_head', 'dog_leg', 'dog_tail', 'dog_torso',
                      'horse_head', 'horse_tail', 'horse_leg', 'horse_torso',
                      'motorbike_wheel', 'motorbike_body',
                      'person_head', 'person_torso', 'person_larm', 'person_uarm', 'person_lleg', 'person_uleg',
                      'pottedplant_pot', 'pottedplant_plant',
                      'sheep_head', 'sheep_leg', 'sheep_torso',
                      'sofa', 'train', 'tvmonitor_screen', 'tvmonitor_frame']
    elif num_parts == 7:
        list_parts = ['background', 'person_head', 'person_torso', 'person_uarm', 'person_larm', 'person_uleg', 'person_lleg']
    elif num_parts == 21:
        list_parts = ['background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
                      'table', 'dog', 'horse', 'motorbike', 'person', 'potted_plant', 'sheep', 'sofa', 'train', 'tv']
    elif num_parts == 2:
        list_parts = ['background', 'person']
    else:
        print_message(FilesName.GENERAL_UTILS, "Error: num_parts = " + str(num_parts), MessageType.FAIL)
        exit(-1)
    return list_parts
:return: list parts name
https://github.com/lttm/gmnet/blob/e17959eb219e1884e2be271c9244ba284c2f4ffa/Deeplab/research/deeplab/utils/general_utils.py#L157-L230
import sys import numpy as np import os import cv2 import itertools import glob import random from tqdm import tqdm import tensorflow as tf from datetime import datetime from scipy.io import loadmat from openpyxl import Workbook from openpyxl.styles import PatternFill class MessageType: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' class FilesName: TRAIN = "[TRAIN] " EVAL = "[EVAL] " VIS = "[VIS] " GENERAL_UTILS = "[GENERAL_UTILS] " def print_message(filename, message, mess_type=""): if mess_type == "": print(filename, MessageType.ENDC, message, MessageType.ENDC) else: print(filename, mess_type, message, MessageType.ENDC) def map_parts_to_classes(num_classes, num_parts): map_pc = [] if num_classes == 2: if num_parts == 2: map_pc = [ [0, 1], [1, 2], ] elif num_parts == 7: map_pc = [ [0, 1], [1, 7], ] else: print_message(FilesName.GENERAL_UTILS, "Error: num_classes = " + str(num_classes) + " and num_parts = " + str(num_parts), MessageType.FAIL) exit(-1) elif num_classes == 21: if num_parts == 21: map_pc = [ [0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12], [12, 13], [13, 14], [14, 15], [15, 16], [16, 17], [17, 18], [18, 19], [19, 20], [20, 21], ] elif num_parts == 58: map_pc = [ [0, 1], [1, 6], [6, 8], [8, 12], [12, 13], [13, 15], [15, 18], [18, 23], [23, 27], [27, 28], [28, 32], [32, 33], [33, 37], [37, 41], [41, 43], [43, 49], [49, 51], [51, 54], [54, 55], [55, 56], [56, 58], ] elif num_parts == 108: map_pc = [ [0, 1], [1, 6], [6, 10], [10, 18], [18, 19], [19, 21], [21, 29], [29, 36], [36, 45], [45, 46], [46, 54], [54, 55], [55, 65], [65, 73], [73, 77], [77, 89], [89, 91], [91, 99], [99, 100], [100, 107], [107, 108], ] else: print_message(FilesName.GENERAL_UTILS, "Error: num_classes = " + str(num_classes) + " and num_parts = " + str(num_parts), MessageType.FAIL) exit(-1) return map_pc
Apache License 2.0
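A quick usage example; the function simply maps a supported part count to its label list and aborts on anything else.

person_parts = list_parts_names(7)
assert person_parts == ['background', 'person_head', 'person_torso',
                        'person_uarm', 'person_larm', 'person_uleg', 'person_lleg']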
cohorte/cohorte-runtime
python/cohorte/utils/statemachine.py
State.copy
python
def copy(self):
    state = State(self.__name, self.__data, self.__callback)
    state.__transitions = self.__transitions.copy()
    return state
Makes a copy of this state
https://github.com/cohorte/cohorte-runtime/blob/686556cdde20beba77ae202de9969be46feed5e2/python/cohorte/utils/statemachine.py#L76-L85
import logging

import cohorte.version

__version__ = cohorte.version.__version__


class State(object):
    def __init__(self, name, data=None, callback=None):
        self.__name = name
        self.__data = data
        self.__callback = callback
        self.__transitions = {}

    def __str__(self):
        return "{{{0}}}".format(self.__name)

    @property
    def data(self):
        return self.__data

    @property
    def name(self):
        return self.__name
Apache License 2.0
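A small usage sketch built on the State class from this record's context; copy() duplicates the (shallow) transition table so the clone can be modified independently of the original.

original = State("running", data={"retries": 3})
clone = original.copy()
assert clone.name == "running"
assert clone.data == {"retries": 3}
# The clone holds its own copy of the transition dict, not a shared reference.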
jonathanfeng/new_horizons
venv/lib/python3.7/site-packages/jinja2/parser.py
Parser.parse_expression
python
def parse_expression(self, with_condexpr=True):
    if with_condexpr:
        return self.parse_condexpr()
    return self.parse_or()
Parse an expression. Per default all expressions are parsed, if the optional `with_condexpr` parameter is set to `False` conditional expressions are not parsed.
https://github.com/jonathanfeng/new_horizons/blob/0ec21c8f8423932611e1e0bf24548dcef912bc54/venv/lib/python3.7/site-packages/jinja2/parser.py#L442-L449
from . import nodes from ._compat import imap from .exceptions import TemplateAssertionError from .exceptions import TemplateSyntaxError from .lexer import describe_token from .lexer import describe_token_expr _statement_keywords = frozenset( [ "for", "if", "block", "extends", "print", "macro", "include", "from", "import", "set", "with", "autoescape", ] ) _compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"]) _math_nodes = { "add": nodes.Add, "sub": nodes.Sub, "mul": nodes.Mul, "div": nodes.Div, "floordiv": nodes.FloorDiv, "mod": nodes.Mod, } class Parser(object): def __init__(self, environment, source, name=None, filename=None, state=None): self.environment = environment self.stream = environment._tokenize(source, name, filename, state) self.name = name self.filename = filename self.closed = False self.extensions = {} for extension in environment.iter_extensions(): for tag in extension.tags: self.extensions[tag] = extension.parse self._last_identifier = 0 self._tag_stack = [] self._end_token_stack = [] def fail(self, msg, lineno=None, exc=TemplateSyntaxError): if lineno is None: lineno = self.stream.current.lineno raise exc(msg, lineno, self.name, self.filename) def _fail_ut_eof(self, name, end_token_stack, lineno): expected = [] for exprs in end_token_stack: expected.extend(imap(describe_token_expr, exprs)) if end_token_stack: currently_looking = " or ".join( "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1] ) else: currently_looking = None if name is None: message = ["Unexpected end of template."] else: message = ["Encountered unknown tag '%s'." % name] if currently_looking: if name is not None and name in expected: message.append( "You probably made a nesting mistake. Jinja " "is expecting this tag, but currently looking " "for %s." % currently_looking ) else: message.append( "Jinja was looking for the following tags: " "%s." % currently_looking ) if self._tag_stack: message.append( "The innermost block that needs to be " "closed is '%s'." 
% self._tag_stack[-1] ) self.fail(" ".join(message), lineno) def fail_unknown_tag(self, name, lineno=None): return self._fail_ut_eof(name, self._end_token_stack, lineno) def fail_eof(self, end_tokens=None, lineno=None): stack = list(self._end_token_stack) if end_tokens is not None: stack.append(end_tokens) return self._fail_ut_eof(None, stack, lineno) def is_tuple_end(self, extra_end_rules=None): if self.stream.current.type in ("variable_end", "block_end", "rparen"): return True elif extra_end_rules is not None: return self.stream.current.test_any(extra_end_rules) return False def free_identifier(self, lineno=None): self._last_identifier += 1 rv = object.__new__(nodes.InternalName) nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno) return rv def parse_statement(self): token = self.stream.current if token.type != "name": self.fail("tag name expected", token.lineno) self._tag_stack.append(token.value) pop_tag = True try: if token.value in _statement_keywords: return getattr(self, "parse_" + self.stream.current.value)() if token.value == "call": return self.parse_call_block() if token.value == "filter": return self.parse_filter_block() ext = self.extensions.get(token.value) if ext is not None: return ext(self) self._tag_stack.pop() pop_tag = False self.fail_unknown_tag(token.value, token.lineno) finally: if pop_tag: self._tag_stack.pop() def parse_statements(self, end_tokens, drop_needle=False): self.stream.skip_if("colon") self.stream.expect("block_end") result = self.subparse(end_tokens) if self.stream.current.type == "eof": self.fail_eof(end_tokens) if drop_needle: next(self.stream) return result def parse_set(self): lineno = next(self.stream).lineno target = self.parse_assign_target(with_namespace=True) if self.stream.skip_if("assign"): expr = self.parse_tuple() return nodes.Assign(target, expr, lineno=lineno) filter_node = self.parse_filter(None) body = self.parse_statements(("name:endset",), drop_needle=True) return nodes.AssignBlock(target, filter_node, body, lineno=lineno) def parse_for(self): lineno = self.stream.expect("name:for").lineno target = self.parse_assign_target(extra_end_rules=("name:in",)) self.stream.expect("name:in") iter = self.parse_tuple( with_condexpr=False, extra_end_rules=("name:recursive",) ) test = None if self.stream.skip_if("name:if"): test = self.parse_expression() recursive = self.stream.skip_if("name:recursive") body = self.parse_statements(("name:endfor", "name:else")) if next(self.stream).value == "endfor": else_ = [] else: else_ = self.parse_statements(("name:endfor",), drop_needle=True) return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno) def parse_if(self): node = result = nodes.If(lineno=self.stream.expect("name:if").lineno) while 1: node.test = self.parse_tuple(with_condexpr=False) node.body = self.parse_statements(("name:elif", "name:else", "name:endif")) node.elif_ = [] node.else_ = [] token = next(self.stream) if token.test("name:elif"): node = nodes.If(lineno=self.stream.current.lineno) result.elif_.append(node) continue elif token.test("name:else"): result.else_ = self.parse_statements(("name:endif",), drop_needle=True) break return result def parse_with(self): node = nodes.With(lineno=next(self.stream).lineno) targets = [] values = [] while self.stream.current.type != "block_end": if targets: self.stream.expect("comma") target = self.parse_assign_target() target.set_ctx("param") targets.append(target) self.stream.expect("assign") values.append(self.parse_expression()) node.targets = targets node.values 
= values node.body = self.parse_statements(("name:endwith",), drop_needle=True) return node def parse_autoescape(self): node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno) node.options = [nodes.Keyword("autoescape", self.parse_expression())] node.body = self.parse_statements(("name:endautoescape",), drop_needle=True) return nodes.Scope([node]) def parse_block(self): node = nodes.Block(lineno=next(self.stream).lineno) node.name = self.stream.expect("name").value node.scoped = self.stream.skip_if("name:scoped") if self.stream.current.type == "sub": self.fail( "Block names in Jinja have to be valid Python " "identifiers and may not contain hyphens, use an " "underscore instead." ) node.body = self.parse_statements(("name:endblock",), drop_needle=True) self.stream.skip_if("name:" + node.name) return node def parse_extends(self): node = nodes.Extends(lineno=next(self.stream).lineno) node.template = self.parse_expression() return node def parse_import_context(self, node, default): if self.stream.current.test_any( "name:with", "name:without" ) and self.stream.look().test("name:context"): node.with_context = next(self.stream).value == "with" self.stream.skip() else: node.with_context = default return node def parse_include(self): node = nodes.Include(lineno=next(self.stream).lineno) node.template = self.parse_expression() if self.stream.current.test("name:ignore") and self.stream.look().test( "name:missing" ): node.ignore_missing = True self.stream.skip(2) else: node.ignore_missing = False return self.parse_import_context(node, True) def parse_import(self): node = nodes.Import(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect("name:as") node.target = self.parse_assign_target(name_only=True).name return self.parse_import_context(node, False) def parse_from(self): node = nodes.FromImport(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect("name:import") node.names = [] def parse_context(): if self.stream.current.value in ( "with", "without", ) and self.stream.look().test("name:context"): node.with_context = next(self.stream).value == "with" self.stream.skip() return True return False while 1: if node.names: self.stream.expect("comma") if self.stream.current.type == "name": if parse_context(): break target = self.parse_assign_target(name_only=True) if target.name.startswith("_"): self.fail( "names starting with an underline can not be imported", target.lineno, exc=TemplateAssertionError, ) if self.stream.skip_if("name:as"): alias = self.parse_assign_target(name_only=True) node.names.append((target.name, alias.name)) else: node.names.append(target.name) if parse_context() or self.stream.current.type != "comma": break else: self.stream.expect("name") if not hasattr(node, "with_context"): node.with_context = False return node def parse_signature(self, node): node.args = args = [] node.defaults = defaults = [] self.stream.expect("lparen") while self.stream.current.type != "rparen": if args: self.stream.expect("comma") arg = self.parse_assign_target(name_only=True) arg.set_ctx("param") if self.stream.skip_if("assign"): defaults.append(self.parse_expression()) elif defaults: self.fail("non-default argument follows default argument") args.append(arg) self.stream.expect("rparen") def parse_call_block(self): node = nodes.CallBlock(lineno=next(self.stream).lineno) if self.stream.current.type == "lparen": self.parse_signature(node) else: node.args = [] node.defaults = [] node.call = self.parse_expression() if 
not isinstance(node.call, nodes.Call): self.fail("expected call", node.lineno) node.body = self.parse_statements(("name:endcall",), drop_needle=True) return node def parse_filter_block(self): node = nodes.FilterBlock(lineno=next(self.stream).lineno) node.filter = self.parse_filter(None, start_inline=True) node.body = self.parse_statements(("name:endfilter",), drop_needle=True) return node def parse_macro(self): node = nodes.Macro(lineno=next(self.stream).lineno) node.name = self.parse_assign_target(name_only=True).name self.parse_signature(node) node.body = self.parse_statements(("name:endmacro",), drop_needle=True) return node def parse_print(self): node = nodes.Output(lineno=next(self.stream).lineno) node.nodes = [] while self.stream.current.type != "block_end": if node.nodes: self.stream.expect("comma") node.nodes.append(self.parse_expression()) return node def parse_assign_target( self, with_tuple=True, name_only=False, extra_end_rules=None, with_namespace=False, ): if with_namespace and self.stream.look().type == "dot": token = self.stream.expect("name") next(self.stream) attr = self.stream.expect("name") target = nodes.NSRef(token.value, attr.value, lineno=token.lineno) elif name_only: token = self.stream.expect("name") target = nodes.Name(token.value, "store", lineno=token.lineno) else: if with_tuple: target = self.parse_tuple( simplified=True, extra_end_rules=extra_end_rules ) else: target = self.parse_primary() target.set_ctx("store") if not target.can_assign(): self.fail( "can't assign to %r" % target.__class__.__name__.lower(), target.lineno ) return target
MIT License
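The parser is normally driven through Environment.parse(); a quick way to observe parse_expression() handling a conditional expression (assumes Jinja2 is installed):

from jinja2 import Environment

ast = Environment().parse("{{ 'yes' if flag else 'no' }}")
# With with_condexpr=True (the default), parse_expression() delegates to
# parse_condexpr(), so the template body wraps a CondExpr node.
print(ast)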
romeodespres/reapy
reapy/core/track/track.py
Track.is_selected
python
def is_selected(self, selected):
    if selected:
        self.select()
    else:
        self.unselect()
Select or unselect track.

Parameters
----------
selected : bool
    Whether to select or unselect track.
https://github.com/romeodespres/reapy/blob/730627cee6f39fc26d6ebc8a3df0112e5921cd9f/reapy/core/track/track.py#L424-L436
import reapy from reapy import reascript_api as RPR from reapy.core import ReapyObject, ReapyObjectList from reapy.errors import InvalidObjectError, UndefinedEnvelopeError class Track(ReapyObject): def __init__(self, id, project=None): self._project = None if isinstance(id, int): id = RPR.GetTrack(project.id, id) if id.endswith("0x0000000000000000"): raise IndexError('Track index out of range') self._project = project elif isinstance(id, str) and not id.startswith("(MediaTrack*)"): name = id id = project._get_track_by_name(name).id if id.endswith("0x0000000000000000"): raise KeyError(name) self._project = project self.id = id @property def _args(self): return self.id, @classmethod def _get_id_from_pointer(cls, pointer): return '(MediaTrack*)0x{0:0{1}X}'.format(int(pointer), 16) @reapy.inside_reaper() def _get_project(self): for project in reapy.get_projects(): if self.id in [t.id for t in project.tracks]: return project def add_audio_accessor(self): audio_accessor_id = RPR.CreateTrackAudioAccessor(self.id) audio_accessor = reapy.AudioAccessor(audio_accessor_id) return audio_accessor def add_fx(self, name, input_fx=False, even_if_exists=True): index = RPR.TrackFX_AddByName( self.id, name, input_fx, 1 - 2 * even_if_exists ) if index == -1: raise ValueError("Can't find FX named {}".format(name)) fx = reapy.FX(self, index) return fx @reapy.inside_reaper() def add_item(self, start=0, end=None, length=0): if end is None: end = start + length item = reapy.Item(RPR.AddMediaItemToTrack(self.id)) item.position = start item.length = end - start return item def add_midi_item(self, start=0, end=1, quantize=False): item_id = RPR.CreateNewMIDIItemInProj(self.id, start, end, quantize)[0] item = reapy.Item(item_id) return item def add_send(self, destination=None): if isinstance(destination, Track): destination = destination.id send_id = RPR.CreateTrackSend(self.id, destination) type = "hardware" if destination is None else "send" send = reapy.Send(self, send_id, type=type) return send @property def automation_mode(self): modes = "trim/read", "read", "touch", "write", "latch", "latch preview" automation_mode = modes[RPR.GetTrackAutomationMode(self.id)] return automation_mode @automation_mode.setter def automation_mode(self, mode): modes = "trim/read", "read", "touch", "write", "latch", "latch preview" RPR.SetTrackAutomationMode(self.id, modes.index(mode)) @property def color(self): native_color = RPR.GetTrackColor(self.id) r, g, b = reapy.rgb_from_native(native_color) return r, g, b @color.setter def color(self, color): native_color = reapy.rgb_to_native(color) RPR.SetTrackColor(self.id, native_color) def delete(self): RPR.DeleteTrack(self.id) @property def depth(self): depth = RPR.GetTrackDepth(self.id) return depth @property def envelopes(self): return reapy.EnvelopeList(self) @property def fxs(self): fxs = reapy.FXList(self) return fxs def get_info_string(self, param_name): return RPR.GetSetMediaTrackInfo_String(self.id, param_name, "", False)[3] def get_info_value(self, param_name): value = RPR.GetMediaTrackInfo_Value(self.id, param_name) return value @property def GUID(self): return RPR.GetTrackGUID(self.id) @GUID.setter def GUID(self, guid_string): self.set_info_string("GUID", guid_string) @reapy.inside_reaper() @property def has_valid_id(self): pointer, name = self._get_pointer_and_name() if self._project is None: return any( RPR.ValidatePtr2(p.id, pointer, name) for p in reapy.get_projects() ) return bool(RPR.ValidatePtr2(self.project.id, pointer, name)) @property def icon(self): return 
self.get_info_string("P_ICON") @icon.setter def icon(self, filename): self.set_info_string("P_ICON", filename) @property def index(self): index = int(self.get_info_value('IP_TRACKNUMBER')) - 1 if index >= 0: return index if index == -1: raise InvalidObjectError(self) @property def instrument(self): fx_index = RPR.TrackFX_GetInstrument(self.id) instrument = None if fx_index == -1 else reapy.FX(self, fx_index) return instrument @reapy.inside_reaper() @property def items(self): n_items = RPR.CountTrackMediaItems(self.id) item_ids = [ RPR.GetTrackMediaItem(self.id, i) for i in range(n_items) ] return list(map(reapy.Item, item_ids)) @property def is_muted(self): return bool(self.get_info_value("B_MUTE")) @is_muted.setter def is_muted(self, muted): if muted: self.mute() else: self.unmute() @property def is_selected(self): is_selected = bool(RPR.IsTrackSelected(self.id)) return is_selected @is_selected.setter
MIT License
quattor/aquilon
tests/broker/personalitytest.py
PersonalityTestMixin.create_personality
python
def create_personality(self, archetype, name, environment="dev", grn="grn:/ms/ei/aquilon/unittest", staged=False, comments=None, maps=None, required=None, cluster_required=None, config_override=None): command = ["add_personality", "--archetype", archetype, "--personality", name, "--grn", grn, "--host_environment", environment] if cluster_required or (cluster_required is None and archetype in clustered_archetypes): command.append("--cluster_required") if staged is not None: if staged: command.append("--staged") else: command.append("--unstaged") if config_override: command.append("--config_override") if comments: command.extend(["--comments", comments]) self.noouttest(command) self.setup_personality(archetype, name, maps=maps, required=required)
Create the given personality with reasonable defaults.
https://github.com/quattor/aquilon/blob/6562ea0f224cda33b72a6f7664f48d65f96bd41a/tests/broker/personalitytest.py#L70-L96
default_parameters = { 'aquilon': { "espinfo/function": "development", "espinfo/class": "INFRASTRUCTURE", "espinfo/users": "IT / TECHNOLOGY", "windows/windows": '[{"duration": 8, "start": "08:00", "day": "Sun"}]', }, 'esx_cluster': { "espinfo/class": "INFRASTRUCTURE", "windows/windows": '[{"duration": 8, "start": "08:00", "day": "Sun"}]', }, 'hacluster': { "espinfo/class": "INFRASTRUCTURE", }, 'vmhost': { "espinfo/function": "development", "espinfo/class": "INFRASTRUCTURE", "espinfo/users": "IT / TECHNOLOGY", "windows/windows": '[{"duration": 8, "start": "08:00", "day": "Sun"}]', }, } clustered_archetypes = ["vmhost"] class PersonalityTestMixin(object): def setup_personality(self, archetype, name, maps=None, required=None): if archetype in default_parameters: for path, value in default_parameters[archetype].items(): command = ["add_parameter", "--archetype", archetype, "--personality", name, "--path", path, "--value", value] self.noouttest(command) if required: for service in required: command = ["add_required_service", "--service", service, "--archetype", archetype, "--personality", name] self.noouttest(command) if maps: for service, mappings in maps.items(): for instance, locations in mappings.items(): for loc_type, loc_names in locations.items(): for loc_name in loc_names: self.noouttest(["map_service", "--service", service, "--instance", instance, "--" + loc_type, loc_name, "--personality", name, "--archetype", archetype])
Apache License 2.0
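A hedged sketch of a broker test using create_personality(); the imports and the TestBrokerCommand harness class are assumed from the surrounding test suite, and the archetype, service, instance and building names are illustrative only. Only the keyword structure of maps and required is taken from setup_personality above.
from brokertest import TestBrokerCommand          # assumed test harness module
from personalitytest import PersonalityTestMixin


class TestAddWebPersonality(PersonalityTestMixin, TestBrokerCommand):

    def test_100_add_personality(self):
        # Drives `add_personality`, then adds default parameters,
        # required services and service mappings via setup_personality().
        self.create_personality(
            "aquilon", "unittest-web",
            environment="dev",
            required=["dns"],
            maps={"dns": {"unittest": {"building": ["ut"]}}},
        )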
autosoft-dev/tree-hugger
tree_hugger/core/parser/php/php_parser.py
PHPParser.get_all_method_documentations
python
def get_all_method_documentations(self) -> Dict[str, str]: return self.get_all_method_phpdocs()
Returns a dict where method names are the keys and the comment docs are the values. Excludes plain functions, i.e., functions defined outside a class.
https://github.com/autosoft-dev/tree-hugger/blob/750de305c3efbc0c9440f858a39d33697d04d49f/tree_hugger/core/parser/php/php_parser.py#L144-L150
import re from typing import List, Dict from pathlib import Path from tree_sitter import Tree, Node, TreeCursor from tree_hugger.core.code_parser import BaseParser, match_from_span from tree_hugger.core.queries import Query class PHPParser(BaseParser): QUERY_FILE_PATH = Path(__file__).parent / "queries.yml" def __init__(self, library_loc: str=None, query_file_path: str=None): super(PHPParser, self).__init__('php', 'php_queries', PHPParser.QUERY_FILE_PATH, library_loc) def get_all_function_names(self) -> List[str]: captures = self._run_query_and_get_captures('all_function_names', self.root_node) all_funcs = set([match_from_span(n[0], self.splitted_code) for n in captures]) return list(all_funcs) def get_all_class_method_names(self) -> List[str]: captures = self._run_query_and_get_captures('all_class_methods', self.root_node) ret_struct = {} current_key = "" for tpl in captures: if tpl[1] == "class.name": current_key = match_from_span(tpl[0], self.splitted_code) ret_struct[current_key] = [] continue else: ret_struct[current_key].append(match_from_span(tpl[0], self.splitted_code)) return ret_struct def get_all_class_names(self) -> List[str]: captures = self._run_query_and_get_captures('all_class_names', self.root_node) return [match_from_span(t[0], self.splitted_code) for t in captures] def get_all_function_bodies(self) -> Dict[str, str]: function_names = self.get_all_function_names() captures = self._run_query_and_get_captures('all_function_bodies', self.root_node) function_bodies = {} for i in range(0, len(captures), 2): func_name = match_from_span(captures[i][0], self.splitted_code) if func_name in function_names: function_bodies[func_name] = match_from_span(captures[i+1][0], self.splitted_code) return function_bodies def get_all_function_names_with_params(self) -> Dict[str, str]: captures = self._run_query_and_get_captures('all_function_names_and_params', self.root_node) ret_struct = {} for i in range(0, len(captures), 2): func_name = match_from_span(captures[i][0], self.splitted_code) ret_struct[func_name] = [] for param in captures[i+1][0].children: if param.type == "simple_parameter": name = match_from_span( param.child_by_field_name("name").children[1], self.splitted_code ) node_typ = param.child_by_field_name("type") typ = match_from_span(node_typ, self.splitted_code) if node_typ else None node_value = param.child_by_field_name("default_value") value = match_from_span(node_value, self.splitted_code) if node_value else None elif param.type == "variadic_parameter": name = match_from_span( param.child_by_field_name("name").children[1], self.splitted_code ) typ = match_from_span( param.child_by_field_name("type"), self.splitted_code ) value = None else: continue ret_struct[func_name].append((name,typ,value)) return ret_struct def _walk_recursive_phpdoc(self, cursor: TreeCursor, lines: List, node_type: str, documented: Dict): n = cursor.node for i in range(len(n.children)): if i < len(n.children)-1 and n.children[i].type == "comment" and n.children[i+1].type == node_type: name = str(match_from_span(cursor.node.children[i+1].child_by_field_name("name"), lines)) documented[name] = str(match_from_span(cursor.node.children[i], lines)) self._walk_recursive_phpdoc(n.children[i].walk(), lines, node_type, documented) def get_all_function_phpdocs(self) -> Dict[str, str]: documentation = {} self._walk_recursive_phpdoc(self.root_node.walk(), self.splitted_code, "function_definition", documentation) return documentation def get_all_function_documentations(self) -> Dict[str, str]: return 
self.get_all_function_phpdocs() def get_all_method_phpdocs(self) -> Dict[str, str]: documentation = {} self._walk_recursive_phpdoc(self.root_node.walk(), self.splitted_code, "method_declaration", documentation) return documentation
MIT License
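A minimal usage sketch for get_all_method_documentations(). It assumes a compiled tree-sitter language bundle and a sample PHP file at the placeholder paths, and that PHPParser is importable from tree_hugger.core like the other parsers.
from tree_hugger.core import PHPParser

parser = PHPParser(library_loc="/path/to/my-languages.so")  # placeholder tree-sitter library
parser.parse_file("example.php")                            # placeholder PHP source file
for method_name, phpdoc in parser.get_all_method_documentations().items():
    # method names map to the PHPDoc comment block preceding each method
    print(method_name)
    print(phpdoc)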
systems-shiftlab/pmfuzz
src/pmfuzz/stages/stage1.py
Stage1.compress_new_crash_sites
python
def compress_new_crash_sites(self, parent_img, clean_name): crash_imgs_pattern = parent_img.replace('.'+nh.PM_IMG_EXT, '') + '.' + clean_name.replace('.'+nh.TC_EXT, '') + '.*' new_crash_imgs = glob(crash_imgs_pattern) if self.verbose: printv('Using pattern %s found %d images' % (crash_imgs_pattern, len(new_crash_imgs))) hashdb = pickledb.load(self.crash_site_db_f, True) def get_hash(img): clean_img = re.sub(r"<pid=\d+>", "", img) crash_img_name = path.basename(clean_img) crash_img_name = crash_img_name[crash_img_name.index('.')+1:] hash_v = sha256sum(img) hash_f = path.join(self.img_dir, crash_img_name + '.hash') with open(hash_f, 'w') as hash_obj: hash_obj.write(hash_v) prl_hash = parallel.Parallel( get_hash, self.cores, transparent_io=True, failure_mode=parallel.Parallel.FAILURE_EXIT ) for img in new_crash_imgs: if self.verbose: printv('Running hash collection for ' + img) prl_hash.run([img]) prl_hash.wait() if self.verbose: printv('Now left: %d images' % (len(new_crash_imgs))) prl = parallel.Parallel( self.compress_new_crash_site, self.cores, transparent_io=True, failure_mode=parallel.Parallel.FAILURE_EXIT ) for img in new_crash_imgs: prl.run([img]) if self.verbose: printv('Waiting for compression to complete') prl.wait() for img in new_crash_imgs: os.remove(img)
Compresses the crash sites generated for the parent image (parent_img).
https://github.com/systems-shiftlab/pmfuzz/blob/4318d1daf2720a2164ca609ca35f7e33dd312e91/src/pmfuzz/stages/stage1.py#L226-L287
import pickledb import re import sys from os import path, makedirs, listdir from random import randrange from shutil import which, rmtree import handlers.name_handler as nh import interfaces.failureinjection as finj from .dedup import Dedup from .stage import Stage from interfaces.afl import * from helper import config from helper import parallel from helper.common import * from helper.prettyprint import * from helper.target import Target as Tgt from helper.target import TempEmptyImage class Stage1(Stage): def __init__(self, name, srcdir, outdir, cfg, cores, verbose, force_resp, dry_run): afloutdir = path.join(outdir, nh.get_outdir_name(1, 1), nh.AFL_DIR_NM) stageoutdir = path.join(outdir, nh.get_outdir_name(1, 1)) self.afloutdir = afloutdir try: makedirs(afloutdir) except OSError: if path.isfile(afloutdir): abort('%s is not a directory.' % afloutdir) super().__init__(name, srcdir, outdir, cfg, cores, verbose, force_resp, dry_run) self.o_tc_dir = path.join(self.afloutdir, 'master_fuzzer/queue') self.img_dir = path.join(stageoutdir, Dedup.PM_IMG_DIR) self.tc_dir = path.join(stageoutdir, Dedup.TESTCASE_DIR) self.map_dir = path.join(stageoutdir, Dedup.MAP_DIR) try: makedirs(self.tc_dir) except OSError as e: if path.isfile(self.tc_dir): abort('%s is not a directory.' % self.tc_dir) try: makedirs(self.img_dir) except OSError as e: if path.isfile(self.img_dir): abort('%s is not a directory.' % self.img_dir) try: makedirs(self.o_tc_dir) except OSError as e: if path.isfile(self.o_tc_dir): abort('%s is not a directory.' % self.o_tc_dir) try: makedirs(self.map_dir) except OSError as e: if path.isfile(self.map_dir): abort('%s is not a directory.' % self.map_dir) def run(self): run_afl( indir = self.srcdir, outdir = self.afloutdir, tgtcmd = self.cfg.tgtcmd, cfg = self.cfg, cores = self.cores, persist_tgt = False, verbose = self.verbose, dry_run = self.dry_run, ) self.test_img_creation() def whatsup(self): afl_whatsup_bin = path.join(self.cfg['pmfuzz']['bin_dir'], 'afl-whatsup') if which('watch') is None: abort('`watch\' not found, is it included in the $PATH?') afl_whatsup_cmd = [afl_whatsup_bin, '-s', self.afloutdir] printi('Use the following command to track progress:' + '\n\t\twatch --color -n0.1 -d \'' + ' '.join(afl_whatsup_cmd) + '\'') exec_shell(afl_whatsup_cmd, wait=True) def test_img_creation(self): fd, imgpath = tempfile.mkstemp(dir=self.cfg('pmfuzz.img_loc'), prefix='img_creation_test_img') os.close(fd) os.remove(imgpath) fd, testcase_f = tempfile.mkstemp(prefix='pmfuzz-img-creation-input-') os.close(fd) if 'empty_img' in self.cfg('target') != 'None': with open(testcase_f, 'w') as obj: obj.write(self.cfg('target.empty_img.stdin') + '\n') finj.run_failure_inj( cfg = self.cfg, tgtcmd = self.cfg.tgtcmd, imgpath = imgpath, testcase_f = testcase_f, clean_name = 'id=000000', create = True, verbose = self.verbose, ) for img in glob(imgpath + '*'): self.check_crash_site(img) def _collect_map(self, source_name, clean_name): src_nm = 'map_' + source_name.replace('.testcase', '') dest_nm = 'map_' + clean_name src = path.join(self.o_tc_dir, src_nm) dest = path.join(self.tc_dir, dest_nm) if path.isfile(src): if self.verbose: printv('mapcpy %s -> %s' %(src, dest)) copypreserve(src, dest) else: abort(f'Cannot find {src}') src_nm = 'pm_map_' + source_name.replace('.testcase', '') dest_nm = 'pm_map_' + clean_name src = path.join(self.o_tc_dir, src_nm) dest = path.join(self.tc_dir, dest_nm) if path.isfile(src): if self.verbose: printv('pmmapcpy%s -> %s' %(src, dest)) copypreserve(src, dest) else: 
printw('Unable to find PM map: ' + src) def add_cs_hash_lcl(self): hash_db = pickledb.load(self.crash_site_db_f, True) for fname in filter(nh.is_hash_f, os.listdir(self.img_dir)): hash_f = path.join(self.img_dir, fname) with open(hash_f, 'r') as hash_obj: hash_k = fname.replace('.' + nh.HASH_F_EXT, '') hash_v = hash_obj.read().strip() abort_if(hash_v.count('\n') != 0, 'Invalid hash value:' + hash_v) hash_db.set(hash_k, hash_v) hash_db.dump() if self.verbose: printv('Updated HashDB at %s with %s: %s' % (self.crash_site_db_f, hash_k, hash_v)) printw('Deleting ' + hash_f) os.remove(hash_f) def compress_new_crash_site(self, img): clean_img = re.sub(r"<pid=\d+>", "", img) crash_img_name = path.basename(clean_img) crash_img_name = crash_img_name[crash_img_name.index('.')+1:] clean_img = path.join(self.img_dir, crash_img_name) self.check_crash_site(img) compress(img, clean_img+'.tar.gz', self.verbose, level=3, extra_params=['--transform', 's/pmfuzz-tmp-img-.........//'])
BSD 3-Clause New or Revised License
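A rough sketch of where compress_new_crash_sites() sits in the stage-1 flow, wrapped in a helper so it stays self-contained. It assumes stage is an already-constructed Stage1 instance and that failure injection has written crash-site images next to the parent image; the paths are placeholders.
def archive_crash_sites(stage, parent_img, clean_name):
    """stage: an already-constructed Stage1 instance (assumed).

    Example (placeholder paths):
        archive_crash_sites(stage,
                            "/mnt/pmem0/pmfuzz-img-id=000001.img",
                            "id=000001.testcase")
    """
    # Hash, compress and remove the raw crash images for this parent image.
    stage.compress_new_crash_sites(parent_img, clean_name)
    # Fold the per-image .hash files written above into the crash-site DB.
    stage.add_cs_hash_lcl()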
pynbody/tangos
tangos/input_handlers/halo_stat_files/__init__.py
AmigaIDLStatFile.iter_rows_raw
python
def iter_rows_raw(self, *args): for row in super().iter_rows_raw(*args): row[0] = row[1] yield row
Yield the halo ID along with the values associated with each of the given arguments. The halo ID is output twice in order to be consistent with other stat file readers. In this case, the finder_offset that is normally output is just equal to the finder_id. :param args: strings for the column names :return: finder_id, finder_id, arg1, arg2, arg3, ... where finder_id is the halo's ID number read directly from the stat file and argN is the value associated with the Nth column name given as an argument.
https://github.com/pynbody/tangos/blob/1d7e837b731e974087ceda4bcb61a1730be0243a/tangos/input_handlers/halo_stat_files/__init__.py#L226-L239
from __future__ import absolute_import import os import copy import numpy as np from . import translations from ...util import proxy_object from six.moves import range from six.moves import zip class HaloStatFile(object): _finder_offset_start = 0 _column_translations = {} def __new__(cls, timestep): subcls = cls.find_loadable_subclass(timestep) if subcls: return object.__new__(subcls) else: raise IOError("No stat file found for timestep %r"%timestep) @classmethod def find_loadable_subclass(cls, timestep_filename): if cls.can_load(timestep_filename): return cls for subclass in cls.__subclasses__(): loadable_cls = subclass.find_loadable_subclass(timestep_filename) if loadable_cls: return loadable_cls return None @classmethod def can_load(cls, timestep_filename): try: return os.path.exists(cls.filename(timestep_filename)) except (ValueError, TypeError): return False @classmethod def filename(cls, timestep_filename): raise ValueError("Unknown path to stat file") def __init__(self, timestep_filename): self._timestep_filename = timestep_filename self.filename = self.filename(timestep_filename) def all_columns(self): with open(self.filename) as f: columns = self._read_column_names(f) columns+=self._column_translations.keys() return columns def iter_rows_raw(self, *args): with open(self.filename) as f: header = self._read_column_names(f) cnt = 0 ids = [0] for a in args: try: ids.append(header.index(a)) except ValueError: ids.append(None) for l in f: if not l.startswith("#"): col_data = self._get_values_for_columns(ids, l) col_data.insert(0, cnt+self._finder_offset_start) yield col_data cnt += 1 def iter_rows(self, *args): raw_args = [] for arg in args: if arg in self._column_translations: raw_args+=self._column_translations[arg].inputs() else: raw_args.append(arg) for raw_values in self.iter_rows_raw(*raw_args): values = [raw_values[0], raw_values[1]] for arg in args: if arg in self._column_translations: values.append(self._column_translations[arg](raw_args, raw_values[2:])) else: values.append(raw_values[2:][raw_args.index(arg)]) yield values def read(self, *args): return_values = [[] for _ in range(len(args)+2)] for row in self.iter_rows(*args): for return_array, value in zip(return_values, row): return_array.append(value) return [np.array(x) for x in return_values] def _get_values_for_columns(self, columns, line): results = [] l_split = line.split() for id_this in columns: if id_this is None: this_cast = None else: this_str = l_split[id_this] if "." 
in this_str or "e" in this_str: guess_type = float else: guess_type = int try: this_cast = guess_type(this_str) except ValueError: this_cast = this_str results.append(this_cast) return results def _read_column_names(self, f): return [x.split("(")[0] for x in f.readline().split()] class AHFStatFile(HaloStatFile): _finder_offset_start = 1 _column_translations = {'n_gas': translations.DefaultValue('n_gas', 0), 'n_star': translations.DefaultValue('n_star', 0), 'n_dm': translations.Function(lambda ngas, nstar, npart: npart - (ngas or 0) - (nstar or 0), 'n_gas', 'n_star', 'npart'), 'hostHalo': translations.Function( lambda id: None if id==-1 else proxy_object.IncompleteProxyObjectFromFinderId(id, 'halo'), 'hostHalo')} def __init__(self, timestep_filename): super(AHFStatFile, self).__init__(timestep_filename) self._column_translations = copy.copy(self._column_translations) self._column_translations['childHalo'] = translations.Function(self._child_halo_entry, '#ID') @classmethod def filename(cls, timestep_filename): import glob file_list = glob.glob(timestep_filename+ '.z*.???.AHF_halos') parts = timestep_filename.split("/") parts_with_halo = parts[:-1]+["halos"]+parts[-1:] filename_with_halo = "/".join(parts_with_halo) file_list+=glob.glob(filename_with_halo+'.z*.???.AHF_halos') if len(file_list)==0: return "CannotFindAHFHaloFilename" else: return file_list[0] def _calculate_children(self): self._children_map = {} for c_id, f_id, host_f_id in self.iter_rows_raw("hostHalo"): if host_f_id!=-1: cmap = self._children_map.get(host_f_id, []) cmap.append(proxy_object.IncompleteProxyObjectFromFinderId(f_id,'halo')) self._children_map[host_f_id] = cmap def _calculate_children_if_required(self): if not hasattr(self, "_children_map"): self._calculate_children() def _child_halo_entry(self, this_id_raw): self._calculate_children_if_required() children = self._children_map.get(this_id_raw, []) return children class RockstarStatFile(HaloStatFile): _column_translations = {'n_dm': translations.Rename('Np'), 'n_gas': translations.Value(0), 'n_star': translations.Value(0), 'npart': translations.Rename('Np')} @classmethod def filename(cls, timestep_filename): basename = os.path.basename(timestep_filename) dirname = os.path.dirname(timestep_filename) if basename.startswith("snapshot_"): timestep_id = int(basename[9:]) return os.path.join(dirname, "out_%d.list"%timestep_id) else: return "CannotComputeRockstarFilename" class AmigaIDLStatFile(HaloStatFile): _column_translations = {'n_dm': translations.Rename('N_dark'), 'n_gas': translations.Rename('N_gas'), 'n_star': translations.Rename("N_star"), 'npart': translations.Function(lambda ngas, nstar, ndark: ngas + nstar + ndark, "N_dark", "N_gas", "N_star")} @classmethod def filename(cls, timestep_filename): return timestep_filename + '.amiga.stat'
BSD 3-Clause New or Revised License
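A small sketch of reading an Amiga stat file with the class above. The simulation path is a placeholder and the corresponding <path>.amiga.stat file must exist, since HaloStatFile.__new__ picks the subclass by probing for the stat file on disk.
from tangos.input_handlers.halo_stat_files import AmigaIDLStatFile

stat = AmigaIDLStatFile("sims/h516.cosmo25cmb/snapshot.00512")  # placeholder timestep path
for finder_id, finder_offset, n_dark, n_gas in stat.iter_rows_raw("N_dark", "N_gas"):
    # finder_id and finder_offset are identical for this reader
    print(finder_id, n_dark, n_gas)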
gerritcodereview/git-repo
git_config.py
Remote.WritesTo
python
def WritesTo(self, ref): for spec in self.fetch: if spec.DestMatches(ref): return True return False
True if the remote stores to the tracking ref.
https://github.com/gerritcodereview/git-repo/blob/03ff276cd70e78639232d2e878d972f15ebcd461/git_config.py#L659-L665
import contextlib import datetime import errno from http.client import HTTPException import json import os import re import ssl import subprocess import sys import urllib.error import urllib.request from error import GitError, UploadError import platform_utils from repo_trace import Trace from git_command import GitCommand from git_refs import R_CHANGES, R_HEADS, R_TAGS SYNC_STATE_PREFIX = 'repo.syncstate.' ID_RE = re.compile(r'^[0-9a-f]{40}$') REVIEW_CACHE = dict() def IsChange(rev): return rev.startswith(R_CHANGES) def IsId(rev): return ID_RE.match(rev) def IsTag(rev): return rev.startswith(R_TAGS) def IsImmutable(rev): return IsChange(rev) or IsId(rev) or IsTag(rev) def _key(name): parts = name.split('.') if len(parts) < 2: return name.lower() parts[0] = parts[0].lower() parts[-1] = parts[-1].lower() return '.'.join(parts) class GitConfig(object): _ForUser = None _USER_CONFIG = '~/.gitconfig' _ForSystem = None _SYSTEM_CONFIG = '/etc/gitconfig' @classmethod def ForSystem(cls): if cls._ForSystem is None: cls._ForSystem = cls(configfile=cls._SYSTEM_CONFIG) return cls._ForSystem @classmethod def ForUser(cls): if cls._ForUser is None: cls._ForUser = cls(configfile=os.path.expanduser(cls._USER_CONFIG)) return cls._ForUser @classmethod def ForRepository(cls, gitdir, defaults=None): return cls(configfile=os.path.join(gitdir, 'config'), defaults=defaults) def __init__(self, configfile, defaults=None, jsonFile=None): self.file = configfile self.defaults = defaults self._cache_dict = None self._section_dict = None self._remotes = {} self._branches = {} self._json = jsonFile if self._json is None: self._json = os.path.join( os.path.dirname(self.file), '.repo_' + os.path.basename(self.file) + '.json') def ClearCache(self): self._cache_dict = None def Has(self, name, include_defaults=True): if _key(name) in self._cache: return True if include_defaults and self.defaults: return self.defaults.Has(name, include_defaults=True) return False def GetInt(self, name): v = self.GetString(name) if v is None: return None v = v.strip() mult = 1 if v.endswith('k'): v = v[:-1] mult = 1024 elif v.endswith('m'): v = v[:-1] mult = 1024 * 1024 elif v.endswith('g'): v = v[:-1] mult = 1024 * 1024 * 1024 base = 10 if v.startswith('0x'): base = 16 try: return int(v, base=base) * mult except ValueError: return None def DumpConfigDict(self): config_dict = {} for key in self._cache: config_dict[key] = self.GetString(key) return config_dict def GetBoolean(self, name): v = self.GetString(name) if v is None: return None v = v.lower() if v in ('true', 'yes'): return True if v in ('false', 'no'): return False return None def SetBoolean(self, name, value): if value is not None: value = 'true' if value else 'false' self.SetString(name, value) def GetString(self, name, all_keys=False): try: v = self._cache[_key(name)] except KeyError: if self.defaults: return self.defaults.GetString(name, all_keys=all_keys) v = [] if not all_keys: if v: return v[0] return None r = [] r.extend(v) if self.defaults: r.extend(self.defaults.GetString(name, all_keys=True)) return r def SetString(self, name, value): key = _key(name) try: old = self._cache[key] except KeyError: old = [] if value is None: if old: del self._cache[key] self._do('--unset-all', name) elif isinstance(value, list): if len(value) == 0: self.SetString(name, None) elif len(value) == 1: self.SetString(name, value[0]) elif old != value: self._cache[key] = list(value) self._do('--replace-all', name, value[0]) for i in range(1, len(value)): self._do('--add', name, value[i]) elif len(old) 
!= 1 or old[0] != value: self._cache[key] = [value] self._do('--replace-all', name, value) def GetRemote(self, name): try: r = self._remotes[name] except KeyError: r = Remote(self, name) self._remotes[r.name] = r return r def GetBranch(self, name): try: b = self._branches[name] except KeyError: b = Branch(self, name) self._branches[b.name] = b return b def GetSyncAnalysisStateData(self): return {k: v for k, v in self.DumpConfigDict().items() if k.startswith(SYNC_STATE_PREFIX)} def UpdateSyncAnalysisState(self, options, superproject_logging_data): return SyncAnalysisState(self, options, superproject_logging_data) def GetSubSections(self, section): return self._sections.get(section, set()) def HasSection(self, section, subsection=''): try: return subsection in self._sections[section] except KeyError: return False def UrlInsteadOf(self, url): for new_url in self.GetSubSections('url'): for old_url in self.GetString('url.%s.insteadof' % new_url, True): if old_url is not None and url.startswith(old_url): return new_url + url[len(old_url):] return url @property def _sections(self): d = self._section_dict if d is None: d = {} for name in self._cache.keys(): p = name.split('.') if 2 == len(p): section = p[0] subsect = '' else: section = p[0] subsect = '.'.join(p[1:-1]) if section not in d: d[section] = set() d[section].add(subsect) self._section_dict = d return d @property def _cache(self): if self._cache_dict is None: self._cache_dict = self._Read() return self._cache_dict def _Read(self): d = self._ReadJson() if d is None: d = self._ReadGit() self._SaveJson(d) return d def _ReadJson(self): try: if os.path.getmtime(self._json) <= os.path.getmtime(self.file): platform_utils.remove(self._json) return None except OSError: return None try: Trace(': parsing %s', self.file) with open(self._json) as fd: return json.load(fd) except (IOError, ValueErrorl): platform_utils.remove(self._json, missing_ok=True) return None def _SaveJson(self, cache): try: with open(self._json, 'w') as fd: json.dump(cache, fd, indent=2) except (IOError, TypeError): platform_utils.remove(self._json, missing_ok=True) def _ReadGit(self): c = {} if not os.path.exists(self.file): return c d = self._do('--null', '--list') for line in d.rstrip('\0').split('\0'): if '\n' in line: key, val = line.split('\n', 1) else: key = line val = None if key in c: c[key].append(val) else: c[key] = [val] return c def _do(self, *args): if self.file == self._SYSTEM_CONFIG: command = ['config', '--system', '--includes'] else: command = ['config', '--file', self.file, '--includes'] command.extend(args) p = GitCommand(None, command, capture_stdout=True, capture_stderr=True) if p.Wait() == 0: return p.stdout else: raise GitError('git config %s: %s' % (str(args), p.stderr)) class RepoConfig(GitConfig): _USER_CONFIG = '~/.repoconfig/config' class RefSpec(object): @classmethod def FromString(cls, rs): lhs, rhs = rs.split(':', 2) if lhs.startswith('+'): lhs = lhs[1:] forced = True else: forced = False return cls(forced, lhs, rhs) def __init__(self, forced, lhs, rhs): self.forced = forced self.src = lhs self.dst = rhs def SourceMatches(self, rev): if self.src: if rev == self.src: return True if self.src.endswith('/*') and rev.startswith(self.src[:-1]): return True return False def DestMatches(self, ref): if self.dst: if ref == self.dst: return True if self.dst.endswith('/*') and ref.startswith(self.dst[:-1]): return True return False def MapSource(self, rev): if self.src.endswith('/*'): return self.dst[:-1] + rev[len(self.src) - 1:] return self.dst def 
__str__(self): s = '' if self.forced: s += '+' if self.src: s += self.src if self.dst: s += ':' s += self.dst return s URI_ALL = re.compile(r'^([a-z][a-z+-]*)://([^@/]*@?[^/]*)/') def GetSchemeFromUrl(url): m = URI_ALL.match(url) if m: return m.group(1) return None @contextlib.contextmanager def GetUrlCookieFile(url, quiet): if url.startswith('persistent-'): try: p = subprocess.Popen( ['git-remote-persistent-https', '-print_config', url], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) try: cookieprefix = 'http.cookiefile=' proxyprefix = 'http.proxy=' cookiefile = None proxy = None for line in p.stdout: line = line.strip().decode('utf-8') if line.startswith(cookieprefix): cookiefile = os.path.expanduser(line[len(cookieprefix):]) if line.startswith(proxyprefix): proxy = line[len(proxyprefix):] if cookiefile or proxy: yield cookiefile, proxy return finally: p.stdin.close() if p.wait(): err_msg = p.stderr.read().decode('utf-8') if ' -print_config' in err_msg: pass elif not quiet: print(err_msg, file=sys.stderr) except OSError as e: if e.errno == errno.ENOENT: pass raise cookiefile = GitConfig.ForUser().GetString('http.cookiefile') if cookiefile: cookiefile = os.path.expanduser(cookiefile) yield cookiefile, None class Remote(object): def __init__(self, config, name): self._config = config self.name = name self.url = self._Get('url') self.pushUrl = self._Get('pushurl') self.review = self._Get('review') self.projectname = self._Get('projectname') self.fetch = list(map(RefSpec.FromString, self._Get('fetch', all_keys=True))) self._review_url = None def _InsteadOf(self): globCfg = GitConfig.ForUser() urlList = globCfg.GetSubSections('url') longest = "" longestUrl = "" for url in urlList: key = "url." + url + ".insteadOf" insteadOfList = globCfg.GetString(key, all_keys=True) for insteadOf in insteadOfList: if (self.url.startswith(insteadOf) and len(insteadOf) > len(longest)): longest = insteadOf longestUrl = url if len(longest) == 0: return self.url return self.url.replace(longest, longestUrl, 1) def PreConnectFetch(self, ssh_proxy): if not ssh_proxy: return True connectionUrl = self._InsteadOf() return ssh_proxy.preconnect(connectionUrl) def ReviewUrl(self, userEmail, validate_certs): if self._review_url is None: if self.review is None: return None u = self.review if u.startswith('persistent-'): u = u[len('persistent-'):] if u.split(':')[0] not in ('http', 'https', 'sso', 'ssh'): u = 'http://%s' % u if u.endswith('/Gerrit'): u = u[:len(u) - len('/Gerrit')] if u.endswith('/ssh_info'): u = u[:len(u) - len('/ssh_info')] if not u.endswith('/'): u += '/' http_url = u if u in REVIEW_CACHE: self._review_url = REVIEW_CACHE[u] elif 'REPO_HOST_PORT_INFO' in os.environ: host, port = os.environ['REPO_HOST_PORT_INFO'].split() self._review_url = self._SshReviewUrl(userEmail, host, port) REVIEW_CACHE[u] = self._review_url elif u.startswith('sso:') or u.startswith('ssh:'): self._review_url = u REVIEW_CACHE[u] = self._review_url elif 'REPO_IGNORE_SSH_INFO' in os.environ: self._review_url = http_url REVIEW_CACHE[u] = self._review_url else: try: info_url = u + 'ssh_info' if not validate_certs: context = ssl._create_unverified_context() info = urllib.request.urlopen(info_url, context=context).read() else: info = urllib.request.urlopen(info_url).read() if info == b'NOT_AVAILABLE' or b'<' in info: self._review_url = http_url else: info = info.decode('utf-8') host, port = info.split() self._review_url = self._SshReviewUrl(userEmail, host, port) except urllib.error.HTTPError as e: raise 
UploadError('%s: %s' % (self.review, str(e))) except urllib.error.URLError as e: raise UploadError('%s: %s' % (self.review, str(e))) except HTTPException as e: raise UploadError('%s: %s' % (self.review, e.__class__.__name__)) REVIEW_CACHE[u] = self._review_url return self._review_url + self.projectname def _SshReviewUrl(self, userEmail, host, port): username = self._config.GetString('review.%s.username' % self.review) if username is None: username = userEmail.split('@')[0] return 'ssh://%s@%s:%s/' % (username, host, port) def ToLocal(self, rev): if self.name == '.' or IsId(rev): return rev if not rev.startswith('refs/'): rev = R_HEADS + rev for spec in self.fetch: if spec.SourceMatches(rev): return spec.MapSource(rev) if not rev.startswith(R_HEADS): return rev raise GitError('%s: remote %s does not have %s' % (self.projectname, self.name, rev))
Apache License 2.0
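A sketch of WritesTo() against a project's git config, assuming it runs from within the repo tool's source tree; the gitdir path is a placeholder and the results depend on the fetch refspecs actually configured for the remote.
from git_config import GitConfig

config = GitConfig.ForRepository(gitdir=".repo/manifests.git")   # placeholder gitdir
origin = config.GetRemote("origin")
# With the usual +refs/heads/*:refs/remotes/origin/* refspec, only the
# tracking ref matches a fetch destination:
print(origin.WritesTo("refs/remotes/origin/main"))   # typically True
print(origin.WritesTo("refs/heads/main"))            # typically False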
google/macops
gmacpyutil/gmacpyutil/gmacpyutil.py
GetAirportInfo
python
def GetAirportInfo(include_nearby_networks=False): airport_info = {} try: objc.loadBundle('CoreWLAN', globals(), bundle_path='/System/Library/Frameworks/CoreWLAN.framework') except ImportError: logging.error('Could not load CoreWLAN framework.') return airport_info cw_interface_state = {0: u'Inactive', 1: u'Scanning', 2: u'Authenticating', 3: u'Associating', 4: u'Running'} cw_security = {-1: u'Unknown', 0: u'None', 1: u'WEP', 2: u'WPA Personal', 3: u'WPA Personal Mixed', 4: u'WPA2 Personal', 6: u'Dynamic WEP', 7: u'WPA Enterprise', 8: u'WPA Enterprise Mixed', 9: u'WPA2 Enterprise', 10: u'Enterprise', 11: u'WPA3 Personal', 12: u'WPA3 Enterprise', 13: u'WPA3 Transition'} cw_phy_mode = {0: u'None', 1: u'802.11a', 2: u'802.11b', 3: u'802.11g', 4: u'802.11n'} cw_channel_band = {0: u'Unknown', 1: u'2 GHz', 2: u'5 GHz'} iface = CWInterface.interface() if not iface: return airport_info airport_info['name'] = iface.interfaceName() airport_info['hw_address'] = iface.hardwareAddress() airport_info['service_active'] = bool(iface.serviceActive()) airport_info['country_code'] = iface.countryCode() airport_info['power'] = bool(iface.powerOn()) airport_info['SSID'] = iface.ssid() airport_info['BSSID'] = iface.bssid() airport_info['noise_measurement'] = iface.noiseMeasurement() airport_info['phy_mode'] = iface.activePHYMode() airport_info['phy_mode_name'] = cw_phy_mode[iface.activePHYMode()] airport_info['rssi'] = iface.rssiValue() airport_info['state'] = iface.interfaceState() airport_info['state_name'] = cw_interface_state[iface.interfaceState()] airport_info['transmit_power'] = iface.transmitPower() airport_info['transmit_rate'] = iface.transmitRate() cw_channel = iface.wlanChannel() if cw_channel: airport_info['channel_number'] = cw_channel.channelNumber() airport_info['channel_band'] = cw_channel_band[cw_channel.channelBand()] security = iface.security() if security > 100: security = -1 airport_info['security'] = security airport_info['security_name'] = cw_security[security] if include_nearby_networks: nearby_networks = [] try: for nw in iface.scanForNetworksWithName_error_(None, None): ssid = nw.ssid() if ssid not in nearby_networks: nearby_networks.append(ssid) except TypeError: pass airport_info['nearby_networks'] = nearby_networks return airport_info
Returns information about the current AirPort connection. Args: include_nearby_networks: bool, if True a nearby_networks key will be in the returned dict with a list of detected SSIDs nearby. Returns: dict: key-value pairs from CWInterface data. If an error occurs or there is no Wi-Fi interface, the dict will be empty.
https://github.com/google/macops/blob/8442745359c0c941cd4e4e7d243e43bd16b40dec/gmacpyutil/gmacpyutil/gmacpyutil.py#L351-L449
import contextlib import ctypes import fcntl import logging import logging.handlers import os import pwd import re import select import signal import socket import subprocess import sys import time from . import defaults from distutils import version as distutils_version if os.uname()[0] == 'Linux': pass else: try: import objc except ImportError: print >>sys.stderr, ('Can\'t import Mac-specific objc library! ' 'Some functionality may be broken.') objc = None try: from Foundation import NSDictionary from Foundation import NSMutableDictionary except ImportError: print >>sys.stderr, ('Can\'t import Mac-specific Foundation libraries! ' 'Some functionality may be broken.') NSDictionary = None NSMutableDictionary = None try: from CoreFoundation import CFStringCreateWithCString from CoreFoundation import kCFStringEncodingASCII except ImportError: print >>sys.stderr, ('Can\'t import Mac-specific CoreFoundation libraries! ' 'Some functionality may be broken.') CFStringCreateWithCString = None kCFStringEncodingASCII = None MACHINEINFO = defaults.MACHINEINFO IMAGEINFO = defaults.IMAGEINFO SESSIONHASGRAPHICACCESS = 0x0010 LOG_FORMAT_SYSLOG = '%(pathname)s[%(process)d]:%(message)s' LOG_FORMAT_STDERR_LEVEL = '%(levelname)s: %(message)s' LOG_FORMAT_STDERR = '%(message)s' MAX_SUPPORTED_VERS = '10.10' class GmacpyutilException(Exception): class LogConfigurationError(GmacpyutilException): class MissingImportsError(GmacpyutilException): class MultilineSysLogHandler(logging.handlers.SysLogHandler): def emit(self, record): msg = self.format(record) if len(msg) > 2000: break_loc_pre = 0 for break_char in ['\n', ' ', '\t']: break_loc_pre = msg.rfind(break_char, 1000, 2000) break_loc_post = break_loc_pre + 1 if break_loc_pre > 0: break if break_loc_pre < 1: break_loc_pre = 2000 break_loc_post = 2000 r1msg = msg[:break_loc_pre] r2msg = 'CONTINUED: %s' % msg[break_loc_post:] r1 = logging.LogRecord( record.name, record.levelno, record.pathname, record.lineno, r1msg, None, None, func=record.funcName) r2 = logging.LogRecord( record.name, record.levelno, record.pathname, None, r2msg, None, None) logging.handlers.SysLogHandler.emit(self, r1) self.emit(r2) else: logging.handlers.SysLogHandler.emit(self, record) def _ConfigureHandler(handler, logger, formatstr, debug_level): handler.setFormatter(logging.Formatter(formatstr)) handler.setLevel(debug_level) logger.addHandler(handler) def ConfigureLogging(debug_level=logging.INFO, show_level=True, stderr=True, syslog=True, facility=None): if not stderr and not syslog: raise LogConfigurationError('Neither syslog nor stdout handlers set.') if facility and not syslog: raise LogConfigurationError('facility can only be used with syslog.') logger = logging.getLogger() logger.handlers = [] logger.setLevel(debug_level) if syslog: facility_id = logging.handlers.SysLogHandler.LOG_USER if facility: try: facility_id = logging.handlers.SysLogHandler.facility_names[facility] except KeyError: logging.error('%s is an invalid facility, using default.', facility) try: syslog_handler = MultilineSysLogHandler(facility=facility_id) _ConfigureHandler(syslog_handler, logger, LOG_FORMAT_SYSLOG, debug_level) except socket.error: print >>sys.stderr, 'Warning: Could not configure syslog based logging.' 
stderr = True if stderr: stderr_handler = logging.StreamHandler() if show_level: _ConfigureHandler(stderr_handler, logger, LOG_FORMAT_STDERR_LEVEL, debug_level) else: _ConfigureHandler(stderr_handler, logger, LOG_FORMAT_STDERR, debug_level) logging.debug('Logging enabled at level %s', debug_level) def SetFileNonBlocking(f, non_blocking=True): flags = fcntl.fcntl(f.fileno(), fcntl.F_GETFL) if bool(flags & os.O_NONBLOCK) != non_blocking: flags ^= os.O_NONBLOCK fcntl.fcntl(f.fileno(), fcntl.F_SETFL, flags) def _RunProcess(cmd, stdinput=None, env=None, cwd=None, sudo=False, sudo_password=None, background=False, stream_output=False, timeout=0, waitfor=0): if timeout and (background or stream_output or sudo or sudo_password or stdinput): raise GmacpyutilException('timeout is not compatible with background, ' 'stream_output, sudo, sudo_password, or ' 'stdinput.') if waitfor and not timeout: raise GmacpyutilException('waitfor only valid with timeout.') if timeout < 0: raise GmacpyutilException('timeout must be greater than 0.') if stream_output: stdoutput = None stderror = None else: stdoutput = subprocess.PIPE stderror = subprocess.PIPE if sudo and not background: sudo_cmd = ['sudo'] if sudo_password and not stdinput: sudo_cmd.extend(['-S']) stdinput = sudo_password + '\n' elif sudo_password and stdinput: raise GmacpyutilException('stdinput and sudo_password are mutually ' 'exclusive') else: sudo_cmd.extend(['-p', "%u's password is required for admin access: "]) sudo_cmd.extend(cmd) cmd = sudo_cmd elif sudo and background: raise GmacpyutilException('sudo is not compatible with background.') environment = os.environ.copy() if env is not None: environment.update(env) try: task = subprocess.Popen(cmd, stdout=stdoutput, stderr=stderror, stdin=subprocess.PIPE, env=environment, cwd=cwd) except OSError, e: raise GmacpyutilException('Could not execute: %s' % e.strerror) if timeout == 0: if not background: (stdout, stderr) = task.communicate(input=stdinput) return (stdout, stderr, task.returncode) else: if stdinput: task.stdin.write(stdinput) return task else: inactive = 0 stdoutput = [] stderror = [] SetFileNonBlocking(task.stdout) SetFileNonBlocking(task.stderr) returncode = None while returncode is None: rlist, _, _ = select.select([task.stdout, task.stderr], [], [], 1.0) if not rlist: inactive += 1 if inactive >= timeout: logging.error('cmd has timed out: %s', cmd) logging.error('Sending SIGTERM to PID=%s', task.pid) os.kill(task.pid, signal.SIGTERM) break else: inactive = 0 for fd in rlist: if fd is task.stdout: stdoutput.append(fd.read()) elif fd is task.stderr: stderror.append(fd.read()) returncode = task.poll() if inactive >= timeout and waitfor > 0: time.sleep(waitfor) returncode = task.poll() stdoutput = ''.join(stdoutput) stderror = ''.join(stderror) return (stdoutput, stderror, task.returncode) def RunProcess(*args, **kwargs): if kwargs.get('background'): raise GmacpyutilException('Use RunProcessInBackground() instead.') out, err, rc = _RunProcess(*args, **kwargs) return (out, err, rc) def RunProcessInBackground(*args, **kwargs): kwargs['background'] = True return _RunProcess(*args, **kwargs) def GetConsoleUser(): stat_info = os.stat('/dev/console') console_user = pwd.getpwuid(stat_info.st_uid)[0] return console_user
Apache License 2.0
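A usage sketch for GetAirportInfo(); it only yields data on macOS with the CoreWLAN framework and the PyObjC bridge available, and the import path assumes gmacpyutil is installed as a package.
from gmacpyutil import gmacpyutil

info = gmacpyutil.GetAirportInfo(include_nearby_networks=True)
if info:
    print('SSID: %s  RSSI: %s  security: %s' % (
        info.get('SSID'), info.get('rssi'), info.get('security_name')))
    print('Nearby networks: %s' % info.get('nearby_networks', []))
else:
    print('No Wi-Fi interface or CoreWLAN unavailable.')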
conducto/conducto
nb/magic.py
load_ipython_extension
python
def load_ipython_extension(ipython): ipython.register_magics(ConductoMagics)
This function is called when the extension is loaded. It accepts an IPython InteractiveShell instance.
https://github.com/conducto/conducto/blob/b480780905f5a25e8c803b60ca7cdf6976ce5ef6/nb/magic.py#L183-L187
from IPython.core.magic import Magics, magics_class, line_magic, cell_magic import ast, astsearch import re import shlex from textwrap import indent class Param(object): def __init__(self, name, vtype, value=None, metadata=None): self.name = name self.type = vtype self.value = value self.metadata = metadata or {} def __repr__(self): params = [repr(self.name), self.type.__name__] if self.value is not None: params.append(f"value={self.value!r}") if self.metadata: params.append(f"metadata={self.metadata!r}") return f"Param({', '.join(params)})" def with_value(self, value): return type(self)(self.name, self.type, value, self.metadata or None) def __eq__(self, other): if isinstance(other, Param): return ( self.name == other.name and self.type == other.type and self.value == other.value ) def any_supported_node(node, path): if isinstance(node, (ast.Num, ast.Str)): return elif isinstance(node, ast.NameConstant) and (node.value in (True, False)): return elif isinstance(node, ast.Dict): for n in node.keys: any_supported_node(n, path) for n in node.values: any_supported_node(n, path) return elif isinstance(node, ast.List): for n in node.elts: any_supported_node(n, path) return raise Exception( "Parsing an unsupported param in first cell. Only number, string, or Boolean supported: {path}, {node}" ) from None pydefine_ptrn = ast.Assign(targets=[ast.Name()], value=any_supported_node) def type_and_value(node): if isinstance(node, ast.Num): return type(node.n), node.n elif isinstance(node, ast.Str): return str, node.s elif isinstance(node, ast.NameConstant) and (node.value in (True, False)): return (bool, node.value) elif isinstance(node, ast.Dict): return ( dict, { type_and_value(node.keys[i])[1]: type_and_value(node.values[i])[1] for i in range(len(node.keys)) }, ) elif isinstance(node, ast.List): return (list, [type_and_value(n)[1] for n in node.elts]) raise Exception( "Parsing an unsupported param in first cell. 
Only number, string, or Boolean supported: {node}" ) from None def get_param_definitions(cellstr): definitions = list() cellast = ast.parse(cellstr) for assign in astsearch.ASTPatternFinder(pydefine_ptrn).scan_ast(cellast): definitions.append(Param(assign.targets[0].id, *type_and_value(assign.value))) return definitions def get_conbparam_definitions(co_nb_params): definitions = list() ARG_ASSIGNMENT = re.compile(r"([\-a-zA-Z0-9]+)=(.*)") for arg in co_nb_params: kv = re.search(ARG_ASSIGNMENT, arg) if kv: name = kv.group(1).lstrip("-") value = kv.group(2) param = Param(name, str, value) definitions.append(param) else: name = arg.lstrip("-") value = not name.startswith("no-") if not value: name = name[3:] param = Param(name, bool, value) definitions.append(param) return definitions RUN_IPYTHON_LOAD_PARAMS = """ import sys sys.argv[1:] = co_nb_params del co_nb_params """ from IPython.core.magics.namespace import NamespaceMagics import IPython.core as core import ipynbname @magics_class class ConductoMagics(Magics): @cell_magic def conducto(self, line, cell): self.shell.run_line_magic("store", "-r co_nb_params") has_co_nb_params = self.shell.ev("'co_nb_params' in locals()") if not has_co_nb_params: raise Exception( "Couldn't store or retrieve parameters in IPython" ) from None co_nb_params = self.shell.ev("co_nb_params") nbname = ipynbname.name() params = co_nb_params[nbname] if nbname in co_nb_params else list() self.shell.ex(f"co_nb_params = {str(params)}") for line in RUN_IPYTHON_LOAD_PARAMS.split("\n"): self.shell.ex(line) userparams = get_param_definitions(cell) cliparams = get_conbparam_definitions(params) overrides = list() for cliparam in cliparams: for param in userparams: if param.name == cliparam.name: if param.type == str: overrides.append(f"{param.name} = '{cliparam.value}'") elif param.type in (list, dict, tuple): raise Exception( f"list type notebook arguments not supported. variable '{param.name}'" ) from None else: overrides.append(f"{param.name} = {cliparam.value}") self.shell.run_cell(cell, store_history=False) if len(overrides): overrides_statements = "\n".join(overrides) notification_of_overrides = ( f'print("Applying arguments from the command line: (use `conducto-notebook clearargs` to clear)")\n' f'print("""{indent(overrides_statements, " ")}""")' ) overrides_cell = f"{overrides_statements}\n{notification_of_overrides}" self.shell.run_cell(overrides_cell, store_history=False)
Apache License 2.0
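A sketch of loading the extension programmatically (the %load_ext line magic is the usual route in a notebook); the module path nb.magic mirrors this file's location and is assumed to be importable.
from IPython import get_ipython
import nb.magic as conducto_magic   # assumed importable; mirrors nb/magic.py

ip = get_ipython()
if ip is not None:
    conducto_magic.load_ipython_extension(ip)   # registers the %%conducto cell magic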
facebookresearch/classyvision
classy_vision/hooks/classy_hook.py
ClassyHook.on_phase_start
python
def on_phase_start(self, task) -> None: pass
Called at the start of each phase.
https://github.com/facebookresearch/classyvision/blob/4785d5ee19d3bcedd5b28c1eb51ea1f59188b54d/classy_vision/hooks/classy_hook.py#L81-L83
from abc import ABC, abstractmethod from typing import Any, Dict from classy_vision.generic.util import log_class_usage class ClassyHookState: def get_classy_state(self) -> Dict[str, Any]: return self.__dict__ def set_classy_state(self, state_dict: Dict[str, Any]): self.__dict__.update(state_dict) class ClassyHook(ABC): def __init__(self): log_class_usage("Hooks", self.__class__) self.state = ClassyHookState() @classmethod def from_config(cls, config) -> "ClassyHook": return cls(**config) def _noop(self, *args, **kwargs) -> None: pass @classmethod def name(cls) -> str: return cls.__name__ @abstractmethod def on_start(self, task) -> None: pass @abstractmethod
MIT License
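A minimal custom hook sketch: on_phase_start is implemented and the remaining callbacks are silenced with the _noop helper defined above. The exact set of no-op'd callbacks is assumed from this version of the class.
from classy_vision.hooks import ClassyHook


class PhaseLoggerHook(ClassyHook):
    # Callbacks this hook does not care about (assumed abstract in this version).
    on_start = ClassyHook._noop
    on_step = ClassyHook._noop
    on_phase_end = ClassyHook._noop
    on_end = ClassyHook._noop

    def on_phase_start(self, task) -> None:
        print("starting phase, train=%s" % getattr(task, "train", None))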
compas-dev/compas
src/compas_ghpython/artists/networkartist.py
NetworkArtist.draw_edges
python
def draw_edges(self, edges=None, color=None): self.edge_color = color node_xyz = self.node_xyz edges = edges or list(self.network.edges()) lines = [] for edge in edges: lines.append({ 'start': node_xyz[edge[0]], 'end': node_xyz[edge[1]], 'color': self.edge_color.get(edge, self.default_edgecolor), 'name': "{}.edge.{}-{}".format(self.network.name, *edge) }) return compas_ghpython.draw_lines(lines)
Draw a selection of edges. Parameters ---------- edges : list, optional A list of edges to draw. The default is ``None``, in which case all edges are drawn. color : tuple or dict of tuple, optional The color specification for the edges. The default color is the value of ``~NetworkArtist.default_edgecolor``. Returns ------- list of :class:`Rhino.Geometry.Line`
https://github.com/compas-dev/compas/blob/d795a8bfe9f21ffa124d09e37e9c0ed2e3520057/src/compas_ghpython/artists/networkartist.py#L66-L93
from __future__ import print_function from __future__ import absolute_import from __future__ import division from functools import partial import compas_ghpython from compas.utilities import color_to_colordict from compas.artists import NetworkArtist from .artist import GHArtist colordict = partial(color_to_colordict, colorformat='rgb', normalize=False) class NetworkArtist(GHArtist, NetworkArtist): def __init__(self, network, **kwargs): super(NetworkArtist, self).__init__(network=network, **kwargs) def draw(self): return (self.draw_nodes(), self.draw_edges()) def draw_nodes(self, nodes=None, color=None): self.node_color = color node_xyz = self.node_xyz nodes = nodes or list(self.network.nodes()) points = [] for node in nodes: points.append({ 'pos': node_xyz[node], 'name': "{}.node.{}".format(self.network.name, node), 'color': self.node_color.get(node, self.default_nodecolor) }) return compas_ghpython.draw_points(points)
MIT License
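A Grasshopper-side sketch of draw_edges(): it assumes it runs inside a GhPython component in Rhino/Grasshopper where compas_ghpython is importable, and the OBJ file path is a placeholder.
from compas.datastructures import Network
from compas_ghpython.artists import NetworkArtist

network = Network.from_obj('lines.obj')        # placeholder geometry source
artist = NetworkArtist(network)
edges = artist.draw_edges(color=(255, 0, 0))   # one Rhino.Geometry.Line per edge
nodes = artist.draw_nodes()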
square/connect-python-sdk
squareconnect/models/checkout.py
Checkout.pre_populate_shipping_address
python
def pre_populate_shipping_address(self, pre_populate_shipping_address): self._pre_populate_shipping_address = pre_populate_shipping_address
Sets the pre_populate_shipping_address of this Checkout. If provided, the buyer's shipping info is pre-populated on the checkout page as editable text fields. Default: none; only exists if explicitly set. :param pre_populate_shipping_address: The pre_populate_shipping_address of this Checkout. :type: Address
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/checkout.py#L203-L212
from pprint import pformat from six import iteritems import re class Checkout(object): def __init__(self, id=None, checkout_page_url=None, ask_for_shipping_address=None, merchant_support_email=None, pre_populate_buyer_email=None, pre_populate_shipping_address=None, redirect_url=None, order=None, created_at=None, additional_recipients=None): self.swagger_types = { 'id': 'str', 'checkout_page_url': 'str', 'ask_for_shipping_address': 'bool', 'merchant_support_email': 'str', 'pre_populate_buyer_email': 'str', 'pre_populate_shipping_address': 'Address', 'redirect_url': 'str', 'order': 'Order', 'created_at': 'str', 'additional_recipients': 'list[AdditionalRecipient]' } self.attribute_map = { 'id': 'id', 'checkout_page_url': 'checkout_page_url', 'ask_for_shipping_address': 'ask_for_shipping_address', 'merchant_support_email': 'merchant_support_email', 'pre_populate_buyer_email': 'pre_populate_buyer_email', 'pre_populate_shipping_address': 'pre_populate_shipping_address', 'redirect_url': 'redirect_url', 'order': 'order', 'created_at': 'created_at', 'additional_recipients': 'additional_recipients' } self._id = id self._checkout_page_url = checkout_page_url self._ask_for_shipping_address = ask_for_shipping_address self._merchant_support_email = merchant_support_email self._pre_populate_buyer_email = pre_populate_buyer_email self._pre_populate_shipping_address = pre_populate_shipping_address self._redirect_url = redirect_url self._order = order self._created_at = created_at self._additional_recipients = additional_recipients @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def checkout_page_url(self): return self._checkout_page_url @checkout_page_url.setter def checkout_page_url(self, checkout_page_url): self._checkout_page_url = checkout_page_url @property def ask_for_shipping_address(self): return self._ask_for_shipping_address @ask_for_shipping_address.setter def ask_for_shipping_address(self, ask_for_shipping_address): self._ask_for_shipping_address = ask_for_shipping_address @property def merchant_support_email(self): return self._merchant_support_email @merchant_support_email.setter def merchant_support_email(self, merchant_support_email): self._merchant_support_email = merchant_support_email @property def pre_populate_buyer_email(self): return self._pre_populate_buyer_email @pre_populate_buyer_email.setter def pre_populate_buyer_email(self, pre_populate_buyer_email): self._pre_populate_buyer_email = pre_populate_buyer_email @property def pre_populate_shipping_address(self): return self._pre_populate_shipping_address @pre_populate_shipping_address.setter
Apache License 2.0
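A sketch of setting the property on the generated model; the Address field names follow the Connect v2 Address model and the values are dummies.
from squareconnect.models.checkout import Checkout
from squareconnect.models.address import Address

checkout = Checkout()
checkout.pre_populate_shipping_address = Address(
    address_line_1='123 Main St',
    locality='San Francisco',
    administrative_district_level_1='CA',
    postal_code='94114',
    country='US',
)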
ageir/chirp-rpi
chirp.py
Chirp.sensor_address
python
def sensor_address(self, new_addr): if isinstance(new_addr, int) and (new_addr >= 3 and new_addr <= 119): self.bus.write_byte_data(self.address, 1, new_addr) self.reset() self.address = new_addr else: raise ValueError('I2C address must be between 3-119 or 0x03-0x77.')
Set a new I2C address for the sensor. Args: new_addr (int): New I2C address, 3-119 or 0x03-0x77. Raises: ValueError: If new_addr is not within the required range.
https://github.com/ageir/chirp-rpi/blob/6e411d6c382d5e43ee1fd269ec4de6a316893407/chirp.py#L184-L198
from __future__ import division from datetime import datetime import smbus import sys import time class Chirp(object): def __init__(self, bus=1, address=0x20, min_moist=False, max_moist=False, temp_scale='celsius', temp_offset=0, read_temp=True, read_moist=True, read_light=True): self.bus_num = bus self.bus = smbus.SMBus(bus) self.busy_sleep = 0.01 self.address = address self.min_moist = min_moist self.max_moist = max_moist self.temp_scale = temp_scale self.temp_offset = temp_offset self.read_temp = read_temp self.read_moist = read_moist self.read_light = read_light self.temp = False self.moist = False self.light = False self.temp_timestamp = datetime self.moist_timestamp = datetime self.light_timestamp = datetime self._GET_CAPACITANCE = 0x00 self._SET_ADDRESS = 0x01 self._GET_ADDRESS = 0x02 self._MEASURE_LIGHT = 0x03 self._GET_LIGHT = 0x04 self._GET_TEMPERATURE = 0x05 self._RESET = 0x06 self._GET_VERSION = 0x07 self._SLEEP = 0x08 self._GET_BUSY = 0x09 def trigger(self): if self.read_temp is True: self.temp = self._read_temp() if self.read_moist is True: self.moist = self._read_moist() if self.read_light is True: self.light = self._read_light() def get_reg(self, reg): val = self.bus.read_word_data(self.address, reg) return (val >> 8) + ((val & 0xFF) << 8) @property def version(self): return self.bus.read_byte_data(self.address, self._GET_VERSION) @property def busy(self): busy = self.bus.read_byte_data(self.address, self._GET_BUSY) if busy == 1: return True else: return False def reset(self): self.bus.write_byte(self.address, self._RESET) def sleep(self): self.bus.write_byte(self.address, self._SLEEP) def wake_up(self, wake_time=1): self.wake_time = wake_time try: self.bus.read_byte_data(self.address, self._GET_VERSION) except OSError: pass finally: time.sleep(self.wake_time) @property def sensor_address(self): return self.bus.read_byte_data(self.address, self._GET_ADDRESS) @sensor_address.setter
MIT License
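A sketch of changing the sensor address with the setter above; it assumes a Raspberry Pi with the Chirp sensor attached on I2C bus 1 at the factory default address 0x20.
from chirp import Chirp

sensor = Chirp(bus=1, address=0x20,
               read_temp=False, read_moist=False, read_light=False)
sensor.sensor_address = 0x21          # writes the new address and resets the sensor
print(hex(sensor.sensor_address))     # reads the address back over I2C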
user-cont/conu
conu/backend/k8s/deployment.py
Deployment.wait
python
def wait(self, timeout=15): Probe(timeout=timeout, fnc=self.all_pods_ready, expected_retval=True).run()
Block until all replicas are ready; raises a ProbeTimeout exception if the timeout is reached. :param timeout: int or float (seconds), time to wait for pods to run :return: None
https://github.com/user-cont/conu/blob/0d8962560f6f7f17fe1be0d434a4809e2a0ea51d/conu/backend/k8s/deployment.py#L128-L135
import logging import yaml from kubernetes import client from kubernetes.client.rest import ApiException from conu.utils.probes import Probe from conu.backend.k8s.pod import Pod from conu.backend.k8s.client import get_apps_api from conu.exceptions import ConuException logger = logging.getLogger(__name__) class Deployment(object): def __init__(self, name=None, selector=None, labels=None, image_metadata=None, namespace='default', create_in_cluster=False, from_template=None): self.namespace = namespace if (from_template is not None) and (name is not None or selector is not None or labels is not None or image_metadata is not None): raise ConuException( 'from_template cannot be passed to constructor at the same time with' ' name, selector, labels or image_metadata') elif from_template is not None: self.body = yaml.safe_load(from_template) self.name = self.body['metadata']['name'] elif (name is not None and selector is not None and labels is not None and image_metadata is not None): self.name = name self.pod = Pod.create(image_metadata) self.spec = client.V1DeploymentSpec( selector=client.V1LabelSelector(match_labels=selector), template=client.V1PodTemplateSpec(metadata=client.V1ObjectMeta(labels=selector), spec=self.pod.spec)) self.metadata = client.V1ObjectMeta(name=self.name, namespace=self.namespace, labels=labels) self.body = client.V1Deployment(spec=self.spec, metadata=self.metadata) else: raise ConuException( 'to create deployment you need to specify template or' ' properties: name, selector, labels, image_metadata') self.api = get_apps_api() if create_in_cluster: self.create_in_cluster() def delete(self): body = client.V1DeleteOptions() try: status = self.api.delete_namespaced_deployment(self.name, self.namespace, body) logger.info("Deleting Deployment %s in namespace: %s", self.name, self.namespace) except ApiException as e: raise ConuException( "Exception when calling Kubernetes API - delete_namespaced_deployment: %s\n" % e) if status.status == 'Failure': raise ConuException("Deletion of Deployment failed") def get_status(self): try: api_response = self.api.read_namespaced_deployment_status(self.name, self.namespace) except ApiException as e: raise ConuException( "Exception when calling Kubernetes API - " "read_namespaced_deployment_status: %s\n" % e) return api_response.status def all_pods_ready(self): if self.get_status().replicas and self.get_status().ready_replicas: if self.get_status().replicas == self.get_status().ready_replicas: logger.info("All pods are ready for deployment %s in namespace: %s", self.name, self.namespace) return True return False
MIT License
openstack/cinder
cinder/volume/drivers/dell_emc/sc/storagecenter_api.py
SCApi._find_folder
python
def _find_folder(self, url, foldername, ssn=-1):
    ssn = self._vet_ssn(ssn)
    pf = self._get_payload_filter()
    pf.append('scSerialNumber', ssn)
    basename = os.path.basename(foldername)
    pf.append('Name', basename)
    folderpath = foldername.strip('/')
    folderpath = os.path.dirname(folderpath)
    if folderpath != '':
        if not self.legacyfoldernames:
            folderpath = '/' + folderpath
        folderpath += '/'
    elif not self.legacyfoldernames:
        folderpath = '/'
    pf.append('folderPath', folderpath)
    folder = None
    r = self.client.post(url, pf.payload)
    if self._check_result(r):
        folder = self._get_result(r, 'folderPath', folderpath)
    return folder
Find a folder on the SC using the specified url.

    Most of the time the folder will already have been created so
    we look for the end folder and check that the rest of the path is right.

    The REST url sent in defines the folder type being created on the Dell
    Storage Center backend. Thus this is generic to server and volume folders.

    :param url: The portion of the url after the base url (see http class) to
                use for this operation. (Can be for Server or Volume folders.)
    :param foldername: Full path to the folder we are looking for.
    :returns: Dell folder object.
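A minimal standalone sketch of the path normalization this method performs; it mirrors the logic above for illustration only and does not talk to a Storage Center:

# Illustrative sketch of the folder-path splitting done by _find_folder.
import os.path

def split_folder_path(foldername, legacy=False):
    basename = os.path.basename(foldername)
    folderpath = os.path.dirname(foldername.strip('/'))
    if folderpath != '':
        if not legacy:
            folderpath = '/' + folderpath
        folderpath += '/'
    elif not legacy:
        folderpath = '/'
    return basename, folderpath

print(split_folder_path('openstack/volumes/gold'))   # ('gold', '/openstack/volumes/')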
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/volume/drivers/dell_emc/sc/storagecenter_api.py#L826-L866
import json import os.path import uuid import eventlet from oslo_log import log as logging from oslo_utils import excutils import requests import six from six.moves import http_client from cinder import exception from cinder.i18n import _ from cinder import utils LOG = logging.getLogger(__name__) class DellDriverRetryableException(exception.VolumeBackendAPIException): message = _("Retryable Dell Exception encountered") class PayloadFilter(object): def __init__(self, filtertype='AND'): self.payload = {} self.payload['filter'] = {'filterType': filtertype, 'filters': []} def append(self, name, val, filtertype='Equals'): if val is not None: apifilter = {} apifilter['attributeName'] = name apifilter['attributeValue'] = val apifilter['filterType'] = filtertype self.payload['filter']['filters'].append(apifilter) class LegacyPayloadFilter(object): def __init__(self, filter_type='AND'): self.payload = {'filterType': filter_type, 'filters': []} def append(self, name, val, filtertype='Equals'): if val is not None: apifilter = {} apifilter['attributeName'] = name apifilter['attributeValue'] = val apifilter['filterType'] = filtertype self.payload['filters'].append(apifilter) class HttpClient(object): def __init__(self, host, port, user, password, verify, asynctimeout, synctimeout, apiversion): self.baseUrl = 'https://%s:%s/' % (host, port) self.session = requests.Session() self.session.auth = (user, password) self.header = {} self.header['Content-Type'] = 'application/json; charset=utf-8' self.header['Accept'] = 'application/json' self.header['x-dell-api-version'] = apiversion self.verify = verify self.asynctimeout = asynctimeout self.synctimeout = synctimeout if not verify: requests.packages.urllib3.disable_warnings() def __enter__(self): return self def __exit__(self, type, value, traceback): self.session.close() def __formatUrl(self, url): baseurl = self.baseUrl if 'api/rest' not in url: baseurl += 'api/rest/' return '%s%s' % (baseurl, url if url[0] != '/' else url[1:]) def _get_header(self, header_async): if header_async: header = self.header.copy() header['async'] = 'True' return header return self.header def _get_async_url(self, asyncTask): try: url = asyncTask.get('returnValue').split( 'https://')[1].split('/', 1)[1] except IndexError: url = asyncTask.get('returnValue') except AttributeError: LOG.debug('_get_async_url: Attribute Error. (%r)', asyncTask) url = 'api/rest/ApiConnection/AsyncTask/' if not url: LOG.debug('_get_async_url: No URL. (%r)', asyncTask) url = 'api/rest/ApiConnection/AsyncTask/' if url.endswith('/'): id = asyncTask.get('instanceId') if id: LOG.debug('_get_async_url: url format error. (%r)', asyncTask) url = url + id else: LOG.error('_get_async_url: Bogus return async task %r', asyncTask) raise exception.VolumeBackendAPIException( message=_('_get_async_url: Invalid URL.')) if url.startswith('<') and url.endswith('>'): LOG.error('_get_async_url: Malformed URL (XML returned). 
(%r)', asyncTask) raise exception.VolumeBackendAPIException( message=_('_get_async_url: Malformed URL.')) return url def _wait_for_async_complete(self, asyncTask): url = self._get_async_url(asyncTask) while True and url: try: r = self.get(url) if not SCApi._check_result(r): LOG.debug('Async error:\n' '\tstatus_code: %(code)s\n' '\ttext: %(text)s\n', {'code': r.status_code, 'text': r.text}) else: if r.content: content = r.json() if content.get('objectType') == 'AsyncTask': url = self._get_async_url(content) eventlet.sleep(1) continue else: LOG.debug('Async debug: r.content is None') return r except Exception: methodname = asyncTask.get('methodName') objectTypeName = asyncTask.get('objectTypeName') msg = (_('Async error: Unable to retrieve %(obj)s ' 'method %(method)s result') % {'obj': objectTypeName, 'method': methodname}) raise exception.VolumeBackendAPIException(message=msg) LOG.debug('_wait_for_async_complete: Error asyncTask: %r', asyncTask) return None def _rest_ret(self, rest_response, async_call): if async_call: if rest_response.status_code == http_client.ACCEPTED: asyncTask = rest_response.json() return self._wait_for_async_complete(asyncTask) else: LOG.debug('REST Async error command not accepted:\n' '\tUrl: %(url)s\n' '\tCode: %(code)d\n' '\tReason: %(reason)s\n', {'url': rest_response.url, 'code': rest_response.status_code, 'reason': rest_response.reason}) msg = _('REST Async Error: Command not accepted.') raise exception.VolumeBackendAPIException(message=msg) return rest_response @utils.retry(retry_param=(requests.ConnectionError, DellDriverRetryableException)) def get(self, url): LOG.debug('get: %(url)s', {'url': url}) rest_response = self.session.get(self.__formatUrl(url), headers=self.header, verify=self.verify, timeout=self.synctimeout) if (rest_response and rest_response.status_code == ( http_client.BAD_REQUEST)) and ( 'Unhandled Exception' in rest_response.text): raise DellDriverRetryableException() return rest_response @utils.retry(retry_param=(requests.ConnectionError,)) def post(self, url, payload, async_call=False): LOG.debug('post: %(url)s data: %(payload)s', {'url': url, 'payload': payload}) return self._rest_ret(self.session.post( self.__formatUrl(url), data=json.dumps(payload, ensure_ascii=False).encode('utf-8'), headers=self._get_header(async_call), verify=self.verify, timeout=( self.asynctimeout if async_call else self.synctimeout)), async_call) @utils.retry(retry_param=(requests.ConnectionError,)) def put(self, url, payload, async_call=False): LOG.debug('put: %(url)s data: %(payload)s', {'url': url, 'payload': payload}) return self._rest_ret(self.session.put( self.__formatUrl(url), data=json.dumps(payload, ensure_ascii=False).encode('utf-8'), headers=self._get_header(async_call), verify=self.verify, timeout=( self.asynctimeout if async_call else self.synctimeout)), async_call) @utils.retry(retry_param=(requests.ConnectionError,)) def delete(self, url, payload=None, async_call=False): LOG.debug('delete: %(url)s data: %(payload)s', {'url': url, 'payload': payload}) named = {'headers': self._get_header(async_call), 'verify': self.verify, 'timeout': ( self.asynctimeout if async_call else self.synctimeout)} if payload: named['data'] = json.dumps( payload, ensure_ascii=False).encode('utf-8') return self._rest_ret( self.session.delete(self.__formatUrl(url), **named), async_call) class SCApiHelper(object): def __init__(self, config, active_backend_id, storage_protocol): self.config = config self.active_backend_id = active_backend_id self.primaryssn = 
self.config.dell_sc_ssn self.storage_protocol = storage_protocol self.san_ip = self.config.san_ip self.san_login = self.config.san_login self.san_password = self.config.san_password self.san_port = self.config.dell_sc_api_port self.apiversion = '2.0' def _swap_credentials(self): if self.san_ip == self.config.san_ip: if (self.config.secondary_san_ip and self.config.secondary_san_login and self.config.secondary_san_password): self.san_ip = self.config.secondary_san_ip self.san_login = self.config.secondary_san_login self.san_password = self.config.secondary_san_password else: LOG.info('Swapping DSM credentials: Secondary DSM ' 'credentials are not set or are incomplete.') return False if self.config.secondary_sc_api_port: self.san_port = self.config.secondary_sc_api_port else: self.san_ip = self.config.san_ip self.san_login = self.config.san_login self.san_password = self.config.san_password self.san_port = self.config.dell_sc_api_port LOG.info('Swapping DSM credentials: New DSM IP is %r.', self.san_ip) return True def _setup_connection(self): connection = SCApi(self.san_ip, self.san_port, self.san_login, self.san_password, self.config.dell_sc_verify_cert, self.config.dell_api_async_rest_timeout, self.config.dell_api_sync_rest_timeout, self.apiversion) connection.vfname = self.config.dell_sc_volume_folder connection.sfname = self.config.dell_sc_server_folder connection.excluded_domain_ips = self.config.excluded_domain_ips connection.included_domain_ips = self.config.included_domain_ips if self.config.excluded_domain_ip: LOG.info("Using excluded_domain_ip for " "excluding domain IPs is deprecated in the " "Stein release of OpenStack. Please use the " "excluded_domain_ips configuration option.") connection.excluded_domain_ips += self.config.excluded_domain_ip connection.excluded_domain_ips = list(set( connection.excluded_domain_ips)) connection.primaryssn = self.primaryssn if self.storage_protocol == 'FC': connection.protocol = 'FibreChannel' if self.active_backend_id: connection.ssn = int(self.active_backend_id) else: connection.ssn = self.primaryssn connection.open_connection() return connection def open_connection(self): connection = None LOG.info('open_connection to %(ssn)s at %(ip)s', {'ssn': self.primaryssn, 'ip': self.san_ip}) if self.primaryssn: try: connection = self._setup_connection() except Exception: if self._swap_credentials(): connection = self._setup_connection() else: with excutils.save_and_reraise_exception(): LOG.error('Failed to connect to the API. 
' 'No backup DSM provided.') if self.apiversion != connection.apiversion: LOG.info('open_connection: Updating API version to %s', connection.apiversion) self.apiversion = connection.apiversion else: raise exception.VolumeBackendAPIException( data=_('Configuration error: dell_sc_ssn not set.')) return connection class SCApi(object): APIDRIVERVERSION = '4.1.2' def __init__(self, host, port, user, password, verify, asynctimeout, synctimeout, apiversion): self.notes = 'Created by Dell EMC Cinder Driver' self.repl_prefix = 'Cinder repl of ' self.ssn = None self.primaryssn = None self.failed_over = False self.vfname = 'openstack' self.sfname = 'openstack' self.excluded_domain_ips = [] self.included_domain_ips = [] self.legacypayloadfilters = False self.consisgroups = True self.protocol = 'Iscsi' self.apiversion = apiversion self.legacyfoldernames = True self.is_direct_connect = False self.client = HttpClient(host, port, user, password, verify, asynctimeout, synctimeout, apiversion) def __enter__(self): return self def __exit__(self, type, value, traceback): self.close_connection() @staticmethod def _check_result(rest_response): if rest_response is not None: if http_client.OK <= rest_response.status_code < ( http_client.MULTIPLE_CHOICES): return True try: response_json = rest_response.json() response_text = response_json.text['result'] except Exception: response_text = rest_response.text LOG.debug('REST call result:\n' '\tUrl: %(url)s\n' '\tCode: %(code)d\n' '\tReason: %(reason)s\n' '\tText: %(text)s', {'url': rest_response.url, 'code': rest_response.status_code, 'reason': rest_response.reason, 'text': response_text}) else: LOG.warning('Failed to get REST call result.') return False @staticmethod def _path_to_array(path): array = [] while True: (path, tail) = os.path.split(path) if tail == '': array.reverse() return array array.append(tail) def _first_result(self, blob): return self._get_result(blob, None, None) def _get_result(self, blob, attribute, value): rsp = None content = self._get_json(blob) if content is not None: if isinstance(content, list): for r in content: if attribute is None or r.get(attribute) == value: rsp = r break elif isinstance(content, dict): if attribute is None or content.get(attribute) == value: rsp = content elif attribute is None: rsp = content if rsp is None: LOG.debug('Unable to find result where %(attr)s is %(val)s', {'attr': attribute, 'val': value}) LOG.debug('Blob was %(blob)s', {'blob': blob.text}) return rsp def _get_json(self, blob): try: return blob.json() except AttributeError: LOG.error('Error invalid json: %s', blob) except TypeError as ex: LOG.error('Error TypeError. %s', ex) except ValueError as ex: LOG.error('JSON decoding error. %s', ex) LOG.debug('_get_json blob %s', blob) return None def _get_id(self, blob): try: if isinstance(blob, dict): return blob.get('instanceId') except AttributeError: LOG.error('Invalid API object: %s', blob) except TypeError as ex: LOG.error('Error TypeError. %s', ex) except ValueError as ex: LOG.error('JSON decoding error. 
%s', ex) LOG.debug('_get_id failed: blob %s', blob) return None def _get_payload_filter(self, filterType='AND'): if self.legacypayloadfilters: return LegacyPayloadFilter(filterType) return PayloadFilter(filterType) def _check_version_fail(self, payload, response): try: result = self._get_json(response).get('result') if result and result.startswith( 'Invalid API version specified, ' 'the version must be in the range ['): self.apiversion = response.text.split('[')[1].split(',')[0] self.client.header['x-dell-api-version'] = self.apiversion LOG.debug('API version updated to %s', self.apiversion) r = self.client.post('ApiConnection/Login', payload) return r except Exception: LOG.error('_check_version_fail: Parsing error.') return response def open_connection(self): self.failed_over = (self.primaryssn != self.ssn) payload = {} payload['Application'] = 'Cinder REST Driver' payload['ApplicationVersion'] = self.APIDRIVERVERSION r = self.client.post('ApiConnection/Login', payload) if not self._check_result(r): r = self._check_version_fail(payload, r) if not self._check_result(r): raise exception.VolumeBackendAPIException( data=_('Failed to connect to Dell REST API')) try: apidict = self._get_json(r) version = apidict['apiVersion'] self.is_direct_connect = apidict['provider'] == 'StorageCenter' splitver = version.split('.') if splitver[0] == '2': if splitver[1] == '0': self.consisgroups = False self.legacypayloadfilters = True elif splitver[1] == '1': self.legacypayloadfilters = True self.legacyfoldernames = (splitver[0] < '4') except Exception: LOG.error('Unrecognized Login Response: %s', r) def close_connection(self): r = self.client.post('ApiConnection/Logout', {}) self._check_result(r) self.client = None def _use_provider_id(self, provider_id): ret = False if provider_id: try: if provider_id.split('.')[0] == six.text_type(self.ssn): ret = True else: LOG.debug('_use_provider_id: provider_id ' '%(pid)r not valid on %(ssn)r', {'pid': provider_id, 'ssn': self.ssn}) except Exception: LOG.error('_use_provider_id: provider_id %s is invalid!', provider_id) return ret def find_sc(self, ssn=-1): ssn = self._vet_ssn(ssn) r = self.client.get('StorageCenter/StorageCenter') result = self._get_result(r, 'scSerialNumber', ssn) if result is None: LOG.error('Failed to find %(s)s. Result %(r)s', {'s': ssn, 'r': r}) raise exception.VolumeBackendAPIException( data=_('Failed to find Storage Center')) return self._get_id(result) def _create_folder(self, url, parent, folder, ssn=-1): ssn = self._vet_ssn(ssn) scfolder = None payload = {} payload['Name'] = folder payload['StorageCenter'] = ssn if parent != '': payload['Parent'] = parent payload['Notes'] = self.notes r = self.client.post(url, payload, True) if self._check_result(r): scfolder = self._first_result(r) return scfolder def _create_folder_path(self, url, foldername, ssn=-1): ssn = self._vet_ssn(ssn) path = self._path_to_array(foldername) folderpath = '' instanceId = '' found = True scfolder = None for folder in path: folderpath = folderpath + folder if found: listurl = url + '/GetList' scfolder = self._find_folder(listurl, folderpath, ssn) if scfolder is None: found = False if found is False: scfolder = self._create_folder(url, instanceId, folder, ssn) if scfolder is None: LOG.error('Unable to create folder path %s', folderpath) break instanceId = self._get_id(scfolder) folderpath = folderpath + '/' return scfolder
Apache License 2.0
felix-hilden/sphinx-codeautolink
src/sphinx_codeautolink/parse.py
ImportTrackerVisitor.visit_ClassDef
python
def visit_ClassDef(self, node: ast.ClassDef):
    for dec in node.decorator_list:
        self.visit(dec)
    for base in node.bases:
        self.visit(base)
    for kw in node.keywords:
        self.visit(kw)
    self._overwrite(node.name)
    self.pseudo_scopes_stack.append(self.pseudo_scopes_stack[0].copy())
    for b in node.body:
        self.visit(b)
    self.pseudo_scopes_stack.pop()
Handle pseudo scope of class body.
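A hedged usage sketch that exercises this visitor through parse_names from the same module; the single-argument parse_names signature is taken from the excerpt below, and the sample source is made up:

# Hedged sketch: running a class definition through parse_names so that
# visit_ClassDef processes the class-body pseudo scope.
from sphinx_codeautolink.parse import parse_names

source = '''
import collections

class Registry(collections.UserDict):
    default = collections.OrderedDict()
'''

for name in parse_names(source):
    print(name.code_str, name.import_components)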
https://github.com/felix-hilden/sphinx-codeautolink/blob/1ab13844f58cf4efe8aac2da710ff5794502770b/src/sphinx_codeautolink/parse.py#L446-L459
import ast import sys from contextlib import contextmanager from enum import Enum from functools import wraps from importlib import import_module from typing import Dict, Union, List, Optional, Tuple from warnings import warn from dataclasses import dataclass, field def parse_names(source: str) -> List['Name']: tree = ast.parse(source) visitor = ImportTrackerVisitor() visitor.visit(tree) return sum([split_access(a) for a in visitor.accessed], []) @dataclass class PendingAccess: components: List[ast.AST] @dataclass class Component: name: str lineno: int end_lineno: int context: str @classmethod def from_ast(cls, node): context = 'load' if isinstance(node, ast.Name): name = node.id context = node.ctx.__class__.__name__.lower() elif isinstance(node, ast.Attribute): name = node.attr context = node.ctx.__class__.__name__.lower() elif isinstance(node, ast.Call): name = NameBreak.call else: raise ValueError(f'Invalid AST for component: {node.__class__.__name__}') end_lineno = getattr(node, 'end_lineno', node.lineno) return cls(name, node.lineno, end_lineno, context) class NameBreak(str, Enum): call = '()' class LinkContext(str, Enum): none = 'none' after_call = 'after_call' import_from = 'import_from' import_target = 'import_target' @dataclass class Name: import_components: List[str] code_str: str lineno: int end_lineno: int context: LinkContext = None resolved_location: str = None @dataclass class Access: context: LinkContext prior_components: List[Component] components: List[Component] hidden_components: List[Component] = field(default_factory=list) @property def full_components(self): if not self.prior_components: return self.hidden_components + self.components if self.hidden_components: proper_components = self.hidden_components[1:] + self.components else: proper_components = self.components[1:] return self.prior_components + proper_components @property def code_str(self): breaks = set(NameBreak) return '.'.join(c.name for c in self.components if c.name not in breaks) @property def lineno_span(self) -> Tuple[int, int]: min_ = min(c.lineno for c in self.components) max_ = max(c.end_lineno for c in self.components) return min_, max_ def split_access(access: Access) -> List[Name]: split = [access] while True: current = split[-1] for i, comp in enumerate(current.components): if i and comp.name == NameBreak.call: hidden = current.hidden_components + current.components[:i] next_ = Access( LinkContext.after_call, current.prior_components, current.components[i:], hidden_components=hidden, ) current.components = current.components[:i] split.append(next_) break else: break if split[-1].components[-1].name == NameBreak.call: split.pop() return [ Name( [c.name for c in s.full_components], s.code_str, *s.lineno_span, context=s.context, ) for s in split ] @dataclass class Assignment: to: Optional[PendingAccess] value: Optional[PendingAccess] def track_parents(func): @wraps(func) def wrapper(self: 'ImportTrackerVisitor', *args, **kwargs): self._parents += 1 r: Union[PendingAccess, Assignment, None] = func(self, *args, **kwargs) self._parents -= 1 if not self._parents: if isinstance(r, Assignment): self._resolve_assignment(r) elif isinstance(r, PendingAccess): self._access(r) return r return wrapper class ImportTrackerVisitor(ast.NodeVisitor): def __init__(self): super().__init__() self.accessed: List[Access] = [] self.in_augassign = False self._parents = 0 self.pseudo_scopes_stack: List[Dict[str, List[Component]]] = [{}] self.outer_scopes_stack: List[Dict[str, List[Component]]] = [] @contextmanager def 
reset_parents(self): self._parents, old = (0, self._parents) yield self._parents = old track_nodes = ( ast.Name, ast.Attribute, ast.Call, ast.Assign, ast.AnnAssign, ) if sys.version_info >= (3, 8): track_nodes += (ast.NamedExpr,) def visit(self, node: ast.AST): if not isinstance(node, self.track_nodes): with self.reset_parents(): return super().visit(node) return super().visit(node) def _overwrite(self, name: str): self.pseudo_scopes_stack[-1].pop(name, None) def _assign(self, local_name: str, components: List[Component]): self._overwrite(local_name) self.pseudo_scopes_stack[-1][local_name] = components def _access(self, access: PendingAccess) -> Optional[Access]: components = [Component.from_ast(n) for n in access.components] prior = self.pseudo_scopes_stack[-1].get(components[0].name, None) if prior is None: return context = components[0].context if context == 'store' and not self.in_augassign: self._overwrite(components[0].name) return access = Access(LinkContext.none, prior, components) self.accessed.append(access) if context == 'del': self._overwrite(components[0].name) return access def _resolve_assignment(self, assignment: Assignment): value = assignment.value access = self._access(value) if value is not None else None if assignment.to is None: return if len(assignment.to.components) == 1: comp = Component.from_ast(assignment.to.components[0]) self._overwrite(comp.name) if access is not None: self._assign(comp.name, access.full_components) else: self._access(assignment.to) def _access_simple(self, name: str, lineno: int) -> Optional[Access]: component = Component(name, lineno, lineno, 'load') prior = self.pseudo_scopes_stack[-1].get(component.name, None) if prior is None: return access = Access(LinkContext.none, prior, [component]) self.accessed.append(access) return access def visit_Global(self, node: ast.Global): if not self.outer_scopes_stack: return imports = self.outer_scopes_stack[0] for name in node.names: self._overwrite(name) if name in imports: self._assign(name, imports[name]) self._access_simple(name, node.lineno) def visit_Nonlocal(self, node: ast.Nonlocal): imports_stack = self.outer_scopes_stack[1:] for name in node.names: self._overwrite(name) for imports in imports_stack[::-1]: if name in imports: self.pseudo_scopes_stack[-1][name] = imports[name] self._access_simple(name, node.lineno) break def visit_Import(self, node: Union[ast.Import, ast.ImportFrom], prefix: str = ''): import_star = (node.names[0].name == '*') if import_star: try: mod = import_module(node.module) except ImportError: warn(f'Could not import module `{node.module}` for parsing!') return import_names = [name for name in mod.__dict__ if not name.startswith('_')] aliases = [None] * len(import_names) else: import_names = [name.name for name in node.names] aliases = [name.asname for name in node.names] end_lineno = getattr(node, 'end_lineno', node.lineno) prefix_parts = prefix.rstrip('.').split('.') if prefix else [] prefix_components = [ Component(n, node.lineno, end_lineno, 'load') for n in prefix_parts ] if prefix: self.accessed.append(Access(LinkContext.import_from, [], prefix_components)) for import_name, alias in zip(import_names, aliases): if not import_star: components = [ Component(n, node.lineno, end_lineno, 'load') for n in import_name.split('.') ] self.accessed.append( Access(LinkContext.import_target, [], components, prefix_components) ) if not alias and '.' 
in import_name: import_name = import_name.split('.')[0] full_components = [ Component(n, node.lineno, end_lineno, 'store') for n in (prefix + import_name).split('.') ] self._assign(alias or import_name, full_components) def visit_ImportFrom(self, node: ast.ImportFrom): if node.level: for name in node.names: self._overwrite(name.asname or name.name) else: self.visit_Import(node, prefix=node.module + '.') @track_parents def visit_Name(self, node): return PendingAccess([node]) @track_parents def visit_Attribute(self, node): inner: PendingAccess = self.visit(node.value) if inner is not None: inner.components.append(node) return inner @track_parents def visit_Call(self, node: ast.Call): inner: PendingAccess = self.visit(node.func) if inner is not None: inner.components.append(node) with self.reset_parents(): for arg in node.args + node.keywords: self.visit(arg) if hasattr(node, 'starargs'): self.visit(node.starargs) if hasattr(node, 'kwargs'): self.visit(node.kwargs) return inner @track_parents def visit_Assign(self, node: ast.Assign): value = self.visit(node.value) target_returns = [] for n in node.targets: target_returns.append(self.visit(n)) if len(target_returns) == 1: return Assignment(target_returns[0], value) else: return value @track_parents def visit_AnnAssign(self, node: ast.AnnAssign): if node.value is not None: value = self.visit(node.value) target = self.visit(node.target) with self.reset_parents(): self.visit(node.annotation) if node.value is not None: return Assignment(target, value) def visit_AugAssign(self, node: ast.AugAssign): self.visit(node.value) self.in_augassign, temp = (True, self.in_augassign) self.visit(node.target) self.in_augassign = temp @track_parents def visit_NamedExpr(self, node): value = self.visit(node.value) target = self.visit(node.target) return Assignment(target, value) def visit_AsyncFor(self, node: ast.AsyncFor): self.visit_AsyncFor(node) def visit_For(self, node: Union[ast.For, ast.AsyncFor]): self.visit(node.iter) self.visit(node.target) for n in node.body: self.visit(n) for n in node.orelse: self.visit(n)
MIT License
westpa/westpa
src/oldtools/stats/edfs.py
EDF.std
python
def std(self):
    return self.cmoment(2)**0.5
Return the standard deviation (root of the variance) of this probability distribution.
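A minimal sketch of computing the standard deviation from an empirical distribution; it assumes the module is importable as oldtools.stats.edfs, which is inferred from the path in this record:

# Minimal sketch: build an EDF from samples and query its standard deviation.
import numpy
from oldtools.stats.edfs import EDF   # import path inferred from this record

samples = numpy.random.normal(loc=0.0, scale=2.0, size=10000)
edf = EDF(samples)
print(edf.mean(), edf.std())          # std should come out close to 2.0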
https://github.com/westpa/westpa/blob/cda177c5dea2cee571d71c4b04fcc625dc5f689c/src/oldtools/stats/edfs.py#L130-L132
import numpy class EDF: @staticmethod def from_array(array): edf = EDF(None,None) edf.x = array[:,0] edf.F = array[:,1] edf.dF = numpy.diff(edf.F) return edf @staticmethod def from_arrays(x, F): edf = EDF(None,None) edf.x = x edf.F = F edf.dF = numpy.diff(edf.F) return edf def __init__(self, values, weights = None): if values is None: self.x = None self.F = None self.dF = None return if weights is None: weights = numpy.ones((len(values)), numpy.float64) elif numpy.isscalar(weights): tweights = numpy.empty((len(values)), numpy.float64) tweights[:] = weights weights = tweights else: if len(weights) != len(values): raise TypeError('values and weights have different lengths') sort_indices = numpy.argsort(values) values = values[sort_indices] weights = weights[sort_indices] x = values[numpy.concatenate(([True], values[1:] != values[:-1]))] F = numpy.empty((len(x),), numpy.float64) ival_last = 0 ival = 0 for ibin in range(0, len(x)): while ival < len(values) and values[ival] <= x[ibin]: ival+=1 F[ibin] = weights[ival_last:ival].sum() ival_last = ival F = numpy.add.accumulate(F) F /= F[-1] self.x = x self.F = F self.dF = numpy.diff(F) def __len__(self): return len(self.x) def __call__(self, x): indices = numpy.digitize(x, self.x) indices[indices >= len(self.x)] = len(self.x) - 1 return self.F[indices] def as_array(self): result = numpy.empty((len(self.F),2), dtype=numpy.result_type(self.x, self.F)) result[:,0] = self.x result[:,1] = self.F return result def quantiles(self, p): indices = numpy.searchsorted(self.F, p) indices[indices >= len(self.x)] = len(self.x) - 1 return self.x[indices] def quantile(self, p): return self.quantiles([p])[0] def median(self): return self.quantiles([0.5])[0] def moment(self, n): if n == 1: return (self.x[:-1] * self.dF).sum() else: return (self.x[:-1]**n * self.dF).sum() def cmoment(self, n): if n < 2: return 0 return ((self.x[:-1]-self.moment(1))**n * self.dF).sum() def mean(self): return self.moment(1) def var(self): return self.cmoment(2)
MIT License
pappasam/latexbuild
latexbuild/assertions.py
list_is_type
python
def list_is_type(ls, t):
    if not isclass(t):
        raise TypeError("{} is not a class".format(t))
    elif not isinstance(ls, list):
        raise TypeError("{} is not a list".format(ls))
    else:
        ls_bad_types = [i for i in ls if not isinstance(i, t)]
        if len(ls_bad_types) > 0:
            raise TypeError("{} are not {}".format(ls_bad_types, t))
    return True
Assert that a list contains only elements of type t

    Return True if list contains elements of type t
    Raise TypeError if t is not a class
    Raise TypeError if ls is not a list
    Raise TypeError if ls contains non-t elements

    :param ls: LIST
    :param t: python class
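A short usage sketch; it assumes latexbuild is installed, and the sample lists are illustrative:

# Usage sketch for list_is_type.
from latexbuild.assertions import list_is_type

print(list_is_type([1, 2, 3], int))    # True
try:
    list_is_type([1, "a", 3], int)     # raises TypeError naming the bad items
except TypeError as err:
    print(err)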
https://github.com/pappasam/latexbuild/blob/596a2a0a4c42eaa5eb9503d64f9073ad5d0640d5/latexbuild/assertions.py#L44-L63
import shutil
import os
from inspect import isclass


def has_file_extension(filepath, ext_required):
    ext = os.path.splitext(filepath)[-1]
    if ext != ext_required:
        msg_tmpl = "The extension for {}, which is {}, does not equal {}"
        msg_format = msg_tmpl.format(filepath, ext, ext_required)
        raise ValueError(msg_format)
    return True


def is_binary(system_binary_str):
    if not isinstance(system_binary_str, str):
        raise TypeError("{} must be of type STR".format(system_binary_str))
    binary_str = shutil.which(system_binary_str)
    if not binary_str:
        msg = "{} is not valid system binary".format(system_binary_str)
        raise ValueError(msg)
    return True
MIT License
berkeleyautomation/autolab_core
autolab_core/image.py
Image.mask_by_ind
python
def mask_by_ind(self, inds):
    new_data = np.zeros(self.shape)
    for ind in inds:
        new_data[ind[0], ind[1]] = self.data[ind[0], ind[1]]
    return type(self)(new_data.astype(self.data.dtype), self.frame)
Create a new image by zeroing out data at locations not in the
    given indices.

    Parameters
    ----------
    inds : :obj:`numpy.ndarray` of int
        A 2D ndarray whose first entry is the list of row indices
        and whose second entry is the list of column indices.
        The data at these indices will not be set to zero.

    Returns
    -------
    :obj:`Image`
        A new Image of the same type, with data not indexed by inds
        set to zero.
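A hedged sketch of masking a small image by indices; DepthImage is assumed to be one of the concrete Image subclasses exported at the top level of autolab_core, and the data and frame name are made up:

# Hedged sketch: keep only two pixels of a 4x4 depth image.
import numpy as np
from autolab_core import DepthImage   # assumed top-level export

data = np.arange(16, dtype=np.float32).reshape(4, 4)
im = DepthImage(data, frame="camera")
inds = np.array([[0, 0], [3, 3]])     # (row, column) pairs to keep
masked = im.mask_by_ind(inds)
print(masked.data)                    # zeros everywhere except (0, 0) and (3, 3)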
https://github.com/berkeleyautomation/autolab_core/blob/cda081d2e07e3fe6cc9f3e8c86eea92330910d20/autolab_core/image.py#L496-L516
from abc import ABCMeta, abstractmethod import logging import os import cv2 import numpy as np import PIL.Image as PImage import matplotlib.pyplot as plt import scipy.signal as ssg import scipy.ndimage.filters as sf import scipy.ndimage.interpolation as sni import scipy.ndimage.morphology as snm import scipy.spatial.distance as ssd import sklearn.cluster as sc import skimage.morphology as morph import skimage.transform as skt from .constants import MAX_DEPTH, MIN_DEPTH, MAX_IR, COLOR_IMAGE_EXTS from .points import PointCloud, NormalCloud, PointNormalCloud from .primitives import Contour BINARY_IM_MAX_VAL = np.iinfo(np.uint8).max BINARY_IM_DEFAULT_THRESH = BINARY_IM_MAX_VAL / 2 def imresize(image, size, interp="nearest"): skt_interp_map = { "nearest": 0, "bilinear": 1, "biquadratic": 2, "bicubic": 3, "biquartic": 4, "biquintic": 5, } if interp in ("lanczos", "cubic"): raise ValueError( '"lanczos" and "cubic"' " interpolation are no longer supported." ) assert ( interp in skt_interp_map ), 'Interpolation "{}" not' " supported.".format(interp) if isinstance(size, (tuple, list)): output_shape = size elif isinstance(size, (float)): np_shape = np.asarray(image.shape).astype(np.float32) np_shape[0:2] *= size output_shape = tuple(np_shape.astype(int)) elif isinstance(size, (int)): np_shape = np.asarray(image.shape).astype(np.float32) np_shape[0:2] *= size / 100.0 output_shape = tuple(np_shape.astype(int)) else: raise ValueError('Invalid type for size "{}".'.format(type(size))) return skt.resize( image.astype(np.float), output_shape, order=skt_interp_map[interp], anti_aliasing=False, mode="constant", ) class Image(object): __metaclass__ = ABCMeta def __init__(self, data, frame="unspecified"): if not isinstance(data, np.ndarray): raise ValueError("Must initialize image with a numpy ndarray") if not isinstance(frame, str): raise ValueError("Must provide string name of frame of data") self._check_valid_data(data) self._data = self._preprocess_data(data) self._frame = frame self._encoding = "passthrough" def _preprocess_data(self, data): original_type = data.dtype if len(data.shape) == 1: data = data[:, np.newaxis, np.newaxis] elif len(data.shape) == 2: data = data[:, :, np.newaxis] elif len(data.shape) == 0 or len(data.shape) > 3: raise ValueError( "Illegal data array passed to image. 
" "Must be 1, 2, or 3 dimensional numpy array" ) return data.astype(original_type) @property def shape(self): return self._data.shape @property def height(self): return self._data.shape[0] @property def width(self): return self._data.shape[1] @property def center(self): return np.array([self.height / 2, self.width / 2]) @property def channels(self): return self._data.shape[2] @property def type(self): return self._data.dtype.type @property def raw_data(self): return self._data @property def data(self): return self._data.squeeze() @property def frame(self): return self._frame @property def encoding(self): return self._encoding @property def rosmsg(self): from cv_bridge import CvBridge, CvBridgeError cv_bridge = CvBridge() try: return cv_bridge.cv2_to_imgmsg(self._data, encoding=self._encoding) except CvBridgeError as cv_bridge_exception: logging.error("%s" % (str(cv_bridge_exception))) @abstractmethod def _check_valid_data(self, data): pass @abstractmethod def _image_data(self): pass @abstractmethod def resize(self, size, interp): pass @staticmethod def can_convert(x): if len(x.shape) < 2 or len(x.shape) > 3: return False channels = 1 if len(x.shape) == 3: channels = x.shape[2] if channels > 4: return False return True @staticmethod def from_array(x, frame="unspecified"): if not Image.can_convert(x): raise ValueError("Cannot convert array to an Image!") dtype = x.dtype channels = 1 if len(x.shape) == 3: channels = x.shape[2] if dtype == np.uint8: if channels == 1: if np.any((x % BINARY_IM_MAX_VAL) > 0): return GrayscaleImage(x, frame) return BinaryImage(x, frame) elif channels == 3: return ColorImage(x, frame) else: raise ValueError( "No available image conversion for uint8 array " "with 2 channels" ) elif dtype == np.uint16: if channels != 1: raise ValueError( "No available image conversion for uint16 array " "with 2 or 3 channels" ) return GrayscaleImage(x, frame) elif dtype == np.float32 or dtype == np.float64: if channels == 1: return DepthImage(x, frame) elif channels == 2: return GdImage(x, frame) elif channels == 3: logging.warning("Converting float array to uint8") return ColorImage(x.astype(np.uint8), frame) return RgbdImage(x, frame) else: raise ValueError( "Conversion for dtype %s not supported!" 
% (str(dtype)) ) def transform(self, translation, theta, method="opencv"): theta = np.rad2deg(theta) trans_map = np.float32( [[1, 0, translation[1]], [0, 1, translation[0]]] ) rot_map = cv2.getRotationMatrix2D( (self.center[1], self.center[0]), theta, 1 ) trans_map_aff = np.r_[trans_map, [[0, 0, 1]]] rot_map_aff = np.r_[rot_map, [[0, 0, 1]]] full_map = rot_map_aff.dot(trans_map_aff) full_map = full_map[:2, :] if method == "opencv": im_data_tf = cv2.warpAffine( self.data, full_map, (self.width, self.height), flags=cv2.INTER_NEAREST, ) else: im_data_tf = sni.affine_transform( self.data, matrix=full_map[:, :2], offset=full_map[:, 2], order=0, ) return type(self)( im_data_tf.astype(self.data.dtype), frame=self._frame ) def align(self, scale, center, angle, height, width): scaled_im = self.resize(scale) cx = scaled_im.center[1] cy = scaled_im.center[0] dx = cx - center[0] * scale dy = cy - center[1] * scale translation = np.array([dy, dx]) tf_im = scaled_im.transform(translation, angle) aligned_im = tf_im.crop(height, width) return aligned_im def gradients(self): g = np.gradient(self.data.astype(np.float32)) return g def ij_to_linear(self, i, j): return i + j.dot(self.width) def linear_to_ij(self, linear_inds): return np.c_[linear_inds / self.width, linear_inds % self.width] def is_same_shape(self, other_im, check_channels=False): if self.height == other_im.height and self.width == other_im.width: if check_channels and self.channels != other_im.channels: return False return True return False
Apache License 2.0
opennetworkingfoundation/tapi
RI/flask_server/tapi_server/models/tapi_connectivity_switch.py
TapiConnectivitySwitch.switch_direction
python
def switch_direction(self, switch_direction):
    self._switch_direction = switch_direction
Sets the switch_direction of this TapiConnectivitySwitch.

    :param switch_direction: The switch_direction of this TapiConnectivitySwitch.
    :type switch_direction: TapiCommonPortDirection
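A hedged usage sketch of the generated setter; the BIDIRECTIONAL member name on TapiCommonPortDirection is an assumption about the generated model, not confirmed by this record:

# Hedged sketch: assigning a port direction through the property setter.
from tapi_server.models.tapi_connectivity_switch import TapiConnectivitySwitch
from tapi_server.models.tapi_common_port_direction import TapiCommonPortDirection

switch = TapiConnectivitySwitch(local_id="switch-1")
switch.switch_direction = TapiCommonPortDirection.BIDIRECTIONAL  # assumed member
print(switch.switch_direction)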
https://github.com/opennetworkingfoundation/tapi/blob/1f3fd9483d5674552c5a31206c97399c8c151897/RI/flask_server/tapi_server/models/tapi_connectivity_switch.py#L227-L235
from __future__ import absolute_import from datetime import date, datetime from typing import List, Dict from tapi_server.models.base_model_ import Model from tapi_server.models.tapi_common_local_class import TapiCommonLocalClass from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue from tapi_server.models.tapi_common_port_direction import TapiCommonPortDirection from tapi_server.models.tapi_connectivity_connection_end_point_ref import TapiConnectivityConnectionEndPointRef from tapi_server.models.tapi_connectivity_route_ref import TapiConnectivityRouteRef from tapi_server.models.tapi_connectivity_selection_control import TapiConnectivitySelectionControl from tapi_server.models.tapi_connectivity_selection_reason import TapiConnectivitySelectionReason from tapi_server import util class TapiConnectivitySwitch(Model): def __init__(self, name=None, local_id=None, selected_connection_end_point=None, selected_route=None, selection_control=None, selection_reason=None, switch_direction=None): self.openapi_types = { 'name': List[TapiCommonNameAndValue], 'local_id': str, 'selected_connection_end_point': List[TapiConnectivityConnectionEndPointRef], 'selected_route': List[TapiConnectivityRouteRef], 'selection_control': TapiConnectivitySelectionControl, 'selection_reason': TapiConnectivitySelectionReason, 'switch_direction': TapiCommonPortDirection } self.attribute_map = { 'name': 'name', 'local_id': 'local-id', 'selected_connection_end_point': 'selected-connection-end-point', 'selected_route': 'selected-route', 'selection_control': 'selection-control', 'selection_reason': 'selection-reason', 'switch_direction': 'switch-direction' } self._name = name self._local_id = local_id self._selected_connection_end_point = selected_connection_end_point self._selected_route = selected_route self._selection_control = selection_control self._selection_reason = selection_reason self._switch_direction = switch_direction @classmethod def from_dict(cls, dikt) -> 'TapiConnectivitySwitch': return util.deserialize_model(dikt, cls) @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def local_id(self): return self._local_id @local_id.setter def local_id(self, local_id): self._local_id = local_id @property def selected_connection_end_point(self): return self._selected_connection_end_point @selected_connection_end_point.setter def selected_connection_end_point(self, selected_connection_end_point): self._selected_connection_end_point = selected_connection_end_point @property def selected_route(self): return self._selected_route @selected_route.setter def selected_route(self, selected_route): self._selected_route = selected_route @property def selection_control(self): return self._selection_control @selection_control.setter def selection_control(self, selection_control): self._selection_control = selection_control @property def selection_reason(self): return self._selection_reason @selection_reason.setter def selection_reason(self, selection_reason): self._selection_reason = selection_reason @property def switch_direction(self): return self._switch_direction @switch_direction.setter
Apache License 2.0
neurostuff/nimare
nimare/dataset.py
Dataset._generic_column_getter
python
def _generic_column_getter(self, attr, ids=None, column=None, ignore_columns=None):
    if ignore_columns is None:
        ignore_columns = self._id_cols
    else:
        ignore_columns += self._id_cols

    df = getattr(self, attr)
    return_first = False

    if isinstance(ids, str) and column is not None:
        return_first = True
    ids = listify(ids)

    available_types = [c for c in df.columns if c not in self._id_cols]
    if (column is not None) and (column not in available_types):
        raise ValueError(
            f"{column} not found in {attr}.\nAvailable types: {', '.join(available_types)}"
        )

    if column is not None:
        if ids is not None:
            result = df[column].loc[df["id"].isin(ids)].tolist()
        else:
            result = df[column].tolist()
    else:
        if ids is not None:
            result = {v: df[v].loc[df["id"].isin(ids)].tolist() for v in available_types}
            result = {k: v for k, v in result.items() if any(v)}
        else:
            result = {v: df[v].tolist() for v in available_types}
        result = list(result.keys())

    if return_first:
        return result[0]
    else:
        return result
Extract information from DataFrame-based attributes.

    Parameters
    ----------
    attr : :obj:`str`
        The name of the DataFrame-format Dataset attribute to search.
    ids : :obj:`list` or None, optional
        A list of study IDs within which to extract values.
        If None, extract values for all studies in the Dataset.
        Default is None.
    column : :obj:`str` or None, optional
        The column from which to extract values.
        If None, a list of all columns with valid values will be returned.
        Must be a column within Dataset.[attr].
    ignore_columns : :obj:`list` or None, optional
        A list of columns to ignore. Only used if ``column`` is None.

    Returns
    -------
    result : :obj:`list` or :obj:`str`
        A list of values or a string, depending on if ids is a list (or None) or a string.
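A standalone illustration of the column-getter pattern on a plain pandas DataFrame; this is not the NiMARE API itself, and the column names and data are made up:

# Illustrative re-creation of the column-getter pattern used above.
import pandas as pd

texts = pd.DataFrame({
    "id": ["s1-c1", "s2-c1"],
    "abstract": ["first abstract", "second abstract"],
})

def column_getter(df, ids=None, column=None, id_cols=("id",)):
    available = [c for c in df.columns if c not in id_cols]
    if column is None:
        return available                     # list usable columns
    rows = df if ids is None else df[df["id"].isin(ids)]
    return rows[column].tolist()             # values for the requested studies

print(column_getter(texts))                                   # ['abstract']
print(column_getter(texts, ids=["s1-c1"], column="abstract"))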
https://github.com/neurostuff/nimare/blob/e8814c23b33c64a73de907bf56de852b13a9d56a/nimare/dataset.py#L432-L489
import copy import inspect import json import logging import os.path as op import numpy as np import pandas as pd from nilearn._utils import load_niimg from .base import NiMAREBase from .utils import ( dict_to_coordinates, dict_to_df, get_masker, get_template, listify, mm2vox, transform_coordinates_to_space, try_prepend, validate_df, validate_images_df, ) LGR = logging.getLogger(__name__) class Dataset(NiMAREBase): _id_cols = ["id", "study_id", "contrast_id"] def __init__(self, source, target="mni152_2mm", mask=None): if isinstance(source, str): with open(source, "r") as f_obj: data = json.load(f_obj) elif isinstance(source, dict): data = source else: raise Exception("`source` needs to be a file path or a dictionary") id_columns = ["id", "study_id", "contrast_id"] all_ids = [] for pid in data.keys(): for expid in data[pid]["contrasts"].keys(): id_ = f"{pid}-{expid}" all_ids.append([id_, pid, expid]) id_df = pd.DataFrame(columns=id_columns, data=all_ids) id_df = id_df.set_index("id", drop=False) self._ids = id_df.index.values if mask is None: mask = get_template(target, mask="brain") self.masker = mask self.space = target self.annotations = dict_to_df(id_df, data, key="labels") self.coordinates = dict_to_coordinates(data, masker=self.masker, space=self.space) self.images = dict_to_df(id_df, data, key="images") self.metadata = dict_to_df(id_df, data, key="metadata") self.texts = dict_to_df(id_df, data, key="text") self.basepath = None def __repr__(self): signature = inspect.signature(self.__init__) defaults = { k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty } params = self.get_params() params = {k: v for k, v in params.items() if "__" not in k} params["space"] = self.space params.pop("target") params = {k: v for k, v in params.items() if defaults.get(k) != v} param_strs = [] for k, v in params.items(): if isinstance(v, str): param_str = f"{k}='{v}'" else: param_str = f"{k}={v}" param_strs.append(param_str) params_str = ", ".join(param_strs) params_str = f"{len(self.ids)} experiments{', ' if params_str else ''}{params_str}" rep = f"{self.__class__.__name__}({params_str})" return rep @property def ids(self): return self.__ids @ids.setter def _ids(self, ids): ids = np.sort(np.asarray(ids)) assert isinstance(ids, np.ndarray) and ids.ndim == 1 self.__ids = ids @property def masker(self): return self.__masker @masker.setter def masker(self, mask): mask = get_masker(mask) if hasattr(self, "masker") and not np.array_equal( self.masker.mask_img.affine, mask.mask_img.affine ): LGR.warning("New masker does not match old masker. 
Space is assumed to be the same.") self.__masker = mask @property def annotations(self): return self.__annotations @annotations.setter def annotations(self, df): validate_df(df) self.__annotations = df.sort_values(by="id") @property def coordinates(self): return self.__coordinates @coordinates.setter def coordinates(self, df): validate_df(df) self.__coordinates = df.sort_values(by="id") @property def images(self): return self.__images @images.setter def images(self, df): validate_df(df) self.__images = validate_images_df(df).sort_values(by="id") @property def metadata(self): return self.__metadata @metadata.setter def metadata(self, df): validate_df(df) self.__metadata = df.sort_values(by="id") @property def texts(self): return self.__texts @texts.setter def texts(self, df): validate_df(df) self.__texts = df.sort_values(by="id") def slice(self, ids): new_dset = copy.deepcopy(self) new_dset._ids = ids for attribute in ("annotations", "coordinates", "images", "metadata", "texts"): df = getattr(new_dset, attribute) df = df.loc[df["id"].isin(ids)] setattr(new_dset, attribute, df) return new_dset def merge(self, right): assert isinstance(right, Dataset) shared_ids = np.intersect1d(self.ids, right.ids) if shared_ids.size: raise Exception("Duplicate IDs detected in both datasets.") all_ids = np.concatenate((self.ids, right.ids)) new_dset = copy.deepcopy(self) new_dset._ids = all_ids for attribute in ("annotations", "coordinates", "images", "metadata", "texts"): df1 = getattr(self, attribute) df2 = getattr(right, attribute) new_df = df1.append(df2, ignore_index=True, sort=False) new_df.sort_values(by="id", inplace=True) new_df.reset_index(drop=True, inplace=True) new_df = new_df.where(~new_df.isna(), None) setattr(new_dset, attribute, new_df) new_dset.coordinates = transform_coordinates_to_space( new_dset.coordinates, self.masker, self.space, ) return new_dset def update_path(self, new_path): self.basepath = op.abspath(new_path) df = self.images relative_path_cols = [c for c in df if c.endswith("__relative")] for col in relative_path_cols: abs_col = col.replace("__relative", "") if abs_col in df.columns: LGR.info(f"Overwriting images column {abs_col}") df[abs_col] = df[col].apply(try_prepend, prefix=self.basepath) self.images = df def copy(self): return copy.deepcopy(self) def get(self, dict_, drop_invalid=True): results = {} results["id"] = self.ids keep_idx = np.arange(len(self.ids), dtype=int) for k, vals in dict_.items(): if vals[0] == "image": temp = self.get_images(imtype=vals[1]) elif vals[0] == "metadata": temp = self.get_metadata(field=vals[1]) elif vals[0] == "coordinates": temp = [self.coordinates.loc[self.coordinates["id"] == id_] for id_ in self.ids] temp = [t if t.size else None for t in temp] elif vals[0] == "annotations": temp = [self.annotations.loc[self.annotations["id"] == id_] for id_ in self.ids] temp = [t if t.size else None for t in temp] else: raise ValueError(f"Input '{vals[0]}' not understood.") results[k] = temp temp_keep_idx = np.where([t is not None for t in temp])[0] keep_idx = np.intersect1d(keep_idx, temp_keep_idx) if drop_invalid and (len(keep_idx) != len(self.ids)): LGR.info(f"Retaining {len(keep_idx)}/{len(self.ids)} studies") elif len(keep_idx) != len(self.ids): raise Exception( f"Only {len(keep_idx)}/{len(self.ids)} in Dataset contain the necessary data. " "If you want to analyze the subset of studies with required data, " "set `drop_invalid` to True." 
) for k in results: results[k] = [results[k][i] for i in keep_idx] if dict_.get(k, [None])[0] in ("coordinates", "annotations"): results[k] = pd.concat(results[k]) return results
MIT License
forseti-security/forseti-security
google/cloud/forseti/services/inventory/storage.py
InventoryIndex.create
python
def create(cls):
    utc_now = date_time.get_utc_now_datetime()
    micro_timestamp = date_time.get_utc_now_microtimestamp(utc_now)
    return InventoryIndex(
        id=micro_timestamp,
        created_at_datetime=utc_now,
        completed_at_datetime=None,
        inventory_status=IndexState.CREATED,
        schema_version=CURRENT_SCHEMA,
        counter=0)
Create a new inventory index row.

    Returns:
        InventoryIndex: InventoryIndex row object.
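A hedged sketch that builds a new, unpersisted index row and inspects it; it assumes forseti-security and its dependencies are installed and touches no database:

# Hedged sketch: create an InventoryIndex row in memory.
from google.cloud.forseti.services.inventory.storage import InventoryIndex

index = InventoryIndex.create()
print(index.id)                 # microsecond UTC timestamp used as the id
print(index.inventory_status)   # IndexState.CREATED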
https://github.com/forseti-security/forseti-security/blob/de5d0f4d047c293a2a72545a76c3783980865551/google/cloud/forseti/services/inventory/storage.py#L129-L143
from builtins import object import json import enum import threading from sqlalchemy import and_ from sqlalchemy import BigInteger from sqlalchemy import case from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import Enum from sqlalchemy import exists from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import or_ from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Text from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import aliased from sqlalchemy.orm import column_property from sqlalchemy.orm import joinedload from sqlalchemy.orm import relationship from google.cloud.forseti.common.util import date_time from google.cloud.forseti.common.util import logger from google.cloud.forseti.common.util.index_state import IndexState from google.cloud.forseti.services import utils from google.cloud.forseti.services.inventory.base.storage import Storage as BaseStorage from google.cloud.forseti.services.scanner.dao import ScannerIndex LOGGER = logger.get_logger(__name__) BASE = declarative_base() CURRENT_SCHEMA = 1 PER_YIELD = 1024 class Categories(enum.Enum): resource = 1 iam_policy = 2 gcs_policy = 3 dataset_policy = 4 billing_info = 5 enabled_apis = 6 kubernetes_service_config = 7 org_policy = 8 access_policy = 9 SUPPORTED_CATEGORIES = frozenset(item.name for item in list(Categories)) class InventoryWarnings(BASE): __tablename__ = 'inventory_warnings' id = Column(Integer, primary_key=True, autoincrement=True) inventory_index_id = Column(BigInteger, ForeignKey('inventory_index.id')) resource_full_name = Column(String(2048)) warning_message = Column(Text) class InventoryIndex(BASE): __tablename__ = 'inventory_index' id = Column(BigInteger, primary_key=True) created_at_datetime = Column(DateTime) completed_at_datetime = Column(DateTime) inventory_status = Column(Text) schema_version = Column(Integer) progress = Column(Text) counter = Column(Integer) inventory_index_warnings = Column(Text(16777215)) inventory_index_errors = Column(Text(16777215)) message = Column(Text(16777215)) warning_count = column_property( select([func.count(InventoryWarnings.id)]).where( InventoryWarnings.inventory_index_id == id).correlate_except( InventoryWarnings)) warning_messages = relationship('InventoryWarnings', cascade='expunge') def __repr__(self): return """<{}(id='{}', version='{}', timestamp='{}')>""".format( self.__class__.__name__, self.id, self.schema_version, self.created_at_datetime) @classmethod
Apache License 2.0
golemhq/golem
golem/browser.py
elements
python
def elements(*args, **kwargs):
    webelement = get_browser().find_all(*args, **kwargs)
    return webelement
Shortcut to golem.browser.get_browser().find_all()
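A hedged sketch of how this shortcut is typically called from a Golem test; the URL and selector are placeholders, and open_browser is assumed to return the active driver:

# Hedged sketch: use the module-level shortcuts inside a test function.
from golem import browser

def test(data):
    drv = browser.open_browser()          # assumed to return the active driver
    drv.get("https://example.com/")
    items = browser.elements("li")        # same as get_browser().find_all("li")
    assert len(items) >= 0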
https://github.com/golemhq/golem/blob/ab6a08ee54d2c5d27ab6af15b833ce3d2575d3e3/golem/browser.py#L29-L32
import traceback
from contextlib import contextmanager

from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

from golem.core import utils
from golem.core.project import Project
from golem import execution
from golem.webdriver import (GolemChromeDriver, GolemEdgeDriver,
                             GolemGeckoDriver, GolemIeDriver,
                             GolemOperaDriver, GolemRemoteDriver)


class InvalidBrowserIdError(Exception):
    pass


def element(*args, **kwargs):
    webelement = get_browser().find(*args, **kwargs)
    return webelement
MIT License
niaorg/niapy
niapy/algorithms/basic/fwa.py
EnhancedFireworksAlgorithm.mapping
python
def mapping(self, x, task):
    return repair.rand(x, task.lower, task.upper, rng=self.rng)
Fix value to bounds.

    Args:
        x (numpy.ndarray): Individual to fix.
        task (Task): Optimization task.

    Returns:
        numpy.ndarray: Individual in search range.
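A hedged end-to-end sketch that exercises this algorithm, and therefore its mapping step, on a toy problem; the Task/Sphere API of the niapy 2.x releases is assumed:

# Hedged sketch: run EFWA on a bounded sphere problem; out-of-range sparks are
# repaired by the mapping method above.
from niapy.algorithms.basic import EnhancedFireworksAlgorithm
from niapy.problems import Sphere
from niapy.task import Task

task = Task(problem=Sphere(dimension=10, lower=-5, upper=5), max_evals=10000)
algo = EnhancedFireworksAlgorithm(population_size=5)
best_x, best_fitness = algo.run(task)
print(best_fitness)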
https://github.com/niaorg/niapy/blob/88de4acbdec9836111c8a0f1c7f76f75b191d344/niapy/algorithms/basic/fwa.py#L488-L499
import logging import numpy as np from niapy.algorithms.algorithm import Algorithm from niapy.util.distances import euclidean import niapy.util.repair as repair logging.basicConfig() logger = logging.getLogger('niapy.algorithms.basic') logger.setLevel('INFO') __all__ = ['FireworksAlgorithm', 'EnhancedFireworksAlgorithm', 'DynamicFireworksAlgorithm', 'DynamicFireworksAlgorithmGauss', 'BareBonesFireworksAlgorithm'] class BareBonesFireworksAlgorithm(Algorithm): Name = ['BareBonesFireworksAlgorithm', 'BBFWA'] @staticmethod def info(): return r"""Junzhi Li, Ying Tan, The bare bones fireworks algorithm: A minimalist global optimizer, Applied Soft Computing, Volume 62, 2018, Pages 454-462, ISSN 1568-4946, https://doi.org/10.1016/j.asoc.2017.10.046.""" def __init__(self, num_sparks=10, amplification_coefficient=1.5, reduction_coefficient=0.5, *args, **kwargs): kwargs.pop('population_size', None) super().__init__(1, *args, **kwargs) self.num_sparks = num_sparks self.amplification_coefficient = amplification_coefficient self.reduction_coefficient = reduction_coefficient def set_parameters(self, num_sparks=10, amplification_coefficient=1.5, reduction_coefficient=0.5, **kwargs): kwargs.pop('population_size', None) super().set_parameters(population_size=1, **kwargs) self.num_sparks = num_sparks self.amplification_coefficient = amplification_coefficient self.reduction_coefficient = reduction_coefficient def init_population(self, task): x, x_fit, d = super().init_population(task) d.update({'amplitude': task.range}) return x, x_fit, d def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params): amplitude = params.pop('amplitude') sparks = self.uniform(population - amplitude, population + amplitude, (self.num_sparks, task.dimension)) sparks = np.apply_along_axis(task.repair, 1, sparks, self.rng) sparks_fitness = np.apply_along_axis(task.eval, 1, sparks) best_index = np.argmin(sparks_fitness) if sparks_fitness[best_index] < population_fitness: population = sparks[best_index] population_fitness = sparks_fitness[best_index] amplitude = self.amplification_coefficient * amplitude else: amplitude = self.reduction_coefficient * amplitude return population, population_fitness, population.copy(), population_fitness, {'amplitude': amplitude} class FireworksAlgorithm(Algorithm): Name = ['FireworksAlgorithm', 'FWA'] @staticmethod def info(): return r"""Tan, Ying. "Fireworks algorithm." 
Heidelberg, Germany: Springer 10 (2015): 978-3.""" def __init__(self, population_size=5, num_sparks=50, a=0.04, b=0.8, max_amplitude=40, num_gaussian=5, *args, **kwargs): super().__init__(population_size, *args, **kwargs) self.num_sparks = num_sparks self.a = a self.b = b self.max_amplitude = max_amplitude self.num_gaussian = num_gaussian self.epsilon = np.finfo(float).eps def set_parameters(self, population_size=5, num_sparks=50, a=0.04, b=0.8, max_amplitude=40, num_gaussian=5, **kwargs): super().set_parameters(population_size=population_size, **kwargs) self.num_sparks = num_sparks self.a = a self.b = b self.max_amplitude = max_amplitude self.num_gaussian = num_gaussian self.epsilon = np.finfo(float).eps def sparks_num(self, population_fitness): worst_fitness = np.amax(population_fitness) sparks_num = self.num_sparks * (worst_fitness - population_fitness + self.epsilon) sparks_num /= np.sum(worst_fitness - population_fitness) + self.epsilon cond = [sparks_num < self.a * self.num_sparks, (sparks_num > self.b * self.num_sparks) * (self.a < self.b < 1)] choices = [round(self.a * self.num_sparks), round(self.b * self.num_sparks)] return np.select(cond, choices, default=np.round(sparks_num)).astype(int) def explosion_amplitudes(self, population_fitness, task=None): best_fitness = np.amin(population_fitness) amplitudes = self.max_amplitude * (population_fitness - best_fitness + self.epsilon) amplitudes /= np.sum(population_fitness - best_fitness) + self.epsilon return amplitudes def explosion_spark(self, x, amplitude, task): z = self.rng.choice(task.dimension, self.rng.integers(task.dimension), replace=False) x[z] = x[z] + amplitude * self.uniform(-1, 1) return self.mapping(x, task) def gaussian_spark(self, x, task, best_x=None): z = self.rng.choice(task.dimension, self.rng.integers(task.dimension), replace=False) x[z] = x[z] * self.normal(1, 1) return self.mapping(x, task) def mapping(self, x, task): return repair.reflect(x, task.lower, task.upper) def selection(self, population, population_fitness, sparks, task): sparks_fitness = np.apply_along_axis(task.eval, 1, sparks) best_index = np.argmin(sparks_fitness) best_x = sparks[best_index].copy() best_fitness = sparks_fitness[best_index] all_sparks = np.delete(sparks, best_index, axis=0) fitness = np.delete(sparks_fitness, best_index) distances = np.sum(euclidean(all_sparks[:, np.newaxis, :], all_sparks[np.newaxis, :, :]), axis=0) probabilities = distances / np.sum(distances) selected_indices = self.rng.choice(len(all_sparks), self.population_size - 1, replace=False, p=probabilities) population[0] = best_x population[1:] = all_sparks[selected_indices] population_fitness[0] = best_fitness population_fitness[1:] = fitness[selected_indices] return population, population_fitness, best_x, best_fitness def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params): sparks_num = self.sparks_num(population_fitness) amplitudes = self.explosion_amplitudes(population_fitness, task=task) all_sparks = population.copy() for i in range(self.population_size): si = sparks_num[i] ai = amplitudes[i] sparks_i = np.empty((si, task.dimension)) for s in range(si): sparks_i[s] = population[i] sparks_i[s] = self.explosion_spark(sparks_i[s], ai, task) all_sparks = np.concatenate((all_sparks, sparks_i), axis=0) gaussian_idx = self.rng.choice(len(all_sparks), self.num_gaussian, replace=False) gaussian_sparks = np.array(all_sparks[gaussian_idx]) for i in range(self.num_gaussian): gaussian_sparks[i] = self.gaussian_spark(gaussian_sparks[i], 
task, best_x=best_x) all_sparks = np.concatenate((all_sparks, gaussian_sparks), axis=0) population, population_fitness, best_x, best_fitness = self.selection(population, population_fitness, all_sparks, task) return population, population_fitness, best_x, best_fitness, {} class EnhancedFireworksAlgorithm(FireworksAlgorithm): Name = ['EnhancedFireworksAlgorithm', 'EFWA'] @staticmethod def info(): return r"""S. Zheng, A. Janecek and Y. Tan, "Enhanced Fireworks Algorithm," 2013 IEEE Congress on Evolutionary Computation, Cancun, 2013, pp. 2069-2077. doi: 10.1109/CEC.2013.6557813""" def __init__(self, amplitude_init=0.2, amplitude_final=0.01, *args, **kwargs): super().__init__(*args, **kwargs) self.amplitude_init = amplitude_init self.amplitude_final = amplitude_final def set_parameters(self, amplitude_init=0.2, amplitude_final=0.01, **kwargs): super().set_parameters(**kwargs) self.amplitude_init = amplitude_init self.amplitude_final = amplitude_final def explosion_amplitudes(self, population_fitness, task=None): amplitudes = super().explosion_amplitudes(population_fitness, task) a_min = self.amplitude_init - np.sqrt(task.evals * (2 * task.max_evals - task.evals)) * (self.amplitude_init - self.amplitude_final) / task.max_evals amplitudes[amplitudes < a_min] = a_min return amplitudes
MIT License
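The BareBonesFireworksAlgorithm in the record above keeps a single firework and, per iteration, samples num_sparks points uniformly inside a box of half-width amplitude around it, enlarging the amplitude after an improvement and shrinking it otherwise. As a clarification of that loop only, here is a minimal standalone NumPy sketch; it is independent of the niapy Algorithm/Task machinery, and the function name, the clipping-based bound handling and the toy sphere objective are my own choices, not part of the library.

import numpy as np

def bare_bones_fireworks(objective, lower, upper, num_sparks=10,
                         amplification=1.5, reduction=0.5,
                         iterations=200, seed=0):
    """Minimal sketch of the bare-bones fireworks update loop."""
    rng = np.random.default_rng(seed)
    lower, upper = np.asarray(lower, float), np.asarray(upper, float)
    x = rng.uniform(lower, upper)               # the single firework
    fx = objective(x)
    amplitude = upper - lower                   # initial explosion amplitude
    for _ in range(iterations):
        sparks = rng.uniform(x - amplitude, x + amplitude,
                             size=(num_sparks, x.size))
        sparks = np.clip(sparks, lower, upper)  # crude bound handling
        fitness = np.apply_along_axis(objective, 1, sparks)
        best = np.argmin(fitness)
        if fitness[best] < fx:                  # improvement: move and amplify
            x, fx = sparks[best], fitness[best]
            amplitude *= amplification
        else:                                   # no improvement: shrink the box
            amplitude *= reduction
    return x, fx

# Toy usage on a sphere objective.
best_x, best_f = bare_bones_fireworks(lambda v: np.sum(v ** 2),
                                      lower=[-5] * 3, upper=[5] * 3)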
google/uncertainty-baselines
baselines/cifar/ood_utils.py
load_ood_datasets
python
def load_ood_datasets(ood_dataset_names,
                      in_dataset_builder,
                      in_dataset_validation_percent,
                      batch_size,
                      drop_remainder=False):
    steps = {}
    datasets = {}
    for ood_dataset_name in ood_dataset_names:
        ood_dataset_class = ub.datasets.DATASETS[ood_dataset_name]
        ood_dataset_class = ub.datasets.make_ood_dataset(ood_dataset_class)
        if 'cifar' not in ood_dataset_name:
            ood_dataset_builder = ood_dataset_class(
                in_dataset_builder,
                split='test',
                validation_percent=in_dataset_validation_percent,
                normalize_by_cifar=True,
                drop_remainder=drop_remainder)
        else:
            ood_dataset_builder = ood_dataset_class(
                in_dataset_builder,
                split='test',
                validation_percent=in_dataset_validation_percent,
                drop_remainder=drop_remainder)
        ood_dataset = ood_dataset_builder.load(batch_size=batch_size)
        steps[f'ood/{ood_dataset_name}'] = (
            ood_dataset_builder.num_examples('in_distribution') // batch_size
            + ood_dataset_builder.num_examples('ood') // batch_size)
        datasets[f'ood/{ood_dataset_name}'] = ood_dataset
    return datasets, steps
Load OOD datasets.
https://github.com/google/uncertainty-baselines/blob/d37c17c4b08a88d6546bbf299b59127a03398404/baselines/cifar/ood_utils.py#L64-L96
import tensorflow as tf import uncertainty_baselines as ub def DempsterShaferUncertainty(logits): num_classes = tf.shape(logits)[-1] num_classes = tf.cast(num_classes, dtype=logits.dtype) belief_mass = tf.reduce_sum(tf.exp(logits), axis=-1) return num_classes / (belief_mass + num_classes) def create_ood_metrics(ood_dataset_names): ood_metrics = {} for dataset_name in ood_dataset_names: ood_dataset_name = f'ood/{dataset_name}' ood_metrics.update({ f'{ood_dataset_name}_auroc': tf.keras.metrics.AUC(curve='ROC', num_thresholds=100000), f'{ood_dataset_name}_auprc': tf.keras.metrics.AUC(curve='PR', num_thresholds=100000), f'{ood_dataset_name}_(1-fpr)@95tpr': tf.keras.metrics.SpecificityAtSensitivity( 0.95, num_thresholds=100000) }) return ood_metrics
Apache License 2.0
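A rough sketch of how the dictionaries returned by load_ood_datasets above could be consumed in an evaluation loop; the dataset name, the builder variable and the per-batch handling are illustrative assumptions, not code from the repository.

# Hypothetical call: 'svhn_cropped' and cifar10_builder are placeholders.
ood_datasets, ood_steps = load_ood_datasets(
    ood_dataset_names=['svhn_cropped'],
    in_dataset_builder=cifar10_builder,   # an ub.datasets CIFAR-10 builder
    in_dataset_validation_percent=0.0,
    batch_size=128)

for name, dataset in ood_datasets.items():
    # Standard tf.data iteration; what each batch contains depends on the
    # uncertainty_baselines dataset builders.
    for batch in dataset.take(ood_steps[name]):
        pass  # run the model on the batch and update the OOD metrics here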
upb-lea/gym-electric-motor
gym_electric_motor/reference_generators/subepisoded_reference_generator.py
SubepisodedReferenceGenerator._reset_reference
python
def _reset_reference(self):
    raise NotImplementedError
Subclasses implement in this method the generation of the references for the next self._current_episode_length time steps and write them into self._reference.
https://github.com/upb-lea/gym-electric-motor/blob/f091d6b4a754d4fa2439fea64e2c89b9e86d683a/gym_electric_motor/reference_generators/subepisoded_reference_generator.py#L98-L103
import numpy as np from gym.spaces import Box from ..random_component import RandomComponent from ..core import ReferenceGenerator from ..utils import set_state_array class SubepisodedReferenceGenerator(ReferenceGenerator, RandomComponent): def __init__(self, reference_state='omega', episode_lengths=(500, 2000), limit_margin=None, **kwargs): ReferenceGenerator.__init__(self, **kwargs) RandomComponent.__init__(self) self.reference_space = Box(-1, 1, shape=(1,)) self._reference = None self._limit_margin = limit_margin self._reference_value = 0.0 self._reference_state = reference_state.lower() self._episode_len_range = episode_lengths self._current_episode_length = int(self._get_current_value(episode_lengths)) self._k = 0 self._reference_names = [self._reference_state] def set_modules(self, physical_system): super().set_modules(physical_system) self._referenced_states = set_state_array( {self._reference_state: 1}, physical_system.state_names ).astype(bool) rs = self._referenced_states ps = physical_system if self._limit_margin is None: upper_margin = (ps.nominal_state[rs] / ps.limits[rs])[0] * ps.state_space.high[rs] lower_margin = (ps.nominal_state[rs] / ps.limits[rs])[0] * ps.state_space.low[rs] self._limit_margin = lower_margin[0], upper_margin[0] elif type(self._limit_margin) in [float, int]: upper_margin = self._limit_margin * ps.state_space.high[rs] lower_margin = self._limit_margin * ps.state_space.low[rs] self._limit_margin = lower_margin[0], upper_margin[0] elif type(self._limit_margin) is tuple: lower_margin = self._limit_margin[0] * ps.state_space.low[rs] upper_margin = self._limit_margin[1] * ps.state_space.high[rs] self._limit_margin = lower_margin[0], upper_margin[0] else: raise Exception('Unknown type for the limit margin.') self.reference_space = Box(lower_margin[0], upper_margin[0], shape=(1,)) def reset(self, initial_state=None, initial_reference=None): if initial_reference is not None: self._reference_value = initial_reference[self._referenced_states][0] else: self._reference_value = 0.0 self.next_generator() self._current_episode_length = -1 return super().reset(initial_state) def get_reference(self, *_, **__): reference = np.zeros_like(self._referenced_states, dtype=float) reference[self._referenced_states] = self._reference_value return reference def get_reference_observation(self, *_, **__): if self._k >= self._current_episode_length: self._k = 0 self._current_episode_length = int(self._get_current_value(self._episode_len_range)) self._reset_reference() self._reference_value = self._reference[self._k] self._k += 1 return np.array([self._reference_value])
MIT License
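The docstring above defines the contract for _reset_reference: fill self._reference with values for the next self._current_episode_length steps. A minimal hypothetical subclass that holds one constant reference value per sub-episode (not one of the generators shipped with gym-electric-motor) could look like this; it uses the global NumPy RNG for brevity, whereas a real implementation would use the component's seeded generator.

import numpy as np

class ConstantSubepisodeReferenceGenerator(SubepisodedReferenceGenerator):
    """Hypothetical example generator: one constant reference per sub-episode."""

    def _reset_reference(self):
        # Draw one value inside the limit margin and repeat it for the whole
        # sub-episode; get_reference_observation() then reads it index-wise.
        low, high = self._limit_margin
        value = np.random.uniform(low, high)
        self._reference = np.full(self._current_episode_length, value)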
argoproj-labs/argo-client-python
argo/workflows/client/models/v1alpha1_workflow_template_lint_request.py
V1alpha1WorkflowTemplateLintRequest.template
python
def template(self):
    return self._template
Gets the template of this V1alpha1WorkflowTemplateLintRequest. # noqa: E501 :return: The template of this V1alpha1WorkflowTemplateLintRequest. # noqa: E501 :rtype: V1alpha1WorkflowTemplate
https://github.com/argoproj-labs/argo-client-python/blob/993d684cab39a834770b296e028519cec035c7b5/argo/workflows/client/models/v1alpha1_workflow_template_lint_request.py#L108-L115
import pprint import re import six from argo.workflows.client.configuration import Configuration class V1alpha1WorkflowTemplateLintRequest(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'create_options': 'V1CreateOptions', 'namespace': 'str', 'template': 'V1alpha1WorkflowTemplate' } attribute_map = { 'create_options': 'createOptions', 'namespace': 'namespace', 'template': 'template' } def __init__(self, create_options=None, namespace=None, template=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._create_options = None self._namespace = None self._template = None self.discriminator = None if create_options is not None: self.create_options = create_options if namespace is not None: self.namespace = namespace if template is not None: self.template = template @property def create_options(self): return self._create_options @create_options.setter def create_options(self, create_options): self._create_options = create_options @property def namespace(self): return self._namespace @namespace.setter def namespace(self, namespace): self._namespace = namespace @property
Apache License 2.0
rbuffat/pyidf
pyidf/location_and_climate.py
SizingPeriodDesignDay.humidity_condition_type
python
def humidity_condition_type(self):
    return self["Humidity Condition Type"]
field `Humidity Condition Type` | values/schedules indicated here and in subsequent fields create the humidity | values in the 24 hour design day conditions profile. | Default value: WetBulb Args: value (str): value for IDD Field `Humidity Condition Type` Raises: ValueError: if `value` is not a valid value Returns: str: the value of `humidity_condition_type` or None if not set
https://github.com/rbuffat/pyidf/blob/c2f744211572b5e14e29522aac1421ba88addb0e/pyidf/location_and_climate.py#L692-L709
from collections import OrderedDict import logging from pyidf.helper import DataObject logger = logging.getLogger("pyidf") logger.addHandler(logging.NullHandler()) class SiteLocation(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'name', {'name': u'Name', 'pyname': u'name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'alpha'}), (u'latitude', {'name': u'Latitude', 'pyname': u'latitude', 'default': 0.0, 'maximum': 90.0, 'required-field': False, 'autosizable': False, 'minimum': -90.0, 'autocalculatable': False, 'type': u'real', 'unit': u'deg'}), (u'longitude', {'name': u'Longitude', 'pyname': u'longitude', 'default': 0.0, 'maximum': 180.0, 'required-field': False, 'autosizable': False, 'minimum': -180.0, 'autocalculatable': False, 'type': u'real', 'unit': u'deg'}), (u'time zone', {'name': u'Time Zone', 'pyname': u'time_zone', 'default': 0.0, 'maximum': 14.0, 'required-field': False, 'autosizable': False, 'minimum': -12.0, 'autocalculatable': False, 'type': u'real', 'unit': u'hr'}), (u'elevation', {'name': u'Elevation', 'pyname': u'elevation', 'default': 0.0, 'maximum<': 8900.0, 'required-field': False, 'autosizable': False, 'minimum': -300.0, 'autocalculatable': False, 'type': u'real', 'unit': u'm'})]), 'format': None, 'group': u'Location and Climate', 'min-fields': 5, 'name': u'Site:Location', 'pyname': u'SiteLocation', 'required-object': False, 'unique-object': True} @property def name(self): return self["Name"] @name.setter def name(self, value=None): self["Name"] = value @property def latitude(self): return self["Latitude"] @latitude.setter def latitude(self, value=None): self["Latitude"] = value @property def longitude(self): return self["Longitude"] @longitude.setter def longitude(self, value=None): self["Longitude"] = value @property def time_zone(self): return self["Time Zone"] @time_zone.setter def time_zone(self, value=None): self["Time Zone"] = value @property def elevation(self): return self["Elevation"] @elevation.setter def elevation(self, value=None): self["Elevation"] = value class SizingPeriodDesignDay(DataObject): _schema = {'extensible-fields': OrderedDict(), 'fields': OrderedDict([(u'name', {'name': u'Name', 'pyname': u'name', 'required-field': True, 'autosizable': False, 'autocalculatable': False, 'type': u'alpha'}), (u'month', {'name': u'Month', 'pyname': u'month', 'maximum': 12, 'required-field': True, 'autosizable': False, 'minimum': 1, 'autocalculatable': False, 'type': u'integer'}), (u'day of month', {'name': u'Day of Month', 'pyname': u'day_of_month', 'maximum': 31, 'required-field': True, 'autosizable': False, 'minimum': 1, 'autocalculatable': False, 'type': u'integer'}), (u'day type', {'name': u'Day Type', 'pyname': u'day_type', 'required-field': True, 'autosizable': False, 'accepted-values': [u'Sunday', u'Monday', u'Tuesday', u'Wednesday', u'Thursday', u'Friday', u'Saturday', u'Holiday', u'SummerDesignDay', u'WinterDesignDay', u'CustomDay1', u'CustomDay2'], 'autocalculatable': False, 'type': 'alpha'}), (u'maximum dry-bulb temperature', {'name': u'Maximum Dry-Bulb Temperature', 'pyname': u'maximum_drybulb_temperature', 'maximum': 70.0, 'required-field': False, 'autosizable': False, 'minimum': -90.0, 'autocalculatable': False, 'type': u'real', 'unit': u'C'}), (u'daily dry-bulb temperature range', {'name': u'Daily Dry-Bulb Temperature Range', 'pyname': u'daily_drybulb_temperature_range', 'default': 0.0, 'required-field': False, 'autosizable': False, 'minimum': 0.0, 'autocalculatable': False, 
'type': u'real', 'unit': u'deltaC'}), (u'dry-bulb temperature range modifier type', {'name': u'Dry-Bulb Temperature Range Modifier Type', 'pyname': u'drybulb_temperature_range_modifier_type', 'default': u'DefaultMultipliers', 'required-field': False, 'autosizable': False, 'accepted-values': [u'MultiplierSchedule', u'DifferenceSchedule', u'TemperatureProfileSchedule', u'DefaultMultipliers'], 'autocalculatable': False, 'type': 'alpha'}), (u'dry-bulb temperature range modifier day schedule name', {'name': u'Dry-Bulb Temperature Range Modifier Day Schedule Name', 'pyname': u'drybulb_temperature_range_modifier_day_schedule_name', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'object-list'}), (u'humidity condition type', {'name': u'Humidity Condition Type', 'pyname': u'humidity_condition_type', 'default': u'WetBulb', 'required-field': False, 'autosizable': False, 'accepted-values': [u'WetBulb', u'DewPoint', u'HumidityRatio', u'Enthalpy', u'RelativeHumiditySchedule', u'WetBulbProfileMultiplierSchedule', u'WetBulbProfileDifferenceSchedule', u'WetBulbProfileDefaultMultipliers'], 'autocalculatable': False, 'type': 'alpha'}), (u'wetbulb or dewpoint at maximum dry-bulb', {'name': u'Wetbulb or DewPoint at Maximum Dry-Bulb', 'pyname': u'wetbulb_or_dewpoint_at_maximum_drybulb', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'C'}), (u'humidity condition day schedule name', {'name': u'Humidity Condition Day Schedule Name', 'pyname': u'humidity_condition_day_schedule_name', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'object-list'}), (u'humidity ratio at maximum dry-bulb', {'name': u'Humidity Ratio at Maximum Dry-Bulb', 'pyname': u'humidity_ratio_at_maximum_drybulb', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'kgWater/kgDryAir'}), (u'enthalpy at maximum dry-bulb', {'name': u'Enthalpy at Maximum Dry-Bulb', 'pyname': u'enthalpy_at_maximum_drybulb', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'real', 'unit': u'J/kg'}), (u'daily wet-bulb temperature range', {'name': u'Daily Wet-Bulb Temperature Range', 'pyname': u'daily_wetbulb_temperature_range', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': 'real', 'unit': u'deltaC'}), (u'barometric pressure', {'name': u'Barometric Pressure', 'pyname': u'barometric_pressure', 'maximum': 120000.0, 'required-field': False, 'autosizable': False, 'minimum': 31000.0, 'autocalculatable': False, 'type': u'real', 'unit': u'Pa'}), (u'wind speed', {'name': u'Wind Speed', 'pyname': u'wind_speed', 'maximum': 40.0, 'required-field': True, 'autosizable': False, 'minimum': 0.0, 'autocalculatable': False, 'type': u'real', 'unit': u'm/s'}), (u'wind direction', {'name': u'Wind Direction', 'pyname': u'wind_direction', 'maximum': 360.0, 'required-field': True, 'autosizable': False, 'minimum': 0.0, 'autocalculatable': False, 'type': u'real', 'unit': u'deg'}), (u'rain indicator', {'name': u'Rain Indicator', 'pyname': u'rain_indicator', 'default': u'No', 'required-field': False, 'autosizable': False, 'accepted-values': [u'Yes', u'No'], 'autocalculatable': False, 'type': 'alpha'}), (u'snow indicator', {'name': u'Snow Indicator', 'pyname': u'snow_indicator', 'default': u'No', 'required-field': False, 'autosizable': False, 'accepted-values': [u'Yes', u'No'], 'autocalculatable': False, 'type': 'alpha'}), (u'daylight saving time indicator', 
{'name': u'Daylight Saving Time Indicator', 'pyname': u'daylight_saving_time_indicator', 'default': u'No', 'required-field': False, 'autosizable': False, 'accepted-values': [u'Yes', u'No'], 'autocalculatable': False, 'type': 'alpha'}), (u'solar model indicator', {'name': u'Solar Model Indicator', 'pyname': u'solar_model_indicator', 'default': u'ASHRAEClearSky', 'required-field': False, 'autosizable': False, 'accepted-values': [u'ASHRAEClearSky', u'ZhangHuang', u'Schedule', u'ASHRAETau'], 'autocalculatable': False, 'type': 'alpha'}), (u'beam solar day schedule name', {'name': u'Beam Solar Day Schedule Name', 'pyname': u'beam_solar_day_schedule_name', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'object-list'}), (u'diffuse solar day schedule name', {'name': u'Diffuse Solar Day Schedule Name', 'pyname': u'diffuse_solar_day_schedule_name', 'required-field': False, 'autosizable': False, 'autocalculatable': False, 'type': u'object-list'}), (u'ashrae clear sky optical depth for beam irradiance (taub)', {'name': u'ASHRAE Clear Sky Optical Depth for Beam Irradiance (taub)', 'pyname': u'ashrae_clear_sky_optical_depth_for_beam_irradiance_taub', 'default': 0.0, 'maximum': 1.2, 'required-field': False, 'autosizable': False, 'minimum': 0.0, 'autocalculatable': False, 'type': 'real', 'unit': u'dimensionless'}), (u'ashrae clear sky optical depth for diffuse irradiance (taud)', {'name': u'ASHRAE Clear Sky Optical Depth for Diffuse Irradiance (taud)', 'pyname': u'ashrae_clear_sky_optical_depth_for_diffuse_irradiance_taud', 'default': 0.0, 'maximum': 3.0, 'required-field': False, 'autosizable': False, 'minimum': 0.0, 'autocalculatable': False, 'type': 'real', 'unit': u'dimensionless'}), (u'sky clearness', {'name': u'Sky Clearness', 'pyname': u'sky_clearness', 'default': 0.0, 'maximum': 1.2, 'required-field': False, 'autosizable': False, 'minimum': 0.0, 'autocalculatable': False, 'type': u'real'})]), 'format': None, 'group': u'Location and Climate', 'min-fields': 0, 'name': u'SizingPeriod:DesignDay', 'pyname': u'SizingPeriodDesignDay', 'required-object': False, 'unique-object': False} @property def name(self): return self["Name"] @name.setter def name(self, value=None): self["Name"] = value @property def month(self): return self["Month"] @month.setter def month(self, value=None): self["Month"] = value @property def day_of_month(self): return self["Day of Month"] @day_of_month.setter def day_of_month(self, value=None): self["Day of Month"] = value @property def day_type(self): return self["Day Type"] @day_type.setter def day_type(self, value=None): self["Day Type"] = value @property def maximum_drybulb_temperature(self): return self["Maximum Dry-Bulb Temperature"] @maximum_drybulb_temperature.setter def maximum_drybulb_temperature(self, value=None): self["Maximum Dry-Bulb Temperature"] = value @property def daily_drybulb_temperature_range(self): return self["Daily Dry-Bulb Temperature Range"] @daily_drybulb_temperature_range.setter def daily_drybulb_temperature_range(self, value=None): self["Daily Dry-Bulb Temperature Range"] = value @property def drybulb_temperature_range_modifier_type(self): return self["Dry-Bulb Temperature Range Modifier Type"] @drybulb_temperature_range_modifier_type.setter def drybulb_temperature_range_modifier_type( self, value="DefaultMultipliers"): self["Dry-Bulb Temperature Range Modifier Type"] = value @property def drybulb_temperature_range_modifier_day_schedule_name(self): return self["Dry-Bulb Temperature Range Modifier Day Schedule Name"] 
@drybulb_temperature_range_modifier_day_schedule_name.setter def drybulb_temperature_range_modifier_day_schedule_name(self, value=None): self["Dry-Bulb Temperature Range Modifier Day Schedule Name"] = value @property
Apache License 2.0
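A short usage sketch for the field accessed above. It assumes SizingPeriodDesignDay can be constructed without arguments and that humidity_condition_type has a setter following the same pattern as the other properties shown in the context, so treat it as illustrative rather than documented API.

from pyidf.location_and_climate import SizingPeriodDesignDay

design_day = SizingPeriodDesignDay()               # assumed no-argument constructor
design_day.humidity_condition_type = "DewPoint"    # one of the accepted values
print(design_day.humidity_condition_type)          # expected: "DewPoint"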
polysquare/polysquare-travis-container
psqtraviscontainer/linux_container.py
_rmtrees_as_container
python
def _rmtrees_as_container(cont, directories):
    root = cont.root_filesystem_directory()
    with tempfile.NamedTemporaryFile(dir=root, mode="wt") as bash_script:
        bash_script.write(";\n".join([("rm -rf " + d) for d in directories]))
        bash_script.flush()
        cont.execute(["bash", bash_script.name], minimal_bind=True)
Remove directories as the root user in the container. This allows the removal of directories that permission errors would otherwise prevent.
https://github.com/polysquare/polysquare-travis-container/blob/bb31302cfc48f55da56c12ab27b88644380209b9/psqtraviscontainer/linux_container.py#L89-L100
from __future__ import unicode_literals import errno import fnmatch import os import platform import shutil import stat import tarfile import tempfile from collections import defaultdict from collections import namedtuple from getpass import getuser from itertools import chain from clint.textui import colored from psqtraviscontainer import architecture from psqtraviscontainer import constants from psqtraviscontainer import container from psqtraviscontainer import debian_package from psqtraviscontainer import directory from psqtraviscontainer import distro from psqtraviscontainer import package_system from psqtraviscontainer import printer from psqtraviscontainer import util from psqtraviscontainer.download import TemporarilyDownloadedFile import tempdir _PROOT_URL_BASE = "http://static.proot.me/proot-{arch}" _QEMU_URL_BASE = ("http://download.opensuse.org/repositories" "/home:/cedric-vincent/xUbuntu_12.04/{arch}/" "qemu-user-mode_1.6.1-1_{arch}.deb") DistroInfo = distro.DistroInfo DistroConfig = distro.DistroConfig ProotDistribution = namedtuple("ProotDistribution", "proot qemu") def proot_distro_from_container(container_dir): path_to_proot_dir = constants.proot_distribution_dir(container_dir) path_to_proot_bin = os.path.join(path_to_proot_dir, "bin/proot") path_to_qemu_template = os.path.join(path_to_proot_dir, "bin/qemu-{arch}") def _get_qemu_binary(arch): qemu_arch = architecture.Alias.qemu(arch) return path_to_qemu_template.format(arch=qemu_arch) def _get_proot_binary(): return path_to_proot_bin return ProotDistribution(proot=_get_proot_binary, qemu=_get_qemu_binary) def get_dir_for_distro(container_dir, config): arch = config["arch"] url = config["url"] distro_folder_name_template = (os.path.basename(url) + ".root") distro_folder_name = distro_folder_name_template.format(arch=arch) return os.path.realpath(os.path.join(container_dir, distro_folder_name))
MIT License
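For illustration, a hedged sketch of how _rmtrees_as_container might be called; cont stands for an already-created container object from this module and the directory paths are invented.

# `cont` is assumed to be a container object created by this module, exposing
# the root_filesystem_directory() and execute() methods used above.
leftover_dirs = [
    "/var/cache/apt/archives",   # hypothetical root-owned paths
    "/tmp/build-artifacts",
]
_rmtrees_as_container(cont, leftover_dirs)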
data-apis/dataframe-api
protocol/dataframe_protocol.py
DataFrame.metadata
python
def metadata(self) -> Dict[str, Any]:
    pass
The metadata for the data frame, as a dictionary with string keys. The contents of `metadata` may be anything; they are meant for a library to store information that it needs to, e.g., roundtrip losslessly, or for two implementations to share data that is not (yet) part of the interchange protocol specification. To avoid collisions with other entries, please prefix the keys with the name of the library followed by a period and the desired name, e.g., ``pandas.indexcol``.
https://github.com/data-apis/dataframe-api/blob/5cbbfc1fcc6458e367ba55b28196e82c87014311/protocol/dataframe_protocol.py#L313-L323
class Buffer: @property def bufsize(self) -> int: pass @property def ptr(self) -> int: pass def __dlpack__(self): raise NotImplementedError("__dlpack__") def __dlpack_device__(self) -> Tuple[enum.IntEnum, int]: pass class Column: @property def size(self) -> Optional[int]: pass @property def offset(self) -> int: pass @property def dtype(self) -> Tuple[enum.IntEnum, int, str, str]: pass @property def describe_categorical(self) -> dict[bool, bool, Optional[dict]]: pass @property def describe_null(self) -> Tuple[int, Any]: pass @property def null_count(self) -> Optional[int]: pass @property def metadata(self) -> Dict[str, Any]: pass def num_chunks(self) -> int: pass def get_chunks(self, n_chunks : Optional[int] = None) -> Iterable[Column]: pass def get_buffers(self) -> dict[Tuple[Buffer, Any], Optional[Tuple[Buffer, Any]], Optional[Tuple[Buffer, Any]]]: pass class DataFrame: def __dataframe__(self, nan_as_null : bool = False, allow_copy : bool = True) -> dict: self._nan_as_null = nan_as_null self._allow_zero_zopy = allow_copy return { "dataframe": self, "version": 0 } @property
MIT License
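The docstring above spells out the convention that metadata keys are namespaced as library.name. A toy sketch of what a producing library might return from this property; the class, the mylib prefix and its key are invented for illustration.

from typing import Any, Dict

class MyLibDataFrame(DataFrame):
    """Hypothetical producer implementing the interchange protocol."""

    def __init__(self, data, index_name=None):
        self._data = data
        self._index_name = index_name

    @property
    def metadata(self) -> Dict[str, Any]:
        # Keys carry the library prefix to avoid collisions, in the same way
        # the docstring suggests e.g. "pandas.indexcol" for pandas.
        return {"mylib.index_name": self._index_name}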
aspose-words-cloud/aspose-words-cloud-python
asposewordscloud/models/bmp_save_options_data.py
BmpSaveOptionsData.color_mode
python
def color_mode(self):
    return self._color_mode
Gets the color_mode of this BmpSaveOptionsData. # noqa: E501 Gets or sets the value determining how colors are rendered. { Normal | Grayscale}. # noqa: E501 :return: The color_mode of this BmpSaveOptionsData. # noqa: E501 :rtype: str
https://github.com/aspose-words-cloud/aspose-words-cloud-python/blob/abf8fccfed40aa2b09c6cdcaf3f2723e1f412d85/asposewordscloud/models/bmp_save_options_data.py#L566-L574
import pprint import re import datetime import six import json class BmpSaveOptionsData(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'allow_embedding_post_script_fonts': 'bool', 'custom_time_zone_info_data': 'TimeZoneInfoData', 'dml3_d_effects_rendering_mode': 'str', 'dml_effects_rendering_mode': 'str', 'dml_rendering_mode': 'str', 'file_name': 'str', 'flat_opc_xml_mapping_only': 'bool', 'iml_rendering_mode': 'str', 'save_format': 'str', 'update_created_time_property': 'bool', 'update_fields': 'bool', 'update_last_printed_property': 'bool', 'update_last_saved_time_property': 'bool', 'update_sdt_content': 'bool', 'zip_output': 'bool', 'color_mode': 'str', 'jpeg_quality': 'int', 'metafile_rendering_options': 'MetafileRenderingOptionsData', 'numeral_format': 'str', 'optimize_output': 'bool', 'page_count': 'int', 'page_index': 'int', 'horizontal_resolution': 'float', 'image_brightness': 'float', 'image_color_mode': 'str', 'image_contrast': 'float', 'paper_color': 'str', 'pixel_format': 'str', 'resolution': 'float', 'scale': 'float', 'use_anti_aliasing': 'bool', 'use_gdi_emf_renderer': 'bool', 'use_high_quality_rendering': 'bool', 'vertical_resolution': 'float' } attribute_map = { 'allow_embedding_post_script_fonts': 'AllowEmbeddingPostScriptFonts', 'custom_time_zone_info_data': 'CustomTimeZoneInfoData', 'dml3_d_effects_rendering_mode': 'Dml3DEffectsRenderingMode', 'dml_effects_rendering_mode': 'DmlEffectsRenderingMode', 'dml_rendering_mode': 'DmlRenderingMode', 'file_name': 'FileName', 'flat_opc_xml_mapping_only': 'FlatOpcXmlMappingOnly', 'iml_rendering_mode': 'ImlRenderingMode', 'save_format': 'SaveFormat', 'update_created_time_property': 'UpdateCreatedTimeProperty', 'update_fields': 'UpdateFields', 'update_last_printed_property': 'UpdateLastPrintedProperty', 'update_last_saved_time_property': 'UpdateLastSavedTimeProperty', 'update_sdt_content': 'UpdateSdtContent', 'zip_output': 'ZipOutput', 'color_mode': 'ColorMode', 'jpeg_quality': 'JpegQuality', 'metafile_rendering_options': 'MetafileRenderingOptions', 'numeral_format': 'NumeralFormat', 'optimize_output': 'OptimizeOutput', 'page_count': 'PageCount', 'page_index': 'PageIndex', 'horizontal_resolution': 'HorizontalResolution', 'image_brightness': 'ImageBrightness', 'image_color_mode': 'ImageColorMode', 'image_contrast': 'ImageContrast', 'paper_color': 'PaperColor', 'pixel_format': 'PixelFormat', 'resolution': 'Resolution', 'scale': 'Scale', 'use_anti_aliasing': 'UseAntiAliasing', 'use_gdi_emf_renderer': 'UseGdiEmfRenderer', 'use_high_quality_rendering': 'UseHighQualityRendering', 'vertical_resolution': 'VerticalResolution' } def __init__(self, allow_embedding_post_script_fonts=None, custom_time_zone_info_data=None, dml3_d_effects_rendering_mode=None, dml_effects_rendering_mode=None, dml_rendering_mode=None, file_name=None, flat_opc_xml_mapping_only=None, iml_rendering_mode=None, save_format=None, update_created_time_property=None, update_fields=None, update_last_printed_property=None, update_last_saved_time_property=None, update_sdt_content=None, zip_output=None, color_mode=None, jpeg_quality=None, metafile_rendering_options=None, numeral_format=None, optimize_output=None, page_count=None, page_index=None, horizontal_resolution=None, image_brightness=None, image_color_mode=None, image_contrast=None, paper_color=None, pixel_format=None, resolution=None, scale=None, 
use_anti_aliasing=None, use_gdi_emf_renderer=None, use_high_quality_rendering=None, vertical_resolution=None): self._allow_embedding_post_script_fonts = None self._custom_time_zone_info_data = None self._dml3_d_effects_rendering_mode = None self._dml_effects_rendering_mode = None self._dml_rendering_mode = None self._file_name = None self._flat_opc_xml_mapping_only = None self._iml_rendering_mode = None self._save_format = None self._update_created_time_property = None self._update_fields = None self._update_last_printed_property = None self._update_last_saved_time_property = None self._update_sdt_content = None self._zip_output = None self._color_mode = None self._jpeg_quality = None self._metafile_rendering_options = None self._numeral_format = None self._optimize_output = None self._page_count = None self._page_index = None self._horizontal_resolution = None self._image_brightness = None self._image_color_mode = None self._image_contrast = None self._paper_color = None self._pixel_format = None self._resolution = None self._scale = None self._use_anti_aliasing = None self._use_gdi_emf_renderer = None self._use_high_quality_rendering = None self._vertical_resolution = None self.discriminator = None if allow_embedding_post_script_fonts is not None: self.allow_embedding_post_script_fonts = allow_embedding_post_script_fonts if custom_time_zone_info_data is not None: self.custom_time_zone_info_data = custom_time_zone_info_data if dml3_d_effects_rendering_mode is not None: self.dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode if dml_effects_rendering_mode is not None: self.dml_effects_rendering_mode = dml_effects_rendering_mode if dml_rendering_mode is not None: self.dml_rendering_mode = dml_rendering_mode if file_name is not None: self.file_name = file_name if flat_opc_xml_mapping_only is not None: self.flat_opc_xml_mapping_only = flat_opc_xml_mapping_only if iml_rendering_mode is not None: self.iml_rendering_mode = iml_rendering_mode if save_format is not None: self.save_format = save_format if update_created_time_property is not None: self.update_created_time_property = update_created_time_property if update_fields is not None: self.update_fields = update_fields if update_last_printed_property is not None: self.update_last_printed_property = update_last_printed_property if update_last_saved_time_property is not None: self.update_last_saved_time_property = update_last_saved_time_property if update_sdt_content is not None: self.update_sdt_content = update_sdt_content if zip_output is not None: self.zip_output = zip_output if color_mode is not None: self.color_mode = color_mode if jpeg_quality is not None: self.jpeg_quality = jpeg_quality if metafile_rendering_options is not None: self.metafile_rendering_options = metafile_rendering_options if numeral_format is not None: self.numeral_format = numeral_format if optimize_output is not None: self.optimize_output = optimize_output if page_count is not None: self.page_count = page_count if page_index is not None: self.page_index = page_index if horizontal_resolution is not None: self.horizontal_resolution = horizontal_resolution if image_brightness is not None: self.image_brightness = image_brightness if image_color_mode is not None: self.image_color_mode = image_color_mode if image_contrast is not None: self.image_contrast = image_contrast if paper_color is not None: self.paper_color = paper_color if pixel_format is not None: self.pixel_format = pixel_format if resolution is not None: self.resolution = resolution if scale is not 
None: self.scale = scale if use_anti_aliasing is not None: self.use_anti_aliasing = use_anti_aliasing if use_gdi_emf_renderer is not None: self.use_gdi_emf_renderer = use_gdi_emf_renderer if use_high_quality_rendering is not None: self.use_high_quality_rendering = use_high_quality_rendering if vertical_resolution is not None: self.vertical_resolution = vertical_resolution @property def allow_embedding_post_script_fonts(self): return self._allow_embedding_post_script_fonts @allow_embedding_post_script_fonts.setter def allow_embedding_post_script_fonts(self, allow_embedding_post_script_fonts): self._allow_embedding_post_script_fonts = allow_embedding_post_script_fonts @property def custom_time_zone_info_data(self): return self._custom_time_zone_info_data @custom_time_zone_info_data.setter def custom_time_zone_info_data(self, custom_time_zone_info_data): self._custom_time_zone_info_data = custom_time_zone_info_data @property def dml3_d_effects_rendering_mode(self): return self._dml3_d_effects_rendering_mode @dml3_d_effects_rendering_mode.setter def dml3_d_effects_rendering_mode(self, dml3_d_effects_rendering_mode): allowed_values = ["Basic", "Advanced"] if not dml3_d_effects_rendering_mode.isdigit(): if dml3_d_effects_rendering_mode not in allowed_values: raise ValueError( "Invalid value for `dml3_d_effects_rendering_mode` ({0}), must be one of {1}" .format(dml3_d_effects_rendering_mode, allowed_values)) self._dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode else: self._dml3_d_effects_rendering_mode = allowed_values[int(dml3_d_effects_rendering_mode) if six.PY3 else long(dml3_d_effects_rendering_mode)] @property def dml_effects_rendering_mode(self): return self._dml_effects_rendering_mode @dml_effects_rendering_mode.setter def dml_effects_rendering_mode(self, dml_effects_rendering_mode): self._dml_effects_rendering_mode = dml_effects_rendering_mode @property def dml_rendering_mode(self): return self._dml_rendering_mode @dml_rendering_mode.setter def dml_rendering_mode(self, dml_rendering_mode): self._dml_rendering_mode = dml_rendering_mode @property def file_name(self): return self._file_name @file_name.setter def file_name(self, file_name): self._file_name = file_name @property def flat_opc_xml_mapping_only(self): return self._flat_opc_xml_mapping_only @flat_opc_xml_mapping_only.setter def flat_opc_xml_mapping_only(self, flat_opc_xml_mapping_only): self._flat_opc_xml_mapping_only = flat_opc_xml_mapping_only @property def iml_rendering_mode(self): return self._iml_rendering_mode @iml_rendering_mode.setter def iml_rendering_mode(self, iml_rendering_mode): self._iml_rendering_mode = iml_rendering_mode @property def save_format(self): return self._save_format @save_format.setter def save_format(self, save_format): self._save_format = save_format @property def update_created_time_property(self): return self._update_created_time_property @update_created_time_property.setter def update_created_time_property(self, update_created_time_property): self._update_created_time_property = update_created_time_property @property def update_fields(self): return self._update_fields @update_fields.setter def update_fields(self, update_fields): self._update_fields = update_fields @property def update_last_printed_property(self): return self._update_last_printed_property @update_last_printed_property.setter def update_last_printed_property(self, update_last_printed_property): self._update_last_printed_property = update_last_printed_property @property def update_last_saved_time_property(self): return 
self._update_last_saved_time_property @update_last_saved_time_property.setter def update_last_saved_time_property(self, update_last_saved_time_property): self._update_last_saved_time_property = update_last_saved_time_property @property def update_sdt_content(self): return self._update_sdt_content @update_sdt_content.setter def update_sdt_content(self, update_sdt_content): self._update_sdt_content = update_sdt_content @property def zip_output(self): return self._zip_output @zip_output.setter def zip_output(self, zip_output): self._zip_output = zip_output @property
MIT License
universitadellacalabria/uniticket
uni_ticket/views/manager.py
category_add_new
python
def category_add_new(request, structure_slug, structure):
    title = _('Nuova tipologia di richiesta')
    sub_title = _("Crea una nuova tipologia di richieste nella struttura {}").format(structure)
    form = CategoryForm()
    if request.method == 'POST':
        form = CategoryForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            slug = slugify(name)
            m = TicketCategory
            slug_name_exist = m.objects.filter(Q(name=name) | Q(slug=slug),
                                               organizational_structure=structure)
            if slug_name_exist:
                logger.error('[{}] manager of structure {}'
                             ' {} tried to add a new category'
                             ' with existent name {} or slug {}'.format(timezone.localtime(),
                                                                        structure,
                                                                        request.user,
                                                                        name, slug))
                messages.add_message(request, messages.ERROR,
                                     _("Esiste già una tipologia di richiesta con"
                                       " nome {} o slug {}").format(name, slug))
            else:
                new_category = form.save(commit=False)
                protocol_required = form.cleaned_data['protocol_required']
                if protocol_required:
                    protocol_required = False
                    messages.add_message(request, messages.INFO,
                                         _("Prima di attivare il protocollo "
                                           "obbligatorio è necessario "
                                           "configurare i parametri"))
                new_category.protocol_required = protocol_required
                new_category.slug = slug
                new_category.organizational_structure = structure
                new_category.save()
                messages.add_message(request, messages.SUCCESS,
                                     _("Categoria creata con successo"))
                logger.info('[{}] manager of structure {}'
                            ' {} added a new category'
                            ' with name {} and slug {}'.format(timezone.localtime(),
                                                               structure,
                                                               request.user,
                                                               name, slug))
                return redirect('uni_ticket:manager_category_detail',
                                structure_slug=structure_slug,
                                category_slug=new_category.slug)
        else:
            for k, v in get_labeled_errors(form).items():
                messages.add_message(request, messages.ERROR,
                                     "<b>{}</b>: {}".format(k, strip_tags(v)))
    template = 'manager/category_add_new.html'
    d = {'form': form,
         'structure': structure,
         'sub_title': sub_title,
         'title': title,}
    return render(request, template, d)
Adds new category :type structure_slug: String :type structure: OrganizationalStructure (from @is_manager) :param structure_slug: structure slug :param structure: structure object (from @is_manager) :return: render
https://github.com/universitadellacalabria/uniticket/blob/b7c6e9b793eda273038a6339f6dfdfc3e3b5a344/uni_ticket/views/manager.py#L749-L825
import datetime import logging from django.conf import settings from django.contrib import messages from django.contrib.admin.views.decorators import staff_member_required from django.contrib.auth import get_user_model from django.contrib.auth.decorators import login_required from django.db.models import Count from django.http import HttpResponse, HttpResponseRedirect from django.db.models import Q from django.shortcuts import get_object_or_404, render, redirect from django.utils.html import strip_tags from django.utils.text import slugify from django.utils.translation import gettext as _ from django_form_builder.utils import get_labeled_errors from organizational_area.models import * from uni_ticket.decorators import (has_access_to_ticket, is_manager, ticket_assigned_to_structure, ticket_is_not_taken_and_not_closed) from uni_ticket.forms import * from uni_ticket.models import * from uni_ticket.protocol_utils import ticket_protocol from uni_ticket.utils import (custom_message, office_can_be_deleted, user_is_manager, uuid_code) logger = logging.getLogger(__name__) @login_required @is_manager def dashboard(request, structure_slug, structure): title = _("Pannello di Controllo") sub_title = _("Gestisci le richieste per la struttura {}").format(structure) template = "manager/dashboard.html" ta = TicketAssignment structure_tickets = ta.get_ticket_per_structure(structure=structure) tickets = Ticket.objects.filter(code__in=structure_tickets) not_closed = tickets.filter(is_closed=False) unassigned = 0 opened = 0 my_opened = 0 for nc in not_closed: if nc.has_been_taken(): opened += 1 if nc.has_been_taken_by_user(structure=structure, user=request.user): my_opened += 1 else: unassigned += 1 chiusi = tickets.filter(is_closed=True).count() om = OrganizationalStructureOffice offices = om.objects.filter(organizational_structure=structure) cm = TicketCategory categories = cm.objects.filter(organizational_structure=structure) disabled_expired_items(categories) messages = TicketReply.get_unread_messages_count(tickets=tickets) d = {'categories': categories, 'offices': offices, 'structure': structure, 'sub_title': sub_title, 'ticket_aperti': opened, 'ticket_assegnati_a_me': my_opened, 'ticket_chiusi': chiusi, 'ticket_messages': messages, 'ticket_non_gestiti': unassigned, 'title': title,} return render(request, template, d) @login_required @is_manager def offices(request, structure_slug, structure): title = _('Gestione uffici') template = 'manager/offices.html' os = OrganizationalStructureOffice offices = os.objects.filter(organizational_structure=structure) d = {'offices': offices, 'structure': structure, 'title': title,} return render(request, template, d) @login_required @is_manager def office_add_new(request, structure_slug, structure): title = _('Nuovo ufficio') sub_title = _("Crea un nuovo ufficio nella struttura {}").format(structure) form = OfficeForm() if request.method == 'POST': form = OfficeForm(request.POST) if form.is_valid(): name = form.cleaned_data['name'] slug = slugify(name) os = OrganizationalStructureOffice slug_name_exist = os.objects.filter(Q(name=name) | Q(slug=slug), organizational_structure=structure) if slug_name_exist: messages.add_message(request, messages.ERROR, _("Esiste già un ufficio con" " nome {} o slug {}".format(name, slug))) else: new_office = form.save(commit=False) new_office.slug = slug new_office.organizational_structure = structure new_office.save() logger.info('[{}] manager of structure {}' ' {} created new office {}'.format(timezone.localtime(), structure, 
request.user, new_office)) messages.add_message(request, messages.SUCCESS, _("Ufficio creato con successo")) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=new_office.slug) else: for k,v in get_labeled_errors(form).items(): messages.add_message(request, messages.ERROR, "{}: {}".format(k, strip_tags(v))) template = 'manager/office_add_new.html' d = {'form': form, 'structure': structure, 'sub_title': sub_title, 'title': title,} return render(request, template, d) @login_required @is_manager def office_edit(request, structure_slug, office_slug, structure): office = get_object_or_404(OrganizationalStructureOffice, organizational_structure=structure, slug=office_slug) title = _('Modifica ufficio') sub_title = office.name form = OfficeForm(instance=office) if request.method == 'POST': form = OfficeForm(instance=office, data=request.POST) if form.is_valid(): name = form.cleaned_data['name'] slug = slugify(name) oso = OrganizationalStructureOffice slug_name_exist = oso.objects.filter(Q(name=name) | Q(slug=slug), organizational_structure=structure).exclude(pk=office.pk) if slug_name_exist: messages.add_message(request, messages.ERROR, _("Esiste già un ufficio con questo" " nome o slug")) else: edited_office = form.save(commit=False) edited_office.slug = slug edited_office.save() messages.add_message(request, messages.SUCCESS, _("Ufficio modificato con successo")) logger.info('[{}] manager of structure {}' ' {} edited office {}'.format(timezone.localtime(), structure, request.user, edited_office)) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=edited_office.slug) else: for k,v in get_labeled_errors(form).items(): messages.add_message(request, messages.ERROR, "<b>{}</b>: {}".format(k, strip_tags(v))) template = 'manager/office_edit.html' d = {'form': form, 'office': office, 'structure': structure, 'sub_title': sub_title, 'title': title,} return render(request, template, d) @login_required @is_manager def office_detail(request, structure_slug, office_slug, structure): office = get_object_or_404(OrganizationalStructureOffice, organizational_structure=structure, slug=office_slug) title = _('Gestione ufficio') template = 'manager/office_detail.html' sub_title = office.name form = OfficeAddOperatorForm(structure=structure, office_slug=office_slug) category_form = OfficeAddCategoryForm(structure=structure, office=office) if request.method == 'POST': form = OfficeAddOperatorForm(request.POST, structure=structure, office_slug=office_slug) if form.is_valid(): employee = form.cleaned_data['operatore'] description = form.cleaned_data['description'] osoe = OrganizationalStructureOfficeEmployee new_officeemployee = osoe(employee=employee, office=office, description=description) new_officeemployee.save() messages.add_message(request, messages.SUCCESS, _("Operatore assegnato con successo")) logger.info('[{}] manager of structure {}' ' {} added employee {}' ' to office {}'.format(timezone.localtime(), structure, request.user, employee, office)) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=office_slug) else: for k,v in get_labeled_errors(form).items(): messages.add_message(request, messages.ERROR, "<b>{}</b>: {}".format(k, strip_tags(v))) em = OrganizationalStructureOfficeEmployee employees = em.objects.filter(office=office, employee__is_active=True) d = {'category_form': category_form, 'employees': employees, 'form': form, 'office': office, 'structure': structure, 
'sub_title': sub_title, 'title': title,} return render(request, template, d) @login_required @is_manager def office_add_category(request, structure_slug, office_slug, structure): if request.method == 'POST': office = get_object_or_404(OrganizationalStructureOffice, organizational_structure=structure, slug=office_slug) form = OfficeAddCategoryForm(request.POST, structure=structure, office=office) if form.is_valid(): category = form.cleaned_data['category'] if category.organizational_office: messages.add_message(request, messages.ERROR, _("La tipologia di richiesta <b>{}</b> risulta " "già assegnato all'ufficio <b>{}</b>. " "Rimuovere la competenza per " "procedere").format(category, category.organizational_office)) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=office_slug) category.organizational_office = office category.save(update_fields = ['organizational_office']) messages.add_message(request, messages.SUCCESS, _("Competenza ufficio impostata con successo")) logger.info('[{}] manager of structure {}' ' {} added category {}' ' to office {}'.format(timezone.localtime(), structure, request.user, category, office)) else: for k,v in get_labeled_errors(form).items(): messages.add_message(request, messages.ERROR, "<b>{}</b>: {}".format(k, strip_tags(v))) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=office_slug) return custom_message(request, _("Impossibile accedere a questo URL " "senza passare dal form collegato."), structure_slug=structure.slug) @login_required @is_manager def office_remove_category(request, structure_slug, office_slug, category_slug, structure): office = get_object_or_404(OrganizationalStructureOffice, organizational_structure=structure, slug=office_slug) category = get_object_or_404(TicketCategory, organizational_structure=structure, slug=category_slug) if category.organizational_office != office: messages.add_message(request, messages.ERROR, _("La tipologia di richiesta non è di competenza di" " questo ufficio")) else: category.organizational_office = None category.is_active = False category.save(update_fields = ['organizational_office', 'is_active']) messages.add_message(request, messages.SUCCESS, _("La tipologia di richiesta <b>{}</b> non è più di competenza " " dell'ufficio <b>{}</b> ed è stato disattivato".format(category, office))) logger.info('[{}] manager of structure {}' ' {} removed category {}' ' from office {}'.format(timezone.localtime(), structure, request.user, category, office)) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=office_slug) @login_required @is_manager def office_remove_operator(request, structure_slug, office_slug, employee_id, structure): user_model = get_user_model() employee = user_model.objects.get(pk=employee_id) usertype = get_user_type(employee, structure) office = get_object_or_404(OrganizationalStructureOffice, organizational_structure=structure, slug=office_slug) if usertype == 'manager' and office.is_default: return custom_message(request, _("Eliminando l'afferenza dell'utente" " a questo ufficio, egli perderà i" " privilegi di Amministratore." " Questa operazione, pertanto," " non può essere eseguita in autonomia." 
" Contattare l'assistenza tecnica."), structure_slug=structure.slug) m = OrganizationalStructureOfficeEmployee office_employee = m.objects.get(office=office, employee=employee) if not office_employee: messages.add_message(request, messages.ERROR, _("L'operatore non è assegnato a questo ufficio")) else: logger.info('[{}] manager of structure {}' ' {} removed employee {}' ' from office {}'.format(timezone.localtime(), structure, request.user, employee, office)) office_employee.delete() messages.add_message(request, messages.SUCCESS, _("Operatore {} rimosso correttamente").format(employee)) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=office_slug) @login_required @is_manager def office_disable(request, structure_slug, office_slug, structure): office = get_object_or_404(OrganizationalStructureOffice, organizational_structure=structure, slug=office_slug) one_tickets_for_this_office = False office_tickets = TicketAssignment.objects.filter(office=office, follow=True) one_tickets_for_this_office = False for ot in office_tickets: other_offices_for_ticket = TicketAssignment.objects.filter(office__is_active=True, ticket=ot.ticket, follow=True, readonly=False).exclude(office=office) if not other_offices_for_ticket: one_tickets_for_this_office = True break if office.is_default: messages.add_message(request, messages.ERROR, _("Impossibile disattivare questo ufficio")) elif one_tickets_for_this_office: messages.add_message(request, messages.ERROR, _("Impossibile disattivare questo ufficio." " Alcuni ticket potrebbero rimanere privi di gestione")) elif office.is_active: assigned_categories = TicketCategory.objects.filter(organizational_office=office) for cat in assigned_categories: cat.is_active = False cat.save(update_fields = ['is_active']) messages.add_message(request, messages.SUCCESS, _("Categoria {} disattivata correttamente").format(cat)) office.is_active = False office.save(update_fields = ['is_active']) messages.add_message(request, messages.SUCCESS, _("Ufficio {} disattivato con successo").format(office)) logger.info('[{}] manager of structure {}' ' {} disabled office {}'.format(timezone.localtime(), structure, request.user, office)) else: messages.add_message(request, messages.ERROR, _("Ufficio {} già disattivato").format(office)) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=office_slug) @login_required @is_manager def office_enable(request, structure_slug, office_slug, structure): office = get_object_or_404(OrganizationalStructureOffice, organizational_structure=structure, slug=office_slug) if office.is_active: messages.add_message(request, messages.ERROR, _("Ufficio {} già attivato").format(office)) else: office.is_active = True office.save(update_fields = ['is_active']) messages.add_message(request, messages.SUCCESS, _("Ufficio {} attivato con successo").format(office)) logger.info('[{}] manager of structure {}' ' {} enabled office {}'.format(timezone.localtime(), structure, request.user, office)) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=office_slug) @login_required @is_manager def office_delete(request, structure_slug, office_slug, structure): office = get_object_or_404(OrganizationalStructureOffice, organizational_structure=structure, slug=office_slug) if office_can_be_deleted(office): assigned_categories = TicketCategory.objects.filter(organizational_office=office) for cat in assigned_categories: cat.is_active = False 
cat.save(update_fields = ['is_active']) messages.add_message(request, messages.SUCCESS, _("Categoria {} disattivata correttamente").format(cat)) messages.add_message(request, messages.SUCCESS, _("Ufficio {} eliminato correttamente").format(office)) logger.info('[{}] manager of structure {}' ' {} deleted office {}'.format(timezone.localtime(), structure, request.user, office)) office.delete() return redirect('uni_ticket:manager_dashboard', structure_slug=structure_slug) messages.add_message(request, messages.ERROR, _("Impossibile eliminare l'ufficio {}." " Ci sono ticket assegnati" " o è l'ufficio predefinito della struttura.").format(office)) return redirect('uni_ticket:manager_office_detail', structure_slug=structure_slug, office_slug=office_slug) @login_required @is_manager def category_detail(request, structure_slug, category_slug, structure): category = get_object_or_404(TicketCategory, organizational_structure=structure, slug=category_slug) category.disable_if_expired() title = _('Gestione tipologia di richiesta') template = 'manager/category_detail.html' sub_title = category form = CategoryAddOfficeForm(structure=structure) if request.method == 'POST': if category.organizational_office: messages.add_message(request, messages.ERROR, _("Competenza ufficio già presente")) return redirect('uni_ticket:manager_category_detail', structure_slug=structure_slug, category_slug=category_slug) form = CategoryAddOfficeForm(request.POST, structure=structure) if form.is_valid(): office = form.cleaned_data['office'] category.organizational_office = office category.save(update_fields = ['organizational_office']) messages.add_message(request, messages.SUCCESS, _("Competenza ufficio impostata con successo")) return redirect('uni_ticket:manager_category_detail', structure_slug=structure_slug, category_slug=category_slug) else: for k,v in get_labeled_errors(form).items(): messages.add_message(request, messages.ERROR, "<b>{}</b>: {}".format(k, strip_tags(v))) slug_url = request.build_absolute_uri(reverse('uni_ticket:add_new_ticket', kwargs={'structure_slug': structure.slug, 'category_slug': category.slug})) pk_url = request.build_absolute_uri(reverse('uni_ticket:add_new_ticket', kwargs={'structure_slug': structure.pk, 'category_slug': category.pk})) category_urls = (slug_url, pk_url) d = {'category': category, 'category_urls': category_urls, 'form': form, 'structure': structure, 'sub_title': sub_title, 'title': title,} return render(request, template, d) @login_required @is_manager def category_remove_office(request, structure_slug, category_slug, office_slug, structure): category = get_object_or_404(TicketCategory, organizational_structure=structure, slug=category_slug) office = get_object_or_404(OrganizationalStructureOffice, organizational_structure=structure, slug=office_slug) if category.organizational_office != office: messages.add_message(request, messages.ERROR, _("La tipologia di richiesta non è di competenza di" " questo ufficio")) else: category.organizational_office = None category.is_active = False category.save(update_fields = ['is_active', 'organizational_office']) messages.add_message(request, messages.SUCCESS, _("Competenza ufficio {} rimossa correttamente").format(office)) messages.add_message(request, messages.ERROR, _("Tipo di richieste {} disattivato poichè" " priva di ufficio competente").format(category)) logger.info('[{}] manager of structure {}' ' {} removed office {}' ' from category {}' ' (now disabled)'.format(timezone.localtime(), structure, request.user, office, category)) 
return redirect('uni_ticket:manager_category_detail', structure_slug=structure_slug, category_slug=category_slug) @login_required @is_manager
Apache License 2.0
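The pivotal check in category_add_new above is the name-or-slug collision test scoped to one structure. Pulled out as a small helper purely for illustration (no such function exists in uniticket), it reads:

from django.db.models import Q
from django.utils.text import slugify

def category_name_or_slug_taken(structure, name):
    """Illustrative helper: True if a TicketCategory with this name or its
    slug already exists in the given organizational structure."""
    slug = slugify(name)
    return TicketCategory.objects.filter(
        Q(name=name) | Q(slug=slug),
        organizational_structure=structure,
    ).exists()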
td22057/t-home
python/tHome/eagle/messages/Reading.py
Reading.__init__
python
def __init__( self, node ):
    assert( node.tag == "Reading" )
    Base.__init__( self, "Reading", node )
    convert.time( self, "Time", "TimeUnix", self.TimeStamp )
    self.Value = float( self.Value )
node == xml ETree node
https://github.com/td22057/t-home/blob/5dc8689f52d87dac890051e540b338b009293ced/python/tHome/eagle/messages/Reading.py#L39-L46
from .Base import Base from . import convert class Reading ( Base ): _intHexKeys = [] _numHexKeys = [ "TimeStamp" ] _jsonKeys = [ "Value", "Type" ]
BSD 2-Clause Simplified License
xingjunm/lid_adversarial_subspace_detection
util.py
block_split
python
def block_split(X, Y):
    print("Isolated split 80%, 20% for training and testing")
    num_samples = X.shape[0]
    partition = int(num_samples / 3)
    X_adv, Y_adv = X[:partition], Y[:partition]
    X_norm, Y_norm = X[partition: 2*partition], Y[partition: 2*partition]
    X_noisy, Y_noisy = X[2*partition:], Y[2*partition:]
    num_train = int(partition*0.008) * 100

    X_train = np.concatenate((X_norm[:num_train], X_noisy[:num_train], X_adv[:num_train]))
    Y_train = np.concatenate((Y_norm[:num_train], Y_noisy[:num_train], Y_adv[:num_train]))

    X_test = np.concatenate((X_norm[num_train:], X_noisy[num_train:], X_adv[num_train:]))
    Y_test = np.concatenate((Y_norm[num_train:], Y_noisy[num_train:], Y_adv[num_train:]))

    return X_train, Y_train, X_test, Y_test
Split the data into 80% for training and 20% for testing
in a block size of 100.

:param X:
:param Y:
:return:
https://github.com/xingjunm/lid_adversarial_subspace_detection/blob/29ae6f5d1b376753876c9cb404e7fe297d17a2d4/util.py#L770-L792
from __future__ import absolute_import from __future__ import print_function import os import multiprocessing as mp from subprocess import call import warnings import numpy as np import scipy.io as sio from tqdm import tqdm import matplotlib.pyplot as plt from sklearn.metrics import roc_curve, auc, roc_auc_score from sklearn.linear_model import LogisticRegressionCV from sklearn.preprocessing import scale import keras.backend as K from keras.datasets import mnist, cifar10 from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization from keras.layers import Conv2D, MaxPooling2D from keras.regularizers import l2 import tensorflow as tf from scipy.spatial.distance import pdist, cdist, squareform from keras import regularizers from sklearn.decomposition import PCA STDEVS = { 'mnist': {'fgsm': 0.271, 'bim-a': 0.111, 'bim-b': 0.167, 'cw-l2': 0.207}, 'cifar': {'fgsm': 0.0504, 'bim-a': 0.0084, 'bim-b': 0.0428, 'cw-l2': 0.007}, 'svhn': {'fgsm': 0.133, 'bim-a': 0.0155, 'bim-b': 0.095, 'cw-l2': 0.008} } CLIP_MIN = -0.5 CLIP_MAX = 0.5 PATH_DATA = "data/" np.random.seed(0) def get_data(dataset='mnist'): assert dataset in ['mnist', 'cifar', 'svhn'], "dataset parameter must be either 'mnist' 'cifar' or 'svhn'" if dataset == 'mnist': (X_train, y_train), (X_test, y_test) = mnist.load_data() X_train = X_train.reshape(-1, 28, 28, 1) X_test = X_test.reshape(-1, 28, 28, 1) elif dataset == 'cifar': (X_train, y_train), (X_test, y_test) = cifar10.load_data() else: if not os.path.isfile(os.path.join(PATH_DATA, "svhn_train.mat")): print('Downloading SVHN train set...') call( "curl -o ../data/svhn_train.mat " "http://ufldl.stanford.edu/housenumbers/train_32x32.mat", shell=True ) if not os.path.isfile(os.path.join(PATH_DATA, "svhn_test.mat")): print('Downloading SVHN test set...') call( "curl -o ../data/svhn_test.mat " "http://ufldl.stanford.edu/housenumbers/test_32x32.mat", shell=True ) train = sio.loadmat(os.path.join(PATH_DATA,'svhn_train.mat')) test = sio.loadmat(os.path.join(PATH_DATA, 'svhn_test.mat')) X_train = np.transpose(train['X'], axes=[3, 0, 1, 2]) X_test = np.transpose(test['X'], axes=[3, 0, 1, 2]) y_train = np.reshape(train['y'], (-1,)) - 1 y_test = np.reshape(test['y'], (-1,)) - 1 X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train = (X_train/255.0) - (1.0 - CLIP_MAX) X_test = (X_test/255.0) - (1.0 - CLIP_MAX) Y_train = np_utils.to_categorical(y_train, 10) Y_test = np_utils.to_categorical(y_test, 10) print("X_train:", X_train.shape) print("Y_train:", Y_train.shape) print("X_test:", X_test.shape) print("Y_test", Y_test.shape) return X_train, Y_train, X_test, Y_test def get_model(dataset='mnist', softmax=True): assert dataset in ['mnist', 'cifar', 'svhn'], "dataset parameter must be either 'mnist' 'cifar' or 'svhn'" if dataset == 'mnist': layers = [ Conv2D(64, (3, 3), padding='valid', input_shape=(28, 28, 1)), Activation('relu'), BatchNormalization(), Conv2D(64, (3, 3)), Activation('relu'), BatchNormalization(), MaxPooling2D(pool_size=(2, 2)), Dropout(0.5), Flatten(), Dense(128), Activation('relu'), BatchNormalization(), Dropout(0.5), Dense(10), ] elif dataset == 'cifar': layers = [ Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3)), Activation('relu'), BatchNormalization(), Conv2D(32, (3, 3), padding='same'), Activation('relu'), BatchNormalization(), MaxPooling2D(pool_size=(2, 2)), Conv2D(64, (3, 3), padding='same'), Activation('relu'), BatchNormalization(), Conv2D(64, (3, 3), 
padding='same'), Activation('relu'), BatchNormalization(), MaxPooling2D(pool_size=(2, 2)), Conv2D(128, (3, 3), padding='same'), Activation('relu'), BatchNormalization(), Conv2D(128, (3, 3), padding='same'), Activation('relu'), BatchNormalization(), MaxPooling2D(pool_size=(2, 2)), Flatten(), Dropout(0.5), Dense(1024, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)), Activation('relu'), BatchNormalization(), Dropout(0.5), Dense(512, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)), Activation('relu'), BatchNormalization(), Dropout(0.5), Dense(10), ] else: layers = [ Conv2D(64, (3, 3), padding='valid', input_shape=(32, 32, 3)), Activation('relu'), BatchNormalization(), Conv2D(64, (3, 3)), Activation('relu'), BatchNormalization(), MaxPooling2D(pool_size=(2, 2)), Dropout(0.5), Flatten(), Dense(512), Activation('relu'), BatchNormalization(), Dropout(0.5), Dense(128), Activation('relu'), BatchNormalization(), Dropout(0.5), Dense(10), ] model = Sequential() for layer in layers: model.add(layer) if softmax: model.add(Activation('softmax')) return model def cross_entropy(y_true, y_pred): return tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred) def lid_term(logits, batch_size=100): y_pred = logits r = tf.reduce_sum(tf.square(y_pred), axis=1) r = tf.reshape(r, [-1, 1]) D = r - 2 * tf.matmul(y_pred, tf.transpose(y_pred)) + tf.transpose(r) D1 = tf.sqrt(D + 1e-9) D2, _ = tf.nn.top_k(-D1, k=21, sorted=True) D3 = -D2[:, 1:] m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1])) v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1) lids = -20 / v_log return lids def lid_adv_term(clean_logits, adv_logits, batch_size=100): c_pred = tf.reshape(clean_logits, (batch_size, -1)) a_pred = tf.reshape(adv_logits, (batch_size, -1)) r_a = tf.reduce_sum(tf.square(a_pred), axis=1) r_a = tf.reshape(r_a, [-1, 1]) r_c = tf.reduce_sum(tf.square(c_pred), axis=1) r_c = tf.reshape(r_c, [1, -1]) D = r_a - 2 * tf.matmul(a_pred, tf.transpose(c_pred)) + r_c D1 = tf.sqrt(D + 1e-9) D2, _ = tf.nn.top_k(-D1, k=21, sorted=True) D3 = -D2[:, 1:] m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1])) v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1) lids = -20 / v_log lids = tf.nn.l2_normalize(lids, dim=0, epsilon=1e-12) return lids def flip(x, nb_diff): original_shape = x.shape x = np.copy(np.reshape(x, (-1,))) candidate_inds = np.where(x < CLIP_MAX)[0] assert candidate_inds.shape[0] >= nb_diff inds = np.random.choice(candidate_inds, nb_diff) x[inds] = CLIP_MAX return np.reshape(x, original_shape) def get_noisy_samples(X_test, X_test_adv, dataset, attack): if attack in ['jsma', 'cw-l0']: X_test_noisy = np.zeros_like(X_test) for i in range(len(X_test)): nb_diff = len(np.where(X_test[i] != X_test_adv[i])[0]) X_test_noisy[i] = flip(X_test[i], nb_diff) else: warnings.warn("Important: using pre-set Gaussian scale sizes to craft noisy " "samples. You will definitely need to manually tune the scale " "according to the L2 print below, otherwise the result " "will inaccurate. In future scale sizes will be inferred " "automatically. 
For now, manually tune the scales around " "mnist: L2/20.0, cifar: L2/54.0, svhn: L2/60.0") X_test_noisy = np.minimum( np.maximum( X_test + np.random.normal(loc=0, scale=STDEVS[dataset][attack], size=X_test.shape), CLIP_MIN ), CLIP_MAX ) return X_test_noisy def get_mc_predictions(model, X, nb_iter=50, batch_size=256): output_dim = model.layers[-1].output.shape[-1].value get_output = K.function( [model.layers[0].input, K.learning_phase()], [model.layers[-1].output] ) def predict(): n_batches = int(np.ceil(X.shape[0] / float(batch_size))) output = np.zeros(shape=(len(X), output_dim)) for i in range(n_batches): output[i * batch_size:(i + 1) * batch_size] = get_output([X[i * batch_size:(i + 1) * batch_size], 1])[0] return output preds_mc = [] for i in tqdm(range(nb_iter)): preds_mc.append(predict()) return np.asarray(preds_mc) def get_deep_representations(model, X, batch_size=256): output_dim = model.layers[-4].output.shape[-1].value get_encoding = K.function( [model.layers[0].input, K.learning_phase()], [model.layers[-4].output] ) n_batches = int(np.ceil(X.shape[0] / float(batch_size))) output = np.zeros(shape=(len(X), output_dim)) for i in range(n_batches): output[i * batch_size:(i + 1) * batch_size] = get_encoding([X[i * batch_size:(i + 1) * batch_size], 0])[0] return output def get_layer_wise_activations(model, dataset): assert dataset in ['mnist', 'cifar', 'svhn'], "dataset parameter must be either 'mnist' 'cifar' or 'svhn'" if dataset == 'mnist': acts = [model.layers[0].input] acts.extend([layer.output for layer in model.layers]) elif dataset == 'cifar': acts = [model.layers[0].input] acts.extend([layer.output for layer in model.layers]) else: acts = [model.layers[0].input] acts.extend([layer.output for layer in model.layers]) return acts def mle_single(data, x, k=20): data = np.asarray(data, dtype=np.float32) x = np.asarray(x, dtype=np.float32) if x.ndim == 1: x = x.reshape((-1, x.shape[0])) k = min(k, len(data)-1) f = lambda v: - k / np.sum(np.log(v/v[-1])) a = cdist(x, data) a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1] a = np.apply_along_axis(f, axis=1, arr=a) return a[0] def mle_batch(data, batch, k): data = np.asarray(data, dtype=np.float32) batch = np.asarray(batch, dtype=np.float32) k = min(k, len(data)-1) f = lambda v: - k / np.sum(np.log(v/v[-1])) a = cdist(batch, data) a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1] a = np.apply_along_axis(f, axis=1, arr=a) return a def kmean_batch(data, batch, k): data = np.asarray(data, dtype=np.float32) batch = np.asarray(batch, dtype=np.float32) k = min(k, len(data)-1) f = lambda v: np.mean(v) a = cdist(batch, data) a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1] a = np.apply_along_axis(f, axis=1, arr=a) return a def kmean_pca_batch(data, batch, k=10): data = np.asarray(data, dtype=np.float32) batch = np.asarray(batch, dtype=np.float32) a = np.zeros(batch.shape[0]) for i in np.arange(batch.shape[0]): tmp = np.concatenate((data, [batch[i]])) tmp_pca = PCA(n_components=2).fit_transform(tmp) a[i] = kmean_batch(tmp_pca[:-1], tmp_pca[-1], k=k) return a def get_lids_random_batch(model, X, X_noisy, X_adv, dataset, k=10, batch_size=100): funcs = [K.function([model.layers[0].input, K.learning_phase()], [out]) for out in get_layer_wise_activations(model, dataset)] lid_dim = len(funcs) print("Number of layers to estimate: ", lid_dim) def estimate(i_batch): start = i_batch * batch_size end = np.minimum(len(X), (i_batch + 1) * batch_size) n_feed = end - start lid_batch = np.zeros(shape=(n_feed, lid_dim)) lid_batch_adv = 
np.zeros(shape=(n_feed, lid_dim)) lid_batch_noisy = np.zeros(shape=(n_feed, lid_dim)) for i, func in enumerate(funcs): X_act = func([X[start:end], 0])[0] X_act = np.asarray(X_act, dtype=np.float32).reshape((n_feed, -1)) X_adv_act = func([X_adv[start:end], 0])[0] X_adv_act = np.asarray(X_adv_act, dtype=np.float32).reshape((n_feed, -1)) X_noisy_act = func([X_noisy[start:end], 0])[0] X_noisy_act = np.asarray(X_noisy_act, dtype=np.float32).reshape((n_feed, -1)) lid_batch[:, i] = mle_batch(X_act, X_act, k=k) lid_batch_adv[:, i] = mle_batch(X_act, X_adv_act, k=k) lid_batch_noisy[:, i] = mle_batch(X_act, X_noisy_act, k=k) return lid_batch, lid_batch_noisy, lid_batch_adv lids = [] lids_adv = [] lids_noisy = [] n_batches = int(np.ceil(X.shape[0] / float(batch_size))) for i_batch in tqdm(range(n_batches)): lid_batch, lid_batch_noisy, lid_batch_adv = estimate(i_batch) lids.extend(lid_batch) lids_adv.extend(lid_batch_adv) lids_noisy.extend(lid_batch_noisy) lids = np.asarray(lids, dtype=np.float32) lids_noisy = np.asarray(lids_noisy, dtype=np.float32) lids_adv = np.asarray(lids_adv, dtype=np.float32) return lids, lids_noisy, lids_adv def get_kmeans_random_batch(model, X, X_noisy, X_adv, dataset, k=10, batch_size=100, pca=False): funcs = [K.function([model.layers[0].input, K.learning_phase()], [model.layers[-2].output])] km_dim = len(funcs) print("Number of layers to use: ", km_dim) def estimate(i_batch): start = i_batch * batch_size end = np.minimum(len(X), (i_batch + 1) * batch_size) n_feed = end - start km_batch = np.zeros(shape=(n_feed, km_dim)) km_batch_adv = np.zeros(shape=(n_feed, km_dim)) km_batch_noisy = np.zeros(shape=(n_feed, km_dim)) for i, func in enumerate(funcs): X_act = func([X[start:end], 0])[0] X_act = np.asarray(X_act, dtype=np.float32).reshape((n_feed, -1)) X_adv_act = func([X_adv[start:end], 0])[0] X_adv_act = np.asarray(X_adv_act, dtype=np.float32).reshape((n_feed, -1)) X_noisy_act = func([X_noisy[start:end], 0])[0] X_noisy_act = np.asarray(X_noisy_act, dtype=np.float32).reshape((n_feed, -1)) if pca: km_batch[:, i] = kmean_pca_batch(X_act, X_act, k=k) else: km_batch[:, i] = kmean_batch(X_act, X_act, k=k) if pca: km_batch_adv[:, i] = kmean_pca_batch(X_act, X_adv_act, k=k) else: km_batch_adv[:, i] = kmean_batch(X_act, X_adv_act, k=k) if pca: km_batch_noisy[:, i] = kmean_pca_batch(X_act, X_noisy_act, k=k) else: km_batch_noisy[:, i] = kmean_batch(X_act, X_noisy_act, k=k) return km_batch, km_batch_noisy, km_batch_adv kms = [] kms_adv = [] kms_noisy = [] n_batches = int(np.ceil(X.shape[0] / float(batch_size))) for i_batch in tqdm(range(n_batches)): km_batch, km_batch_noisy, km_batch_adv = estimate(i_batch) kms.extend(km_batch) kms_adv.extend(km_batch_adv) kms_noisy.extend(km_batch_noisy) kms = np.asarray(kms, dtype=np.float32) kms_noisy = np.asarray(kms_noisy, dtype=np.float32) kms_adv = np.asarray(kms_adv, dtype=np.float32) return kms, kms_noisy, kms_adv def score_point(tup): x, kde = tup return kde.score_samples(np.reshape(x, (1, -1)))[0] def score_samples(kdes, samples, preds, n_jobs=None): if n_jobs is not None: p = mp.Pool(n_jobs) else: p = mp.Pool() results = np.asarray( p.map( score_point, [(x, kdes[i]) for x, i in zip(samples, preds)] ) ) p.close() p.join() return results def normalize(normal, adv, noisy): n_samples = len(normal) total = scale(np.concatenate((normal, adv, noisy))) return total[:n_samples], total[n_samples:2*n_samples], total[2*n_samples:] def train_lr(X, y): lr = LogisticRegressionCV(n_jobs=-1).fit(X, y) return lr def train_lr_rfeinman(densities_pos, 
densities_neg, uncerts_pos, uncerts_neg): values_neg = np.concatenate( (densities_neg.reshape((1, -1)), uncerts_neg.reshape((1, -1))), axis=0).transpose([1, 0]) values_pos = np.concatenate( (densities_pos.reshape((1, -1)), uncerts_pos.reshape((1, -1))), axis=0).transpose([1, 0]) values = np.concatenate((values_neg, values_pos)) labels = np.concatenate( (np.zeros_like(densities_neg), np.ones_like(densities_pos))) lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels) return values, labels, lr def compute_roc(y_true, y_pred, plot=False): fpr, tpr, _ = roc_curve(y_true, y_pred) auc_score = roc_auc_score(y_true, y_pred) if plot: plt.figure(figsize=(7, 6)) plt.plot(fpr, tpr, color='blue', label='ROC (AUC = %0.4f)' % auc_score) plt.legend(loc='lower right') plt.title("ROC Curve") plt.xlabel("FPR") plt.ylabel("TPR") plt.show() return fpr, tpr, auc_score def compute_roc_rfeinman(probs_neg, probs_pos, plot=False): probs = np.concatenate((probs_neg, probs_pos)) labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos))) fpr, tpr, _ = roc_curve(labels, probs) auc_score = auc(fpr, tpr) if plot: plt.figure(figsize=(7, 6)) plt.plot(fpr, tpr, color='blue', label='ROC (AUC = %0.4f)' % auc_score) plt.legend(loc='lower right') plt.title("ROC Curve") plt.xlabel("FPR") plt.ylabel("TPR") plt.show() return fpr, tpr, auc_score def random_split(X, Y): print("random split 80%, 20% for training and testing") num_samples = X.shape[0] num_train = int(num_samples * 0.8) rand_pert = np.random.permutation(num_samples) X = X[rand_pert] Y = Y[rand_pert] X_train, X_test = X[:num_train], X[num_train:] Y_train, Y_test = Y[:num_train], Y[num_train:] return X_train, Y_train, X_test, Y_test
MIT License
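Illustrative call for block_split above, with synthetic arrays standing in for real LID features. It assumes util.py is on the import path and that X is stacked as [adversarial | normal | noisy] thirds, which is the ordering the slicing relies on; the feature values and labels below are made up.

import numpy as np
from util import block_split   # assumes util.py is importable

# 3000 rows stacked as [adversarial | normal | noisy], 5 made-up feature columns.
X = np.random.rand(3000, 5)
Y = np.concatenate((np.ones(1000), np.zeros(1000), np.zeros(1000)))

X_train, Y_train, X_test, Y_test = block_split(X, Y)
print(X_train.shape, X_test.shape)   # 80% / 20% of each third, taken in blocks of 100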
pytest-dev/pytest-testinfra
testinfra/backend/base.py
BaseBackend.get_connection_type
python
def get_connection_type(cls):
    return cls.NAME
Return the connection backend used as string. Can be local, paramiko, ssh, docker, salt or ansible
https://github.com/pytest-dev/pytest-testinfra/blob/1974005549cc1b37af778371879b17ea372150d8/testinfra/backend/base.py#L132-L137
import abc import collections import locale import logging import shlex import subprocess import urllib.parse logger = logging.getLogger("testinfra") HostSpec = collections.namedtuple("HostSpec", ["name", "port", "user", "password"]) class CommandResult: def __init__( self, backend, exit_status, command, stdout_bytes, stderr_bytes, stdout=None, stderr=None, ): self.exit_status = exit_status self._stdout_bytes = stdout_bytes self._stderr_bytes = stderr_bytes self._stdout = stdout self._stderr = stderr self.command = command self._backend = backend super().__init__() @property def succeeded(self): return self.exit_status == 0 @property def failed(self): return self.exit_status != 0 @property def rc(self): return self.exit_status @property def stdout(self): if self._stdout is None: self._stdout = self._backend.decode(self._stdout_bytes) return self._stdout @property def stderr(self): if self._stderr is None: self._stderr = self._backend.decode(self._stderr_bytes) return self._stderr @property def stdout_bytes(self): if self._stdout_bytes is None: self._stdout_bytes = self._backend.encode(self._stdout) return self._stdout_bytes @property def stderr_bytes(self): if self._stderr_bytes is None: self._stderr_bytes = self._backend.encode(self._stderr) return self._stderr_bytes def __repr__(self): return ( "CommandResult(command={!r}, exit_status={}, stdout={!r}, " "stderr={!r})" ).format( self.command, self.exit_status, self._stdout_bytes or self._stdout, self._stderr_bytes or self._stderr, ) class BaseBackend(metaclass=abc.ABCMeta): HAS_RUN_SALT = False HAS_RUN_ANSIBLE = False @property @classmethod @abc.abstractmethod def NAME(cls) -> str: raise NotImplementedError() def __init__(self, hostname, sudo=False, sudo_user=None, *args, **kwargs): self._encoding = None self._host = None self.hostname = hostname self.sudo = sudo self.sudo_user = sudo_user super().__init__() def set_host(self, host): self._host = host @classmethod
Apache License 2.0
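A toy subclass, not part of testinfra, showing that get_connection_type simply surfaces the class-level NAME attribute (assumes pytest-testinfra is installed; the backend name "fake" is made up).

from testinfra.backend.base import BaseBackend


class FakeBackend(BaseBackend):
    NAME = "fake"   # hypothetical backend name


print(FakeBackend.get_connection_type())   # -> "fake"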
docusign/docusign-python-client
docusign_esign/models/event_notification.py
EventNotification.include_envelope_void_reason
python
def include_envelope_void_reason(self):
    return self._include_envelope_void_reason
Gets the include_envelope_void_reason of this EventNotification.  # noqa: E501

When set to **true**, this tells the Connect Service to include the void reason, as entered by the person that voided the envelope, in the message.  # noqa: E501

:return: The include_envelope_void_reason of this EventNotification.  # noqa: E501
:rtype: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/event_notification.py#L282-L290
import pprint import re import six from docusign_esign.client.configuration import Configuration class EventNotification(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'delivery_mode': 'str', 'envelope_events': 'list[EnvelopeEvent]', 'event_data': 'ConnectEventData', 'include_certificate_of_completion': 'str', 'include_certificate_with_soap': 'str', 'include_document_fields': 'str', 'include_documents': 'str', 'include_envelope_void_reason': 'str', 'include_hmac': 'str', 'include_sender_account_as_custom_field': 'str', 'include_time_zone': 'str', 'logging_enabled': 'str', 'recipient_events': 'list[RecipientEvent]', 'require_acknowledgment': 'str', 'sign_message_with_x509_cert': 'str', 'soap_name_space': 'str', 'url': 'str', 'use_soap_interface': 'str' } attribute_map = { 'delivery_mode': 'deliveryMode', 'envelope_events': 'envelopeEvents', 'event_data': 'eventData', 'include_certificate_of_completion': 'includeCertificateOfCompletion', 'include_certificate_with_soap': 'includeCertificateWithSoap', 'include_document_fields': 'includeDocumentFields', 'include_documents': 'includeDocuments', 'include_envelope_void_reason': 'includeEnvelopeVoidReason', 'include_hmac': 'includeHMAC', 'include_sender_account_as_custom_field': 'includeSenderAccountAsCustomField', 'include_time_zone': 'includeTimeZone', 'logging_enabled': 'loggingEnabled', 'recipient_events': 'recipientEvents', 'require_acknowledgment': 'requireAcknowledgment', 'sign_message_with_x509_cert': 'signMessageWithX509Cert', 'soap_name_space': 'soapNameSpace', 'url': 'url', 'use_soap_interface': 'useSoapInterface' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._delivery_mode = None self._envelope_events = None self._event_data = None self._include_certificate_of_completion = None self._include_certificate_with_soap = None self._include_document_fields = None self._include_documents = None self._include_envelope_void_reason = None self._include_hmac = None self._include_sender_account_as_custom_field = None self._include_time_zone = None self._logging_enabled = None self._recipient_events = None self._require_acknowledgment = None self._sign_message_with_x509_cert = None self._soap_name_space = None self._url = None self._use_soap_interface = None self.discriminator = None setattr(self, "_{}".format('delivery_mode'), kwargs.get('delivery_mode', None)) setattr(self, "_{}".format('envelope_events'), kwargs.get('envelope_events', None)) setattr(self, "_{}".format('event_data'), kwargs.get('event_data', None)) setattr(self, "_{}".format('include_certificate_of_completion'), kwargs.get('include_certificate_of_completion', None)) setattr(self, "_{}".format('include_certificate_with_soap'), kwargs.get('include_certificate_with_soap', None)) setattr(self, "_{}".format('include_document_fields'), kwargs.get('include_document_fields', None)) setattr(self, "_{}".format('include_documents'), kwargs.get('include_documents', None)) setattr(self, "_{}".format('include_envelope_void_reason'), kwargs.get('include_envelope_void_reason', None)) setattr(self, "_{}".format('include_hmac'), kwargs.get('include_hmac', None)) setattr(self, "_{}".format('include_sender_account_as_custom_field'), kwargs.get('include_sender_account_as_custom_field', None)) setattr(self, 
"_{}".format('include_time_zone'), kwargs.get('include_time_zone', None)) setattr(self, "_{}".format('logging_enabled'), kwargs.get('logging_enabled', None)) setattr(self, "_{}".format('recipient_events'), kwargs.get('recipient_events', None)) setattr(self, "_{}".format('require_acknowledgment'), kwargs.get('require_acknowledgment', None)) setattr(self, "_{}".format('sign_message_with_x509_cert'), kwargs.get('sign_message_with_x509_cert', None)) setattr(self, "_{}".format('soap_name_space'), kwargs.get('soap_name_space', None)) setattr(self, "_{}".format('url'), kwargs.get('url', None)) setattr(self, "_{}".format('use_soap_interface'), kwargs.get('use_soap_interface', None)) @property def delivery_mode(self): return self._delivery_mode @delivery_mode.setter def delivery_mode(self, delivery_mode): self._delivery_mode = delivery_mode @property def envelope_events(self): return self._envelope_events @envelope_events.setter def envelope_events(self, envelope_events): self._envelope_events = envelope_events @property def event_data(self): return self._event_data @event_data.setter def event_data(self, event_data): self._event_data = event_data @property def include_certificate_of_completion(self): return self._include_certificate_of_completion @include_certificate_of_completion.setter def include_certificate_of_completion(self, include_certificate_of_completion): self._include_certificate_of_completion = include_certificate_of_completion @property def include_certificate_with_soap(self): return self._include_certificate_with_soap @include_certificate_with_soap.setter def include_certificate_with_soap(self, include_certificate_with_soap): self._include_certificate_with_soap = include_certificate_with_soap @property def include_document_fields(self): return self._include_document_fields @include_document_fields.setter def include_document_fields(self, include_document_fields): self._include_document_fields = include_document_fields @property def include_documents(self): return self._include_documents @include_documents.setter def include_documents(self, include_documents): self._include_documents = include_documents @property
MIT License
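Brief usage sketch for the property above. It assumes the docusign_esign package is installed and exports EventNotification at the top level; the webhook URL is made up.

from docusign_esign import EventNotification

notification = EventNotification(
    url="https://example.com/webhook",          # hypothetical listener URL
    include_envelope_void_reason="true",
)
print(notification.include_envelope_void_reason)   # -> "true"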
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/light/lifx_legacy.py
LIFXLight.color_temp
python
def color_temp(self):
    temperature = color_temperature_kelvin_to_mired(self._kel)
    _LOGGER.debug("color_temp: %d", temperature)
    return temperature
Return the color temperature.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/light/lifx_legacy.py#L173-L178
import logging import voluptuous as vol from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, ATTR_TRANSITION, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_COLOR, SUPPORT_TRANSITION, Light, PLATFORM_SCHEMA) from homeassistant.helpers.event import track_time_change from homeassistant.util.color import ( color_temperature_mired_to_kelvin, color_temperature_kelvin_to_mired) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['liffylights==0.9.4'] BYTE_MAX = 255 CONF_BROADCAST = 'broadcast' CONF_SERVER = 'server' SHORT_MAX = 65535 TEMP_MAX = 9000 TEMP_MAX_HASS = 500 TEMP_MIN = 2500 TEMP_MIN_HASS = 154 SUPPORT_LIFX = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR | SUPPORT_TRANSITION) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_SERVER): cv.string, vol.Optional(CONF_BROADCAST): cv.string, }) def setup_platform(hass, config, add_devices, discovery_info=None): server_addr = config.get(CONF_SERVER) broadcast_addr = config.get(CONF_BROADCAST) lifx_library = LIFX(add_devices, server_addr, broadcast_addr) track_time_change(hass, lifx_library.poll, second=[10, 40]) lifx_library.probe() class LIFX(object): def __init__(self, add_devices_callback, server_addr=None, broadcast_addr=None): import liffylights self._devices = [] self._add_devices_callback = add_devices_callback self._liffylights = liffylights.LiffyLights( self.on_device, self.on_power, self.on_color, server_addr, broadcast_addr) def find_bulb(self, ipaddr): bulb = None for device in self._devices: if device.ipaddr == ipaddr: bulb = device break return bulb def on_device(self, ipaddr, name, power, hue, sat, bri, kel): bulb = self.find_bulb(ipaddr) if bulb is None: _LOGGER.debug("new bulb %s %s %d %d %d %d %d", ipaddr, name, power, hue, sat, bri, kel) bulb = LIFXLight( self._liffylights, ipaddr, name, power, hue, sat, bri, kel) self._devices.append(bulb) self._add_devices_callback([bulb]) else: _LOGGER.debug("update bulb %s %s %d %d %d %d %d", ipaddr, name, power, hue, sat, bri, kel) bulb.set_power(power) bulb.set_color(hue, sat, bri, kel) bulb.schedule_update_ha_state() def on_color(self, ipaddr, hue, sat, bri, kel): bulb = self.find_bulb(ipaddr) if bulb is not None: bulb.set_color(hue, sat, bri, kel) bulb.schedule_update_ha_state() def on_power(self, ipaddr, power): bulb = self.find_bulb(ipaddr) if bulb is not None: bulb.set_power(power) bulb.schedule_update_ha_state() def poll(self, now): self.probe() def probe(self, address=None): self._liffylights.probe(address) class LIFXLight(Light): def __init__(self, liffy, ipaddr, name, power, hue, saturation, brightness, kelvin): _LOGGER.debug("LIFXLight: %s %s", ipaddr, name) self._liffylights = liffy self._ip = ipaddr self.set_name(name) self.set_power(power) self.set_color(hue, saturation, brightness, kelvin) @property def should_poll(self): return False @property def name(self): return self._name @property def ipaddr(self): return self._ip @property def hs_color(self): return (self._hue / 65535 * 360, self._sat / 65535 * 100) @property def brightness(self): brightness = int(self._bri / (BYTE_MAX + 1)) _LOGGER.debug("brightness: %d", brightness) return brightness @property
MIT License
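The property above leans on Home Assistant's Kelvin-to-mired conversion. The stand-alone helper below restates that relation (mired = 1,000,000 / Kelvin) without importing Home Assistant; its rounding may differ slightly from the library's own implementation, and the helper name is mine.

def kelvin_to_mired(kelvin):
    # mired (micro reciprocal degrees) = 1,000,000 / colour temperature in Kelvin
    return 1000000 / kelvin


print(kelvin_to_mired(4000))   # 250.0 mired, a neutral white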
sdu-cfei/modest-py
modestpy/estim/ga/ga.py
GA.plot_error_evo
python
def plot_error_evo(self, file=None):
    fig, ax = plt.subplots()
    ax.plot(self.fittest_errors)
    ax.set_xlabel('Generation')
    ax.set_ylabel('Error (NRMSE)')
    if file:
        fig = ax.get_figure()
        fig.set_size_inches(GA.FIG_SIZE)
        fig.savefig(file, dpi=GA.FIG_DPI)
    return ax
Returns a plot of the error evolution.

:param file: string (path to the file, if None, file not created)
:return: Axes
https://github.com/sdu-cfei/modest-py/blob/dc14091fb8c20a8b3fa5ab33bbf597c0b566ba0a/modestpy/estim/ga/ga.py#L309-L323
import logging import os import random import pandas as pd import numpy as np import matplotlib.pyplot as plt import pyDOE as doe from modestpy.estim.ga import algorithm import modestpy.estim.plots as plots from modestpy.estim.estpar import EstPar from modestpy.estim.ga.population import Population class GA(object): FIG_DPI = 150 FIG_SIZE = (15, 10) NAME = 'GA' METHOD = '_method_' ITER = '_iter_' ERR = '_error_' def __init__(self, fmu_path, inp, known, est, ideal, maxiter=100, tol=0.001, look_back=10, pop_size=40, uniformity=0.5, mut=0.05, mut_inc=0.3, trm_size=6, ftype='RMSE', init_pop=None, lhs=False): self.logger = logging.getLogger(type(self).__name__) deprecated_msg = 'This GA implementation is deprecated. Use MODESTGA instead.' print(deprecated_msg) self.logger.warning('This GA implementation is deprecated. Use MODESTGA instead.') self.logger.info('GA constructor invoked') assert inp.index.equals(ideal.index), 'inp and ideal indexes are not matching' algorithm.UNIFORM_RATE = uniformity algorithm.MUT_RATE = mut algorithm.MUT_RATE_INC = mut_inc algorithm.TOURNAMENT_SIZE = int(trm_size) self.max_generations = maxiter self.tol = tol self.look_back = look_back self.fittest_errors = list() self.all_estim_and_err = pd.DataFrame() estpars = list() for key in sorted(est.keys()): self.logger.info( 'Add {} (initial guess={}) to estimated parameters' .format(key, est[key][0]) ) estpars.append(EstPar(name=key, value=est[key][0], lo=est[key][1], hi=est[key][2])) known_df = pd.DataFrame() for key in known: assert known[key] is not None, 'None is not allowed in known parameters (parameter {})' .format(key) known_df[key] = [known[key]] self.logger.info('Known parameters:\n{}'.format(str(known_df))) if lhs: self.logger.info('LHS initialization') init_pop = GA._lhs_init(par_names=[p.name for p in estpars], bounds=[(p.lo, p.hi) for p in estpars], samples=pop_size, criterion='c') self.logger.debug('Current population:\n{}'.format(str(init_pop))) elif init_pop is None: self.logger.info( 'No initial population provided, one individual will be based ' 'on the initial guess and the other will be random' ) init_pop = pd.DataFrame({k: [est[k][0]] for k in est}) self.logger.debug('Current population:\n{}'.format(str(init_pop))) if init_pop is not None: missing = pop_size - init_pop.index.size self.logger.debug('Missing individuals = {}'.format(missing)) if missing > 0: self.logger.debug('Add missing individuals (random)...') while missing > 0: init_pop = init_pop.append({ key: random.random() * (est[key][2] - est[key][1]) + est[key][1] for key in sorted(est.keys()) }, ignore_index=True) missing -= 1 self.logger.debug('Current population:\n{}'.format(str(init_pop))) self.logger.debug('Instantiate Population ') self.pop = Population(fmu_path=fmu_path, pop_size=pop_size, inp=inp, known=known_df, est=estpars, ideal=ideal, init=True, ftype=ftype, init_pop=init_pop) def estimate(self): self.evolution() return self.get_estimates() def evolution(self): gen_count = 1 err_decreasing = True self.logger.info('Generation ' + str(gen_count)) self.logger.info(str(self.pop)) self._update_res(gen_count) gen_count += 1 while (gen_count <= self.max_generations) and err_decreasing: self.pop = algorithm.evolve(self.pop) self._update_res(gen_count) self.logger.info('Generation ' + str(gen_count)) self.logger.info(str(self.pop)) if len(self.fittest_errors) > self.look_back: err_past = self.fittest_errors[-self.look_back] err_now = self.fittest_errors[-1] err_decrease = err_past - err_now if err_decrease < self.tol: self.logger.info( 
'Error decrease smaller than tol: {0:.5f} < {1:.5f}' .format(err_decrease, self.tol)) self.logger.info('Stopping evolution...') err_decreasing = False else: self.logger.info( "'Look back' error decrease = {0:.5f} > " "tol = {1:.5f}\n" .format(err_decrease, self.tol)) gen_count += 1 self.logger.info('FITTEST PARAMETERS:\n{}' .format(self.get_estimates())) return self.pop.get_fittest() def get_estimates(self, as_dict=False): return self.pop.get_fittest_estimates() def get_error(self): return self.pop.get_fittest_error() def get_errors(self): return self.fittest_errors def get_sim_res(self): return self.pop.get_fittest().result.copy() def get_full_solution_trajectory(self): df = self.all_estim_and_err.copy() summary = pd.DataFrame() for i in range(1, df[GA.ITER].max() + 1): summary = summary.append(self._get_best_from_gen(i)) summary[GA.ITER] = summary[GA.ITER].astype(int) summary = summary.set_index(GA.ITER) summary[GA.METHOD] = GA.NAME return summary def get_plots(self): plots = list() plots.append({'name': 'GA', 'axes': self.plot_pop_evo()}) return plots def save_plots(self, workdir): self.plot_comparison(os.path.join(workdir, 'ga_comparison.png')) self.plot_error_evo(os.path.join(workdir, 'ga_error_evo.png')) self.plot_parameter_evo(os.path.join(workdir, 'ga_param_evo.png')) self.plot_pop_evo(os.path.join(workdir, 'ga_pop_evo.png'))
BSD 2-Clause Simplified License
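Standalone sketch of what plot_error_evo draws: the NRMSE of the fittest individual per generation. The error values below are made up; on a fitted GA instance the same figure comes from ga.plot_error_evo(file=...).

import matplotlib
matplotlib.use("Agg")            # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt

fittest_errors = [0.41, 0.30, 0.22, 0.19, 0.185, 0.184]   # made-up error values

fig, ax = plt.subplots()
ax.plot(fittest_errors)
ax.set_xlabel('Generation')
ax.set_ylabel('Error (NRMSE)')
fig.set_size_inches((15, 10))                 # GA.FIG_SIZE
fig.savefig('ga_error_evo.png', dpi=150)      # GA.FIG_DPI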
01walid/py-dz-phone-number
dz_phone_number/dz_phones.py
DZPhoneNumber.__setattr__
python
def __setattr__(self, *args):
    raise TypeError(
        "DZPhoneNumber is an immutable object. You cannot set/delete its attributes"
    )
Disables setting attributes
https://github.com/01walid/py-dz-phone-number/blob/4149faf0e94d172e1b108139ea0ca33a1011a5f8/dz_phone_number/dz_phones.py#L214-L219
from __future__ import annotations import re from dataclasses import dataclass, field from enum import Flag, IntFlag, unique from typing import List, Union, Callable, Optional, Any from .enums import CountryCode, LandlinePrefix, MobileOperator from .exceptions import InvalidDZPhoneNumber class DZPhoneNumber: __slots__ = ["number", "indicative", "operator_or_region", "suffix", "raw_number"] def __init__( self, number: Optional[str] = None, indicative: Optional[str] = "0", operator_or_region: Optional[str] = None, suffix: Optional[str] = None, ): number = number or f"{indicative}{operator_or_region}{suffix}" self._set_number(number) def is_mobile(self) -> bool: return isinstance(self.operator_or_region, MobileOperator) def is_landline(self) -> bool: return isinstance(self.operator_or_region, LandlinePrefix) def replace( self, indicative: Union[str, None] = None, operator_or_region: Union[str, None] = None, suffix: Optional[str] = None, ) -> DZPhoneNumber: indicative = indicative or self.indicative.value operator_or_region = operator_or_region or self.operator_or_region.value suffix = suffix or self.suffix return DZPhoneNumber( indicative=indicative, operator_or_region=operator_or_region, suffix=suffix ) def get_pattern(self) -> re.Pattern: landline_re = "|".join([str(o) for o in LandlinePrefix.all()]) mobile_re = "|".join([str(o) for o in MobileOperator.all()]) global_re = rf"\A(00213|\+213|0)({landline_re})?(?(2)([0-9]{{6}})|({mobile_re})([0-9]{{8}}))\Z" return re.compile(global_re) def _is_(self, operator_or_region: str) -> Callable: def is_match(): return self.operator_or_region.is_of_equal_value(operator_or_region) return is_match def with_number(self, number): return self.from_string(number) @classmethod def from_string(cls, number: str) -> DZPhoneNumber: return cls(number) def _set_number(self, number: str): object.__setattr__(self, "raw_number", number) if not isinstance(number, str): self.__raise_invalid() number = self._normalize_number(number) pattern = self.get_pattern() it_matches = pattern.match(number) if it_matches: indicative, operator_or_region, suffix = tuple( x for x in it_matches.groups() if x is not None ) object.__setattr__(self, "indicative", CountryCode(indicative)) object.__setattr__( self, "operator_or_region", self.get_operator_or_region_by_number(operator_or_region), ) object.__setattr__(self, "suffix", suffix) else: self.__raise_invalid() object.__setattr__(self, "number", number) def get_operator_or_region_by_number( self, number: Union[str, int] ) -> Union[MobileOperator, LandlinePrefix]: number = int(number) return ( MobileOperator(number) if number in MobileOperator.all() else LandlinePrefix(number) ) def _normalize_number(self, value: str) -> str: number = value.strip() if number.startswith("-") or number.endswith("-"): self.__raise_invalid() chars_to_remove = {"-": 5, " ": 13, "(": 1, ")": 1, ".": 5} for char, count in chars_to_remove.items(): number = number.replace(char, "", count) return number def __raise_invalid(self): raise InvalidDZPhoneNumber( f"{self.raw_number} is invalid Algerian phone number" ) def __getattr__(self, name: str) -> Any: if name.startswith("is_"): operator_or_region_str = name.replace("is_", "").upper() return self._is_(operator_or_region_str) raise AttributeError def __eq__(self, other: object) -> bool: if not isinstance(other, DZPhoneNumber): raise TypeError("Expected object of type DZPhoneNumber got", type(other)) return ( self.operator_or_region.value == other.operator_or_region.value and self.suffix == other.suffix ) def 
__hash__(self): return hash((self.indicative, self.operator_or_region, self.suffix)) def __str__(self) -> str: return self.number def __repr__(self) -> str: return f"<{self.__class__.__name__}:{self.indicative.LOCAL} - {self.operator_or_region.describe()} - {self.suffix}>"
MIT License
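Sketch of the immutability guard in action. Both the import path and the sample number below are assumptions: the class is assumed to be exported from the dz_phone_number package, and the number is a hypothetical Algerian mobile number.

from dz_phone_number import DZPhoneNumber   # assumed import path

number = DZPhoneNumber("0550123456")        # hypothetical mobile number
try:
    number.suffix = "00000000"              # blocked by __setattr__
except TypeError as exc:
    print(exc)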
rensaproject/rensapy
rensapy/src/en/parser/nltk_lite/draw/plot.py
PlotFrameI.create_zoom_marker
python
def create_zoom_marker(self):
    raise AssertionError, 'PlotFrameI is an interface'
mark the zoom region, for drag-zooming
https://github.com/rensaproject/rensapy/blob/e7bb0bd248c23353226d4582eb3cc15e0de168ca/rensapy/src/en/parser/nltk_lite/draw/plot.py#L92-L94
__all__ = ['Plot']

from types import *
from math import log, log10, ceil, floor
import Tkinter, sys, time
from en.parser.nltk_lite.draw import ShowText, in_idle


class PlotFrameI(object):
    def postscript(self, filename):
        raise AssertionError, 'PlotFrameI is an interface'

    def config_axes(self, xlog, ylog):
        raise AssertionError, 'PlotFrameI is an interface'

    def invtransform(self, x, y):
        raise AssertionError, 'PlotFrameI is an interface'

    def zoom(self, i1, j1, i2, j2):
        raise AssertionError, 'PlotFrameI is an interface'

    def visible_area(self):
        raise AssertionError, 'PlotFrameI is an interface'
MIT License
allexks/py-polynomial
polynomial/core.py
Polynomial.__isub__
python
def __isub__(self, other):
    return self.try_set_self(_sub(self.terms, other.terms))
Implement self -= other.
https://github.com/allexks/py-polynomial/blob/b2132f8b4c24227cfb2dbb2f61cf26ce3978c3a0/polynomial/core.py#L490-L492
from copy import deepcopy from itertools import chain from math import inf import string class PolynomialError(Exception): class DegreeError(PolynomialError): class TermError(PolynomialError): def _accepts_many_arguments(function): def decorated(self, *args, **kwargs): if len(args) == 1 and not isinstance(args[0], (int, float, complex)): function(self, args[0], kwargs) else: function(self, args, kwargs) return decorated def _extract_polynomial(method): def decorated(self, other): if isinstance(other, Polynomial): return method(self, other) if isinstance(other, (int, float, complex)): return method(self, Constant(other)) raise ValueError( "{0}.{1} requires a Polynomial or number, got {2}." .format( self.__class__.__name__, method.__name__, type(other).__name__ ) ) return decorated def _get_more_permissive_class(a, b): a_cls = a.__class__ b_cls = b.__class__ return b_cls if issubclass(a_cls, b_cls) else a_cls def _trim(_vector): if not _vector or len(_vector) == 1: return _vector ind = len(_vector) while _vector[ind - 1] == 0 and ind > 0: ind -= 1 return _vector[:ind] def _to_terms(vec): s_d = _degree(vec, tuples=False) return [(coeff, s_d - deg) for deg, coeff in enumerate(reversed(vec)) if coeff != 0] def _degree(vec, tuples=True): if not vec: return -inf if tuples: return max(vec, key=lambda term: term[1] if term[0] else -inf)[1] return len(vec) - 1 def _mul(lhs, rhs): if not lhs or not rhs: return [(0, 0)] deg = _degree(lhs) + _degree(rhs) + 1 res = [0] * deg for lcoeff, ldeg in lhs: for rcoeff, rdeg in rhs: res[ldeg + rdeg] += lcoeff * rcoeff return _to_terms(res) def _add(lhs, rhs): if not lhs: return rhs if not rhs: return lhs deg = max(_degree(lhs), _degree(rhs)) + 1 res = [0] * deg for coeff, deg in chain(lhs, rhs): res[deg] += coeff return _to_terms(res) def _neg(vec): return [(-coeff, deg) for coeff, deg in vec] def _sub(lhs, rhs): if not lhs: return _neg(rhs) if not rhs: return lhs deg = max(_degree(lhs), _degree(rhs)) + 1 res = [0] * deg for coeff, deg in lhs: res[deg] += coeff for coeff, deg in rhs: res[deg] -= coeff return _to_terms(res) class Polynomial: @_accepts_many_arguments def __init__(self, iterable, from_monomials=False): if from_monomials: def monomial_to_tuple(monomial): if isinstance(monomial, Monomial): return monomial.a, monomial.degree if len(monomial) == 2: return monomial raise TypeError("{} cannot be a monomial.". 
format(monomial)) self.terms = [monomial_to_tuple(monomial) for monomial in iterable] else: self._vector = _trim(list(iterable)[::-1]) @classmethod def zero_instance(cls): return Polynomial() def _trim(self): self._vector = _trim(self._vector) @property def degree(self): if not self: return -inf return len(self._vector) - 1 @property def derivative(self): return self.nth_derivative() def nth_derivative(self, n=1): if not isinstance(n, int) or n < 0: raise ValueError( "n must be a non-negative integer (got {0})".format(n) ) if not self or n > self.degree: return self.zero_instance() if n == 0: return deepcopy(self) if n == 1: factors = range(1, self.degree + 1) else: d = self.degree - n + 1 factorial_term = n + 1 factors = [1] * d for i in range(1, factorial_term): factors[0] *= i for i in range(1, d): factors[i] = (factors[i - 1] // i) * factorial_term factorial_term += 1 return Polynomial( [c * x for c, x in zip(self, reversed(factors))] ) def integral(self, a, b): res = self._indefinite_integral return res.calculate(b) - res.calculate(a) @property def _indefinite_integral(self): if not self: return Polynomial() return Polynomial( [c/x for c, x in zip(self, range(self.degree + 1, 0, -1))] + [0] ) @property def terms(self): s_d = self.degree return [(coeff, s_d - deg) for deg, coeff in enumerate(self) if coeff != 0] @terms.setter def terms(self, terms): if not terms: _vector = [0] else: list_len = max(terms, key=lambda x: x[1])[1] + 1 _vector = [0] * list_len for coeff, deg in terms: _vector[deg] += coeff _vector = _trim(_vector) self._vector = _vector @property def monomials(self): return [Monomial(k, deg) for k, deg in self.terms] def calculate(self, x): if self.degree < 0: return 0 return sum(ak * (x ** k) for ak, k in self.terms) def __getattr__(self, name): if len(name) != 1: return object.__getattribute__(self, name) if name in string.ascii_letters: return self[self.degree - ord(name.lower()) + ord('a')] raise AttributeError("attribute {0} is not defined for Polynomial." .format(name)) def __setattr__(self, name, new_value): if len(name) != 1: object.__setattr__(self, name, new_value) elif name in string.ascii_letters: self[self.degree - ord(name.lower()) + ord('a')] = new_value else: raise AttributeError("attribute {0} is not defined for Polynomial." .format(name)) def __getitem__(self, degree): if isinstance(degree, slice): return self._vector[degree] if degree == -inf and self.degree == -inf: return 0 if degree > self.degree or degree < 0: raise IndexError("Attempt to get coefficient of term with \ degree {0} of a {1}-degree polynomial".format(degree, self.degree)) return self._vector[degree] def __setitem__(self, degree, new_value): if isinstance(degree, slice): self._vector[degree] = new_value elif degree == -inf: if self.degree == -inf: self._vector = [new_value] else: raise IndexError( "Can not set term with degree -inf on a" " non-zero polynomial." 
) elif degree > self.degree: raise IndexError("Attempt to set coefficient of term with \ degree {0} of a {1}-degree polynomial".format(degree, self.degree)) else: self._vector[degree] = new_value self._trim() def __iter__(self): return reversed(self._vector) def __repr__(self): if not self: return "Polynomial()" terms = ', '.join([repr(ak) for ak in self]) return "Polynomial({0})".format(terms) def __str__(self): if not self: return "0" def components(ak, k, is_leading): ak = str(ak) if ak[0] == "-": ak = ak[1:] sign = "-" if is_leading else "- " else: sign = "" if is_leading else "+ " ak = "" if ak == "1" and k != 0 else ak if k == 0: p, k = "", "" elif k == 1: p, k = "x", "" else: p = "x^" return sign, ak, p, k s_d = self.degree terms = ["{0}{1}{2}{3}". format(*components(ak, k, k == s_d)) for ak, k in self.terms] return " ".join(terms) @_extract_polynomial def __eq__(self, other): if other == 0: return not self return self.degree == other.degree and self.terms == other.terms @_extract_polynomial def __ne__(self, other): if other == 0: return bool(self) return self.degree != other.degree or self.terms != other.terms def __bool__(self): self._trim() if not self._vector: return False if len(self._vector) > 1: return True return self._vector[0] != 0 @_extract_polynomial def __add__(self, other): if not self: return deepcopy(other) if not other: return deepcopy(self) return self.__class__().try_set_self( _add(self.terms, other.terms) ) @_extract_polynomial def __radd__(self, other): return self + other @_extract_polynomial def __iadd__(self, other): return self.try_set_self(_add(self.terms, other.terms)) @_extract_polynomial def __mul__(self, other): if not self or not other: return _get_more_permissive_class(self, other).zero_instance() ret_val = deepcopy(self) ret_val *= other return ret_val @_extract_polynomial def __rmul__(self, other): return self * other @_extract_polynomial def __imul__(self, other): return self.try_set_self(_mul(self.terms, other.terms)) def __pos__(self): self._trim() return deepcopy(self) def __neg__(self): ret_val = deepcopy(self) ret_val._vector = [-x for x in _trim(self._vector)] return ret_val @_extract_polynomial def __sub__(self, other): return self + (-other) @_extract_polynomial def __rsub__(self, other): return other + (-self) @_extract_polynomial
MIT License
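In-place subtraction dispatches to __isub__ above. A small sketch, assuming py-polynomial is installed and Polynomial is importable from the polynomial package:

from polynomial import Polynomial   # assumed top-level export

p = Polynomial(1, -2, 3)   # x^2 - 2x + 3
q = Polynomial(1, 0, 1)    # x^2 + 1
p -= q                     # Polynomial.__isub__ -> _sub on the term lists
print(p)                   # -2x + 2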
chanedwin/pydistinct
pydistinct/sampling.py
sample_gaussian
python
def sample_gaussian(cov=200.0, population_size=1000, sample_size=500, seed=None):
    np.random.seed(seed)
    population = [int(i) for i in np.random.normal(0, cov, population_size)]
    sample = population[:sample_size]
    return {"sample": sample,
            "sample_distinct": len(set(sample)),
            "ground_truth": len(set(population))}
generate a ground truth population of distinct integers from a gaussian distribution
(rounded to nearest int), then draw a sample sequence of integers without replacement
from the population. Integers created could be negative

:param cov: covariance of gaussian distribution
:type cov: float
:param population_size: ground truth number of distinct integers in population, with different probabilities
:type population_size: int
:param sample_size: sample size of sequence observed
:type sample_size: int
:param seed: random seed for numpy rng generator
:type seed: int
:returns : dictionary with sampled sequence[sample], actual number of distinct values[ground_truth],
    sample number of distinct values[sample_distinct]
https://github.com/chanedwin/pydistinct/blob/afecd6c9226a9eee3bad3e2610c34f32026db6f7/pydistinct/sampling.py#L24-L47
import numpy as np


def sample_uniform(n_distinct_integers=1000, sample_size=500, seed=None):
    np.random.seed(seed)
    sample = np.random.randint(1, n_distinct_integers, sample_size)
    return {"sample": sample,
            "sample_distinct": len(set(sample)),
            "ground_truth": n_distinct_integers}
MIT License
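Quick call of sample_gaussian as documented above. The module path follows the record's file location; installation of pydistinct is assumed.

from pydistinct.sampling import sample_gaussian

data = sample_gaussian(cov=200.0, population_size=1000, sample_size=500, seed=42)
print(data["sample_distinct"], data["ground_truth"])   # observed vs. true distinct counts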
deathbeds/pidgy
pidgy/parser.py
footnote
python
def footnote(state, start, end, silent=False):
    import mdit_py_plugins.footnote

    result = mdit_py_plugins.footnote.index.footnote_def(state, start, end, silent)
    if not result:
        return result
    return result
a footnote lexer that inlines the token in the token stream
https://github.com/deathbeds/pidgy/blob/3cb16655df8e02f7af345faa485be22f1757975e/pidgy/parser.py#L236-L243
import functools import io import re import markdown_it from . import tangle MAGIC = re.compile(r"^\s*%{2}") class Markdown(markdown_it.MarkdownIt): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) [ self.block.ruler.before( "code", "front_matter", __import__("functools").partial(frontMatter, x), {"alt": ["paragraph", "reference", "blockquote", "list"]}, ) for x in "-+" ] self.block.ruler.before("code", "doctest", doctest) self.block.ruler.disable("code") self.block.ruler.after("doctest", "code", code) import mdit_py_plugins.footnote self.block.ruler.after( "list", "footnote_def", footnote, {"alt": ["paragraph", "reference"]} ) self.block.ruler.disable("reference") self.block.ruler.after("footnote_def", "", reference) def init_env(self, src, env): env.setdefault("source", io.StringIO(src)) env.setdefault("unprocessed", []) env.setdefault("last_line", 0) def runner(self, name, src, env=None): from .lisp import LIT_HY_MATCH if env is None: env = {} self.init_env(src, env) if MAGIC.match(src) or LIT_HY_MATCH.match(src): self.renderer_cls = tangle.Null self.renderer = tangle.Null() else: self.renderer_cls = tangle.Python self.renderer = tangle.Python() return getattr(super(), name)(src, env) parse = functools.partialmethod(runner, "parse") render = functools.partialmethod(runner, "render") def __call__(self, src): return self.render("".join(src)).splitlines(True) def print(self, src, env=None): print(self.render(src, env)) def frontMatter(marker_str, state, start, end, silent, markers=("+++", "---")): from math import floor if state.tokens: return False while start < end: if state.isEmpty(start): start += 1 continue break else: return False marker = None line = state.getLines(start, start + 1, 0, True) if not state.getLines(start, start + 1, 0, True).startswith(markers): return False next = start + 1 marker = markers[line.startswith(markers[1])] while next < end: line = state.getLines(next, next + 1, 0, True) next += 1 if line.startswith(marker): break else: return False old_parent = state.parentType old_line_max = state.lineMax state.parentType = "container" state.lineMax = next token = state.push("front_matter", "", 0) token.hidden = True token.markup = marker token.content = state.src[state.bMarks[start] : state.eMarks[next]] token.block = True state.parentType = old_parent state.lineMax = old_line_max state.line = next token.map = [start, state.line] return True def doctest(state, start, end, silent=False): if not state.getLines(start, start + 1, 0, True).lstrip().startswith(">>> "): return False indent = state.bMarks[start] next = start + 1 while next < end: if state.isEmpty(next): break if state.bMarks[next] < indent: break next += 1 state.line = next token = state.push("doctest", "code", 0) token.content = state.getLines(start, next, 0, True) token.map = [start, state.line] return True def code(state, start, end, silent=False): if state.sCount[start] - state.blkIndent < 4: return False indent = state.bMarks[start] last = next = start + 1 while next < end: if state.isEmpty(next): next += 1 continue if state.sCount[next] - state.blkIndent >= 4: if state.getLines(next, next + 1, 0, True).lstrip().startswith(">>> "): break next += 1 last = next continue break state.line = last token = state.push("code_block", "code", 0) token.content = state.getLines(start, last, 4 + state.blkIndent, True) token.map = [start, state.line] return True def reference(state, start, end, silent=False): result = markdown_it.rules_block.reference(state, start, end, silent) if not result: 
return result for key, value in sorted( state.env["references"].items(), key=lambda x: x[1]["map"][0] ): token = state.push("reference", "span", 0) token.content = state.getLines(*value["map"], 0, True) token.map = value["map"] token.meta.update(value, name=key) state.env["references"].clear() return result
BSD 3-Clause New or Revised License
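The rule above is registered on a markdown-it block ruler; this sketch mirrors the registration shown in the record's context. The import path and the sample input are assumptions, and markdown-it-py plus mdit_py_plugins are assumed to be installed.

import markdown_it
from pidgy.parser import footnote   # assumed import path

md = markdown_it.MarkdownIt()
md.block.ruler.after(
    "list", "footnote_def", footnote, {"alt": ["paragraph", "reference"]}
)
tokens = md.parse("[^note]: an inlined footnote definition\n")
print([token.type for token in tokens])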
daeilkim/refinery
refinery/bnpy/bnpy-dev/bnpy/data/AdmixMinibatchIteratorDB.py
AdmixMinibatchIteratorDB.__init__
python
def __init__(self, vocab_size=None, dbpath=None, nDocTotal=None,
             nBatch=None, nLap=20, dataorderseed=42):
    self.vocab_size = vocab_size
    self.nBatch = nBatch
    self.nLap = nLap
    self.dbpath = dbpath
    self.nDocTotal = nDocTotal
    self.nObsBatch = nDocTotal/nBatch

    self.curLapPos = -1
    self.lapID = 0
    self.dataorderseed = int(int(dataorderseed) % MAXSEED)
    self.obsIDByBatch = self.configObsIDsForEachBatch()
Constructor for creating an iterator over the batches of data
https://github.com/daeilkim/refinery/blob/0d5de8fc3d680a2c79bd0e9384b506229787c74f/refinery/bnpy/bnpy-dev/bnpy/data/AdmixMinibatchIteratorDB.py#L42-L61
import numpy as np
import sqlite3

from WordsData import WordsData

MAXSEED = 1000000


class AdmixMinibatchIteratorDB(object):
MIT License
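Hypothetical construction of the iterator above: the database path and sizes are made up, and the import path is only inferred from the record's file location.

# from bnpy.data.AdmixMinibatchIteratorDB import AdmixMinibatchIteratorDB  # inferred path

iterator = AdmixMinibatchIteratorDB(
    vocab_size=5000,
    dbpath="corpus.db",     # hypothetical SQLite file
    nDocTotal=10000,
    nBatch=100,
    nLap=20,
    dataorderseed=42,
)
print(iterator.nObsBatch)   # documents per batch (10000 / 100)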
solid-mechanics/matplotlib-4-abaqus
matplotlib/colorbar.py
ColorbarBase._central_N
python
def _central_N(self):
    nb = len(self._boundaries)
    if self.extend == 'both':
        nb -= 2
    elif self.extend in ('min', 'max'):
        nb -= 1
    return nb
number of boundaries **before** extension of ends
https://github.com/solid-mechanics/matplotlib-4-abaqus/blob/1117070fb824210c217c564ac36e69112ce70501/matplotlib/colorbar.py#L668-L675
from __future__ import print_function import warnings import numpy as np import matplotlib as mpl import matplotlib.artist as martist import matplotlib.cbook as cbook import matplotlib.collections as collections import matplotlib.colors as colors import matplotlib.contour as contour import matplotlib.cm as cm import matplotlib.gridspec as gridspec import matplotlib.lines as lines import matplotlib.patches as mpatches import matplotlib.path as mpath import matplotlib.ticker as ticker import matplotlib.transforms as mtrans from matplotlib import docstring make_axes_kw_doc = ''' ============= ==================================================== Property Description ============= ==================================================== *orientation* vertical or horizontal *fraction* 0.15; fraction of original axes to use for colorbar *pad* 0.05 if vertical, 0.15 if horizontal; fraction of original axes between colorbar and new image axes *shrink* 1.0; fraction by which to shrink the colorbar *aspect* 20; ratio of long to short dimensions *anchor* (0.0, 0.5) if vertical; (0.5, 1.0) if horizontal; the anchor point of the colorbar axes *panchor* (1.0, 0.5) if vertical; (0.5, 0.0) if horizontal; the anchor point of the colorbar parent axes. If False, the parent axes' anchor will be unchanged ============= ==================================================== ''' colormap_kw_doc = ''' ============ ==================================================== Property Description ============ ==================================================== *extend* [ 'neither' | 'both' | 'min' | 'max' ] If not 'neither', make pointed end(s) for out-of- range values. These are set for a given colormap using the colormap set_under and set_over methods. *extendfrac* [ *None* | 'auto' | length | lengths ] If set to *None*, both the minimum and maximum triangular colorbar extensions with have a length of 5% of the interior colorbar length (this is the default setting). If set to 'auto', makes the triangular colorbar extensions the same lengths as the interior boxes (when *spacing* is set to 'uniform') or the same lengths as the respective adjacent interior boxes (when *spacing* is set to 'proportional'). If a scalar, indicates the length of both the minimum and maximum triangular colorbar extensions as a fraction of the interior colorbar length. A two-element sequence of fractions may also be given, indicating the lengths of the minimum and maximum colorbar extensions respectively as a fraction of the interior colorbar length. *extendrect* [ *False* | *True* ] If *False* the minimum and maximum colorbar extensions will be triangular (the default). If *True* the extensions will be rectangular. *spacing* [ 'uniform' | 'proportional' ] Uniform spacing gives each discrete color the same space; proportional makes the space proportional to the data interval. *ticks* [ None | list of ticks | Locator object ] If None, ticks are determined automatically from the input. *format* [ None | format string | Formatter object ] If None, the :class:`~matplotlib.ticker.ScalarFormatter` is used. If a format string is given, e.g., '%.3f', that is used. An alternative :class:`~matplotlib.ticker.Formatter` object may be given instead. *drawedges* [ False | True ] If true, draw lines at color boundaries. ============ ==================================================== The following will probably be useful only in the context of indexed colors (that is, when the mappable has norm=NoNorm()), or other unusual circumstances. 
============ =================================================== Property Description ============ =================================================== *boundaries* None or a sequence *values* None or a sequence which must be of length 1 less than the sequence of *boundaries*. For each region delimited by adjacent entries in *boundaries*, the color mapped to the corresponding value in values will be used. ============ =================================================== ''' colorbar_doc = ''' Add a colorbar to a plot. Function signatures for the :mod:`~matplotlib.pyplot` interface; all but the first are also method signatures for the :meth:`~matplotlib.figure.Figure.colorbar` method:: colorbar(**kwargs) colorbar(mappable, **kwargs) colorbar(mappable, cax=cax, **kwargs) colorbar(mappable, ax=ax, **kwargs) arguments: *mappable* the :class:`~matplotlib.image.Image`, :class:`~matplotlib.contour.ContourSet`, etc. to which the colorbar applies; this argument is mandatory for the :meth:`~matplotlib.figure.Figure.colorbar` method but optional for the :func:`~matplotlib.pyplot.colorbar` function, which sets the default to the current image. keyword arguments: *cax* None | axes object into which the colorbar will be drawn *ax* None | parent axes object(s) from which space for a new colorbar axes will be stolen. If a list of axes is given they will all be resized to make room for the colorbar axes. *use_gridspec* False | If *cax* is None, a new *cax* is created as an instance of Axes. If *ax* is an instance of Subplot and *use_gridspec* is True, *cax* is created as an instance of Subplot using the grid_spec module. Additional keyword arguments are of two kinds: axes properties: %s colorbar properties: %s If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend* kwarg is included automatically. Note that the *shrink* kwarg provides a simple way to keep a vertical colorbar, for example, from being taller than the axes of the mappable to which the colorbar is attached; but it is a manual method requiring some trial and error. If the colorbar is too tall (or a horizontal colorbar is too wide) use a smaller value of *shrink*. For more precise control, you can manually specify the positions of the axes objects in which the mappable and the colorbar are drawn. In this case, do not use any of the axes properties kwargs. It is known that some vector graphics viewer (svg and pdf) renders white gaps between segments of the colorbar. This is due to bugs in the viewers not matplotlib. As a workaround the colorbar can be rendered with overlapping segments:: cbar = colorbar() cbar.solids.set_edgecolor("face") draw() However this has negative consequences in other circumstances. Particularly with semi transparent images (alpha < 1) and colorbar extensions and is not enabled by default see (issue #1188). returns: :class:`~matplotlib.colorbar.Colorbar` instance; see also its base class, :class:`~matplotlib.colorbar.ColorbarBase`. Call the :meth:`~matplotlib.colorbar.ColorbarBase.set_label` method to label the colorbar. 
''' % (make_axes_kw_doc, colormap_kw_doc) docstring.interpd.update(colorbar_doc=colorbar_doc) def _set_ticks_on_axis_warn(*args, **kw): warnings.warn("Use the colorbar set_ticks() method instead.") class ColorbarBase(cm.ScalarMappable): _slice_dict = {'neither': slice(0, None), 'both': slice(1, -1), 'min': slice(1, None), 'max': slice(0, -1)} def __init__(self, ax, cmap=None, norm=None, alpha=None, values=None, boundaries=None, orientation='vertical', ticklocation='auto', extend='neither', spacing='uniform', ticks=None, format=None, drawedges=False, filled=True, extendfrac=None, extendrect=False, label='', ): self.ax = ax self._patch_ax() if cmap is None: cmap = cm.get_cmap() if norm is None: norm = colors.Normalize() self.alpha = alpha cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm) self.values = values self.boundaries = boundaries self.extend = extend self._inside = self._slice_dict[extend] self.spacing = spacing self.orientation = orientation self.drawedges = drawedges self.filled = filled self.extendfrac = extendfrac self.extendrect = extendrect self.solids = None self.lines = list() self.outline = None self.patch = None self.dividers = None if ticklocation == 'auto': ticklocation = 'bottom' if orientation == 'horizontal' else 'right' self.ticklocation = ticklocation self.set_label(label) if cbook.iterable(ticks): self.locator = ticker.FixedLocator(ticks, nbins=len(ticks)) else: self.locator = ticks if format is None: if isinstance(self.norm, colors.LogNorm): self.formatter = ticker.LogFormatterMathtext() else: self.formatter = ticker.ScalarFormatter() elif cbook.is_string_like(format): self.formatter = ticker.FormatStrFormatter(format) else: self.formatter = format self.config_axis() self.draw_all() def _extend_lower(self): return self.extend in ('both', 'min') def _extend_upper(self): return self.extend in ('both', 'max') def _patch_ax(self): self.ax.set_xticks = _set_ticks_on_axis_warn self.ax.set_yticks = _set_ticks_on_axis_warn def draw_all(self): self._process_values() self._find_range() X, Y = self._mesh() C = self._values[:, np.newaxis] self._config_axes(X, Y) if self.filled: self._add_solids(X, Y, C) def config_axis(self): ax = self.ax if self.orientation == 'vertical': ax.xaxis.set_ticks([]) ax.yaxis.set_label_position(self.ticklocation) ax.yaxis.set_ticks_position(self.ticklocation) else: ax.yaxis.set_ticks([]) ax.xaxis.set_label_position(self.ticklocation) ax.xaxis.set_ticks_position(self.ticklocation) self._set_label() def update_ticks(self): ax = self.ax ticks, ticklabels, offset_string = self._ticker() if self.orientation == 'vertical': ax.yaxis.set_ticks(ticks) ax.set_yticklabels(ticklabels) ax.yaxis.get_major_formatter().set_offset_string(offset_string) else: ax.xaxis.set_ticks(ticks) ax.set_xticklabels(ticklabels) ax.xaxis.get_major_formatter().set_offset_string(offset_string) def set_ticks(self, ticks, update_ticks=True): if cbook.iterable(ticks): self.locator = ticker.FixedLocator(ticks, nbins=len(ticks)) else: self.locator = ticks if update_ticks: self.update_ticks() def set_ticklabels(self, ticklabels, update_ticks=True): if isinstance(self.locator, ticker.FixedLocator): self.formatter = ticker.FixedFormatter(ticklabels) if update_ticks: self.update_ticks() else: warnings.warn("set_ticks() must have been called.") def _config_axes(self, X, Y): ax = self.ax ax.set_frame_on(False) ax.set_navigate(False) xy = self._outline(X, Y) ax.update_datalim(xy) ax.set_xlim(*ax.dataLim.intervalx) ax.set_ylim(*ax.dataLim.intervaly) if self.outline is not None: 
self.outline.remove() self.outline = lines.Line2D( xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'], linewidth=mpl.rcParams['axes.linewidth']) ax.add_artist(self.outline) self.outline.set_clip_box(None) self.outline.set_clip_path(None) c = mpl.rcParams['axes.facecolor'] if self.patch is not None: self.patch.remove() self.patch = mpatches.Polygon(xy, edgecolor=c, facecolor=c, linewidth=0.01, zorder=-1) ax.add_artist(self.patch) self.update_ticks() def _set_label(self): if self.orientation == 'vertical': self.ax.set_ylabel(self._label, **self._labelkw) else: self.ax.set_xlabel(self._label, **self._labelkw) def set_label(self, label, **kw): self._label = '%s' % (label, ) self._labelkw = kw self._set_label() def _outline(self, X, Y): N = X.shape[0] ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0] x = np.take(np.ravel(np.transpose(X)), ii) y = np.take(np.ravel(np.transpose(Y)), ii) x = x.reshape((len(x), 1)) y = y.reshape((len(y), 1)) if self.orientation == 'horizontal': return np.hstack((y, x)) return np.hstack((x, y)) def _edges(self, X, Y): N = X.shape[0] if self.orientation == 'vertical': return [zip(X[i], Y[i]) for i in xrange(1, N - 1)] else: return [zip(Y[i], X[i]) for i in xrange(1, N - 1)] def _add_solids(self, X, Y, C): if self.orientation == 'vertical': args = (X, Y, C) else: args = (np.transpose(Y), np.transpose(X), np.transpose(C)) kw = dict(cmap=self.cmap, norm=self.norm, alpha=self.alpha, edgecolors='None') _hold = self.ax.ishold() self.ax.hold(True) col = self.ax.pcolormesh(*args, **kw) self.ax.hold(_hold) if self.solids is not None: self.solids.remove() self.solids = col if self.dividers is not None: self.dividers.remove() self.dividers = None if self.drawedges: linewidths = (0.5 * mpl.rcParams['axes.linewidth'],) self.dividers = collections.LineCollection(self._edges(X, Y), colors=(mpl.rcParams['axes.edgecolor'],), linewidths=linewidths) self.ax.add_collection(self.dividers) def add_lines(self, levels, colors, linewidths, erase=True): y = self._locate(levels) igood = (y < 1.001) & (y > -0.001) y = y[igood] if cbook.iterable(colors): colors = np.asarray(colors)[igood] if cbook.iterable(linewidths): linewidths = np.asarray(linewidths)[igood] N = len(y) x = np.array([0.0, 1.0]) X, Y = np.meshgrid(x, y) if self.orientation == 'vertical': xy = [zip(X[i], Y[i]) for i in xrange(N)] else: xy = [zip(Y[i], X[i]) for i in xrange(N)] col = collections.LineCollection(xy, linewidths=linewidths) if erase and self.lines: for lc in self.lines: lc.remove() self.lines = [] self.lines.append(col) col.set_color(colors) self.ax.add_collection(col) def _ticker(self): locator = self.locator formatter = self.formatter if locator is None: if self.boundaries is None: if isinstance(self.norm, colors.NoNorm): nv = len(self._values) base = 1 + int(nv / 10) locator = ticker.IndexLocator(base=base, offset=0) elif isinstance(self.norm, colors.BoundaryNorm): b = self.norm.boundaries locator = ticker.FixedLocator(b, nbins=10) elif isinstance(self.norm, colors.LogNorm): locator = ticker.LogLocator() else: locator = ticker.MaxNLocator() else: b = self._boundaries[self._inside] locator = ticker.FixedLocator(b, nbins=10) if isinstance(self.norm, colors.NoNorm): intv = self._values[0], self._values[-1] else: intv = self.vmin, self.vmax locator.create_dummy_axis(minpos=intv[0]) formatter.create_dummy_axis(minpos=intv[0]) locator.set_view_interval(*intv) locator.set_data_interval(*intv) formatter.set_view_interval(*intv) formatter.set_data_interval(*intv) b = np.array(locator()) ticks = 
self._locate(b) inrange = (ticks > -0.001) & (ticks < 1.001) ticks = ticks[inrange] b = b[inrange] formatter.set_locs(b) ticklabels = [formatter(t, i) for i, t in enumerate(b)] offset_string = formatter.get_offset() return ticks, ticklabels, offset_string def _process_values(self, b=None): if b is None: b = self.boundaries if b is not None: self._boundaries = np.asarray(b, dtype=float) if self.values is None: self._values = 0.5 * (self._boundaries[:-1] + self._boundaries[1:]) if isinstance(self.norm, colors.NoNorm): self._values = (self._values + 0.00001).astype(np.int16) return self._values = np.array(self.values) return if self.values is not None: self._values = np.array(self.values) if self.boundaries is None: b = np.zeros(len(self.values) + 1, 'd') b[1:-1] = 0.5 * (self._values[:-1] - self._values[1:]) b[0] = 2.0 * b[1] - b[2] b[-1] = 2.0 * b[-2] - b[-3] self._boundaries = b return self._boundaries = np.array(self.boundaries) return if isinstance(self.norm, colors.NoNorm): b = self._uniform_y(self.cmap.N + 1) * self.cmap.N - 0.5 v = np.zeros((len(b) - 1,), dtype=np.int16) v[self._inside] = np.arange(self.cmap.N, dtype=np.int16) if self._extend_lower(): v[0] = -1 if self._extend_upper(): v[-1] = self.cmap.N self._boundaries = b self._values = v return elif isinstance(self.norm, colors.BoundaryNorm): b = list(self.norm.boundaries) if self._extend_lower(): b = [b[0] - 1] + b if self._extend_upper(): b = b + [b[-1] + 1] b = np.array(b) v = np.zeros((len(b) - 1,), dtype=float) bi = self.norm.boundaries v[self._inside] = 0.5 * (bi[:-1] + bi[1:]) if self._extend_lower(): v[0] = b[0] - 1 if self._extend_upper(): v[-1] = b[-1] + 1 self._boundaries = b self._values = v return else: if not self.norm.scaled(): self.norm.vmin = 0 self.norm.vmax = 1 b = self.norm.inverse(self._uniform_y(self.cmap.N + 1)) if self._extend_lower(): b[0] = b[0] - 1 if self._extend_upper(): b[-1] = b[-1] + 1 self._process_values(b) def _find_range(self): b = self._boundaries[self._inside] self.vmin = b[0] self.vmax = b[-1]
MIT License
x1angli/cvt2utf
cvt2utf/main.py
normalize_codec_name
python
def normalize_codec_name(chardet_name): python_name = chardet_name.lower().replace('iso-', 'iso').replace('-', '_') python_name = codecs.lookup(python_name).name if python_name == 'gb2312': return 'gb18030' return python_name
Normalizes a chardet codec name to the corresponding Python codec name. :param chardet_name: codec name as reported by chardet :return: the Python codec name. See: https://docs.python.org/3.7/library/codecs.html#standard-encodings
https://github.com/x1angli/cvt2utf/blob/8a8197e6aeda6f2eab865676279ff09c78376640/cvt2utf/main.py#L39-L55
__author__ = 'x1ang.li' import logging, os, argparse, textwrap, time import codecs import chardet from cvt2utf.meta_inf import __version__ DEFAULT_CONF = { 'inc_exts': {'txt'}, 'exc_exts': {'bak'}, 'size_limit': 10 * 1024 ** 2, 'codec_chain': ['ascii', 'utf_8_sig', 'latin_1', 'chardet'], 'confi_thres': 0.8, 'cut_time': 40 } logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) log = logging.getLogger(__name__)
MIT License
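A quick usage sketch for normalize_codec_name above (an illustration only, assuming the function is in scope; the expected outputs follow from codecs.lookup's canonical names and the GB2312 special case in the snippet):

# chardet-style names in, Python codec names out
assert normalize_codec_name('GB2312') == 'gb18030'       # widened to the GB18030 superset
assert normalize_codec_name('ISO-8859-1') == 'iso8859-1'
assert normalize_codec_name('ascii') == 'ascii'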
dstein64/vimgolf
vimgolf/keys.py
to_bytes
python
def to_bytes(x): return x.to_bytes(2, 'big')
Convert an integer to its two-byte big-endian representation.
https://github.com/dstein64/vimgolf/blob/af5320108e16ab15ba5afa8f4c76d1fe3b2e37cf/vimgolf/keys.py#L3-L5
MIT License
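A minimal illustration of to_bytes above, assuming the function is in scope: each keycode is packed as exactly two big-endian bytes.

assert to_bytes(0x80) == b'\x00\x80'
assert to_bytes(0xFD35) == b'\xfd\x35'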
cameronlonsdale/mtp
manytime/models.py
DecryptEdit.set_edit_pos
python
def set_edit_pos(self, pos: int) -> None: if pos >= len(self._edit_text): pos = len(self._edit_text) - 1 super().set_edit_pos(pos) if self.previous_event[0] in (keys.LEFT, keys.RIGHT): self.parent.x_pos = self.get_cursor_coords(self.previous_event[1])[0]
Override the set_edit_pos method to restrict the edit position to the last character of the string, not one past the end.
https://github.com/cameronlonsdale/mtp/blob/1ab37bb28eb72cfef9c1d2bfadf8b79b489d2621/manytime/models.py#L60-L72
import urwid import math from manytime import keys from typing import Optional, List, Tuple, Union, Iterator class Key: def __init__(self, key: List[Optional[int]], unknown_character: str = '_') -> None: self.key = key self.unknown_character = unknown_character def to_formatted_text(self) -> List: unknown_char_formatted = ('unknown', self.unknown_character) return self._to_text(unknown_char_formatted) def to_plain_text(self) -> List: return self._to_text(self.unknown_character) def _to_text(self, unknown: Union[str, Tuple[str, str]]) -> List: return [format(k, '02x') if k is not None else [unknown, unknown] for k in self.key] def __iter__(self) -> Iterator: return iter(self.key) def __getitem__(self, index: int) -> Optional[int]: return self.key[index] def __setitem__(self, index: int, value: Optional[int]) -> None: self.key[index] = value class DecryptEdit(urwid.Edit): def __init__(self, parent, edit_text): self.parent = parent self.previous_event: Tuple[Optional[str], Any] = (None, None) super().__init__(edit_pos=0, edit_text=edit_text)
MIT License
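The override above boils down to a clamp on the cursor index; here is a standalone sketch of just that rule, with no urwid involved and an illustrative function name (the real method additionally mirrors the cursor column into parent.x_pos after arrow-key events):

def clamp_edit_pos(pos, edit_text):
    # Keep the cursor on the last character, never one past the end.
    if pos >= len(edit_text):
        pos = len(edit_text) - 1
    return pos

assert clamp_edit_pos(10, 'abcd') == 3
assert clamp_edit_pos(2, 'abcd') == 2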
azure/azure-linux-extensions
DSC/azure/servicemanagement/servicemanagementservice.py
ServiceManagementService.walk_upgrade_domain
python
def walk_upgrade_domain(self, service_name, deployment_name, upgrade_domain): _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('upgrade_domain', upgrade_domain) return self._perform_post( self._get_deployment_path_using_name( service_name, deployment_name) + '/?comp=walkupgradedomain', _XmlSerializer.walk_upgrade_domain_to_xml( upgrade_domain), async=True)
Specifies the next upgrade domain to be walked during manual in-place upgrade or configuration change. service_name: Name of the hosted service. deployment_name: The name of the deployment. upgrade_domain: An integer value that identifies the upgrade domain to walk. Upgrade domains are identified with a zero-based index: the first upgrade domain has an ID of 0, the second has an ID of 1, and so on.
https://github.com/azure/azure-linux-extensions/blob/128c1c5babfe5f74ad3c00be95abce3c54311e91/DSC/azure/servicemanagement/servicemanagementservice.py#L606-L627
from azure import ( WindowsAzureError, MANAGEMENT_HOST, _str, _validate_not_none, ) from azure.servicemanagement import ( AffinityGroups, AffinityGroup, AvailabilityResponse, Certificate, Certificates, DataVirtualHardDisk, Deployment, Disk, Disks, Locations, Operation, HostedService, HostedServices, Images, OperatingSystems, OperatingSystemFamilies, OSImage, PersistentVMRole, StorageService, StorageServices, Subscription, SubscriptionCertificate, SubscriptionCertificates, VirtualNetworkSites, _XmlSerializer, ) from azure.servicemanagement.servicemanagementclient import ( _ServiceManagementClient, ) class ServiceManagementService(_ServiceManagementClient): def __init__(self, subscription_id=None, cert_file=None, host=MANAGEMENT_HOST): super(ServiceManagementService, self).__init__( subscription_id, cert_file, host) def list_storage_accounts(self): return self._perform_get(self._get_storage_service_path(), StorageServices) def get_storage_account_properties(self, service_name): _validate_not_none('service_name', service_name) return self._perform_get(self._get_storage_service_path(service_name), StorageService) def get_storage_account_keys(self, service_name): _validate_not_none('service_name', service_name) return self._perform_get( self._get_storage_service_path(service_name) + '/keys', StorageService) def regenerate_storage_account_keys(self, service_name, key_type): _validate_not_none('service_name', service_name) _validate_not_none('key_type', key_type) return self._perform_post( self._get_storage_service_path( service_name) + '/keys?action=regenerate', _XmlSerializer.regenerate_keys_to_xml( key_type), StorageService) def create_storage_account(self, service_name, description, label, affinity_group=None, location=None, geo_replication_enabled=True, extended_properties=None): _validate_not_none('service_name', service_name) _validate_not_none('description', description) _validate_not_none('label', label) if affinity_group is None and location is None: raise WindowsAzureError( 'location or affinity_group must be specified') if affinity_group is not None and location is not None: raise WindowsAzureError( 'Only one of location or affinity_group needs to be specified') return self._perform_post( self._get_storage_service_path(), _XmlSerializer.create_storage_service_input_to_xml( service_name, description, label, affinity_group, location, geo_replication_enabled, extended_properties), async=True) def update_storage_account(self, service_name, description=None, label=None, geo_replication_enabled=None, extended_properties=None): _validate_not_none('service_name', service_name) return self._perform_put( self._get_storage_service_path(service_name), _XmlSerializer.update_storage_service_input_to_xml( description, label, geo_replication_enabled, extended_properties)) def delete_storage_account(self, service_name): _validate_not_none('service_name', service_name) return self._perform_delete( self._get_storage_service_path(service_name)) def check_storage_account_name_availability(self, service_name): _validate_not_none('service_name', service_name) return self._perform_get( self._get_storage_service_path() + '/operations/isavailable/' + _str(service_name) + '', AvailabilityResponse) def list_hosted_services(self): return self._perform_get(self._get_hosted_service_path(), HostedServices) def get_hosted_service_properties(self, service_name, embed_detail=False): _validate_not_none('service_name', service_name) _validate_not_none('embed_detail', embed_detail) return self._perform_get( 
self._get_hosted_service_path(service_name) + '?embed-detail=' + _str(embed_detail).lower(), HostedService) def create_hosted_service(self, service_name, label, description=None, location=None, affinity_group=None, extended_properties=None): _validate_not_none('service_name', service_name) _validate_not_none('label', label) if affinity_group is None and location is None: raise WindowsAzureError( 'location or affinity_group must be specified') if affinity_group is not None and location is not None: raise WindowsAzureError( 'Only one of location or affinity_group needs to be specified') return self._perform_post(self._get_hosted_service_path(), _XmlSerializer.create_hosted_service_to_xml( service_name, label, description, location, affinity_group, extended_properties)) def update_hosted_service(self, service_name, label=None, description=None, extended_properties=None): _validate_not_none('service_name', service_name) return self._perform_put(self._get_hosted_service_path(service_name), _XmlSerializer.update_hosted_service_to_xml( label, description, extended_properties)) def delete_hosted_service(self, service_name): _validate_not_none('service_name', service_name) return self._perform_delete(self._get_hosted_service_path(service_name)) def get_deployment_by_slot(self, service_name, deployment_slot): _validate_not_none('service_name', service_name) _validate_not_none('deployment_slot', deployment_slot) return self._perform_get( self._get_deployment_path_using_slot( service_name, deployment_slot), Deployment) def get_deployment_by_name(self, service_name, deployment_name): _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) return self._perform_get( self._get_deployment_path_using_name( service_name, deployment_name), Deployment) def create_deployment(self, service_name, deployment_slot, name, package_url, label, configuration, start_deployment=False, treat_warnings_as_error=False, extended_properties=None): _validate_not_none('service_name', service_name) _validate_not_none('deployment_slot', deployment_slot) _validate_not_none('name', name) _validate_not_none('package_url', package_url) _validate_not_none('label', label) _validate_not_none('configuration', configuration) return self._perform_post( self._get_deployment_path_using_slot( service_name, deployment_slot), _XmlSerializer.create_deployment_to_xml( name, package_url, label, configuration, start_deployment, treat_warnings_as_error, extended_properties), async=True) def delete_deployment(self, service_name, deployment_name): _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) return self._perform_delete( self._get_deployment_path_using_name( service_name, deployment_name), async=True) def swap_deployment(self, service_name, production, source_deployment): _validate_not_none('service_name', service_name) _validate_not_none('production', production) _validate_not_none('source_deployment', source_deployment) return self._perform_post(self._get_hosted_service_path(service_name), _XmlSerializer.swap_deployment_to_xml( production, source_deployment), async=True) def change_deployment_configuration(self, service_name, deployment_name, configuration, treat_warnings_as_error=False, mode='Auto', extended_properties=None): _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('configuration', configuration) return self._perform_post( self._get_deployment_path_using_name( 
service_name, deployment_name) + '/?comp=config', _XmlSerializer.change_deployment_to_xml( configuration, treat_warnings_as_error, mode, extended_properties), async=True) def update_deployment_status(self, service_name, deployment_name, status): _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('status', status) return self._perform_post( self._get_deployment_path_using_name( service_name, deployment_name) + '/?comp=status', _XmlSerializer.update_deployment_status_to_xml( status), async=True) def upgrade_deployment(self, service_name, deployment_name, mode, package_url, configuration, label, force, role_to_upgrade=None, extended_properties=None): _validate_not_none('service_name', service_name) _validate_not_none('deployment_name', deployment_name) _validate_not_none('mode', mode) _validate_not_none('package_url', package_url) _validate_not_none('configuration', configuration) _validate_not_none('label', label) _validate_not_none('force', force) return self._perform_post( self._get_deployment_path_using_name( service_name, deployment_name) + '/?comp=upgrade', _XmlSerializer.upgrade_deployment_to_xml( mode, package_url, configuration, label, role_to_upgrade, force, extended_properties), async=True)
Apache License 2.0
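A hedged usage sketch for walk_upgrade_domain (the subscription id, certificate path, and resource names are placeholders; the import follows the legacy azure SDK layout shown above):

from azure.servicemanagement import ServiceManagementService

sms = ServiceManagementService(
    subscription_id='00000000-0000-0000-0000-000000000000',
    cert_file='/path/to/management_cert.pem',
)
# Walk upgrade domain 0 (the first domain) of an in-progress in-place upgrade.
request = sms.walk_upgrade_domain('my-hosted-service', 'my-deployment', 0)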
alberanid/diffido
diffido.py
BaseHandler.build_success
python
def build_success(self, message='', status=200): self.set_status(status) self.write({'error': False, 'message': message})
Build and write a success message. :param message: textual message :type message: str :param status: HTTP status code :type status: int
https://github.com/alberanid/diffido/blob/89ed643f6b3967656b7817284ca26f8bab549b99/diffido.py#L608-L617
import os import re import json import pytz import shutil import urllib import smtplib from email.mime.text import MIMEText from email.utils import formatdate import logging import datetime import requests import subprocess import multiprocessing from lxml import etree from xml.etree import ElementTree from tornado.ioloop import IOLoop from apscheduler.triggers.cron import CronTrigger from apscheduler.schedulers.tornado import TornadoScheduler from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore import tornado.httpserver import tornado.ioloop import tornado.options from tornado.options import define, options import tornado.web from tornado import gen, escape JOBS_STORE = 'sqlite:///conf/jobs.db' API_VERSION = '1.0' SCHEDULES_FILE = 'conf/schedules.json' DEFAULT_CONF = 'conf/diffido.conf' EMAIL_FROM = 'diffido@localhost' SMTP_SETTINGS = {} GIT_CMD = 'git' re_commit = re.compile(r'^(?P<id>[0-9a-f]{40}) (?P<message>.*)\n(?: .* ' '(?P<insertions>\d+) insertion.* (?P<deletions>\d+) deletion.*$)?', re.M) re_insertion = re.compile(r'(\d+) insertion') re_deletion = re.compile(r'(\d+) deletion') logger = logging.getLogger() logger.setLevel(logging.INFO) def read_schedules(): if not os.path.isfile(SCHEDULES_FILE): return {'schedules': {}} try: with open(SCHEDULES_FILE, 'r') as fd: schedules = json.loads(fd.read()) for id_ in schedules.get('schedules', {}).keys(): schedule = schedules['schedules'][id_] try: schedule['last_history'] = get_last_history(id_) except: schedule['last_history'] = {} continue return schedules except Exception as e: logger.error('unable to read %s: %s' % (SCHEDULES_FILE, e)) return {'schedules': {}} def write_schedules(schedules): try: with open(SCHEDULES_FILE, 'w') as fd: fd.write(json.dumps(schedules, indent=2)) except Exception as e: logger.error('unable to write %s: %s' % (SCHEDULES_FILE, e)) return False return True def next_id(schedules): ids = schedules.get('schedules', {}).keys() if not ids: return '1' return str(max([int(i) for i in ids]) + 1) def get_schedule(id_, add_id=True, add_history=False): try: schedules = read_schedules() except Exception: return {} data = schedules.get('schedules', {}).get(id_, {}) if add_history and data: data['last_history'] = get_last_history(id_) if add_id: data['id'] = str(id_) return data def select_xpath(content, xpath): tree = etree.HTML(content) elems = tree.xpath(xpath) if not elems: return content selected_content = [] for elem in elems: pieces = [] if elem.text: pieces.append(elem.text) for sub_el in elem.getchildren(): try: sub_el_text = ElementTree.tostring(sub_el, method='html').decode('utf-8', 'replace') except: continue if sub_el_text: pieces.append(sub_el_text) selected_content.append(''.join(pieces)) content = ''.join(selected_content).strip() return content def run_job(id_=None, force=False, *args, **kwargs): schedule = get_schedule(id_, add_id=False) url = schedule.get('url') if not url: return False logger.debug('running job id:%s title:%s url: %s' % (id_, schedule.get('title', ''), url)) if not schedule.get('enabled') and not force: logger.info('not running job %s: disabled' % id_) return True req = requests.get(url, allow_redirects=True, timeout=(30.10, 240)) content = req.text xpath = schedule.get('xpath') if xpath: try: content = select_xpath(content, xpath) except Exception as e: logger.warn('unable to extract XPath %s: %s' % (xpath, e)) req_path = urllib.parse.urlparse(req.url).path base_name = os.path.basename(req_path) or 'index.html' def _commit(id_, filename, content, queue): try: 
os.chdir('storage/%s' % id_) except Exception as e: logger.info('unable to move to storage/%s directory: %s; trying to create it...' % (id_, e)) _created = False try: _created = git_create_repo(id_) except Exception as e: logger.info('unable to move to storage/%s directory: %s; unable to create it' % (id_, e)) if not _created: return False current_lines = 0 if os.path.isfile(filename): with open(filename, 'r') as fd: for line in fd: current_lines += 1 with open(filename, 'w') as fd: fd.write(content) p = subprocess.Popen([GIT_CMD, 'add', filename]) p.communicate() p = subprocess.Popen([GIT_CMD, 'commit', '-m', '%s' % datetime.datetime.utcnow(), '--allow-empty'], stdout=subprocess.PIPE) stdout, _ = p.communicate() stdout = stdout.decode('utf-8') insert = re_insertion.findall(stdout) if insert: insert = int(insert[0]) else: insert = 0 delete = re_deletion.findall(stdout) if delete: delete = int(delete[0]) else: delete = 0 queue.put({'insertions': insert, 'deletions': delete, 'previous_lines': current_lines, 'changes': max(insert, delete)}) queue = multiprocessing.Queue() p = multiprocessing.Process(target=_commit, args=(id_, base_name, content, queue)) p.start() res = queue.get() p.join() email = schedule.get('email') if not email: return True changes = res.get('changes') if not changes: return True min_change = schedule.get('minimum_change') previous_lines = res.get('previous_lines') if min_change and previous_lines: min_change = float(min_change) change_fraction = res.get('changes') / previous_lines if change_fraction < min_change: return True diff = get_diff(id_).get('diff') if not diff: return True send_email(to=email, subject='%s page changed' % schedule.get('title'), body='changes:\n\n%s' % diff) return True def safe_run_job(id_=None, *args, **kwargs): try: run_job(id_, *args, **kwargs) except Exception as e: send_email('error executing job %s: %s' % (id_, e)) def send_email(to, subject='diffido', body='', from_=None): msg = MIMEText(body) msg['Subject'] = subject msg['From'] = from_ or EMAIL_FROM msg['To'] = to msg["Date"] = formatdate(localtime=True) starttls = SMTP_SETTINGS.get('smtp-starttls') use_ssl = SMTP_SETTINGS.get('smtp-use-ssl') username = SMTP_SETTINGS.get('smtp-username') password = SMTP_SETTINGS.get('smtp-password') args = {} for key, value in SMTP_SETTINGS.items(): if key in ('smtp-starttls', 'smtp-use-ssl', 'smtp-username', 'smtp-password'): continue if key in ('smtp-port'): value = int(value) key = key.replace('smtp-', '', 1).replace('-', '_') args[key] = value try: if use_ssl: logger.debug('STMP SSL connection with args: %s' % repr(args)) with smtplib.SMTP_SSL(**args) as s: if username: logger.debug('STMP LOGIN for username %s and password of length %d' % (username, len(password))) s.login(username, password) s.send_message(msg) else: tls_args = {} for key in ('ssl_keyfile', 'ssl_certfile', 'ssl_context'): if key in args: tls_args[key.replace('ssl_', '')] = args[key] del args[key] logger.debug('STMP connection with args: %s' % repr(args)) with smtplib.SMTP(**args) as s: if starttls: logger.debug('STMP STARTTLS connection with args: %s' % repr(tls_args)) s.ehlo_or_helo_if_needed() s.starttls(**tls_args) if username: logger.debug('STMP LOGIN for username %s and password of length %d' % (username, len(password))) s.login(username, password) s.send_message(msg) except Exception as e: logger.error('unable to send email to %s: %s' % (to, e)) return False return True def get_history(id_, limit=None, add_info=False): def _history(id_, limit, queue): try: 
os.chdir('storage/%s' % id_) except Exception as e: logger.info('unable to move to storage/%s directory: %s' % (id_, e)) return queue.put(b'') cmd = [GIT_CMD, 'log', '--pretty=oneline', '--shortstat'] if limit is not None: cmd.append('-%s' % limit) p = subprocess.Popen(cmd, stdout=subprocess.PIPE) stdout, _ = p.communicate() queue.put(stdout) queue = multiprocessing.Queue() p = multiprocessing.Process(target=_history, args=(id_, limit, queue)) p.start() res = queue.get().decode('utf-8') p.join() history = [] for match in re_commit.finditer(res): info = match.groupdict() info['insertions'] = int(info['insertions'] or 0) info['deletions'] = int(info['deletions'] or 0) info['changes'] = max(info['insertions'], info['deletions']) history.append(info) last_id = None if history and 'id' in history[0]: last_id = history[0]['id'] for idx, item in enumerate(history): item['seq'] = idx + 1 data = {'history': history, 'last_id': last_id} if add_info: data['schedule'] = get_schedule(id_) return data def get_last_history(id_): history = get_history(id_, limit=1) hist = history.get('history') or [{}] return hist[0] def get_diff(id_, commit_id='HEAD', old_commit_id=None): def _history(id_, commit_id, old_commit_id, queue): try: os.chdir('storage/%s' % id_) except Exception as e: logger.info('unable to move to storage/%s directory: %s' % (id_, e)) return queue.put(b'') p = subprocess.Popen([GIT_CMD, 'diff', old_commit_id or '%s~' % commit_id, commit_id], stdout=subprocess.PIPE) stdout, _ = p.communicate() queue.put(stdout) queue = multiprocessing.Queue() p = multiprocessing.Process(target=_history, args=(id_, commit_id, old_commit_id, queue)) p.start() res = queue.get().decode('utf-8') p.join() schedule = get_schedule(id_) return {'diff': res, 'schedule': schedule} def scheduler_update(scheduler, id_): schedule = get_schedule(id_, add_id=False) if not schedule: logger.warn('unable to update empty schedule %s' % id_) return False trigger = schedule.get('trigger') if trigger not in ('interval', 'cron'): logger.warn('unable to update empty schedule %s: trigger not in ("cron", "interval")' % id_) return False args = {} if trigger == 'interval': args['trigger'] = 'interval' for unit in 'weeks', 'days', 'hours', 'minutes', 'seconds': if 'interval_%s' % unit not in schedule: continue try: val = schedule['interval_%s' % unit] if not val: continue args[unit] = int(val) except Exception: logger.warn('invalid argument on schedule %s: %s parameter %s is not an integer' % (id_, 'interval_%s' % unit, schedule['interval_%s' % unit])) if len(args) == 1: logger.error('no valid interval specified, skipping schedule %s' % id_) return False elif trigger == 'cron': try: cron_trigger = CronTrigger.from_crontab(schedule['cron_crontab']) args['trigger'] = cron_trigger except Exception: logger.warn('invalid argument on schedule %s: cron_tab parameter %s is not a valid crontab' % (id_, schedule.get('cron_crontab'))) return False git_create_repo(id_) try: scheduler.add_job(safe_run_job, id=id_, replace_existing=True, kwargs={'id_': id_}, **args) except Exception as e: logger.warn('unable to update job %s: %s' % (id_, e)) return False return True def scheduler_delete(scheduler, id_): try: scheduler.remove_job(job_id=id_) except Exception as e: logger.warn('unable to delete job %s: %s' % (id_, e)) return False return git_delete_repo(id_) def reset_from_schedules(scheduler): ret = False try: scheduler.remove_all_jobs() for key in read_schedules().get('schedules', {}).keys(): ret |= scheduler_update(scheduler, id_=key) except Exception 
as e: logger.warn('unable to reset all jobs: %s' % e) return False return ret def git_init(): p = subprocess.Popen([GIT_CMD, 'config', '--global', 'user.email', '"%s"' % EMAIL_FROM]) p.communicate() p = subprocess.Popen([GIT_CMD, 'config', '--global', 'user.name', '"Diffido"']) p.communicate() def git_create_repo(id_): repo_dir = 'storage/%s' % id_ if os.path.isdir(repo_dir): return True p = subprocess.Popen([GIT_CMD, 'init', repo_dir]) p.communicate() return p.returncode == 0 def git_delete_repo(id_): repo_dir = 'storage/%s' % id_ if not os.path.isdir(repo_dir): return False try: shutil.rmtree(repo_dir) except Exception as e: logger.warn('unable to delete Git repository %s: %s' % (id_, e)) return False return True class DiffidoBaseException(Exception): def __init__(self, message, status=400): super(DiffidoBaseException, self).__init__(message) self.message = message self.status = status class BaseHandler(tornado.web.RequestHandler): arguments = property(lambda self: dict([(k, v[0].decode('utf-8')) for k, v in self.request.arguments.items()])) @property def clean_body(self): return escape.json_decode(self.request.body or '{}') def write_error(self, status_code, **kwargs): if isinstance(kwargs.get('exc_info', (None, None))[1], DiffidoBaseException): exc = kwargs['exc_info'][1] status_code = exc.status message = exc.message else: message = 'internal error' self.build_error(message, status=status_code) def initialize(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def build_error(self, message='', status=400): self.set_status(status) self.write({'error': True, 'message': message})
Apache License 2.0
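A hypothetical handler sketch showing where build_success sits next to build_error and clean_body from the context above (the route and payload are made up for illustration):

class PingHandler(BaseHandler):
    def get(self):
        # Responds 200 with {"error": false, "message": "pong"}
        self.build_success(message='pong')

    def post(self):
        if not self.clean_body:
            # Responds 400 with {"error": true, "message": "empty body"}
            return self.build_error('empty body')
        self.build_success(message='accepted', status=201)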
inmanta/inmanta-core
src/inmanta/execute/dataflow/__init__.py
NodeReference.ref_to_node
python
def ref_to_node(self, node: "Node") -> bool: return node in self.nodes()
Returns true iff this NodeReference refers to the given node.
https://github.com/inmanta/inmanta-core/blob/7e57295314e30276204b74ddcb8e2402c0a50b19/src/inmanta/execute/dataflow/__init__.py#L245-L249
from functools import reduce from itertools import chain, filterfalse from typing import TYPE_CHECKING, Callable, Dict, FrozenSet, Generic, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar if TYPE_CHECKING: from inmanta.ast import Locatable from inmanta.ast.attribute import Attribute from inmanta.ast.entity import Entity from inmanta.ast.statements.generator import Constructor from inmanta.execute.runtime import Resolver, ResultVariable class DataflowGraph: __slots__ = ("resolver", "parent", "_own_variables", "_own_instances") def __init__(self, resolver: "Resolver", parent: Optional["DataflowGraph"] = None) -> None: self.resolver: "Resolver" = resolver self.parent: Optional[DataflowGraph] = parent if parent is not None else None self._own_variables: Dict[str, AssignableNode] = {} self._own_instances: Dict["Constructor", InstanceNode] = {} def get_own_variable(self, name: str) -> "AssignableNodeReference": if name not in self._own_variables: self._own_variables[name] = AssignableNode(name) return self._own_variables[name].reference() def own_instance_node_for_responsible( self, entity: "Entity", responsible: "Constructor", get_new: Callable[[], "InstanceNode"] ) -> "InstanceNode": if responsible not in self._own_instances: new: InstanceNode = get_new() new.entity = entity new.responsible = responsible new.context = self self._own_instances[responsible] = new return self._own_instances[responsible] def add_index_match(self, instances: Iterable["InstanceNodeReference"]) -> None: iterator: Iterator[InstanceNodeReference] = iter(instances) try: first: InstanceNode = next(iter(iterator)).node() for instance in iterator: instance.node().index_match(first) except StopIteration: pass class Node: __slots__ = () def __init__(self) -> None: pass def reference(self) -> "NodeReference": raise NotImplementedError() class NodeReference: __slots__ = () def __init__(self) -> None: pass def nodes(self) -> Iterator["Node"]: raise NotImplementedError()
Apache License 2.0
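Because NodeReference.nodes() is abstract in the context above, a tiny hypothetical subclass makes the membership check performed by ref_to_node concrete (illustration only, not part of inmanta-core):

class SingleNodeReference(NodeReference):
    # Hypothetical helper: refers to exactly one node.
    def __init__(self, node):
        super().__init__()
        self._node = node

    def nodes(self):
        yield self._node

n = Node()
assert SingleNodeReference(n).ref_to_node(n) is True
assert SingleNodeReference(n).ref_to_node(Node()) is False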
lvtk/lvtk
waflib/Build.py
BuildContext.get_tasks_group
python
def get_tasks_group(self, idx): tasks = [] for tg in self.groups[idx]: try: tasks.extend(tg.tasks) except AttributeError: tasks.append(tg) return tasks
Returns all task instances for the build group at position idx, used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator` :rtype: list of :py:class:`waflib.Task.Task`
https://github.com/lvtk/lvtk/blob/c9e351c480c7f335ced85cbe1ce599e43ae72d4c/waflib/Build.py#L787-L800
import os, sys, errno, re, shutil, stat try: import cPickle except ImportError: import pickle as cPickle from waflib import Node, Runner, TaskGen, Utils, ConfigSet, Task, Logs, Options, Context, Errors CACHE_DIR = 'c4che' CACHE_SUFFIX = '_cache.py' INSTALL = 1337 UNINSTALL = -1337 SAVED_ATTRS = 'root node_sigs task_sigs imp_sigs raw_deps node_deps'.split() CFG_FILES = 'cfg_files' POST_AT_ONCE = 0 POST_LAZY = 1 PROTOCOL = -1 if sys.platform == 'cli': PROTOCOL = 0 class BuildContext(Context.Context): cmd = 'build' variant = '' def __init__(self, **kw): super(BuildContext, self).__init__(**kw) self.is_install = 0 self.top_dir = kw.get('top_dir', Context.top_dir) self.out_dir = kw.get('out_dir', Context.out_dir) self.run_dir = kw.get('run_dir', Context.run_dir) self.launch_dir = Context.launch_dir self.post_mode = POST_LAZY self.cache_dir = kw.get('cache_dir') if not self.cache_dir: self.cache_dir = os.path.join(self.out_dir, CACHE_DIR) self.all_envs = {} self.node_sigs = {} self.task_sigs = {} self.imp_sigs = {} self.node_deps = {} self.raw_deps = {} self.task_gen_cache_names = {} self.jobs = Options.options.jobs self.targets = Options.options.targets self.keep = Options.options.keep self.progress_bar = Options.options.progress_bar self.deps_man = Utils.defaultdict(list) self.current_group = 0 self.groups = [] self.group_names = {} for v in SAVED_ATTRS: if not hasattr(self, v): setattr(self, v, {}) def get_variant_dir(self): if not self.variant: return self.out_dir return os.path.join(self.out_dir, os.path.normpath(self.variant)) variant_dir = property(get_variant_dir, None) def __call__(self, *k, **kw): kw['bld'] = self ret = TaskGen.task_gen(*k, **kw) self.task_gen_cache_names = {} self.add_to_group(ret, group=kw.get('group')) return ret def __copy__(self): raise Errors.WafError('build contexts cannot be copied') def load_envs(self): node = self.root.find_node(self.cache_dir) if not node: raise Errors.WafError('The project was not configured: run "waf configure" first!') lst = node.ant_glob('**/*%s' % CACHE_SUFFIX, quiet=True) if not lst: raise Errors.WafError('The cache directory is empty: reconfigure the project') for x in lst: name = x.path_from(node).replace(CACHE_SUFFIX, '').replace('\\', '/') env = ConfigSet.ConfigSet(x.abspath()) self.all_envs[name] = env for f in env[CFG_FILES]: newnode = self.root.find_resource(f) if not newnode or not newnode.exists(): raise Errors.WafError('Missing configuration file %r, reconfigure the project!' 
% f) def init_dirs(self): if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)): raise Errors.WafError('The project was not configured: run "waf configure" first!') self.path = self.srcnode = self.root.find_dir(self.top_dir) self.bldnode = self.root.make_node(self.variant_dir) self.bldnode.mkdir() def execute(self): self.restore() if not self.all_envs: self.load_envs() self.execute_build() def execute_build(self): Logs.info("Waf: Entering directory `%s'", self.variant_dir) self.recurse([self.run_dir]) self.pre_build() self.timer = Utils.Timer() try: self.compile() finally: if self.progress_bar == 1 and sys.stderr.isatty(): c = self.producer.processed or 1 m = self.progress_line(c, c, Logs.colors.BLUE, Logs.colors.NORMAL) Logs.info(m, extra={'stream': sys.stderr, 'c1': Logs.colors.cursor_off, 'c2' : Logs.colors.cursor_on}) Logs.info("Waf: Leaving directory `%s'", self.variant_dir) try: self.producer.bld = None del self.producer except AttributeError: pass self.post_build() def restore(self): try: env = ConfigSet.ConfigSet(os.path.join(self.cache_dir, 'build.config.py')) except EnvironmentError: pass else: if env.version < Context.HEXVERSION: raise Errors.WafError('Project was configured with a different version of Waf, please reconfigure it') for t in env.tools: self.setup(**t) dbfn = os.path.join(self.variant_dir, Context.DBFILE) try: data = Utils.readf(dbfn, 'rb') except (EnvironmentError, EOFError): Logs.debug('build: Could not load the build cache %s (missing)', dbfn) else: try: Node.pickle_lock.acquire() Node.Nod3 = self.node_class try: data = cPickle.loads(data) except Exception as e: Logs.debug('build: Could not pickle the build cache %s: %r', dbfn, e) else: for x in SAVED_ATTRS: setattr(self, x, data.get(x, {})) finally: Node.pickle_lock.release() self.init_dirs() def store(self): data = {} for x in SAVED_ATTRS: data[x] = getattr(self, x) db = os.path.join(self.variant_dir, Context.DBFILE) try: Node.pickle_lock.acquire() Node.Nod3 = self.node_class x = cPickle.dumps(data, PROTOCOL) finally: Node.pickle_lock.release() Utils.writef(db + '.tmp', x, m='wb') try: st = os.stat(db) os.remove(db) if not Utils.is_win32: os.chown(db + '.tmp', st.st_uid, st.st_gid) except (AttributeError, OSError): pass os.rename(db + '.tmp', db) def compile(self): Logs.debug('build: compile()') self.producer = Runner.Parallel(self, self.jobs) self.producer.biter = self.get_build_iterator() try: self.producer.start() except KeyboardInterrupt: if self.is_dirty(): self.store() raise else: if self.is_dirty(): self.store() if self.producer.error: raise Errors.BuildError(self.producer.error) def is_dirty(self): return self.producer.dirty def setup(self, tool, tooldir=None, funs=None): if isinstance(tool, list): for i in tool: self.setup(i, tooldir) return module = Context.load_tool(tool, tooldir) if hasattr(module, "setup"): module.setup(self) def get_env(self): try: return self.all_envs[self.variant] except KeyError: return self.all_envs[''] def set_env(self, val): self.all_envs[self.variant] = val env = property(get_env, set_env) def add_manual_dependency(self, path, value): if not path: raise ValueError('Invalid input path %r' % path) if isinstance(path, Node.Node): node = path elif os.path.isabs(path): node = self.root.find_resource(path) else: node = self.path.find_resource(path) if not node: raise ValueError('Could not find the path %r' % path) if isinstance(value, list): self.deps_man[node].extend(value) else: self.deps_man[node].append(value) def launch_node(self): try: return self.p_ln 
except AttributeError: self.p_ln = self.root.find_dir(self.launch_dir) return self.p_ln def hash_env_vars(self, env, vars_lst): if not env.table: env = env.parent if not env: return Utils.SIG_NIL idx = str(id(env)) + str(vars_lst) try: cache = self.cache_env except AttributeError: cache = self.cache_env = {} else: try: return self.cache_env[idx] except KeyError: pass lst = [env[a] for a in vars_lst] cache[idx] = ret = Utils.h_list(lst) Logs.debug('envhash: %s %r', Utils.to_hex(ret), lst) return ret def get_tgen_by_name(self, name): cache = self.task_gen_cache_names if not cache: for g in self.groups: for tg in g: try: cache[tg.name] = tg except AttributeError: pass try: return cache[name] except KeyError: raise Errors.WafError('Could not find a task generator for the name %r' % name) def progress_line(self, idx, total, col1, col2): if not sys.stderr.isatty(): return '' n = len(str(total)) Utils.rot_idx += 1 ind = Utils.rot_chr[Utils.rot_idx % 4] pc = (100. * idx)/total fs = "[%%%dd/%%d][%%s%%2d%%%%%%s][%s][" % (n, ind) left = fs % (idx, total, col1, pc, col2) right = '][%s%s%s]' % (col1, self.timer, col2) cols = Logs.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2) if cols < 7: cols = 7 ratio = ((cols * idx)//total) - 1 bar = ('='*ratio+'>').ljust(cols) msg = Logs.indicator % (left, bar, right) return msg def declare_chain(self, *k, **kw): return TaskGen.declare_chain(*k, **kw) def pre_build(self): for m in getattr(self, 'pre_funs', []): m(self) def post_build(self): for m in getattr(self, 'post_funs', []): m(self) def add_pre_fun(self, meth): try: self.pre_funs.append(meth) except AttributeError: self.pre_funs = [meth] def add_post_fun(self, meth): try: self.post_funs.append(meth) except AttributeError: self.post_funs = [meth] def get_group(self, x): if not self.groups: self.add_group() if x is None: return self.groups[self.current_group] if x in self.group_names: return self.group_names[x] return self.groups[x] def add_to_group(self, tgen, group=None): assert(isinstance(tgen, TaskGen.task_gen) or isinstance(tgen, Task.Task)) tgen.bld = self self.get_group(group).append(tgen) def get_group_name(self, g): if not isinstance(g, list): g = self.groups[g] for x in self.group_names: if id(self.group_names[x]) == id(g): return x return '' def get_group_idx(self, tg): se = id(tg) for i, tmp in enumerate(self.groups): for t in tmp: if id(t) == se: return i return None def add_group(self, name=None, move=True): if name and name in self.group_names: raise Errors.WafError('add_group: name %s already present', name) g = [] self.group_names[name] = g self.groups.append(g) if move: self.current_group = len(self.groups) - 1 def set_group(self, idx): if isinstance(idx, str): g = self.group_names[idx] for i, tmp in enumerate(self.groups): if id(g) == id(tmp): self.current_group = i break else: self.current_group = idx def total(self): total = 0 for group in self.groups: for tg in group: try: total += len(tg.tasks) except AttributeError: total += 1 return total def get_targets(self): to_post = [] min_grp = 0 for name in self.targets.split(','): tg = self.get_tgen_by_name(name) m = self.get_group_idx(tg) if m > min_grp: min_grp = m to_post = [tg] elif m == min_grp: to_post.append(tg) return (min_grp, to_post) def get_all_task_gen(self): lst = [] for g in self.groups: lst.extend(g) return lst def post_group(self): def tgpost(tg): try: f = tg.post except AttributeError: pass else: f() if self.targets == '*': for tg in self.groups[self.current_group]: tgpost(tg) elif self.targets: if 
self.current_group < self._min_grp: for tg in self.groups[self.current_group]: tgpost(tg) else: for tg in self._exact_tg: tg.post() else: ln = self.launch_node() if ln.is_child_of(self.bldnode): Logs.warn('Building from the build directory, forcing --targets=*') ln = self.srcnode elif not ln.is_child_of(self.srcnode): Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)', ln.abspath(), self.srcnode.abspath()) ln = self.srcnode def is_post(tg, ln): try: p = tg.path except AttributeError: pass else: if p.is_child_of(ln): return True def is_post_group(): for i, g in enumerate(self.groups): if i > self.current_group: for tg in g: if is_post(tg, ln): return True if self.post_mode == POST_LAZY and ln != self.srcnode: if is_post_group(): ln = self.srcnode for tg in self.groups[self.current_group]: if is_post(tg, ln): tgpost(tg)
ISC License
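A hypothetical wscript fragment using get_tasks_group after the build has run (target names are illustrative and a C toolchain is assumed to have been configured; add_post_fun is the hook shown in the context above):

def build(bld):
    bld.program(source='main.c', target='app')

    def report(ctx):
        # All Task instances that belonged to the first (and only) build group.
        tasks = ctx.get_tasks_group(0)
        print('group 0 ran %d tasks' % len(tasks))

    bld.add_post_fun(report)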
opensearch-project/opensearch-py
opensearchpy/_async/client/snapshot.py
SnapshotClient.cleanup_repository
python
async def cleanup_repository(self, repository, params=None, headers=None): if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") return await self.transport.perform_request( "POST", _make_path("_snapshot", repository, "_cleanup"), params=params, headers=headers, )
Removes stale data from a repository. :arg repository: A repository name :arg master_timeout: Explicit operation timeout for connection to master node :arg timeout: Explicit operation timeout
https://github.com/opensearch-project/opensearch-py/blob/4281fe0e2c6baefc7abeda115b1f0cb1f746ebba/opensearchpy/_async/client/snapshot.py#L248-L266
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params class SnapshotClient(NamespacedClient): @query_params("master_timeout", "wait_for_completion") async def create(self, repository, snapshot, body=None, params=None, headers=None): for param in (repository, snapshot): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return await self.transport.perform_request( "PUT", _make_path("_snapshot", repository, snapshot), params=params, headers=headers, body=body, ) @query_params("master_timeout") async def delete(self, repository, snapshot, params=None, headers=None): for param in (repository, snapshot): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return await self.transport.perform_request( "DELETE", _make_path("_snapshot", repository, snapshot), params=params, headers=headers, ) @query_params( "ignore_unavailable", "include_repository", "index_details", "master_timeout", "verbose", ) async def get(self, repository, snapshot, params=None, headers=None): for param in (repository, snapshot): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return await self.transport.perform_request( "GET", _make_path("_snapshot", repository, snapshot), params=params, headers=headers, ) @query_params("master_timeout", "timeout") async def delete_repository(self, repository, params=None, headers=None): if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") return await self.transport.perform_request( "DELETE", _make_path("_snapshot", repository), params=params, headers=headers, ) @query_params("local", "master_timeout") async def get_repository(self, repository=None, params=None, headers=None): return await self.transport.perform_request( "GET", _make_path("_snapshot", repository), params=params, headers=headers ) @query_params("master_timeout", "timeout", "verify") async def create_repository(self, repository, body, params=None, headers=None): for param in (repository, body): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return await self.transport.perform_request( "PUT", _make_path("_snapshot", repository), params=params, headers=headers, body=body, ) @query_params("master_timeout", "wait_for_completion") async def restore(self, repository, snapshot, body=None, params=None, headers=None): for param in (repository, snapshot): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return await self.transport.perform_request( "POST", _make_path("_snapshot", repository, snapshot, "_restore"), params=params, headers=headers, body=body, ) @query_params("ignore_unavailable", "master_timeout") async def status(self, repository=None, snapshot=None, params=None, headers=None): return await self.transport.perform_request( "GET", _make_path("_snapshot", repository, snapshot, "_status"), params=params, headers=headers, ) @query_params("master_timeout", "timeout") async def verify_repository(self, repository, params=None, headers=None): if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") return await self.transport.perform_request( "POST", _make_path("_snapshot", repository, "_verify"), params=params, headers=headers, ) @query_params("master_timeout", "timeout")
Apache License 2.0
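A hedged async usage sketch for cleanup_repository (the host and repository name are placeholders, and the async client extras are assumed to be installed):

from opensearchpy import AsyncOpenSearch

async def cleanup(repo='my_backup_repo'):
    client = AsyncOpenSearch(hosts=[{'host': 'localhost', 'port': 9200}])
    try:
        # Issues POST /_snapshot/my_backup_repo/_cleanup
        return await client.snapshot.cleanup_repository(repository=repo)
    finally:
        await client.close()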
luci/luci-py
appengine/components/components/auth/b64.py
decode
python
def decode(data): if not isinstance(data, str): raise TypeError('Expecting str with base64 data') mod = len(data) % 4 if mod: data += '=' * (4 - mod) return base64.b64decode(data.replace('-', '+').replace('_', '/'))
Decodes URL-safe base64 (with '=' padding stripped) back to a bytes str.
https://github.com/luci/luci-py/blob/0417a3f6d73d0bcb92626dafe277ef79214c9087/appengine/components/components/auth/b64.py#L22-L29
import base64 def encode(data): if not isinstance(data, str): raise TypeError('Expecting str with binary data') urlsafe = base64.b64encode(data) return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')
Apache License 2.0
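A round-trip sketch for the encode/decode pair above (this module targets Python 2, where str is a byte string):

token = encode('binary\x00data\xff')
assert '=' not in token and '+' not in token and '/' not in token
assert decode(token) == 'binary\x00data\xff'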
xoeye/xoto3
xoto3/utils/stack_context.py
StackContext.__call__
python
def __call__(self) -> T: return self.get()
4 fewer characters than .get()
https://github.com/xoeye/xoto3/blob/ef91cde3cce81e1ded311389358271d5c8eba02b/xoto3/utils/stack_context.py#L62-L64
import contextlib as cl import contextvars as cv import typing as ty T = ty.TypeVar("T") F = ty.TypeVar("F", bound=ty.Callable) @cl.contextmanager def stack_context(contextvar: cv.ContextVar[T], value: T) -> ty.Iterator: try: token = contextvar.set(value) yield finally: contextvar.reset(token) class StackContext(ty.Generic[T]): def __init__(self, name: str, default: T): self._contextvar = cv.ContextVar(name, default=default) def set(self, value: T) -> ty.ContextManager[T]: return stack_context(self._contextvar, value) def get(self) -> T: return self._contextvar.get()
MIT License
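A small usage sketch for StackContext, where __call__ is just sugar for .get() (the context name and values are illustrative):

UserContext = StackContext('UserContext', default='anonymous')

def whoami() -> str:
    return UserContext()          # same as UserContext.get()

assert whoami() == 'anonymous'
with UserContext.set('alice'):
    assert whoami() == 'alice'    # value visible for the duration of the block
assert whoami() == 'anonymous'    # reset on exit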
cohesity/management-sdk-python
cohesity_management_sdk/models/user_quota_and_usage.py
UserQuotaAndUsage.from_dictionary
python
def from_dictionary(cls, dictionary): if dictionary is None: return None quota_policy = cohesity_management_sdk.models.quota_policy.QuotaPolicy.from_dictionary(dictionary.get('quotaPolicy')) if dictionary.get('quotaPolicy') else None sid = dictionary.get('sid') unix_uid = dictionary.get('unixUid') usage_bytes = dictionary.get('usageBytes') return cls(quota_policy, sid, unix_uid, usage_bytes)
Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.
https://github.com/cohesity/management-sdk-python/blob/1c085d5a10f5f1a87b700e7ad1fc1dcabda41ae5/cohesity_management_sdk/models/user_quota_and_usage.py#L70-L96
import cohesity_management_sdk.models.quota_policy class UserQuotaAndUsage(object): _names = { "quota_policy":'quotaPolicy', "sid":'sid', "unix_uid":'unixUid', "usage_bytes":'usageBytes' } def __init__(self, quota_policy=None, sid=None, unix_uid=None, usage_bytes=None): self.quota_policy = quota_policy self.sid = sid self.unix_uid = unix_uid self.usage_bytes = usage_bytes @classmethod
Apache License 2.0
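A sketch of from_dictionary in use, assuming the cohesity_management_sdk package is installed; the payload values are made up. It shows the camelCase API keys being mapped onto the snake_case attributes.

from cohesity_management_sdk.models.user_quota_and_usage import UserQuotaAndUsage

payload = {
    "sid": "S-1-5-21-1111111111-222222222-3333333333-1004",  # made-up SID
    "unixUid": 1001,
    "usageBytes": 4096,
}

usage = UserQuotaAndUsage.from_dictionary(payload)
print(usage.sid, usage.unix_uid, usage.usage_bytes)  # snake_case attributes
print(usage.quota_policy)                            # None: 'quotaPolicy' absent above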
square/connect-python-sdk
squareconnect/models/v1_create_item_request.py
V1CreateItemRequest.__init__
python
def __init__(self, body=None): self.swagger_types = { 'body': 'V1Item' } self.attribute_map = { 'body': 'body' } self._body = body
V1CreateItemRequest - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/v1_create_item_request.py#L30-L47
from pprint import pformat from six import iteritems import re class V1CreateItemRequest(object):
Apache License 2.0
emc-openstack/storops
storops/vnx/resource/nqm.py
VNXIOClass.policy
python
def policy(self): policies = VNXIOPolicy.get(cli=self._cli) ret = None for policy in policies: contained = policy.ioclasses.name if self._get_name() in contained: ret = VNXIOPolicy.get(name=policy.name, cli=self._cli) break return ret
Returns policy which contains this ioclass.
https://github.com/emc-openstack/storops/blob/7092c516c55b4c2f00c7c22383e1ad46ecfec091/storops/vnx/resource/nqm.py#L119-L128
from __future__ import unicode_literals from contextlib import contextmanager from storops.vnx.resource import VNXCliResourceList, VNXCliResource from storops.vnx.resource.lun import VNXLun, VNXLunList from storops.vnx.enums import VNXCtrlMethod from storops import exception as ex __author__ = "Peter Wang" def convert_lun(luns): lun_ids = [] smp_names = [] for lun in luns: if lun.is_snap_mount_point: smp_names.append(lun.name) else: lun_ids.append(lun.lun_id) return lun_ids, smp_names def normalize_lun(luns, cli): if isinstance(luns, int): return [VNXLun(lun_id=luns, cli=cli)] elif isinstance(luns, VNXLun): return [luns] elif isinstance(luns, list) or isinstance(luns, VNXLunList): return luns elif luns is None: return [] else: raise ValueError('Invalid format for luns.') def convert_ioclass(ioclasses): names = [] if ioclasses: for ioclass in ioclasses: names.append(ioclass.name) return names @contextmanager def restart_policy(policy): VNXIOPolicy.stop_policy(cli=policy._cli) try: yield policy except ex.StoropsException: pass policy.run_policy() class VNXIOClassList(VNXCliResourceList): def __init__(self, cli=None, name=None): super(VNXIOClassList, self).__init__() self._cli = cli self._name = name @classmethod def get_resource_class(cls): return VNXIOClass def _get_raw_resource(self): return self._cli.get_ioclass(name=self._name, poll=self.poll) def _filter(self, item): return 'Background Class' not in item.name class VNXIOClass(VNXCliResource): def __init__(self, name=None, cli=None): super(VNXIOClass, self).__init__() self._cli = cli self._name = name def _get_raw_resource(self): if self._cli is None: raise ValueError('client is not available for this resource.') return self._cli.get_ioclass(name=self._name, poll=self.poll) @staticmethod def get(cli, name=None): ret = VNXIOClassList(cli=cli) if name: ret = VNXIOClass(name=name, cli=cli) return ret @property def luns(self): lun_list, smp_list = [], [] if self.ioclass_luns: lun_list = map(lambda l: VNXLun(lun_id=l.lun_id, name=l.name, cli=self._cli), self.ioclass_luns) if self.ioclass_snapshots: smp_list = map(lambda smp: VNXLun(name=smp.name, cli=self._cli), self.ioclass_snapshots) return list(lun_list) + list(smp_list) @property
Apache License 2.0
raymondbutcher/pretf
pretf/pretf/util.py
import_file
python
def import_file(path: Union[PurePath, str]) -> Generator[ModuleType, None, None]: pathdir = os.path.dirname(path) if pathdir in sys.path: added_to_sys_path = False else: sys.path.insert(0, pathdir) added_to_sys_path = True try: name = os.path.basename(path).split(".")[0] spec = spec_from_file_location(name, str(path)) module = module_from_spec(spec) assert isinstance(spec.loader, Loader) loader: Loader = spec.loader try: loader.exec_module(module) except Exception as error: log.bad(error) raise yield module finally: if added_to_sys_path: sys.path.remove(pathdir)
Imports a Python module from any local filesystem path. Temporarily alters sys.path to allow the imported module to import other modules in the same directory.
https://github.com/raymondbutcher/pretf/blob/3d2fe7619ce0adc38d5f0765e993bee2fd8c4bbd/pretf/pretf/util.py#L190-L218
import os import shlex import sys from contextlib import contextmanager from fnmatch import fnmatch from importlib.abc import Loader from importlib.util import module_from_spec, spec_from_file_location from io import StringIO from pathlib import Path, PurePath from subprocess import PIPE, CalledProcessError, CompletedProcess, Popen from threading import Thread from types import ModuleType from typing import ( IO, BinaryIO, Generator, List, Optional, Sequence, TextIO, Tuple, Union, ) from . import log def execute( file: str, args: Sequence[str], cwd: Optional[Union[Path, str]] = None, env: Optional[dict] = None, capture: bool = False, verbose: Optional[bool] = None, ) -> CompletedProcess: if env is None: env = os.environ.copy() if is_verbose(verbose): log.ok(f"run: {' '.join(shlex.quote(arg) for arg in args)}") if capture: return _execute_and_capture(file, args, cwd, env, verbose) else: return _execute(file, args, cwd, env) def _execute( file: str, args: Sequence[str], cwd: Optional[Union[Path, str]], env: dict ) -> CompletedProcess: proc = Popen(args, executable=file, cwd=cwd, env=env) while True: try: returncode = proc.wait() except KeyboardInterrupt: pass else: break if returncode != 0: raise CalledProcessError( returncode=returncode, cmd=" ".join(shlex.quote(arg) for arg in args), ) return CompletedProcess(args=args, returncode=returncode) def _execute_and_capture( file: str, args: Sequence[str], cwd: Optional[Union[Path, str]], env: dict, verbose: Optional[bool], ) -> CompletedProcess: stdout_buffer = StringIO() stderr_buffer = StringIO() proc = Popen(args, executable=file, stdout=PIPE, stderr=PIPE, cwd=cwd, env=env) stdout_args: List[Optional[IO]] = [proc.stdout, stdout_buffer] if is_verbose(verbose): stdout_args.append(sys.stdout) stdout_thread = Thread(target=_fan_out, args=stdout_args) stdout_thread.start() stderr_args = [proc.stderr, stderr_buffer, sys.stderr] stderr_thread = Thread(target=_fan_out, args=stderr_args) stderr_thread.start() while True: try: returncode = proc.wait() except KeyboardInterrupt: pass else: break stdout_thread.join() stderr_thread.join() stdout_buffer.seek(0) stderr_buffer.seek(0) if returncode != 0: raise CalledProcessError( returncode=returncode, cmd=" ".join(shlex.quote(arg) for arg in args), output=stdout_buffer.read(), stderr=stderr_buffer.read(), ) return CompletedProcess( args=args, returncode=returncode, stdout=stdout_buffer.read(), stderr=stderr_buffer.read(), ) def _fan_out(input_steam: BinaryIO, *output_streams: TextIO) -> None: while True: char = input_steam.read(1).decode() if char: for output_stream in output_streams: output_stream.write(char) else: break def find_paths( path_patterns: Sequence[str], exclude_name_patterns: Sequence[str] = [], cwd: Optional[Union[Path, str]] = None, ) -> Generator[Path, None, None]: if cwd is None: cwd = Path.cwd() elif isinstance(cwd, str): cwd = Path(cwd) for pattern in path_patterns: for path in cwd.glob(pattern): for exclude_name_pattern in exclude_name_patterns: if fnmatch(path.name, exclude_name_pattern): break else: yield path def find_workflow_path(cwd: Optional[Union[Path, str]] = None) -> Optional[Path]: if cwd is None: cwd = Path.cwd() elif isinstance(cwd, str): cwd = Path(cwd) for name in ("pretf.workflow.py", "pretf.py"): path = cwd / name if path.exists(): return path for dir_path in path.parents: path = dir_path / name if path.exists(): return path return None @contextmanager
MIT License
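A self-contained sketch of import_file, assuming the function is importable as pretf.util.import_file per the repository layout above; the temporary module and its hello function are invented for illustration.

import pathlib
import tempfile

from pretf.util import import_file

with tempfile.TemporaryDirectory() as tmp:
    path = pathlib.Path(tmp) / "example.py"
    path.write_text("def hello():\n    return 'hi'\n")
    # The module's directory is added to sys.path only for the duration
    # of the with-block, then removed again.
    with import_file(path) as module:
        assert module.hello() == "hi"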
qiskit/qiskit-aqua
qiskit/chemistry/applications/molecular_ground_state_energy.py
MolecularGroundStateEnergy.get_default_solver
python
def get_default_solver(quantum_instance: Union[QuantumInstance, Backend, BaseBackend]) -> Optional[Callable[[List, int, str, bool, Z2Symmetries], MinimumEigensolver]]: def cb_default_solver(num_particles, num_orbitals, qubit_mapping, two_qubit_reduction, z2_symmetries): initial_state = HartreeFock(num_orbitals, num_particles, qubit_mapping, two_qubit_reduction, z2_symmetries.sq_list) var_form = UCCSD(num_orbitals=num_orbitals, num_particles=num_particles, initial_state=initial_state, qubit_mapping=qubit_mapping, two_qubit_reduction=two_qubit_reduction, z2_symmetries=z2_symmetries) vqe = VQE(var_form=var_form) vqe.quantum_instance = quantum_instance return vqe return cb_default_solver
Get the default solver callback that can be used with :meth:`compute_energy` Args: quantum_instance: A Backend/Quantum Instance for the solver to run on Returns: Default solver callback
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/qiskit/chemistry/applications/molecular_ground_state_energy.py#L144-L168
import warnings from typing import List, Optional, Callable, Union from qiskit.providers import BaseBackend from qiskit.providers import Backend from qiskit.aqua import QuantumInstance from qiskit.aqua.algorithms import MinimumEigensolver, VQE from qiskit.aqua.operators import Z2Symmetries from qiskit.chemistry import QiskitChemistryError from qiskit.chemistry.circuit.library import HartreeFock from qiskit.chemistry.components.variational_forms import UCCSD from qiskit.chemistry.core import (Hamiltonian, TransformationType, QubitMappingType, ChemistryOperator, MolecularGroundStateResult) from qiskit.chemistry.drivers import FermionicDriver class MolecularGroundStateEnergy: def __init__(self, driver: FermionicDriver, solver: Optional[MinimumEigensolver] = None, transformation: TransformationType = TransformationType.FULL, qubit_mapping: QubitMappingType = QubitMappingType.PARITY, two_qubit_reduction: bool = True, freeze_core: bool = False, orbital_reduction: Optional[List[int]] = None, z2symmetry_reduction: Optional[Union[str, List[int]]] = None) -> None: warnings.warn('The MolecularGroundStateEnergy class is deprecated as of Qiskit Aqua 0.8.0 ' 'and will be removed no earlier than 3 months after the release date. ' 'Instead, the GroundStateCalculation class can be used.', DeprecationWarning, stacklevel=2) self._driver = driver self._solver = solver self._transformation = transformation self._qubit_mapping = qubit_mapping self._two_qubit_reduction = two_qubit_reduction self._freeze_core = freeze_core self._orbital_reduction = orbital_reduction self._z2symmetry_reduction = z2symmetry_reduction @property def driver(self) -> FermionicDriver: return self._driver @driver.setter def driver(self, driver: FermionicDriver) -> None: self._driver = driver @property def solver(self) -> MinimumEigensolver: return self._solver @solver.setter def solver(self, solver: MinimumEigensolver) -> None: self._solver = solver def compute_energy(self, callback: Optional[Callable[[List, int, str, bool, Z2Symmetries], MinimumEigensolver]] = None ) -> MolecularGroundStateResult: if self.solver is None and callback is None: raise QiskitChemistryError('MinimumEigensolver was not provided') q_molecule = self.driver.run() core = Hamiltonian(transformation=self._transformation, qubit_mapping=self._qubit_mapping, two_qubit_reduction=self._two_qubit_reduction, freeze_core=self._freeze_core, orbital_reduction=self._orbital_reduction, z2symmetry_reduction=self._z2symmetry_reduction) operator, aux_operators = core.run(q_molecule) if callback is not None: num_particles = core.molecule_info[ChemistryOperator.INFO_NUM_PARTICLES] num_orbitals = core.molecule_info[ChemistryOperator.INFO_NUM_ORBITALS] two_qubit_reduction = core.molecule_info[ChemistryOperator.INFO_TWO_QUBIT_REDUCTION] z2_symmetries = core.molecule_info[ChemistryOperator.INFO_Z2SYMMETRIES] self.solver = callback(num_particles, num_orbitals, self._qubit_mapping.value, two_qubit_reduction, z2_symmetries) aux_operators = aux_operators if self.solver.supports_aux_operators() else None raw_result = self.solver.compute_minimum_eigenvalue(operator, aux_operators) return core.process_algorithm_result(raw_result) @staticmethod
Apache License 2.0
threatconnect-inc/tcex
tcex/tcex_error_codes.py
TcExErrorCodes.message
python
def message(self, code): return self.errors.get(code)
Return the error message. Args: code (integer): The error code integer. Returns: (string): The error message.
https://github.com/threatconnect-inc/tcex/blob/dae37b73d8b33cf26360f6d25c6b305a68f2f0e2/tcex/tcex_error_codes.py#L73-L82
class TcExErrorCodes: @property def errors(self): return { 100: 'Generic error. See log for more details ({}).', 105: 'Required Module is not installed ({}).', 200: 'Failed retrieving Custom Indicator Associations types from API ({}).', 210: 'Failure during token renewal ({}).', 215: 'HMAC authorization requires a PreparedRequest Object.', 220: 'Failed retrieving indicator types from API ({}).', 300: 'Failed retrieving Bulk JSON ({}).', 305: 'An invalid action/association name ({}) was provided.', 350: 'Data Store request failed. API status code: {}, API message: {}.', 520: 'File Occurrences can only be added to a File. Current type: {}.', 540: 'Failed polling batch status ({}).', 545: 'Failed polling batch status. API status code: {}, API message: {}.', 550: 'Batch status check reached timeout ({} seconds).', 560: 'Failed retrieving batch errors ({}).', 580: 'Failed posting file data ({}).', 585: 'Failed posting file data. API status code: {}, API message: {}.', 590: 'No hash values provided.', 600: 'Failed adding group type "{}" with name "{}" ({}).', 605: 'Failed adding attribute type "{}" with value "{}" to group id "{}" ({}).', 610: 'Failed adding label "{}" to group id "{}" ({}).', 615: 'Failed adding tag "{}" to group id "{}" ({}).', 650: 'Failed adding label "{}" to attribute id "{}" ({}).', 700: 'Failed to create metric. API status code: {}, API message: {}.', 705: 'Error while finding metric by name. API status code: {}, API message: {}.', 710: 'Failed to add metric data. API status code: {}, API message: {}.', 715: 'No metric ID found for "{}".', 750: 'Failed to send notification. API status code: {}, API message: {}.', 800: 'Failed to create index. API status code: {}, API message: {}.', 805: 'Failed to {} record data. API status code: {}, API message: {}.', 905: 'Error during update. {} does not have a unique_id set and cannot be updated.', 910: 'Error during get. {} does not have a unique_id set and cannot be fetched.', 915: 'Error during delete. {} does not have a unique_id set and cannot be deleted.', 920: ( 'Error during create. {} does not have required values set and cannot be ' 'created.' ), 925: 'Error invalid {}. {} does not accept that {}, {}: {}.', 950: 'Error during pagination. API status code: {}, API message: {}, API Url: {}.', 951: 'Error during {}. API status code: {}, API message: {}, API Url: {}.', 952: 'Error during {}. API status code: {}, API message: {}, API Url: {}.', 10500: 'Critical batch error ({}).', 10505: 'Failed submitting batch job requests ({}).', 10510: 'Failed submitting batch job requests. API status code: {}, API message: {}.', 10520: 'Failed submitting batch data ({}).', 10525: 'Failed submitting batch data. API status code: {}, API message: {}.', }
Apache License 2.0
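A usage sketch for the message accessor; the code values come straight from the errors table shown in the context, and the import path follows the file path above.

from tcex.tcex_error_codes import TcExErrorCodes

codes = TcExErrorCodes()

print(codes.message(215))
# HMAC authorization requires a PreparedRequest Object.

print(codes.message(105).format('requests'))
# Required Module is not installed (requests).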
shinmorino/qgate
qgate/openqasm/yacc.py
p_include
python
def p_include(p) : if p[2] == '"qelib1.inc"' : p[0] = True else : raise NotImplementedError('include is not implemented.')
include : INCLUDE FILENAME ';'
https://github.com/shinmorino/qgate/blob/b8a7da1475c7f43c1050e8f2e10806a3cfe748e3/qgate/openqasm/yacc.py#L47-L52
import ply.yacc as yacc import logging from . import lex from .lex import tokens, literals precedence = ( ('left', '+', '-'), ('left', '*', '/'), ('left', '^'), ('right', 'uminus'), ) def p_mainprogram(p) : def p_program_begin(p) : this.analyzer.open_program() def p_program_end(p) : this.analyzer.close_program() def p_header_or_empty(p) : pass def p_includelist_or_empty(p) : pass def p_includelist(p) : pass
Apache License 2.0
fisco-bcos/generator
pys/tool/utils.py
valid_chain_id
python
def valid_chain_id(chain_id): try: int(chain_id) return True except ValueError as utils_exp: LOGGER.error('%s is not a valid chain_id', utils_exp) except Exception as utils_exp: LOGGER.error('%s is not a valid chain_id', utils_exp) raise MCError( '%s is not a valid chain_id' % utils_exp)
[Determine if the chain id is valid] Arguments: chain_id {[string]} -- [chain id] Returns: [bool] -- [true or false]
https://github.com/fisco-bcos/generator/blob/59ad43980aa0a7ea3efb018eba968f411f0adcd4/pys/tool/utils.py#L169-L187
import sys import re import os import subprocess import shutil from pys.error.exp import MCError from pys.log import LOGGER, CONSOLER from pys import path if sys.version > '3': import urllib.request import urllib.error else: import urllib import urllib2 class Status(object): gm_option = False gm_ssl = False unit_time = False allow_unsecure_cfg = False use_cdn = False download_console_shell_script = "download_console.sh" default_console_version = "2.8.0" download_console_version = default_console_version download_console_version_specified = False solidity_version = "" solidity_version_specified = False def __init__(self): self. gm_option = False def get_gm_status(self): return self.gm_option def get_gmssl_status(self): return self.gm_ssl def get_ut_status(self): return self.unit_time def set_allow_unsecure_cfg(self): self.allow_unsecure_cfg = True def get_cnd_status(self): return self.use_cdn def set_cdn(): Status.use_cdn = True def console_use_xml_configuration(): version_list = Status.download_console_version.split(".") if len(version_list) < 3: raise MCError( '%s invalid download_console_version' % Status.download_console_version) major_version = version_list[0] if int(major_version) >= 2: return False else: return True def set_download_console_version(version): Status.download_console_version = version Status.download_console_version_specified = True CONSOLER.debug("expect to download console %s", Status.download_console_version) def set_solidity_version(version): Status.solidity_version = version Status.solidity_version_specified = True LOGGER.debug('expected solidity version is %s', Status.solidity_version) def set_gm(): Status.gm_option = True def set_gmssl(): Status.gm_ssl = True def off_gm(): Status.gm_option = False def off_gmssl(): Status.gm_ssl = False def get_status(): return Status.gm_option
Apache License 2.0
shunk031/chainer-skin-lesion-detector
src/make_dataset.py
get_bbox_from_gt
python
def get_bbox_from_gt(gt): return gt.convert('RGB').getbbox()
Get bounding box from ground truth image
https://github.com/shunk031/chainer-skin-lesion-detector/blob/1865eb0bd287b3493f2bbe48720b567ebd9f3850/src/make_dataset.py#L72-L76
import argparse import xml.etree.ElementTree as ET from multiprocessing.pool import Pool from xml.dom import minidom from PIL import Image from tqdm import tqdm from util import const def get_fpaths(data_dir, suffix): return [fpath for fpath in sorted(data_dir.iterdir(), key=lambda x: x.name) if fpath.suffix == suffix] def make_voc_based_xml(folder_name, file_name, bbox): left, upper, right, lower = bbox annotation = ET.Element('annotation') annotation = ET.Element('annotation') tree = ET.ElementTree(element=annotation) folder = ET.SubElement(annotation, 'folder') filename = ET.SubElement(annotation, 'filename') objects = ET.SubElement(annotation, 'object') name = ET.SubElement(objects, 'name') pose = ET.SubElement(objects, 'pose') truncated = ET.SubElement(objects, 'truncated') difficult = ET.SubElement(objects, 'difficult') bndbox = ET.SubElement(objects, 'bndbox') xmin = ET.SubElement(bndbox, 'xmin') ymin = ET.SubElement(bndbox, 'ymin') xmax = ET.SubElement(bndbox, 'xmax') ymax = ET.SubElement(bndbox, 'ymax') folder.text = folder_name filename.text = file_name name.text = 'lesion' pose.text = 'frontal' truncated.text = '1' difficult.text = '0' xmin.text = str(left) ymin.text = str(upper) xmax.text = str(right) ymax.text = str(lower) return annotation def save_voc_based_xml(xml_file, xml_fpath): xml_file = pretify_xml(xml_file) with xml_fpath.open('w') as wf: wf.write(xml_file) def load_image(img_fpath): return Image.open(str(img_fpath))
MIT License
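A self-contained sketch of what the helper computes, using a synthetic in-memory mask instead of an ISIC ground-truth file; the one-liner is repeated locally so the example runs on its own.

from PIL import Image

def get_bbox_from_gt(gt):
    # Same one-liner as above: bounding box of the non-black region.
    return gt.convert('RGB').getbbox()

gt = Image.new('L', (100, 100), 0)   # all-black 100x100 mask
gt.paste(255, (20, 30, 60, 80))      # white "lesion" rectangle
print(get_bbox_from_gt(gt))          # (20, 30, 60, 80)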
yaqwsx/kikit
kikit/present.py
Template._copyResources
python
def _copyResources(self, outputDirectory): for pattern in self.parameters["resources"]: for path in glob.glob(os.path.join(self.directory, pattern), recursive=True): copyRelativeTo(self.directory, path, outputDirectory) for pattern in self.extraResources: for path in glob.glob(pattern, recursive=True): copyRelativeTo(".", path, outputDirectory)
Copy all resource files specified by template.json and further specified by addResource to the output directory.
https://github.com/yaqwsx/kikit/blob/cddb09d784fbd84d3736538f466597f43e6ab045/kikit/present.py#L69-L79
import click from pathlib import Path import sys import os import json import glob import shutil import subprocess import tempfile import markdown2 import pybars from datetime import datetime from kikit import export def resolveTemplatePath(path): if os.path.exists(os.path.join(path, "template.json")): return path PKG_BASE = os.path.dirname(__file__) TEMPLATES = os.path.join(PKG_BASE, "resources/present/templates") if os.path.exists(os.path.join(TEMPLATES, path, "template.json")): return os.path.join(TEMPLATES, path) raise RuntimeError("'{}' is not a name or a path for existing template. Perhaps you miss template.json in the template?") def readTemplate(path): templateClasses = { "HtmlTemplate": HtmlTemplate } path = resolveTemplatePath(path) with open(os.path.join(path, "template.json")) as jsonFile: parameters = json.load(jsonFile) try: tType = parameters["type"] except KeyError: raise RuntimeError("Invalid template.json - missing 'type'") try: return templateClasses[tType](path) except KeyError: raise RuntimeError("Unknown template type '{}'".format(tType)) def copyRelativeTo(sourceTree, sourceFile, outputDir): sourceTree = os.path.abspath(sourceTree) sourceFile = os.path.abspath(sourceFile) relPath = os.path.relpath(sourceFile, sourceTree) outputDir = os.path.join(outputDir, os.path.dirname(relPath)) Path(outputDir).mkdir(parents=True, exist_ok=True) shutil.copy(sourceFile, outputDir) class Template: def __init__(self, directory): self.directory = directory with open(os.path.join(directory, "template.json")) as jsonFile: self.parameters = json.load(jsonFile) self.extraResources = [] self.boards = [] self.name = None self.repository = None
MIT License
codefordc/housing-insights
back_end/app.py
make_table
python
def make_table(table_name, password): if password != get_credentials('load-data-password'): send_mail('Invalid data loading attempted.') return '<h1>Invalid Password: Please Try Again</h1>' if table_name not in table_loaders.keys(): return ''' <h1>Invalid Table Name: Please Try Again</h1> <h2>Tables Are:</h2> <ul> <li>crime</li> <li>acs</li> <li>permit</li> <li>project</li> <li>wmata</li> </ul> ''' if table_loaders[table_name](engine): send_mail('Loaded {} table.'.format(table_name)) return '<h1>Success! Loaded {} table.</h1>'.format(table_name) return ''' <h1>Unable to load {} table.</h1> <h2>The source data may be unavailable.</h2> <h2>Housing insights will load the backup data.</h2> '''.format(table_name)
This function allows CNHED staff to load a database table "manually". See the documentation for clear instructions on creating tables.
https://github.com/codefordc/housing-insights/blob/131260edf4482e176d9a93cae44553060cd33209/back_end/app.py#L166-L194
import datetime from mailer import send_mail from flask import Flask, jsonify, request from flask_cors import cross_origin from flask_apscheduler import APScheduler import ETL from sqlalchemy import create_engine from ETL.utils import get_credentials, basic_query app = Flask(__name__) app.config['SCHEDULER_API_ENABLED'] = True scheduler = APScheduler() scheduler.init_app(app) engine = create_engine(get_credentials('engine-string')) table_loaders = { 'acs': ETL.load_acs_data, 'crime': ETL.load_crime_data, 'permit': ETL.load_permit_data, 'project': ETL.load_project_data, 'subsidy': ETL.load_subsidy_data, 'zone_facts': ETL.make_zone_facts, 'wmata': ETL.make_wmata_tables, } @app.route('/', methods=['GET']) def index(): return 'At the housing-insights back-end.' @app.route('/api/meta', methods=['GET']) @cross_origin() def get_meta(): result = basic_query("SELECT meta FROM meta") print(result) return result[0]['meta'] @app.route('/site-map', methods=['GET']) def site_map(): return jsonify([str(rule) for rule in app.url_map.iter_rules() if 'GET' in rule.methods]) @app.route('/api/project', methods=['GET']) @cross_origin() def all_projects(nlihc_id=None): where = f" WHERE nlihc_id = '{nlihc_id}'" if nlihc_id else '' result = basic_query('SELECT * FROM new_project'+where+';') return jsonify({'objects': result}) @app.route('/api/project/<string:nlihc_id>', methods=['GET']) @cross_origin() def project(nlihc_id=None): where = f" WHERE nlihc_id = '{nlihc_id}'" if nlihc_id else '' result = basic_query('SELECT * FROM new_project'+where+';') return jsonify({'objects': result}) @app.route('/api/project/<nlihc_id>/subsidies/', methods=['GET']) @cross_origin() def project_subsidies(nlihc_id): result = basic_query(f"SELECT * FROM new_subsidy WHERE nlihc_id = '{nlihc_id}';") return jsonify({'objects': result}) @app.route('/api/filter') @cross_origin() def filter(): result = basic_query(ETL.filter_query) return jsonify({'objects': result}) @app.route('/api/zone_facts/<column_name>/<grouping>', methods = ['GET']) @cross_origin() def zone_facts(column_name='poverty_rate', grouping='ward'): try: if grouping not in ['ward', 'tract', 'neighborhood_cluster']: if grouping == 'census_tract': grouping = 'tract' else: raise ValueError('Not valid grouping') result = basic_query(''' SELECT zone, {} FROM new_zone_facts WHERE zone_type = '{}' ORDER BY zone;'''.format(column_name, grouping)) status = 'success' except: result = [] status = 'Not found' output = {'status': status, 'grouping': grouping, 'column_name': column_name, 'objects': result} return jsonify(output) @app.route('/api/wmata/<nlihc_id>', methods=['GET']) @cross_origin() def nearby_transit(nlihc_id): result = basic_query(f"SELECT * FROM new_wmata_dist WHERE nlihc_id = '{nlihc_id}';") result = ETL.wmata_helper(result) return jsonify(result) @app.route('/api/projects/<dist>', methods=['GET']) @cross_origin() def nearby_projects(dist): latitude = request.args.get('latitude', None) longitude = request.args.get('longitude', None) if not (latitude and longitude): return "Please supply latitude and longitude" return jsonify(ETL.nearby_projects( float(dist), float(latitude), float(longitude))) @app.route('/make_table/<table_name>/<password>')
MIT License
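A sketch of how the manual-load route above is hit over HTTP; the host and password are placeholders, not real values.

import requests

resp = requests.get("https://example.org/make_table/crime/PASSWORD")
print(resp.status_code, resp.text)
# 200 <h1>Success! Loaded crime table.</h1>   (when the password and table name are valid)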
viblo/pymunk
pymunk/vec2d.py
Vec2d.get_angle_degrees_between
python
def get_angle_degrees_between(self, other: "Vec2d") -> float: return math.degrees(self.get_angle_between(other))
Get the angle between the vector and the other in degrees :return: The angle (in degrees)
https://github.com/viblo/pymunk/blob/56885484b8cd794e995480d448b9c089fc7135d9/pymunk/vec2d.py#L260-L265
__docformat__ = "reStructuredText" import math import numbers import operator from typing import NamedTuple, Tuple __all__ = ["Vec2d"] class Vec2d(NamedTuple): x: float y: float def __repr__(self) -> str: return "Vec2d(%s, %s)" % (self.x, self.y) def __add__(self, other: Tuple[float, float]) -> "Vec2d": assert ( len(other) == 2 ), f"{other} not supported. Only Vec2d and Sequence of length 2 is supported." return Vec2d(self.x + other[0], self.y + other[1]) def __radd__(self, other: Tuple[float, float]) -> "Vec2d": return self.__add__(other) def __sub__(self, other: Tuple[float, float]) -> "Vec2d": return Vec2d(self.x - other[0], self.y - other[1]) def __rsub__(self, other: Tuple[float, float]) -> "Vec2d": assert ( len(other) == 2 ), f"{other} not supported. Only Vec2d and Sequence of length 2 is supported." return Vec2d(other[0] - self.x, other[1] - self.y) def __mul__(self, other: float) -> "Vec2d": assert isinstance(other, numbers.Real) return Vec2d(self.x * other, self.y * other) def __rmul__(self, other: float) -> "Vec2d": return self.__mul__(other) def __floordiv__(self, other: float) -> "Vec2d": assert isinstance(other, numbers.Real) return Vec2d(self.x // other, self.y // other) def __truediv__(self, other: float) -> "Vec2d": assert isinstance(other, numbers.Real) return Vec2d(self.x / other, self.y / other) def __neg__(self) -> "Vec2d": return Vec2d(operator.neg(self.x), operator.neg(self.y)) def __pos__(self) -> "Vec2d": return Vec2d(operator.pos(self.x), operator.pos(self.y)) def __abs__(self) -> float: return self.length def get_length_sqrd(self) -> float: return self.x ** 2 + self.y ** 2 @property def length(self) -> float: return math.sqrt(self.x ** 2 + self.y ** 2) def scale_to_length(self, length: float) -> "Vec2d": old_length = self.length return Vec2d(self.x * length / old_length, self.y * length / old_length) def rotated(self, angle_radians: float) -> "Vec2d": cos = math.cos(angle_radians) sin = math.sin(angle_radians) x = self.x * cos - self.y * sin y = self.x * sin + self.y * cos return Vec2d(x, y) def rotated_degrees(self, angle_degrees: float) -> "Vec2d": return self.rotated(math.radians(angle_degrees)) @property def angle(self) -> float: if self.get_length_sqrd() == 0: return 0 return math.atan2(self.y, self.x) @property def angle_degrees(self) -> float: return math.degrees(self.angle) def get_angle_between(self, other: Tuple[float, float]) -> float: assert len(other) == 2 cross = self.x * other[1] - self.y * other[0] dot = self.x * other[0] + self.y * other[1] return math.atan2(cross, dot)
MIT License
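A worked example for the angle helpers above, using the unit vectors along the x and y axes; pymunk exposes Vec2d at the package level.

from pymunk import Vec2d

a = Vec2d(1, 0)   # +x axis
b = Vec2d(0, 1)   # +y axis

print(a.get_angle_between(b))          # 1.5707963..., i.e. pi/2 radians
print(a.get_angle_degrees_between(b))  # 90.0
print(b.get_angle_degrees_between(a))  # -90.0 (signed; counter-clockwise is positive)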
shenyunhang/ws-jds
detectron/roi_data/retinanet_wsl.py
get_wsl_blob_names
python
def get_wsl_blob_names(is_training=True): blob_names = ['im_info'] if is_training: blob_names += ['cls_labels'] return blob_names
Returns blob names in the order in which they are read by the data loader.
https://github.com/shenyunhang/ws-jds/blob/4827791640c5affef1af4b548333694973f5c0d5/detectron/roi_data/retinanet_wsl.py#L16-L26
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import logging import detectron.utils.boxes as box_utils import detectron.roi_data.data_utils as data_utils from detectron.core.config import cfg logger = logging.getLogger(__name__)
Apache License 2.0
nickjj/flask-pg-extras
flask_pg_extras/__init__.py
FlaskPGExtras.init_app
python
def init_app(self, app): pass
Mutate the application passed in as explained here: https://flask.palletsprojects.com/en/1.1.x/extensiondev/ :param app: Flask application :return: None
https://github.com/nickjj/flask-pg-extras/blob/c69200665f5a17dff163206367418c41c7953bf2/flask_pg_extras/__init__.py#L12-L20
from sqlalchemy import text from tabulate import tabulate class FlaskPGExtras(object): def __init__(self, app=None): self.app = app if app is not None: self.init_app(app)
MIT License
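The two initialization styles the constructor above supports (init_app itself is still a no-op placeholder); Flask and the flask_pg_extras package are assumed to be installed.

from flask import Flask
from flask_pg_extras import FlaskPGExtras

app = Flask(__name__)

fpe = FlaskPGExtras(app)       # direct: __init__ calls init_app(app)

fpe_lazy = FlaskPGExtras()     # application-factory style
fpe_lazy.init_app(app)         # attach to the app later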
daili0015/modelfeast
models/classifiers/squeezenet.py
squeezenet1_0
python
def squeezenet1_0(n_class, img_size=(224, 224), pretrained=False, pretrained_path="./pretrained/"): param = {'model_url': model_urls['squeezenet1_0'], 'file_name': model_names['squeezenet1_0'], 'model_version': 1.0, 'n_class': n_class, 'img_size': img_size } return get_squeezenet(param, pretrained, pretrained_path)
r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
https://github.com/daili0015/modelfeast/blob/0689ced4d0f37be438d3a91908e5e4cc5b7d54b8/models/classifiers/squeezenet.py#L71-L77
import logging import os import torch from torch import load as TorchLoad import torch.utils.model_zoo as model_zoo from models.classifiers.Squeezenet_module import SqueezeNet __all__ = ['squeezenet', 'squeezenet1_0', 'squeezenet1_1'] model_urls = { 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth', 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth', } model_names = { 'squeezenet1_0': 'squeezenet1_0-a815701f.pth', 'squeezenet1_1': 'squeezenet1_1-f364aa15.pth', } def get_squeezenet(param, pretrained = False, pretrained_path="./pretrained/"): if isinstance(param['img_size'], (tuple, list)): h, w = param['img_size'][0], param['img_size'][1] else: h = w = param['img_size'] model = SqueezeNet(param['model_version'], num_classes=1000) model.img_size = (h, w) if pretrained: if os.path.exists(os.path.join(pretrained_path, param['file_name'])): model.load_state_dict(TorchLoad(os.path.join(pretrained_path, param['file_name']))) logging.info("Find local model file, load model from local !!") logging.info("找到本地下载的预训练模型!!载入权重!!") else: logging.info("pretrained 文件夹下没有,从网上下载 !!") model.load_state_dict(model_zoo.load_url(param['model_url'], model_dir = pretrained_path)) logging.info("下载完毕!!载入权重!!") model.adaptive_set_classifier(param['n_class']) return model def squeezenet(n_class, img_size=(224, 224), pretrained=False, pretrained_path="./pretrained/"): return squeezenet1_1(n_class, img_size, pretrained, pretrained_path)
MIT License
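A sketch of building and running the 1.0 variant; the import path follows the repository layout above, and the printed output shape assumes the classifier is adapted to n_class as described.

import torch
from models.classifiers.squeezenet import squeezenet1_0

model = squeezenet1_0(n_class=10, img_size=(224, 224), pretrained=False)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)   # expected: torch.Size([1, 10])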
vincent-lg/tsunami
src/secondaires/navigation/equipage/volontes/virer_babord.py
VirerBabord.crier_ordres
python
def crier_ordres(self, personnage): direction = int((self.navire.direction.direction - self.direction) % 180) msg = "{} s'écrie : virez de {}° bâbord !".format( personnage.distinction_audible, direction) self.navire.envoyer(msg)
Have the character shout the order.
https://github.com/vincent-lg/tsunami/blob/36b3b974f6eefbf15cd5d5f099fc14630e66570b/src/secondaires/navigation/equipage/volontes/virer_babord.py#L65-L71
import re from corps.fonctions import lisser from secondaires.navigation.equipage.ordres.long_deplacer import LongDeplacer from secondaires.navigation.equipage.ordres.relacher_gouvernail import RelacherGouvernail from secondaires.navigation.equipage.ordres.tenir_gouvernail import TenirGouvernail from secondaires.navigation.equipage.ordres.virer import Virer as OrdreVirer from secondaires.navigation.equipage.volontes.virer_gouvernail import VirerGouvernail class VirerBabord(VirerGouvernail): cle = "virer_babord" ordre_court = re.compile(r"^vb([0-9]{1,3})$", re.I) ordre_long = re.compile(r"^virer\s+babord\s+([0-9]{1,3})$", re.I) def executer(self, couple): self.navire.equipage.retirer_controle("direction") VirerGouvernail.executer(self, couple)
BSD 3-Clause New or Revised License
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/stream_metadata.py
StreamMetadata.__eq__
python
def __eq__(self, other): if not isinstance(other, StreamMetadata): return False return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/stream_metadata.py#L97-L102
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model import pprint import six class StreamMetadata(object): @poscheck_model def __init__(self, language=None): self._language = None self.discriminator = None if language is not None: self.language = language @property def openapi_types(self): types = { 'language': 'string_types' } return types @property def attribute_map(self): attributes = { 'language': 'language' } return attributes @property def language(self): return self._language @language.setter def language(self, language): if language is not None: if not isinstance(language, string_types): raise TypeError("Invalid type for `language`, type has to be `string_types`") self._language = language def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if value is None: continue if isinstance(value, list): if len(value) == 0: continue result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]] elif hasattr(value, "to_dict"): result[self.attribute_map.get(attr)] = value.to_dict() elif isinstance(value, Enum): result[self.attribute_map.get(attr)] = value.value elif isinstance(value, dict): result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()} else: result[self.attribute_map.get(attr)] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
MIT License
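Value equality as implemented above, shown on a few instances; the import path follows the file path shown and the language codes are arbitrary.

from bitmovin_api_sdk.models.stream_metadata import StreamMetadata

a = StreamMetadata(language='en')
b = StreamMetadata(language='en')
c = StreamMetadata(language='de')

assert a == b                    # same attribute dict
assert a != c                    # different language
assert a != {'language': 'en'}   # not a StreamMetadata, so never equal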
csbdeep/csbdeep
csbdeep/data/prepare.py
PercentileNormalizer.before
python
def before(self, x, axes): self.axes_before = axes_check_and_normalize(axes,x.ndim) axis = tuple(d for d,a in enumerate(self.axes_before) if a != 'C') self.mi = np.percentile(x,self.pmin,axis=axis,keepdims=True).astype(self.dtype,copy=False) self.ma = np.percentile(x,self.pmax,axis=axis,keepdims=True).astype(self.dtype,copy=False) return normalize_mi_ma(x, self.mi, self.ma, dtype=self.dtype, **self.kwargs)
Percentile-based normalization of raw input image. See :func:`csbdeep.predict.Normalizer.before` for parameter descriptions. Note that percentiles are computed individually for each channel (if present in `axes`).
https://github.com/csbdeep/csbdeep/blob/7d0c957bcd5d9923d5d3a8d2a25e509650a58dca/csbdeep/data/prepare.py#L120-L130
from __future__ import print_function, unicode_literals, absolute_import, division from six.moves import range, zip, map, reduce, filter from ..utils import _raise, consume, normalize_mi_ma, axes_dict, axes_check_and_normalize, move_image_axes import warnings import numpy as np from six import add_metaclass from abc import ABCMeta, abstractmethod, abstractproperty @add_metaclass(ABCMeta) class Normalizer(): @abstractmethod def before(self, x, axes): @abstractmethod def after(self, mean, scale, axes): def __call__(self, x, axes): return self.before(x, axes) @abstractproperty def do_after(self): class NoNormalizer(Normalizer): def __init__(self, do_after=False): self._do_after = do_after def before(self, x, axes): return x def after(self, mean, scale, axes): self.do_after or _raise(ValueError()) return mean, scale @property def do_after(self): return self._do_after class PercentileNormalizer(Normalizer): def __init__(self, pmin=2, pmax=99.8, do_after=True, dtype=np.float32, **kwargs): (np.isscalar(pmin) and np.isscalar(pmax) and 0 <= pmin < pmax <= 100) or _raise(ValueError()) self.pmin = pmin self.pmax = pmax self._do_after = do_after self.dtype = dtype self.kwargs = kwargs
BSD 3-Clause New or Revised License
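A sketch of the per-channel percentile computation in before(); the axes string and the random test array are invented for illustration, and csbdeep is assumed to be installed.

import numpy as np
from csbdeep.data import PercentileNormalizer

# 64x64 image with 2 channels; 'YXC' tells the normalizer which axis is C.
x = np.random.default_rng(0).uniform(0, 1000, size=(64, 64, 2)).astype(np.float32)

norm = PercentileNormalizer(pmin=2, pmax=99.8)
y = norm.before(x, axes='YXC')

print(norm.mi.shape, norm.ma.shape)  # (1, 1, 2): one percentile pair per channel
print(y.shape, y.dtype)              # (64, 64, 2) float32, roughly in [0, 1]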
google/clusterfuzz
src/clusterfuzz/_internal/google_cloud_utils/storage.py
get_cache_file_metadata_path
python
def get_cache_file_metadata_path(cache_file_path): return '%s%s' % (cache_file_path, CACHE_METADATA_FILE_EXTENSION)
Return metadata file path for a cache file.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/clusterfuzz/_internal/google_cloud_utils/storage.py#L942-L944
import copy import datetime import json import os import shutil import threading import time from googleapiclient.discovery import build from googleapiclient.errors import HttpError from clusterfuzz._internal.base import retry from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.datastore import locks from clusterfuzz._internal.metrics import logs from clusterfuzz._internal.system import environment from clusterfuzz._internal.system import shell from . import credentials try: import google.cloud from google.cloud import storage as gcs except ImportError: pass AUTH_TOKEN_EXPIRY_TIME = 10 * 60 CACHE_COPY_WAIT_TIME = 10 CACHE_DIRNAME = 'cache' CACHE_LOCK_TIMEOUT = 30 * 60 CACHE_METADATA_FILE_EXTENSION = '.metadata' CACHE_SIZE_LIMIT = 5 * 1024 * 1024 * 1024 CACHE_TIMEOUT = 3 * 60 * 60 DEFAULT_FAIL_RETRIES = 8 DEFAULT_FAIL_WAIT = 2 GS_PREFIX = 'gs:/' MAX_CACHED_FILES_PER_DIRECTORY = 15 CREATE_BUCKET_DELAY = 4 BLOB_FILENAME_METADATA_KEY = 'filename' _local = threading.local() OBJECT_URL = 'https://storage.cloud.google.com' DIRECTORY_URL = 'https://console.cloud.google.com/storage' class StorageProvider(object): def create_bucket(self, name, object_lifecycle, cors): raise NotImplementedError def get_bucket(self, name): raise NotImplementedError def list_blobs(self, remote_path, recursive=True): raise NotImplementedError def copy_file_from(self, remote_path, local_path): raise NotImplementedError def copy_file_to(self, local_path_or_handle, remote_path, metadata=None): raise NotImplementedError def copy_blob(self, remote_source, remote_target): raise NotImplementedError def read_data(self, remote_path): raise NotImplementedError def write_data(self, data, remote_path, metadata=None): raise NotImplementedError def get(self, remote_path): raise NotImplementedError def delete(self, remote_path): raise NotImplementedError class GcsProvider(StorageProvider): def _chunk_size(self): if environment.is_running_on_app_engine(): return 10 * 1024 * 1024 return None def create_bucket(self, name, object_lifecycle, cors): project_id = utils.get_application_id() request_body = {'name': name} if object_lifecycle: request_body['lifecycle'] = object_lifecycle if cors: request_body['cors'] = cors client = create_discovery_storage_client() try: client.buckets().insert(project=project_id, body=request_body).execute() except HttpError as e: logs.log_warn('Failed to create bucket %s: %s' % (name, e)) raise return True def get_bucket(self, name): client = create_discovery_storage_client() try: return client.buckets().get(bucket=name).execute() except HttpError as e: if e.resp.status == 404: return None raise def list_blobs(self, remote_path, recursive=True): bucket_name, path = get_bucket_name_and_path(remote_path) if path and not path.endswith('/'): path += '/' client = _storage_client() bucket = client.bucket(bucket_name) properties = {} if recursive: delimiter = None else: delimiter = '/' iterator = bucket.list_blobs(prefix=path, delimiter=delimiter) for blob in iterator: properties['bucket'] = bucket_name properties['name'] = blob.name properties['updated'] = blob.updated properties['size'] = blob.size yield properties if not recursive: for prefix in iterator.prefixes: properties['bucket'] = bucket_name properties['name'] = prefix yield properties def copy_file_from(self, remote_path, local_path): client = _storage_client() bucket_name, path = get_bucket_name_and_path(remote_path) try: bucket = client.bucket(bucket_name) blob = bucket.blob(path, 
chunk_size=self._chunk_size()) blob.download_to_filename(local_path) except google.cloud.exceptions.GoogleCloudError: logs.log_warn('Failed to copy cloud storage file %s to local file %s.' % (remote_path, local_path)) raise return True def copy_file_to(self, local_path_or_handle, remote_path, metadata=None): client = _storage_client() bucket_name, path = get_bucket_name_and_path(remote_path) try: bucket = client.bucket(bucket_name) blob = bucket.blob(path, chunk_size=self._chunk_size()) if metadata: blob.metadata = metadata if isinstance(local_path_or_handle, str): blob.upload_from_filename(local_path_or_handle) else: blob.upload_from_file(local_path_or_handle, rewind=True) except google.cloud.exceptions.GoogleCloudError: logs.log_warn('Failed to copy local file %s to cloud storage file %s.' % (local_path_or_handle, remote_path)) raise return True def copy_blob(self, remote_source, remote_target): source_bucket_name, source_path = get_bucket_name_and_path(remote_source) target_bucket_name, target_path = get_bucket_name_and_path(remote_target) client = _storage_client() try: source_bucket = client.bucket(source_bucket_name) source_blob = source_bucket.blob(source_path) target_bucket = client.bucket(target_bucket_name) source_bucket.copy_blob(source_blob, target_bucket, target_path) except google.cloud.exceptions.GoogleCloudError: logs.log_warn('Failed to copy cloud storage file %s to cloud storage ' 'file %s.' % (remote_source, remote_target)) raise return True def read_data(self, remote_path): bucket_name, path = get_bucket_name_and_path(remote_path) client = _storage_client() try: bucket = client.bucket(bucket_name) blob = bucket.blob(path, chunk_size=self._chunk_size()) return blob.download_as_string() except google.cloud.exceptions.GoogleCloudError as e: if e.code == 404: return None logs.log_warn('Failed to read cloud storage file %s.' % remote_path) raise def write_data(self, data, remote_path, metadata=None): client = _storage_client() bucket_name, path = get_bucket_name_and_path(remote_path) try: bucket = client.bucket(bucket_name) blob = bucket.blob(path, chunk_size=self._chunk_size()) if metadata: blob.metadata = metadata blob.upload_from_string(data) except google.cloud.exceptions.GoogleCloudError: logs.log_warn('Failed to write cloud storage file %s.' % remote_path) raise return True def get(self, remote_path): client = create_discovery_storage_client() bucket, path = get_bucket_name_and_path(remote_path) try: return client.objects().get(bucket=bucket, object=path).execute() except HttpError as e: if e.resp.status == 404: return None raise def delete(self, remote_path): client = _storage_client() bucket_name, path = get_bucket_name_and_path(remote_path) try: bucket = client.bucket(bucket_name) bucket.delete_blob(path) except google.cloud.exceptions.GoogleCloudError: logs.log_warn('Failed to delete cloud storage file %s.' 
% remote_path) raise return True class FileSystemProvider(StorageProvider): OBJECTS_DIR = 'objects' METADATA_DIR = 'metadata' def __init__(self, filesystem_dir): self.filesystem_dir = os.path.abspath(filesystem_dir) def _get_object_properties(self, remote_path): bucket, path = get_bucket_name_and_path(remote_path) fs_path = self.convert_path(remote_path) data = { 'bucket': bucket, 'name': path, } if not os.path.isdir(fs_path): data.update({ 'updated': datetime.datetime.utcfromtimestamp(os.stat(fs_path).st_mtime), 'size': os.path.getsize(fs_path), 'metadata': self._get_metadata(bucket, path), }) return data def _get_metadata(self, bucket, path): fs_metadata_path = self._fs_path(bucket, path, self.METADATA_DIR) if os.path.exists(fs_metadata_path): with open(fs_metadata_path) as f: return json.load(f) return {} def _fs_bucket_path(self, bucket): return os.path.join(self.filesystem_dir, bucket) def _fs_objects_dir(self, bucket): return os.path.join(self._fs_bucket_path(bucket), self.OBJECTS_DIR) def _fs_path(self, bucket, path, directory): return os.path.join(self._fs_bucket_path(bucket), directory, path) def _write_metadata(self, remote_path, metadata): if not metadata: return fs_metadata_path = self.convert_path_for_write(remote_path, self.METADATA_DIR) with open(fs_metadata_path, 'w') as f: json.dump(metadata, f) def convert_path(self, remote_path, directory=OBJECTS_DIR): bucket, path = get_bucket_name_and_path(remote_path) return self._fs_path(bucket, path, directory) def convert_path_for_write(self, remote_path, directory=OBJECTS_DIR): bucket, path = get_bucket_name_and_path(remote_path) if not os.path.exists(self._fs_bucket_path(bucket)): raise RuntimeError( 'Bucket {bucket} does not exist.'.format(bucket=bucket)) fs_path = self._fs_path(bucket, path, directory) shell.create_directory(os.path.dirname(fs_path), create_intermediates=True) return fs_path def create_bucket(self, name, object_lifecycle, cors): bucket_path = self._fs_bucket_path(name) if os.path.exists(bucket_path): return False os.makedirs(bucket_path) return True def get_bucket(self, name): bucket_path = self._fs_bucket_path(name) if not os.path.exists(bucket_path): return None return { 'name': name, } def _list_files_recursive(self, fs_path): for root, _, filenames in shell.walk(fs_path): for filename in filenames: yield os.path.join(root, filename) def _list_files_nonrecursive(self, fs_path): for filename in os.listdir(fs_path): yield os.path.join(fs_path, filename) def list_blobs(self, remote_path, recursive=True): bucket, _ = get_bucket_name_and_path(remote_path) fs_path = self.convert_path(remote_path) if recursive: file_paths = self._list_files_recursive(fs_path) else: file_paths = self._list_files_nonrecursive(fs_path) for fs_path in file_paths: path = os.path.relpath(fs_path, self._fs_objects_dir(bucket)) yield self._get_object_properties( get_cloud_storage_file_path(bucket, path)) def copy_file_from(self, remote_path, local_path): fs_path = self.convert_path(remote_path) return shell.copy_file(fs_path, local_path) def copy_file_to(self, local_path_or_handle, remote_path, metadata=None): fs_path = self.convert_path_for_write(remote_path) if isinstance(local_path_or_handle, str): if not shell.copy_file(local_path_or_handle, fs_path): return False else: with open(fs_path, 'wb') as f: shutil.copyfileobj(local_path_or_handle, f) self._write_metadata(remote_path, metadata) return True def copy_blob(self, remote_source, remote_target): fs_source_path = self.convert_path(remote_source) fs_target_path = 
self.convert_path_for_write(remote_target) return shell.copy_file(fs_source_path, fs_target_path) def read_data(self, remote_path): fs_path = self.convert_path(remote_path) if not os.path.exists(fs_path): return None with open(fs_path, 'rb') as f: return f.read() def write_data(self, data, remote_path, metadata=None): fs_path = self.convert_path_for_write(remote_path) if isinstance(data, str): data = data.encode() with open(fs_path, 'wb') as f: f.write(data) self._write_metadata(remote_path, metadata) return True def get(self, remote_path): fs_path = self.convert_path(remote_path) if not os.path.exists(fs_path): return None return self._get_object_properties(remote_path) def delete(self, remote_path): fs_path = self.convert_path(remote_path) shell.remove_file(fs_path) fs_metadata_path = self.convert_path(remote_path, self.METADATA_DIR) shell.remove_file(fs_metadata_path) return True class GcsBlobInfo(object): def __init__(self, bucket, object_path, filename=None, size=None, legacy_key=None): self.bucket = bucket self.object_path = object_path if filename is not None and size is not None: self.filename = filename self.size = size else: gcs_object = get(get_cloud_storage_file_path(bucket, object_path)) self.filename = gcs_object['metadata'].get(BLOB_FILENAME_METADATA_KEY) self.size = int(gcs_object['size']) self.legacy_key = legacy_key def key(self): if self.legacy_key: return self.legacy_key return self.object_path @property def gcs_path(self): return '/%s/%s' % (self.bucket, self.object_path) @staticmethod def from_key(key): try: return GcsBlobInfo(blobs_bucket(), key) except Exception: logs.log_error('Failed to get blob from key %s.' % key) return None @staticmethod def from_legacy_blob_info(blob_info): bucket, path = get_bucket_name_and_path(blob_info.gs_object_name) return GcsBlobInfo(bucket, path, blob_info.filename, blob_info.size, blob_info.key.id()) def _provider(): local_buckets_path = environment.get_value('LOCAL_GCS_BUCKETS_PATH') if local_buckets_path: return FileSystemProvider(local_buckets_path) return GcsProvider() def _create_storage_client_new(): creds, project = credentials.get_default() if not project: project = utils.get_application_id() return gcs.Client(project=project, credentials=creds) def _storage_client(): if hasattr(_local, 'client'): return _local.client _local.client = _create_storage_client_new() return _local.client def get_bucket_name_and_path(cloud_storage_file_path): filtered_path = utils.strip_from_left(cloud_storage_file_path, GS_PREFIX) _, bucket_name_and_path = filtered_path.split('/', 1) if '/' in bucket_name_and_path: bucket_name, path = bucket_name_and_path.split('/', 1) else: bucket_name = bucket_name_and_path path = '' return bucket_name, path def get_cloud_storage_file_path(bucket, path): return GS_PREFIX + '/' + bucket + '/' + path def _get_error_reason(http_error): try: data = json.loads(http_error.content.decode('utf-8')) return data['error']['message'] except (ValueError, KeyError): logs.log_error('Failed to decode error content: %s' % http_error.content) return None @environment.local_noop def add_single_bucket_iam(storage, iam_policy, role, bucket_name, member): binding = get_bucket_iam_binding(iam_policy, role) binding['members'].append(member) result = set_bucket_iam_policy(storage, bucket_name, iam_policy) binding['members'].pop() return result @environment.local_noop def get_bucket_iam_binding(iam_policy, role): return next(( binding for binding in iam_policy['bindings'] if binding['role'] == role), None) @environment.local_noop def 
get_or_create_bucket_iam_binding(iam_policy, role): binding = get_bucket_iam_binding(iam_policy, role) if not binding: binding = {'role': role, 'members': []} iam_policy['bindings'].append(binding) return binding @environment.local_noop def remove_bucket_iam_binding(iam_policy, role): iam_policy['bindings'] = [ binding for binding in iam_policy['bindings'] if binding['role'] != role ] @environment.local_noop def get_bucket_iam_policy(storage, bucket_name): try: iam_policy = storage.buckets().getIamPolicy(bucket=bucket_name).execute() except HttpError as e: logs.log_error('Failed to get IAM policies for %s: %s' % (bucket_name, e)) return None return iam_policy @environment.local_noop def set_bucket_iam_policy(client, bucket_name, iam_policy): filtered_iam_policy = copy.deepcopy(iam_policy) for binding in filtered_iam_policy['bindings']: binding['members'] = sorted(list(set(binding['members']))) filtered_iam_policy['bindings'] = [ b for b in filtered_iam_policy['bindings'] if b['members'] ] try: return client.buckets().setIamPolicy( bucket=bucket_name, body=filtered_iam_policy).execute() except HttpError as e: error_reason = _get_error_reason(e) if error_reason == 'Invalid argument': logs.log_warn('Invalid Google email or group being added to bucket %s.' % bucket_name) elif error_reason and 'is of type "group"' in error_reason: logs.log_warn('Failed to set IAM policy for %s bucket for a group: %s.' % (bucket_name, error_reason)) else: logs.log_error('Failed to set IAM policies for bucket %s.' % bucket_name) return None def create_bucket_if_needed(bucket_name, object_lifecycle=None, cors=None): provider = _provider() if provider.get_bucket(bucket_name): return True if not provider.create_bucket(bucket_name, object_lifecycle, cors): return False time.sleep(CREATE_BUCKET_DELAY) return True @environment.local_noop def create_discovery_storage_client(): return build('storage', 'v1', cache_discovery=False) def generate_life_cycle_config(action, age=None, num_newer_versions=None): rule = {} rule['action'] = {'type': action} rule['condition'] = {} if age is not None: rule['condition']['age'] = age if num_newer_versions is not None: rule['condition']['numNewerVersions'] = num_newer_versions config = {'rule': [rule]} return config @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.copy_file_from', exception_type=google.cloud.exceptions.GoogleCloudError) def copy_file_from(cloud_storage_file_path, local_file_path, use_cache=False): if use_cache and get_file_from_cache_if_exists(local_file_path): logs.log('Copied file %s from local cache.' % cloud_storage_file_path) return True if not _provider().copy_file_from(cloud_storage_file_path, local_file_path): return False if use_cache: store_file_in_cache(local_file_path) return True @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.copy_file_to', exception_type=google.cloud.exceptions.GoogleCloudError) def copy_file_to(local_file_path_or_handle, cloud_storage_file_path, metadata=None): if (isinstance(local_file_path_or_handle, str) and not os.path.exists(local_file_path_or_handle)): logs.log_error('Local file %s not found.' 
% local_file_path_or_handle) return False return _provider().copy_file_to( local_file_path_or_handle, cloud_storage_file_path, metadata=metadata) @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.copy_blob', exception_type=google.cloud.exceptions.GoogleCloudError) def copy_blob(cloud_storage_source_path, cloud_storage_target_path): return _provider().copy_blob(cloud_storage_source_path, cloud_storage_target_path) @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.delete', exception_type=google.cloud.exceptions.GoogleCloudError) def delete(cloud_storage_file_path): return _provider().delete(cloud_storage_file_path) @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.exists') def exists(cloud_storage_file_path, ignore_errors=False): try: return bool(_provider().get(cloud_storage_file_path)) except HttpError: if not ignore_errors: logs.log_error('Failed when trying to find cloud storage file %s.' % cloud_storage_file_path) return False @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.last_updated', exception_type=google.cloud.exceptions.GoogleCloudError) def last_updated(cloud_storage_file_path): last_update = None for blob in _provider().list_blobs(cloud_storage_file_path): if not last_update or blob['updated'] > last_update: last_update = blob['updated'] if last_update: last_update = last_update.replace(tzinfo=None) return last_update @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.read_data', exception_type=google.cloud.exceptions.GoogleCloudError) def read_data(cloud_storage_file_path): return _provider().read_data(cloud_storage_file_path) @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.write_data', exception_type=google.cloud.exceptions.GoogleCloudError) def write_data(data, cloud_storage_file_path, metadata=None): return _provider().write_data( data, cloud_storage_file_path, metadata=metadata) @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.get_blobs', exception_type=google.cloud.exceptions.GoogleCloudError) def get_blobs(cloud_storage_path, recursive=True): for blob in _provider().list_blobs(cloud_storage_path, recursive=recursive): yield blob @retry.wrap( retries=DEFAULT_FAIL_RETRIES, delay=DEFAULT_FAIL_WAIT, function='google_cloud_utils.storage.list_blobs', exception_type=google.cloud.exceptions.GoogleCloudError) def list_blobs(cloud_storage_path, recursive=True): for blob in _provider().list_blobs(cloud_storage_path, recursive=recursive): yield blob['name'] def get_download_file_size(cloud_storage_file_path, file_path=None, use_cache=False): if use_cache and file_path: size_from_cache = get_file_size_from_cache_if_exists(file_path) if size_from_cache is not None: return size_from_cache return get_object_size(cloud_storage_file_path) @utils.timeout(CACHE_TIMEOUT) def get_file_from_cache_if_exists(file_path, update_modification_time_on_access=True): cache_file_path = get_cache_file_path(file_path) if not cache_file_path or not file_exists_in_cache(cache_file_path): return False cache_file_size = get_cache_file_size_from_metadata(cache_file_path) if not shell.copy_file(cache_file_path, file_path): return False if update_modification_time_on_access: 
update_access_and_modification_timestamp(cache_file_path) return (os.path.exists(file_path) and os.path.getsize(file_path) == cache_file_size) @utils.timeout(CACHE_TIMEOUT) def get_file_size_from_cache_if_exists(file_path): cache_file_path = get_cache_file_path(file_path) if not cache_file_path or not file_exists_in_cache(cache_file_path): return None return get_cache_file_size_from_metadata(cache_file_path) def get_cache_file_path(file_path): if not environment.get_value('NFS_ROOT'): return None return os.path.join( environment.get_value('NFS_ROOT'), CACHE_DIRNAME, utils.get_directory_hash_for_path(file_path), os.path.basename(file_path))
Apache License 2.0
forseti-security/forseti-security
google/cloud/forseti/scanner/audit/audit_logging_rules_engine.py
AuditLoggingRulesEngine.find_violations
python
def find_violations(self, project, audit_config, force_rebuild=False):
    if self.rule_book is None or force_rebuild:
        self.build_rule_book()

    violations = self.rule_book.find_violations(project, audit_config)
    return set(violations)
Determine whether a project's audit logging config violates rules.

Args:
    project (gcp_type): The project with audit log config.
    audit_config (IamAuditConfig): The audit config for this project,
        merged with ancestor configs.
    force_rebuild (bool): If True, rebuilds the rule book. This will
        reload the rules definition file and add the rules to the book.

Returns:
    iterable: A generator of rule violations.
https://github.com/forseti-security/forseti-security/blob/de5d0f4d047c293a2a72545a76c3783980865551/google/cloud/forseti/scanner/audit/audit_logging_rules_engine.py#L65-L84
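Usage sketch for the find_violations record above: the rules-file path is a placeholder, and the project / audit_config objects are assumed to come from Forseti's scanner data model (their construction is not shown in this record).

from google.cloud.forseti.scanner.audit.audit_logging_rules_engine import (
    AuditLoggingRulesEngine)


def scan_audit_logging(rules_path, project, audit_config):
    """Hypothetical helper: collect audit-logging violations for one project."""
    engine = AuditLoggingRulesEngine(rules_file_path=rules_path)
    engine.build_rule_book()
    # find_violations() builds the rule book lazily if it is missing, so the
    # explicit build_rule_book() call above is optional.
    return engine.find_violations(project, audit_config)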
from builtins import object
import collections
import itertools
import threading

from google.cloud.forseti.common.gcp_type import resource_util
from google.cloud.forseti.common.gcp_type.iam_policy import IamAuditConfig
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import relationship
from google.cloud.forseti.scanner.audit import base_rules_engine as bre
from google.cloud.forseti.scanner.audit import errors as audit_errors

LOGGER = logger.get_logger(__name__)

VIOLATION_TYPE = 'AUDIT_LOGGING_VIOLATION'


class AuditLoggingRulesEngine(bre.BaseRulesEngine):

    def __init__(self, rules_file_path, snapshot_timestamp=None):
        super(AuditLoggingRulesEngine, self).__init__(
            rules_file_path=rules_file_path,
            snapshot_timestamp=snapshot_timestamp)
        self.rule_book = None

    def build_rule_book(self, global_configs=None):
        self.rule_book = AuditLoggingRuleBook(
            global_configs,
            self._load_rule_definitions(),
            snapshot_timestamp=self.snapshot_timestamp)
Apache License 2.0
kaggle/kaggle-api
kaggle/models/license.py
License.__eq__
python
def __eq__(self, other):
    if not isinstance(other, License):
        return False

    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/kaggle/kaggle-api/blob/49057db362903d158b1e71a43d888b981dd27159/kaggle/models/license.py#L128-L133
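Quick equality check based on the License constructor and allowed name values shown in the context below; the import path is inferred from the file path above, and note that importing the top-level kaggle package may require API credentials to be configured.

from kaggle.models.license import License

a = License(name="CC0-1.0")
b = License(name="CC0-1.0")
c = License(name="GPL-2.0")

print(a == b)          # True: identical __dict__ contents
print(a == c)          # False: names differ
print(a == "CC0-1.0")  # False: non-License operands are never equal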
import pprint import re import six class License(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'name': 'str' } attribute_map = { 'name': 'name' } def __init__(self, name=None): self._name = None self.discriminator = None self.name = name @property def name(self): return self._name @name.setter def name(self, name): if name is None: raise ValueError("Invalid value for `name`, must not be `None`") allowed_values = ["CC0-1.0", "CC-BY-SA-4.0", "GPL-2.0", "ODbL-1.0", "CC-BY-NC-SA-4.0", "unknown", "DbCL-1.0", "CC-BY-SA-3.0", "copyright-authors", "other", "reddit-api", "world-bank"] if name not in allowed_values: raise ValueError( "Invalid value for `name` ({0}), must be one of {1}" .format(name, allowed_values) ) self._name = name def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
Apache License 2.0
whosaysni/robotframework-seriallibrary
src/SerialLibrary/__init__.py
SerialLibrary.set_dtr
python
def set_dtr(self, value, port_locator=None):
    self._port(port_locator).dtr = is_truthy_on_off(value)
Sets DTR (Data Terminal Ready) status.
https://github.com/whosaysni/robotframework-seriallibrary/blob/d5c33014acafc35f3190d345d422b43699a382f8/src/SerialLibrary/__init__.py#L638-L642
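Small sketch of the keyword above, using pyserial's loop:// handler so no hardware is needed; whether a given handler actually drives the DTR line is up to pyserial, the point here is only the library's truthy/falsy string handling.

from SerialLibrary import SerialLibrary

lib = SerialLibrary()
lib.add_port('loop://')   # loopback port, registered as the current port
lib.set_dtr('On')         # 'On', 'Yes', '1', True ... all count as truthy
lib.set_dtr('Off')        # 'Off', 'No', '0', '' ... count as falsy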
from __future__ import unicode_literals import codecs import re from collections import OrderedDict from os import SEEK_CUR from sys import platform from serial import Serial, SerialBase, serial_for_url from serial.rs485 import RS485Settings from serial.serialutil import LF from serial.tools.list_ports import comports, grep from serial.tools import hexlify_codec from robot.api import logger from robot.utils import asserts, is_truthy, is_string from robot.utils.unic import unic from .version import VERSION as __version__ if platform == 'win32': import ntpath as ospath else: import os.path as ospath abspath = ospath.abspath isabs = ospath.isabs join = ospath.join unicode_ = type('') def hexlify_decode_plus(data, errors='strict'): udata, length = hexlify_codec.hex_decode(data, errors) return (udata.rstrip(), length) hexlify_codec_plus = codecs.CodecInfo( name='hexlify', encode=hexlify_codec.hex_encode, decode=hexlify_decode_plus, incrementalencoder=hexlify_codec.IncrementalEncoder, incrementaldecoder=hexlify_codec.IncrementalDecoder, streamwriter=hexlify_codec.StreamWriter, streamreader=hexlify_codec.StreamReader) codecs.register(lambda c: hexlify_codec_plus if c == 'hexlify' else None) DEFAULT_SETTINGS = SerialBase( timeout=1.0, write_timeout=1.0, inter_byte_timeout=0.0).get_settings() def is_truthy_on_off(item): if is_string(item): item = item.strip() if item.isdigit(): return bool(int(item)) return item.strip().upper() not in ['FALSE', 'NO', '0', 'OFF', ''] return bool(item) def to_on_off(value): return 'On' if bool(value) is True else 'Off' class SerialLibrary: ROBOT_LIBRARY_SCOPE = 'GLOBAL' ROBOT_LIBRARY_VERSION = __version__ LOGGER_MAP = dict(INFO=logger.info, DEBUG=logger.debug, WARN=logger.warn) def __init__(self, port_locator=None, encoding='hexlify', **kwargs): self._encoding = encoding self._ports = OrderedDict() self._defaults = dict(DEFAULT_SETTINGS) self.set_default_parameters(kwargs) self._current_port_locator = None if port_locator is not None: self.add_port(port_locator) self._current_port_str = port_locator def _encode(self, ustring, encoding=None, encoding_mode='strict'): return ustring.encode(encoding or self._encoding, encoding_mode) def _decode(self, bstring, encoding=None, encoding_mode='replace'): return bstring.decode(encoding or self._encoding, encoding_mode) def _port(self, port_locator=None, fail=True): if port_locator in [None, '_']: port_locator = self._current_port_locator port = self._ports.get(port_locator, None) if port is None and is_truthy(fail) is True: asserts.fail('No such port.') return port def get_encoding(self): return self._encoding def set_encoding(self, encoding=None): prev_encoding = self._encoding if encoding: self._encoding = encoding return prev_encoding def list_com_ports(self): return comports() def list_com_port_names(self): return sorted(port_info.device for port_info in self.list_com_ports()) def com_port_should_exist_regexp(self, regexp): found = list(grep(regexp)) asserts.assert_true(len(found) > 0, 'Matching port does not exist.') return found def set_default_parameters(self, params): prev_value = OrderedDict(self._defaults) for key, value in params.items(): if key in self._defaults: value_type = type(self._defaults.get(key)) self._defaults[key] = value_type(value) return prev_value def reset_default_parameters(self): self._defaults = dict(DEFAULT_SETTINGS) def get_current_port_locator(self): return self._current_port_locator def current_port_should_be(self, port_locator): if port_locator not in [self._current_port_locator, '_']: 
asserts.fail('Port does not match.') def current_port_should_be_regexp(self, port_locator_regexp): current_port_locator = self._current_port_locator if current_port_locator is None: current_port_locator = '' regexp = re.compile(port_locator_regexp, re.I) asserts.assert_not_none( regexp.match(current_port_locator), 'Port does not match.', values=False) def add_port(self, port_locator, open=True, make_current=False, **kwargs): if port_locator in [None, '', '_']: asserts.fail('Invalid port locator.') elif port_locator in self._ports: asserts.fail('Port already exists.') serial_kw = dict( (k, type(v)(kwargs.get(k, v))) for k, v in self._defaults.items()) try: port = serial_for_url(port_locator, **serial_kw) except (AttributeError, ValueError): port = Serial(port_locator, **serial_kw) asserts.assert_not_none(port, 'Port initialization failed.') self._ports[port_locator] = port if port.is_open and (is_truthy(open) is False): port.close() if self._current_port_locator is None or make_current: self._current_port_locator = port_locator return port def delete_port(self, port_locator=None): if port_locator is None: port_locator = self._current_port_locator if port_locator not in self._ports: asserts.fail('Invalid port locator.') port = self._ports.pop(port_locator) if port.is_open: port.close() if port_locator == self._current_port_locator: self._current_port_locator = None if self._ports.keys(): self._current_port_locator = self._ports.keys()[-1] del port def delete_all_ports(self): self._current_port_locator = None while self._ports: locator, port = self._ports.popitem() if port.is_open: port.close() del port def open_port(self, port_locator=None): port = self._port(port_locator) if not port.is_open: port.open() def close_port(self, port_locator=None): port = self._port(port_locator) if port.is_open: port.close() def port_should_be_open(self, port_locator=None): asserts.assert_true( self._port(port_locator).is_open, 'Port is closed.' ) def port_should_be_closed(self, port_locator=None): asserts.assert_false( self._port(port_locator).is_open, 'Port is open.' 
) def switch_port(self, port_locator): if port_locator not in self._ports: asserts.fail('No such port.') self._current_port_locator = port_locator def get_port_parameter(self, param_name, port_locator=None): if param_name not in self._defaults: asserts.fail('Wrong parameter name.') port = self._port(port_locator) return getattr(port, param_name) def set_port_parameter(self, param_name, value, port_locator=None): if param_name not in self._defaults: asserts.fail('Wrong parameter name.') port = self._port(port_locator, fail=True) prev_value = getattr(port, param_name) param_type = type(self._defaults.get(param_name)) setattr(port, param_name, param_type(value)) return prev_value def read_all_data(self, encoding=None, port_locator=None): return self._decode(self._port(port_locator).read_all(), encoding) def read_all_and_log(self, loglevel='debug', encoding=None, port_locator=None): loglevel = loglevel.upper() logger_func = self.LOGGER_MAP.get(loglevel, None) if logger_func is None: raise asserts.fail('Invalid loglevel.') logger_func(self.read_all_data(encoding, port_locator)) def read_data_should_be( self, data, encoding=None, port_locator=None): bdata = self._encode(data, encoding) bread = self._port(port_locator).read_all() if bread != bdata: hex_bread = self._decode(bread, 'hexlify') hex_bdata = self._decode(bdata, 'hexlify') msg = "'%s'(read) != '%s'(data)" % (hex_bread, hex_bdata) asserts.fail(msg) def read_until(self, terminator=LF, size=None, encoding=None, port_locator=None): if size is not None: size = float(size) if terminator != LF: terminator = self._encode(terminator) return self._decode( self._port(port_locator).read_until(terminator=terminator, size=size), encoding) def port_should_have_unread_bytes(self, port_locator=None): asserts.assert_true( self._port(port_locator).in_waiting, 'Port has no in-waiting data.') def port_should_not_have_unread_bytes(self, port_locator=None): asserts.assert_false( self._port(port_locator).in_waiting, 'Port has in-waiting data.') def port_should_have_unsent_bytes(self, port_locator=None): asserts.assert_true( self._port(port_locator).out_waiting, 'Port has no out-waiting data.') def port_should_not_have_unsent_bytes(self, port_locator=None): asserts.assert_false( self._port(port_locator).out_waiting, 'Port has out-waiting data.') def read_n_bytes(self, size=1, encoding=None, port_locator=None): if is_string(size): size = int(size) return self._decode(self._port(port_locator).read(size)) def write_data(self, data, encoding=None, port_locator=None): if not isinstance(data, unicode_): data = unic(data) if isinstance(data, unicode_): data = self._encode(data, encoding) self._port(port_locator).write(data) def flush_port(self, port_locator=None): self._port(port_locator).flush() def reset_input_buffer(self, port_locator=None): self._port(port_locator).reset_input_buffer() def reset_output_buffer(self, port_locator=None): self._port(port_locator).reset_output_buffer() def send_break(self, duration=0.25, port_locator=None): self._port(port_locator).send_break(float(duration)) def set_rts(self, value, port_locator=None): self._port(port_locator).rts = is_truthy_on_off(value) value = 1 if is_truthy_on_off(value) else 0 self._port(port_locator).rts = value
BSD 3-Clause New or Revised License
simphony/osp-core
tests/test_sql_wrapper_session.py
MockSqlWrapperSession.__init__
python
def __init__(self, engine=None,
             data_tables=(data_tbl("XSD_string"), data_tbl("VECTOR-2-2"))):
    self._data_tables = data_tables
    super().__init__(engine)
Call the super constructor.
https://github.com/simphony/osp-core/blob/19f233ebe6c40e92884aa07bf498304d772f7f27/tests/test_sql_wrapper_session.py#L433-L437
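Sketch of how this test fixture is built, mirroring the setUp() shown in the context below; the import path follows the file path above and assumes the tests directory is importable, and the alternative data_tables tuple is a hypothetical override.

from tests.test_sql_wrapper_session import MockSqlWrapperSession, data_tbl

# Default tables: XSD_string and VECTOR-2-2, as in setUp().
session = MockSqlWrapperSession()

# Hypothetical override with a single custom data table.
custom = MockSqlWrapperSession(data_tables=(data_tbl("XSD_integer"),))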
import unittest2 as unittest import rdflib import uuid import numpy as np from osp.core.ontology.cuba import rdflib_cuba from osp.core.session import SqlWrapperSession from osp.core.session.db.sql_util import AndCondition, JoinCondition, EqualsCondition from osp.core.utils.general import iri_from_uid from osp.core.namespaces import cuba try: from osp.core.namespaces import city except ImportError: from osp.core.ontology import Parser from osp.core.ontology.namespace_registry import namespace_registry Parser().parse("city") city = namespace_registry.city CUDS_TABLE = SqlWrapperSession.CUDS_TABLE ENTITIES_TABLE = SqlWrapperSession.ENTITIES_TABLE TYPES_TABLE = SqlWrapperSession.TYPES_TABLE NAMESPACES_TABLE = SqlWrapperSession.NAMESPACES_TABLE RELATIONSHIP_TABLE = SqlWrapperSession.RELATIONSHIP_TABLE DATA_TABLE_PREFIX = SqlWrapperSession.DATA_TABLE_PREFIX def data_tbl(suffix): return DATA_TABLE_PREFIX + suffix class TestSqlWrapperSession(unittest.TestCase): def setUp(self): self.session = MockSqlWrapperSession() self.session._ns_to_idx = {str(rdflib_cuba): 1} self.session._idx_to_ns = {1: str(rdflib_cuba)} def test_queries_subject_given(self): r = sorted(self.session._queries( pattern=(iri_from_uid(uuid.UUID(int=1)), None, None)), key=lambda x: x[1]) self.assertEqual(len(r), 4) self.assertEqual(r[0][1], data_tbl("VECTOR-2-2")) self.assertEqual(r[1][1], data_tbl("XSD_string")) self.assertEqual(r[2][1], RELATIONSHIP_TABLE) self.assertEqual(r[3][1], TYPES_TABLE) self.assertEqual(r[0][2], rdflib_cuba["_datatypes/VECTOR-2-2"]) self.assertEqual(r[1][2], rdflib.XSD.string) self.assertEqual(r[2][2], rdflib.XSD.integer) self.assertEqual(r[3][2], rdflib.XSD.integer) self.assertEqual(r[0][0].order, ["ts", "tp", data_tbl("VECTOR-2-2")]) self.assertEqual(r[0][0]._columns, { "ts": ["uid"], "tp": ["ns_idx", "name"], data_tbl("VECTOR-2-2"): ["o___0", "o___1", "o___2", "o___3"]}) self.assertEqual(r[0][0].condition, AndCondition( JoinCondition(data_tbl("VECTOR-2-2"), "s", "ts", "cuds_idx"), JoinCondition(data_tbl("VECTOR-2-2"), "p", "tp", "entity_idx"), EqualsCondition("ts", "uid", str(uuid.UUID(int=1)), "UID") )) self.assertEqual(r[0][0].datatypes, { data_tbl("VECTOR-2-2"): {"o": rdflib_cuba["_datatypes/VECTOR-2-2"], "o___0": rdflib.XSD.float, "o___1": rdflib.XSD.float, "o___2": rdflib.XSD.float, "o___3": rdflib.XSD.float}, "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "tp": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer} }) self.assertEqual(r[0][0].tables, { "ts": CUDS_TABLE, "tp": ENTITIES_TABLE, data_tbl("VECTOR-2-2"): data_tbl("VECTOR-2-2") }) self.assertEqual(r[1][0]._columns, { "ts": ["uid"], "tp": ["ns_idx", "name"], data_tbl("XSD_string"): ["o"]}) self.assertEqual(r[1][0].condition, AndCondition( JoinCondition(data_tbl("XSD_string"), "s", "ts", "cuds_idx"), JoinCondition(data_tbl("XSD_string"), "p", "tp", "entity_idx"), EqualsCondition("ts", "uid", str(uuid.UUID(int=1)), "UID") )) self.assertEqual(r[1][0].datatypes, { data_tbl("XSD_string"): {"o": rdflib.XSD.string}, "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "tp": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer} }) self.assertEqual(r[1][0].tables, { "ts": CUDS_TABLE, "tp": ENTITIES_TABLE, data_tbl("XSD_string"): data_tbl("XSD_string") }) self.assertEqual(r[2][0]._columns, { "ts": ["uid"], "tp": ["ns_idx", "name"], RELATIONSHIP_TABLE: [], "to": ["uid"]}) self.assertEqual(r[2][0].condition, AndCondition( JoinCondition(RELATIONSHIP_TABLE, "s", "ts", 
"cuds_idx"), JoinCondition(RELATIONSHIP_TABLE, "p", "tp", "entity_idx"), JoinCondition(RELATIONSHIP_TABLE, "o", "to", "cuds_idx"), EqualsCondition("ts", "uid", str(uuid.UUID(int=1)), "UID") )) self.assertEqual(r[2][0].datatypes, { "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "tp": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer}, "to": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, RELATIONSHIP_TABLE: {} }) self.assertEqual(r[2][0].tables, { "ts": CUDS_TABLE, "tp": ENTITIES_TABLE, "to": CUDS_TABLE, RELATIONSHIP_TABLE: RELATIONSHIP_TABLE }) self.assertEqual(r[3][0]._columns, { "ts": ["uid"], TYPES_TABLE: [], "to": ["ns_idx", "name"]}) self.assertEqual(r[3][0].condition, AndCondition( JoinCondition(TYPES_TABLE, "s", "ts", "cuds_idx"), JoinCondition(TYPES_TABLE, "o", "to", "entity_idx"), EqualsCondition("ts", "uid", str(uuid.UUID(int=1)), "UID") )) self.assertEqual(r[3][0].datatypes, { "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "to": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer}, TYPES_TABLE: {} }) self.assertEqual(r[3][0].tables, { "ts": CUDS_TABLE, "to": ENTITIES_TABLE, TYPES_TABLE: TYPES_TABLE }) def test_queries_predicate_given(self): r = sorted(self.session._queries( pattern=(None, cuba.activeRelationship.iri, None)), key=lambda x: x[1] ) self.assertEqual(len(r), 1) self.assertEqual(r[0][1:], (RELATIONSHIP_TABLE, rdflib.XSD.integer)) self.assertEqual(r[0][0]._columns, { "ts": ["uid"], "tp": ["ns_idx", "name"], RELATIONSHIP_TABLE: [], "to": ["uid"]}) self.assertEqual(r[0][0].condition, AndCondition( JoinCondition(RELATIONSHIP_TABLE, "s", "ts", "cuds_idx"), JoinCondition(RELATIONSHIP_TABLE, "p", "tp", "entity_idx"), JoinCondition(RELATIONSHIP_TABLE, "o", "to", "cuds_idx"), EqualsCondition("tp", "ns_idx", 1, rdflib.XSD.integer), EqualsCondition("tp", "name", "activeRelationship", rdflib.XSD.string) )) self.assertEqual(r[0][0].datatypes, { "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "tp": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer}, "to": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, RELATIONSHIP_TABLE: {} }) self.assertEqual(r[0][0].tables, { "ts": CUDS_TABLE, "tp": ENTITIES_TABLE, "to": CUDS_TABLE, RELATIONSHIP_TABLE: RELATIONSHIP_TABLE }) r = sorted(self.session._queries( pattern=(None, rdflib.RDF.type, None)), key=lambda x: x[1] ) self.assertEqual(len(r), 1) self.assertEqual(r[0][1:], (TYPES_TABLE, rdflib.XSD.integer)) self.assertEqual(r[0][0]._columns, { "ts": ["uid"], TYPES_TABLE: [], "to": ["ns_idx", "name"]}) self.assertEqual(r[0][0].condition, AndCondition( JoinCondition(TYPES_TABLE, "s", "ts", "cuds_idx"), JoinCondition(TYPES_TABLE, "o", "to", "entity_idx") )) self.assertEqual(r[0][0].datatypes, { "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "to": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer}, TYPES_TABLE: {} }) self.assertEqual(r[0][0].tables, { "ts": CUDS_TABLE, "to": ENTITIES_TABLE, TYPES_TABLE: TYPES_TABLE }) r = sorted(self.session._queries( pattern=(None, city.coordinates.iri, None)), key=lambda x: x[1] ) self.assertEqual(len(r), 1) self.assertEqual(r[0][1:], (data_tbl("VECTOR-INT-2"), rdflib_cuba["_datatypes/VECTOR-INT-2"])) self.assertEqual(r[0][0]._columns, { "ts": ["uid"], "tp": ["ns_idx", "name"], data_tbl("VECTOR-INT-2"): ["o___0", "o___1"]}) self.assertEqual(r[0][0].condition, AndCondition( JoinCondition(data_tbl("VECTOR-INT-2"), "s", "ts", 
"cuds_idx"), JoinCondition(data_tbl("VECTOR-INT-2"), "p", "tp", "entity_idx"), EqualsCondition("tp", "ns_idx", 2, rdflib.XSD.integer), EqualsCondition("tp", "name", "coordinates", rdflib.XSD.string) )) self.assertEqual(r[0][0].datatypes, { data_tbl("VECTOR-INT-2"): { "o": rdflib_cuba["_datatypes/VECTOR-INT-2"], "o___1": rdflib.XSD.integer, "o___0": rdflib.XSD.integer }, "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "tp": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer} }) self.assertEqual(r[0][0].tables, { "ts": CUDS_TABLE, "tp": ENTITIES_TABLE, data_tbl("VECTOR-INT-2"): data_tbl("VECTOR-INT-2") }) dtype = rdflib_cuba["_datatypes/VECTOR-INT-2"] r = sorted(self.session._queries( pattern=(None, city.coordinates.iri, rdflib.Literal(np.array([1, 1]), datatype=dtype))), key=lambda x: x[1]) self.assertEqual(len(r), 1) self.assertEqual(r[0][1:], (data_tbl("VECTOR-INT-2"), rdflib_cuba["_datatypes/VECTOR-INT-2"])) self.assertEqual(r[0][0]._columns, { "ts": ["uid"], "tp": ["ns_idx", "name"], data_tbl("VECTOR-INT-2"): ["o___0", "o___1"]}) self.assertEqual(r[0][0].condition, AndCondition( JoinCondition(data_tbl("VECTOR-INT-2"), "s", "ts", "cuds_idx"), JoinCondition(data_tbl("VECTOR-INT-2"), "p", "tp", "entity_idx"), EqualsCondition("tp", "ns_idx", 2, rdflib.XSD.integer), EqualsCondition("tp", "name", "coordinates", rdflib.XSD.string), AndCondition( EqualsCondition(data_tbl("VECTOR-INT-2"), "o___0", 1, rdflib.XSD.integer), EqualsCondition(data_tbl("VECTOR-INT-2"), "o___1", 1, rdflib.XSD.integer), ) )) self.assertEqual(r[0][0].datatypes, { data_tbl("VECTOR-INT-2"): { "o": rdflib_cuba["_datatypes/VECTOR-INT-2"], "o___0": rdflib.XSD.integer, "o___1": rdflib.XSD.integer }, "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "tp": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer} }) self.assertEqual(r[0][0].tables, { "ts": CUDS_TABLE, "tp": ENTITIES_TABLE, data_tbl("VECTOR-INT-2"): data_tbl("VECTOR-INT-2") }) def test_queries_object_given(self): r = sorted(self.session._queries( pattern=(None, None, city.City.iri)), key=lambda x: x[1]) self.assertEqual(len(r), 1) self.assertEqual(r[0][1:], (TYPES_TABLE, rdflib.XSD.integer)) self.assertEqual(r[0][0]._columns, { "ts": ["uid"], "to": ["ns_idx", "name"], TYPES_TABLE: []}) self.assertEqual(r[0][0].condition, AndCondition( JoinCondition(TYPES_TABLE, "s", "ts", "cuds_idx"), JoinCondition(TYPES_TABLE, "o", "to", "entity_idx"), EqualsCondition("to", "ns_idx", 2, rdflib.XSD.integer), EqualsCondition("to", "name", "City", rdflib.XSD.string) )) self.assertEqual(r[0][0].datatypes, { "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "to": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer}, TYPES_TABLE: {} }) self.assertEqual(r[0][0].tables, { "ts": CUDS_TABLE, "to": ENTITIES_TABLE, TYPES_TABLE: TYPES_TABLE }) r = sorted(self.session._queries( pattern=(None, None, iri_from_uid(uuid.UUID(int=1)))), key=lambda x: x[1]) self.assertEqual(len(r), 1) self.assertEqual(r[0][1:], (RELATIONSHIP_TABLE, rdflib.XSD.integer)) self.assertEqual(r[0][0]._columns, { "ts": ["uid"], "tp": ["ns_idx", "name"], RELATIONSHIP_TABLE: [], "to": ["uid"]}) self.assertEqual(r[0][0].condition, AndCondition( JoinCondition(RELATIONSHIP_TABLE, "s", "ts", "cuds_idx"), JoinCondition(RELATIONSHIP_TABLE, "p", "tp", "entity_idx"), JoinCondition(RELATIONSHIP_TABLE, "o", "to", "cuds_idx"), EqualsCondition("to", "uid", str(uuid.UUID(int=1)), "UID") )) 
self.assertEqual(r[0][0].datatypes, { "ts": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, "tp": {"name": rdflib.XSD.string, "ns_idx": rdflib.XSD.integer, "entity_idx": rdflib.XSD.integer}, "to": {"uid": "UID", "cuds_idx": rdflib.XSD.integer}, RELATIONSHIP_TABLE: {} }) self.assertEqual(r[0][0].tables, { "ts": CUDS_TABLE, "tp": ENTITIES_TABLE, "to": CUDS_TABLE, RELATIONSHIP_TABLE: RELATIONSHIP_TABLE }) def test_construct_remove_condition(self): r = sorted(self.session._queries( pattern=(iri_from_uid(uuid.UUID(int=1)), city.hasInhabitant.iri, iri_from_uid(uuid.UUID(int=2))), mode="delete"), key=lambda x: x[1]) self.assertEqual(len(r), 1) self.assertEqual(r[0][1], RELATIONSHIP_TABLE) self.assertEqual(r[0][2], rdflib.XSD.integer) self.assertEqual(r[0][0], AndCondition( EqualsCondition(RELATIONSHIP_TABLE, "s", 1, rdflib.XSD.integer), EqualsCondition(RELATIONSHIP_TABLE, "p", 42, rdflib.XSD.integer), EqualsCondition(RELATIONSHIP_TABLE, "o", 2, rdflib.XSD.integer) )) r = sorted(self.session._queries( pattern=(iri_from_uid(uuid.UUID(int=1)), rdflib.RDF.type, city.City.iri), mode="delete"), key=lambda x: x[1]) self.assertEqual(len(r), 1) self.assertEqual(r[0][1], TYPES_TABLE) self.assertEqual(r[0][2], rdflib.XSD.integer) self.assertEqual(r[0][0], AndCondition( EqualsCondition(TYPES_TABLE, "s", 1, rdflib.XSD.integer), EqualsCondition(TYPES_TABLE, "o", 42, rdflib.XSD.integer) )) def test_rows_to_triples(self): cursor = iter([ (uuid.UUID(int=1), 2, "hasInhabitant", uuid.UUID(int=2)), (uuid.UUID(int=1), 1, "activeRelationship", uuid.UUID(int=3)) ]) triples = list( self.session._rows_to_triples(cursor, RELATIONSHIP_TABLE, rdflib.XSD.integer) ) self.assertEqual(triples, [ (iri_from_uid(uuid.UUID(int=1)), city.hasInhabitant.iri, iri_from_uid(uuid.UUID(int=2))), (iri_from_uid(uuid.UUID(int=1)), cuba.activeRelationship.iri, iri_from_uid(uuid.UUID(int=3))) ]) cursor = iter([ (uuid.UUID(int=1), 2, "City"), (uuid.UUID(int=2), 1, "Entity") ]) triples = list( self.session._rows_to_triples(cursor, TYPES_TABLE, rdflib.XSD.integer) ) self.assertEqual(sorted(triples), sorted([ (iri_from_uid(uuid.UUID(int=1)), rdflib.RDF.type, city.City.iri), (iri_from_uid(uuid.UUID(int=2)), rdflib.RDF.type, cuba.Entity.iri) ])) cursor = iter([ (uuid.UUID(int=1), 2, "coordinates", np.array([1, 2])), (uuid.UUID(int=2), 1, "attribute", np.array([3, 4])) ]) triples = list( self.session._rows_to_triples( cursor, data_tbl("VECTOR-INT-2"), rdflib_cuba["_datatypes/VECTOR-INT-2"]) ) self.assertEqual(triples, [ (iri_from_uid(uuid.UUID(int=1)), city.coordinates.iri, rdflib.Literal(np.array([1, 2]), datatype=rdflib_cuba["_datatypes/VECTOR-INT-2"])), (iri_from_uid(uuid.UUID(int=2)), cuba.attribute.iri, rdflib.Literal(np.array([3, 4]), datatype=rdflib_cuba["_datatypes/VECTOR-INT-2"])) ]) def test_get_values(self): v = self.session._get_values( (iri_from_uid(uuid.UUID(int=1)), city.hasInhabitant.iri, iri_from_uid(uuid.UUID(int=2))), RELATIONSHIP_TABLE) self.assertEqual(v, (1, 42, 2)) v = self.session._get_values( (iri_from_uid(uuid.UUID(int=1)), city.coordinates.iri, rdflib.Literal(np.array([1, 2]), datatype=rdflib_cuba["_datatypes/VECTOR-INT-2"])), data_tbl("VECTOR-INT-2")) np.testing.assert_equal(v, (1, 42, np.array([1, 2]))) v = self.session._get_values( (iri_from_uid(uuid.UUID(int=1)), rdflib.XSD.anyURI, city.City.iri), TYPES_TABLE) self.assertEqual(v, (1, 42)) class MockSqlWrapperSession(SqlWrapperSession):
BSD 3-Clause New or Revised License
google-research/ott
ott/tools/soft_sort.py
_ranks
python
def _ranks(inputs: jnp.ndarray, num_targets, **kwargs) -> jnp.ndarray:
    num_points = inputs.shape[0]
    num_targets = num_points if num_targets is None else num_targets
    a = jnp.ones((num_points,)) / num_points
    b = jnp.ones((num_targets,)) / num_targets
    ot = transport_for_sort(inputs, a, b, **kwargs)
    out = 1.0 / a * ot.apply(jnp.arange(num_targets), axis=1)
    return jnp.reshape(out, inputs.shape)
Applies the soft ranks operator on a one dimensional array.
https://github.com/google-research/ott/blob/03afbebdb55ec37891f90e4d89376c4838cf4770/ott/tools/soft_sort.py#L157-L165
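Illustration of the soft-rank behaviour; _ranks is module-private, so calling it directly is only for demonstration (a public wrapper in soft_sort is likely but not shown in this record), and the result assumes the ott version from this repository snapshot.

import jax.numpy as jnp

from ott.tools import soft_sort

x = jnp.array([3.0, 1.0, 2.0])
soft_ranks = soft_sort._ranks(x, num_targets=None, epsilon=1e-2)
print(soft_ranks)  # close to [2., 0., 1.]: the smallest value gets rank ~0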
import functools from typing import Callable, Optional import jax import jax.numpy as jnp import numpy as np from ott.tools import transport def transport_for_sort( inputs: jnp.ndarray, weights: jnp.ndarray, target_weights: jnp.ndarray, squashing_fun: Callable[[jnp.ndarray], jnp.ndarray] = None, epsilon: float = 1e-2, **kwargs) -> jnp.ndarray: shape = inputs.shape if len(shape) > 2 or (len(shape) == 2 and shape[1] != 1): raise ValueError( 'Shape ({shape}) not supported. The input should be one-dimensional.') x = jnp.expand_dims(jnp.squeeze(inputs), axis=1) if squashing_fun is None: squashing_fun = lambda z: jax.nn.sigmoid((z - jnp.mean(z)) / (jnp.std(z) + 1e-10)) x = squashing_fun(x) a = jnp.squeeze(weights) b = jnp.squeeze(target_weights) num_targets = b.shape[0] y = jnp.linspace(0.0, 1.0, num_targets)[:, jnp.newaxis] return transport.Transport(x, y, a=a, b=b, epsilon=epsilon, **kwargs) def apply_on_axis(op, inputs, axis, *args, **kwargs): op_inner = functools.partial(op, **kwargs) axis = (axis,) if isinstance(axis, int) else axis num_points = np.prod(np.array(inputs.shape)[tuple([axis])]) permutation = np.arange(len(inputs.shape)) axis = tuple(permutation[a] for a in axis) permutation = tuple(sorted(set(permutation) - set(axis)) + sorted(axis)) inputs = jnp.transpose(inputs, permutation) batch_fn = jax.vmap(op_inner, in_axes=(0,) + (None,) * len(args)) result = batch_fn(jnp.reshape(inputs, (-1, num_points)), *args) shrink = len(axis) result = jnp.reshape(result, inputs.shape[:-shrink] + result.shape[-1:]) permutation = tuple(range(len(result.shape))) rank = len(result.shape) - 1 axis = min(axis) permutation = permutation[:axis] + (rank,) + permutation[axis:-1] result = jnp.transpose(result, permutation) return result def _sort(inputs: jnp.ndarray, topk, num_targets, **kwargs) -> jnp.ndarray: num_points = inputs.shape[0] a = jnp.ones((num_points,)) / num_points if 0 < topk < num_points: start_index = 1 b = jnp.concatenate([ jnp.array([(num_points - topk) / num_points]), jnp.ones(topk, dtype=inputs.dtype) / num_points ]) else: num_targets = num_points if num_targets is None else num_targets start_index = 0 b = jnp.ones((num_targets,)) / num_targets ot = transport_for_sort(inputs, a, b, **kwargs) out = 1.0 / b * ot.apply(inputs, axis=0) return out[start_index:] def sort(inputs: jnp.ndarray, axis: int = -1, topk: int = -1, num_targets: int = None, **kwargs) -> jnp.ndarray: return apply_on_axis(_sort, inputs, axis, topk, num_targets, **kwargs)
Apache License 2.0
demisto/demisto-py
demisto_client/demisto_api/models/investigation_filter.py
InvestigationFilter.from_date_license
python
def from_date_license(self):
    return self._from_date_license
Gets the from_date_license of this InvestigationFilter.  # noqa: E501

:return: The from_date_license of this InvestigationFilter.  # noqa: E501
:rtype: datetime
https://github.com/demisto/demisto-py/blob/95d29e07693d27c133f7fe6ef9da13e4b6dbf542/demisto_client/demisto_api/models/investigation_filter.py#L284-L291
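Getter usage, grounded in the constructor shown in the context below; the import path follows the file path above.

from datetime import datetime

from demisto_client.demisto_api.models.investigation_filter import InvestigationFilter

flt = InvestigationFilter(from_date_license=datetime(2024, 1, 1))
print(flt.from_date_license)  # datetime.datetime(2024, 1, 1, 0, 0)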
import pprint import re import six from demisto_client.demisto_api.models.duration import Duration from demisto_client.demisto_api.models.investigation_status import InvestigationStatus from demisto_client.demisto_api.models.investigation_type import InvestigationType from demisto_client.demisto_api.models.order import Order from demisto_client.demisto_api.models.period import Period class InvestigationFilter(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'cache': 'dict(str, list[str])', 'and_op': 'bool', 'category': 'list[str]', 'from_close_date': 'datetime', 'from_date': 'datetime', 'from_date_license': 'datetime', 'id': 'list[str]', 'ids_only': 'bool', 'include_child_inv': 'bool', 'name': 'list[str]', 'not_category': 'list[str]', 'not_i_ds': 'list[str]', 'page': 'int', 'period': 'Period', 'reason': 'list[str]', 'search_after': 'list[str]', 'search_before': 'list[str]', 'size': 'int', 'sort': 'list[Order]', 'status': 'list[InvestigationStatus]', 'time_frame': 'Duration', 'to_close_date': 'datetime', 'to_date': 'datetime', 'type': 'list[InvestigationType]', 'user': 'list[str]' } attribute_map = { 'cache': 'Cache', 'and_op': 'andOp', 'category': 'category', 'from_close_date': 'fromCloseDate', 'from_date': 'fromDate', 'from_date_license': 'fromDateLicense', 'id': 'id', 'ids_only': 'idsOnly', 'include_child_inv': 'includeChildInv', 'name': 'name', 'not_category': 'notCategory', 'not_i_ds': 'notIDs', 'page': 'page', 'period': 'period', 'reason': 'reason', 'search_after': 'searchAfter', 'search_before': 'searchBefore', 'size': 'size', 'sort': 'sort', 'status': 'status', 'time_frame': 'timeFrame', 'to_close_date': 'toCloseDate', 'to_date': 'toDate', 'type': 'type', 'user': 'user' } def __init__(self, cache=None, and_op=None, category=None, from_close_date=None, from_date=None, from_date_license=None, id=None, ids_only=None, include_child_inv=None, name=None, not_category=None, not_i_ds=None, page=None, period=None, reason=None, search_after=None, search_before=None, size=None, sort=None, status=None, time_frame=None, to_close_date=None, to_date=None, type=None, user=None): self._cache = None self._and_op = None self._category = None self._from_close_date = None self._from_date = None self._from_date_license = None self._id = None self._ids_only = None self._include_child_inv = None self._name = None self._not_category = None self._not_i_ds = None self._page = None self._period = None self._reason = None self._search_after = None self._search_before = None self._size = None self._sort = None self._status = None self._time_frame = None self._to_close_date = None self._to_date = None self._type = None self._user = None self.discriminator = None if cache is not None: self.cache = cache if and_op is not None: self.and_op = and_op if category is not None: self.category = category if from_close_date is not None: self.from_close_date = from_close_date if from_date is not None: self.from_date = from_date if from_date_license is not None: self.from_date_license = from_date_license if id is not None: self.id = id if ids_only is not None: self.ids_only = ids_only if include_child_inv is not None: self.include_child_inv = include_child_inv if name is not None: self.name = name if not_category is not None: self.not_category = not_category if not_i_ds is not None: self.not_i_ds = not_i_ds if page is not None: self.page = page if period is not 
None: self.period = period if reason is not None: self.reason = reason if search_after is not None: self.search_after = search_after if search_before is not None: self.search_before = search_before if size is not None: self.size = size if sort is not None: self.sort = sort if status is not None: self.status = status if time_frame is not None: self.time_frame = time_frame if to_close_date is not None: self.to_close_date = to_close_date if to_date is not None: self.to_date = to_date if type is not None: self.type = type if user is not None: self.user = user @property def cache(self): return self._cache @cache.setter def cache(self, cache): self._cache = cache @property def and_op(self): return self._and_op @and_op.setter def and_op(self, and_op): self._and_op = and_op @property def category(self): return self._category @category.setter def category(self, category): self._category = category @property def from_close_date(self): return self._from_close_date @from_close_date.setter def from_close_date(self, from_close_date): self._from_close_date = from_close_date @property def from_date(self): return self._from_date @from_date.setter def from_date(self, from_date): self._from_date = from_date @property
Apache License 2.0
rapidpro/rapidpro-python
temba_client/utils.py
format_iso8601
python
def format_iso8601(value):
    if value is None:
        return None

    return str(value.astimezone(pytz.UTC).strftime("%Y-%m-%dT%H:%M:%S.%fZ"))
Formats a datetime as a UTC ISO8601 date or returns None if value is None
https://github.com/rapidpro/rapidpro-python/blob/715d27b7197c2bb7bb42bdba17a30aebe05598ba/temba_client/utils.py#L18-L25
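A quick round-trip with the two helpers from this module; only the standard temba_client import is assumed.

from datetime import datetime

import pytz

from temba_client.utils import format_iso8601, parse_iso8601

kigali = pytz.timezone("Africa/Kigali")            # UTC+2
dt = kigali.localize(datetime(2024, 5, 1, 12, 30, 0))

s = format_iso8601(dt)
print(s)                       # '2024-05-01T10:30:00.000000Z' (shifted to UTC)
print(parse_iso8601(s) == dt)  # True: the round-trip preserves the instant
print(format_iso8601(None))    # None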
import json

import iso8601
import pytz
import requests


def parse_iso8601(value):
    if not value:
        return None

    return iso8601.parse_date(value)
BSD 3-Clause New or Revised License
cc1-cloud/cc1
src/clm/views/guest/user.py
is_mailer_active
python
def is_mailer_active():
    return {'mailer_active': settings.MAILER_ACTIVE,
            'contact_email': settings.CONTACT_EMAIL}
Info, whether mailer is active

@clmview_guest

@response{dict} fields:
@dictkey{mailer_active}
@dictkey{contact_email}
https://github.com/cc1-cloud/cc1/blob/8113673fa13b6fe195cea99dedab9616aeca3ae8/src/clm/views/guest/user.py#L258-L267
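Sketch only: this guest view lives inside the CC1 Django project, so a configured settings module providing MAILER_ACTIVE and CONTACT_EMAIL is assumed, and the view decorator mentioned in the docstring may wrap the call in a real deployment.

from clm.views.guest.user import is_mailer_active

info = is_mailer_active()
print(info['mailer_active'], info['contact_email'])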
from datetime import datetime import random from smtplib import SMTPRecipientsRefused import string from django.conf import settings from django.utils.http import int_to_base36 from clm.models.cluster import Cluster from clm.models.user import User from clm.utils import mail from clm.utils.cm import CM from clm.utils.decorators import guest_log from clm.utils.exception import CLMException from clm.utils.tokens import default_token_generator as token_generator from common.signature import Signature from common.states import user_active_states, registration_states, cluster_states @guest_log(log=False) def check_password(login, password): try: user = User.objects.get(login=login) except User.DoesNotExist: raise CLMException('user_get') if user.is_active == user_active_states['ok']: try: user.last_login_date = datetime.now() user.save() except: raise CLMException('user_edit') else: return False if user.password == password: return user.dict else: return False @guest_log(log=True) def check_signature(parameters): try: auth_header = parameters['authorization'] space = auth_header.index(' ') auth_header = auth_header[space + 1:] login_and_signature = auth_header.split(':') login = login_and_signature[0] user_signature = login_and_signature[1] user = User.objects.get(login=login) except User.DoesNotExist, error: print 'ERROR', error raise CLMException('user_get') except KeyError: raise CLMException('user_parameter') if not Signature.checkSignature(user.password, user_signature, parameters): raise CLMException('user_get') return True @guest_log(log=True) def register(first, last, login, email, new_password, organization, wi_data): user = User() user.first = first user.last = last try: default_cluster_id = Cluster.objects.filter(state=cluster_states['ok'])[0].id except: default_cluster_id = None user.default_cluster_id = default_cluster_id user.login = login user.email = email user.password = new_password user.organization = organization user.act_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for n in range(40)) user.is_active = user_active_states['inactive'] try: user.save() except: raise CLMException('user_register') reg_state = -1 if settings.MAILER_ACTIVE: try: mail.send_activation_email(user.act_key, user, wi_data) except SMTPRecipientsRefused: reg_state = registration_states['error'] reg_state = registration_states['mail_confirmation'] else: if settings.AUTOACTIVATION: for cluster in Cluster.objects.filter(state__exact=0): resp = CM(cluster.id).send_request("guest/user/add/", new_user_id=user.id) if resp['status'] != 'ok': raise CLMException('cm_get') user.is_active = user_active_states['ok'] user.activation_date = datetime.now() user.act_key = '' reg_state = registration_states['completed'] else: user.is_active = user_active_states['email_confirmed'] reg_state = registration_states['admin_confirmation'] try: user.save() except: raise CLMException('user_activate') return {'user': user.dict, 'registration_state': reg_state} @guest_log(log=False) def exists(login): return User.objects.filter(login=login).exists() @guest_log(log=False) def email_exists(email): return User.objects.filter(email__exact=email).exists() @guest_log(log=True) def activate(act_key, wi_data): try: user = User.objects.get(act_key=act_key) except: raise CLMException('user_get') user.is_active = user_active_states['email_confirmed'] reg_state = registration_states['admin_confirmation'] if settings.AUTOACTIVATION: for cluster in Cluster.objects.filter(state__exact=0): resp = 
CM(cluster.id).send_request("guest/user/add/", new_user_id=user.id) if resp['status'] != 'ok': raise CLMException('cm_get') user.is_active = user_active_states['ok'] reg_state = registration_states['completed'] user.activation_date = datetime.now() user.act_key = '' try: user.save() except: raise CLMException('user_activate') if settings.MAILER_ACTIVE and reg_state == registration_states['admin_confirmation']: try: mail.send_admin_registration_notification(user, wi_data) except SMTPRecipientsRefused: pass return {'user': user.dict, 'registration_state': reg_state} @guest_log(log=True)
Apache License 2.0
pandas-ml/pandas-ml
pandas_ml/confusion_matrix/abstract.py
ConfusionMatrixAbstract.max
python
def max(self):
    return(self.to_dataframe().max().max())
Returns max value of confusion matrix
https://github.com/pandas-ml/pandas-ml/blob/26717cc33ddc3548b023a6410b2235fb21a7b382/pandas_ml/confusion_matrix/abstract.py#L461-L465
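Usage sketch; ConfusionMatrix is the concrete class pandas_ml exposes on top of this abstract base, so the import is an assumption based on the package layout rather than on this record.

from pandas_ml import ConfusionMatrix

y_true = ['cat', 'cat', 'dog', 'cat', 'dog']
y_pred = ['cat', 'cat', 'dog', 'dog', 'dog']

cm = ConfusionMatrix(y_true, y_pred)
print(cm.max())  # 2: the largest single cell count (cat->cat and dog->dog)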
import numpy as np import pandas as pd import collections import pandas_ml as pdml from pandas_ml.confusion_matrix.stats import binom_interval, class_agreement, prop_test class ConfusionMatrixAbstract(object): TRUE_NAME = 'Actual' PRED_NAME = 'Predicted' def __init__(self, y_true, y_pred, labels=None, display_sum=True, backend='matplotlib', true_name='Actual', pred_name='Predicted'): self.true_name = true_name self.pred_name = pred_name if isinstance(y_true, pd.Series): self._y_true = y_true self._y_true.name = self.true_name else: self._y_true = pd.Series(y_true, name=self.true_name) if isinstance(y_pred, pd.Series): self._y_pred = y_pred self._y_pred.name = self.pred_name else: self._y_pred = pd.Series(y_pred, name=self.pred_name) if labels is not None: if not self.is_binary: self._y_true = self._y_true.map(lambda i: self._label(i, labels)) self._y_pred = self._y_pred.map(lambda i: self._label(i, labels)) else: N = len(labels) assert len(labels) == 2, "labels be a list with length=2 - length=%d" % N d = {labels[0]: False, labels[1]: True} self._y_true = self._y_true.map(d) self._y_pred = self._y_pred.map(d) raise(NotImplementedError) N_true = len(y_true) N_pred = len(y_pred) assert N_true == N_pred, "y_true must have same size - %d != %d" % (N_true, N_pred) df = pd.crosstab(self._y_true, self._y_pred) idx = self._classes(df) if self.is_binary and pdml.compat._PANDAS_ge_021: df = df.reindex([False, True]) df = df.reindex([False, True], axis=1) df = df.fillna(0) else: df = df.loc[idx, idx.copy()].fillna(0) self._df_confusion = df self._df_confusion.index.name = self.true_name self._df_confusion.columns.name = self.pred_name self._df_confusion = self._df_confusion.astype(np.int64) self._len = len(idx) self.backend = backend self.display_sum = display_sum def _label(self, i, labels): try: return(labels[i]) except IndexError: return(i) def __repr__(self): return(self.to_dataframe(calc_sum=self.display_sum).__repr__()) def __str__(self): return(self.to_dataframe(calc_sum=self.display_sum).__str__()) @property def classes(self): return(self._classes()) def _classes(self, df=None): if df is None: df = self.to_dataframe() idx_classes = (df.columns | df.index).copy() idx_classes.name = 'Classes' return(idx_classes) def to_dataframe(self, normalized=False, calc_sum=False, sum_label='__all__'): if normalized: a = self._df_confusion.values.astype('float') a = a.astype('float') / a.sum(axis=1)[:, np.newaxis] df = pd.DataFrame(a, index=self._df_confusion.index.copy(), columns=self._df_confusion.columns.copy()) else: df = self._df_confusion if calc_sum: df = df.copy() df[sum_label] = df.sum(axis=1) df = pd.concat([df, pd.DataFrame(df.sum(axis=0), columns=[sum_label]).T]) df.index.name = self.true_name return(df) @property def true(self): s = self.to_dataframe().sum(axis=1) s.name = self.true_name return(s) @property def pred(self): s = self.to_dataframe().sum(axis=0) s.name = self.pred_name return(s) def to_array(self, normalized=False, sum=False): return(self.to_dataframe(normalized, sum).values) def toarray(self, *args, **kwargs): return(self.to_array(*args, **kwargs)) def len(self): return(self._len) def sum(self): return(self.to_dataframe().sum().sum()) @property def population(self): return(self.sum()) def y_true(self, func=None): if func is None: return(self._y_true) else: return(self._y_true.map(func)) def y_pred(self, func=None): if func is None: return(self._y_pred) else: return(self._y_pred.map(func)) @property def title(self): if self.is_binary: return("Binary confusion matrix") else: 
return("Confusion matrix") def plot(self, normalized=False, backend='matplotlib', ax=None, max_colors=10, **kwargs): df = self.to_dataframe(normalized) try: cmap = kwargs['cmap'] except KeyError: import matplotlib.pyplot as plt cmap = plt.cm.gray_r title = self.title if normalized: title += " (normalized)" if backend == 'matplotlib': import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(9, 8)) plt.imshow(df, cmap=cmap, interpolation='nearest') ax.set_title(title) tick_marks_col = np.arange(len(df.columns)) tick_marks_idx = tick_marks_col.copy() ax.set_yticks(tick_marks_idx) ax.set_xticks(tick_marks_col) ax.set_xticklabels(df.columns, rotation=45, ha='right') ax.set_yticklabels(df.index) ax.set_ylabel(df.index.name) ax.set_xlabel(df.columns.name) N_max = self.max() if N_max > max_colors: plt.colorbar() else: pass return ax elif backend == 'seaborn': import seaborn as sns ax = sns.heatmap(df, **kwargs) return ax else: msg = "'backend' must be either 'matplotlib' or 'seaborn'" raise ValueError(msg) def binarize(self, select): if not isinstance(select, collections.Iterable): select = np.array(select) y_true_bin = self.y_true().map(lambda x: x in select) y_pred_bin = self.y_pred().map(lambda x: x in select) from pandas_ml.confusion_matrix.bcm import BinaryConfusionMatrix binary_cm = BinaryConfusionMatrix(y_true_bin, y_pred_bin) return(binary_cm) def enlarge(self, select): if not isinstance(select, collections.Iterable): idx_new_cls = pd.Index([select]) else: idx_new_cls = pd.Index(select) new_idx = self._df_confusion.index | idx_new_cls new_idx.name = self.true_name new_col = self._df_confusion.columns | idx_new_cls new_col.name = self.pred_name print(new_col) self._df_confusion = self._df_confusion.loc[:, new_col] @property def stats_overall(self): df = self._df_confusion d_stats = collections.OrderedDict() d_class_agreement = class_agreement(df) key = 'Accuracy' try: d_stats[key] = d_class_agreement['diag'] except KeyError: d_stats[key] = np.nan key = '95% CI' try: d_stats[key] = binom_interval(np.sum(np.diag(df)), df.sum().sum()) except: d_stats[key] = np.nan d_prop_test = prop_test(df) d_stats['No Information Rate'] = 'ToDo' d_stats['P-Value [Acc > NIR]'] = d_prop_test['p.value'] d_stats['Kappa'] = d_class_agreement['kappa'] d_stats['Mcnemar\'s Test P-Value'] = 'ToDo' return(d_stats) @property def stats_class(self): df = pd.DataFrame(columns=self.classes) for cls in self.classes: binary_cm = self.binarize(cls) binary_cm_stats = binary_cm.stats() for key, value in binary_cm_stats.items(): df.loc[key, cls] = value d_name = { 'population': 'Population', 'P': 'P: Condition positive', 'N': 'N: Condition negative', 'PositiveTest': 'Test outcome positive', 'NegativeTest': 'Test outcome negative', 'TP': 'TP: True Positive', 'TN': 'TN: True Negative', 'FP': 'FP: False Positive', 'FN': 'FN: False Negative', 'TPR': 'TPR: (Sensitivity, hit rate, recall)', 'TNR': 'TNR=SPC: (Specificity)', 'PPV': 'PPV: Pos Pred Value (Precision)', 'NPV': 'NPV: Neg Pred Value', 'prevalence': 'Prevalence', 'FPR': 'FPR: False-out', 'FDR': 'FDR: False Discovery Rate', 'FNR': 'FNR: Miss Rate', 'ACC': 'ACC: Accuracy', 'F1_score': 'F1 score', 'MCC': 'MCC: Matthews correlation coefficient', 'informedness': 'Informedness', 'markedness': 'Markedness', 'LRP': 'LR+: Positive likelihood ratio', 'LRN': 'LR-: Negative likelihood ratio', 'DOR': 'DOR: Diagnostic odds ratio', 'FOR': 'FOR: False omission rate', } df.index = df.index.map(lambda id: self._name_from_dict(id, d_name)) return(df) def stats(self, lst_stats=None): 
d_stats = collections.OrderedDict() d_stats['cm'] = self d_stats['overall'] = self.stats_overall d_stats['class'] = self.stats_class return(d_stats) def _name_from_dict(self, key, d_name): try: return(d_name[key]) except (KeyError, TypeError): return(key) def _str_dict(self, d, line_feed_key_val='\n', line_feed_stats='\n\n', d_name=None): s = "" for i, (key, val) in enumerate(d.items()): name = self._name_from_dict(key, d_name) if i != 0: s = s + line_feed_stats s = s + "%s:%s%s" % (name, line_feed_key_val, val) return(s) def _str_stats(self, lst_stats=None): d_stats_name = { "cm": "Confusion Matrix", "overall": "Overall Statistics", "class": "Class Statistics", } stats = self.stats(lst_stats) d_stats_str = collections.OrderedDict([ ("cm", str(stats['cm'])), ("overall", self._str_dict( stats['overall'], line_feed_key_val=' ', line_feed_stats='\n')), ("class", str(stats['class'])), ]) s = self._str_dict( d_stats_str, line_feed_key_val='\n\n', line_feed_stats='\n\n\n', d_name=d_stats_name) return(s) def print_stats(self, lst_stats=None): print(self._str_stats(lst_stats)) def get(self, actual=None, predicted=None): if actual is None: actual = predicted if predicted is None: predicted = actual return(self.to_dataframe().loc[actual, predicted])
BSD 3-Clause New or Revised License
richardaecn/class-balanced-loss
tpu/models/official/retinanet/retinanet_architecture.py
resnet_v1_generator
python
def resnet_v1_generator(block_fn, layers, data_format='channels_last'):

    def model(inputs, is_training_bn=False):
        inputs = conv2d_fixed_padding(
            inputs=inputs, filters=64, kernel_size=7, strides=2,
            data_format=data_format)
        inputs = tf.identity(inputs, 'initial_conv')
        inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format)

        inputs = tf.layers.max_pooling2d(
            inputs=inputs, pool_size=3, strides=2, padding='SAME',
            data_format=data_format)
        inputs = tf.identity(inputs, 'initial_max_pool')

        c2 = block_group(
            inputs=inputs, filters=64, blocks=layers[0], strides=1,
            block_fn=block_fn, is_training_bn=is_training_bn, name='block_group1',
            data_format=data_format)
        c3 = block_group(
            inputs=c2, filters=128, blocks=layers[1], strides=2,
            block_fn=block_fn, is_training_bn=is_training_bn, name='block_group2',
            data_format=data_format)
        c4 = block_group(
            inputs=c3, filters=256, blocks=layers[2], strides=2,
            block_fn=block_fn, is_training_bn=is_training_bn, name='block_group3',
            data_format=data_format)
        c5 = block_group(
            inputs=c4, filters=512, blocks=layers[3], strides=2,
            block_fn=block_fn, is_training_bn=is_training_bn, name='block_group4',
            data_format=data_format)
        return c2, c3, c4, c5

    return model
Generator of ResNet v1 model with classification layers removed.

Our actual ResNet network. We return the output of c2, c3, c4, c5.
N.B. batch norm is always run with trained parameters, as we use very small
batches when training the object layers.

Args:
  block_fn: `function` for the block to use within the model. Either
    `residual_block` or `bottleneck_block`.
  layers: list of 4 `int`s denoting the number of blocks to include in each
    of the 4 block groups. Each group consists of blocks that take inputs of
    the same resolution.
  data_format: `str` either "channels_first" for
    `[batch, channels, height, width]` or "channels_last" for
    `[batch, height, width, channels]`.

Returns:
  Model `function` that takes in `inputs` and `is_training` and returns the
  output `Tensor` of the ResNet model.
https://github.com/richardaecn/class-balanced-loss/blob/1d7857208a2abc03d84e35a9d5383af8225d4b4d/tpu/models/official/retinanet/retinanet_architecture.py#L321-L398
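Sketch of wiring the generator up as a ResNet-50 backbone; the layer counts [3, 4, 6, 3] are the standard ResNet-50 recipe rather than something stated in this record, the module targets TF 1.x (hence the placeholder), and the flat import path assumes the file is importable as retinanet_architecture.

import tensorflow as tf  # TF 1.x style (tf.layers / tf.placeholder)

from retinanet_architecture import bottleneck_block, resnet_v1_generator

backbone = resnet_v1_generator(bottleneck_block, [3, 4, 6, 3],
                               data_format='channels_last')

images = tf.placeholder(tf.float32, [8, 640, 640, 3])
c2, c3, c4, c5 = backbone(images, is_training_bn=False)
# c2..c5 are the multi-scale feature maps that RetinaNet's FPN consumes.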
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf _WEIGHT_DECAY = 1e-4 _BATCH_NORM_DECAY = 0.997 _BATCH_NORM_EPSILON = 1e-4 _RESNET_MAX_LEVEL = 5 def batch_norm_relu(inputs, is_training_bn, relu=True, init_zero=False, data_format='channels_last', name=None): if init_zero: gamma_initializer = tf.zeros_initializer() else: gamma_initializer = tf.ones_initializer() if data_format == 'channels_first': axis = 1 else: axis = 3 inputs = tf.layers.batch_normalization( inputs=inputs, axis=axis, momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=is_training_bn, fused=True, gamma_initializer=gamma_initializer, name=name) if relu: inputs = tf.nn.relu(inputs) return inputs def fixed_padding(inputs, kernel_size, data_format='channels_last'): pad_total = kernel_size - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg if data_format == 'channels_first': padded_inputs = tf.pad( inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) else: padded_inputs = tf.pad( inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) return padded_inputs def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format='channels_last'): if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format=data_format) return tf.layers.conv2d( inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding=('SAME' if strides == 1 else 'VALID'), use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format) def residual_block(inputs, filters, is_training_bn, strides, use_projection=False, data_format='channels_last'): shortcut = inputs if use_projection: shortcut = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=1, strides=strides, data_format=data_format) shortcut = batch_norm_relu( shortcut, is_training_bn, relu=False, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format) inputs = batch_norm_relu( inputs, is_training_bn, relu=False, init_zero=True, data_format=data_format) return tf.nn.relu(inputs + shortcut) def bottleneck_block(inputs, filters, is_training_bn, strides, use_projection=False, data_format='channels_last'): shortcut = inputs if use_projection: filters_out = 4 * filters shortcut = conv2d_fixed_padding( inputs=inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=data_format) shortcut = batch_norm_relu( shortcut, is_training_bn, relu=False, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=1, strides=1, data_format=data_format) inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm_relu(inputs, is_training_bn, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=data_format) inputs = batch_norm_relu( inputs, is_training_bn, relu=False, init_zero=True, data_format=data_format) return tf.nn.relu(inputs + shortcut) def block_group(inputs, filters, block_fn, blocks, strides, is_training_bn, name, 
data_format='channels_last'): inputs = block_fn( inputs, filters, is_training_bn, strides, use_projection=True, data_format=data_format) for _ in range(1, blocks): inputs = block_fn( inputs, filters, is_training_bn, 1, data_format=data_format) return tf.identity(inputs, name)
MIT License
metatab/geoid
geoid/core.py
CountyName.medium_name
python
def medium_name(self):
    return self.state_name_re.sub('', self.name)
The census name without the state
https://github.com/metatab/geoid/blob/7f91dee11f2131381bb57628a55587f5c10f8e6b/geoid/core.py#L479-L481
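Sketch only: CountyName's constructor and its state_name_re pattern are not shown in this record, so the construction call and the exact output below are assumptions about how a census-style county name is stripped of its state suffix.

from geoid.core import CountyName  # class location taken from the path above

cn = CountyName("San Diego County, California")   # assumed constructor usage
print(cn.medium_name)  # expected: the name without the state part,
                       # e.g. 'San Diego County'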
import inspect import re import sys import six names = { 'null': 1, 'us': 10, 'region': 20, 'division': 30, 'state': 40, 'county': 50, 'cosub': 60, 'place': 160, 'ua': 400, 'tract': 140, 'blockgroup': 150, 'block': 101, 'puma': 795, 'sdelm': 950, 'sdsec': 960, 'sduni': 970, 'zcta': 860, 'zip': 1200, 'sldl': 620, 'sldu': 610, 'cdcurr': 500, 'state_aianhh': 260, 'necta_nectadiv_state_county_cousub': 358, 'state_aianhh_place': 269, 'aianhh_state_county': 270, 'state_cbsa_metdiv': 323, 'state_aianhh280': 280, 'state_place_county': 155, 'aianhh_aitsce_state': 290, 'state_aianhh_aihhtli': 283, 'state_cdcurr_aianhh': 550, 'state_concit': 170, 'state_concit_place': 172, 'state_aianhh_aihhtli286': 286, 'cbsa': 310, 'cbsa_state': 311, 'cbsa_state_place': 312, 'cbsa_state_county': 313, 'cbsa_metdiv': 314, 'cbsa_metdiv_state': 315, 'state_cbsa': 320, 'state_cbsa_place': 321, 'state_cbsa_county': 322, 'state_county_cousub_submcd': 67, 'state_cbsa_metdiv_county': 324, 'state_county_cousub_place': 70, 'necta_state_county': 353, 'csa': 330, 'csa_state': 331, 'csa_cbsa': 332, 'csa_cbsa_state': 333, 'cnecta': 335, 'state_county_cousub_place_tract': 80, 'cnecta_necta': 337, 'cnecta_necta_state': 338, 'state_csa': 340, 'state_csa_cbsa': 341, 'state_cnecta': 345, 'state_cnecta_necta': 346, 'necta': 350, 'necta_state': 351, 'necta_state_place': 352, 'cnecta_state': 336, 'necta_state_county_cousub': 354, 'necta_nectadiv': 355, 'necta_nectadiv_state': 356, 'state_anrc': 230, 'necta_nectadiv_state_county': 357, 'state_necta': 360, 'cbsa_metdiv_state_county': 316, 'state_necta_county': 362, 'state_necta_county_cousub': 363, 'state_necta_nectadiv': 364, 'state_necta_nectadiv_county': 365, 'state_necta_nectadiv_county_cousub': 366, 'ua_state': 410, 'ua_state_county': 430, 'state_sldu_county': 612, 'state_sldu': 610, 'state_sldl_county': 622, 'state_sldl': 620, 'state_cdcurr_county': 510, 'state_necta_place': 361, 'aianhh': 250, 'aianhh_aitsce': 251, 'aianhh_aihhtli': 252, 'state_sldl_county': 622, 'aianhh_aihhtli254': 254 } lengths = { 'null': 1, 'aianhh': 4, 'aihhtli': '1', 'aitsce': 3, 'anrc': 5, 'blkgrp': 1, 'blockgroup': 1, 'block': 4, 'cbsa': 5, 'cdcurr': 2, 'cnecta': 3, 'concit': 5, 'county': 3, 'cousub': 5, 'cosub': 5, 'csa': 3, 'division': 1, 'metdiv': 5, 'necta': 5, 'nectadiv': 5, 'place': 5, 'puma5': 5, 'region': 1, 'sdelm': 5, 'sdsec': 5, 'sduni': 5, 'sldl': '3', 'sldu': '3', 'state': 2, 'submcd': 5, 'tract': 6, 'ua': 5, 'ur': 1, 'us': 0, 'zcta': 5, 'zip': 5, } segments = { 1: ['null'], 10: ['us'], 20: ['region'], 30: ['division'], 40: ['state'], 50: ['state', 'county'], 60: ['state', 'county', 'cousub'], 67: ['state', 'county', 'cousub', 'submcd'], 70: ['state', 'county', 'cousub', 'place'], 80: ['state', 'county', 'cousub', 'place', 'tract'], 101: ['state', 'county', 'tract', 'block'], 140: ['state', 'county', 'tract'], 150: ['state', 'county', 'tract', 'blockgroup'], 155: ['state', 'place', 'county'], 160: ['state', 'place'], 170: ['state', 'concit'], 172: ['state', 'concit', 'place'], 230: ['state', 'anrc'], 250: ['aianhh'], 251: ['aianhh', 'aitsce'], 252: ['aianhh', 'aihhtli'], 254: ['aianhh', 'aihhtli'], 260: ['state', 'aianhh'], 269: ['state', 'aianhh', 'place'], 270: ['aianhh', 'state', 'county'], 280: ['state', 'aianhh'], 283: ['state', 'aianhh', 'aihhtli'], 286: ['state', 'aianhh', 'aihhtli'], 290: ['aianhh', 'aitsce', 'state'], 310: ['cbsa'], 311: ['cbsa', 'state'], 312: ['cbsa', 'state', 'place'], 313: ['cbsa', 'state', 'county'], 314: ['cbsa', 'metdiv'], 315: ['cbsa', 'metdiv', 'state'], 316: 
['cbsa', 'metdiv', 'state', 'county'], 320: ['state', 'cbsa'], 321: ['state', 'cbsa', 'place'], 322: ['state', 'cbsa', 'county'], 323: ['state', 'cbsa', 'metdiv'], 324: ['state', 'cbsa', 'metdiv', 'county'], 330: ['csa'], 331: ['csa', 'state'], 332: ['csa', 'cbsa'], 333: ['csa', 'cbsa', 'state'], 335: ['cnecta'], 336: ['cnecta', 'state'], 337: ['cnecta', 'necta'], 338: ['cnecta', 'necta', 'state'], 340: ['state', 'csa'], 341: ['state', 'csa', 'cbsa'], 345: ['state', 'cnecta'], 346: ['state', 'cnecta', 'necta'], 350: ['necta'], 351: ['necta', 'state'], 352: ['necta', 'state', 'place'], 353: ['necta', 'state', 'county'], 354: ['necta', 'state', 'county', 'cousub'], 355: ['necta', 'nectadiv'], 356: ['necta', 'nectadiv', 'state'], 357: ['necta', 'nectadiv', 'state', 'county'], 358: ['necta', 'nectadiv', 'state', 'county', 'cousub'], 360: ['state', 'necta'], 361: ['state', 'necta', 'place'], 362: ['state', 'necta', 'county'], 363: ['state', 'necta', 'county', 'cousub'], 364: ['state', 'necta', 'nectadiv'], 365: ['state', 'necta', 'nectadiv', 'county'], 366: ['state', 'necta', 'nectadiv', 'county', 'cousub'], 400: ['ua'], 410: ['ua', 'state'], 430: ['ua','state','county'], 500: ['state', 'cdcurr'], 510: ['state', 'cdcurr', 'county'], 550: ['state', 'cdcurr', 'aianhh'], 610: ['state', 'sldu'], 612: ['state', 'sldu', 'county'], 620: ['state', 'sldl'], 622: ['state', 'sldl', 'county'], 795: ['state', 'puma5'], 860: ['zcta'], 950: ['state', 'sdelm'], 960: ['state', 'sdsec'], 970: ['state', 'sduni'], 1200: ['zip'] } descriptions = { 1: 'United States', 10: 'United States', 20: 'Region', 30: 'Division', 40: 'State', 50: 'County', 60: 'County Subdivision', 67: 'State (Puerto Rico Only)-County-County Subdivision-Subbarrio', 70: 'County Subdivision-Place/Remainder', 80: 'County Subdivision-Place/Remainder-Census Tract', 101: 'block', 140: 'Census Tract', 150: 'Census Tract-Block Group', 155: 'Place-County', 160: 'Place', 170: 'Consolidated City', 172: 'Consolidated City-Place Within Consolidated City', 230: 'State-Alaska Native Regional Corporation', 250: 'American Indian Area/Alaska Native Area/Hawaiian Home Land', 251: 'American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder', 252: 'American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)', 254: 'American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land', 260: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State', 269: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder', 270: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County', 280: 'State-American Indian Area/Alaska Native Area/Hawaiian Home Land', 283: 'aihhtli', 286: 'aihhtli', 290: 'state', 310: 'CBSA', 311: 'CBSA-State-County', 312: 'CBSA-State-Principal City', 313: 'CBSA-State-County', 314: 'Metropolitan Statistical Area/Metropolitan Division', 315: 'Metropolitan Statistical Area/Metropolitan Division-State', 316: 'Metropolitan Statistical Area/Metropolitan Division-State-County', 320: 'State- CBSA', 321: 'State- CBSA -Principal City', 322: 'State- CBSA -County', 323: 'State- Metropolitan Statistical Area/Metropolitan Division', 324: 'State- Metropolitan Statistical Area/Metropolitan Division-County', 330: 'Combined Statistical Area', 331: 'Combined Statistical Area-State', 332: 'Combined Statistical Area-CBSA', 333: 'Combined Statistical Area-CBSA-State', 335: 'Combined New England City and Town Area', 336: 'Combined New England City and Town Area -State', 337: 
'Combined New England City and Town Area -New England City and Town Area', 338: 'Combined New England City and Town Area -New England City and Town Area-State', 340: 'State-Combined Statistical Area', 341: 'State-Combined Statistical Area-CBSA', 345: 'State-Combined New England City and Town Area', 346: 'State-Combined New England City and Town Area-New England City and Town Area', 350: 'New England City and Town Area', 351: 'New England City and Town Area-State', 352: 'New England City and Town Area-State-Principal City', 353: 'New England City and Town Area-State-County', 354: 'New England City and Town Area-State-County-County Subdivision', 355: 'New England City and Town Area (NECTA)-NECTA Division', 356: 'New England City and Town Area (NECTA)-NECTA Division-State', 357: 'New England City and Town Area (NECTA)-NECTA Division-State-County', 358: 'New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision', 360: 'State-New England City and Town Area', 361: 'State-New England City and Town Area-Principal City', 362: 'State-New England City and Town Area-County', 363: 'State-New England City and Town Area-County-County Subdivision', 364: 'State-New England City and Town Area (NECTA)-NECTA Division', 365: 'State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision', 400: 'Urban Area,', 410: 'Urban Area, State,', 430: 'Urban Area, State, County,', 500: 'Congressional District', 510: 'Congressional District, County', 550: 'Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land', 610: 'State Senate District', 612: 'State Senate District-County', 620: 'State House District', 622: 'State House District-County', 795: 'State-Public Use MicroSample Area 5%', 860: 'ZIP Code Tabulation Area', 950: 'State-Elementary School District', 960: 'State-High School District', 970: 'State-Unified School District', } plurals = { 'county': 'counties', 'place': 'places', 'Sdlu': 'State ' } class NotASummaryName(Exception): class ParseError(Exception): def parse_to_gvid(v): from geoid.civick import GVid from geoid.acs import AcsGeoid m1 = '' try: return GVid.parse(v) except ValueError as e: m1 = str(e) try: return AcsGeoid.parse(v).convert(GVid) except ValueError as e: raise ValueError("Failed to parse to either ACS or GVid: {}; {}".format(m1, str(e))) def base62_encode(num): num = int(num) alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if (num == 0): return alphabet[0] arr = [] base = len(alphabet) while num: rem = num % base num = num // base arr.append(alphabet[rem]) arr.reverse() return ''.join(arr) def base62_decode(string): alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' base = len(alphabet) strlen = len(string) num = 0 idx = 0 for char in string: power = (strlen - (idx + 1)) num += alphabet.index(char) * (base ** power) idx += 1 return int(num) def augment(module_name, base_class): for name, cls in inspect.getmembers(sys.modules[module_name], lambda x : inspect.isclass(x) and issubclass(x, base_class) ): if cls == base_class: continue cls.augment() def get_class(module, sl): for name, named_sl in names.items(): if named_sl == sl or sl == name: return getattr(module, name.capitalize()) raise NotASummaryName("No class for summary_level {}".format(sl)) def make_classes(base_class, module): from functools import partial for k in names: cls = base_class.class_factory(k.capitalize()) cls.augment() setattr(module, k.capitalize(), cls) setattr(module, 'get_class', 
partial(get_class, module)) class CountyName(object): state_name_pattern = r', (.*)$' state_name_re = re.compile(state_name_pattern) def __init__(self, name): self.name = name def intuit_name(self, name): raise NotImplementedError @property def state(self): try: county, state = self.name.split(',') return state except ValueError: return '' @property
BSD 2-Clause Simplified License
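The base62_encode and base62_decode helpers in the context above are self-contained, so their round-trip behaviour is easy to check. The snippet below is a minimal sketch that assumes both functions have been imported or pasted into scope (this record does not show their module path), and the numeric values are illustrative only.

# Hypothetical sanity check for the base62 helpers defined in the context above.
# Assumes base62_encode and base62_decode are in scope.
assert base62_encode(0) == '0'            # zero maps to the first symbol of the alphabet
assert base62_encode(61) == 'Z'           # largest single-symbol value: 10 digits + 26 + 26 letters
assert base62_encode(62) == '10'          # first two-symbol value
assert base62_decode(base62_encode(123456789)) == 123456789   # encode/decode round-trips losslessly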
dickreuter/neuron_poker
agents/agent_custom_q1.py
Player.initiate_agent
python
def initiate_agent(self, nb_actions): self.model = Sequential() self.model.add(Dense(512, activation='relu', input_shape=env.observation_space)) self.model.add(Dropout(0.2)) self.model.add(Dense(512, activation='relu')) self.model.add(Dropout(0.2)) self.model.add(Dense(512, activation='relu')) self.model.add(Dropout(0.2)) self.model.add(Dense(nb_actions, activation='linear')) memory = SequentialMemory(limit=memory_limit, window_length=window_length) policy = TrumpPolicy()
Initiate a deep Q agent.
https://github.com/dickreuter/neuron_poker/blob/9f841e5aeead681fa1fb2955524c53081fba2078/agents/agent_custom_q1.py#L23-L38
from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout from rl.memory import SequentialMemory from agents.agent_keras_rl_dqn import TrumpPolicy, memory_limit, window_length from gym_env import env class Player: def __init__(self, name='Custom_Q1'): self.equity_alive = 0 self.actions = [] self.last_action_in_stage = '' self.temp_stack = [] self.name = name self.autoplay = True self.model = None
MIT License
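The initiate_agent excerpt above is cut off after the replay memory and policy are created, before any agent object is built. The sketch below reconstructs only the network part of that excerpt so it can be run and inspected standalone; the observation size (328) and the action count are assumptions, since the real values come from gym_env.env.observation_space and the caller.

# Minimal standalone sketch of the network from initiate_agent above.
# obs_shape and nb_actions are hypothetical placeholders, not the repository's values.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

obs_shape = (328,)     # assumed flat observation vector
nb_actions = 8         # assumed number of discrete poker actions

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=obs_shape))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(nb_actions, activation='linear'))   # one linear Q-value per action
model.summary()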
uzaymacar/attention-mechanisms
examples/machine_translation.py
unicode_to_ascii
python
def unicode_to_ascii(string): return ''.join(char for char in unicodedata.normalize('NFD', string) if unicodedata.category(char) != 'Mn')
Convert a string from the Unicode file to ASCII format.
https://github.com/uzaymacar/attention-mechanisms/blob/37f131dee1148cd1dec8b59116efe8e52134004f/examples/machine_translation.py#L71-L74
import argparse import time import os import unicodedata import re import numpy as np from sklearn.model_selection import train_test_split import tensorflow as tf from tensorflow.keras.utils import get_file, to_categorical from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras import Model from tensorflow.keras.layers import Input, Embedding, Bidirectional, Dense, RepeatVector, TimeDistributed, Flatten, Lambda, Concatenate, Permute, Reshape from tensorflow.compat.v1.keras.layers import CuDNNLSTM from tensorflow.keras.backend import permute_dimensions import sys sys.path.append('..') from layers import Attention parser = argparse.ArgumentParser() parser.add_argument("--config", default=0, help="Integer value representing a model configuration") args = parser.parse_args() np.random.seed(500) tf.random.set_seed(500) embedding_dim = 128 batch_size = 100 num_epochs = 30 config = int(args.config) zipped = get_file( fname='spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip', extract=True ) file = os.path.join(os.path.dirname(zipped), 'spa-eng/spa.txt')
MIT License
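unicode_to_ascii works by NFD-decomposing the string and dropping every combining mark (Unicode category 'Mn'), which is what removes Spanish accents during preprocessing. A small check, assuming the function above is in scope:

# NFD splits 'é' into 'e' plus a combining acute accent (category 'Mn'), which is then dropped.
assert unicode_to_ascii('él café') == 'el cafe'
assert unicode_to_ascii('¿Qué tal?') == '¿Que tal?'   # non-combining punctuation such as '¿' is kept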
hypothesis/h
h/emails/signup.py
generate
python
def generate(request, user_id, email, activation_code): context = { "activate_link": request.route_url("activate", id=user_id, code=activation_code) } subject = _("Please activate your account") text = render("h:templates/emails/signup.txt.jinja2", context, request=request) html = render("h:templates/emails/signup.html.jinja2", context, request=request) return [email], subject, text, html
Generate an email for a user signup. :param request: the current request :type request: pyramid.request.Request :param user_id: the new user's primary key ID :type user_id: int :param email: the new user's email address :type email: text :param activation_code: the activation code :type activation_code: text :returns: a 4-element tuple containing: recipients, subject, text, html
https://github.com/hypothesis/h/blob/1bf1fe34fd471f26a216e682d15ce986dd400fdb/h/emails/signup.py#L6-L30
from pyramid.renderers import render from h.i18n import TranslationString as _
BSD 2-Clause Simplified License
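generate builds the email but does not send it; callers unpack the returned 4-tuple and hand the pieces to a mailer. A hedged sketch of that calling pattern is below; the request must be a real Pyramid request with the 'activate' route configured, and the user id, address and activation code are placeholders.

# Hypothetical caller; `request` is assumed to be a configured pyramid.request.Request.
from h.emails import signup

recipients, subject, text, html = signup.generate(
    request, user_id=123, email='user@example.com', activation_code='abc123'
)
# recipients == ['user@example.com']; subject, text and html are passed on to the mail sender.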
ryanbhayward/games-puzzles-algorithms
old/tournament/tournament.py
Tournament._play_round
python
def _play_round(self, first_to_play): self._initialize_round() player_index = first_to_play player_mapping = {} while not self._round_finished(): player = self._players[player_index] if player not in player_mapping.values(): player_mapping[player.player_to_move()] = player move = player.play() self._logger.debug('{} plays {}\n{}'.format(player, move, player.board())) if move == 'resign': self._player_has_resigned = True elif move == 'pass': self._consecutive_passes += 1 else: self._consecutive_passes = 0 self._notify_players(move, player_index) player_index = self._next_player(player_index) if self._player_has_resigned: winner = player.player_to_move() else: (winner, score) = player.final_score() if winner == '0': self._logger.debug('Round ends in a draw.') self._results.increment_win_count('', winner) else: winning_player = player_mapping[winner] self._logger.debug('{} wins round {} as {}'.format(winning_player, self._round, winner)) self._results.increment_win_count(winning_player, winner) self._round += 1
Play a single round of a tournament.
https://github.com/ryanbhayward/games-puzzles-algorithms/blob/53b12cf37324f8757024cec5839e8cb2625cc4e2/old/tournament/tournament.py#L86-L129
from collections import defaultdict class TournamentResult(object): def __init__(self): self._sequence = [] self._results = defaultdict(int) self._draws = 0 def increment_win_count(self, player, side): if side == '0': self._sequence.append(('Draw', '0')) self._draws += 1 else: self._sequence.append((player, side)) self._results[player] += 1 def __str__(self): outcomes = [] for (player, games) in self._results.items(): outcomes.append('{} won {} games.'.format(player, games)) if self._draws > 0: outcomes.append('{} draws occurred.'.format(self._draws)) return '\n'.join(outcomes) class Tournament(object): def __init__(self, players, num_games, game_size, time_limit, logger): self._logger = logger self._results = TournamentResult() self._round = 1 self._players = players self._games = num_games self._size = game_size self._time_limit = time_limit self._configure_players() def _configure_players(self): for player in self._players: player.configure(size=self._size, time_limit=self._time_limit) def _initialize_round(self): self._player_has_resigned = False self._consecutive_passes = 0 for player in self._players: player.clear() def _round_finished(self): return self._player_has_resigned or self._consecutive_passes > 1 def _notify_players(self, move, skip_index): for (i, player) in enumerate(self._players): if i == skip_index: continue player.play(move) def _next_player(self, player_index): num_players = len(self._players) return (player_index + 1) % num_players
MIT License
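_play_round drives its players through an informal interface: configure(), clear(), board(), player_to_move(), play() and final_score(). The stub below is a sketch of that interface as inferred from the excerpt, not code from the repository; it passes on every turn so a round ends in a draw after two consecutive passes.

# Hypothetical stub that satisfies the player interface _play_round expects.
class PassingPlayer:
    def configure(self, size, time_limit): pass
    def clear(self): pass
    def board(self): return '(empty board)'
    def player_to_move(self): return 'b'
    def play(self, move=None):
        # Called with no argument when the tournament wants our move,
        # and with an argument when it merely relays an opponent's move.
        return 'pass' if move is None else None
    def final_score(self): return ('0', 0)   # '0' is treated as a draw in _play_round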
fatescript/centernet-better
dl_lib/evaluation/coco_evaluation.py
COCOEvaluator._eval_predictions
python
def _eval_predictions(self, tasks): self._logger.info("Preparing results for COCO format ...") self._coco_results = list(itertools.chain(*[x["instances"] for x in self._predictions])) if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): reverse_id_mapping = { v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() } for result in self._coco_results: category_id = result["category_id"] assert ( category_id in reverse_id_mapping ), "A prediction has category_id={}, which is not available in the dataset.".format( category_id ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir: file_path = os.path.join(self._output_dir, "coco_instances_results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(self._coco_results)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return self._logger.info("Evaluating predictions ...") for task in sorted(tasks): coco_eval = ( _evaluate_predictions_on_coco( self._coco_api, self._coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas ) if len(self._coco_results) > 0 else None ) res = self._derive_coco_results( coco_eval, task, class_names=self._metadata.get("thing_classes") ) self._results[task] = res
Evaluate self._predictions on the given tasks. Fill self._results with the metrics of the tasks.
https://github.com/fatescript/centernet-better/blob/972dbf2882375d54ec0e06ada21f39baaa997f0c/dl_lib/evaluation/coco_evaluation.py#L143-L189
import contextlib import copy import io import itertools import json import logging import os import pickle import sys from collections import OrderedDict import numpy as np import pycocotools.mask as mask_util import torch from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval import dl_lib.utils.comm as comm from dl_lib.data import MetadataCatalog from dl_lib.data.datasets.coco import convert_to_coco_json from dl_lib.structures import Boxes, BoxMode, pairwise_iou from dl_lib.utils.file_io import PathManager from dl_lib.utils.logger import create_small_table, create_table_with_header from .evaluator import DatasetEvaluator class COCOEvaluator(DatasetEvaluator): def __init__(self, dataset_name, cfg, distributed, output_dir=None, dump=False): self._dump = dump self._tasks = self._tasks_from_config(cfg) self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): self._logger.warning(f"json_file was not found in MetaDataCatalog for '{dataset_name}'") cache_path = convert_to_coco_json(dataset_name, output_dir) self._metadata.json_file = cache_path json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._coco_api = COCO(json_file) self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS self._do_evaluation = "annotations" in self._coco_api.dataset def reset(self): self._predictions = [] self._coco_results = [] def _tasks_from_config(self, cfg): if self._dump: with open("README.md", "w") as f: name = cfg.OUTPUT_DIR.split("/")[-1] f.write("# {} \n".format(name)) tasks = ("bbox",) if cfg.MODEL.MASK_ON: tasks = tasks + ("segm",) if cfg.MODEL.KEYPOINT_ON: tasks = tasks + ("keypoints",) return tasks def process(self, inputs, outputs): for input, output in zip(inputs, outputs): prediction = {"image_id": input["image_id"]} if "instances" in output: instances = output["instances"].to(self._cpu_device) prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) if "proposals" in output: prediction["proposals"] = output["proposals"].to(self._cpu_device) self._predictions.append(prediction) def evaluate(self): if self._distributed: comm.synchronize() self._predictions = comm.gather(self._predictions, dst=0) self._predictions = list(itertools.chain(*self._predictions)) if not comm.is_main_process(): return {} if len(self._predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(self._predictions, f) self._results = OrderedDict() if "proposals" in self._predictions[0]: self._eval_box_proposals() if "instances" in self._predictions[0]: self._eval_predictions(set(self._tasks)) return copy.deepcopy(self._results)
Apache License 2.0
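_eval_predictions is not called directly; it runs at the end of the reset/process/evaluate cycle defined on the evaluator. A sketch of that driving loop follows, with the dataset name, config, model and data loader as placeholders.

# Hypothetical driver loop; cfg, model and data_loader are placeholders.
evaluator = COCOEvaluator('coco_2017_val', cfg, distributed=False, output_dir='./eval_out')
evaluator.reset()
for inputs in data_loader:          # each element is a list of dicts carrying 'image_id', etc.
    outputs = model(inputs)         # expected to yield dicts with an 'instances' field
    evaluator.process(inputs, outputs)
results = evaluator.evaluate()      # gathers predictions and calls _eval_predictions internally
print(results['bbox'])              # per-task COCO metrics such as AP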
ip2location/ip2proxy-python
IP2Proxy.py
IP2Proxy.open
python
def open(self, filename): self.close() if os.path.isfile(filename) == False: raise ValueError("The database file does not seem to exist.") self._f = open(filename, 'rb') self._dbtype = struct.unpack('B', self._f.read(1))[0] self._dbcolumn = struct.unpack('B', self._f.read(1))[0] self._dbyear = 2000 + struct.unpack('B', self._f.read(1))[0] self._dbmonth = struct.unpack('B', self._f.read(1))[0] self._dbday = struct.unpack('B', self._f.read(1))[0] self._ipv4dbcount = struct.unpack('<I', self._f.read(4))[0] self._ipv4dbaddr = struct.unpack('<I', self._f.read(4))[0] self._ipv6dbcount = struct.unpack('<I', self._f.read(4))[0] self._ipv6dbaddr = struct.unpack('<I', self._f.read(4))[0] self._ipv4indexbaseaddr = struct.unpack('<I', self._f.read(4))[0] self._ipv6indexbaseaddr = struct.unpack('<I', self._f.read(4))[0] self._productcode = struct.unpack('B', self._f.read(1))[0] self._licensecode = struct.unpack('B', self._f.read(1))[0] self._databasesize = struct.unpack('B', self._f.read(1))[0] if (self._productcode != 2) : if (self._dbyear > 20 and self._productcode != 0) : self._f.close() del self._f raise ValueError("Incorrect IP2Location BIN file format. Please make sure that you are using the latest IP2Location BIN file.")
Opens a database file
https://github.com/ip2location/ip2proxy-python/blob/31e88100875d610a945fe8abdc1e026540f8f235/IP2Proxy.py#L148-L176
import sys import struct import socket import ipaddress import os import json import re if sys.version < '3': import urllib, httplib def urlencode(x): return urllib.urlencode(x) def httprequest(x, usessl): try: if (usessl is True): conn = httplib.HTTPSConnection("api.ip2proxy.com") else: conn = httplib.HTTPConnection("api.ip2proxy.com") conn.request("GET", "/?" + x) res = conn.getresponse() return json.loads(res.read()) except: return None def u(x): return x.decode('utf-8') def b(x): return str(x) else: import urllib.parse, http.client def urlencode(x): return urllib.parse.urlencode(x) def httprequest(x, usessl): try: if (usessl is True): conn = http.client.HTTPSConnection("api.ip2proxy.com") else: conn = http.client.HTTPConnection("api.ip2proxy.com") conn.request("GET", "/?" + x) res = conn.getresponse() return json.loads(res.read()) except: return None def u(x): if isinstance(x, bytes): return x.decode() return x def b(x): if isinstance(x, bytes): return x return x.encode('ascii') if not hasattr(socket, 'inet_pton'): def inet_pton(t, addr): import ctypes a = ctypes.WinDLL('ws2_32.dll') in_addr_p = ctypes.create_string_buffer(b(addr)) if t == socket.AF_INET: out_addr_p = ctypes.create_string_buffer(4) elif t == socket.AF_INET6: out_addr_p = ctypes.create_string_buffer(16) n = a.inet_pton(t, in_addr_p, out_addr_p) if n == 0: raise ValueError('Invalid address') return out_addr_p.raw socket.inet_pton = inet_pton _VERSION = '3.3.0' _NO_IP = 'MISSING IP ADDRESS' _FIELD_NOT_SUPPORTED = 'NOT SUPPORTED' _INVALID_IP_ADDRESS = 'INVALID IP ADDRESS' MAX_IPV4_RANGE = 4294967295 MAX_IPV6_RANGE = 340282366920938463463374607431768211455 class IP2ProxyRecord: ip = None country_short = _FIELD_NOT_SUPPORTED country_long = _FIELD_NOT_SUPPORTED region = _FIELD_NOT_SUPPORTED city = _FIELD_NOT_SUPPORTED isp = _FIELD_NOT_SUPPORTED proxy_type = _FIELD_NOT_SUPPORTED usage_type = _FIELD_NOT_SUPPORTED as_name = _FIELD_NOT_SUPPORTED asn = _FIELD_NOT_SUPPORTED last_seen = _FIELD_NOT_SUPPORTED domain = _FIELD_NOT_SUPPORTED threat = _FIELD_NOT_SUPPORTED provider = _FIELD_NOT_SUPPORTED def __str__(self): return str(self.__dict__) def __repr__(self): return repr(self.__dict__) _COUNTRY_POSITION = (0, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) _REGION_POSITION = (0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4) _CITY_POSITION = (0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5) _ISP_POSITION = (0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6, 6) _PROXYTYPE_POSITION = (0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) _DOMAIN_POSITION = (0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7) _USAGETYPE_POSITION = (0, 0, 0, 0, 0, 0, 8, 8, 8, 8, 8, 8) _ASN_POSITION = (0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9) _AS_POSITION = (0, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10) _LASTSEEN_POSITION = (0, 0, 0, 0, 0, 0, 0, 0, 11, 11, 11, 11) _THREAT_POSITION = (0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 12) _PROVIDER_POSITION = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13) class IP2Proxy(object): def __init__(self, filename=None): if filename: self.open(filename) def __enter__(self): if not hasattr(self, '_f') or self._f.closed: raise ValueError("Cannot enter context with closed file") return self def __exit__(self, exc_type, exc_value, traceback): self.close()
MIT License
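open() above only reads and validates the BIN header; actual lookups go through the library's query methods, which fall outside this excerpt. A minimal sketch of opening and closing a database is below; the file path is a placeholder, and get_all() is assumed from the project's documented API rather than from the code shown here.

# Hypothetical usage; 'IP2PROXY-LITE-PX1.BIN' is a placeholder path.
import IP2Proxy

db = IP2Proxy.IP2Proxy()
db.open('IP2PROXY-LITE-PX1.BIN')     # raises ValueError if the file is missing or malformed
record = db.get_all('8.8.8.8')       # assumed query helper from the library's README
print(record)
db.close()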