Dataset columns:
repository_name: string, lengths 7 to 107
function_path: string, lengths 4 to 190
function_identifier: string, lengths 1 to 236
language: string, 1 unique value
function: string, lengths 9 to 647k
docstring: string, lengths 5 to 488k
function_url: string, lengths 71 to 285
context: string, lengths 0 to 2.51M
license: string, 5 unique values
openmdao/dymos
dymos/transcriptions/solve_ivp/solve_ivp.py
SolveIVP.setup_boundary_constraints
python
def setup_boundary_constraints(self, loc, phase): pass
Not used in SolveIVP.

Parameters
----------
loc : str
    The kind of boundary constraints being setup. Must be one of 'initial' or 'final'.
phase : dymos.Phase
    The phase object to which this transcription instance applies.
https://github.com/openmdao/dymos/blob/936b838133088cf0f79bf980cc9c0f5938b3db7a/dymos/transcriptions/solve_ivp/solve_ivp.py#L527-L538
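The setup_time method in the context code below distributes output nodes across segments when output_nodes_per_seg is given. A minimal standalone sketch of that mapping follows; the segment boundaries and node count are made up for illustration and are not taken from dymos.

import numpy as np

# Hypothetical segment boundaries in phase tau (ptau spans [-1, 1]).
segment_ends = np.array([-1.0, 0.0, 1.0])
output_nodes_per_seg = 5

node_stau = np.linspace(-1, 1, output_nodes_per_seg)
node_ptau = np.empty(0)
for iseg in range(len(segment_ends) - 1):
    v0, v1 = segment_ends[iseg], segment_ends[iseg + 1]
    # Affine map from segment tau in [-1, 1] to the segment's slice of phase tau.
    node_ptau = np.concatenate((node_ptau, v0 + 0.5 * (node_stau + 1) * (v1 - v0)))

print(node_ptau)  # 5 equally spaced output points in each of the 2 segments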
from fnmatch import filter import warnings import numpy as np import openmdao.api as om from ..transcription_base import TranscriptionBase from .components import SegmentSimulationComp, SegmentStateMuxComp, SolveIVPControlGroup, SolveIVPPolynomialControlGroup, SolveIVPTimeseriesOutputComp from ..common import TimeComp from ...utils.misc import get_rate_units, _unspecified from ...utils.introspection import get_targets, get_target_metadata, get_source_metadata from ...utils.indexing import get_src_indices_by_row class SolveIVP(TranscriptionBase): def __init__(self, grid_data=None, **kwargs): super(SolveIVP, self).__init__(**kwargs) self.grid_data = grid_data self._rhs_source = 'ode' def initialize(self): super(SolveIVP, self).initialize() self.options.declare('output_nodes_per_seg', default=None, types=(int,), allow_none=True, desc='If None, results are provided at the all nodes within each' 'segment. If an int (n) then results are provided at n ' 'equally distributed points in time within each segment.') def init_grid(self): pass def setup_time(self, phase): time_options = phase.time_options time_units = time_options['units'] num_seg = self.grid_data.num_segments grid_data = self.grid_data output_nodes_per_seg = self.options['output_nodes_per_seg'] super(SolveIVP, self).setup_time(phase) if output_nodes_per_seg is None: num_nodes = grid_data.num_nodes node_ptau = grid_data.node_ptau node_dptau_dstau = grid_data.node_dptau_dstau else: num_nodes = num_seg * output_nodes_per_seg node_stau = np.linspace(-1, 1, output_nodes_per_seg) node_ptau = np.empty(0, ) node_dptau_dstau = np.empty(0, ) for iseg in range(num_seg): v0 = grid_data.segment_ends[iseg] v1 = grid_data.segment_ends[iseg + 1] node_ptau = np.concatenate((node_ptau, v0 + 0.5 * (node_stau + 1) * (v1 - v0))) node_dptau_dstau = np.concatenate((node_dptau_dstau, 0.5 * (v1 - v0) * np.ones_like(node_stau))) time_comp = TimeComp(num_nodes=num_nodes, node_ptau=node_ptau, node_dptau_dstau=node_dptau_dstau, units=time_units) phase.add_subsystem('time', time_comp, promotes=['*']) def configure_time(self, phase): super(SolveIVP, self).configure_time(phase) num_seg = self.grid_data.num_segments grid_data = self.grid_data output_nodes_per_seg = self.options['output_nodes_per_seg'] ode = phase._get_subsystem('ode') phase.time.configure_io() for i in range(num_seg): if output_nodes_per_seg is None: i1, i2 = grid_data.subset_segment_indices['all'][i, :] src_idxs = grid_data.subset_node_indices['all'][i1:i2] else: src_idxs = np.arange(i * output_nodes_per_seg, output_nodes_per_seg * (i + 1), dtype=int) phase.connect('time', f'segment_{i}.time', src_indices=src_idxs, flat_src_indices=True) phase.connect('time_phase', f'segment_{i}.time_phase', src_indices=src_idxs, flat_src_indices=True) phase.segments.promotes(f'segment_{i}', inputs=['t_initial', 't_duration']) options = phase.time_options for name, usr_tgts, dynamic in [('time', options['targets'], True), ('time_phase', options['time_phase_targets'], True)]: targets = get_targets(phase.ode, name=name, user_targets=usr_tgts) if targets: phase.connect(name, [f'ode.{t}' for t in targets]) for name, usr_tgts, dynamic in [('t_initial', options['t_initial_targets'], False), ('t_duration', options['t_duration_targets'], False)]: targets = get_targets(ode, name=name, user_targets=usr_tgts) shape, units, static_target = get_target_metadata(ode, name=name, user_targets=targets, user_units=options['units'], user_shape=(1,)) if shape == (1,): src_idxs = None flat_src_idxs = None src_shape = None else: src_idxs = 
np.zeros(self.grid_data.subset_num_nodes['all']) flat_src_idxs = True src_shape = (1,) for t in targets: phase.promotes('ode', inputs=[(t, name)], src_indices=src_idxs, flat_src_indices=flat_src_idxs, src_shape=src_shape) if targets: phase.set_input_defaults(name=name, val=np.ones((1,)), units=options['units']) def setup_states(self, phase): phase.add_subsystem('indep_states', om.IndepVarComp(), promotes_outputs=['*']) def configure_states(self, phase): num_seg = self.grid_data.num_segments for state_name, options in phase.state_options.items(): phase.indep_states.add_output(f'initial_states:{state_name}', val=np.ones(((1,) + options['shape'])), units=options['units']) for state_name, options in phase.state_options.items(): src_idxs = get_src_indices_by_row([0], options['shape']) phase.connect(f'initial_states:{state_name}', f'segment_0.initial_states:{state_name}', src_indices=(src_idxs,), flat_src_indices=True) phase.connect(f'segment_0.states:{state_name}', f'state_mux_comp.segment_0_states:{state_name}') targets = get_targets(ode=phase.ode, name=state_name, user_targets=options['targets']) if targets: phase.connect(f'state_mux_comp.states:{state_name}', [f'ode.{t}' for t in targets]) for i in range(1, num_seg): if self.options['output_nodes_per_seg'] is None: nnps_i = self.grid_data.subset_num_nodes_per_segment['all'][i] else: nnps_i = self.options['output_nodes_per_seg'] src_idxs = get_src_indices_by_row([nnps_i-1], shape=options['shape']) phase.connect(f'segment_{i - 1}.states:{state_name}', f'segment_{i}.initial_states:{state_name}', src_indices=(src_idxs,), flat_src_indices=True) phase.connect(f'segment_{i}.states:{state_name}', f'state_mux_comp.segment_{i}_states:{state_name}') def setup_ode(self, phase): gd = self.grid_data num_seg = gd.num_segments segments_group = phase.add_subsystem(name='segments', subsys=om.Group(), promotes_outputs=['*'], promotes_inputs=['*']) for i in range(num_seg): seg_i_comp = SegmentSimulationComp( index=i, simulate_options=phase.simulate_options, grid_data=self.grid_data, ode_class=phase.options['ode_class'], ode_init_kwargs=phase.options['ode_init_kwargs'], time_options=phase.time_options, state_options=phase.state_options, control_options=phase.control_options, polynomial_control_options=phase.polynomial_control_options, parameter_options=phase.parameter_options, output_nodes_per_seg=self.options['output_nodes_per_seg']) segments_group.add_subsystem(f'segment_{i}', subsys=seg_i_comp) phase.add_subsystem('state_mux_comp', SegmentStateMuxComp(grid_data=gd, state_options=phase.state_options, output_nodes_per_seg=self.options['output_nodes_per_seg'])) if self.options['output_nodes_per_seg'] is None: self.num_output_nodes = gd.subset_num_nodes['all'] else: self.num_output_nodes = num_seg * self.options['output_nodes_per_seg'] phase.add_subsystem('ode', phase.options['ode_class'](num_nodes=self.num_output_nodes, **phase.options['ode_init_kwargs'])) def configure_ode(self, phase): gd = self.grid_data num_seg = gd.num_segments for i in range(num_seg): seg_comp = phase.segments._get_subsystem(f'segment_{i}') seg_comp.configure_io() def setup_controls(self, phase): output_nodes_per_seg = self.options['output_nodes_per_seg'] phase._check_control_options() if phase.control_options: control_group = SolveIVPControlGroup(control_options=phase.control_options, time_units=phase.time_options['units'], grid_data=self.grid_data, output_nodes_per_seg=output_nodes_per_seg) phase.add_subsystem('control_group', subsys=control_group, promotes=['controls:*', 
'control_values:*', 'control_values_all:*', 'control_rates:*']) def configure_controls(self, phase): ode = phase._get_subsystem(self._rhs_source) for name, options in phase.control_options.items(): shape, units, static_target = get_target_metadata(ode, name=name, user_targets=options['targets'], user_units=options['units'], user_shape=options['shape'], control_rate=True) options['units'] = units options['shape'] = shape if static_target: raise ValueError(f"Control '{name}' cannot be connected to its targets because one" f"or more targets are tagged with 'dymos.static_target'.") _, _, static_target = get_target_metadata(ode, name=name, user_targets=options['rate_targets'], user_units=options['units'], user_shape=options['shape'], control_rate=True) if static_target: raise ValueError(f"Control rate of '{name}' cannot be connected to its targets " f"because one or more targets are tagged with 'dymos.static_target'.") _, _, static_target = get_target_metadata(ode, name=name, user_targets=options['rate2_targets'], user_units=options['units'], user_shape=options['shape'], control_rate=True) if static_target: raise ValueError(f"Control rate2 of '{name}' cannot be connected to its targets " f"because one or more targets are tagged with 'dymos.static_target'.") grid_data = self.grid_data if phase.control_options: phase.control_group.configure_io() phase.connect('dt_dstau', 'control_group.dt_dstau') for name, options in phase.control_options.items(): for i in range(grid_data.num_segments): i1, i2 = grid_data.subset_segment_indices['control_disc'][i, :] seg_idxs = grid_data.subset_node_indices['control_disc'][i1:i2] src_idxs = get_src_indices_by_row(row_idxs=seg_idxs, shape=options['shape']) phase.connect(src_name=f'control_values_all:{name}', tgt_name=f'segment_{i}.controls:{name}', src_indices=(src_idxs,), flat_src_indices=True) targets = get_targets(ode=phase.ode, name=name, user_targets=options['targets']) if targets: phase.connect(f'control_values:{name}', [f'ode.{t}' for t in targets]) targets = get_targets(ode=phase.ode, name=f'{name}_rate', user_targets=options['rate_targets']) if targets: phase.connect(f'control_rates:{name}_rate', [f'ode.{t}' for t in targets]) targets = get_targets(ode=phase.ode, name=f'{name}_rate2', user_targets=options['rate2_targets']) if targets: phase.connect(f'control_rates:{name}_rate2', [f'ode.{t}' for t in targets]) def setup_polynomial_controls(self, phase): if phase.polynomial_control_options: sys = SolveIVPPolynomialControlGroup(grid_data=self.grid_data, polynomial_control_options=phase.polynomial_control_options, time_units=phase.time_options['units'], output_nodes_per_seg=self.options['output_nodes_per_seg']) phase.add_subsystem('polynomial_control_group', subsys=sys, promotes_inputs=['*'], promotes_outputs=['*']) def configure_polynomial_controls(self, phase): super(SolveIVP, self).configure_polynomial_controls(phase) for name, options in phase.polynomial_control_options.items(): for iseg in range(self.grid_data.num_segments): phase.connect(src_name=f'polynomial_controls:{name}', tgt_name=f'segment_{iseg}.polynomial_controls:{name}') targets = get_targets(ode=phase.ode, name=name, user_targets=options['targets']) if targets: phase.connect(f'polynomial_control_values:{name}', [f'ode.{t}' for t in targets]) targets = get_targets(ode=phase.ode, name=f'{name}_rate', user_targets=options['rate_targets']) if targets: phase.connect(f'polynomial_control_rates:{name}_rate', [f'ode.{t}' for t in targets]) targets = get_targets(ode=phase.ode, name=f'{name}_rate2', 
user_targets=options['rate2_targets']) if targets: phase.connect(f'polynomial_control_rates:{name}_rate2', [f'ode.{t}' for t in targets]) def configure_parameters(self, phase): super(SolveIVP, self).configure_parameters(phase) gd = self.grid_data segs = phase._get_subsystem('segments') for name, options in phase.parameter_options.items(): prom_name = f'parameters:{name}' shape, units, static_target = get_target_metadata(phase.ode, name=name, user_targets=options['targets'], user_shape=options['shape'], user_units=options['units']) options['units'] = units options['shape'] = shape for i in range(gd.num_segments): seg_comp = segs._get_subsystem(f'segment_{i}') seg_comp.add_input(name=prom_name, val=np.ones(shape), units=units, desc=f'values of parameter {name}.') segs.promotes(f'segment_{i}', inputs=[prom_name]) def setup_defects(self, phase): pass def configure_defects(self, phase): pass def configure_objective(self, phase): pass def setup_path_constraints(self, phase): pass def configure_path_constraints(self, phase): pass
Apache License 2.0
rwl/muntjac
muntjac/addon/invient/invient_charts_util.py
writeTitleConfig
python
def writeTitleConfig(target, titleOptions):
    target.startTag('title')
    writeTitleBaseOptions(target, titleOptions)
    if titleOptions.getMargin() is not None:
        target.addAttribute('margin', titleOptions.getMargin())
    target.endTag('title')
Writes configuration attributes of the chart title.

@param target
@param titleOptions
@throws PaintException
https://github.com/rwl/muntjac/blob/8db97712edd81b4d25deaaa48587d2a08010f2c8/muntjac/addon/invient/invient_charts_util.py#L53-L64
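writeTitleConfig opens a tag, emits only the options that are actually set, and closes the tag. A rough sketch of that pattern follows; RecordingTarget and write_title are illustrative stand-ins written for this example, not Muntjac's PaintTarget API.

class RecordingTarget:
    # Minimal stand-in for the paint target: it just records tags and attributes.
    def __init__(self):
        self.events = []
    def startTag(self, name):
        self.events.append(('start', name))
    def addAttribute(self, key, value):
        self.events.append(('attr', key, value))
    def endTag(self, name):
        self.events.append(('end', name))

def write_title(target, text=None, margin=None):
    # Same shape as writeTitleConfig: open the tag, write only the attributes
    # whose values are not None, then close the tag.
    target.startTag('title')
    if text is not None:
        target.addAttribute('text', text)
    if margin is not None:
        target.addAttribute('margin', margin)
    target.endTag('title')

target = RecordingTarget()
write_title(target, text='Sales', margin=15)
print(target.events)
# [('start', 'title'), ('attr', 'text', 'Sales'), ('attr', 'margin', 15), ('end', 'title')]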
from datetime import datetime from muntjac.util import totalseconds from muntjac.addon.invient.invient_charts_config import AreaConfig, AreaSplineConfig, DateTimeRange, DateTimeValue, Grid, NumberRange, NumberValue, Tick, BarConfig, CategoryAxis, ColumnConfig, DateTimeAxis, ImageMarker, LineConfig, NonLinearSeriesState, NumberAxis, NumberXAxis, NumberYAxis, PieConfig, PieDataLabel, ScatterConfig, SplineConfig, SymbolMarker, XAxisDataLabel def writeTitleBaseOptions(target, titleBaseOptions): if titleBaseOptions.getText() is not None: target.addAttribute('text', titleBaseOptions.getText()) if titleBaseOptions.getX() is not None: target.addAttribute('x', titleBaseOptions.getX()) if titleBaseOptions.getY() is not None: target.addAttribute('y', titleBaseOptions.getY()) if titleBaseOptions.getFloating() is not None: target.addAttribute('floating', titleBaseOptions.getFloating()) if titleBaseOptions.getAlign() is not None: target.addAttribute('align', titleBaseOptions.getAlign().getName()) if titleBaseOptions.getVertAlign() is not None: target.addAttribute('verticalAlign', titleBaseOptions.getVertAlign().getName()) if titleBaseOptions.getStyle() is not None: target.addAttribute('style', titleBaseOptions.getStyle())
Apache License 2.0
mesnico/teran
models/teran.py
TERAN.forward_loss
python
def forward_loss(self, img_emb, cap_emb, img_emb_set, cap_emb_seq, img_lengths, cap_lengths):
    losses = {}
    if 'matching' in self.config['training']['loss-type']:
        matching_loss = self.matching_criterion(img_emb, cap_emb)
        losses.update({'matching-loss': matching_loss})
        self.logger.update('matching_loss', matching_loss.item(), img_emb.size(0))
    if 'alignment' in self.config['training']['loss-type']:
        img_emb_set = img_emb_set.permute(1, 0, 2)
        cap_emb_seq = cap_emb_seq.permute(1, 0, 2)
        alignment_loss = self.alignment_criterion(img_emb_set, cap_emb_seq, img_lengths, cap_lengths)
        losses.update({'alignment-loss': alignment_loss})
        self.logger.update('alignment_loss', alignment_loss.item(), img_emb_set.size(0))
    return losses
Compute the loss given pairs of image and caption embeddings
https://github.com/mesnico/teran/blob/7129add41ca6a08fc082ea840ecaf362b36e3121/models/teran.py#L230-L249
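Before the alignment loss, forward_loss permutes the per-region and per-word features from the (seq_len, batch, dim) layout used by nn.TransformerEncoder to (batch, seq_len, dim). A small PyTorch sketch of those shapes follows; the tensor sizes are illustrative, and the similarity matrix at the end only indicates the kind of quantity an alignment loss aggregates, not the actual AlignmentContrastiveLoss from models/loss.py.

import torch

batch, img_regions, cap_words, dim = 4, 36, 20, 1024  # illustrative sizes

# Encoder outputs in (seq_len, batch, dim), the default nn.TransformerEncoder layout.
img_emb_set = torch.randn(img_regions, batch, dim)
cap_emb_seq = torch.randn(cap_words, batch, dim)

# forward_loss permutes them to (batch, seq_len, dim) before the alignment loss.
img_emb_set = img_emb_set.permute(1, 0, 2)
cap_emb_seq = cap_emb_seq.permute(1, 0, 2)
print(img_emb_set.shape, cap_emb_seq.shape)
# torch.Size([4, 36, 1024]) torch.Size([4, 20, 1024])

# A region-by-word similarity matrix for each image-caption pair.
sim = torch.einsum('brd,bwd->brw', img_emb_set, cap_emb_seq)
print(sim.shape)  # torch.Size([4, 36, 20])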
import torch import torch.nn.init import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn from transformers import BertTokenizer from models.loss import ContrastiveLoss, PermInvMatchingLoss, AlignmentContrastiveLoss from models.text import EncoderTextBERT, EncoderText from models.visual import TransformerPostProcessing, EncoderImage from .utils import l2norm, PositionalEncodingImageBoxes, PositionalEncodingText, Aggregator, generate_square_subsequent_mask from nltk.corpus import stopwords, words as nltk_words class JointTextImageTransformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.txt_enc = EncoderText(config) visual_feat_dim = config['image-model']['feat-dim'] caption_feat_dim = config['text-model']['word-dim'] dropout = config['model']['dropout'] layers = config['model']['layers'] embed_size = config['model']['embed-size'] self.order_embeddings = config['training']['measure'] == 'order' self.img_enc = EncoderImage(config) self.img_proj = nn.Linear(visual_feat_dim, embed_size) self.cap_proj = nn.Linear(caption_feat_dim, embed_size) self.embed_size = embed_size self.shared_transformer = config['model']['shared-transformer'] transformer_layer_1 = nn.TransformerEncoderLayer(d_model=embed_size, nhead=4, dim_feedforward=2048, dropout=dropout, activation='relu') self.transformer_encoder_1 = nn.TransformerEncoder(transformer_layer_1, num_layers=layers) if not self.shared_transformer: transformer_layer_2 = nn.TransformerEncoderLayer(d_model=embed_size, nhead=4, dim_feedforward=2048, dropout=dropout, activation='relu') self.transformer_encoder_2 = nn.TransformerEncoder(transformer_layer_2, num_layers=layers) self.text_aggregation = Aggregator(embed_size, aggregation_type=config['model']['text-aggregation']) self.image_aggregation = Aggregator(embed_size, aggregation_type=config['model']['image-aggregation']) self.text_aggregation_type = config['model']['text-aggregation'] self.img_aggregation_type = config['model']['image-aggregation'] def forward(self, features, captions, feat_len, cap_len, boxes): full_cap_emb_aggr, c_emb = self.txt_enc(captions, cap_len) full_img_emb_aggr, i_emb = self.img_enc(features, feat_len, boxes) bs = features.shape[0] if self.text_aggregation_type is not None: c_emb = self.cap_proj(c_emb) mask = torch.zeros(bs, max(cap_len)).bool() mask = mask.to(features.device) for m, c_len in zip(mask, cap_len): m[c_len:] = True full_cap_emb = self.transformer_encoder_1(c_emb.permute(1, 0, 2), src_key_padding_mask=mask) full_cap_emb_aggr = self.text_aggregation(full_cap_emb, cap_len, mask) else: full_cap_emb = None if self.img_aggregation_type is not None: i_emb = self.img_proj(i_emb) mask = torch.zeros(bs, max(feat_len)).bool() mask = mask.to(features.device) for m, v_len in zip(mask, feat_len): m[v_len:] = True if self.shared_transformer: full_img_emb = self.transformer_encoder_1(i_emb.permute(1, 0, 2), src_key_padding_mask=mask) else: full_img_emb = self.transformer_encoder_2(i_emb.permute(1, 0, 2), src_key_padding_mask=mask) full_img_emb_aggr = self.image_aggregation(full_img_emb, feat_len, mask) else: full_img_emb = None full_cap_emb_aggr = l2norm(full_cap_emb_aggr) full_img_emb_aggr = l2norm(full_img_emb_aggr) full_img_emb = F.normalize(full_img_emb, p=2, dim=2) full_cap_emb = F.normalize(full_cap_emb, p=2, dim=2) if self.order_embeddings: full_cap_emb_aggr = torch.abs(full_cap_emb_aggr) full_img_emb_aggr = torch.abs(full_img_emb_aggr) return full_img_emb_aggr, full_cap_emb_aggr, full_img_emb, full_cap_emb class 
TERAN(torch.nn.Module): def __init__(self, config): super().__init__() self.img_txt_enc = JointTextImageTransformerEncoder(config) if torch.cuda.is_available(): self.img_txt_enc.cuda() cudnn.benchmark = True loss_type = config['training']['loss-type'] if 'alignment' in loss_type: self.alignment_criterion = AlignmentContrastiveLoss(margin=config['training']['margin'], measure=config['training']['measure'], max_violation=config['training']['max-violation'], aggregation=config['training']['alignment-mode']) if 'matching' in loss_type: self.matching_criterion = ContrastiveLoss(margin=config['training']['margin'], measure=config['training']['measure'], max_violation=config['training']['max-violation']) self.Eiters = 0 self.config = config if 'exclude-stopwords' in config['model'] and config['model']['exclude-stopwords']: self.en_stops = set(stopwords.words('english')) self.tokenizer = BertTokenizer.from_pretrained(config['text-model']['pretrain']) else: self.tokenizer = None def forward_emb(self, images, captions, img_len, cap_len, boxes): if torch.cuda.is_available(): images = images.cuda() captions = captions.cuda() boxes = boxes.cuda() img_emb_aggr, cap_emb_aggr, img_feats, cap_feats = self.img_txt_enc(images, captions, img_len, cap_len, boxes) if self.tokenizer is not None: good_word_indexes = [[i for i, (tok, w) in enumerate(zip(self.tokenizer.convert_ids_to_tokens(ids), ids)) if tok not in self.en_stops or w == 0] for ids in captions] cap_len = [len(w) - (cap_feats.shape[0] - orig_len) for w, orig_len in zip(good_word_indexes, cap_len)] min_cut_len = min([len(w) for w in good_word_indexes]) good_word_indexes = [words[:min_cut_len] for words in good_word_indexes] good_word_indexes = torch.LongTensor(good_word_indexes).to(cap_feats.device) good_word_indexes = good_word_indexes.t().unsqueeze(2).expand(-1, -1, cap_feats.shape[2]) cap_feats = cap_feats.gather(dim=0, index=good_word_indexes) return img_emb_aggr, cap_emb_aggr, img_feats, cap_feats, cap_len def get_parameters(self): lr_multiplier = 1.0 if self.config['text-model']['fine-tune'] else 0.0 ret = [] params = list(self.img_txt_enc.img_enc.parameters()) params += list(self.img_txt_enc.img_proj.parameters()) params += list(self.img_txt_enc.cap_proj.parameters()) params += list(self.img_txt_enc.transformer_encoder_1.parameters()) params += list(self.img_txt_enc.image_aggregation.parameters()) params += list(self.img_txt_enc.text_aggregation.parameters()) if not self.config['model']['shared-transformer']: params += list(self.img_txt_enc.transformer_encoder_2.parameters()) ret.append(params) ret.append(list(self.img_txt_enc.txt_enc.parameters())) return ret, lr_multiplier
Apache License 2.0
neuroassassin/toxic-cogs
twenty/twenty.py
Twenty.twenty
python
async def twenty(self, ctx):
    board = [
        ["_", "_", "_", "_"],
        ["_", "_", "_", "_"],
        ["_", "_", "_", "_"],
        ["_", "_", "_", 2],
    ]
    score = 0
    total = 0
    await ctx.send(
        "Starting game...\nIf a reaction is not received every 5 minutes, the game will time out."
    )
    message = await ctx.send(f"Score: **{score}**```{self.print_board(board)}```")
    await message.add_reaction("\u2B06")
    await message.add_reaction("\u2B07")
    await message.add_reaction("\u2B05")
    await message.add_reaction("\u27A1")
    await message.add_reaction("\u274C")

    def check(reaction, user):
        return (
            (user.id == ctx.author.id)
            and (str(reaction.emoji) in ["\u2B06", "\u2B07", "\u2B05", "\u27A1", "\u274C"])
            and (reaction.message.id == message.id)
        )

    while True:
        try:
            reaction, user = await self.bot.wait_for(
                "reaction_add", check=check, timeout=300.0
            )
        except asyncio.TimeoutError:
            await ctx.send("Ending game")
            await message.delete()
            return
        else:
            try:
                await message.remove_reaction(str(reaction.emoji), ctx.author)
            except discord.errors.Forbidden:
                pass
            if str(reaction.emoji) == "\u2B06":
                msg, nb, total = self.execute_move("up", board)
            elif str(reaction.emoji) == "\u2B07":
                msg, nb, total = self.execute_move("down", board)
            elif str(reaction.emoji) == "\u2B05":
                msg, nb, total = self.execute_move("left", board)
            elif str(reaction.emoji) == "\u27A1":
                msg, nb, total = self.execute_move("right", board)
            elif str(reaction.emoji) == "\u274C":
                await ctx.send("Ending game")
                await message.delete()
                return
            score += total
            if msg == "Lost":
                await ctx.send(
                    f"Oh no! It appears you have lost {ctx.author.mention}. You finished with a score of {score}!"
                )
                await message.delete()
                return
            board = nb
            await message.edit(content=f"Score: **{score}**```{self.print_board(board)}```")
Starts a 2048 game inside of Discord.
https://github.com/neuroassassin/toxic-cogs/blob/2deae2ea19f634551ce052e442879812ceace9c8/twenty/twenty.py#L23-L84
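The if/elif chain over reaction emoji could also be expressed as a lookup table. The sketch below is only an illustration of that alternative dispatch, not code from the cog; the MOVES dict and dispatch helper are made up for this example.

# Arrow emoji used by the game, mapped to move names.
MOVES = {
    "\u2B06": "up",
    "\u2B07": "down",
    "\u2B05": "left",
    "\u27A1": "right",
}

def dispatch(emoji):
    # Returns the move name, or None for the cancel reaction (\u274C).
    return MOVES.get(emoji)

print(dispatch("\u2B06"))  # up
print(dispatch("\u274C"))  # None -> end the game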
import asyncio import random from copy import deepcopy as dc import discord from redbot.core import checks, commands class Twenty(commands.Cog): def __init__(self, bot): self.bot = bot __author__ = "Neuro Assassin#4779 <@473541068378341376>" async def red_delete_data_for_user(self, **kwargs): return @checks.bot_has_permissions(add_reactions=True) @commands.command()
MIT License
petercorke/spatialmath-python
spatialmath/geom3d.py
Line3.TwoPoints
python
def TwoPoints(cls, P=None, Q=None):
    P = base.getvector(P, 3)
    Q = base.getvector(Q, 3)
    w = P - Q
    v = np.cross(w, P)
    return cls(np.r_[v, w])
Create Plucker line object from two 3D points

:param P: First 3D point
:type P: 3-element array_like
:param Q: Second 3D point
:type Q: 3-element array_like
:return: Plucker line
:rtype: Plucker

``L = Plucker(P, Q)`` create a Plucker object that represents the line joining the 3D points ``P`` (3-vector) and ``Q`` (3-vector). The direction is from ``Q`` to ``P``.

:seealso: Plucker, Plucker.Planes, Plucker.PointDir
https://github.com/petercorke/spatialmath-python/blob/a3116021b8bd95b4f6015b180053941599ebd6cc/spatialmath/geom3d.py#L271-L293
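The Plucker coordinates built by TwoPoints are just the line direction w = P - Q and its moment v = w x P stacked into a 6-vector. A minimal NumPy sketch of that math follows, without importing spatialmath; the two points are illustrative.

import numpy as np

# Two illustrative points: the origin and a point on the x-axis.
P = np.array([1.0, 0.0, 0.0])
Q = np.array([0.0, 0.0, 0.0])

w = P - Q            # direction, from Q to P
v = np.cross(w, P)   # moment of the line about the origin
line = np.r_[v, w]   # 6-vector (v, w), the layout Line3 stores

print(line)          # [0. 0. 0. 1. 0. 0.] -> the x-axis: zero moment, unit x direction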
import numpy as np import math from collections import namedtuple import matplotlib.pyplot as plt import spatialmath.base as base from spatialmath import SE3 from spatialmath.baseposelist import BasePoseList _eps = np.finfo(np.float64).eps class Plane3: def __init__(self, c): self.plane = base.getvector(c, 4) @classmethod def PN(cls, p, n): n = base.getvector(n, 3) p = base.getvector(p, 3) return cls(np.r_[n, -np.dot(n, p)]) @classmethod def P3(cls, p): p = base.ismatrix(p, (3,3)) v1 = p[:,0] v2 = p[:,1] v3 = p[:,2] n = np.cross(v2-v1, v3-v1) return cls(n, v1) @property def n(self): return self.plane[:3] @property def d(self): return self.plane[3] def contains(self, p, tol=10*_eps): return abs(np.dot(self.n, p) - self.d) < tol def plot(self, bounds=None, ax=None, **kwargs): ax = base.axes_logic(ax, 3) if bounds is None: bounds = np.r_[ax.get_xlim(), ax.get_ylim(), ax.get_zlim()] X, Y = np.meshgrid(np.linspace(bounds[0], bounds[1], 50), np.linspace(bounds[2], bounds[3], 50)) Z = -(X * self.plane[0] + Y * self.plane[1] + self.plane[3]) / self.plane[2] Z[Z < bounds[4]] = np.nan Z[Z > bounds[5]] = np.nan ax.plot_surface(X, Y, Z, **kwargs) def __str__(self): return str(self.plane) class Line3(BasePoseList): def __init__(self, v=None, w=None): super().__init__() if w is None: if super().arghandler(v, convertfrom=(SE3,)): return else: assert base.isvector(v, 3) and base.isvector(w, 3), 'expecting two 3-vectors' self.data = [np.r_[v, w]] @property def shape(self): return (6,) @staticmethod def _identity(): return np.zeros((6,)) @staticmethod def isvalid(x, check=False): return x.shape == (6,) @classmethod
MIT License
decred/tinydecred
decred/decred/dcr/dcrdata.py
DcrdataBlockchain.tinyBlockForTx
python
def tinyBlockForTx(self, txid):
    block = self.blockForTx(txid)
    if not block:
        return None
    return account.TinyBlock(block.cachedHash(), block.height)
Get the TinyBlock for txid.

Args:
    txid(str): The transaction ID for the block.

Returns:
    account.TinyBlock: The block hash and height.
https://github.com/decred/tinydecred/blob/f7f7d9f7da8d49d9ae9a72e5579b07a3b8572267/decred/decred/dcr/dcrdata.py#L669-L682
import atexit import calendar import json import time from urllib.parse import urlencode, urljoin, urlsplit, urlunsplit from decred import DecredError from decred.crypto import crypto from decred.dcr import addrlib from decred.util import database, tinyhttp, ws from decred.util.encode import ByteArray from decred.util.helpers import formatTraceback, getLogger, makeWebsocketURL from . import account, agenda, txscript from .wire import msgblock, msgtx, wire log = getLogger("DCRDATA") VERSION = "0.0.1" GET_HEADERS = {"User-Agent": "PyDcrData/%s" % VERSION} POST_HEADERS = { "User-Agent": "tinydecred/%s" % VERSION, "Content-Type": "application/json; charset=utf-8", } WS_DONE = object() MaxInsightAddrsTxns = 250 AddrsPerRequest = 25 class DcrdataError(DecredError): pass class InsufficientFundsError(DecredError): pass class DcrdataPath: def __init__(self): self.subpaths = {} self.callSigns = [] def getSubpath(self, subpathPart): if subpathPart in self.subpaths: return self.subpaths[subpathPart] p = self.subpaths[subpathPart] = DcrdataPath() return p def addCallsign(self, argList, template): self.callSigns.append((argList, template)) def getCallsignPath(self, *args, **kwargs): argLen = len(args) if args else len(kwargs) for argList, template in self.callSigns: if len(argList) != argLen: continue if args: url = template % args if len(kwargs): url += "?" + urlencode(kwargs) return url if all([x in kwargs for x in argList]): return template % tuple(kwargs[x] for x in argList) raise DcrdataError( "Supplied arguments, %r, do not match any of the know call signatures, %r." % (args if args else kwargs, [argList for argList, _ in self.callSigns]), ) def __getattr__(self, key): if key in self.subpaths: return self.subpaths[key] raise DcrdataError("No subpath %s found in datapath" % (key,)) def __call__(self, *args, **kwargs): return tinyhttp.get(self.getCallsignPath(*args, **kwargs), headers=GET_HEADERS) def post(self, data): return tinyhttp.post(self.getCallsignPath(), data, headers=POST_HEADERS) InsightPaths = [ "/tx/send", "/insight/api/addr/{address}/utxo", "/insight/api/addr/{address}", "/insight/api/tx/send", "/insight/api/addrs/{addresses}/txs", ] class DcrdataClient: timeFmt = "%Y-%m-%d %H:%M:%S" rfc3339Z = "%Y-%m-%dT%H:%M:%SZ" def __init__(self, url, emitter=None): url = urlsplit(url) self.baseURL = urlunsplit((url.scheme, url.netloc, "/", "", "")) self.baseApi = urlunsplit((url.scheme, url.netloc, "/api/", "", "")) self.psURL = makeWebsocketURL(self.baseURL, "ps") self.ps = None self.subscribedAddresses = [] self.emitter = emitter if emitter else lambda msg: None atexit.register(self.close) root = self.root = DcrdataPath() self.listEntries = [] listURL = urljoin(self.baseApi, "list") endpoints = tinyhttp.get(listURL, headers=GET_HEADERS) endpoints += InsightPaths def getParam(part): if part.startswith("{") and part.endswith("}"): return part[1:-1] return None pathlog = [] for path in endpoints: path = path.rstrip("/") if path in pathlog or path == "": continue pathlog.append(path) baseURL = self.baseURL if "insight" in path else self.baseApi params = [] pathSequence = [] templateParts = [] for i, part in enumerate(path.strip("/").split("/")): param = getParam(part) if param: params.append(param) templateParts.append("%s") else: pathSequence.append(part) templateParts.append(part) pathPointer = root for pathPart in pathSequence: pathPointer = pathPointer.getSubpath(pathPart) pathPointer.addCallsign(params, baseURL + "/".join(templateParts)) if len(pathSequence) == 1: continue 
self.listEntries.append( ("%s(%s)" % (".".join(pathSequence), ", ".join(params)), path) ) def __getattr__(self, key): return getattr(self.root, key) def close(self): if self.ps: self.ps.close() self.ps = None def endpointList(self): return [entry[1] for entry in self.listEntries] def endpointGuide(self): print("\n".join(["%s -> %s" % entry for entry in self.listEntries])) def psClient(self): if self.ps is None: def on_message(ws, msg): try: msg = json.loads(msg) if msg.get("event") == "ping": return except json.JSONDecodeError: pass self.emitter(msg) def on_close(ws): self.emitter(WS_DONE) def on_error(ws, error): log.error(f"pubsub error: {error}") self.ps = ws.Client( url=self.psURL, on_message=on_message, on_close=on_close, on_error=on_error, ) return self.ps def subscribeAddresses(self, addrs): if isinstance(addrs, str): addrs = [addrs] ps = self.psClient() subscribed = self.subscribedAddresses for a in addrs: if a in subscribed: continue subscribed.append(a) ps.send(Sub.address(a)) def subscribeBlocks(self): ps = self.psClient() ps.send(Sub.newblock) @staticmethod def timeStringToUnix(fmtStr): return calendar.timegm(time.strptime(fmtStr, DcrdataClient.timeFmt)) @staticmethod def RFC3339toUnix(fmtStr): return calendar.timegm(time.strptime(fmtStr, DcrdataClient.rfc3339Z)) _subcounter = 0 def makeSubscription(eventID): global _subcounter _subcounter += 1 return json.dumps( { "event": "subscribe", "message": {"request_id": _subcounter, "message": eventID}, } ) class Sub: newblock = makeSubscription("newblock") mempool = makeSubscription("mempool") newtxs = makeSubscription("newtxs") blockchainSync = makeSubscription("blockchainSync") def address(addr): global _subcounter _subcounter += 1 return json.dumps( { "event": "subscribe", "message": {"request_id": _subcounter, "message": "address:%s" % addr}, } ) class TicketPoolInfo: def __init__( self, height, size, value, valAvg, winners, ): self.height = height self.size = size self.value = value self.valAvg = valAvg self.winners = winners @staticmethod def parse(obj): return TicketPoolInfo( height=obj["height"], size=obj["size"], value=obj["value"], valAvg=obj["valavg"], winners=[winner for winner in obj["winners"]], ) def makeOutputs(pairs, netParams): outputs = [] for idx, (addrStr, amt) in enumerate(pairs): if not isinstance(amt, int): raise DecredError(f"Decred amount #{idx} is not an integer") if amt < 0: raise DecredError(f"Decred amount #{idx} is negative") pkScript = txscript.makePayToAddrScript(addrStr, netParams) outputs.append(msgtx.TxOut(value=amt, pkScript=pkScript)) return outputs def checkOutput(output, fee): if output.value < 0: raise DecredError("transaction output amount is negative") if output.value > txscript.MaxAmount: raise DecredError("transaction output amount exceeds maximum value") if output.value == 0: raise DecredError("zero-value output") if txscript.isDustOutput(output, fee): raise DecredError("policy violation: transaction output is dust") def hashFromHex(s): return reversed(ByteArray(s)) def hexFromHash(h): return reversed(ByteArray(h)).hex() class DcrdataBlockchain: def __init__(self, db, netParams, datapath, skipConnect=False): self.ownsDB = False if not isinstance(db, (database.Bucket, database.KeyValueDatabase)): self.ownsDB = True db = database.KeyValueDatabase(db) self.db = db self.netParams = netParams self.datapath = datapath self.dcrdata = None self.txDB = db.child("tx", blobber=msgtx.MsgTx) self.heightMap = db.child("height", datatypes=("INTEGER", "BLOB")) self.headerDB = db.child("header", 
blobber=msgblock.BlockHeader) self.txBlockMap = db.child("blocklink") self.tipHeight = -1 self.subsidyCache = txscript.SubsidyCache(netParams) self.addrSubscribers = {} self.blockSubscribers = [] if not skipConnect: self.connect() def connect(self): self.dcrdata = DcrdataClient(self.datapath, emitter=self.pubsubSignal) self.updateTip() def changeServer(self, url): oldClient = self.dcrdata self.dcrdata = DcrdataClient(url, emitter=self.pubsubSignal) try: if self.blockSubscribers: self.dcrdata.subscribeBlocks() if self.addrSubscribers: addrGroups = {} for addr, receiver in self.addrSubscribers.items(): rid = id(receiver) if rid not in addrGroups: addrGroups[rid] = dict(receiver=receiver, addrs=[]) addrGroups[rid]["addrs"].append(addr) for group in addrGroups.values(): self.subscribeAddresses(group["addrs"], group["receiver"]) self.updateTip() self.datapath = url oldClient.close() except Exception as e: self.dcrdata = oldClient raise e def close(self): if self.dcrdata: self.dcrdata.close() if self.ownsDB: self.db.close() def subscribeBlocks(self, receiver): self.blockSubscribers.append(receiver) if len(self.blockSubscribers) == 1: self.dcrdata.subscribeBlocks() def getAgendasInfo(self): return agenda.AgendasInfo.parse(self.dcrdata.stake.vote.info()) def subscribeAddresses(self, addrs, receiver): log.debug("subscribing to addresses %s" % repr(addrs)) for addr in addrs: self.addrSubscribers[addr] = receiver self.dcrdata.subscribeAddresses(addrs) def processNewUTXO(self, utxo): utxo = account.UTXO.parse(utxo) tx = self.tx(utxo.txid) if tx.looksLikeCoinbase(): utxo.maturity = utxo.height + self.netParams.CoinbaseMaturity if utxo.isTicket(): utxo.tinfo = self.ticketInfo(utxo.txid) return utxo def UTXOs(self, addrs): utxos = [] addrCount = len(addrs) get = lambda addrs: self.dcrdata.insight.api.addr.utxo(",".join(addrs)) for i in range(addrCount // AddrsPerRequest + 1): start = i * AddrsPerRequest end = start + AddrsPerRequest if start < addrCount: ads = addrs[start:end] utxos += [self.processNewUTXO(u) for u in get(ads)] return utxos def txidsForAddr(self, addr): addrInfo = self.dcrdata.insight.api.addr(addr) if "transactions" not in addrInfo: return [] return addrInfo["transactions"] def addrsHaveTxs(self, addrs): def gettxs(addrs): return self.dcrdata.insight.api.addrs.txs( ",".join(addrs), **{"from": 0, "to": 1} ).get("items") addrCount = len(addrs) for i in range(addrCount // AddrsPerRequest + 1): start = i * AddrsPerRequest end = start + AddrsPerRequest if start < addrCount: ads = addrs[start:end] if gettxs(ads): return True return False def txVout(self, txid, vout): tx = self.tx(txid) txout = tx.txOut[vout] utxo = account.UTXO( address=None, txHash=reversed(ByteArray(txid)), vout=vout, scriptPubKey=txout.pkScript, satoshis=txout.value, ) self.confirmUTXO(utxo, None, tx) return utxo def ticketInfo(self, txid): try: tinfo = account.TicketInfo.parse(self.dcrdata.tx.tinfo(txid)) except Exception: tinfo = account.TicketInfo("mempool", None, 0, 0) return tinfo def tx(self, txid): hashKey = hashFromHex(txid).bytes() try: return self.txDB[hashKey] except database.NoValueError: try: txHex = self.dcrdata.tx.hex(txid) msgTx = msgtx.MsgTx.deserialize(ByteArray(txHex)) self.txDB[hashKey] = msgTx return msgTx except Exception as e: log.warning( "unable to retrieve tx data from dcrdata at %s: %s" % (self.dcrdata.baseURL, e) ) raise DecredError("failed to retrieve transaction")
ISC License
demisto/demisto-sdk
demisto_sdk/commands/common/tools.py
get_child_directories
python
def get_child_directories(directory):
    if not os.path.isdir(directory):
        return []
    child_directories = [
        os.path.join(directory, path)
        for path in os.listdir(directory)
        if os.path.isdir(os.path.join(directory, path))
    ]
    return child_directories
Return a list of paths of immediate child directories of the 'directory' argument
https://github.com/demisto/demisto-sdk/blob/8d8767c2dfec77b67c35f4e1022e30ed2893e864/demisto_sdk/commands/common/tools.py#L390-L398
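A quick way to see the behaviour is to run a standalone copy of the helper against a throwaway directory tree. The sketch below copies the listed logic rather than importing demisto_sdk, and the directory names are purely illustrative.

import os
import tempfile

def get_child_directories(directory):
    # Same logic as the listed helper: immediate subdirectories only, files skipped.
    if not os.path.isdir(directory):
        return []
    return [
        os.path.join(directory, path)
        for path in os.listdir(directory)
        if os.path.isdir(os.path.join(directory, path))
    ]

with tempfile.TemporaryDirectory() as root:
    os.mkdir(os.path.join(root, 'Integrations'))
    os.mkdir(os.path.join(root, 'Scripts'))
    open(os.path.join(root, 'pack_metadata.json'), 'w').close()  # plain file, ignored
    print(sorted(get_child_directories(root)))
    # [<root>/Integrations, <root>/Scripts]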
import argparse import glob import io import json import os import re import shlex import sys import urllib.parse from configparser import ConfigParser, MissingSectionHeaderError from contextlib import contextmanager from distutils.version import LooseVersion from enum import Enum from functools import lru_cache, partial from pathlib import Path from subprocess import DEVNULL, PIPE, Popen, check_output from typing import (Callable, Dict, List, Match, Optional, Set, Tuple, Type, Union) import click import colorama import demisto_client import git import requests import urllib3 import yaml from packaging.version import parse from ruamel.yaml import YAML from demisto_sdk.commands.common.constants import ( ALL_FILES_VALIDATION_IGNORE_WHITELIST, API_MODULES_PACK, CLASSIFIERS_DIR, DASHBOARDS_DIR, DEF_DOCKER, DEF_DOCKER_PWSH, DOC_FILES_DIR, ID_IN_COMMONFIELDS, ID_IN_ROOT, INCIDENT_FIELDS_DIR, INCIDENT_TYPES_DIR, INDICATOR_FIELDS_DIR, INTEGRATIONS_DIR, LAYOUTS_DIR, OFFICIAL_CONTENT_ID_SET_PATH, PACK_METADATA_IRON_BANK_TAG, PACKAGE_SUPPORTING_DIRECTORIES, PACKAGE_YML_FILE_REGEX, PACKS_DIR, PACKS_DIR_REGEX, PACKS_PACK_IGNORE_FILE_NAME, PACKS_PACK_META_FILE_NAME, PACKS_README_FILE_NAME, PLAYBOOKS_DIR, PRE_PROCESS_RULES_DIR, RELEASE_NOTES_DIR, RELEASE_NOTES_REGEX, REPORTS_DIR, SCRIPTS_DIR, TEST_PLAYBOOKS_DIR, TYPE_PWSH, UNRELEASE_HEADER, UUID_REGEX, WIDGETS_DIR, XSOAR_CONFIG_FILE, FileType, GitContentConfig, urljoin) from demisto_sdk.commands.common.git_util import GitUtil urllib3.disable_warnings() colorama.init() ryaml = YAML() ryaml.preserve_quotes = True ryaml.allow_duplicate_keys = True class LOG_COLORS: NATIVE = colorama.Style.RESET_ALL RED = colorama.Fore.RED GREEN = colorama.Fore.GREEN YELLOW = colorama.Fore.YELLOW WHITE = colorama.Fore.WHITE LOG_VERBOSE = False LAYOUT_CONTAINER_FIELDS = {'details', 'detailsV2', 'edit', 'close', 'mobile', 'quickView', 'indicatorsQuickView', 'indicatorsDetails'} SDK_PYPI_VERSION = r'https://pypi.org/pypi/demisto-sdk/json' class XsoarLoader(yaml.SafeLoader): def reference(self, node): build_string = '!reference ' + str(self.construct_sequence(node)) return self.construct_yaml_str(yaml.ScalarNode(tag='!reference', value=build_string)) XsoarLoader.add_constructor('!reference', XsoarLoader.reference) def set_log_verbose(verbose: bool): global LOG_VERBOSE LOG_VERBOSE = verbose def get_log_verbose() -> bool: return LOG_VERBOSE def get_yml_paths_in_dir(project_dir: str, error_msg: str = '') -> Tuple[list, str]: yml_files = glob.glob(os.path.join(project_dir, '*.yml')) if not yml_files: if error_msg: print(error_msg) return [], '' return yml_files, yml_files[0] def print_color(obj, color): print(u'{}{}{}'.format(color, obj, LOG_COLORS.NATIVE)) def get_files_in_dir(project_dir: str, file_endings: list, recursive: bool = True) -> list: files = [] project_path = Path(project_dir) glob_function = project_path.rglob if recursive else project_path.glob for file_type in file_endings: if project_dir.endswith(file_type): return [project_dir] files.extend([str(f) for f in glob_function(f'*.{file_type}')]) return files def src_root() -> Path: git_dir = git.Repo(Path.cwd(), search_parent_directories=True).working_tree_dir return Path(git_dir) / 'demisto_sdk' def print_error(error_str): print_color(error_str, LOG_COLORS.RED) def print_warning(warning_str): print_color(warning_str, LOG_COLORS.YELLOW) def print_success(success_str): print_color(success_str, LOG_COLORS.GREEN) def run_command(command, is_silenced=True, exit_on_error=True, cwd=None): if is_silenced: p = 
Popen(command.split(), stdout=PIPE, stderr=PIPE, universal_newlines=True, cwd=cwd) else: p = Popen(command.split(), cwd=cwd) output, err = p.communicate() if err: if exit_on_error: print_error('Failed to run command {}\nerror details:\n{}'.format(command, err)) sys.exit(1) else: raise RuntimeError('Failed to run command {}\nerror details:\n{}'.format(command, err)) return output core_pack_list: Optional[ list] = None def get_core_pack_list() -> list: global core_pack_list if isinstance(core_pack_list, list): return core_pack_list if not is_external_repository(): core_pack_list = get_remote_file( 'Tests/Marketplace/core_packs_list.json', github_repo=GitContentConfig.OFFICIAL_CONTENT_REPO_NAME ) or [] else: core_pack_list = [] return core_pack_list def get_remote_file( full_file_path: str, tag: str = 'master', return_content: bool = False, suppress_print: bool = False, github_repo: Optional[str] = None ): git_config = GitContentConfig(github_repo) if git_config.GITLAB_ID: full_file_path_quote_plus = urllib.parse.quote_plus(full_file_path) git_path = urljoin(git_config.BASE_RAW_GITLAB_LINK, 'files', full_file_path_quote_plus, 'raw') tag = tag.replace('origin/', '') else: tag = tag.replace('origin/', '').replace('demisto/', '') git_path = urljoin(git_config.CONTENT_GITHUB_LINK, tag, full_file_path) local_content = '{}' github_token: Optional[str] = None gitlab_token: Optional[str] = None try: external_repo = is_external_repository() if external_repo: github_token = git_config.Credentials.GITHUB_TOKEN gitlab_token = git_config.Credentials.GITLAB_TOKEN if gitlab_token and git_config.GITLAB_ID: res = requests.get(git_path, params={'ref': tag}, headers={'PRIVATE-TOKEN': gitlab_token}, verify=False) res.raise_for_status() elif github_token: res = requests.get(git_path, verify=False, timeout=10, headers={ 'Authorization': f"Bearer {github_token}", 'Accept': f'application/vnd.github.VERSION.raw', }) if not res.ok: res = requests.get( git_path, verify=False, timeout=10, params={'token': github_token} ) res.raise_for_status() else: res = requests.get(git_path, verify=False, timeout=10) if not res.ok: if not suppress_print: click.secho( f'You are working in a private repository: "{git_config.CURRENT_REPOSITORY}".\n' f'The github token in your environment is undefined.\n' f'Getting file from local repository instead. 
\n' f'If you wish to get the file from the remote repository, \n' f'Please define your github or gitlab token in your environment.\n' f'`export {git_config.Credentials.ENV_GITHUB_TOKEN_NAME}=<TOKEN> or`\n' f'export {git_config.Credentials.ENV_GITLAB_TOKEN_NAME}=<TOKEN>', fg='yellow' ) click.echo("Getting file from local environment") repo = git.Repo(os.path.dirname(full_file_path), search_parent_directories=True) repo_git_util = GitUtil(repo) git_path = repo_git_util.get_local_remote_file_path(full_file_path, tag) local_content = repo_git_util.get_local_remote_file_content(git_path) else: res = requests.get(git_path, verify=False, timeout=10) res.raise_for_status() except Exception as exc: err_msg: str = str(exc).replace(github_token, 'XXX') if github_token else str(exc) err_msg = err_msg.replace(gitlab_token, 'XXX') if gitlab_token else err_msg if not suppress_print: click.secho( f'Could not find the old entity file under "{git_path}".\n' 'please make sure that you did not break backward compatibility.\n' f'Reason: {err_msg}', fg='yellow' ) return {} file_content = res.content if res.ok else local_content if return_content: return file_content if full_file_path.endswith('json'): details = res.json() if res.ok else json.loads(local_content) elif full_file_path.endswith('yml'): details = yaml.safe_load(file_content) else: details = {} return details def filter_files_on_pack(pack: str, file_paths_list=str()) -> set: files_paths_on_pack = set() for file in file_paths_list: if get_pack_name(file) == pack: files_paths_on_pack.add(file) return files_paths_on_pack def filter_packagify_changes(modified_files, added_files, removed_files, tag='master'): packagify_diff = {} for file_path in removed_files: if file_path.split("/")[0] in PACKAGE_SUPPORTING_DIRECTORIES: if PACKS_README_FILE_NAME in file_path: continue details = get_remote_file(file_path, tag) if details: uniq_identifier = '_'.join([ details['name'], details.get('fromversion', '0.0.0'), details.get('toversion', '99.99.99') ]) packagify_diff[uniq_identifier] = file_path updated_added_files = set() for file_path in added_files: if file_path.split("/")[0] in PACKAGE_SUPPORTING_DIRECTORIES: if PACKS_README_FILE_NAME in file_path: updated_added_files.add(file_path) continue with open(file_path) as f: details = yaml.safe_load(f.read()) uniq_identifier = '_'.join([ details['name'], details.get('fromversion', '0.0.0'), details.get('toversion', '99.99.99') ]) if uniq_identifier in packagify_diff: removed_files.remove(packagify_diff[uniq_identifier]) modified_files.add((packagify_diff[uniq_identifier], file_path)) continue updated_added_files.add(file_path) for file_path in modified_files: if isinstance(file_path, tuple): updated_added_files -= {file_path[1]} else: updated_added_files -= {file_path} return modified_files, updated_added_files, removed_files
MIT License
cetic/python-msp430-tools
msp430/jtag/dco.py
adjust_clock
python
def adjust_clock(out, frequency, tolerance=0.02, dcor=False, define=False):
    if tolerance < 0.005 or tolerance > 50:
        raise ValueError('tolerance out of range %f' % (tolerance,))
    device = get_msp430_type() >> 8
    variables = {}
    if device == 0xf1:
        measured_frequency, dco, bcs1 = clock.setDCO(
            frequency*(1-tolerance), frequency*(1+tolerance),
            maxrsel=7, dcor=dcor
        )
        variables['freq'] = TYPE_16BIT, measured_frequency/1e3
        variables['dcoctl'] = TYPE_8BIT, dco
        variables['bcsctl1'] = TYPE_8BIT, bcs1
        variables['bcsctl2'] = TYPE_8BIT, dcor and 1 or 0
        out.write('// BCS settings for %s\n' % (nice_frequency(measured_frequency), ))
        if define:
            suffix = '_%s' % nice_frequency(frequency).replace('.','_')
            out.write('#define DCOCTL%s 0x%02x\n' % (suffix, dco,))
            out.write('#define BCSCTL1%s 0x%02x\n' % (suffix, bcs1,))
            if dcor:
                out.write('#define BCSCTL2%s 0x01 // select external ROSC\n' % (suffix,))
            else:
                out.write('#define BCSCTL2%s 0x00 // select internal ROSC\n' % (suffix,))
        else:
            out.write('DCOCTL = 0x%02x;\n' % (dco,))
            out.write('BCSCTL1 = 0x%02x;\n' % (bcs1,))
            if dcor:
                out.write('BCSCTL2 = 0x01; // select external ROSC\n')
            else:
                out.write('BCSCTL2 = 0x00; // select internal ROSC\n')
    elif device == 0xf2:
        measured_frequency, dco, bcs1 = clock.setDCO(
            frequency*(1-tolerance), frequency*(1+tolerance),
            maxrsel=15, dcor=dcor
        )
        variables['freq'] = TYPE_16BIT, measured_frequency/1e3
        variables['dcoctl'] = TYPE_8BIT, dco
        variables['bcsctl1'] = TYPE_8BIT, bcs1
        variables['bcsctl2'] = TYPE_8BIT, dcor and 1 or 0
        out.write('// BCS+ settings for %s\n' % (nice_frequency(measured_frequency), ))
        if define:
            suffix = '_%s' % nice_frequency(frequency).replace('.','_')
            out.write('#define DCOCTL%s 0x%02x\n' % (suffix, dco,))
            out.write('#define BCSCTL1%s 0x%02x\n' % (suffix, bcs1,))
            if dcor:
                out.write('#define BCSCTL2%s 0x01 // select external ROSC\n' % (suffix,))
            else:
                out.write('#define BCSCTL2%s 0x00 // select internal ROSC\n' % (suffix,))
            out.write('#define BCSCTL3%s 0x00\n' % (suffix,))
        else:
            out.write('DCOCTL = 0x%02x;\n' % (dco,))
            out.write('BCSCTL1 = 0x%02x;\n' % (bcs1,))
            if dcor:
                out.write('BCSCTL2 = 0x01; // select external ROSC\n')
            else:
                out.write('BCSCTL2 = 0x00; // select internal ROSC\n')
            out.write('BCSCTL3 = 0x00;\n')
    elif device == 0xf4:
        measured_frequency, scfi0, scfi1, scfqctl, fll_ctl0, fll_ctl1 = clock.setDCOPlus(
            frequency*(1-tolerance), frequency*(1+tolerance)
        )
        variables['freq'] = TYPE_16BIT, measured_frequency/1e3
        variables['scfi0'] = TYPE_8BIT, scfi0
        variables['scfi1'] = TYPE_8BIT, scfi1
        variables['scfqctl'] = TYPE_8BIT, scfqctl
        variables['fll_ctl0'] = TYPE_8BIT, fll_ctl0
        variables['fll_ctl1'] = TYPE_8BIT, fll_ctl1
        out.write('// FLL+ settings for %s\n' % (nice_frequency(measured_frequency), ))
        if define:
            suffix = '_%s' % nice_frequency(frequency).replace('.','_')
            out.write('#define SCFI0%(suffix)s 0x%(scfi0)02x\n'
                      '#define SCFI1%(suffix)s 0x%(scfi1)02x\n'
                      '#define SCFQCTL%(suffix)s 0x%(scfqctl)02x\n'
                      '#define FLL_CTL0%(suffix)s 0x%(fll_ctl0)02x\n'
                      '#define FLL_CTL1%(suffix)s 0x%(fll_ctl1)02x\n' % vars()
                      )
        else:
            out.write('SCFI0 = 0x%02x;\nSCFI1 = 0x%02x;\nSCFQCTL = 0x%02x;\nFLL_CTL0 = 0x%02x;\nFLL_CTL1 = 0x%02x;\n' % (
                scfi0, scfi1, scfqctl, fll_ctl0, fll_ctl1
            ))
    else:
        raise IOError("unknown MSP430 type %02x" % device)
    return variables
Detect MSP430 type and try to set the clock to the given frequency. When successful, print the clock control register settings.

This function assumes that the JTAG connection to the device has already been initialized and that the device is under JTAG control and stopped.
https://github.com/cetic/python-msp430-tools/blob/71a86dd82206aaeb46dcf2c2f4f01c3aeb46eeef/msp430/jtag/dco.py#L56-L151
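adjust_clock hands clock.setDCO a frequency band of +/- tolerance around the target. The helper below is not part of the module; it is a small standalone sketch that restates that band and the tolerance range check, so it can be run without JTAG hardware.

def tolerance_window(frequency, tolerance=0.02):
    # Same bounds check and band computation as adjust_clock.
    if tolerance < 0.005 or tolerance > 50:
        raise ValueError('tolerance out of range %f' % (tolerance,))
    return frequency * (1 - tolerance), frequency * (1 + tolerance)

print(tolerance_window(1e6))          # (980000.0, 1020000.0) for a 1 MHz target
print(tolerance_window(2.5e6, 0.01))  # (2475000.0, 2525000.0)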
from msp430 import memory from msp430.jtag import jtag, clock import sys import struct import logging debug = False def nice_frequency(frequency): if frequency < 1e3: return "%dHz" % frequency elif frequency < 10e3: return "%.3fkHz" % (frequency/1e3) elif frequency < 100e3: return "%.2fkHz" % (frequency/1e3) elif frequency < 1e6: return "%.1fkHz" % (frequency/1e3) elif frequency < 10e6: return "%.3fMHz" % (frequency/1e6) elif frequency < 1e9: return "%.2fMHz" % (frequency/1e6) return "%.2fGHz" % (frequency/1e9) TYPE_8BIT = '<B' TYPE_16BIT = '<H' def get_msp430_type(): (device, ) = struct.unpack(">H", jtag._parjtag.memread(0x0ff0, 2)) logging.getLogger('msp430.jtag.dco').info("MSP430 device: 0x%04x" % (device, )) return device
BSD 3-Clause New or Revised License
rackhd/rackhd
test/util/display_node_firmware_versions.py
mon_get_ip_info
python
def mon_get_ip_info(node):
    nodeurl = "/api/2.0/nodes/" + node
    nodedata = fit_common.rackhdapi(nodeurl, action="get")
    nodeinfo = nodedata['json']
    result = nodedata['status']
    if result != 200:
        print "Error on node command ", nodeurl
        fit_common.TEST_CASE["test_error"] += 1
        return
    monurl = "/api/2.0/nodes/" + node + "/catalogs/bmc"
    mondata = fit_common.rackhdapi(monurl, action="get")
    catalog = mondata['json']
    result = mondata['status']
    if result != 200:
        print "Error on catalog/bmc command ", monurl
    else:
        print " BMC MAC Address: " + catalog["data"]["MAC Address"]
        bmc_ip_value = catalog["data"].get("IP Address")
        print " Shared NIC BMC IP Address: " + bmc_ip_value
        print " Shared NIC BMC IP Address Source: " + catalog["data"]["IP Address Source"]
    try:
        obmlist = nodeinfo["obmSettings"]
    except:
        print "ERROR: Node has no OBM settings configured"
    else:
        if fit_common.VERBOSITY >= 3:
            print " OBM Settings:"
            print fit_common.json.dumps(obmlist, indent=4)
        try:
            obmlist[0]["config"]["host"]
        except:
            print "ERROR: Invalid or empty OBM setting"
    monurl = "/api/2.0/nodes/" + node + "/catalogs/rmm"
    mondata = fit_common.rackhdapi(monurl, action="get")
    catalog = mondata['json']
    result = mondata['status']
    if result != 200:
        print "\nNo RMM catalog for node"
    else:
        print " RMM MAC Address: " + catalog["data"].get("MAC Address")
        print " RMM IP Address: " + catalog["data"].get("IP Address")
        print " RMM IP Address Source: " + catalog["data"].get("IP Address Source")
This routine will grab the IP information from the compute node
https://github.com/rackhd/rackhd/blob/d33f3c0425f41643004895bb9740a7be403e01cc/test/util/display_node_firmware_versions.py#L23-L74
import fit_path import json import pprint import fit_common import test_api_utils NODELIST = fit_common.node_select() if NODELIST == []: print "No nodes found on stack" exit fit_common.VERBOSITY = 1
Apache License 2.0
natashamjaques/neural_chat
HierarchicalRL/hrl_rewards.py
reward_question
python
def reward_question(conversations):
    num_convs = len(conversations)
    episode_len = (len(conversations[0]) - 1) // 2
    rewards = np.zeros(num_convs * episode_len)
    bot_responses = [resp for conv in conversations for resp in conv[1::2]]
    question_words = ['who', 'what', 'why', 'where', 'how', 'when']
    for i, resp in enumerate(bot_responses):
        resp = resp.lower()
        if any(q in resp for q in question_words) and '?' in resp:
            rewards[i] += 1
    rewards = rewards.reshape(num_convs, episode_len)
    return rewards
Allocates reward for any bot utterance that asks questions.
https://github.com/natashamjaques/neural_chat/blob/ddb977bb4602a67c460d02231e7bbf7b2cb49a97/HierarchicalRL/hrl_rewards.py#L385-L401
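The reward is 1 per bot turn that contains both a question word and a question mark, reshaped to (num_conversations, episode_len). The standalone sketch below reproduces that logic on a toy conversation; the conversation text is made up for illustration.

import numpy as np

# One conversation: [user, bot, user, bot, user] -> 2 bot turns, episode length 2.
conversations = [[
    "hi there",
    "hello! how are you today?",   # question word + '?'  -> reward 1
    "i am fine",
    "that is great to hear.",      # no question           -> reward 0
    "thanks",
]]

num_convs = len(conversations)
episode_len = (len(conversations[0]) - 1) // 2
bot_responses = [resp for conv in conversations for resp in conv[1::2]]
question_words = ['who', 'what', 'why', 'where', 'how', 'when']

rewards = np.zeros(num_convs * episode_len)
for i, resp in enumerate(bot_responses):
    resp = resp.lower()
    if any(q in resp for q in question_words) and '?' in resp:
        rewards[i] += 1

print(rewards.reshape(num_convs, episode_len))  # [[1. 0.]]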
import os import string import pickle from pathlib import Path import numpy as np import gensim from nltk.corpus import stopwords import tensorflow as tf import tensorflow_hub as hub from model.utils import embedding_metric, Tokenizer, detokenize from torchMoji.api.botmoji import Botmoji from inferSent.api.botsent import Botsent from Toxicity.toxic import NBLogisticRegression, NBTfidfVectorizer, tokenize EPSILON = np.finfo(np.float32).eps ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) tokenizer = Tokenizer('spacy') stopwords = stopwords.words('english') question_words = {'who', 'what', 'why', 'where', 'how', 'when'} _ = [stopwords.remove(q) for q in question_words] punct = list(string.punctuation) contractions = ["'s", "'d", "'ld", "n't", "'re", "'ll", "'ve"] filters = set(stopwords + contractions + punct) def _get_emojis(): EMOJIS = ":joy: :unamused: :weary: :sob: :heart_eyes: :pensive: " + ":ok_hand: :blush: :heart: :smirk: :grin: :notes: :flushed: " + ":100: :sleeping: :relieved: :relaxed: :raised_hands: " + ":two_hearts: :expressionless: :sweat_smile: :pray: " + ":confused: :kissing_heart: :heartbeat: :neutral_face: " + ":information_desk_person: :disappointed: :see_no_evil: " + ":tired_face: :v: :sunglasses: :rage: :thumbsup: :cry: " + ":sleepy: :yum: :triumph: :hand: :mask: :clap: :eyes: :gun: " + ":persevere: :smiling_imp: :sweat: :broken_heart: " + ":yellow_heart: :musical_note: :speak_no_evil: :wink: :skull: " + ":confounded: :smile: :stuck_out_tongue_winking_eye: :angry: " + ":no_good: :muscle: :facepunch: :purple_heart: " + ":sparkling_heart: :blue_heart: :grimacing: :sparkles:" EMOJIS = EMOJIS.split(' ') return EMOJIS def _get_emojis_to_rewards_dict(): emojis_to_rewards = { ':kissing_heart:': 1, ':thumbsup:': 1, ':ok_hand:': 1, ':smile:': 1, ':blush:': 0.75, ':wink:': 0.75, ':muscle:': 0.75, ':grin:': 0.75, ':heart_eyes:': 0.75, ':100:': 0.75, ':smirk:': 0.5, ':stuck_out_tongue_winking_eye:': 0.5, ':sunglasses:': 0.5, ':relieved:': 0.5, ':relaxed:': 0.5, ':blue_heart:': 0.5, ':two_hearts:': 0.5, ':heartbeat:': 0.5, ':yellow_heart:': 0.5, ':disappointed:': -0.5, ':eyes:': -0.5, ':expressionless:': -0.5, ':sleeping:': -0.5, ':grimacing:': -0.5, ':neutral_face:': -0.75, ':confused:': -0.75, ':triumph:': -0.75, ':confounded:': -0.75, ':unamused:': -1, ':angry:': -1, ':rage:': -1 } return emojis_to_rewards def _get_reward_multiplier(): EMOJIS = _get_emojis() emojis_to_rewards = _get_emojis_to_rewards_dict() reward_multiplier = np.zeros(len(EMOJIS)) for emoji, reward_val in emojis_to_rewards.items(): loc = EMOJIS.index(emoji) reward_multiplier[loc] = reward_val return reward_multiplier def normalizeZ(x): x = np.array(x) mean = np.mean(x) std = np.std(x) return (x - mean) / (std + EPSILON) def discount(rewards, gamma=0.9): batch_size = rewards.shape[0] episode_len = rewards.shape[1] discounted = np.zeros_like(rewards) running_add = np.zeros((batch_size)) for step in reversed(range(episode_len)): running_add = gamma * running_add + rewards[:, step] discounted[:, step] = running_add return discounted def cosine_similarity(a, b): return np.sum(a * b, axis=1) / np.sqrt((np.sum(a * a, axis=1) * np.sum(b * b, axis=1))) def reward_you(conversations): num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 bot_responses = [resp for conv in conversations for resp in conv[1::2]] rewards = np.array([resp.count('you') for resp in bot_responses]) rewards = rewards.reshape(num_convs, episode_len) return rewards def 
reward_conversation_repetition(conversations): num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 rewards = np.zeros((num_convs, episode_len)) for i in range(num_convs): conv = conversations[i] bot_responses = conv[1::2] tokenized = [resp.split() for resp in bot_responses] filtered = [set(resp).difference(filters) for resp in tokenized] for j in range(1, episode_len): current = filtered[j] prev = set.union(*filtered[:j]) repeats = current.intersection(prev) rewards[i][j] = len(repeats) return -1 * rewards def reward_utterance_repetition(conversations): num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 rewards = np.zeros((num_convs, episode_len)) for i in range(num_convs): conv = conversations[i] bot_responses = conv[1::2] tokenized = [resp.split() for resp in bot_responses] filtered = [[w for w in resp if w not in filters] for resp in tokenized] for j in range(episode_len): repeats = len(filtered) - len(set(filtered)) rewards[i][j] = repeats return -1 * rewards def reward_bot_response_length(conversations): num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 bot_responses = [resp for conv in conversations for resp in conv[1::2]] punct_map = str.maketrans('', '', string.punctuation) bot_responses = [resp.translate(punct_map) for resp in bot_responses] response_length = [len(resp.split()) for resp in bot_responses] rewards = np.array(response_length).reshape(num_convs, episode_len) return rewards def reward_user_deepmoji(conversations): if 'botmoji' not in globals(): print('Loading deepmoji') global botmoji botmoji = Botmoji() num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 user_responses = [resp for conv in conversations for resp in conv[2::2]] reward_multiplier = _get_reward_multiplier() user_emojis = botmoji.encode_multiple(user_responses) rewards = np.dot(user_emojis, reward_multiplier) for i, resp in enumerate(user_responses): if '<unk>' in user_responses[i]: rewards[i] = -0.5 rewards = rewards.reshape(num_convs, episode_len) return rewards def reward_bot_deepmoji(conversations): if 'botmoji' not in globals(): print('Loading deepmoji') global botmoji botmoji = Botmoji() num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 bot_responses = [resp for conv in conversations for resp in conv[1::2]] reward_multiplier = _get_reward_multiplier() bot_emojis = botmoji.encode_multiple(bot_responses) rewards = np.dot(bot_emojis, reward_multiplier) for i, resp in enumerate(bot_responses): if '<unk>' in bot_responses[i]: rewards[i] = -0.5 rewards = rewards.reshape(num_convs, episode_len) return rewards def reward_deepmoji_coherence(conversations): num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 bot_responses = [resp for conv in conversations for resp in conv[1::2]] user_responses = [resp for conv in conversations for resp in conv[2::2]] user_emojis = botmoji.encode_multiple(user_responses) bot_emojis = botmoji.encode_multiple(bot_responses) emoji_coherence = cosine_similarity(user_emojis, bot_emojis) rewards = emoji_coherence.reshape(num_convs, episode_len) return rewards def reward_infersent_coherence(conversations): if 'botsent' not in globals(): print('Loading InferSent') global botsent dataset_dir = Path(ROOT_DIR).joinpath('datasets/reddit_casual/train') botsent = Botsent(dataset_dir, use_pca=False) num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 bot_responses = [resp for conv in conversations for resp in 
conv[1::2]] user_responses = [resp for conv in conversations for resp in conv[2::2]] user_embed = botsent.encode_multiple(user_responses) bot_embed = botsent.encode_multiple(bot_responses) coherence = cosine_similarity(user_embed, bot_embed) rewards = coherence.reshape(num_convs, episode_len) return rewards def reward_word_similarity(conversations): num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 bot_responses = [resp for conv in conversations for resp in conv[1::2]] user_responses = [resp for conv in conversations for resp in conv[2::2]] user_tokenized = [sent.split() for sent in user_responses] bot_tokenized = [sent.split() for sent in bot_responses] filter = set.union(filters, question_words, {'<unk>'}) bot_filtered = [set(resp).difference(filter) for resp in bot_tokenized] rewards = np.zeros(num_convs * episode_len) for i in range(num_convs * episode_len): in_common = [w for w in bot_filtered[i] if w in user_tokenized[i]] if len(bot_tokenized[i]): rewards[i] = len(in_common) / len(bot_tokenized[i]) rewards = rewards.reshape(num_convs, episode_len) return rewards def reward_toxicity(conversations): if 'toxicity_model' not in globals(): print('Loading toxicity model') global toxicity_model toxic_path = os.path.join(ROOT_DIR, 'Toxicity', 'toxicity_model.pkl') toxicity_model = pickle.load(open(toxic_path, 'rb')) num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 rewards = np.zeros(num_convs * episode_len) bot_responses = [resp for conv in conversations for resp in conv[1::2]] bot_responses = [detokenize(s) for s in bot_responses] toxicity = toxicity_model.predict_proba(bot_responses) for i, resp in enumerate(bot_responses): if 'depressed' in resp: toxicity[i] = 0.2 rewards = toxicity.reshape(num_convs, episode_len) return -1 * rewards def reward_USE_similarity(conversations): if 'universal_encoder' not in globals(): print('Loading Universal Sentence Encoder') global universal_encoder, sess, sents, embed_op use_path = os.path.join(ROOT_DIR, "UniversalSentenceEncoder") with tf.device('/cpu:0'): universal_encoder = hub.Module(use_path) sents = tf.placeholder(tf.string, shape=None, name="input_sents") embed_op = universal_encoder(sents) sess = tf.Session() sess.run([tf.global_variables_initializer(), tf.tables_initializer()]) num_convs = len(conversations) episode_len = (len(conversations[0]) - 1) // 2 bot_responses = [resp for conv in conversations for resp in conv[1::2]] user_responses = [resp for conv in conversations for resp in conv[2::2]] user_embed = sess.run(embed_op, feed_dict={sents: user_responses}) bot_embed = sess.run(embed_op, feed_dict={sents: bot_responses}) similarity = cosine_similarity(user_embed, bot_embed) rewards = similarity.reshape(num_convs, episode_len) return rewards
MIT License
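A minimal standalone sketch of the batch reward discounting performed by the discount() helper in the context above; the toy reward array and gamma value are illustrative only.

import numpy as np

def discount(rewards, gamma=0.9):
    # Walk each episode backwards, accumulating gamma-discounted returns
    # for every conversation in the batch at once (same logic as the record).
    discounted = np.zeros_like(rewards, dtype=float)
    running_add = np.zeros(rewards.shape[0])
    for step in reversed(range(rewards.shape[1])):
        running_add = gamma * running_add + rewards[:, step]
        discounted[:, step] = running_add
    return discounted

rewards = np.array([[0.0, 0.0, 1.0]])  # one 3-turn episode, rewarded only at the end
print(discount(rewards))               # [[0.81 0.9  1.  ]]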
flyteorg/flytekit
flytekit/models/named_entity.py
NamedEntityMetadata.__init__
python
def __init__(self, description, state): self._description = description self._state = state
:param Text description: :param int state: enum value from NamedEntityState
https://github.com/flyteorg/flytekit/blob/6c032035563ae645b0b93558b3fe3362080057ea/flytekit/models/named_entity.py#L80-L87
from flyteidl.admin import common_pb2 as _common from flytekit.models import common as _common_models class NamedEntityState(object): ACTIVE = _common.NAMED_ENTITY_ACTIVE ARCHIVED = _common.NAMED_ENTITY_ARCHIVED @classmethod def enum_to_string(cls, val): if val == cls.ACTIVE: return "ACTIVE" elif val == cls.ARCHIVED: return "ARCHIVED" else: return "<UNKNOWN>" class NamedEntityIdentifier(_common_models.FlyteIdlEntity): def __init__(self, project, domain, name): self._project = project self._domain = domain self._name = name @property def project(self): return self._project @property def domain(self): return self._domain @property def name(self): return self._name def to_flyte_idl(self): return _common.NamedEntityIdentifier( project=self.project, domain=self.domain, name=self.name, ) @classmethod def from_flyte_idl(cls, p): return cls( project=p.project, domain=p.domain, name=p.name, ) class NamedEntityMetadata(_common_models.FlyteIdlEntity):
Apache License 2.0
oasis-open/cti-python-stix2
stix2/equivalence/object/__init__.py
_bucket_per_type
python
def _bucket_per_type(graph, mode="type"): buckets = collections.defaultdict(list) if mode == "type": [buckets[obj["type"]].append(obj) for obj in graph] elif mode == "id-split": [buckets[obj.split("--")[0]].append(obj) for obj in graph] return buckets
Given a list of objects or references, bucket them by type. Depending on the list contents, the type is taken from the 'type' property or parsed from the 'id'.
https://github.com/oasis-open/cti-python-stix2/blob/81550cab92aaacbca5db0d37c607dfd1707ce4c3/stix2/equivalence/object/__init__.py#L510-L520
import collections import itertools import logging import time from ...datastore import DataSource, DataStoreMixin, Filter from ...utils import STIXdatetime, parse_into_datetime from ..pattern import equivalent_patterns logger = logging.getLogger(__name__) def object_equivalence( obj1, obj2, prop_scores={}, threshold=70, ds1=None, ds2=None, ignore_spec_version=False, versioning_checks=False, max_depth=1, **weight_dict ): similarity_result = object_similarity( obj1, obj2, prop_scores, ds1, ds2, ignore_spec_version, versioning_checks, max_depth, **weight_dict ) if similarity_result >= threshold: return True return False def object_similarity( obj1, obj2, prop_scores={}, ds1=None, ds2=None, ignore_spec_version=False, versioning_checks=False, max_depth=1, **weight_dict ): weights = WEIGHTS.copy() if weight_dict: weights.update(weight_dict) weights["_internal"] = { "ignore_spec_version": ignore_spec_version, "versioning_checks": versioning_checks, "ds1": ds1, "ds2": ds2, "max_depth": max_depth, } type1, type2 = obj1["type"], obj2["type"] if type1 != type2: raise ValueError('The objects to compare must be of the same type!') if ignore_spec_version is False and obj1.get("spec_version", "2.0") != obj2.get("spec_version", "2.0"): raise ValueError('The objects to compare must be of the same spec version!') try: weights[type1] except KeyError: logger.warning("'%s' type has no 'weights' dict specified & thus no object similarity method to call!", type1) sum_weights = matching_score = 0 else: try: method = weights[type1]["method"] except KeyError: logger.debug("Starting object similarity process between: '%s' and '%s'", obj1["id"], obj2["id"]) matching_score = 0.0 sum_weights = 0.0 for prop in weights[type1]: if check_property_present(prop, obj1, obj2): w = weights[type1][prop][0] comp_funct = weights[type1][prop][1] prop_scores[prop] = {} if comp_funct == partial_timestamp_based: contributing_score = w * comp_funct(obj1[prop], obj2[prop], weights[type1]["tdelta"]) elif comp_funct == partial_location_distance: threshold = weights[type1]["threshold"] contributing_score = w * comp_funct(obj1["latitude"], obj1["longitude"], obj2["latitude"], obj2["longitude"], threshold) elif comp_funct == reference_check or comp_funct == list_reference_check: if max_depth > 0: weights["_internal"]["max_depth"] = max_depth - 1 ds1, ds2 = weights["_internal"]["ds1"], weights["_internal"]["ds2"] if _datastore_check(ds1, ds2): contributing_score = w * comp_funct(obj1[prop], obj2[prop], ds1, ds2, **weights) elif comp_funct == reference_check: comp_funct = exact_match contributing_score = w * comp_funct(obj1[prop], obj2[prop]) elif comp_funct == list_reference_check: comp_funct = partial_list_based contributing_score = w * comp_funct(obj1[prop], obj2[prop]) prop_scores[prop]["check_type"] = comp_funct.__name__ else: continue weights["_internal"]["max_depth"] = max_depth else: contributing_score = w * comp_funct(obj1[prop], obj2[prop]) sum_weights += w matching_score += contributing_score prop_scores[prop]["weight"] = w prop_scores[prop]["contributing_score"] = contributing_score logger.debug("'%s' check -- weight: %s, contributing score: %s", prop, w, contributing_score) prop_scores["matching_score"] = matching_score prop_scores["sum_weights"] = sum_weights logger.debug("Matching Score: %s, Sum of Weights: %s", matching_score, sum_weights) else: logger.debug("Starting object similarity process between: '%s' and '%s'", obj1["id"], obj2["id"]) try: matching_score, sum_weights = method(obj1, obj2, prop_scores, **weights[type1]) 
except TypeError: matching_score, sum_weights = method(obj1, obj2, **weights[type1]) logger.debug("Matching Score: %s, Sum of Weights: %s", matching_score, sum_weights) if sum_weights <= 0: return 0 equivalence_score = (matching_score / sum_weights) * 100.0 return equivalence_score def check_property_present(prop, obj1, obj2): if prop == "longitude_latitude": if all(x in obj1 and x in obj2 for x in ('latitude', 'longitude')): return True elif prop in obj1 and prop in obj2: return True return False def partial_timestamp_based(t1, t2, tdelta): if not isinstance(t1, STIXdatetime): t1 = parse_into_datetime(t1) if not isinstance(t2, STIXdatetime): t2 = parse_into_datetime(t2) t1, t2 = time.mktime(t1.timetuple()), time.mktime(t2.timetuple()) result = 1 - min(abs(t1 - t2) / (86400 * tdelta), 1) logger.debug("--\t\tpartial_timestamp_based '%s' '%s' tdelta: '%s'\tresult: '%s'", t1, t2, tdelta, result) return result def partial_list_based(l1, l2): l1_set, l2_set = set(l1), set(l2) result = len(l1_set.intersection(l2_set)) / max(len(l1_set), len(l2_set)) logger.debug("--\t\tpartial_list_based '%s' '%s'\tresult: '%s'", l1, l2, result) return result def exact_match(val1, val2): result = 0.0 if val1 == val2: result = 1.0 logger.debug("--\t\texact_match '%s' '%s'\tresult: '%s'", val1, val2, result) return result def partial_string_based(str1, str2): from rapidfuzz import fuzz result = fuzz.token_sort_ratio(str1, str2) logger.debug("--\t\tpartial_string_based '%s' '%s'\tresult: '%s'", str1, str2, result) return result / 100.0 def custom_pattern_based(pattern1, pattern2): return equivalent_patterns(pattern1, pattern2) def partial_external_reference_based(ext_refs1, ext_refs2): allowed = {"veris", "cve", "capec", "mitre-attack"} matches = 0 ref_pairs = itertools.chain( itertools.product(ext_refs1, ext_refs2), ) for ext_ref1, ext_ref2 in ref_pairs: sn_match = False ei_match = False url_match = False source_name = None if check_property_present("source_name", ext_ref1, ext_ref2): if ext_ref1["source_name"] == ext_ref2["source_name"]: source_name = ext_ref1["source_name"] sn_match = True if check_property_present("external_id", ext_ref1, ext_ref2): if ext_ref1["external_id"] == ext_ref2["external_id"]: ei_match = True if check_property_present("url", ext_ref1, ext_ref2): if ext_ref1["url"] == ext_ref2["url"]: url_match = True if sn_match and (ei_match or url_match) and source_name in allowed: result = 1.0 logger.debug( "--\t\tpartial_external_reference_based '%s' '%s'\tresult: '%s'", ext_refs1, ext_refs2, result, ) return result if (sn_match or ei_match or url_match) and source_name not in allowed: matches += 1 result = matches / max(len(ext_refs1), len(ext_refs2)) logger.debug( "--\t\tpartial_external_reference_based '%s' '%s'\tresult: '%s'", ext_refs1, ext_refs2, result, ) return result def partial_location_distance(lat1, long1, lat2, long2, threshold): from haversine import Unit, haversine distance = haversine((lat1, long1), (lat2, long2), unit=Unit.KILOMETERS) result = 1 - (distance / threshold) logger.debug( "--\t\tpartial_location_distance '%s' '%s' threshold: '%s'\tresult: '%s'", (lat1, long1), (lat2, long2), threshold, result, ) return result def _versioned_checks(ref1, ref2, ds1, ds2, **weights): results = {} pairs = _object_pairs( _bucket_per_type(ds1.query([Filter("id", "=", ref1)])), _bucket_per_type(ds2.query([Filter("id", "=", ref2)])), weights, ) ignore_spec_version = weights["_internal"]["ignore_spec_version"] versioning_checks = weights["_internal"]["versioning_checks"] max_depth = 
weights["_internal"]["max_depth"] for object1, object2 in pairs: result = object_similarity( object1, object2, ds1=ds1, ds2=ds2, ignore_spec_version=ignore_spec_version, versioning_checks=versioning_checks, max_depth=max_depth, **weights, ) if ref1 not in results: results[ref1] = {"matched": ref2, "value": result} elif result > results[ref1]["value"]: results[ref1] = {"matched": ref2, "value": result} result = results.get(ref1, {}).get("value", 0.0) logger.debug( "--\t\t_versioned_checks '%s' '%s'\tresult: '%s'", ref1, ref2, result, ) return result def reference_check(ref1, ref2, ds1, ds2, **weights): type1, type2 = ref1.split("--")[0], ref2.split("--")[0] result = 0.0 if type1 == type2 and type1 in weights: ignore_spec_version = weights["_internal"]["ignore_spec_version"] versioning_checks = weights["_internal"]["versioning_checks"] max_depth = weights["_internal"]["max_depth"] if versioning_checks: result = _versioned_checks(ref1, ref2, ds1, ds2, **weights) / 100.0 else: o1, o2 = ds1.get(ref1), ds2.get(ref2) if o1 and o2: result = object_similarity( o1, o2, ds1=ds1, ds2=ds2, ignore_spec_version=ignore_spec_version, versioning_checks=versioning_checks, max_depth=max_depth, **weights, ) / 100.0 logger.debug( "--\t\treference_check '%s' '%s'\tresult: '%s'", ref1, ref2, result, ) return result def list_reference_check(refs1, refs2, ds1, ds2, **weights): results = {} pairs = _object_pairs( _bucket_per_type(refs1, "id-split"), _bucket_per_type(refs2, "id-split"), weights, ) for ref1, ref2 in pairs: type1, type2 = ref1.split("--")[0], ref2.split("--")[0] if type1 == type2: score = reference_check(ref1, ref2, ds1, ds2, **weights) if ref1 not in results: results[ref1] = {"matched": ref2, "value": score} elif score > results[ref1]["value"]: results[ref1] = {"matched": ref2, "value": score} if ref2 not in results: results[ref2] = {"matched": ref1, "value": score} elif score > results[ref2]["value"]: results[ref2] = {"matched": ref1, "value": score} result = 0.0 total_sum = sum(x["value"] for x in results.values()) max_score = len(results) if max_score > 0: result = total_sum / max_score logger.debug( "--\t\tlist_reference_check '%s' '%s'\ttotal_sum: '%s'\tmax_score: '%s'\tresult: '%s'", refs1, refs2, total_sum, max_score, result, ) return result def _datastore_check(ds1, ds2): if ( issubclass(ds1.__class__, (DataStoreMixin, DataSource)) or issubclass(ds2.__class__, (DataStoreMixin, DataSource)) ): return True return False
BSD 3-Clause New or Revised License
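A self-contained restatement of _bucket_per_type with toy inputs to show its two modes; the example ids below are made up.

import collections

def _bucket_per_type(graph, mode="type"):
    # Group whole objects by their 'type' property, or bare "type--uuid"
    # references by the prefix before the '--' separator.
    buckets = collections.defaultdict(list)
    if mode == "type":
        for obj in graph:
            buckets[obj["type"]].append(obj)
    elif mode == "id-split":
        for ref in graph:
            buckets[ref.split("--")[0]].append(ref)
    return buckets

objs = [{"type": "indicator", "id": "indicator--a1"}, {"type": "malware", "id": "malware--b2"}]
print(sorted(_bucket_per_type(objs)))                                          # ['indicator', 'malware']
print(sorted(_bucket_per_type(["indicator--a1", "malware--b2"], "id-split")))  # ['indicator', 'malware']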
patlevin/face-detection-tflite
fdlite/transform.py
image_to_tensor
python
def image_to_tensor( image: Union[PILImage, np.ndarray, str], roi: Optional[Rect] = None, output_size: Optional[Tuple[int, int]] = None, keep_aspect_ratio: bool = False, output_range: Tuple[float, float] = (0., 1.), flip_horizontal: bool = False ) -> ImageTensor: img = _normalize_image(image) image_size = img.size if roi is None: roi = Rect(0.5, 0.5, 1.0, 1.0, rotation=0.0, normalized=True) roi = roi.scaled(image_size) if output_size is None: output_size = (int(roi.size[0]), int(roi.size[1])) width, height = (roi.size if keep_aspect_ratio else output_size) src_points = roi.points() dst_points = [(0., 0.), (width, 0.), (width, height), (0., height)] coeffs = _perspective_transform_coeff(src_points, dst_points) roi_image = img.transform(size=(width, height), method=Image.PERSPECTIVE, data=coeffs, resample=Image.LINEAR) if img != image: img.close() pad_x, pad_y = 0., 0. if keep_aspect_ratio: out_aspect = output_size[1] / output_size[0] roi_aspect = roi.height / roi.width new_width, new_height = int(roi.width), int(roi.height) if out_aspect > roi_aspect: new_height = int(roi.width * out_aspect) pad_y = (1 - roi_aspect / out_aspect) / 2 else: new_width = int(roi.height / out_aspect) pad_x = (1 - out_aspect / roi_aspect) / 2 if new_width != int(roi.width) or new_height != int(roi.height): pad_h, pad_v = int(pad_x * new_width), int(pad_y * new_height) roi_image = roi_image.transform( size=(new_width, new_height), method=Image.EXTENT, data=(-pad_h, -pad_v, new_width - pad_h, new_height - pad_v)) roi_image = roi_image.resize(output_size, resample=Image.BILINEAR) if flip_horizontal: roi_image = roi_image.transpose(method=Image.FLIP_LEFT_RIGHT) min_val, max_val = output_range tensor_data = np.asarray(roi_image, dtype=np.float32) tensor_data *= (max_val - min_val) / 255 tensor_data += min_val return ImageTensor(tensor_data, padding=(pad_x, pad_y, pad_x, pad_y), original_size=image_size)
Load an image into an array and return data, image size, and padding. This function combines the mediapipe calculator-nodes ImageToTensor, ImageCropping, and ImageTransformation into one function. Args: image (Image|ndarray|str): Input image; preferably RGB, but will be converted if necessary; loaded from file if a string is given roi (Rect|None): Location within the image where to convert; can be `None`, in which case the entire image is converted. Rotation is supported. output_size (tuple|None): Tuple of `(width, height)` describing the output tensor size; defaults to ROI if `None`. keep_aspect_ratio (bool): `False` (default) will scale the image to the output size; `True` will keep the ROI aspect ratio and apply letterboxing. output_range (tuple): Tuple of `(min_val, max_val)` containing the minimum and maximum value of the output tensor. Defaults to (0, 1). flip_horizontal (bool): Flip the resulting image horizontally if set to `True`. Default: `False` Returns: (ImageTensor) Tensor data, padding for reversing letterboxing and original image dimensions.
https://github.com/patlevin/face-detection-tflite/blob/fd44bd8b9ec9c27a2ddbc3032eed9dddfc1646bf/fdlite/transform.py#L14-L97
from enum import IntEnum from typing import List, Optional, Sequence, Tuple, Union import numpy as np from PIL import Image from PIL.Image import Image as PILImage from fdlite import ArgumentError, CoordinateRangeError, InvalidEnumError from fdlite.types import BBox, Detection, ImageTensor, Landmark, Rect
MIT License
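A hedged usage sketch for image_to_tensor, based only on the signature and docstring above; 'group.jpg' is a hypothetical input file, the output size and range are illustrative choices, and the ImageTensor field names are read off the constructor call in the record (requires the fdlite package).

from PIL import Image
from fdlite.transform import image_to_tensor

img = Image.open('group.jpg')                 # hypothetical input image
tensor = image_to_tensor(
    img,
    roi=None,                                 # convert the whole image
    output_size=(128, 128),                   # target tensor size (width, height)
    keep_aspect_ratio=True,                   # letterbox instead of stretching
    output_range=(-1.0, 1.0))                 # scale pixel values into [-1, 1]
print(tensor.padding, tensor.original_size)   # letterbox padding and source image size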
tkassis/orgaquant
keras_retinanet/models/vgg.py
vgg_retinanet
python
def vgg_retinanet(num_classes, backbone='vgg16', inputs=None, modifier=None, **kwargs): if inputs is None: inputs = keras.layers.Input(shape=(None, None, 3)) if backbone == 'vgg16': vgg = keras.applications.VGG16(input_tensor=inputs, include_top=False, weights=None) elif backbone == 'vgg19': vgg = keras.applications.VGG19(input_tensor=inputs, include_top=False, weights=None) else: raise ValueError("Backbone '{}' not recognized.".format(backbone)) if modifier: vgg = modifier(vgg) layer_names = ["block3_pool", "block4_pool", "block5_pool"] layer_outputs = [vgg.get_layer(name).output for name in layer_names] return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=layer_outputs, **kwargs)
Constructs a retinanet model using a vgg backbone. Args num_classes: Number of classes to predict. backbone: Which backbone to use (one of ('vgg16', 'vgg19')). inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)). modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example). Returns RetinaNet model with a VGG backbone.
https://github.com/tkassis/orgaquant/blob/4ee6714e3ad1740bffac4d5c927a2325e7d475d2/keras_retinanet/models/vgg.py#L69-L99
import keras from keras.utils import get_file from . import retinanet from . import Backbone from ..utils.image import preprocess_image class VGGBackbone(Backbone): def retinanet(self, *args, **kwargs): return vgg_retinanet(*args, backbone=self.backbone, **kwargs) def download_imagenet(self): if self.backbone == 'vgg16': resource = keras.applications.vgg16.vgg16.WEIGHTS_PATH_NO_TOP checksum = '6d6bbae143d832006294945121d1f1fc' elif self.backbone == 'vgg19': resource = keras.applications.vgg19.vgg19.WEIGHTS_PATH_NO_TOP checksum = '253f8cb515780f3b799900260a226db6' else: raise ValueError("Backbone '{}' not recognized.".format(self.backbone)) return get_file( '{}_weights_tf_dim_ordering_tf_kernels_notop.h5'.format(self.backbone), resource, cache_subdir='models', file_hash=checksum ) def validate(self): allowed_backbones = ['vgg16', 'vgg19'] if self.backbone not in allowed_backbones: raise ValueError('Backbone (\'{}\') not in allowed backbones ({}).'.format(self.backbone, allowed_backbones)) def preprocess_image(self, inputs): return preprocess_image(inputs, mode='caffe')
MIT License
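A hedged sketch of building the VGG-backed RetinaNet described above, including a modifier that freezes the backbone as the docstring suggests; the class count of 80 is just an illustrative (COCO-style) choice and keras must be installed.

from keras_retinanet.models.vgg import vgg_retinanet

def freeze_backbone(backbone_model):
    # Example modifier: freeze every backbone layer before the RetinaNet heads are added.
    for layer in backbone_model.layers:
        layer.trainable = False
    return backbone_model

model = vgg_retinanet(num_classes=80, backbone='vgg16', modifier=freeze_backbone)
model.summary()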
megvii-research/dpgn
dpgn.py
P2DAgg.__init__
python
def __init__(self, in_c, out_c): super(P2DAgg, self).__init__() self.p2d_transform = nn.Sequential(*[nn.Linear(in_features=in_c, out_features=out_c, bias=True), nn.LeakyReLU()]) self.out_c = out_c
P2D Aggregation (see paper 3.2.1) Ep_(l) -> Vd_(l) :param in_c: number of input channels for the fc layer :param out_c: number of output channels for the fc layer
https://github.com/megvii-research/dpgn/blob/934063b32020f3c23d2adf17788cb32b276e3eb7/dpgn.py#L70-L80
import torch.nn as nn import torch.nn.functional as F import torch class PointSimilarity(nn.Module): def __init__(self, in_c, base_c, dropout=0.0): super(PointSimilarity, self).__init__() self.in_c = in_c self.base_c = base_c self.dropout = dropout layer_list = [] layer_list += [nn.Conv2d(in_channels=self.in_c, out_channels=self.base_c*2, kernel_size=1, bias=False), nn.BatchNorm2d(num_features=self.base_c*2), nn.LeakyReLU()] if self.dropout > 0: layer_list += [nn.Dropout2d(p=self.dropout)] layer_list += [nn.Conv2d(in_channels=self.base_c*2, out_channels=self.base_c, kernel_size=1, bias=False), nn.BatchNorm2d(num_features=self.base_c), nn.LeakyReLU()] if self.dropout > 0: layer_list += [nn.Dropout2d(p=self.dropout)] layer_list += [nn.Conv2d(in_channels=self.base_c, out_channels=1, kernel_size=1)] self.point_sim_transform = nn.Sequential(*layer_list) def forward(self, vp_last_gen, ep_last_gen, distance_metric): vp_i = vp_last_gen.unsqueeze(2) vp_j = torch.transpose(vp_i, 1, 2) if distance_metric == 'l2': vp_similarity = (vp_i - vp_j)**2 elif distance_metric == 'l1': vp_similarity = torch.abs(vp_i - vp_j) trans_similarity = torch.transpose(vp_similarity, 1, 3) ep_ij = torch.sigmoid(self.point_sim_transform(trans_similarity)) diagonal_mask = 1.0 - torch.eye(vp_last_gen.size(1)).unsqueeze(0).repeat(vp_last_gen.size(0), 1, 1).to(ep_last_gen.get_device()) ep_last_gen *= diagonal_mask ep_last_gen_sum = torch.sum(ep_last_gen, -1, True) ep_ij = F.normalize(ep_ij.squeeze(1) * ep_last_gen, p=1, dim=-1) * ep_last_gen_sum diagonal_reverse_mask = torch.eye(vp_last_gen.size(1)).unsqueeze(0).to(ep_last_gen.get_device()) ep_ij += (diagonal_reverse_mask + 1e-6) ep_ij /= torch.sum(ep_ij, dim=2).unsqueeze(-1) node_similarity_l2 = -torch.sum(vp_similarity, 3) return ep_ij, node_similarity_l2 class P2DAgg(nn.Module):
MIT License
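A small sketch exercising the P2DAgg module defined above; since only __init__ appears in the record, the sketch restates the class and pushes a random feature tensor through its linear transform, with illustrative channel and batch sizes.

import torch
import torch.nn as nn

class P2DAgg(nn.Module):
    # Restated from the record: a Linear + LeakyReLU stack that maps concatenated
    # point/edge features of width in_c down to node features of width out_c.
    def __init__(self, in_c, out_c):
        super().__init__()
        self.p2d_transform = nn.Sequential(
            nn.Linear(in_features=in_c, out_features=out_c, bias=True),
            nn.LeakyReLU())
        self.out_c = out_c

agg = P2DAgg(in_c=2 * 128, out_c=128)     # illustrative feature widths
feats = torch.randn(4, 10, 2 * 128)       # (batch, num_nodes, in_c)
print(agg.p2d_transform(feats).shape)     # torch.Size([4, 10, 128])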
tensorpack/tensorpack
tensorpack/utils/argtools.py
shape4d
python
def shape4d(a, data_format='NHWC'): s2d = shape2d(a) if get_data_format(data_format, False) == 'NHWC': return [1] + s2d + [1] else: return [1, 1] + s2d
Ensure a 4D shape, to use with 4D symbolic functions. Args: a: an int or a tuple/list of length 2 Returns: list: of length 4. if ``a`` is an int, return ``[1, a, a, 1]`` or ``[1, 1, a, a]`` depending on data_format.
https://github.com/tensorpack/tensorpack/blob/1a79d595f7eda9dc9dc8428f4461680ed2222ab6/tensorpack/utils/argtools.py#L110-L125
import inspect import functools from . import logger __all__ = ['map_arg', 'memoized', 'memoized_method', 'graph_memoized', 'shape2d', 'shape4d', 'memoized_ignoreargs', 'log_once'] def map_arg(**maps): def deco(func): @functools.wraps(func) def wrapper(*args, **kwargs): sig = inspect.signature(func) argmap = sig.bind_partial(*args, **kwargs).arguments for k, map_func in maps.items(): if k in argmap: argmap[k] = map_func(argmap[k]) return func(**argmap) return wrapper return deco memoized = functools.lru_cache(maxsize=None) def graph_memoized(func): from ..compat import tfv1 GRAPH_ARG_NAME = '__IMPOSSIBLE_NAME_FOR_YOU__' @memoized def func_with_graph_arg(*args, **kwargs): kwargs.pop(GRAPH_ARG_NAME) return func(*args, **kwargs) @functools.wraps(func) def wrapper(*args, **kwargs): assert GRAPH_ARG_NAME not in kwargs, "No Way!!" graph = tfv1.get_default_graph() kwargs[GRAPH_ARG_NAME] = graph return func_with_graph_arg(*args, **kwargs) return wrapper _MEMOIZED_NOARGS = {} def memoized_ignoreargs(func): def wrapper(*args, **kwargs): if func not in _MEMOIZED_NOARGS: res = func(*args, **kwargs) _MEMOIZED_NOARGS[func] = res return res return _MEMOIZED_NOARGS[func] return wrapper def shape2d(a): if type(a) == int: return [a, a] if isinstance(a, (list, tuple)): assert len(a) == 2 return list(a) raise RuntimeError("Illegal shape: {}".format(a)) def get_data_format(data_format, keras_mode=True): if keras_mode: dic = {'NCHW': 'channels_first', 'NHWC': 'channels_last'} else: dic = {'channels_first': 'NCHW', 'channels_last': 'NHWC'} ret = dic.get(data_format, data_format) if ret not in dic.values(): raise ValueError("Unknown data_format: {}".format(data_format)) return ret
Apache License 2.0
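A standalone restatement of the shape2d/shape4d pair to show the two layouts; get_data_format is reduced to a plain string comparison for the sketch.

def shape2d(a):
    # int -> [a, a]; 2-element tuple/list -> list(a)
    if isinstance(a, int):
        return [a, a]
    assert len(a) == 2
    return list(a)

def shape4d(a, data_format='NHWC'):
    s2d = shape2d(a)
    return [1] + s2d + [1] if data_format == 'NHWC' else [1, 1] + s2d

print(shape4d(3))               # [1, 3, 3, 1]
print(shape4d((3, 5), 'NCHW'))  # [1, 1, 3, 5]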
clovaai/frostnet
Semantic_Segmentation/model/layers/espnet_utils.py
C.__init__
python
def __init__(self,nIn, nOut, kernel_size, stride=1, padding=0, groups=1, bias=False): super().__init__() padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(nIn, nOut, (kernel_size, kernel_size), stride=stride, padding=(padding, padding),groups=groups, bias=False)
:param nIn: number of input channels :param nOut: number of output channels :param kernel_size: kernel size :param stride: optional stride rate for down-sampling
https://github.com/clovaai/frostnet/blob/0d553865b97e46a234b575c22154ec3aa9495096/Semantic_Segmentation/model/layers/espnet_utils.py#L132-L143
import torch import torch.nn as nn import torch.quantization import math from torch.nn import functional as F import warnings warnings.filterwarnings( action='ignore', category=DeprecationWarning, module=r'.*' ) warnings.filterwarnings( action='default', module=r'torch.quantization' ) from torch.quantization import QuantStub, DeQuantStub class hard_sigmoid(nn.Module): def __init__(self): super().__init__() self.relu6 = nn.ReLU6(inplace=False) self.quant_mul = nn.quantized.FloatFunctional() self.quant_add = nn.quantized.FloatFunctional() def forward(self, input): output = input output = self.quant_add.add_scalar(output, 3) output = self.relu6(output) output = self.quant_mul.mul_scalar(output, 1/6) return output class PSPModule(nn.Module): def __init__(self, features, out_features=1024, sizes=(1, 2, 4, 8)): super().__init__() self.stages = [] self.stages = nn.ModuleList([C(features, features, 3, 1, groups=features) for size in sizes]) self.project = CBR(features * (len(sizes) + 1), out_features, 1, 1) self.quant_cat = nn.quantized.FloatFunctional() def forward(self, feats): h, w = feats.size(2), feats.size(3) out = [feats] for stage in self.stages: feats = F.avg_pool2d(feats, kernel_size=3, stride=2, padding=1) upsampled = F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) out.append(upsampled) return self.project(self.quant_cat.cat(out, dim=1)) def fuse_model(self): self.project.fuse_model() class CBR(nn.Module): def __init__(self, nIn, nOut, kernel_size, stride=1, padding=0, groups=1, bias=False): super().__init__() padding = int((kernel_size - 1) / 2) self.cbr = nn.Sequential(nn.Conv2d(nIn, nOut, (kernel_size,kernel_size), stride=stride, padding=(padding, padding),groups=groups, bias=False), nn.BatchNorm2d(nOut, eps=1e-03), nn.ReLU(inplace=False) ) def forward(self, input): output = self.cbr(input) return output def fuse_model(self): torch.quantization.fuse_modules(self.cbr, ['0', '1','2'], inplace=True) class CB(nn.Module): def __init__(self,nIn, nOut, kernel_size, stride=1, padding=0, groups=1, bias=False): super().__init__() padding = int((kernel_size - 1) / 2) self.cb = nn.Sequential(nn.Conv2d(nIn, nOut, (kernel_size,kernel_size), stride=stride, padding=(padding, padding), groups=groups, bias=False), nn.BatchNorm2d(nOut, eps=1e-03) ) def forward(self, input): output = self.cb(input) return output def fuse_model(self): torch.quantization.fuse_modules(self.cb, ['0','1'], inplace=True) class C(nn.Module):
MIT License
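A short sketch of the C wrapper above: it is a bare kxk convolution whose padding is derived from the kernel size; channel sizes and spatial dimensions here are illustrative.

import torch
import torch.nn as nn

class C(nn.Module):
    # Restated from the record: a plain convolution with implicit 'same'-style
    # padding and no batch norm or activation.
    def __init__(self, nIn, nOut, kernel_size, stride=1, padding=0, groups=1, bias=False):
        super().__init__()
        padding = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(nIn, nOut, (kernel_size, kernel_size), stride=stride,
                              padding=(padding, padding), groups=groups, bias=False)

layer = C(3, 16, kernel_size=3, stride=2)
x = torch.randn(1, 3, 64, 64)
print(layer.conv(x).shape)    # torch.Size([1, 16, 32, 32])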
google/crmint
backends/core/models.py
Job.start_as_single
python
def start_as_single(self): if self.status != Job.STATUS.WAITING: return None else: self.set_status(Job.STATUS.RUNNING) return self.run()
Returns: Task object that was added to the task queue, otherwise None.
https://github.com/google/crmint/blob/ff0949e3f46591b69fe692374f02453d82afbc05/backends/core/models.py#L377-L385
from datetime import datetime import json import re import uuid from google.appengine.api import taskqueue from simpleeval import simple_eval from simpleeval import InvalidExpression from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import String from sqlalchemy import DateTime from sqlalchemy import Text from sqlalchemy import Boolean from sqlalchemy import ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.orm import load_only from core import inline from core.database import BaseModel from core.mailers import NotificationMailer def _parse_num(s): try: return int(s) except ValueError: try: return float(s) except ValueError: return 0 class Pipeline(BaseModel): __tablename__ = 'pipelines' id = Column(Integer, primary_key=True, autoincrement=True) name = Column(String(255)) emails_for_notifications = Column(String(255)) status = Column(String(50), nullable=False, default='idle') status_changed_at = Column(DateTime) jobs = relationship('Job', backref='pipeline', lazy='dynamic') run_on_schedule = Column(Boolean, nullable=False, default=False) schedules = relationship('Schedule', lazy='dynamic') params = relationship('Param', lazy='dynamic', order_by='asc(Param.name)') class STATUS(object): IDLE = 'idle' FAILED = 'failed' SUCCEEDED = 'succeeded' STOPPING = 'stopping' RUNNING = 'running' INACTIVE_STATUSES = [IDLE, FAILED, SUCCEEDED] def __init__(self, name=None): super(Pipeline, self).__init__() self.name = name @property def state(self): return self.status @property def has_jobs(self): return self.jobs.count() > 0 @property def recipients(self): if self.emails_for_notifications: return self.emails_for_notifications.split() return [] def assign_attributes(self, attributes): for key, value in attributes.iteritems(): if key in ['schedules', 'jobs', 'params']: continue if key == 'run_on_schedule': self.__setattr__(key, value == 'True') continue self.__setattr__(key, value) def save_relations(self, relations): for key, value in relations.iteritems(): if key == 'schedules': self.assign_schedules(value) elif key == 'params': self.assign_params(value) def assign_params(self, parameters): Param.update_list(parameters, self) def assign_schedules(self, arg_schedules): arg_schedule_ids = [] for arg_schedule in arg_schedules: if arg_schedule.get('id') is not None: schedule = Schedule.find(arg_schedule.get('id')) schedule.update(cron=arg_schedule['cron']) arg_schedule_ids.append(arg_schedule['id']) else: schedule = Schedule.create(pipeline_id=self.id, cron=arg_schedule['cron']) arg_schedule_ids.append(schedule.id) ids_for_removing = [] for schedule in self.schedules: if schedule.id not in arg_schedule_ids: ids_for_removing.append(schedule.id) Schedule.destroy(*ids_for_removing) def populate_params_runtime_values(self): inline.open_session() try: global_context = {} for param in Param.where(pipeline_id=None, job_id=None).all(): global_context[param.name] = param.populate_runtime_value() pipeline_context = global_context.copy() for param in self.params.all(): pipeline_context[param.name] = param.populate_runtime_value(global_context) for job in self.jobs.all(): for param in job.params.all(): param.populate_runtime_value(pipeline_context) inline.close_session() return True except (InvalidExpression, TypeError, ValueError, SyntaxError) as e: inline.close_session() from core import cloud_logging job_id = 'N/A' worker_class = 'N/A' if param.job_id is not None: job_id = param.job_id worker_class = param.job.worker_class message = 'Invalid job parameter "%s": %s' % 
(param.label, e) elif param.pipeline_id is not None: message = 'Invalid pipeline variable "%s": %s' % (param.label, e) else: message = 'Invalid global variable "%s": %s' % (param.label, e) cloud_logging.logger.log_struct({ 'labels': { 'pipeline_id': self.id, 'job_id': job_id, 'worker_class': worker_class, }, 'log_level': 'ERROR', 'message': message, }) return False def set_status(self, status): self.update(status=status, status_changed_at=datetime.now()) def get_ready(self): if not self.populate_params_runtime_values(): return False for job in self.jobs.all(): if not job.get_ready(): return False self.set_status(Pipeline.STATUS.RUNNING) return True def start(self): if self.status not in Pipeline.STATUS.INACTIVE_STATUSES: return False jobs = self.jobs.all() if len(jobs) < 1: return False for job in jobs: if job.status not in Job.STATUS.INACTIVE_STATUSES: return False if not self.get_ready(): return False for job in jobs: job.start() return True def _cancel_all_tasks(self): for job in self.jobs: job.cancel_tasks() def stop(self): if self.status != Pipeline.STATUS.RUNNING: return False for job in self.jobs: job.stop() for job in self.jobs: if job.status not in [Job.STATUS.FAILED, Job.STATUS.SUCCEEDED]: job.set_status(Job.STATUS.STOPPING) self._cancel_all_tasks() return self.job_finished() def start_single_job(self, job): if self.status not in Pipeline.STATUS.INACTIVE_STATUSES: return False if not self.populate_params_runtime_values(): return False if not job.get_ready(): return False self.set_status(Pipeline.STATUS.RUNNING) job.start_as_single() return True def job_finished(self): for job in self.jobs: if job.status == Job.STATUS.STOPPING: job.set_status(Job.STATUS.FAILED) for job in self.jobs: if job.status not in Job.STATUS.INACTIVE_STATUSES: return False self._finish() return True def _finish(self): jobs = Job.query.outerjoin((StartCondition, Job.id == StartCondition.preceding_job_id)) jobs = jobs.filter(Job.pipeline_id == self.id) jobs = jobs.filter(StartCondition.preceding_job_id == None) jobs = jobs.options(load_only('status')).all() status = Pipeline.STATUS.SUCCEEDED for job in jobs: if job.status == Job.STATUS.FAILED: status = Pipeline.STATUS.FAILED break self.set_status(status) NotificationMailer().finished_pipeline(self) def import_data(self, data): self.assign_params(data['params']) self.assign_schedules(data['schedules']) job_mapping = {} jobs = [] if data['jobs']: for job_data in data['jobs']: job = Job() job.pipeline_id = self.id job.assign_attributes(job_data) job.save() job.save_relations(job_data) jobs.append(job) job_mapping[job_data['id']] = job.id for job in jobs: job_id = job_mapping.keys()[job_mapping.values().index(job.id)] job_data = next((j for j in data['jobs'] if j['id'] == job_id), None) job.assign_hash_start_conditions(job_data['hash_start_conditions'], job_mapping) def is_blocked(self): return (self.run_on_schedule or self.status in [Pipeline.STATUS.RUNNING, Pipeline.STATUS.STOPPING]) def destroy(self): sc_ids = [sc.id for sc in self.schedules] if sc_ids: Schedule.destroy(*sc_ids) for job in self.jobs: job.destroy() param_ids = [p.id for p in self.params.all()] if param_ids: Param.destroy(*param_ids) self.delete() class Job(BaseModel): __tablename__ = 'jobs' id = Column(Integer, primary_key=True, autoincrement=True) name = Column(String(255)) status = Column(String(50), nullable=False, default='idle') status_changed_at = Column(DateTime) worker_class = Column(String(255)) pipeline_id = Column(Integer, ForeignKey('pipelines.id')) params = relationship('Param', 
backref='job', lazy='dynamic') start_conditions = relationship( 'StartCondition', primaryjoin='Job.id==StartCondition.job_id') dependent_jobs = relationship( 'Job', secondary='start_conditions', primaryjoin='Job.id==StartCondition.preceding_job_id', secondaryjoin='StartCondition.job_id==Job.id') class STATUS(object): IDLE = 'idle' FAILED = 'failed' SUCCEEDED = 'succeeded' RUNNING = 'running' WAITING = 'waiting' STOPPING = 'stopping' INACTIVE_STATUSES = [IDLE, FAILED, SUCCEEDED] def __init__(self, name=None, worker_class=None, pipeline_id=None): super(Job, self).__init__() self.name = name self.worker_class = worker_class self.pipeline_id = pipeline_id def destroy(self): sc_ids = [sc.id for sc in self.start_conditions] if sc_ids: StartCondition.destroy(*sc_ids) dependent_job_sc_ids = [ sc.id for sc in StartCondition.where(preceding_job_id=self.id).all()] if dependent_job_sc_ids: StartCondition.destroy(*dependent_job_sc_ids) param_ids = [p.id for p in self.params.all()] if param_ids: Param.destroy(*param_ids) self.delete() def get_ready(self): if self.status not in Job.STATUS.INACTIVE_STATUSES: return False self.set_status(Job.STATUS.WAITING) return True def _get_task_namespace(self): return 'pipeline=%s_job=%s' % (str(self.pipeline_id), str(self.id)) def _add_task_with_name(self, task_name): task_namespace = self._get_task_namespace() TaskEnqueued.create(task_namespace=task_namespace, task_name=task_name) return True def _delete_task_with_name(self, task_name): task_namespace = self._get_task_namespace() TaskEnqueued.where(task_namespace=task_namespace, task_name=task_name).delete() return self._enqueued_task_count() def cancel_tasks(self): task_namespace = self._get_task_namespace() enqueued_tasks = TaskEnqueued.where(task_namespace=task_namespace) if enqueued_tasks: tasks = [taskqueue.Task(name=t.task_name) for t in enqueued_tasks] taskqueue.Queue().delete_tasks(tasks) TaskEnqueued.where(task_namespace=task_namespace).delete() def _enqueued_task_count(self): task_namespace = self._get_task_namespace() return TaskEnqueued.count_in_namespace(task_namespace) def _start_condition_is_fulfilled(self, start_condition): preceding_job_status = start_condition.preceding_job.status if start_condition.condition == StartCondition.CONDITION.SUCCESS: if preceding_job_status == Job.STATUS.FAILED: return False elif start_condition.condition == StartCondition.CONDITION.FAIL: if preceding_job_status == Job.STATUS.SUCCEEDED: return False return True
Apache License 2.0
python-openxml/python-docx
docx/opc/rel.py
Relationships._get_matching
python
def _get_matching(self, reltype, target, is_external=False): def matches(rel, reltype, target, is_external): if rel.reltype != reltype: return False if rel.is_external != is_external: return False rel_target = rel.target_ref if rel.is_external else rel.target_part if rel_target != target: return False return True for rel in self.values(): if matches(rel, reltype, target, is_external): return rel return None
Return relationship of matching *reltype*, *target*, and *is_external* from collection, or None if not found.
https://github.com/python-openxml/python-docx/blob/36cac78de080d412e9e50d56c2784e33655cad59/docx/opc/rel.py#L87-L105
from __future__ import ( absolute_import, division, print_function, unicode_literals ) from .oxml import CT_Relationships class Relationships(dict): def __init__(self, baseURI): super(Relationships, self).__init__() self._baseURI = baseURI self._target_parts_by_rId = {} def add_relationship(self, reltype, target, rId, is_external=False): rel = _Relationship(rId, reltype, target, self._baseURI, is_external) self[rId] = rel if not is_external: self._target_parts_by_rId[rId] = target return rel def get_or_add(self, reltype, target_part): rel = self._get_matching(reltype, target_part) if rel is None: rId = self._next_rId rel = self.add_relationship(reltype, target_part, rId) return rel def get_or_add_ext_rel(self, reltype, target_ref): rel = self._get_matching(reltype, target_ref, is_external=True) if rel is None: rId = self._next_rId rel = self.add_relationship( reltype, target_ref, rId, is_external=True ) return rel.rId def part_with_reltype(self, reltype): rel = self._get_rel_of_type(reltype) return rel.target_part @property def related_parts(self): return self._target_parts_by_rId @property def xml(self): rels_elm = CT_Relationships.new() for rel in self.values(): rels_elm.add_rel( rel.rId, rel.reltype, rel.target_ref, rel.is_external ) return rels_elm.xml
MIT License
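A hedged sketch of how _get_matching deduplicates relationships, using only methods shown in the record; the reltype is the standard hyperlink relationship URL, the target is an example address, and the underlying _Relationship class (not shown above) is assumed to expose the external target as target_ref.

from docx.opc.rel import Relationships

HYPERLINK = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/hyperlink'

rels = Relationships('/word')
rel = rels.add_relationship(HYPERLINK, 'https://example.com', rId='rId1', is_external=True)

# Looking up the same reltype/target/is_external triple returns the existing relationship,
# which is what get_or_add_ext_rel relies on to avoid duplicates.
same = rels._get_matching(HYPERLINK, 'https://example.com', is_external=True)
print(same is rel)   # True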
cszn/dpir
utils/utils_sisr_beforepytorchversion8.py
BlockMM
python
def BlockMM(nr, nc, Nb, m, x1): fun = fun_reshape x1 = blockproc(x1, blocksize=(nr, nc), fun=fun) x1 = np.reshape(x1, (m, Nb, x1.shape[-1]), order='F') x1 = np.sum(x1, 1) x = np.reshape(x1, (nr, nc, x1.shape[-1]), order='F') return x
myfun = @(block_struct) reshape(block_struct.data,m,1); x1 = blockproc(x1,[nr nc],myfun); x1 = reshape(x1,m,Nb); x1 = sum(x1,2); x = reshape(x1,nr,nc);
https://github.com/cszn/dpir/blob/fc5dae5f8d50ff03ebd92632af27608760cff829/utils/utils_sisr_beforepytorchversion8.py#L50-L63
import numpy as np from scipy import fftpack import torch from scipy import ndimage from utils import utils_image as util from scipy.interpolate import interp2d from scipy import signal import scipy.stats as ss import scipy.io as io import scipy def blockproc(im, blocksize, fun): xblocks = np.split(im, range(blocksize[0], im.shape[0], blocksize[0]), axis=0) xblocks_proc = [] for xb in xblocks: yblocks = np.split(xb, range(blocksize[1], im.shape[1], blocksize[1]), axis=1) yblocks_proc = [] for yb in yblocks: yb_proc = fun(yb) yblocks_proc.append(yb_proc) xblocks_proc.append(np.concatenate(yblocks_proc, axis=1)) proc = np.concatenate(xblocks_proc, axis=0) return proc def fun_reshape(a): return np.reshape(a, (-1,1,a.shape[-1]), order='F') def fun_mul(a, b): return a*b
MIT License
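The net effect of BlockMM is easier to see with plain numpy: it partitions the input into (nr x nc) tiles and sums them elementwise (the fold step of the closed-form solver). This is a numerically equivalent sketch on a toy 4x4 single-channel array, not the original blockproc-based implementation.

import numpy as np

x = np.arange(1, 17, dtype=float).reshape(4, 4, 1)   # toy single-channel image
nr = nc = 2                                          # tile size
tiles = [x[i:i + nr, j:j + nc]                       # the four 2x2 tiles
         for i in range(0, x.shape[0], nr)
         for j in range(0, x.shape[1], nc)]
print(np.sum(tiles, axis=0)[..., 0])
# [[24. 28.]
#  [40. 44.]]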
nlp-uoregon/trankit
trankit/adapter_transformers/modeling_encoder_decoder.py
EncoderDecoderModel.from_encoder_decoder_pretrained
python
def from_encoder_decoder_pretrained( cls, encoder_pretrained_model_name_or_path: str = None, decoder_pretrained_model_name_or_path: str = None, *model_args, **kwargs ) -> PreTrainedModel: kwargs_encoder = { argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_") } kwargs_decoder = { argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_") } encoder = kwargs_encoder.pop("model", None) if encoder is None: assert ( encoder_pretrained_model_name_or_path is not None ), "If `model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined" from .modeling_auto import AutoModel encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) encoder.config.is_decoder = False decoder = kwargs_decoder.pop("model", None) if decoder is None: assert ( decoder_pretrained_model_name_or_path is not None ), "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined" from .modeling_auto import AutoModelWithLMHead if "config" not in kwargs_decoder: from transformers import AutoConfig decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path) if decoder_config.is_decoder is False: logger.info( f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers." ) decoder_config.is_decoder = True kwargs_decoder["config"] = decoder_config if kwargs_decoder["config"].is_decoder is False: logger.warning( f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attribute `is_decoder` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` is set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`" ) decoder = AutoModelWithLMHead.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) return cls(encoder=encoder, decoder=decoder)
r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you need to first set it back in training mode with `model.train()`. Params: encoder_pretrained_model_name_or_path (:obj: `str`, `optional`, defaults to `None`): information necessary to initiate the encoder. Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. decoder_pretrained_model_name_or_path (:obj: `str`, `optional`, defaults to `None`): information necessary to initiate the decoder. Either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method kwargs: (`optional`) Remaining dictionary of keyword arguments. Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: Examples:: from transformers import EncoderDecoder model = EncoderDecoder.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
https://github.com/nlp-uoregon/trankit/blob/5b56554efb9123758615a74cfa4d0f1a7d746d67/trankit/adapter_transformers/modeling_encoder_decoder.py#L91-L181
import logging from typing import Optional from .configuration_encoder_decoder import EncoderDecoderConfig from .configuration_utils import PretrainedConfig from .modeling_utils import PreTrainedModel logger = logging.getLogger(__name__) class EncoderDecoderModel(PreTrainedModel): config_class = EncoderDecoderConfig base_model_prefix = "encoder_decoder" def __init__( self, config: Optional[PretrainedConfig] = None, encoder: Optional[PreTrainedModel] = None, decoder: Optional[PreTrainedModel] = None, ): assert config is not None or ( encoder is not None and decoder is not None ), "Either a configuration or an Encoder and a decoder has to be provided" if config is None: config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config) else: assert isinstance(config, self.config_class), "config: {} has to be of type {}".format( config, self.config_class ) super().__init__(config) if encoder is None: from transformers import AutoModel encoder = AutoModel.from_config(config.encoder) if decoder is None: from transformers import AutoModelWithLMHead decoder = AutoModelWithLMHead.from_config(config.decoder) self.encoder = encoder self.decoder = decoder assert ( self.encoder.get_output_embeddings() is None ), "The encoder {} should not have a LM Head. Please use a model without LM Head" def tie_weights(self): pass def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def get_input_embeddings(self): return self.encoder.get_input_embeddings() def get_output_embeddings(self): return self.decoder.get_output_embeddings() @classmethod
Apache License 2.0
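A hedged usage sketch following the call shown in the record's own docstring; it downloads the public 'bert-base-uncased' checkpoints and assumes the trankit copy of the module is importable at the path given above.

from trankit.adapter_transformers.modeling_encoder_decoder import EncoderDecoderModel

# The encoder keeps its pretrained weights; the decoder gets is_decoder=True and
# randomly initialized cross-attention layers, as the method above arranges.
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    'bert-base-uncased',   # encoder checkpoint
    'bert-base-uncased')   # decoder checkpoint
model.eval()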
jsleb333/python2latex
python2latex/colormap.py
Palette.__init__
python
def __init__(self, colors=LinearColorMap(), color_model='hsb', n_colors=None, color_names=None, cmap_range=lambda n_colors: (1/(2*n_colors), 1-1/(2*n_colors)), color_transform=None, max_n_colors=10_000): self.colors = colors self.color_model = color_model self.n_colors = n_colors self.color_names = color_names if not callable(cmap_range): old_cmap_range = (cmap_range[0], cmap_range[1]) cmap_range = lambda n_colors: old_cmap_range self.cmap_range = cmap_range self.color_transform = color_transform or (lambda x: x) self.max_n_colors = max_n_colors self.tex_colors = [] if not (callable(self.colors) and self.n_colors is None): self._init_colors()
The default behavior of this palette is to create dynamically evenly spaced colors from a color map as needed. One can change this behavior by specifying a fixed number of colors, or by passing an iterable of colors instead of a color map. Args: colors (Union[Iterable, Callable]): Colors used to generate the color palette. If is an iterable, should be a sequence of valid color specifications as explained in the documentation of the Color class. If a callable, the callable should be a color map (i.e. takes as input a scalar and outputs a color in the correct color model in the form of a tuple). color_model (str): Color model of the colors. See the Color class documentation. n_colors (Union[int, None]): Number of colors to sample from colors if it is a callable. If colors is a sequence, n_colors is ignored. color_names (Union[Iterable[str], None]): If colors is a sequence, one can provide the names of the colors to be used in the TeX file. Must be the same length as colors. cmap_range (Union[Tuple[float], Callable[[int], Tuple]]): Range of the color map used. Ignored if 'colors' is an iterable. If is a tuple of floats, the colors will be sampled from the color map in the interval [cmap_range[0], cmap_range[1]]. The range can be dynamic if it is a callable which takes as input the number of colors and outputs a tuple of floats. The default is dynamic and is designed to spread colors equally in hue space (given that the color maps covers 360 of hue). color_transform (Union[Callable, None]): Transformation to be applied on the color before the Color object is created. For example, can be used to convert JCh colors from a color map to rgb or hsb colors. max_n_colors (int): Upper bound on the number of generated colors to avoid infinite iteration when generating dynamically the palette from a color map.
https://github.com/jsleb333/python2latex/blob/8736e750e0ee9b76ff1d521023efba72cec3ec22/python2latex/colormap.py#L88-L128
import numpy as np import sys from python2latex import Color from python2latex.utils import JCh2rgb class LinearColorMap: def __init__(self, color_anchors=[(0.5, 1, 0.5), (1.07, 0.7, 1)], anchor_pos=None, color_model='hsb', color_transform=None): self.color_anchors = color_anchors self.anchor_pos = anchor_pos or np.linspace(0, 1, len(color_anchors)) self.color_model = color_model self.color_transform = color_transform or (lambda x: x) def _interp_between_colors(self, frac, color_start, color_end): color = [self._lin_interp(frac, c1, c2) for c1, c2 in zip(color_start, color_end)] if self.color_model == 'RGB': color = [int(c) for c in color] if self.color_model == 'hsb': color[0] %= 1 if self.color_model == 'Hsb': color[0] %= 360 if self.color_model == 'JCh': color[2] %= 360 return tuple(color) def _lin_interp(self, frac, scalar_1, scalar_2): return scalar_1*(1-frac) + scalar_2*frac def __call__(self, scalar): idx_color_start, idx_color_end = 0, 1 while scalar > self.anchor_pos[idx_color_end]: idx_color_start += 1 idx_color_end += 1 interval_width = self.anchor_pos[idx_color_end] - self.anchor_pos[idx_color_start] interp_frac = (scalar - self.anchor_pos[idx_color_start])/interval_width interp_color = self._interp_between_colors(interp_frac, self.color_anchors[idx_color_start], self.color_anchors[idx_color_end]) if self.color_transform is not None: interp_color = self.color_transform(interp_color) return interp_color class Palette:
MIT License
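A short sketch of the LinearColorMap shown in the record's context, which is the default `colors` argument of Palette: calling it with a scalar in [0, 1] interpolates between the hsb anchors. The import path is assumed from the file path above.

from python2latex.colormap import LinearColorMap

cmap = LinearColorMap(color_anchors=[(0.5, 1, 0.5), (1.07, 0.7, 1)], color_model='hsb')
for t in (0.0, 0.5, 1.0):
    print(t, cmap(t))   # hue component is wrapped back into [0, 1)
# 0.0 -> (0.5, 1.0, 0.5); 0.5 -> (0.785, 0.85, 0.75); 1.0 -> (~0.07, 0.7, 1.0)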
yeasy/hyperledger-py
hyperledger/utils/utils.py
datetime_to_timestamp
python
def datetime_to_timestamp(dt): delta = dt - datetime.utcfromtimestamp(0) return delta.seconds + delta.days * 24 * 3600
Convert a UTC datetime to a Unix timestamp
https://github.com/yeasy/hyperledger-py/blob/f24e9cc409b50628b911950466786be6fe74f09f/hyperledger/utils/utils.py#L123-L126
import base64 import os import os.path import json import shlex from distutils.version import StrictVersion from fnmatch import fnmatch from datetime import datetime import six from .. import errors from .. import tls DEFAULT_HTTP_HOST = "127.0.0.1" DEFAULT_HTTP_PORT = "7050" BYTE_UNITS = { 'b': 1, 'k': 1024, 'm': 1024 * 1024, 'g': 1024 * 1024 * 1024 } def decode_json_header(header): data = base64.b64decode(header) if six.PY3: data = data.decode('utf-8') return json.loads(data) def match_path(path, pattern): pattern = pattern.rstrip('/') pattern_components = pattern.split('/') path_components = path.split('/')[:len(pattern_components)] return fnmatch('/'.join(path_components), pattern) def compare_version(v1, v2): s1 = StrictVersion(v1) s2 = StrictVersion(v2) if s1 == s2: return 0 elif s1 > s2: return -1 else: return 1 def version_lt(v1, v2): return compare_version(v1, v2) > 0 def version_gte(v1, v2): return not version_lt(v1, v2) def convert_tmpfs_mounts(tmpfs): if isinstance(tmpfs, dict): return tmpfs if not isinstance(tmpfs, list): raise ValueError( 'Expected tmpfs value to be either a list or a dict, found: {}' .format(type(tmpfs).__name__) ) result = {} for mount in tmpfs: if isinstance(mount, six.string_types): if ":" in mount: name, options = mount.split(":", 1) else: name = mount options = "" else: raise ValueError( "Expected item in tmpfs list to be a string, found: {}" .format(type(mount).__name__) ) result[name] = options return result def parse_repository_tag(repo_name): parts = repo_name.rsplit('@', 1) if len(parts) == 2: return tuple(parts) parts = repo_name.rsplit(':', 1) if len(parts) == 2 and '/' not in parts[1]: return tuple(parts) return repo_name, None
Apache License 2.0
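A quick sanity check of the conversion above (the function is re-stated locally here rather than imported from hyperledger.utils, purely for illustration); note that the seconds-plus-days arithmetic deliberately yields whole seconds, discarding any sub-second component:

from datetime import datetime

def datetime_to_timestamp(dt):
    # Same arithmetic as the snippet above: whole seconds since the Unix epoch.
    delta = dt - datetime.utcfromtimestamp(0)
    return delta.seconds + delta.days * 24 * 3600

# 2021-01-01T00:00:00Z is 1609459200 seconds after the epoch.
print(datetime_to_timestamp(datetime(2021, 1, 1)))  # 1609459200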
turreted/spotify-lyrics
spotifylyrics/api.py
SpotifyApi.__exchange_refresh_token
python
def __exchange_refresh_token(self): self.access_token, self.refresh_token, token_duration = exchange_refresh_token( self.refresh_token ) self.expires_at = time.time() + token_duration
Exchanges the current refresh token for a new API key and refresh token. Used when the current API key is unknown or expired.
https://github.com/turreted/spotify-lyrics/blob/6bbbf6c29f6183536668e557e8aebe70a0d2f5e3/spotifylyrics/api.py#L55-L65
import re import os import time import schedule import requests import colorama from bs4 import BeautifulSoup from spotifylyrics.auth.utils import read_refresh_token from spotifylyrics.auth.crypto import exchange_refresh_token from spotifylyrics.auth.oauth import perform_authorization_code_flow colorama.init() class SpotifyApi: lyric_container_scopes = {"jsname": "YS01Ge"} def __init__(self, clear=True, **kwargs): self.current_song = None self.clear_screen = clear self.access_token = None self.refresh_token = read_refresh_token() if self.refresh_token: self.__exchange_refresh_token() else: ( self.access_token, self.refresh_token, token_duration, ) = perform_authorization_code_flow() self.expires_at = time.time() + token_duration self.__update_song_info()
MIT License
librosa/librosa
librosa/core/fft.py
get_fftlib
python
def get_fftlib(): global __FFTLIB return __FFTLIB
Get the FFT library currently used by librosa Returns ------- fft : module The FFT library currently used by librosa. Must be API-compatible with `numpy.fft`.
https://github.com/librosa/librosa/blob/eb603e7a91598d1e72d3cdeada0ade21a33f9c0c/librosa/core/fft.py#L43-L53
__all__ = ["get_fftlib", "set_fftlib"] __FFTLIB = None def set_fftlib(lib=None): global __FFTLIB if lib is None: from numpy import fft lib = fft __FFTLIB = lib
ISC License
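A short sketch of how the set_fftlib/get_fftlib pair in the context above is meant to be used; the choice of scipy.fft as the replacement backend is only an assumption of an API-compatible module, not something required by the snippet:

import numpy as np
import librosa  # assumes librosa is installed and exposes set_fftlib/get_fftlib

librosa.set_fftlib(None)             # reset to the default numpy.fft backend
assert librosa.get_fftlib() is np.fft

import scipy.fft                     # assumption: scipy is available and numpy.fft-compatible enough
librosa.set_fftlib(scipy.fft)
print(librosa.get_fftlib())          # <module 'scipy.fft' ...>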
ieeerobotics/bot
bot/client/sub_client.py
SubClient.add_topic
python
def add_topic(self, topic): self.sub_sock.setsockopt(zmq.SUBSCRIBE, topic)
Set SUB socket to listen for the given topic. Note that topic matching is prefix-based: any published topic whose name starts with the given string will be matched. For example, if there's a topic drive_motor_detail_br and another drive_motor_detail_fr, then passing drive_motor_detail will subscribe to both of them. On the other hand, passing motor_detail_fr will not subscribe to any topic (unless there's another one that starts with motor_detail_fr). Also note that with ZMQ (libzmq) >= 3.0, filtering of topics is done at the publisher, so topics that are not subscribed to by any clients will not be published, reducing the load on the bot. :param topic: Topic to listen for. :type topic: string
https://github.com/ieeerobotics/bot/blob/9228b00f55ec949f3c39a0020a1e0f61dc64d601/bot/client/sub_client.py#L54-L73
import sys from pprint import pprint try: import zmq except ImportError: sys.stderr.write("ERROR: Failed to import zmq. Is it installed?") raise class SubClient(object): def __init__(self, sub_addr="tcp://127.0.0.1:60001"): self.context = zmq.Context() self.sub_addr = sub_addr self.sub_sock = self.context.socket(zmq.SUB) self.sub_sock.connect(self.sub_addr) print "SubClient subscribed to PubServer at {}".format(self.sub_addr) def print_msgs(self): print "Printing messages, ctrl+c to quit loop..." while True: try: pprint(self.sub_sock.recv()) except KeyboardInterrupt: print return
BSD 2-Clause Simplified License
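The prefix matching described in the docstring can be reproduced with pyzmq alone; the endpoint and topic names below are hypothetical, and on Python 3 the topic must be bytes:

import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.connect("tcp://127.0.0.1:60001")            # hypothetical PubServer address

# Prefix match: subscribes to drive_motor_detail_br, drive_motor_detail_fr, ...
sub.setsockopt(zmq.SUBSCRIBE, b"drive_motor_detail")

# An empty prefix would subscribe to every topic:
# sub.setsockopt(zmq.SUBSCRIBE, b"")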
projectatomic/papr
papr/utils/gh.py
_parse_args
python
def _parse_args(): parser = argparse.ArgumentParser() required_args = ['repo', 'commit', 'token', 'state'] optional_args = ['context', 'description', 'url'] for arg in required_args: parser.add_argument('--' + arg, required=True) for arg in optional_args: parser.add_argument('--' + arg) args = parser.parse_args() for arg in required_args + optional_args: val = getattr(args, arg) if val is not None: if val.startswith('env:'): new_val = os.environ.get(val[4:]) if new_val is None and arg in required_args: parser.error( "Parameter '%s' is required, but the given " "environment variable '%s' is missing." % ( arg, val[4:])) setattr(args, arg, new_val) elif val == "": if arg in required_args: parser.error( "Parameter '%s' is required, but the given " "argument is empty." % arg) setattr(args, arg, None) return args
Parses program arguments and optionally resolves pointers to environment variables.
https://github.com/projectatomic/papr/blob/7b80d5c4be84a6497b13b43ae1f80363958b870a/papr/utils/gh.py#L33-L69
import os import sys import json import argparse import requests import datetime from simplejson.scanner import JSONDecodeError class CommitNotFoundException(Exception): pass def _main(): args = _parse_args() status(args.repo, args.commit, args.token, args.state, args.context, args.description, args.url)
MIT License
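The 'env:' indirection is the interesting part of the parser above; a stripped-down sketch of just that resolution step (the variable name GH_TOKEN is invented for the example):

import os

def resolve(value, required=False):
    # Mirrors the env: handling in _parse_args above, minus argparse.
    if value is not None and value.startswith("env:"):
        value = os.environ.get(value[4:])
        if value is None and required:
            raise SystemExit("required value missing from the environment")
    return value

os.environ["GH_TOKEN"] = "s3cr3t"                  # hypothetical variable
print(resolve("env:GH_TOKEN", required=True))      # s3cr3t
print(resolve("literal-token"))                    # literal-token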
google-research/language
language/mentionmemory/encoders/encoder_registry.py
register_encoder
python
def register_encoder( name): def _wrap(cls): if not issubclass(cls, base_encoder.BaseEncoder): raise TypeError( 'Invalid encoder. Encoder %s does not subclass BaseEncoder.' % cls.__name__) if name in _ENCODER_REGISTRY: raise ValueError( 'Encoder name %s has already been registered with class %s' % (name, _ENCODER_REGISTRY[name].__name__)) _ENCODER_REGISTRY[name] = cls return cls return _wrap
Register encoder. Encoder should implement the BaseEncoder abstraction. Used as a decorator, for example: @register_encoder('my_encoder') class MyEncoder(BaseEncoder): Args: name: name of the registered encoder. Returns: A class decorator that registers the encoder class and returns it unchanged.
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/mentionmemory/encoders/encoder_registry.py#L26-L59
from language.mentionmemory.encoders import base_encoder _ENCODER_REGISTRY = {} _BaseEncoderVar = TypeVar('_BaseEncoderVar', bound='base_encoder.BaseEncoder')
Apache License 2.0
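The decorator-driven registry used above is a small, reusable pattern; a self-contained sketch with a stand-in for BaseEncoder (everything below is illustrative, not part of the mentionmemory package):

_REGISTRY = {}

class BaseEncoder:
    """Stand-in for base_encoder.BaseEncoder."""

def register_encoder(name):
    def _wrap(cls):
        if not issubclass(cls, BaseEncoder):
            raise TypeError("%s does not subclass BaseEncoder." % cls.__name__)
        if name in _REGISTRY:
            raise ValueError("Encoder name %s already registered." % name)
        _REGISTRY[name] = cls
        return cls
    return _wrap

@register_encoder("my_encoder")
class MyEncoder(BaseEncoder):
    pass

assert _REGISTRY["my_encoder"] is MyEncoder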
flyteorg/flytekit
flytekit/contrib/sensors/base_sensor.py
Sensor.sense
python
def sense(self, timeout=None): started = _datetime.datetime.utcnow() while True: sensed, time_to_wait = self.sense_with_wait_hint() if sensed: return True if time_to_wait: _time.sleep(time_to_wait.total_seconds()) if timeout is not None and (_datetime.datetime.utcnow() - started) > timeout: return False
Attempts to sense repeatedly, sleeping between polls, until the condition is met or the optional timeout elapses. Returns True if sensed, False if the timeout was reached. :param datetime.timedelta timeout: maximum time to keep sensing before giving up :rtype: bool
https://github.com/flyteorg/flytekit/blob/6c032035563ae645b0b93558b3fe3362080057ea/flytekit/contrib/sensors/base_sensor.py#L76-L90
import abc as _abc import datetime as _datetime import logging as _logging import sys as _sys import time as _time import traceback as _traceback import six as _six class Sensor(object, metaclass=_abc.ABCMeta): def __init__(self, evaluation_interval=None, max_failures=0): if evaluation_interval is None: evaluation_interval = _datetime.timedelta(seconds=30) self._evaluation_interval = evaluation_interval self._max_failures = max_failures self._failures = 0 self._exc_info = None self._last_executed_time = _datetime.datetime(year=1990, month=6, day=30) self._sensed = False @_abc.abstractmethod def _do_poll(self): pass def sense_with_wait_hint(self): if self._sensed: return self._sensed, self._evaluation_interval if self._failures > self._max_failures: _six.reraise(*self._exc_info) now = _datetime.datetime.utcnow() time_to_wait_eval_period = self._evaluation_interval - (now - self._last_executed_time) if time_to_wait_eval_period > _datetime.timedelta(): return self._sensed, time_to_wait_eval_period try: self._sensed, time_to_wait = self._do_poll() time_to_wait = time_to_wait or self._evaluation_interval except BaseException: self._failures += 1 self._exc_info = _sys.exc_info() if self._failures > self._max_failures: _logging.error( "{} failed (with no remaining retries) due to:\n\n{}".format(self, _traceback.format_exc()), ) raise else: _logging.warn("{} failed (but will retry) due to:\n\n{}".format(self, _traceback.format_exc())) time_to_wait = self._evaluation_interval self._last_executed_time = _datetime.datetime.utcnow() return self._sensed, time_to_wait
Apache License 2.0
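A toy subclass showing how sense() drives the _do_poll hook from the context above; the file-exists condition and path are made up, and the import path is assumed from the file location shown:

import datetime
import os
from flytekit.contrib.sensors.base_sensor import Sensor  # assumed import path

class FileSensor(Sensor):
    def __init__(self, path, **kwargs):
        super(FileSensor, self).__init__(**kwargs)
        self._path = path

    def _do_poll(self):
        # Return (sensed, time_to_wait); None falls back to the evaluation interval.
        return os.path.exists(self._path), None

sensor = FileSensor("/tmp/ready.flag",
                    evaluation_interval=datetime.timedelta(seconds=1))
print(sensor.sense(timeout=datetime.timedelta(seconds=5)))  # False unless the file appears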
wildmeorg/wildbook-ia
wbia/web/apis_engine.py
ensure_simple_server
python
def ensure_simple_server(port=5832): if ut.is_local_port_open(port): bgserver = ut.spawn_background_process(ut.start_simple_webserver, port=port) return bgserver else: bgserver = ut.DynStruct() bgserver.terminate2 = lambda: None logger.info('server is running elsewhere') return bgserver
r""" CommandLine: python -m wbia.web.apis_engine --exec-ensure_simple_server python -m utool.util_web --exec-start_simple_webserver Example: >>> # DISABLE_DOCTEST >>> from wbia.web.apis_engine import * # NOQA >>> result = ensure_simple_server() >>> print(result)
https://github.com/wildmeorg/wildbook-ia/blob/017057cfd3a2a7ea22f575842c9473e121c66ea4/wbia/web/apis_engine.py#L22-L41
import logging import utool as ut import uuid from wbia.control import controller_inject import wbia.constants as const print, rrr, profile = ut.inject2(__name__) logger = logging.getLogger('wbia') CLASS_INJECT_KEY, register_ibs_method = controller_inject.make_ibs_register_decorator( __name__ ) register_api = controller_inject.get_wbia_flask_api(__name__)
Apache License 2.0
googleapis/python-spanner
google/cloud/spanner_v1/keyset.py
KeySet._to_dict
python
def _to_dict(self): if self.all_: return {"all": True} return { "keys": self.keys, "ranges": [keyrange._to_dict() for keyrange in self.ranges], }
Return the state of the keyset as a dict. The result can be used to serialize the instance and reconstitute it later using :meth:`_from_dict`. :rtype: dict :returns: state of this instance.
https://github.com/googleapis/python-spanner/blob/b5a567f1db8762802182a3319c16b6456bb208d8/google/cloud/spanner_v1/keyset.py#L157-L172
from google.cloud.spanner_v1 import KeyRangePB from google.cloud.spanner_v1 import KeySetPB from google.cloud.spanner_v1._helpers import _make_list_value_pb from google.cloud.spanner_v1._helpers import _make_list_value_pbs class KeyRange(object): def __init__( self, start_open=None, start_closed=None, end_open=None, end_closed=None ): if not any([start_open, start_closed, end_open, end_closed]): raise ValueError("Must specify at least a start or end row.") if start_open and start_closed: raise ValueError("Specify one of 'start_open' / 'start_closed'.") elif start_open is None and start_closed is None: start_closed = [] if end_open and end_closed: raise ValueError("Specify one of 'end_open' / 'end_closed'.") elif end_open is None and end_closed is None: end_closed = [] self.start_open = start_open self.start_closed = start_closed self.end_open = end_open self.end_closed = end_closed def _to_pb(self): kwargs = {} if self.start_open is not None: kwargs["start_open"] = _make_list_value_pb(self.start_open) if self.start_closed is not None: kwargs["start_closed"] = _make_list_value_pb(self.start_closed) if self.end_open is not None: kwargs["end_open"] = _make_list_value_pb(self.end_open) if self.end_closed is not None: kwargs["end_closed"] = _make_list_value_pb(self.end_closed) return KeyRangePB(**kwargs) def _to_dict(self): mapping = {} if self.start_open: mapping["start_open"] = self.start_open if self.start_closed: mapping["start_closed"] = self.start_closed if self.end_open: mapping["end_open"] = self.end_open if self.end_closed: mapping["end_closed"] = self.end_closed return mapping def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._to_dict() == other._to_dict() class KeySet(object): def __init__(self, keys=(), ranges=(), all_=False): if all_ and (keys or ranges): raise ValueError("'all_' is exclusive of 'keys' / 'ranges'.") self.keys = list(keys) self.ranges = list(ranges) self.all_ = all_ def _to_pb(self): if self.all_: return KeySetPB(all_=True) kwargs = {} if self.keys: kwargs["keys"] = _make_list_value_pbs(self.keys) if self.ranges: kwargs["ranges"] = [krange._to_pb() for krange in self.ranges] return KeySetPB(**kwargs)
Apache License 2.0
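A quick look at what the serialization above produces (import path assumed from the file shown; the key values are arbitrary):

from google.cloud.spanner_v1.keyset import KeyRange, KeySet  # assumed import path

ks = KeySet(keys=[["alice"]],
            ranges=[KeyRange(start_closed=["a"], end_open=["m"])])
print(ks._to_dict())
# {'keys': [['alice']], 'ranges': [{'start_closed': ['a'], 'end_open': ['m']}]}

print(KeySet(all_=True)._to_dict())  # {'all': True}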
gwpy/gwosc
gwosc/datasets.py
run_at_gps
python
def run_at_gps(gps, host=api.DEFAULT_URL): for run, meta in api.fetch_dataset_json( 0, api._MAX_GPS, host=host)['runs'].items(): if run in _IGNORE: continue start, end = meta['GPSstart'], meta['GPSend'] if start <= gps < end: return run raise ValueError("no run dataset found containing GPS {0}".format(gps))
Returns the name of the open-data run dataset matching the GPS time This function will return the first run dataset for which ``start <= gps < end`` Parameters ---------- gps : `float` The GPS time to locate host : `str`, optional the URL of the GWOSC host to query, defaults to https://www.gw-openscience.org Returns ------- run : `str` the name of the matched observing run Raises ------- ValueError if no datasets are found matching the GPS time Examples -------- >>> from gwosc.datasets import run_at_gps >>> run_at_gps(1135136350) 'O1' >>> run_at_gps(0) ValueError: no run dataset found containing GPS 0
https://github.com/gwpy/gwosc/blob/6ae371c6bf2d12cdefe1560631c671ae2b9985bc/gwosc/datasets.py#L517-L557
import re import warnings from . import ( api, urls, utils, ) __author__ = 'Duncan Macleod <duncan.macleod@ligo.org>' _IGNORE = { "tenyear", "history", "oldhistory", } def _match_dataset(targetdetector, detectors, targetsegment, segment): if targetdetector not in set(detectors) | {None}: return False if targetsegment is None or utils.segments_overlap( targetsegment, segment): return True def _run_datasets(detector=None, segment=None, host=api.DEFAULT_URL): meta = api.fetch_dataset_json(0, api._MAX_GPS, host=host)["runs"] for epoch, metadata in meta.items(): if epoch in _IGNORE: continue rundets = set(metadata["detectors"]) runseg = (metadata['GPSstart'], metadata['GPSend']) if _match_dataset(detector, rundets, segment, runseg): yield epoch def _catalog_datasets(host=api.DEFAULT_URL): yield from api.fetch_cataloglist_json(host=host).keys() def _match_event_dataset( dataset, catalog=None, version=None, detector=None, segment=None, host=api.DEFAULT_URL, ): full = bool(detector or segment) try: meta = _event_metadata( dataset, catalog=catalog, version=version, full=full, host=host, ) except ValueError: return False if not full: return True try: strain = meta["strain"] except KeyError: return False if detector not in {None} | {u["detector"] for u in strain}: return False if segment is None: return True if not strain: return False eseg = utils.strain_extent(urls.sieve(strain, detector=detector)) return utils.segments_overlap(segment, eseg) def _event_datasets( detector=None, segment=None, catalog=None, version=None, host=api.DEFAULT_URL, ): full = detector is not None or segment is not None events = {} for dset, meta in api.fetch_allevents_json( host=host, full=full, )["events"].items(): if _match_event_dataset( dset, detector=detector, segment=segment, catalog=catalog, version=version, host=host, ): events[dset] = meta def _rank_catalog(x): cat = x["catalog.shortName"].lower() if "confident" in cat: return 1 for word in ("marginal", "preliminary"): if word in cat: return 10 return 5 def _gps_distance(x): gps = x["GPS"] if not segment: return 0 return int(abs(segment[0] - gps)) for dset, _ in sorted( events.items(), key=lambda x: ( _gps_distance(x[1]), _rank_catalog(x[1]), -x[1]["version"], ), ): yield dset def _iter_datasets( detector=None, type=None, segment=None, catalog=None, version=None, match=None, host=api.DEFAULT_URL, ): type = str(type).rstrip("s").lower() needruns = type in {"none", "run"} needcatalogs = type in {"none", "catalog"} needevents = type in {"none", "event"} if not needevents: for key, value in dict(catalog=catalog, version=version).items(): if value is not None: warnings.warn( "the '{}' keyword is only relevant when querying " "for event datasets, it will be ignored now".format(key), ) if match: reg = re.compile(match) def _yield_matches(iter_): for x in iter_: if not match or reg.search(x): yield x for needthis, collection in ( (needruns, _run_datasets( detector=detector, host=host, segment=segment, )), (needcatalogs, _catalog_datasets( host=host, )), (needevents, _event_datasets( detector=detector, segment=segment, host=host, version=version, catalog=catalog, )), ): if not needthis: continue yield from _yield_matches(collection) def find_datasets( detector=None, type=None, segment=None, match=None, catalog=None, version=None, host=api.DEFAULT_URL, ): return sorted(list(_iter_datasets( detector=detector, type=type, segment=segment, catalog=catalog, version=version, match=match, host=host, ))) def _event_metadata( event, catalog=None, version=None, full=True, 
host=api.DEFAULT_URL, ): return list(api._fetch_allevents_event_json( event, catalog=catalog, version=version, full=full, host=host, )["events"].values())[0] def event_gps(event, catalog=None, version=None, host=api.DEFAULT_URL): return _event_metadata( event, catalog=catalog, version=version, full=False, host=host, )['GPS'] def event_segment( event, detector=None, catalog=None, version=None, host=api.DEFAULT_URL, ): data = _event_metadata( event, catalog=catalog, version=version, full=True, host=host, ) if not data["strain"]: raise ValueError( "event '{}' has no strain files".format(event), ) return utils.strain_extent( urls.sieve(data["strain"], detector=detector), ) def event_at_gps(gps, host=api.DEFAULT_URL, tol=1): for meta in api.fetch_allevents_json(host=host)["events"].values(): egps = meta['GPS'] if abs(egps - gps) <= tol: return meta["commonName"] raise ValueError("no event found within {0} seconds of {1}".format( tol, gps)) def event_detectors(event, catalog=None, version=None, host=api.DEFAULT_URL): data = _event_metadata( event, catalog=catalog, version=version, full=True, host=host, ) return set(u["detector"] for u in data["strain"]) def run_segment(run, host=api.DEFAULT_URL): try: meta = api.fetch_dataset_json(0, api._MAX_GPS, host=host)['runs'][run] except KeyError as exc: raise ValueError('no run dataset found for {!r}'.format(exc.args[0])) return meta['GPSstart'], meta['GPSend']
MIT License
ddmal/rodan
rodan/admin/helpers.py
view_or_basicauth
python
def view_or_basicauth(view, request, test_func, realm="", *args, **kwargs): if 'HTTP_AUTHORIZATION' in request.META: auth = request.META['HTTP_AUTHORIZATION'].split() if len(auth) == 2: if auth[0].lower() == "basic": uname, passwd = base64.b64decode(auth[1]).split(':') user = authenticate(username=uname, password=passwd) if user is not None: if user.is_active: request.user = user return view(request, *args, **kwargs) response = HttpResponse() response.status_code = 401 response['WWW-Authenticate'] = 'Basic realm="%s"' % realm return response
This is a helper function used by both 'logged_in_or_basicauth' and 'has_perm_or_basicauth' that does the nitty-gritty work of determining whether the user is already logged in or has provided proper HTTP authorization, returning the view if all goes well and responding with a 401 otherwise.
https://github.com/ddmal/rodan/blob/458e72990c2571fa727a0d026fb235faf30bffec/rodan/admin/helpers.py#L81-L113
def required(wrapping_functions, patterns_rslt): if not hasattr(wrapping_functions, '__iter__'): wrapping_functions = (wrapping_functions,) return [ _wrap_instance__resolve(wrapping_functions, instance) for instance in patterns_rslt ] def _wrap_instance__resolve(wrapping_functions, instance): if not hasattr(instance, 'resolve'): return instance resolve = getattr(instance, 'resolve') def _wrap_func_in_returned_resolver_match(*args, **kwargs): rslt = resolve(*args, **kwargs) if not hasattr(rslt, 'func'): return rslt f = getattr(rslt, 'func') for _f in reversed(wrapping_functions): f = _f(f) setattr(rslt, 'func', f) return rslt setattr(instance, 'resolve', _wrap_func_in_returned_resolver_match) return instance import base64 from django.http import HttpResponse from django.contrib.auth import authenticate
MIT License
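The header handling above boils down to base64-decoding 'user:password' from the Authorization header; a standalone illustration (note that on Python 3 an explicit decode is needed, which the Python 2-era snippet above omits):

import base64

header = "Basic " + base64.b64encode(b"user:pass").decode("ascii")
# -> "Basic dXNlcjpwYXNz"

scheme, payload = header.split()
assert scheme.lower() == "basic"
uname, passwd = base64.b64decode(payload).decode("utf-8").split(":", 1)
print(uname, passwd)  # user pass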
chandrikadeb7/jpmorgan-chase-virtual-internship
JPMC-tech-task-1-py3/server3.py
clear_order
python
def clear_order(order, size, book, op = operator.ge, _notional = 0): (top_order, top_size, age), tail = book[0], book[1:] if op(order, top_order): _notional += min(size, top_size) * top_order sdiff = top_size - size if sdiff > 0: return _notional, list(add_book(tail, top_order, sdiff, age)) elif len(tail) > 0: return clear_order(order, -sdiff, tail, op, _notional)
Try to clear a sized order against a book, returning a tuple of (notional, new_book) if successful, and None if not. _notional is a recursive accumulator and should not be provided by the caller.
https://github.com/chandrikadeb7/jpmorgan-chase-virtual-internship/blob/ccb8860d2c8b20c5d3d69ece5e276da5a27975a0/JPMC-tech-task-1-py3/server3.py#L100-L112
from random import normalvariate, random from datetime import timedelta, datetime import csv import dateutil.parser import os.path import operator import json import re import threading import http.server from socketserver import ThreadingMixIn REALTIME = True SIM_LENGTH = timedelta(days = 365 * 5) MARKET_OPEN = datetime.today().replace(hour = 0, minute = 30, second = 0) SPD = (2.0, 6.0, 0.1) PX = (60.0, 150.0, 1) FREQ = (12, 36, 50) OVERLAP = 4 def bwalk(min, max, std): rng = max - min while True: max += normalvariate(0, std) yield abs((max % (rng * 2)) - rng) + min def market(t0 = MARKET_OPEN): for hours, px, spd in zip(bwalk(*FREQ), bwalk(*PX), bwalk(*SPD)): yield t0, px, spd t0 += timedelta(hours = abs(hours)) def orders(hist): for t, px, spd in hist: stock = 'ABC' if random() > 0.5 else 'DEF' side, d = ('sell', 2) if random() > 0.5 else ('buy', -2) order = round(normalvariate(px + (spd / d), spd / OVERLAP), 2) size = int(abs(normalvariate(0, 100))) yield t, stock, side, order, size def add_book(book, order, size, _age = 10): yield order, size, _age for o, s, age in book: if age > 0: yield o, s, age - 1
MIT License
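A worked call against a tiny two-level book, assuming the task's server3.py is importable as server3: a buy for 5 units priced at 11.00 clears 3 units at 10.00 and 2 at 10.50, leaving 2 units at 10.50 on the book.

from server3 import clear_order  # assumes server3.py from the repo above is on the path

book = [(10.0, 3, 10), (10.5, 4, 10)]          # (price, size, age) levels, best price first
notional, new_book = clear_order(11.0, 5, book)
print(notional)   # 51.0  -> 3 * 10.0 + 2 * 10.5
print(new_book)   # [(10.5, 2, 10)]  partially filled remaining level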
dayyass/pytorch_ner
pytorch_ner/config.py
get_config
python
def get_config(path_to_config: str) -> Dict[str, Any]: with open(path_to_config, mode="r") as fp: config = yaml.safe_load(fp) config["save"]["path_to_folder"] = ( Path(config["save"]["path_to_folder"]) / f"model_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}" ) config["save"]["path_to_folder"].absolute().mkdir(parents=True, exist_ok=True) config["save"]["path_to_config"] = path_to_config config["save"]["path_to_save_logfile"] = ( config["save"]["path_to_folder"] / "logging.txt" ) return config
Get config. Args: path_to_config (str): Path to config. Returns: Dict[str, Any]: Config.
https://github.com/dayyass/pytorch_ner/blob/4bcb627f8f4539f57aa48367085d724a2ee47b02/pytorch_ner/config.py#L8-L34
import datetime from pathlib import Path from typing import Any, Dict import yaml
Apache License 2.0
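A minimal configuration that satisfies the keys touched by get_config above (only save.path_to_folder is required by the snippet; a real pytorch_ner config carries more sections, which are omitted here):

import yaml

with open("config.yaml", "w") as fp:                      # hypothetical minimal config
    yaml.safe_dump({"save": {"path_to_folder": "models"}}, fp)

from pytorch_ner.config import get_config                 # assumes pytorch_ner is installed

config = get_config("config.yaml")
print(config["save"]["path_to_folder"])        # models/model_<timestamp>, directory already created
print(config["save"]["path_to_save_logfile"])  # .../logging.txt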
syseleven/icinga2-migration-utils
icinga_migration_utils/icinga1/icinga1.py
Icinga1Config.get_acknowledgements
python
def get_acknowledgements(self, hostname): return [ack for ack in self.service_acknowledgements + self.host_acknowledgements if ack['host_name'] == hostname]
Get all acknowledgements (service and host) related to a host :param hostname: hostname :return: list of acknowledgement dicts for the given host
https://github.com/syseleven/icinga2-migration-utils/blob/1272a4f432d134546f4d2d96eb5504aaa66bd12e/icinga_migration_utils/icinga1/icinga1.py#L386-L394
import glob import logging import os import re from collections import defaultdict logger = logging.getLogger(__name__) OBJECT_CACHE_REGEX = r'define\s+(?P<object_type>\w+)\s+\{\n' r'|\t(?P<keyonly>\w+)\t\n|\t(?P<key>\w+)\s+(?P<value>[^\n]*)\n|\t' r'\}' STATUS_FILE_REGEX = r'(?P<object_type>\w+)\s+\{\n|\t(?P<key>\w+)\=(?P<value>[^\n]*)\n|\t\}' CACHE_DIR = os.path.expanduser('~/.cache/icingadiff') DEFAULT_OBJECTS_FILES = os.path.join(CACHE_DIR, 'objects_monitoring*.cache') DEFAULT_STATUS_FILES = os.path.join(CACHE_DIR, 'status_monitoring*.cache') class Icinga1Error(Exception): pass class StatusType(object): CONTACTSTATUS = 'contactstatus' SERVICECOMMENT = 'servicecomment' SERVICESTATUS = 'servicestatus' HOSTSTATUS = 'hoststatus' SERVICEDOWNTIME = 'servicedowntime' HOSTCOMMENT = 'hostcomment' PROGRAMSTATUS = 'programstatus' HOSTDOWNTIME = 'hostdowntime' INFO = 'info' class ObjectType(object): COMMAND = 'command' CONTACT = 'contact' CONTACTGROUP = 'contactgroup' HOST = 'host' HOSTGROUP = 'hostgroup' MODULE = 'module' SERVICE = 'service' SERVICEDEPENDENCY = 'servicedependency' SERVICEGROUP = 'servicegroup' TIMEPERIOD = 'timeperiod' def parse_icinga_cache(file, source, regex): logger.debug("Parsing cache file {}".format(file)) content = open(file).read() matches = re.finditer(regex, content, re.DOTALL) result = [] obj = {} for match in matches: groups = match.groups() if all(not x for x in groups): result.append(obj) obj = {} else: groupdict = match.groupdict() if groupdict['object_type']: obj['object_type'] = groupdict['object_type'] obj['monitoring_source'] = source elif groupdict.get('keyonly', None): key = groupdict['keyonly'] obj[key] = None elif groupdict['key'] and groupdict['value']: key = groupdict['key'] value = groupdict['value'] obj[key] = value return result class Icinga1Config(object): def __init__(self, status_files=None, objects_files=None): self.status_files = status_files or os.environ.get('ICINGA_STATUS_FILES', DEFAULT_STATUS_FILES) self.objects_files = objects_files or os.environ.get('ICINGA_OBJECT_FILES', DEFAULT_OBJECTS_FILES) self._status = [] self._objects = [] self._hostdowntimes = [] self._servicedowntimes = [] self._service_acknowledgements = [] self._host_acknowledgements = [] self._contacts = [] @property def objects(self): if not self._objects: for objects_file in glob.glob(self.objects_files): source = re.findall(r'(monitoring[\d]+)\.cache', objects_file)[0] self._objects.extend( parse_icinga_cache(objects_file, source, OBJECT_CACHE_REGEX)) if not self._objects: raise Exception("Error - no objects found in object cache files ({})" .format(self.objects_files)) return self._objects @property def status(self): if not self._status: for status_file in glob.glob(self.status_files): source = re.findall(r'(monitoring[\d]+)\.cache', status_file)[0] self._status.extend( parse_icinga_cache(status_file, source, STATUS_FILE_REGEX)) if not self._status: raise Exception("Error - no objects found in status cache files" .format(self.status_files)) return self._status def _get_objects(self, object_type, **kwargs): objects = [obj for obj in self.objects if obj.get('object_type', None) == object_type] if kwargs: for key, value in kwargs.items(): objects = [obj for obj in objects if key in obj.keys() and obj[key] == value] return objects def _get_status(self, object_type, **kwargs): objects = [obj for obj in self.status if obj.get('object_type', None) == object_type] if kwargs: objects = [obj for key, value in kwargs.items() for obj in objects if key in obj.keys() and obj[key] == 
value] return objects def get_hosts(self, **kwargs): return self._get_objects(ObjectType.HOST, **kwargs) def get_hosts_dict(self, **kwargs): host_dict = {} for host in self.get_hosts(**kwargs): host_dict[host['host_name']] = host return host_dict def get_hoststatus_by_host(self): host_status_dict = {} host_status = [ status for status in self.status if status['object_type'] == StatusType.HOSTSTATUS ] for status in host_status: host_status_dict[status['host_name']] = status return host_status_dict def get_servicestatus(self, hostname): return [ status for status in self.status if status['object_type'] == StatusType.SERVICESTATUS and status['host_name'] == hostname ] def get_servicestatus_by_host(self): service_status_dict = defaultdict(list) service_status = [ status for status in self.status if status['object_type'] == StatusType.SERVICESTATUS ] for status in service_status: service_status_dict[status['host_name']].append(status) return service_status_dict def get_services(self, hostname=None, **kwargs): if hostname: kwargs['host_name'] = hostname return self._get_objects(ObjectType.SERVICE, **kwargs) def get_services_by_hostname(self, **kwargs): services = defaultdict(list) for service in self.get_services(**kwargs): services[service['host_name']].append(service) return services def get_downtimes(self, hostname): return [ downtime for downtime in self.hostdowntimes + self.servicedowntimes if downtime['host_name'] == hostname ] @property def hostdowntimes(self): if not self._hostdowntimes: self._hostdowntimes = self._get_status(StatusType.HOSTDOWNTIME) return self._hostdowntimes @property def servicedowntimes(self): if not self._servicedowntimes: self._servicedowntimes = self._get_status(StatusType.SERVICEDOWNTIME) return self._servicedowntimes
MIT License
note35/sinon
sinon/lib/assertion.py
SinonAssertion.fail
python
def fail(cls, message): SinonAssertion.message = message
Define the failure message of the assertion. This function changes the message for all subsequent assertions until the tests finish. Args: message (str): the failure message to use
https://github.com/note35/sinon/blob/35da87ca6f30eec112ffc8d5c0a56d852d770285/sinon/lib/assertion.py#L27-L33
from .util import ErrorHandler from .spy import SinonSpy class SinonAssertion(object): failException = AssertionError message = "" @classmethod def __is_spy(cls, spy): if not isinstance(spy, SinonSpy): ErrorHandler.is_not_spy_error(spy) @classmethod
BSD 2-Clause Simplified License
romeltorres/alpha_vantage
alpha_vantage/cryptocurrencies.py
CryptoCurrencies.get_digital_currency_monthly
python
def get_digital_currency_monthly(self, symbol, market): _FUNCTION_KEY = 'DIGITAL_CURRENCY_MONTHLY' return _FUNCTION_KEY, 'Time Series (Digital Currency Monthly)', 'Meta Data'
Returns the monthly historical time series for a digital currency (e.g., BTC) traded on a specific market (e.g., CNY/Chinese Yuan), refreshed daily at midnight (UTC). Prices and volumes are quoted in both the market-specific currency and USD. Keyword Arguments: symbol: The digital/crypto currency of your choice. It can be any of the currencies in the digital currency list. For example: symbol=BTC. market: The exchange market of your choice. It can be any of the markets in the market list. For example: market=CNY.
https://github.com/romeltorres/alpha_vantage/blob/c637657579950d72605320c68ded42a447566cdf/alpha_vantage/cryptocurrencies.py#L45-L59
from .alphavantage import AlphaVantage as av class CryptoCurrencies(av): @av._output_format @av._call_api_on_func def get_digital_currency_daily(self, symbol, market): _FUNCTION_KEY = 'DIGITAL_CURRENCY_DAILY' return _FUNCTION_KEY, 'Time Series (Digital Currency Daily)', 'Meta Data' @av._output_format @av._call_api_on_func def get_digital_currency_weekly(self, symbol, market): _FUNCTION_KEY = 'DIGITAL_CURRENCY_WEEKLY' return _FUNCTION_KEY, 'Time Series (Digital Currency Weekly)', 'Meta Data' @av._output_format @av._call_api_on_func
MIT License
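Typical usage of the endpoint above; it requires an Alpha Vantage API key, and the pandas output format is optional:

from alpha_vantage.cryptocurrencies import CryptoCurrencies

cc = CryptoCurrencies(key="YOUR_API_KEY", output_format="pandas")
data, meta = cc.get_digital_currency_monthly(symbol="BTC", market="CNY")
print(data.head())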
kjunelee/metaoptnet
models/ResNet12_embedding.py
resnet12
python
def resnet12(keep_prob=1.0, avg_pool=False, **kwargs): model = ResNet(BasicBlock, keep_prob=keep_prob, avg_pool=avg_pool, **kwargs) return model
Constructs a ResNet-12 model.
https://github.com/kjunelee/metaoptnet/blob/7a8e2ae25ef47cfe75a6fe8bc7920dc9fd29191f/models/ResNet12_embedding.py#L121-L125
import torch.nn as nn import torch import torch.nn.functional as F from models.dropblock import DropBlock def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, drop_rate=0.0, drop_block=False, block_size=1): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.LeakyReLU(0.1) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = conv3x3(planes, planes) self.bn3 = nn.BatchNorm2d(planes) self.maxpool = nn.MaxPool2d(stride) self.downsample = downsample self.stride = stride self.drop_rate = drop_rate self.num_batches_tracked = 0 self.drop_block = drop_block self.block_size = block_size self.DropBlock = DropBlock(block_size=self.block_size) def forward(self, x): self.num_batches_tracked += 1 residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) out = self.maxpool(out) if self.drop_rate > 0: if self.drop_block == True: feat_size = out.size()[2] keep_rate = max(1.0 - self.drop_rate / (20*2000) * (self.num_batches_tracked), 1.0 - self.drop_rate) gamma = (1 - keep_rate) / self.block_size**2 * feat_size**2 / (feat_size - self.block_size + 1)**2 out = self.DropBlock(out, gamma=gamma) else: out = F.dropout(out, p=self.drop_rate, training=self.training, inplace=True) return out class ResNet(nn.Module): def __init__(self, block, keep_prob=1.0, avg_pool=False, drop_rate=0.0, dropblock_size=5): self.inplanes = 3 super(ResNet, self).__init__() self.layer1 = self._make_layer(block, 64, stride=2, drop_rate=drop_rate) self.layer2 = self._make_layer(block, 160, stride=2, drop_rate=drop_rate) self.layer3 = self._make_layer(block, 320, stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size) self.layer4 = self._make_layer(block, 640, stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size) if avg_pool: self.avgpool = nn.AvgPool2d(5, stride=1) self.keep_prob = keep_prob self.keep_avg_pool = avg_pool self.dropout = nn.Dropout(p=1 - self.keep_prob, inplace=False) self.drop_rate = drop_rate for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, stride=1, drop_rate=0.0, drop_block=False, block_size=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, drop_rate, drop_block, block_size)) self.inplanes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) if self.keep_avg_pool: x = self.avgpool(x) x = x.view(x.size(0), -1) return x
Apache License 2.0
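Instantiating the embedding network above on a batch of 84x84 images, the miniImageNet resolution for which the 5x5 average pool in the context is sized (repo-local import path assumed):

import torch
from models.ResNet12_embedding import resnet12  # assumes the MetaOptNet repo layout

model = resnet12(keep_prob=1.0, avg_pool=True, drop_rate=0.1, dropblock_size=5)
x = torch.randn(4, 3, 84, 84)   # batch of 4 RGB images
emb = model(x)
print(emb.shape)                # torch.Size([4, 640])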
kubevirt/client-python
kubevirt/models/v1_virtual_machine_instance_preset_list.py
V1VirtualMachineInstancePresetList.api_version
python
def api_version(self, api_version): self._api_version = api_version
Sets the api_version of this V1VirtualMachineInstancePresetList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param api_version: The api_version of this V1VirtualMachineInstancePresetList. :type: str
https://github.com/kubevirt/client-python/blob/c9f9d3bc429f783076982b46b194d5f7669eab1b/kubevirt/models/v1_virtual_machine_instance_preset_list.py#L77-L86
from pprint import pformat from six import iteritems import re class V1VirtualMachineInstancePresetList(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'api_version': 'str', 'items': 'list[V1VirtualMachineInstancePreset]', 'kind': 'str', 'metadata': 'K8sIoApimachineryPkgApisMetaV1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None): self._api_version = None self._items = None self._kind = None self._metadata = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): return self._api_version @api_version.setter
Apache License 2.0
cpflat/logcausalanalysis
logcausality/lt_common.py
merge_lt
python
def merge_lt(m1, m2, sym): ret = [] for w1, w2 in zip(m1, m2): if w1 == w2: ret.append(w1) else: ret.append(sym) return ret
Return the common parts of two log messages, replacing differing words with the given symbol; the result serves as the log template
https://github.com/cpflat/logcausalanalysis/blob/f475f53cb683ab6ad55851c69129758e4ac89fc6/logcausality/lt_common.py#L593-L601
import os import cPickle as pickle from collections import defaultdict import strutil class LTManager(object): def __init__(self, conf, db, lttable, reset_db, lt_alg, ltg_alg, post_alg): self._reset_db = reset_db self.sym = conf.get("log_template", "variable_symbol") self.filename = conf.get("log_template", "indata_filename") self._fail_fn = conf.get("log_template", "fail_output") self._db = db self._lttable = lttable self._table = TemplateTable() self.ltgen = None self.ltspl = None self.ltgroup = None def _set_ltgen(self, ltgen): self.ltgen = ltgen def _set_ltgroup(self, ltgroup): self.ltgroup = ltgroup if not self._reset_db: self.ltgroup.restore_ltg(self._db, self._lttable) def _set_ltspl(self, ltspl): self.ltspl = ltspl def process_init_data(self, l_line): d = self.ltgen.process_init_data(l_line) for mid, line in enumerate(l_line): l_w, l_s = line tid = d[mid] tpl = self._table[tid] ltw = self.ltspl.replace_variable(l_w, tpl, self.sym) ltid = self.ltspl.search(tid, ltw) if ltid is None: ltline = self.add_lt(ltw, l_s) self._table.addcand(tid, ltline.ltid) else: self.count_lt(ltid) ltline = self._lttable[ltid] yield ltline def process_line(self, l_w, l_s): def lt_diff(ltid, ltw): d_diff = {} for wid, new_w, old_w in zip(range(len(ltw)), ltw, self._lttable[ltid].ltw): if new_w == old_w: pass else: d_diff[wid] = new_w return d_diff def lt_repl(ltw, d_diff): ret = [] for wid, w in enumerate(ltw): if d_diff.has_key(wid): ret.append(d_diff[wid]) else: ret.append(w) return ret tid, state = self.ltgen.process_line(l_w, l_s) if tid is None: return None tpl = self._table[tid] ltw = self.ltspl.replace_variable(l_w, tpl, self.sym) if state == LTGen.state_added: ltline = self.add_lt(ltw, l_s) self._table.addcand(tid, ltline.ltid) else: ltid = self.ltspl.search(tid, ltw) if ltid is None: ltline = self.add_lt(ltw, l_s) self._table.addcand(tid, ltline.ltid) else: if state == LTGen.state_changed: d_diff = lt_diff(ltid, ltw) for temp_ltid in self._table.getcand(tid): if temp_ltid == ltid: self.replace_and_count_lt(ltid, ltw) else: old_ltw = self._lttable[temp_ltid] new_ltw = lt_repl(old_ltw, d_diff) self.replace_lt(ltid, new_ltw) elif state == LTGen.state_unchanged: self.count_lt(ltid) else: raise AssertionError ltline = self._lttable[ltid] return ltline def add_lt(self, l_w, l_s, cnt = 1): ltid = self._lttable.next_ltid() ltline = LogTemplate(ltid, None, l_w, l_s, cnt, self.sym) ltgid = self.ltgroup.add(ltline) ltline.ltgid = ltgid self._lttable.add_lt(ltline) self._db.add_lt(ltline) return ltline def replace_lt(self, ltid, l_w, l_s = None, cnt = None): self._lttable[ltid].replace(l_w, l_s, cnt) self._db.update_lt(ltid, l_w, l_s, cnt) def replace_and_count_lt(self, ltid, l_w, l_s = None): cnt = self._lttable[ltid].count() self._lttable[ltid].replace(l_w, l_s, None) self._db.update_lt(ltid, l_w, l_s, cnt) def count_lt(self, ltid): cnt = self._lttable[ltid].count() self._db.update_lt(ltid, None, None, cnt) def remove_lt(self, ltid): self._lttable.remove_lt(ltid) self._db.remove_lt(ltid) def remake_ltg(self): self._db.reset_ltg() self.ltgroup.init_dict() temp_lttable = self._lttable self.ltgroup._lttable = LTTable(self.sym) for ltline in temp_lttable: ltgid = self.ltgroup.add(ltline) ltline.ltgid = ltgid self.ltgroup._lttable.add_lt(ltline) self._db.add_ltg(ltline.ltid, ltgid) assert self.ltgroup._lttable.ltdict == temp_lttable.ltdict def failure_output(self, line): with open(self._fail_fn, "a") as f: line = line.rstrip("\n") f.write(line + "\n") def load(self): with open(self.filename, 'r') as f: obj = 
pickle.load(f) table_data, ltgen_data, ltgroup_data = obj self._table.load(table_data) self.ltgen.load(ltgen_data) self.ltgroup.load(ltgroup_data) def dump(self): table_data = self._table.dumpobj() ltgen_data = self.ltgen.dumpobj() ltgroup_data = self.ltgroup.dumpobj() obj = (table_data, ltgen_data, ltgroup_data) with open(self.filename, 'w') as f: pickle.dump(obj, f) class LTTable(): def __init__(self, sym): self.ltdict = {} self.sym = sym def __iter__(self): return self._generator() def _generator(self): for ltid in self.ltdict.keys(): yield self.ltdict[ltid] def __len__(self): return len(self.ltdict) def __getitem__(self, key): assert isinstance(key, int) if not self.ltdict.has_key(key): raise IndexError("index out of range") return self.ltdict[key] def next_ltid(self): cnt = 0 while self.ltdict.has_key(cnt): cnt += 1 else: return cnt def restore_lt(self, ltid, ltgid, ltw, lts, count): assert not self.ltdict.has_key(ltid) self.ltdict[ltid] = LogTemplate(ltid, ltgid, ltw, lts, count, self.sym) def add_lt(self, ltline): assert not self.ltdict.has_key(ltline.ltid) self.ltdict[ltline.ltid] = ltline def remove_lt(self, ltid): self.ltdict.pop(ltid) class LogTemplate(): def __init__(self, ltid, ltgid, ltw, lts, count, sym): if len(ltw) == 0: raise ValueError("empty ltw, failed to generate LogTemplate") self.ltid = ltid self.ltgid = ltgid self.ltw = ltw self.lts = lts self.cnt = count self.sym = sym def __iter__(self): return self.ltw def __str__(self): return self.restore_message(self.ltw) def var(self, l_w): if len(l_w) == 0: return [self.sym for w in self.ltw if w == self.sym] else: return [w_org for w_org, w_lt in zip(l_w, self.ltw) if w_lt == self.sym] def var_location(self): return [i for i, w_lt in enumerate(self.ltw) if w_lt == self.sym] def restore_message(self, l_w): if len(l_w) == 0: l_w = self.ltw l_w = [strutil.restore_esc(w) for w in l_w] if self.lts is None: return "".join(l_w) else: return "".join([s + w for w, s in zip(l_w + [""], self.lts)]) def count(self): self.cnt += 1 return self.cnt def replace(self, l_w, l_s = None, count = None): self.ltw = l_w if l_s is not None: self.lts = l_s if count is not None: self.cnt = count class TemplateTable(): def __init__(self): self._d_tpl = {} self._d_rtpl = {} self._d_cand = defaultdict(list) def __str__(self): ret = [] for tid, tpl in self._d_tpl.iteritems(): ret.append(" ".join([str(tid)] + tpl)) return "\n".join(ret) def __iter__(self): return self._generator() def _generator(self): for tid in self._d_tpl.keys(): yield self._d_tpl[tid] def __getitem__(self, key): assert isinstance(key, int) if not self._d_tpl.has_key(key): raise IndexError("index out of range") return self._d_tpl[key] def next_tid(self): cnt = 0 while self._d_tpl.has_key(cnt): cnt += 1 else: return cnt def tids(self): return self._d_tpl.keys() def _key_template(self, template): l_word = [strutil.add_esc(w) for w in template] return "@".join(l_word) def exists(self, template): key = self._key_template(template) return self._d_rtpl.has_key(key) def get_tid(self, template): key = self._key_template(template) return self._d_rtpl[key] def get_template(self, tid): return self._d_tpl[tid] def add(self, template): tid = self.next_tid() self._d_tpl[tid] = template self._d_rtpl[self._key_template(template)] = tid return tid def replace(self, tid, template): self._d_tpl[tid] = template self._d_rtpl[self._key_template(template)] = tid def getcand(self, tid): return self._d_cand[tid] def addcand(self, tid, ltid): self._d_cand[tid].append(ltid) def load(self, obj): self._d_tpl, 
self._d_cand = obj for tid, tpl in self._d_tpl.iteritems(): self._d_rtpl[self._key_template(tpl)] = tid def dumpobj(self): return (self._d_tpl, self._d_cand) class LTGen(object): state_added = 0 state_changed = 1 state_unchanged = 2 def __init__(self, table, sym): self._table = table self._sym = sym def update_table(self, l_w, tid, added_flag): if added_flag: new_tid = self._table.add(l_w) assert new_tid == tid return self.state_added else: old_tpl = self._table[tid] new_tpl = merge_lt(old_tpl, l_w, self._sym) if old_tpl == new_tpl: return self.state_unchanged else: self._table.replace(tid, new_tpl) return self.state_changed def process_init_data(self, lines): d = {} for mid, line in enumerate(lines): l_w, l_s = line tid, state = self.process_line(l_w, l_s) d[mid] = tid return d def process_line(self, l_w, l_s): raise NotImplementedError def load(self, loadobj): pass def dumpobj(self): return None class LTGroup(object): def __init__(self): self.init_dict() def init_dict(self): self.d_group = {} self.d_rgroup = {} def _next_groupid(self): cnt = 0 while self.d_group.has_key(cnt): cnt += 1 else: return cnt def add(self, ltline): gid = ltline.ltid self.add_ltid(gid, ltline) return gid def add_ltid(self, gid, ltline): self.d_group.setdefault(gid, []).append(ltline) self.d_rgroup[ltline.ltid] = gid def restore_ltg(self, db, table): for ltid, ltgid in db.iter_ltg_def(): self.d_group.setdefault(ltgid, []).append(table[ltid]) self.d_rgroup[ltid] = ltgid def load(self, loadobj): pass def dumpobj(self): return None class LTPostProcess(object): def __init__(self, conf, table, lttable, l_alg): self._table = table self._lttable = lttable self._rules = [] for alg in l_alg: if alg == "dummy": self._rules.append(VariableLabelRule()) elif alg == "host": self._rules.append(VariableLabelHost(conf)) else: raise NotImplementedError self.sym_header = conf.get("log_template", "labeled_variable_symbol_header") self.sym_footer = conf.get("log_template", "labeled_variable_symbol_footer") def _labeled_variable(self, w): return "".join((self.sym_header, w, self.sym_footer)) def replace_variable(self, l_w, tpl, sym): ret = [] for org_w, tpl_w in zip(l_w, tpl): if tpl_w == sym: for r in self._rules: ww = r.replace_word(org_w) if ww is not None: ret.append(self._labeled_variable(ww)) break else: ret.append(tpl_w) else: ret.append(tpl_w) return ret def search(self, tid, ltw): l_ltid = self._table.getcand(tid) for ltid in l_ltid: if self._lttable[ltid].ltw == ltw: return ltid else: return None class VariableLabelRule(object): def __init__(self): pass def replace_word(self, w): return None class VariableLabelHost(VariableLabelRule): def __init__(self, conf): import host_alias self.ha = host_alias.HostAlias(conf) def replace_word(self, w): return self.ha.get_group(w) def init_ltmanager(conf, db, table, reset_db): lt_alg = conf.get("log_template", "lt_alg") ltg_alg = conf.get("log_template", "ltgroup_alg") post_alg = conf.gettuple("log_template", "post_alg") sym = conf.get("log_template", "variable_symbol") ltm = LTManager(conf, db, table, reset_db, lt_alg, ltg_alg, post_alg) if lt_alg == "shiso": import lt_shiso ltgen = lt_shiso.LTGenSHISO(ltm._table, sym, threshold = conf.getfloat( "log_template_shiso", "ltgen_threshold"), max_child = conf.getint( "log_template_shiso", "ltgen_max_child") ) elif lt_alg == "import": fn = conf.get("log_template_import", "def_path") mode = conf.get("log_template_import", "mode") import logparser lp = logparser.LogParser(conf, sep_variable = True) import lt_import ltgen = 
lt_import.LTGenImport(ltm._table, sym, fn, mode, lp) elif lt_alg == "crf": import lt_crf ltgen = lt_crf.LTGenCRF(ltm._table, sym, conf) else: raise ValueError("lt_alg({0}) invalid".format(lt_alg)) ltm._set_ltgen(ltgen) if ltg_alg == "shiso": import lt_shiso ltgroup = lt_shiso.LTGroupSHISO(table, ngram_length = conf.getint( "log_template_shiso", "ltgroup_ngram_length"), th_lookup = conf.getfloat( "log_template_shiso", "ltgroup_th_lookup"), th_distance = conf.getfloat( "log_template_shiso", "ltgroup_th_distance"), mem_ngram = conf.getboolean( "log_template_shiso", "ltgroup_mem_ngram") ) elif ltg_alg == "ssdeep": import lt_misc ltgroup = lt_misc.LTGroupFuzzyHash(table) elif ltg_alg == "none": ltgroup = LTGroup() else: raise ValueError("ltgroup_alg({0}) invalid".format(ltg_alg)) ltm._set_ltgroup(ltgroup) ltspl = LTPostProcess(conf, ltm._table, ltm._lttable, post_alg) ltm._set_ltspl(ltspl) if os.path.exists(ltm.filename) and not reset_db: ltm.load() return ltm
BSD 3-Clause New or Revised License
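A worked call showing the templating step: words that differ between the two tokenized messages are replaced with the variable symbol (the symbol '**' and the log tokens are arbitrary examples):

from lt_common import merge_lt  # assumes logcausality/ is on the path

m1 = ["sshd", "login", "from", "192.168.0.1", "port", "22"]
m2 = ["sshd", "login", "from", "10.0.0.7", "port", "22"]
print(merge_lt(m1, m2, "**"))
# ['sshd', 'login', 'from', '**', 'port', '22']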
xgfone/snippet
snippet/example/python/circuit_breaker.py
CircuitBreakerMonitor.all_closed
python
def all_closed(cls): return not cls.get_all_open()
Return True if all circuit breakers are closed.
https://github.com/xgfone/snippet/blob/054af596655007cbec81340bd166489e706fffe6/snippet/example/python/circuit_breaker.py#L264-L267
from time import time from threading import Lock from functools import wraps STATE_CLOSED = "closed" STATE_OPEN = "open" STATE_HALF_OPEN = "half-open" def get_now(): return int(time()) class CircuitBreakerError(Exception): pass class TooManyRequestsError(CircuitBreakerError): pass class OpenStateError(CircuitBreakerError): pass class Count(object): __slots__ = ("requests", "total_successes", "total_failures", "consecutive_successes", "consecutive_failures") def __init__(self): self.requests = 0 self.total_successes = 0 self.total_failures = 0 self.consecutive_successes = 0 self.consecutive_failures = 0 def on_request(self): self.requests += 1 def on_success(self): self.total_successes += 1 self.consecutive_successes += 1 self.consecutive_failures = 0 def on_failure(self): self.total_failures += 1 self.consecutive_failures += 1 self.consecutive_successes = 0 def clear(self): self.requests = 0 self.total_successes = 0 self.total_failures = 0 self.consecutive_successes = 0 self.consecutive_failures = 0 def copy(self): c = self.__class__.__new__() c.requests = self.requests c.total_successes = c.total_successes c.total_failures = c.total_failures c.consecutive_successes = c.consecutive_successes c.consecutive_failures = c.consecutive_failures return c class CircuitBreaker(object): MAX_REQUESTS = 1 COUNT_INTERVAL = 0 RECOVERY_TIMEOUT = 60 FAILURE_THRESHOLD = 5 EXPECTED_EXCEPTION = Exception def __init__(self, name=None, max_requests=None, count_interval=None, recovery_timeout=None, failure_threshold=None, expected_exception=None, on_state_change=None): self._name = name self._max_requests = max_requests or self.MAX_REQUESTS self._count_interval = count_interval or self.COUNT_INTERVAL self._recovery_timeout = recovery_timeout or self.RECOVERY_TIMEOUT self._failure_threshold = failure_threshold or self.FAILURE_THRESHOLD self._expected_exception = expected_exception or self.EXPECTED_EXCEPTION self._on_state_change = on_state_change self._state = STATE_CLOSED self._generation = 0 self._count = Count() self._expiry = 0 self._lock = Lock() self._new_generation(get_now()) @property def name(self): return self._name @property def state(self): with self._lock: return self._current_state(get_now())[0] @property def is_open(self): return self.state == STATE_OPEN @property def is_closed(self): return self.state == STATE_CLOSED @property def is_half_open(self): return self.state == STATE_HALF_OPEN @property def count(self): with self._lock: return self._count.copy() def __call__(self, wrapped): if not self._name: self._name = wrapped.__name__ @wraps(wrapped) def wrapper(*args, **kwargs): return self.call(wrapped, *args, **kwargs) CircuitBreakerMonitor.register(self) return wrapper def allow(self): generation = self._before_request() return lambda ok: self._after_request(generation, ok) def call(self, func, *args, **kwargs): generation = self._before_request() try: result = func(*args, **kwargs) except self._expected_exception: self._after_request(generation, False) raise else: self._after_request(generation, True) return result def _before_request(self): with self._lock: now = get_now() state, generation = self._current_state(now) if state == STATE_OPEN: raise OpenStateError elif state == STATE_HALF_OPEN and self._count.requests >= self._max_requests: raise TooManyRequestsError self._count.on_request() return generation def _after_request(self, before_generation, ok): with self._lock: now = get_now() state, generation = self._current_state(now) if generation != before_generation: return (self._on_success if 
ok else self._on_failure)(state, now) def _on_success(self, state, now): if state == STATE_CLOSED: self._count.on_success() elif state == STATE_HALF_OPEN: self._count.on_success() if self._count.consecutive_successes >= self._max_requests: self._set_statue(STATE_CLOSED, now) def _on_failure(self, state, now): if state == STATE_CLOSED: self._count.on_failure() if self._count.consecutive_failures > self._failure_threshold: self._set_statue(STATE_OPEN, now) elif state == STATE_HALF_OPEN: self._set_statue(STATE_OPEN, now) def _current_state(self, now): state = self._state if state == STATE_CLOSED: if self._expiry and self._expiry < now: self._new_generation(now) elif state == STATE_OPEN: if self._expiry < now: self._set_statue(STATE_HALF_OPEN, now) return self._state, self._generation def _set_statue(self, state, now): if self._state == state: return prev, self._state = self._state, state self._new_generation(now) if self._on_state_change: self._on_state_change(self._name, prev, state) def _new_generation(self, now): self._generation += 1 self._count.clear() state = self._state if state == STATE_CLOSED: self._expiry = (now + self._count_interval) if self._count_interval else 0 elif state == STATE_OPEN: self._expiry = now + self._recovery_timeout else: self._expiry = 0 class CircuitBreakerMonitor(object): circuit_breakers = {} @classmethod def register(cls, cb): cls.circuit_breakers[cb.name] = cb @classmethod
MIT License
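How the decorator form of the breaker in the context above is typically wired up; the flaky function is invented, and the module import name is assumed:

import random
from circuit_breaker import CircuitBreaker, CircuitBreakerMonitor  # assumed module name

@CircuitBreaker(failure_threshold=3, recovery_timeout=10)  # registers itself with the monitor
def flaky_call():
    if random.random() < 0.5:
        raise Exception("backend unavailable")
    return "ok"

for _ in range(10):
    try:
        flaky_call()
    except Exception:
        pass

# False once flaky_call has seen enough consecutive failures to trip open.
print(CircuitBreakerMonitor.all_closed())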
giswqs/leafmap
leafmap/common.py
hex_to_rgb
python
def hex_to_rgb(value="FFFFFF"): value = value.lstrip("#") lv = len(value) return tuple(int(value[i : i + lv // 3], 16) for i in range(0, lv, lv // 3))
Converts hex color to RGB color. Args: value (str, optional): Hex color code as a string. Defaults to 'FFFFFF'. Returns: tuple: RGB color as a tuple.
https://github.com/giswqs/leafmap/blob/410154f671312bc7a29aa32203b8aee9861a5034/leafmap/common.py#L416-L427
import csv import os import shutil import tarfile import urllib.request import zipfile import folium import ipyleaflet import ipywidgets as widgets import whitebox from IPython.display import display, IFrame class WhiteboxTools(whitebox.WhiteboxTools): def __init__(self, **kwargs): super().__init__(**kwargs) def whiteboxgui(verbose=True, tree=False, reset=False, sandbox_path=None): import whiteboxgui return whiteboxgui.show(verbose, tree, reset, sandbox_path) def _in_colab_shell(): import sys if "google.colab" in sys.modules: return True else: return False def _is_drive_mounted(): drive_path = "/content/drive/My Drive" if os.path.exists(drive_path): return True else: return False def set_proxy(port=1080, ip="http://127.0.0.1"): import requests try: if not ip.startswith("http"): ip = "http://" + ip proxy = "{}:{}".format(ip, port) os.environ["HTTP_PROXY"] = proxy os.environ["HTTPS_PROXY"] = proxy a = requests.get("https://google.com") if a.status_code != 200: print( "Failed to connect to Google services. Please double check the port number and ip address." ) except Exception as e: raise Exception(e) def _check_install(package): import subprocess try: __import__(package) except ImportError: print("{} is not installed. Installing ...".format(package)) try: subprocess.check_call(["python", "-m", "pip", "install", package]) except Exception as e: print("Failed to install {}".format(package)) print(e) print("{} has been installed successfully.".format(package)) def update_package(): try: download_dir = os.path.join(os.path.expanduser("~"), "Downloads") if not os.path.exists(download_dir): os.makedirs(download_dir) _clone_repo(out_dir=download_dir) pkg_dir = os.path.join(download_dir, "leafmap-master") work_dir = os.getcwd() os.chdir(pkg_dir) if shutil.which("pip") is None: cmd = "pip3 install ." else: cmd = "pip install ." os.system(cmd) os.chdir(work_dir) print( "\nPlease comment out 'leafmap.update_package()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output" ) except Exception as e: raise Exception(e) def check_package(name, URL=""): try: __import__(name.lower()) except Exception: raise ImportError( f"{name} is not installed. Please install it before proceeding. {URL}" ) def _clone_repo(out_dir=".", unzip=True): url = "https://github.com/giswqs/leafmap/archive/master.zip" filename = "leafmap-master.zip" download_from_url(url, out_file_name=filename, out_dir=out_dir, unzip=unzip) def __install_from_github(url): try: download_dir = os.path.join(os.path.expanduser("~"), "Downloads") if not os.path.exists(download_dir): os.makedirs(download_dir) repo_name = os.path.basename(url) zip_url = os.path.join(url, "archive/master.zip") filename = repo_name + "-master.zip" download_from_url( url=zip_url, out_file_name=filename, out_dir=download_dir, unzip=True ) pkg_dir = os.path.join(download_dir, repo_name + "-master") pkg_name = os.path.basename(url) work_dir = os.getcwd() os.chdir(pkg_dir) print("Installing {}...".format(pkg_name)) cmd = "pip install ." os.system(cmd) os.chdir(work_dir) print("{} has been installed successfully.".format(pkg_name)) except Exception as e: raise Exception(e) def _check_git_install(): import webbrowser cmd = "git --version" output = os.popen(cmd).read() if "git version" in output: return True else: url = "https://git-scm.com/downloads" print( "Git is not installed. 
Please download Git from {} and install it.".format( url ) ) webbrowser.open_new_tab(url) return False def _clone_github_repo(url, out_dir): repo_name = os.path.basename(url) url_zip = url + "/archive/master.zip" if os.path.exists(out_dir): print( "The specified output directory already exists. Please choose a new directory." ) return parent_dir = os.path.dirname(out_dir) out_file_path = os.path.join(parent_dir, repo_name + ".zip") try: urllib.request.urlretrieve(url_zip, out_file_path) except Exception: print("The provided URL is invalid. Please double check the URL.") return with zipfile.ZipFile(out_file_path, "r") as zip_ref: zip_ref.extractall(parent_dir) src = out_file_path.replace(".zip", "-master") os.rename(src, out_dir) os.remove(out_file_path) def _is_tool(name): return shutil.which(name) is not None def random_string(string_length=3): import random import string letters = string.ascii_lowercase return "".join(random.choice(letters) for i in range(string_length)) def open_image_from_url(url): from PIL import Image import requests from io import BytesIO try: response = requests.get(url) img = Image.open(BytesIO(response.content)) return img except Exception as e: print(e) def show_image(img_path, width=None, height=None): from IPython.display import display try: out = widgets.Output() out.clear_output(wait=True) display(out) with out: file = open(img_path, "rb") image = file.read() if (width is None) and (height is None): display(widgets.Image(value=image)) elif (width is not None) and (height is not None): display(widgets.Image(value=image, width=width, height=height)) else: print("You need set both width and height.") return except Exception as e: raise Exception(e) def has_transparency(img): if img.mode == "P": transparent = img.info.get("transparency", -1) for _, index in img.getcolors(): if index == transparent: return True elif img.mode == "RGBA": extrema = img.getextrema() if extrema[3][0] < 255: return True return False def upload_to_imgur(in_gif): import subprocess pkg_name = "imgur-uploader" if not _is_tool(pkg_name): _check_install(pkg_name) try: IMGUR_API_ID = os.environ.get("IMGUR_API_ID", None) IMGUR_API_SECRET = os.environ.get("IMGUR_API_SECRET", None) credentials_path = os.path.join( os.path.expanduser("~"), ".config/imgur_uploader/uploader.cfg" ) if ( (IMGUR_API_ID is not None) and (IMGUR_API_SECRET is not None) ) or os.path.exists(credentials_path): proc = subprocess.Popen(["imgur-uploader", in_gif], stdout=subprocess.PIPE) for _ in range(0, 2): line = proc.stdout.readline() print(line.rstrip().decode("utf-8")) else: print( "Imgur API credentials could not be found. Please check https://pypi.org/project/imgur-uploader/ for instructions on how to get Imgur API credentials" ) return except Exception as e: raise Exception(e) def rgb_to_hex(rgb=(255, 255, 255)): return "%02x%02x%02x" % rgb
MIT License
dcos/dcos-launch
dcos_launch/platforms/gcp.py
Deployment.get_info
python
def get_info(self) -> dict:
    response = self.gcp_wrapper.deployment_manager.deployments().get(
        project=self.gcp_wrapper.project_id, deployment=self.name).execute()
    log.debug('get_info response: ' + str(response))
    return response
Returns the dictionary representation of a GCE deployment resource. For details on the contents of this resource, see https://cloud.google.com/deployment-manager/docs/reference/latest/deployments#resource
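A minimal usage sketch for Deployment.get_info, assuming a valid GCP service-account key and an existing deployment; the key file path and deployment name below are hypothetical.

import json

from dcos_launch.platforms.gcp import GcpWrapper, Deployment

# Hypothetical service-account key exported from the GCP console.
with open("service_account.json") as f:
    credentials_dict = json.load(f)

wrapper = GcpWrapper(credentials_dict)
deployment = Deployment(wrapper, "my-dcos-cluster")  # hypothetical deployment name

info = deployment.get_info()
print(info["name"])  # resource fields as documented in the linked GCE reference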
https://github.com/dcos/dcos-launch/blob/5df9c68a5a5f41a5bf201b961eb54140ac324635/dcos_launch/platforms/gcp.py#L275-L282
import copy import logging import typing from functools import wraps import yaml from googleapiclient import discovery from googleapiclient.errors import HttpError from oauth2client.service_account import ServiceAccountCredentials from retrying import retry from dcos_test_utils.helpers import Host log = logging.getLogger(__name__) OS_IMAGE_FAMILIES = { 'cent-os-7': 'centos-7', 'ubuntu-16-04': 'ubuntu-1604-lts', 'coreos': 'coreos-stable', } INSTANCE_TEMPLATE = """ type: compute.v1.instanceTemplate name: {name} metadata: dependsOn: - {network} properties: project: {project} properties: machineType: {machineType} disks: - deviceName: boot type: PERSISTENT boot: true autoDelete: true initializeParams: diskSizeGb: {diskSizeGb} diskType: {diskType} sourceImage: projects/{imageProject}/global/images/{sourceImage} networkInterfaces: - network: global/networks/{network} # Access Config required to give the instance a public IP address accessConfigs: - name: External NAT type: ONE_TO_ONE_NAT metadata: items: - key: ssh-keys value: {ssh_user}:{ssh_public_key} scheduling: preemptible: {usePreemptibleVMs} tags: items: - {deploymentName} """ NETWORK_TEMPLATE = """ type: compute.v1.network name: {name} properties: autoCreateSubnetworks: True """ MANAGED_INSTANCE_GROUP_TEMPLATE = """ type: compute.v1.instanceGroupManager name: {name} metadata: dependsOn: - {instance_template_name} properties: baseInstanceName: vm instanceTemplate: global/instanceTemplates/{instance_template_name} zone: {zone} targetSize: {size} """ EXTERNAL_FIREWALL_TEMPLATE = """ type: compute.v1.firewall name: {name}-external metadata: dependsOn: - {network} properties: description: external network: global/networks/{network} sourceRanges: - 0.0.0.0/0 allowed: - IPProtocol: tcp ports: - 22 - 80 - 443 - 61001 - IPProtocol: icmp """ INTERNAL_FIREWALL_TEMPLATE = """ type: compute.v1.firewall name: {name}-internal metadata: dependsOn: - {network} properties: description: internal network: global/networks/{network} sourceTags: - {deploymentName} allowed: - IPProtocol: all """ IGNITION_CONFIG = """ { "ignition": { "version": "2.0.0", "config": {} }, "storage": {}, "systemd": { "units": [ { "name": "update-engine.service", "mask": true }, { "name": "locksmithd.service", "mask": true } ] }, "networkd": {}, "passwd": {} } """ def tag_dict_to_gce_format(tags: dict): return [{'key': k, 'value': v} for k, v in tags.items()] def catch_http_exceptions(f): @wraps(f) def handle_exception(*args, **kwargs): try: return f(*args, **kwargs) except HttpError as e: if e.resp.status == 404: log.exception("The resource you are trying to access doesn't exist") elif e.resp.status == 409: log.exception('''The specified resources exist and might be under an active operation (operation conflict)''') raise e return handle_exception class GcpWrapper: @catch_http_exceptions def __init__(self, credentials_dict): credentials = ServiceAccountCredentials.from_json_keyfile_dict( credentials_dict, scopes='https://www.googleapis.com/auth/cloud-platform') self.compute = discovery.build('compute', 'v1', credentials=credentials) self.deployment_manager = discovery.build('deploymentmanager', 'v2', credentials=credentials) self.project_id = credentials_dict['project_id'] @catch_http_exceptions def get_instance_info(self, name: str, zone: str): response = self.compute.instances().get(project=self.project_id, zone=zone, instance=name).execute() log.debug('get_instance_info response: ' + str(response)) return response @catch_http_exceptions def list_group_instances(self, group_name: 
str, zone: str) -> typing.Iterator[dict]: response = self.compute.instanceGroupManagers().listManagedInstances(project=self.project_id, zone=zone, instanceGroupManager=group_name).execute() log.debug('list_group_instances response: ' + str(response)) for instance in response.get('managedInstances', []): yield instance @retry(wait_fixed=2000, retry_on_result=lambda res: res is None, stop_max_delay=30 * 1000) def get_instance_network_properties(self, instance_name: str, zone: str) -> dict: network_info = self.get_instance_info(instance_name, zone)['networkInterfaces'][0] if 'networkIP' not in network_info or 'accessConfigs' not in network_info: return None if 'natIP' not in network_info['accessConfigs'][0]: return None return network_info @catch_http_exceptions def create_deployment(self, name: str, deployment_config: dict, tags: dict=None): if tags is None: tags = dict() body = { 'name': name, 'description': """{"cluster_type": "DC/OS Onprem on GCE"}""", 'target': { 'config': { 'content': yaml.dump(deployment_config, default_flow_style=False)} }, 'labels': tag_dict_to_gce_format(tags) } log.info('Creating GCE deployment...') response = self.deployment_manager.deployments().insert( project=self.project_id, body=body).execute() log.debug('create_deployment response: ' + str(response)) @catch_http_exceptions def get_deployments(self): request = self.deployment_manager.deployments().list(project=self.project_id) while request is not None: response = request.execute() for deployment_info in response.get('deployments', []): if deployment_info['operation']['operationType'] == 'deleted': continue deployment = Deployment(self, deployment_info['name']) zone = None for r in deployment.get_resources()['resources']: if r['type'] == 'compute.v1.instanceGroupManager': zone = r.get('properties', {}).get('zone') if zone is None: yield deployment else: yield BareClusterDeployment(self, deployment_info['name'], zone) request = self.deployment_manager.deployments().list_next(previous_request=request, previous_response=response) class Deployment: def __init__(self, gcp_wrapper: GcpWrapper, name: str): self.gcp_wrapper = gcp_wrapper self.name = name @catch_http_exceptions def delete(self): response = self.gcp_wrapper.deployment_manager.deployments().delete(project=self.gcp_wrapper.project_id, deployment=self.name).execute() log.debug('delete response: ' + str(response)) @catch_http_exceptions
Apache License 2.0
aspose-words-cloud/aspose-words-cloud-python
asposewordscloud/models/drawing_object_insert.py
DrawingObjectInsert.__eq__
python
def __eq__(self, other):
    if not isinstance(other, DrawingObjectInsert):
        return False

    return self.__dict__ == other.__dict__
Returns true if both objects are equal
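A small sketch of the equality semantics described above; the import path and attribute values are assumptions based on the module layout and allowed values shown in the context.

from asposewordscloud.models.drawing_object_insert import DrawingObjectInsert

a = DrawingObjectInsert(height=100.0, width=200.0, wrap_type="Inline")
b = DrawingObjectInsert(height=100.0, width=200.0, wrap_type="Inline")
c = DrawingObjectInsert(height=100.0, width=200.0, wrap_type="Square")

print(a == b)  # True  -- identical attribute dictionaries
print(a == c)  # False -- wrap_type differs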
https://github.com/aspose-words-cloud/aspose-words-cloud-python/blob/abf8fccfed40aa2b09c6cdcaf3f2723e1f412d85/asposewordscloud/models/drawing_object_insert.py#L360-L365
import pprint import re import datetime import six import json class DrawingObjectInsert(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'height': 'float', 'left': 'float', 'position': 'DocumentPosition', 'relative_horizontal_position': 'str', 'relative_vertical_position': 'str', 'top': 'float', 'width': 'float', 'wrap_type': 'str' } attribute_map = { 'height': 'Height', 'left': 'Left', 'position': 'Position', 'relative_horizontal_position': 'RelativeHorizontalPosition', 'relative_vertical_position': 'RelativeVerticalPosition', 'top': 'Top', 'width': 'Width', 'wrap_type': 'WrapType' } def __init__(self, height=None, left=None, position=None, relative_horizontal_position=None, relative_vertical_position=None, top=None, width=None, wrap_type=None): self._height = None self._left = None self._position = None self._relative_horizontal_position = None self._relative_vertical_position = None self._top = None self._width = None self._wrap_type = None self.discriminator = None if height is not None: self.height = height if left is not None: self.left = left if position is not None: self.position = position if relative_horizontal_position is not None: self.relative_horizontal_position = relative_horizontal_position if relative_vertical_position is not None: self.relative_vertical_position = relative_vertical_position if top is not None: self.top = top if width is not None: self.width = width if wrap_type is not None: self.wrap_type = wrap_type @property def height(self): return self._height @height.setter def height(self, height): self._height = height @property def left(self): return self._left @left.setter def left(self, left): self._left = left @property def position(self): return self._position @position.setter def position(self, position): self._position = position @property def relative_horizontal_position(self): return self._relative_horizontal_position @relative_horizontal_position.setter def relative_horizontal_position(self, relative_horizontal_position): allowed_values = ["Margin", "Page", "Column", "Default", "Character", "LeftMargin", "RightMargin", "InsideMargin", "OutsideMargin"] if not relative_horizontal_position.isdigit(): if relative_horizontal_position not in allowed_values: raise ValueError( "Invalid value for `relative_horizontal_position` ({0}), must be one of {1}" .format(relative_horizontal_position, allowed_values)) self._relative_horizontal_position = relative_horizontal_position else: self._relative_horizontal_position = allowed_values[int(relative_horizontal_position) if six.PY3 else long(relative_horizontal_position)] @property def relative_vertical_position(self): return self._relative_vertical_position @relative_vertical_position.setter def relative_vertical_position(self, relative_vertical_position): allowed_values = ["Margin", "TableDefault", "Page", "Paragraph", "TextFrameDefault", "Line", "TopMargin", "BottomMargin", "InsideMargin", "OutsideMargin"] if not relative_vertical_position.isdigit(): if relative_vertical_position not in allowed_values: raise ValueError( "Invalid value for `relative_vertical_position` ({0}), must be one of {1}" .format(relative_vertical_position, allowed_values)) self._relative_vertical_position = relative_vertical_position else: self._relative_vertical_position = allowed_values[int(relative_vertical_position) if six.PY3 else long(relative_vertical_position)] @property 
def top(self): return self._top @top.setter def top(self, top): self._top = top @property def width(self): return self._width @width.setter def width(self, width): self._width = width @property def wrap_type(self): return self._wrap_type @wrap_type.setter def wrap_type(self, wrap_type): allowed_values = ["Inline", "TopBottom", "Square", "None", "Tight", "Through"] if not wrap_type.isdigit(): if wrap_type not in allowed_values: raise ValueError( "Invalid value for `wrap_type` ({0}), must be one of {1}" .format(wrap_type, allowed_values)) self._wrap_type = wrap_type else: self._wrap_type = allowed_values[int(wrap_type) if six.PY3 else long(wrap_type)] def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if value is None: continue if isinstance(value, list): result[self.attribute_map[attr]] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[self.attribute_map[attr]] = value.to_dict() elif isinstance(value, dict): result[self.attribute_map[attr]] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) elif isinstance(value, (datetime.datetime, datetime.date)): result[self.attribute_map[attr]] = value.isoformat() else: result[self.attribute_map[attr]] = value return result def to_json(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[self.attribute_map[attr]] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[self.attribute_map[attr]] = value.to_dict() elif isinstance(value, dict): result[self.attribute_map[attr]] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) elif isinstance(value, (datetime.datetime, datetime.date)): result[self.attribute_map[attr]] = value.isoformat() else: result[self.attribute_map[attr]] = value return json.dumps(result) def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
MIT License
seldomqa/seldom
seldom/testdata/parameterization.py
data
python
def data(input, name_func=None, doc_func=None, skip_on_empty=False, **legacy):
    if "testcase_func_name" in legacy:
        warnings.warn("testcase_func_name= is deprecated; use name_func=",
                      DeprecationWarning, stacklevel=2)
        if not name_func:
            name_func = legacy["testcase_func_name"]

    if "testcase_func_doc" in legacy:
        warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
                      DeprecationWarning, stacklevel=2)
        if not doc_func:
            doc_func = legacy["testcase_func_doc"]

    doc_func = doc_func or default_doc_func
    name_func = name_func or default_name_func

    def parameterized_expand_wrapper(f, instance=None):
        stack = inspect.stack()
        frame = stack[1]
        frame_locals = frame[0].f_locals

        parameters = parameterized.input_as_callable(input)()

        if not parameters:
            if not skip_on_empty:
                raise ValueError(
                    "Parameters iterable is empty (hint: use "
                    "`parameterized.expand([], skip_on_empty=True)` to skip "
                    "this test when the input is empty)"
                )
            return wraps(f)(lambda: skip_on_empty_helper())

        digits = len(str(len(parameters) - 1))
        for num, p in enumerate(parameters):
            name = name_func(f, "{num:0>{digits}}".format(digits=digits, num=num), p)
            nf = reapply_patches_if_need(f)
            frame_locals[name] = parameterized.param_as_standalone_func(p, nf, name)
            frame_locals[name].__doc__ = doc_func(f, num, p)

        delete_patches_if_need(f)
        f.__test__ = False

    return parameterized_expand_wrapper
A "brute force" method of parameterizing test cases. Creates new test cases
and injects them into the namespace that the wrapped function is being
defined in. Useful for parameterizing tests in subclasses of 'UnitTest',
where Nose test generators don't work.

>> @data([("foo", 1, 2)])
... def test_add1(name, input, expected):
...     actual = add1(input)
...     assert_equal(actual, expected)
...
>>
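A hedged usage sketch that mirrors the docstring's add1 example inside a unittest.TestCase; add1 is a stand-in function, and the import path is assumed from the module shown in the context.

import unittest

from seldom.testdata import data


def add1(x):
    return x + 1


class TestAdd(unittest.TestCase):

    @data([("one", 1, 2), ("two", 2, 3)])
    def test_add1(self, name, value, expected):
        # Each tuple becomes its own injected test method.
        self.assertEqual(add1(value), expected)


if __name__ == "__main__":
    unittest.main()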
https://github.com/seldomqa/seldom/blob/b8d392cfc6a0e67ceb959f9fd99932cc6443aa62/seldom/testdata/parameterization.py#L73-L135
import os import inspect as sys_inspect import warnings from functools import wraps from parameterized.parameterized import inspect from parameterized.parameterized import parameterized from parameterized.parameterized import default_doc_func from parameterized.parameterized import default_name_func from parameterized.parameterized import skip_on_empty_helper from parameterized.parameterized import reapply_patches_if_need from parameterized.parameterized import delete_patches_if_need from parameterized import parameterized_class from seldom.testdata import conversion from seldom.logging.exceptions import FileTypeError __all__ = [ "file_data", "data", "data_class" ] def file_data(file, line=1, sheet="Sheet1", key=None): if file is None: raise FileExistsError("File name does not exist.") if os.path.isfile(file) is True: file_path = file else: stack_t = sys_inspect.stack() ins = sys_inspect.getframeinfo(stack_t[1][0]) file_dir = os.path.dirname(os.path.dirname(os.path.abspath(ins.filename))) file_path = None for root, dirs, files in os.walk(file_dir, topdown=False): for f in files: if f == file: file_path = os.path.join(root, file) break else: continue break suffix = file.split(".")[-1] if suffix == "csv": data_list = conversion.csv_to_list(file_path, line=line) elif suffix == "xlsx": data_list = conversion.excel_to_list(file_path, sheet=sheet, line=line) elif suffix == "json": data_list = conversion.json_to_list(file_path, key=key) elif suffix == "yaml": data_list = conversion.yaml_to_list(file_path, key=key) else: raise FileTypeError("Your file is not supported: {}".format(file)) return data(data_list)
Apache License 2.0
devicewise/device-cloud-python
share/tests/qos_script.py
properties
python
def properties(session_id, thing_key, end_time):
    data_params = {"thingKey": thing_key, "key": prop_name,
                   "start": start_time, "end": end_time}
    data = {"cmd": {"command": "property.history", "params": data_params}}
    return _send(data, session_id)
Get published properties within the given time frame
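For reference, a sketch of the property.history payload that properties() assembles before passing it to _send(); the thing key and timestamps below are illustrative placeholders, while prop_name and start_time come from module-level globals in the script.

payload = {
    "cmd": {
        "command": "property.history",
        "params": {
            "thingKey": "my-thing-key",               # placeholder thing key
            "key": "property",                        # module-level prop_name
            "start": "2021-01-01T00:00:00.000000Z",   # placeholder window start
            "end": "2021-01-01T01:00:00.000000Z",     # placeholder window end
        },
    }
}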
https://github.com/devicewise/device-cloud-python/blob/25f16828d2edfdd272a5ad5d8cd91b630c018afa/share/tests/qos_script.py#L142-L149
import socket import subprocess import datetime import random import getpass import json import os import sys import requests from time import sleep from threading import Thread if sys.version_info.major == 2: input = raw_input import queue as queue else: import queue pycommand = "python" if sys.version_info.major == 3 and sys.platform.startswith("linux"): pycommand = "python3" queue = queue.Queue() varray = [] cloud = "core-api.hdcstg.net" app_file = "qos_app.py" app = pycommand+" "+app_file prop_name = "property" def connect(): try: socket.setdefaulttimeout(60) server_socket = socket.socket() server_socket.bind(('localhost', 8080)) server_socket.listen(2) conn, address = server_socket.accept() print ("Connected by {}".format(address)) queue.put(conn) except: print ("Could not connect. Exiting.") sys.exit() def server_program(conn): counter = random.randint(2, 6) print ("Doing {} rounds of tests..".format(counter)) while (counter > 0): dur = random.randint(2, 10) conn.send(str(dur).encode()) print ("**Testing Connection Loss** - Sending {} values".format(dur)) while dur > 0: sys.stdout.write("%i \r" % dur) sys.stdout.flush() value = float(conn.recv(1024).decode()) varray.extend([value]) dur -= 1 counter -= 1 def quit_app(): conn.send(("close").encode()) sleep(2) app.terminate() app.wait() conn.close() def validate(varray, session_id, thing_key): t = datetime.datetime.utcnow() end_time = t.strftime("%Y-%m-%dT%H:%M:%S.%fZ") prop = None values = 0 amount = len(varray) print ("Total values = {}".format(amount)) for i in range(10): prop_info = properties(session_id, thing_key, end_time) if (prop_info["success"] == True) and prop_info["params"]: values = prop_info["params"]["values"] if values: for v in values: value = v["value"] time = v["ts"] time = datetime.datetime.strptime(time, "%Y-%m-%dT%H:%M:%SZ") if value in varray: print ("Found a match for {} at {}".format(value, time)) varray.remove(value) if varray: print ("ERROR: Not all values have been published!") if i == 1: break print ("Trying again in 30 second..") sleep(30) else: break else: print ("No published values found!") if i == 1: break print ("Trying again in 30 second..") sleep(30) else: print ("ERROR: Property or published values not found in Cloud!") if i == 1: break print ("Trying again in 30 second..") sleep(30) if varray: print ("\n{} value(s) were not published: \n{}".format(len(varray), varray)) else: print ("\nAll {} values have been succesfully published!".format(amount))
Apache License 2.0
tensorflow/estimator
tensorflow_estimator/python/estimator/head/sequential_head.py
_flatten_tensor
python
def _flatten_tensor(tensor, sequence_mask, expected_length):
    shape = tensor.get_shape()
    if shape.ndims < 2:
        raise ValueError('Input tensor expected to have at least 2 dimensions, '
                         'got {} instead.'.format(shape.ndims))
    if isinstance(tensor, tf.sparse.SparseTensor):
        flat_tensor = tf.sparse.reorder(tensor).values
        if shape.ndims > 2:
            new_shape = tf.concat([[-1], shape[2:]], axis=0)
            flat_tensor = tf.reshape(tensor.values, new_shape)
    elif isinstance(tensor, tf.Tensor):
        flat_tensor = tf.boolean_mask(tensor, sequence_mask)
    else:
        raise ValueError('`tensor` expected to be a `Tensor` or `SparseTensor` '
                         'got `{}` instead.'.format(tensor))
    if shape.ndims == 2:
        flat_tensor = tf.compat.v1.expand_dims(flat_tensor, -1)
        expected_shape = tf.concat([[expected_length], [1]], axis=0)
    else:
        expected_shape = tf.concat([[expected_length], shape[2:]], axis=0)

    err_message = 'Tensor shape is incompatible with provided mask.'
    if tf.executing_eagerly():
        if flat_tensor._shape_tuple() != tuple(expected_shape.numpy()):
            raise ValueError(err_message)
        return flat_tensor
    with tf.control_dependencies([
        tf.compat.v1.debugging.assert_equal(
            tf.compat.v1.shape(flat_tensor), expected_shape, message=err_message)
    ]):
        return tf.identity(flat_tensor)
Flattens the first two dimensions and reshapes a tensor or sparse tensor.

If `tensor` is a dense tensor, the sequence_mask is used to infer valid inputs.

Note: If `tensor` is a `SparseTensor` and the indices are not sorted, they will
be reordered.

Args:
    tensor: A `Tensor` or `SparseTensor` of dimension at least 2, of shape
        [batch_size, seq_length, D0, D1, ..., DN].
    sequence_mask: A boolean `Tensor` of shape [batch_size, seq_length].
    expected_length: An integer scalar `Tensor` with the expected length of
        the resulting flattened Tensor.

Returns:
    A `Tensor` object of shape [expected_length, D0, D1, ..., DN].

Raises:
    ValueError: If `tensor` has fewer than 2 dimensions.
    ValueError: If `tensor` is not a `Tensor` or `SparseTensor` object.
    InvalidArgumentError: If the resulting `Tensor` doesn't have the expected
        length.
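A hedged sketch of calling this (private) helper on a tiny batch, assuming TF 2.x eager execution and the module path shown above; the second sequence has one padded step, so five valid entries remain.

import tensorflow as tf

from tensorflow_estimator.python.estimator.head.sequential_head import _flatten_tensor

# [batch_size=2, seq_length=3, D0=1]; the last step of the second sequence is padding.
tensor = tf.constant([[[1.0], [2.0], [3.0]],
                      [[4.0], [5.0], [0.0]]])
sequence_mask = tf.constant([[True, True, True],
                             [True, True, False]])

flat = _flatten_tensor(tensor, sequence_mask, expected_length=5)
print(flat.shape)  # (5, 1) -- the padded step is dropped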
https://github.com/tensorflow/estimator/blob/fe1b51e15bade17d8b5085780c9ab78a6a961963/tensorflow_estimator/python/estimator/head/sequential_head.py#L437-L494
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six import tensorflow as tf if six.PY3: from collections.abc import Iterable else: from collections import Iterable from tensorflow.python.framework import ops from tensorflow_estimator.python.estimator.head import base_head from tensorflow_estimator.python.estimator.head import multi_head from tensorflow_estimator.python.estimator.mode_keys import ModeKeys class _SequentialHead(base_head.Head): __metaclass__ = abc.ABCMeta @abc.abstractproperty def input_sequence_mask_key(self): raise NotImplementedError('Calling an abstract method.') class SequentialHeadWrapper(_SequentialHead): def __init__(self, static_head, sequence_length_mask='sequence_length_mask', feature_columns=None): if not isinstance(sequence_length_mask, six.string_types): raise TypeError('`sequence_mask` column must be a string. ' 'Given type: {}.'.format(type(sequence_length_mask))) self._sequence_length_mask = sequence_length_mask feature_columns = feature_columns or [] if not isinstance(feature_columns, Iterable): raise TypeError('`feature_columns` must be either a string or an ' 'iterable of strings got {} instead.'.format( type(feature_columns))) if isinstance(feature_columns, six.string_types): self._feature_columns = [feature_columns] else: self._feature_columns = feature_columns for column in self._feature_columns: if not isinstance(column, six.string_types): raise TypeError('Column must a string. Given type: {}.'.format( type(column))) if isinstance(static_head, multi_head.MultiHead): raise ValueError( '`MultiHead` is not supported with `SequentialHeadWrapper`.') self._static_head = static_head super(SequentialHeadWrapper, self).__init__() def _flatten(self, labels, logits, features): if self.input_sequence_mask_key not in features: raise ValueError('The provided sequence_length_mask key `{}` should be ' 'included in the features dictionary, but was not ' 'found. 
Found keys: {}.'.format( self.input_sequence_mask_key, list(features.keys()))) sequence_mask = features[self.input_sequence_mask_key] if sequence_mask.get_shape().ndims != 2: raise ValueError('Mask is expected to have two dimensions, got ' '{} instead.'.format(sequence_mask.get_shape().ndims)) with ops.name_scope('flatten'): expected_length = tf.math.reduce_sum( tf.cast(sequence_mask, tf.dtypes.int32)) flat_logits = _flatten_tensor(logits, sequence_mask, expected_length) flat_labels = _flatten_tensor(labels, sequence_mask, expected_length) flat_features = {} for column in self._feature_columns: if column not in features: raise ValueError('`{}` column expected in features ' 'dictionary.'.format(column)) flat_features[column] = _flatten_tensor(features[column], sequence_mask, expected_length) return flat_labels, flat_logits, flat_features def loss(self, logits, labels, features=None, mode=None, regularization_losses=None): flat_labels, flat_logits, flat_features = self._flatten( labels, logits, features) return self._static_head.loss( logits=flat_logits, labels=flat_labels, features=flat_features, mode=mode, regularization_losses=regularization_losses) def create_estimator_spec(self, features, mode, logits, labels=None, optimizer=None, trainable_variables=None, train_op_fn=None, update_ops=None, regularization_losses=None): if mode == ModeKeys.PREDICT: spec = self._static_head.create_estimator_spec( features=features, mode=mode, logits=logits) spec.predictions[self.input_sequence_mask_key] = features[ self.input_sequence_mask_key] return spec._replace(predictions=spec.predictions) flat_labels, flat_logits, flat_features = self._flatten( labels, logits, features) return self._static_head.create_estimator_spec( features=flat_features, mode=mode, logits=flat_logits, trainable_variables=trainable_variables, labels=flat_labels, optimizer=optimizer, train_op_fn=train_op_fn, regularization_losses=regularization_losses, update_ops=update_ops) def update_metrics(self, eval_metrics, features, logits, labels, regularization_losses=None): flat_labels, flat_logits, flat_features = self._flatten( labels, logits, features) return self._static_head.update_metrics( eval_metrics=eval_metrics, features=flat_features, logits=flat_logits, labels=flat_labels, regularization_losses=regularization_losses) def _create_tpu_estimator_spec(self, features, mode, logits, labels=None, optimizer=None, trainable_variables=None, train_op_fn=None, update_ops=None, regularization_losses=None): raise NotImplementedError def predictions(self, logits, keys=None): return self._static_head.predictions(logits, keys=keys) def metrics(self, regularization_losses=None): return self._static_head.metrics(regularization_losses) @property def input_sequence_mask_key(self): return self._sequence_length_mask @property def logits_dimension(self): return self._static_head.logits_dimension @property def loss_reduction(self): return self._static_head.loss_reduction @property def name(self): if self._static_head.name: return '{}_sequential'.format(self._static_head.name) return None @property def static_head(self): return self._static_head
Apache License 2.0
constantinpape/elf
elf/parallel/io.py
copy
python
def copy(data, out, block_shape=None, n_threads=None, mask=None, verbose=False, roi=None):
    n_threads = multiprocessing.cpu_count() if n_threads is None else n_threads

    if out.shape != data.shape:
        raise ValueError(f"Output shape {out.shape} does not match input shape {data.shape}")
    if mask is not None and mask.shape != data.shape:
        raise ValueError(f"Invalid mask shape, got {mask.shape}, expected {data.shape} (= shape of first operand)")

    if block_shape is None:
        block_shape = out.chunks
    blocking = get_blocking(data, block_shape, roi)

    def _copy_block(block_id):
        block = blocking.getBlock(blockIndex=block_id)
        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
        if mask is not None:
            m = mask[bb]
            if m.sum() == 0:
                return
        block_data = data[bb]
        if mask is not None:
            block_data[m] = 0
        out[bb] = block_data

    n_blocks = blocking.numberOfBlocks
    with futures.ThreadPoolExecutor(n_threads) as tp:
        if verbose:
            list(tqdm(tp.map(_copy_block, range(n_blocks)), total=n_blocks))
        else:
            list(tp.map(_copy_block, range(n_blocks)))

    return out
Copy a dataset in parallel.

Arguments:
    data [array_like] - input data, numpy array or similar like h5py or zarr dataset
    out [array_like] - output dataset
    block_shape [tuple] - shape of the blocks used for parallelisation,
        by default chunks of the output will be used, if available (default: None)
    n_threads [int] - number of threads, by default all are used (default: None)
    mask [array_like] - mask to exclude data from the computation (default: None)
    verbose [bool] - verbosity flag (default: False)
    roi [tuple[slice]] - region of interest for this computation (default: None)

Returns:
    array_like - the copied dataset
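A minimal usage sketch, assuming zarr is available; the output path, dataset name and chunking are hypothetical, and the chunks double as the parallelisation block shape since block_shape is left at None.

import numpy as np
import zarr

from elf.parallel.io import copy

data = np.random.rand(256, 256).astype("float32")

f = zarr.open("example.zarr", mode="w")            # hypothetical output path
out = f.create_dataset("copied", shape=data.shape,
                       chunks=(64, 64), dtype=data.dtype)

copy(data, out, n_threads=4, verbose=True)
assert np.allclose(out[:], data)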
https://github.com/constantinpape/elf/blob/f423ea0815949533933bd169b58a464bb7f3bbc0/elf/parallel/io.py#L10-L59
import multiprocessing
from concurrent import futures
from functools import partial

from tqdm import tqdm
import numpy as np

from .common import get_blocking
MIT License
facebookresearch/dynabench
api/controllers/tasks.py
get_model_leaderboard
python
def get_model_leaderboard(tid):
    try:
        score = ScoreModel()
        limit, offset = util.get_limit_and_offset_from_request()
        query_result, total_count = score.getOverallModelPerfByTask(
            tid=tid, n=limit, offset=offset
        )
        return construct_model_board_response_json(
            query_result=query_result, total_count=total_count
        )
    except Exception as ex:
        logger.exception("Model leader board data loading failed: (%s)" % (ex))
        bottle.abort(400, "Invalid task detail")
Fetch the top-performing models based on their test scores.

:param tid: Task Id
:return: Json Object
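A hedged sketch of calling the route this handler serves (the GET /tasks/<tid:int>/models decorator appears at the end of the context below); the host, task id and paging parameters are assumptions, and the count/data keys are inferred from the user-board response builder shown in the same module.

import requests

# Hypothetical Dynabench API host and task id.
resp = requests.get(
    "https://api.example-dynabench.org/tasks/1/models",
    params={"limit": 5, "offset": 0},
)
resp.raise_for_status()
board = resp.json()
print(board["count"], len(board["data"]))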
https://github.com/facebookresearch/dynabench/blob/e534f68dd13796f39e8c9825affc36e1c959182f/api/controllers/tasks.py#L689-L706
import secrets from urllib.parse import parse_qs, quote import bottle import sqlalchemy as db import uuid import common.auth as _auth import common.helpers as util import ujson from common.logging import logger from models.dataset import Dataset, DatasetModel from models.leaderboard_configuration import LeaderboardConfigurationModel from models.leaderboard_snapshot import LeaderboardSnapshotModel from models.model import DeploymentStatusEnum, Model, ModelModel from models.round import Round, RoundModel from models.round_user_example_info import RoundUserExampleInfoModel from models.score import ScoreModel from models.task import Task, TaskModel from models.task_proposal import TaskProposal, TaskProposalModel from models.task_user_permission import TaskUserPermission from models.user import UserModel @bottle.put("/tasks/process_proposal/<tpid:int>") @_auth.requires_auth def process_proposal(credentials, tpid): um = UserModel() user = um.get(credentials["id"]) if not user.admin: bottle.abort(403, "Access denied") data = bottle.request.json if not util.check_fields(data, ["accept"]): bottle.abort(400, "Missing data") tpm = TaskProposalModel() tp = tpm.get(tpid) if data["accept"]: t = Task( task_code=tp.task_code, name=tp.name, desc=tp.desc, annotation_config_json=""" { "model_wrong_metric": {"type": "exact_match", "constructor_args": {"reference_names": ["label"]}}, "aggregation_metric": {"type": "dynascore", "constructor_args": {}}, "perf_metric": {"type": "macro_f1", "constructor_args": {"reference_name": "label"}}, "delta_metrics": [{"type": "fairness", "constructor_args": {}}, {"type": "robustness", "constructor_args": {}}], "context": [{"name": "context", "type": "string", "constructor_args": {"placeholder": "Enter context..."}}], "input": [{"name": "statement", "type": "string", "constructor_args": {"placeholder": "Enter statement..."}}, {"name": "label", "type": "target_label", "constructor_args": { "labels": ["negative", "positive", "neutral"]}}], "output": [ {"name": "label", "type": "target_label", "constructor_args": { "labels": ["negative", "positive", "neutral"]}}, {"name": "prob", "type": "multiclass_probs", "constructor_args": {"reference_name": "label"}} ], "metadata": { "create": [ {"name": "example_explanation", "type": "string", "constructor_args": {"placeholder": "Explain why your example is correct..."}, "display_name": "example explanation"}, {"name": "model_explanation_right", "type": "string", "constructor_args": {"placeholder": "Explain why you thought the model would make a mistake..."}, "model_wrong_condition": false, "display_name": "model explanation"}, {"name": "model_explanation_wrong", "type": "string", "constructor_args": {"placeholder": "Explain why you think the model made a mistake..."}, "model_wrong_condition": true, "display_name": "model explanation"} ], "validate": [ {"name": "corrected_label", "type": "multiclass", "constructor_args": { "labels": ["negative", "positive", "entailed"], "placeholder": "Enter corrected label" }, "validated_label_condition": "incorrect"}, {"name": "target_explanation", "type": "string", "constructor_args": {"placeholder": "Explain why your proposed target is correct..."}, "validated_label_condition": "incorrect"}, {"name": "flag_reason", "type": "string", "constructor_args": {"placeholder": "Enter the reason for flagging..."}, "validated_label_condition": "flagged"}, {"name": "validator_example_explanation", "type": "string", "constructor_args": {"placeholder": "Explain why the example is correct..."}, 
"validated_label_condition": "correct"}, {"name": "validator_model_explanation", "type": "string", "constructor_args": {"placeholder": "Enter what you think was done to try to trick the model..."}} ] } } """, cur_round=1, last_updated=db.sql.func.now(), ) tpm.dbs.add(t) tpm.dbs.flush() logger.info("Added task (%s)" % (t.id)) tup = TaskUserPermission(uid=tp.uid, type="owner", tid=t.id) tpm.dbs.add(tup) tpm.dbs.flush() logger.info("Added task owner") r = Round(tid=t.id, rid=1, secret=secrets.token_hex()) tpm.dbs.add(r) tpm.dbs.flush() tpm.dbs.commit() logger.info("Added round (%s)" % (r.id)) tpm.dbs.query(TaskProposal).filter(TaskProposal.id == tpid).delete() tpm.dbs.flush() tpm.dbs.commit() return util.json_encode({"success": "ok"}) @bottle.get("/tasks/owners/<tid:int>") @_auth.requires_auth def get_owners(credentials, tid): ensure_owner_or_admin(tid, credentials["id"]) tm = TaskModel() tups = tm.dbs.query(TaskUserPermission).filter( db.and_(TaskUserPermission.type == "owner", TaskUserPermission.tid == tid) ) um = UserModel() users = [] for obj in tups: user = um.get(obj.uid) users.append({"id": user.id, "username": user.username}) return util.json_encode(users) def ensure_owner_or_admin(tid, uid): um = UserModel() user = um.get(uid) if not user.admin: if not (tid, "owner") in [ (perm.tid, perm.type) for perm in user.task_permissions ]: bottle.abort( 403, "Access denied (you are not an admin or owner of this task)" ) @bottle.post("/tasks/<tid:int>/convert_to_model_io") def convert_to_model_io(tid): data = bottle.request.json tm = TaskModel() task = tm.get(tid) return util.json_encode(task.convert_to_model_io(data)) @bottle.get("/tasks/get_all_rounds/<tid:int>") @_auth.requires_auth def get_all_rounds(credentials, tid): ensure_owner_or_admin(tid, credentials["id"]) rm = RoundModel() r_dicts = [] for r in rm.getByTid(tid): r_dicts.append(r.to_dict()) r_dicts.sort(key=lambda r: r["rid"]) return util.json_encode(r_dicts) @bottle.get("/tasks/datasets/<tid:int>") @_auth.requires_auth def get_datasets(credentials, tid): dm = DatasetModel() dataset_list = [] datasets = dm.getByTid(tid) if datasets: for dataset in datasets: dataset_list.append(dataset.to_dict()) return util.json_encode(dataset_list) @bottle.get("/tasks/admin_or_owner/<tid:int>") @_auth.requires_auth def get_admin_or_owner(credentials, tid): um = UserModel() user = um.get(credentials["id"]) admin_or_owner = True if not user.admin: if not (tid, "owner") in [ (perm.tid, perm.type) for perm in user.task_permissions ]: admin_or_owner = False return util.json_encode({"admin_or_owner": admin_or_owner}) @bottle.post("/tasks/create_round/<tid:int>") @_auth.requires_auth def create_round(credentials, tid): ensure_owner_or_admin(tid, credentials["id"]) tm = TaskModel() task = tm.get(tid) task.cur_round += 1 tm.dbs.add(task) tm.dbs.flush() r = Round(tid=tid, rid=task.cur_round, secret=secrets.token_hex()) tm.dbs.add(r) tm.dbs.flush() tm.dbs.commit() logger.info("Added round (%s)" % (r.id)) return util.json_encode({"success": "ok"}) @bottle.put("/tasks/update_round/<tid:int>/<rid:int>") @_auth.requires_auth def update_round(credentials, tid, rid): data = bottle.request.json ensure_owner_or_admin(tid, credentials["id"]) rm = RoundModel() round = rm.getByTidAndRid(tid, rid) if "model_ids" in data: tm = TaskModel() task = tm.get(tid) endpoint_urls = [] for model_id in data["model_ids"]: mm = ModelModel() model = mm.get(model_id) if not model.is_published: bottle.abort(400, "Can't use an unpublished model as a target model") if model.tid != tid: 
bottle.abort( 400, "Can't add a model for another task as a target model" ) endpoint_url = ( "https://obws766r82.execute-api." + task.aws_region + ".amazonaws.com/predict?model=" + model.endpoint_name ) endpoint_urls.append(endpoint_url) if endpoint_urls == []: round.url = None else: round.url = "|".join(endpoint_urls) round.longdesc = data.get("longdesc", round.longdesc) rm.dbs.add(round) rm.dbs.flush() rm.dbs.commit() logger.info("Updated round (%s)" % (round.id)) return util.json_encode({"success": "ok"}) @bottle.get("/tasks/get_model_identifiers_for_target_selection/<tid:int>") @_auth.requires_auth def get_model_identifiers_for_target_selection(credentials, tid): ensure_owner_or_admin(tid, credentials["id"]) tm = TaskModel() task = tm.get(tid) mm = ModelModel() models = mm.getByTid(tid) rm = RoundModel() rounds = rm.getByTid(tid) rid_to_model_identifiers = {} for round in rounds: model_identifiers = [] for model in models: if ( model.endpoint_name is not None ): endpoint_url = ( "https://obws766r82.execute-api." + task.aws_region + ".amazonaws.com/predict?model=" + model.endpoint_name ) is_target = False if round.url is not None and endpoint_url in round.url: is_target = True if is_target or ( model.is_published and model.deployment_status == DeploymentStatusEnum.deployed ): model_identifiers.append( { "model_name": model.name, "model_id": model.id, "uid": model.uid, "username": model.user.username, "is_target": is_target, } ) rid_to_model_identifiers[round.rid] = model_identifiers return util.json_encode(rid_to_model_identifiers) @bottle.get("/tasks/get_model_identifiers/<tid:int>") @_auth.requires_auth def get_model_identifiers(credentials, tid): ensure_owner_or_admin(tid, credentials["id"]) mm = ModelModel() models = mm.getByTid(tid) model_identifiers = [] for model in models: model_identifiers.append( { "model_name": model.name, "model_id": model.id, "deployment_status": model.deployment_status.name, "is_published": model.is_published, "uid": model.uid, "username": model.user.username, } ) return util.json_encode(model_identifiers) @bottle.put("/tasks/toggle_owner/<tid:int>/<username>") @_auth.requires_auth def toggle_owner(credentials, tid, username): ensure_owner_or_admin(tid, credentials["id"]) um = UserModel() user_to_toggle = um.getByUsername(username) if (tid, "owner") in [ (perm.tid, perm.type) for perm in user_to_toggle.task_permissions ]: tup = ( um.dbs.query(TaskUserPermission) .filter( db.and_( TaskUserPermission.uid == user_to_toggle.id, TaskUserPermission.type == "owner", TaskUserPermission.tid == tid, ) ) .delete() ) um.dbs.flush() um.dbs.commit() logger.info("Removed task owner: " + username) else: tup = TaskUserPermission(uid=user_to_toggle.id, type="owner", tid=tid) um.dbs.add(tup) um.dbs.flush() um.dbs.commit() logger.info("Added task owner: " + username) return util.json_encode({"success": "ok"}) @bottle.put("/tasks/update/<tid:int>") @_auth.requires_auth def update(credentials, tid): ensure_owner_or_admin(tid, credentials["id"]) data = bottle.request.json for field in data: if field not in ( "unpublished_models_in_leaderboard", "validate_non_fooling", "num_matching_validations", "instructions_md", "predictions_upload_instructions_md", "hidden", "submitable", "create_endpoint", ): bottle.abort( 403, """Can only modify unpublished_models_in_leaderboard, validate_non_fooling, num_matching_validations, instructions_md, hidden, predictions_upload_instructions_md, submitable, create_endpoint""", ) tm = TaskModel() tm.update(tid, data) return 
util.json_encode({"success": "ok"}) @bottle.put("/tasks/activate/<tid:int>") @_auth.requires_auth def activate(credentials, tid): data = bottle.request.json if not util.check_fields(data, ["annotation_config_json"]): bottle.abort(400, "Missing data") ensure_owner_or_admin(tid, credentials["id"]) tm = TaskModel() task = tm.get(tid) if task.active: bottle.abort( 403, """Access denied. Cannot change the annotation_config_json of an already active task.""", ) try: Task.verify_annotation_config(ujson.loads(data["annotation_config_json"])) except Exception as ex: logger.exception("Invalid annotation config: (%s)" % (ex)) bottle.abort(400, "Invalid annotation config") tm.update( tid, {"annotation_config_json": data["annotation_config_json"], "active": True} ) return util.json_encode({"success": "ok"}) @bottle.get("/tasks") def tasks(): t = TaskModel() tasks = t.listWithRounds() return util.json_encode(tasks) @bottle.get("/tasks/submitable") def get_submitable_tasks(): t = TaskModel() tasks = t.listSubmitable() return util.json_encode(tasks) @bottle.get("/tasks/<task_id_or_code>") def get_task(task_id_or_code): t = TaskModel() task = t.getWithRoundAndMetricMetadata(task_id_or_code) if not task: bottle.abort(404, "Not found") return util.json_encode(task) @bottle.get("/tasks/<tid:int>/<rid:int>") def get_task_round(tid, rid): rm = RoundModel() round = rm.getByTidAndRid(tid, rid) if not round: bottle.abort(404, "Not found") return util.json_encode(round.to_dict()) @bottle.get("/tasks/<tid:int>/users") def get_user_leaderboard(tid): info = RoundUserExampleInfoModel() limit, offset = util.get_limit_and_offset_from_request() try: query_result, total_count = info.getUserLeaderByTid( tid=tid, n=limit, offset=offset ) return construct_user_board_response_json( query_result=query_result, total_count=total_count ) except Exception as ex: logger.exception("User leader board data loading failed: (%s)" % (ex)) bottle.abort(400, "Invalid task detail") @bottle.get("/tasks/<tid:int>/rounds/<rid:int>/users") def get_leaderboard_by_task_and_round(tid, rid): info = RoundUserExampleInfoModel() limit, offset = util.get_limit_and_offset_from_request() try: query_result, total_count = info.getUserLeaderByTidAndRid( tid=tid, rid=rid, n=limit, offset=offset ) return construct_user_board_response_json( query_result=query_result, total_count=total_count ) except Exception as ex: logger.exception("User leader board data loading failed: (%s)" % (ex)) bottle.abort(400, "Invalid task/round detail") @bottle.get("/tasks/<tid:int>/rounds/<rid:int>/export") @_auth.requires_auth def export_current_round_data(credentials, tid, rid): um = UserModel() user = um.get(credentials["id"]) if not user.admin: if (tid, "owner") not in [ (perm.tid, perm.type) for perm in user.task_permissions ]: bottle.abort(403, "Access denied") return util.json_encode(util.get_round_data_for_export(tid, rid)) @bottle.get("/tasks/<tid:int>/export") @_auth.requires_auth def export_task_data(credentials, tid): um = UserModel() user = um.get(credentials["id"]) if not user.admin: if (tid, "owner") not in [ (perm.tid, perm.type) for perm in user.task_permissions ]: bottle.abort(403, "Access denied") rm = RoundModel() example_and_validations_dicts = [] for rid in [round.rid for round in rm.getByTid(tid)]: example_and_validations_dicts += util.get_round_data_for_export(tid, rid) return util.json_encode(example_and_validations_dicts) def construct_user_board_response_json(query_result, total_count=0): list_objs = [] for result in query_result: obj = {} obj["uid"] = 
result[0] obj["username"] = result[1] obj["avatar_url"] = result[2] if result[2] is not None else "" obj["count"] = int(result[3]) obj["MER"] = str(round(result[4] * 100, 2)) obj["total"] = str(result[3]) + "/" + str(result[5]) list_objs.append(obj) if list_objs: resp_obj = {"count": total_count, "data": list_objs} return util.json_encode(resp_obj) else: resp_obj = {"count": 0, "data": []} return util.json_encode(resp_obj) @bottle.get("/tasks/<tid:int>/models/topleaderboardtags") def get_top_leaderboard_tags(tid): offset = 0 limit = 5 specific_tag = None query_dict = parse_qs(bottle.request.query_string) if "offset" in query_dict: offset = int(query_dict["offset"][0]) if "limit" in query_dict: limit = int(query_dict["limit"][0]) if "specific_tag" in query_dict: specific_tag = query_dict["specific_tag"][0] sm = ScoreModel() return sm.getLeaderboardTopPerformingTags(tid, limit, offset, specific_tag) @bottle.get("/tasks/<tid:int>/models/dynaboard") def get_dynaboard_info(tid): sort_by = "dynascore" sort_direction = "asc" offset = 0 limit = 5 query_dict = parse_qs(bottle.request.query_string) if "sort_by" in query_dict: sort_by = query_dict["sort_by"][0] if "sort_direction" in query_dict: sort_direction = query_dict["sort_direction"][0] if sort_direction != "asc" and sort_direction != "desc": bottle.abort(400, "unrecognized sort direction") if "offset" in query_dict: offset = int(query_dict["offset"][0]) if "limit" in query_dict: limit = int(query_dict["limit"][0]) if "ordered_metric_weights" in query_dict: ordered_metric_weights = [ float(weight) for weight in query_dict["ordered_metric_weights"][0].split("|") ] else: bottle.abort(400, "missing metric weight data") if "ordered_scoring_dataset_weights" in query_dict: ordered_dataset_weights = [ float(weight) for weight in query_dict["ordered_scoring_dataset_weights"][0].split("|") ] else: bottle.abort(400, "missing dataset weight data") return get_dynaboard_info_for_params( tid, ordered_metric_weights, ordered_dataset_weights, sort_by, sort_direction, limit, offset, ) def get_dynaboard_info_for_params( tid, ordered_metric_weights, ordered_dataset_weights, sort_by, sort_direction, limit, offset, ): if sort_direction == "asc": reverse_sort = False elif sort_direction == "desc": reverse_sort = True tm = TaskModel() t_dict = tm.getWithRoundAndMetricMetadata(tid) ordered_metrics = t_dict["ordered_metrics"] perf_metric_field_name = t_dict["perf_metric_field_name"] ordered_metric_and_weight = [ dict({"weight": weight}, **metric) for weight, metric in zip(ordered_metric_weights, ordered_metrics) ] ordered_did_and_weight = [ {"weight": weight, "did": did} for weight, did in zip( ordered_dataset_weights, [dataset["id"] for dataset in t_dict["ordered_scoring_datasets"]], ) ] sm = ScoreModel() return sm.getDynaboardByTask( tid, perf_metric_field_name, ordered_metric_and_weight, ordered_did_and_weight, sort_by, reverse_sort, limit, offset, ) @bottle.get("/tasks/<tid:int>/models")
MIT License
mozilla/mozillians
mozillians/users/models.py
UserProfile.get_annotated_access_groups
python
def get_annotated_access_groups(self):
    access_groups = self._get_annotated_groups().filter(group__is_access_group=True)
    annotated_access_groups = []

    for membership in access_groups:
        group = membership.group
        group.pending = (membership.status == GroupMembership.PENDING)
        group.pending_terms = (membership.status == GroupMembership.PENDING_TERMS)

        try:
            invite = Invite.objects.get(group=membership.group, redeemer=self)
        except Invite.DoesNotExist:
            invite = None

        if invite:
            group.inviter = invite.inviter

        annotated_access_groups.append(group)

    return annotated_access_groups
Return a list of all the visible access groups the user is a member of, or has a
pending membership in. Groups with a pending membership have a .pending attribute
set to True; all others have it set to False. Each group also carries an inviter
attribute identifying who invited the user to the group, when such an invite exists.
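A minimal usage sketch, assuming a configured Django environment for mozillians and an existing profile; the username is hypothetical, and the attributes read back are the ones the method attaches.

from mozillians.users.models import UserProfile

profile = UserProfile.objects.get(user__username="example-user")  # hypothetical user

for group in profile.get_annotated_access_groups():
    if group.pending:
        status = "pending"
    elif group.pending_terms:
        status = "pending terms"
    else:
        status = "member"
    inviter = getattr(group, "inviter", None)
    print(group.name, status, inviter)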
https://github.com/mozilla/mozillians/blob/bd5da47fef01e4e09d3bb8cb0799735bdfbeb3f9/mozillians/users/models.py#L664-L688
import logging import os import uuid from itertools import chain from django.conf import settings from django.contrib.auth.models import User from django.core.files.storage import default_storage from django.core.mail import send_mail from django.db import models from django.db.models import Q, Manager, ManyToManyField from django.utils.encoding import iri_to_uri from django.utils.http import urlquote from django.utils.timezone import now from django.utils.translation import ugettext as _, ugettext_lazy as _lazy from django.template.loader import get_template from product_details import product_details from PIL import Image from pytz import common_timezones from sorl.thumbnail import ImageField, get_thumbnail from waffle import switch_is_active from mozillians.common import utils from mozillians.common.templatetags.helpers import absolutify, gravatar from mozillians.common.templatetags.helpers import offset_of_timezone from mozillians.common.urlresolvers import reverse from mozillians.groups.models import (Group, GroupAlias, GroupMembership, Invite, Skill, SkillAlias) from mozillians.phonebook.validators import (validate_email, validate_twitter, validate_website, validate_username_not_url, validate_phone_number, validate_linkedin, validate_discord) from mozillians.users import get_languages_for_locale from mozillians.users.managers import (EMPLOYEES, MOZILLIANS, PRIVACY_CHOICES, PRIVACY_CHOICES_WITH_PRIVATE, PRIVATE, PUBLIC, PUBLIC_INDEXABLE_FIELDS, UserProfileQuerySet) from mozillians.users.tasks import send_userprofile_to_cis COUNTRIES = product_details.get_regions('en-US') AVATAR_SIZE = (300, 300) logger = logging.getLogger(__name__) ProfileManager = Manager.from_queryset(UserProfileQuerySet) def _calculate_photo_filename(instance, filename): return os.path.join(settings.USER_AVATAR_DIR, str(uuid.uuid4()) + '.jpg') class PrivacyField(models.PositiveSmallIntegerField): def __init__(self, *args, **kwargs): myargs = {'default': MOZILLIANS, 'choices': PRIVACY_CHOICES} myargs.update(kwargs) super(PrivacyField, self).__init__(*args, **myargs) class UserProfilePrivacyModel(models.Model): _privacy_level = None privacy_photo = PrivacyField() privacy_full_name = PrivacyField() privacy_full_name_local = PrivacyField() privacy_ircname = PrivacyField() privacy_email = PrivacyField(choices=PRIVACY_CHOICES_WITH_PRIVATE, default=MOZILLIANS) privacy_bio = PrivacyField() privacy_geo_city = PrivacyField() privacy_geo_region = PrivacyField() privacy_geo_country = PrivacyField() privacy_city = PrivacyField() privacy_region = PrivacyField() privacy_country = PrivacyField() privacy_groups = PrivacyField() privacy_skills = PrivacyField() privacy_languages = PrivacyField() privacy_date_mozillian = PrivacyField() privacy_timezone = PrivacyField() privacy_tshirt = PrivacyField(choices=((PRIVATE, _lazy(u'Private')),), default=PRIVATE) privacy_title = PrivacyField() privacy_story_link = PrivacyField() CACHED_PRIVACY_FIELDS = None class Meta: abstract = True @classmethod def clear_privacy_fields_cache(cls): cls.CACHED_PRIVACY_FIELDS = None @classmethod def privacy_fields(cls): if cls.CACHED_PRIVACY_FIELDS is None: privacy_fields = {} field_names = list(set(chain.from_iterable( (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) for field in cls._meta.get_fields() if not (field.many_to_one and field.related_model is None) ))) for name in field_names: if name.startswith('privacy_') or not 'privacy_%s' % name in field_names: continue field = cls._meta.get_field(name) if isinstance(field, 
ManyToManyField): default = field.remote_field.model.objects.none() else: default = field.get_default() privacy_fields[name] = default privacy_fields['email'] = u'' cls.CACHED_PRIVACY_FIELDS = privacy_fields return cls.CACHED_PRIVACY_FIELDS class UserProfile(UserProfilePrivacyModel): REFERRAL_SOURCE_CHOICES = ( ('direct', 'Mozillians'), ('contribute', 'Get Involved'), ) objects = ProfileManager() user = models.OneToOneField(User) full_name = models.CharField(max_length=255, default='', blank=False, verbose_name=_lazy(u'Full Name')) full_name_local = models.CharField(max_length=255, blank=True, default='', verbose_name=_lazy(u'Name in local language')) is_vouched = models.BooleanField( default=False, help_text='You can edit vouched status by editing invidual vouches') can_vouch = models.BooleanField( default=False, help_text='You can edit can_vouch status by editing invidual vouches') last_updated = models.DateTimeField(auto_now=True) groups = models.ManyToManyField(Group, blank=True, related_name='members', through=GroupMembership) skills = models.ManyToManyField(Skill, blank=True, related_name='members') bio = models.TextField(verbose_name=_lazy(u'Bio'), default='', blank=True) photo = ImageField(default='', blank=True, upload_to=_calculate_photo_filename) ircname = models.CharField(max_length=63, verbose_name=_lazy(u'IRC Nickname'), default='', blank=True) geo_country = models.ForeignKey('geo.Country', blank=True, null=True, on_delete=models.SET_NULL) geo_region = models.ForeignKey('geo.Region', blank=True, null=True, on_delete=models.SET_NULL) geo_city = models.ForeignKey('geo.City', blank=True, null=True, on_delete=models.SET_NULL) lat = models.FloatField(_lazy(u'Latitude'), blank=True, null=True) lng = models.FloatField(_lazy(u'Longitude'), blank=True, null=True) city = models.ForeignKey('cities_light.City', blank=True, null=True, on_delete=models.SET_NULL) region = models.ForeignKey('cities_light.Region', blank=True, null=True, on_delete=models.SET_NULL) country = models.ForeignKey('cities_light.Country', blank=True, null=True, on_delete=models.SET_NULL) basket_token = models.CharField(max_length=1024, default='', blank=True) date_mozillian = models.DateField('When was involved with Mozilla', null=True, blank=True, default=None) timezone = models.CharField(max_length=100, blank=True, default='', choices=zip(common_timezones, common_timezones)) tshirt = models.IntegerField( _lazy(u'T-Shirt'), blank=True, null=True, default=None, choices=( (1, _lazy(u'Fitted Small')), (2, _lazy(u'Fitted Medium')), (3, _lazy(u'Fitted Large')), (4, _lazy(u'Fitted X-Large')), (5, _lazy(u'Fitted XX-Large')), (6, _lazy(u'Fitted XXX-Large')), (7, _lazy(u'Straight-cut Small')), (8, _lazy(u'Straight-cut Medium')), (9, _lazy(u'Straight-cut Large')), (10, _lazy(u'Straight-cut X-Large')), (11, _lazy(u'Straight-cut XX-Large')), (12, _lazy(u'Straight-cut XXX-Large')) )) title = models.CharField(_lazy(u'What do you do for Mozilla?'), max_length=70, blank=True, default='') story_link = models.URLField( _lazy(u'Link to your contribution story'), help_text=_lazy(u'If you have created something public that ' u'tells the story of how you came to be a ' u'Mozillian, specify that link here.'), max_length=1024, blank=True, default='') referral_source = models.CharField(max_length=32, choices=REFERRAL_SOURCE_CHOICES, default='direct') auth0_user_id = models.CharField(max_length=1024, default='', blank=True) is_staff = models.BooleanField(default=False) def __unicode__(self): return self.display_name def 
get_absolute_url(self): return reverse('phonebook:profile_view', args=[self.user.username]) class Meta: db_table = 'profile' ordering = ['full_name'] def __getattribute__(self, attrname): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) privacy_fields = UserProfile.privacy_fields() privacy_level = _getattr('_privacy_level') special_functions = { 'accounts': '_accounts', 'alternate_emails': '_alternate_emails', 'email': '_primary_email', 'is_public_indexable': '_is_public_indexable', 'languages': '_languages', 'vouches_made': '_vouches_made', 'vouches_received': '_vouches_received', 'vouched_by': '_vouched_by', 'websites': '_websites', 'identity_profiles': '_identity_profiles' } if attrname in special_functions: return _getattr(special_functions[attrname]) if not privacy_level or attrname not in privacy_fields: return _getattr(attrname) field_privacy = _getattr('privacy_%s' % attrname) if field_privacy < privacy_level: return privacy_fields.get(attrname) return _getattr(attrname) def _filter_accounts_privacy(self, accounts): if self._privacy_level: return accounts.filter(privacy__gte=self._privacy_level) return accounts @property def _accounts(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) excluded_types = [ExternalAccount.TYPE_WEBSITE, ExternalAccount.TYPE_EMAIL] accounts = _getattr('externalaccount_set').exclude(type__in=excluded_types) return self._filter_accounts_privacy(accounts) @property def _alternate_emails(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) accounts = _getattr('externalaccount_set').filter(type=ExternalAccount.TYPE_EMAIL) return self._filter_accounts_privacy(accounts) @property def _api_alternate_emails(self): legacy_emails_qs = self._alternate_emails idp_qs = self._identity_profiles e_exclude = [e.id for e in legacy_emails_qs if idp_qs.filter(email=e.identifier, privacy__gte=e.privacy).exists()] legacy_emails_qs = legacy_emails_qs.exclude(id__in=e_exclude) idp_exclude = [i.id for i in idp_qs if legacy_emails_qs.filter(identifier=i.email, privacy__gte=i.privacy).exists()] idp_qs = idp_qs.exclude(id__in=idp_exclude) return chain(legacy_emails_qs, idp_qs) @property def _identity_profiles(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) accounts = _getattr('idp_profiles').all() return self._filter_accounts_privacy(accounts) @property def _is_public_indexable(self): for field in PUBLIC_INDEXABLE_FIELDS: if getattr(self, field, None) and getattr(self, 'privacy_%s' % field, None) == PUBLIC: return True return False @property def _languages(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) if self._privacy_level > _getattr('privacy_languages'): return _getattr('language_set').none() return _getattr('language_set').all() @property def _primary_email(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) privacy_fields = UserProfile.privacy_fields() if self._privacy_level: if self.idp_profiles.exists(): contact_ids = self.identity_profiles.filter(primary_contact_identity=True) if contact_ids.exists(): return contact_ids[0].email return '' if _getattr('privacy_email') < self._privacy_level: return privacy_fields['email'] if self.idp_profiles.filter(primary_contact_identity=True).exists(): return self.idp_profiles.filter(primary_contact_identity=True)[0].email return _getattr('user').email @property def _vouched_by(self): privacy_level = self._privacy_level voucher = (UserProfile.objects.filter(vouches_made__vouchee=self) 
.order_by('vouches_made__date')) if voucher.exists(): voucher = voucher[0] if privacy_level: voucher.set_instance_privacy_level(privacy_level) for field in UserProfile.privacy_fields(): if getattr(voucher, 'privacy_%s' % field) >= privacy_level: return voucher return None return voucher return None def _vouches(self, type): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) vouch_ids = [] for vouch in _getattr(type).all(): vouch.vouchee.set_instance_privacy_level(self._privacy_level) for field in UserProfile.privacy_fields(): if getattr(vouch.vouchee, 'privacy_%s' % field, 0) >= self._privacy_level: vouch_ids.append(vouch.id) vouches = _getattr(type).filter(pk__in=vouch_ids) return vouches @property def _vouches_made(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) if self._privacy_level: return self._vouches('vouches_made') return _getattr('vouches_made') @property def _vouches_received(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) if self._privacy_level: return self._vouches('vouches_received') return _getattr('vouches_received') @property def _websites(self): _getattr = (lambda x: super(UserProfile, self).__getattribute__(x)) accounts = _getattr('externalaccount_set').filter(type=ExternalAccount.TYPE_WEBSITE) return self._filter_accounts_privacy(accounts) @property def display_name(self): return self.full_name @property def privacy_level(self): if (self.user.groups.filter(name='Managers').exists() or self.user.is_superuser): return PRIVATE if self.groups.filter(name='staff').exists(): return EMPLOYEES if self.is_vouched: return MOZILLIANS return PUBLIC @property def is_complete(self): return self.display_name.strip() != '' @property def is_public(self): for field in type(self).privacy_fields(): if getattr(self, 'privacy_%s' % field, None) == PUBLIC: return True return False @property def is_manager(self): return self.user.is_superuser or self.user.groups.filter(name='Managers').exists() @property def is_nda(self): query = { 'userprofile__pk': self.pk, 'status': GroupMembership.MEMBER } is_nda_member = (GroupMembership.objects.filter(Q(group__name=settings.NDA_GROUP) | Q(group__name=settings.NDA_STAFF_GROUP)) .filter(**query).exists()) return is_nda_member or self.user.is_superuser @property def date_vouched(self): vouches = self.vouches_received.all().order_by('date')[:1] if vouches: return vouches[0].date return None @property def can_create_access_groups(self): emails = set( [idp.email for idp in IdpProfile.objects.filter(profile=self, type=IdpProfile.PROVIDER_LDAP) if idp.email.split('@')[1] in settings.AUTO_VOUCH_DOMAINS] ) if self.user.is_superuser or emails: return True return False def can_join_access_groups(self): if self.can_create_access_groups or self.is_nda: return True return False def set_instance_privacy_level(self, level): self._privacy_level = level def set_privacy_level(self, level, save=True): for field in type(self).privacy_fields(): setattr(self, 'privacy_%s' % field, level) if save: self.save() def set_membership(self, model, membership_list): if model is Group: m2mfield = self.groups alias_model = GroupAlias elif model is Skill: m2mfield = self.skills alias_model = SkillAlias if model is Group: (GroupMembership.objects.filter(userprofile=self, group__visible=True) .exclude(group__name__in=membership_list).delete()) else: m2mfield.remove(*[g for g in m2mfield.all() if g.name not in membership_list and g.is_visible]) groups_to_add = [] for g in membership_list: if 
alias_model.objects.filter(name=g).exists(): group = alias_model.objects.get(name=g).alias else: group = model.objects.create(name=g) if group.is_visible: groups_to_add.append(group) if model is Group: for group in groups_to_add: group.add_member(self) else: m2mfield.add(*groups_to_add) def get_photo_thumbnail(self, geometry='160x160', **kwargs): if 'crop' not in kwargs: kwargs['crop'] = 'center' if self.photo and default_storage.exists(self.photo.name): try: image_obj = Image.open(self.photo) except IOError: return get_thumbnail(settings.DEFAULT_AVATAR_PATH, geometry, **kwargs) if image_obj.mode == 'RGBA': new_fh = default_storage.open(self.photo.name, 'w') converted_image_obj = image_obj.convert('RGB') converted_image_obj.save(new_fh, 'JPEG') new_fh.close() return get_thumbnail(self.photo, geometry, **kwargs) return get_thumbnail(settings.DEFAULT_AVATAR_PATH.format(), geometry, **kwargs) def get_photo_url(self, geometry='160x160', **kwargs): privacy_level = getattr(self, '_privacy_level', MOZILLIANS) if (not self.photo and self.privacy_photo >= privacy_level): return gravatar(self.email, size=geometry) photo_url = self.get_photo_thumbnail(geometry, **kwargs).url if photo_url.startswith('https://') or photo_url.startswith('http://'): return photo_url return absolutify(photo_url) def is_vouchable(self, voucher): if voucher and not voucher.can_vouch: return False if self.vouches_received.all().count() >= settings.VOUCH_COUNT_LIMIT: return False vouch_query = self.vouches_received.filter(voucher=voucher) if voucher and vouch_query.exists(): return False return True def vouch(self, vouched_by, description='', autovouch=False): if not self.is_vouchable(vouched_by): return vouch = self.vouches_received.create( voucher=vouched_by, date=now(), description=description, autovouch=autovouch ) if not switch_is_active('dino-park-autologin'): self._email_now_vouched(vouched_by, description) return vouch def auto_vouch(self): employee_vouch_q = self.vouches_received.filter(description=settings.AUTO_VOUCH_REASON, autovouch=True) if not employee_vouch_q.exists(): self.vouch(None, settings.AUTO_VOUCH_REASON, autovouch=True) def _email_now_vouched(self, vouched_by, description=''): name = None voucher_profile_link = None vouchee_profile_link = utils.absolutify(self.get_absolute_url()) if vouched_by: name = vouched_by.full_name voucher_profile_link = utils.absolutify(vouched_by.get_absolute_url()) number_of_vouches = self.vouches_received.all().count() template = get_template('phonebook/emails/vouch_confirmation_email.txt') message = template.render({ 'voucher_name': name, 'voucher_profile_url': voucher_profile_link, 'vouchee_profile_url': vouchee_profile_link, 'vouch_description': description, 'functional_areas_url': utils.absolutify(reverse('groups:index_functional_areas')), 'groups_url': utils.absolutify(reverse('groups:index_groups')), 'first_vouch': number_of_vouches == 1, 'can_vouch_threshold': number_of_vouches == settings.CAN_VOUCH_THRESHOLD, }) subject = _(u'You have been vouched on Mozillians.org') filtered_message = message.replace('&#34;', '"').replace('&#39;', "'") send_mail(subject, filtered_message, settings.FROM_NOREPLY, [self.email]) def _get_annotated_groups(self): groups_manager = self.groups user_group_ids = [] if hasattr(groups_manager, 'visible'): user_group_ids = groups_manager.visible().values_list('id', flat=True) return self.groupmembership_set.filter(group__id__in=user_group_ids) def get_annotated_tags(self): tags = self._get_annotated_groups().filter(group__is_access_group=False) 
annotated_tags = [] for membership in tags: tag = membership.group tag.pending = (membership.status == GroupMembership.PENDING) tag.pending_terms = (membership.status == GroupMembership.PENDING_TERMS) annotated_tags.append(tag) return annotated_tags
BSD 3-Clause New or Revised License
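The UserProfile code above gates every privacy-aware attribute inside __getattribute__ by comparing the field's own privacy setting against the viewer's instance privacy level. The dependency-free sketch below illustrates only that comparison rule; the class, constant names, and numeric values are illustrative stand-ins, not the mozillians models or their real privacy constants.

PRIVATE, EMPLOYEES, MOZILLIANS, PUBLIC = 1, 2, 3, 4  # assumed ordering, for illustration only

class ProfileSketch:
    """Toy object mimicking the field_privacy < privacy_level check above."""
    def __init__(self):
        self.full_name = "Jane Doe"
        self.privacy_full_name = MOZILLIANS
        self._privacy_level = None  # None disables filtering, as in the model

    def get_field(self, name, default=""):
        level = self._privacy_level
        if level and getattr(self, "privacy_%s" % name) < level:
            return default  # field is hidden from this viewer
        return getattr(self, name)

profile = ProfileSketch()
profile._privacy_level = EMPLOYEES      # trusted viewer: MOZILLIANS >= EMPLOYEES, value shown
assert profile.get_field("full_name") == "Jane Doe"
profile._privacy_level = PUBLIC         # anonymous viewer: MOZILLIANS < PUBLIC, value masked
assert profile.get_field("full_name") == ""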
amz-driverless/rbb_core
rbb_server/src/rbb_server/controllers/simulation_controller.py
delete_simulation
python
def delete_simulation(sim_identifier, user=None): session = Database.get_session() try: q = session.query(Simulation).filter(Simulation.uid == sim_identifier) if q.first(): session.delete(q.first()) session.commit() return "", 204 else: return Error(code=404, message="Simulation not found"), 404 except Exception as e: logging.exception("Simulation deletion failed") session.rollback() return Error(code=500, message="Exception occurred"), 500
Delete simulation # noqa: E501 :param sim_identifier: :type sim_identifier: int :rtype: None
https://github.com/amz-driverless/rbb_core/blob/618617270314af5335de30179072244e1f440c4c/rbb_server/src/rbb_server/controllers/simulation_controller.py#L372-L397
import datetime import logging import connexion import rbb_server.helper.auth as auth import rbb_server.helper.database as db_helper from rbb_server.helper.permissions import Permissions from rbb_server.helper.error import handle_exception from sqlalchemy import and_ from sqlalchemy.orm import Query from rbb_server import Database from rbb_server.model.database import SimulationEnvironment, Simulation, SimulationRun, Rosbag, RosbagStore from rbb_server.model.task import Task, TaskState from rbb_server.hooks.new_simulation_hook import NewSimulationHook from rbb_swagger_server.models import SimulationRunDetailed, SimulationDetailed, SimulationEnvironmentDetailed from rbb_swagger_server.models.error import Error @auth.requires_auth_with_permission(Permissions.SimulationRead) def get_simulation(sim_identifier, expand=None, user=None): try: session = Database.get_session() q = session.query(Simulation).filter(Simulation.uid == sim_identifier) model = q.first() if model: return model.to_swagger_model_detailed(user=user, expand=expand) else: return Error(code=404, message="Simulation not found"), 404 except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationEnvironmentRead) def get_simulation_environment(env_name, user=None): try: session = Database.get_session() q = session.query(SimulationEnvironment).filter(SimulationEnvironment.name == env_name) model = q.first() if model: return model.to_swagger_model_detailed(user=user) else: return Error(code=404, message="Simulation environment not found"), 404 except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationRead) def get_simulation_run(sim_identifier, run_identifier, expand=None, user=None): try: session = Database.get_session() q = session.query(SimulationRun).filter(SimulationRun.uid == run_identifier) model = q.first() if model and model.simulation_id == sim_identifier: return model.to_swagger_model_detailed(user=user, expand=expand) else: return Error(code=404, message="Simulation run not found"), 404 except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationEnvironmentRead) def list_simulation_environments(user=None): try: session = Database.get_session() q = session.query(SimulationEnvironment) return [p.to_swagger_model_summary(user=user) for p in q] except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationRead) def list_simulation_runs(sim_identifier, user=None): try: session = Database.get_session() q = session.query(Simulation).filter(Simulation.uid == sim_identifier) model = q.first() if model: return [p.to_swagger_model_summary(user=user) for p in model.runs] else: return Error(code=404, message="Simulation not found"), 404 except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationRead) def list_simulations(limit=None, offset=None, ordering=None, user=None): try: session = Database.get_session() q = session.query(Simulation) q = db_helper.query_pagination_ordering(q, offset, limit, ordering, { 'created': Simulation.created, 'identifier': Simulation.uid }) return [p.to_swagger_model_summary(user=user) for p in q] except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationWrite) def new_simulation(simulation, trigger=None, user=None): try: if connexion.request.is_json: simulation = SimulationDetailed.from_dict(connexion.request.get_json()) 
session = Database.get_session() q = session.query(SimulationEnvironment).filter(SimulationEnvironment.name == simulation.environment_name) if not q.first(): return Error(code=400, message="Simulation environment '%s' not found" % simulation.environment_name), 400 model = Simulation() model.from_swagger_model(simulation, user=user) model.environment_id = q.first().uid model.uid = None session.add(model) session.commit() q = session.query(Simulation).filter(Simulation.uid == model.uid) m = NewSimulationHook.trigger(q.first(), session, trigger, user) return m.to_swagger_model_detailed(user=user), 200 except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationWrite) def new_simulation_run(sim_identifier, simulation_run, user=None): try: if connexion.request.is_json: simulation_run = SimulationRunDetailed.from_dict(connexion.request.get_json()) session = Database.get_session() q = session.query(Simulation).filter(Simulation.uid == sim_identifier) simulation = q.first() if not simulation: return Error(code=404, message="Simulation not found"), 404 model = SimulationRun() model.from_swagger_model(simulation_run, user=user) model.simulation_id = simulation.uid if simulation_run.bag_store_name and simulation_run.bag_name: q = session.query(Rosbag).filter( and_(RosbagStore.name == simulation_run.bag_store_name, Rosbag.name == simulation_run.bag_name) ) bag = q.first() if not bag: return Error(code=400, message="Bag not found"), 400 model.bag_id = bag.uid model.uid = None session.add(model) session.commit() q = session.query(SimulationRun).filter(SimulationRun.uid == model.uid) return q.first().to_swagger_model_detailed(user=user), 200 except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationWrite) def put_simulation(sim_identifier, simulation, user=None): try: if connexion.request.is_json: simulation = SimulationDetailed.from_dict(connexion.request.get_json()) if simulation.identifier != sim_identifier: return Error(code=400, message="Body and path identifier are not the same"), 400 session = Database.get_session() q = session.query(Simulation).filter(Simulation.uid == sim_identifier) model = q.first() if not model: return Error(code=404, message="Simulation not found"), 404 q = session.query(SimulationEnvironment).filter(SimulationEnvironment.name == simulation.environment_name) if not q.first(): return Error(code=400, message="Simulation environment '%s' not found" % simulation.environment_name), 400 model.from_swagger_model(simulation, user=user) model.environment_id = q.first().uid session.commit() q = session.query(Simulation).filter(Simulation.uid == model.uid) return q.first().to_swagger_model_detailed(user=user), 200 except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationEnvironmentWrite) def put_simulation_environment(env_name, environment, block_on_existing=None, user=None): try: if connexion.request.is_json: environment = SimulationEnvironmentDetailed.from_dict(connexion.request.get_json()) session = Database.get_session() q = session.query(SimulationEnvironment).filter(SimulationEnvironment.name == env_name) model = SimulationEnvironment() if q.count() == 1: if block_on_existing: return Error(code=1000, message="Already exists."), 400 model = q.first() else: if environment.name != env_name: return Error(code=400, message="Path and body tag have to be equal for a new environment"), 400 session.add(model) 
model.from_swagger_model(environment, user=user) if environment.rosbag_store: q = session.query(RosbagStore).filter(RosbagStore.name == environment.rosbag_store) rosbag_store = q.first() if not rosbag_store: return Error(code=400, message="Rosbag store not found"), 400 model.rosbag_store_id = rosbag_store.uid session.commit() return model.to_swagger_model_detailed(user=user) except Exception as e: return handle_exception(e) @auth.requires_auth_with_permission(Permissions.SimulationWrite)
MIT License
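delete_simulation above follows a simple controller contract: look the row up by identifier, delete and commit, answer with an empty body and HTTP 204, or answer with an Error body and 404 when nothing matches. The sketch below reproduces only that contract with an in-memory dict; it does not use the rbb_server Database session or Error model.

SIMULATIONS = {1: {"uid": 1, "description": "demo"}}  # stand-in for the simulations table

def delete_simulation_sketch(sim_identifier):
    if sim_identifier in SIMULATIONS:
        del SIMULATIONS[sim_identifier]    # session.delete(...) + commit in the real controller
        return "", 204
    return {"code": 404, "message": "Simulation not found"}, 404

assert delete_simulation_sketch(1) == ("", 204)
assert delete_simulation_sketch(1)[1] == 404   # second delete: the row is already gone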
v1k45/django-notify-x
notify/models.py
NotificationQueryset.read
python
def read(self): return self.filter(deleted=False, read=True)
QuerySet filter() for retrieving read notifications. :return: Read and active Notifications filter().
https://github.com/v1k45/django-notify-x/blob/c8828c1c0c550b7b33acccb4db51fc612045afa3/notify/models.py#L55-L61
from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from django.db import models from django.conf import settings from django.db.models import QuerySet from jsonfield.fields import JSONField from six import python_2_unicode_compatible from django.utils.html import escape from django.utils.timesince import timesince from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import force_text from django.utils.functional import cached_property from .utils import prefetch_relations class NotificationQueryset(QuerySet): def prefetch(self): qs = self.select_related() qs._prefetch_relations = True return qs def _fetch_all(self): if self._result_cache is None: if hasattr(self, '_prefetch_relations'): del self._prefetch_relations prefetch_relations(self) self._prefetch_relations = True return super(NotificationQueryset, self)._fetch_all() def _clone(self, **kwargs): clone = super(NotificationQueryset, self)._clone(**kwargs) if hasattr(self, '_prefetch_relations'): clone._prefetch_relations = True return clone def active(self): return self.filter(deleted=False)
MIT License
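The read() filter above composes two conditions: the notification must not be soft-deleted and must be flagged as read (active() applies only the first condition). A plain-Python rendering of the same semantics, with dicts standing in for Notification rows and assuming nothing about the Django ORM:

notifications = [
    {"id": 1, "read": True,  "deleted": False},
    {"id": 2, "read": False, "deleted": False},
    {"id": 3, "read": True,  "deleted": True},   # soft-deleted, never returned
]

def active(rows):
    # mirrors NotificationQueryset.active(): filter(deleted=False)
    return [n for n in rows if not n["deleted"]]

def read(rows):
    # mirrors NotificationQueryset.read(): filter(deleted=False, read=True)
    return [n for n in active(rows) if n["read"]]

assert [n["id"] for n in read(notifications)] == [1]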
zehaos/mobilenet
tools/quantize_graph.py
GraphRewriter.eightbitize_input_to_node
python
def eightbitize_input_to_node(self, namespace_prefix, original_input_name, reshape_dims_name, reduction_dims_name): unique_input_name = unique_node_name_from_input(original_input_name) reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name min_input_name = namespace_prefix + "_min_" + unique_input_name max_input_name = namespace_prefix + "_max_" + unique_input_name quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name reshape_input_node = create_node("Reshape", reshape_input_name, [original_input_name, reshape_dims_name]) set_attr_dtype(reshape_input_node, "T", dtypes.float32) self.add_output_graph_node(reshape_input_node) min_input_node = create_node("Min", min_input_name, [reshape_input_name, reduction_dims_name]) set_attr_dtype(min_input_node, "T", dtypes.float32) set_attr_bool(min_input_node, "keep_dims", False) self.add_output_graph_node(min_input_node) max_input_node = create_node("Max", max_input_name, [reshape_input_name, reduction_dims_name]) set_attr_dtype(max_input_node, "T", dtypes.float32) set_attr_bool(max_input_node, "keep_dims", False) self.add_output_graph_node(max_input_node) quantize_input_node = create_node( "QuantizeV2", quantize_input_name, [original_input_name, min_input_name, max_input_name]) set_attr_dtype(quantize_input_node, "T", dtypes.quint8) set_attr_string(quantize_input_node, "mode", b"MIN_FIRST") self.add_output_graph_node(quantize_input_node) min_output_name = quantize_input_name + ":1" max_output_name = quantize_input_name + ":2" return quantize_input_name, min_output_name, max_output_name
Takes one float input to an op, and converts it to quantized form.
https://github.com/zehaos/mobilenet/blob/bb02b10fbd211d717f7a207245feac229f6bb23e/tools/quantize_graph.py#L687-L717
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import numpy as np from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_util from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import app from tensorflow.python.platform import flags as flags_lib from tensorflow.python.platform import gfile flags = flags_lib FLAGS = flags.FLAGS flags.DEFINE_boolean("print_nodes", False, """Lists all nodes in the model.""") flags.DEFINE_string("input", "", """TensorFlow 'GraphDef' file to load.""") flags.DEFINE_string("output_node_names", "", """Output node names, comma separated.""") flags.DEFINE_string("output", "", """File to save the output graph to.""") flags.DEFINE_integer("bitdepth", 8, """How many bits to quantize the graph to.""") flags.DEFINE_string("mode", "round", """What transformation to apply (round, quantize,""" """ eightbit, weights, or weights_rounded).""") flags.DEFINE_string("test_input_dims", "1,224,224,3", """The size of the input tensor to use when testing a""" """ graph loaded from a file.""") flags.DEFINE_boolean("strip_redundant_quantization", True, """Removes redundant dequantize/quantize pairs.""") flags.DEFINE_boolean("quantized_input", False, "If true, assume Placeholders are quantized with values " "covering [--quantized_input_min,--quantized_input_max]. " "Only supported when --mode=eightbit") flags.DEFINE_float("quantized_input_min", 0, "The minimum of the actual input range when " "--quantized_input") flags.DEFINE_float("quantized_input_max", 1, "The maximum of the actual input range when " "--quantized_input") flags.DEFINE_float( "quantized_fallback_min", None, "The fallback 'min' value to use for layers which lack min-max " "information. Note: this should be considered a coarse tool just good " "enough for experimentation purposes, since graphs quantized in this way " "would be very inaccurate.") flags.DEFINE_float( "quantized_fallback_max", None, "The fallback 'max' value to use for layers which lack min-max " "information. 
Note: this should be considered a coarse tool just good " "enough for experimentation purposes, since graphs quantized in this way " "would be very inaccurate.") def print_input_nodes(current_node, nodes_map, indent, already_visited): print(" " * indent + current_node.op + ":" + current_node.name) already_visited[current_node.name] = True for input_node_name in current_node.input: if input_node_name in already_visited: continue input_node = nodes_map[input_node_name] print_input_nodes(input_node, nodes_map, indent + 1, already_visited) def create_node(op, name, inputs): new_node = node_def_pb2.NodeDef() new_node.op = op new_node.name = name for input_name in inputs: new_node.input.extend([input_name]) return new_node def create_constant_node(name, value, dtype, shape=None): node = create_node("Const", name, []) set_attr_dtype(node, "dtype", dtype) set_attr_tensor(node, "value", value, dtype, shape) return node def copy_attr(node, key, attr_value): try: node.attr[key].CopyFrom(attr_value) except KeyError: pass def set_attr_dtype(node, key, value): try: node.attr[key].CopyFrom( attr_value_pb2.AttrValue(type=value.as_datatype_enum)) except KeyError: pass def set_attr_shape(node, key, value): try: node.attr[key].CopyFrom( attr_value_pb2.AttrValue(shape=tensor_shape.as_shape(value).as_proto())) except KeyError: pass def set_attr_tensor(node, key, value, dtype, shape=None): try: node.attr[key].CopyFrom( attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto( value, dtype=dtype, shape=shape))) except KeyError: pass def set_attr_string(node, key, value): try: node.attr[key].CopyFrom(attr_value_pb2.AttrValue(s=value)) except KeyError: pass def set_attr_int_list(node, key, value): list_value = attr_value_pb2.AttrValue.ListValue(i=value) try: node.attr[key].CopyFrom(attr_value_pb2.AttrValue(list=list_value)) except KeyError: pass def set_attr_bool(node, key, value): try: node.attr[key].CopyFrom(attr_value_pb2.AttrValue(b=value)) except KeyError: pass def set_attr_int(node, key, value): try: node.attr[key].CopyFrom(attr_value_pb2.AttrValue(i=value)) except KeyError: pass def set_attr_float(node, key, value): try: node.attr[key].CopyFrom(attr_value_pb2.AttrValue(f=value)) except KeyError: pass def node_name_from_input(node_name): if node_name.startswith("^"): node_name = node_name[1:] m = re.search(r"(.*):\d+$", node_name) if m: node_name = m.group(1) return node_name def ensure_tensor_name_has_port(node_name): m = re.search(r"(.*):\d+$", node_name) if m: name_with_port = node_name else: name_with_port = node_name + ":0" return name_with_port def unique_node_name_from_input(node_name): return node_name.replace(":", "__port__").replace("^", "__hat__") def quantize_array(arr, num_buckets): if num_buckets < 1: raise ValueError("num_buckets must be >= 1") arr_max = arr.max() arr_min = arr.min() if arr_max == arr_min: return arr bucket_width = (arr_max - arr_min) / num_buckets bucket_indices = np.floor((arr - arr_min) / bucket_width) bucket_indices[bucket_indices == num_buckets] = num_buckets - 1 arr = arr_min + bucket_width * (bucket_indices + 0.5) return arr def quantize_weight_rounded(input_node): input_tensor = input_node.attr["value"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor) shape = input_tensor.tensor_shape num_buckets = 1 << FLAGS.bitdepth tensor_value_rounded = quantize_array(tensor_value, num_buckets) tensor_shape_list = tensor_util.TensorShapeProtoToList(shape) return [ create_constant_node( input_node.name, tensor_value_rounded, dtypes.float32, shape=tensor_shape_list) ] 
def quantize_weight_eightbit(input_node, quantization_mode): base_name = input_node.name + "_" quint8_const_name = base_name + "quint8_const" min_name = base_name + "min" max_name = base_name + "max" float_tensor = tensor_util.MakeNdarray(input_node.attr["value"].tensor) min_value = np.min(float_tensor.flatten()) max_value = np.max(float_tensor.flatten()) if min_value > 0.0: min_value = 0.0 if min_value == max_value: if abs(min_value) < 0.000001: max_value = min_value + 1.0 elif min_value > 0: max_value = 2 * min_value else: max_value = min_value / 2.0 sess = session.Session() with sess.as_default(): quantize_op = array_ops.quantize_v2( float_tensor, min_value, max_value, dtypes.quint8, mode=quantization_mode) quint8_tensor = quantize_op[0].eval() shape = tensor_util.TensorShapeProtoToList(input_node.attr["value"] .tensor.tensor_shape) quint8_const_node = create_constant_node( quint8_const_name, quint8_tensor, dtypes.quint8, shape=shape) min_node = create_constant_node(min_name, min_value, dtypes.float32) max_node = create_constant_node(max_name, max_value, dtypes.float32) dequantize_node = create_node("Dequantize", input_node.name, [quint8_const_name, min_name, max_name]) set_attr_dtype(dequantize_node, "T", dtypes.quint8) set_attr_string(dequantize_node, "mode", quantization_mode) return [quint8_const_node, min_node, max_node, dequantize_node] EightbitizeRecursionState = collections.namedtuple( "EightbitizeRecursionState", ["already_visited", "output_node_stack", "merged_with_fake_quant"]) class GraphRewriter(object): def __init__(self, input_graph, mode, quantized_input_range, fallback_quantization_range=None): self.input_graph = input_graph self.nodes_map = self.create_nodes_map(input_graph) self.output_graph = None self.mode = mode self.final_node_renames = {} if quantized_input_range: self.input_range = (quantized_input_range[0], quantized_input_range[1]) if self.input_range[0] >= self.input_range[1]: raise ValueError("Invalid quantized_input_range: [%s,%s]" % self.input_range) if self.mode != "eightbit": raise ValueError( "quantized_input_range can only be specified in eightbit mode") else: self.input_range = None if fallback_quantization_range: self.fallback_quantization_range = [ fallback_quantization_range[0], fallback_quantization_range[1] ] if (self.fallback_quantization_range[0] >= self.fallback_quantization_range[1]): raise ValueError("Invalid fallback_quantization_range: [%s,%s]" % self.fallback_quantization_range) if self.mode != "eightbit": raise ValueError("fallback_quantization_range can only be " "specified in eightbit mode") else: self.fallback_quantization_range = None self.state = None def create_nodes_map(self, graph): nodes_map = {} for node in graph.node: if node.name not in nodes_map.keys(): nodes_map[node.name] = node else: raise ValueError("Duplicate node names detected.") return nodes_map def rewrite(self, output_node_names): self.output_graph = graph_pb2.GraphDef() output_nodes = [ self.nodes_map[output_node_name] for output_node_name in output_node_names ] if self.mode == "round": self.already_visited = {} for output_node in output_nodes: self.round_nodes_recursively(output_node) elif self.mode == "quantize": self.already_visited = {} self.already_quantized = {} for output_node in output_nodes: self.quantize_nodes_recursively(output_node) elif self.mode == "eightbit": self.set_input_graph(graph_util.remove_training_nodes(self.input_graph)) output_nodes = [ self.nodes_map[output_node_name] for output_node_name in output_node_names ] self.state = 
EightbitizeRecursionState( already_visited={}, output_node_stack=[], merged_with_fake_quant={}) for output_node in output_nodes: self.eightbitize_nodes_recursively(output_node) self.state = None if self.input_range: self.add_output_graph_node( create_constant_node("quantized_input_min_value", self.input_range[ 0], dtypes.float32, [])) self.add_output_graph_node( create_constant_node("quantized_input_max_value", self.input_range[ 1], dtypes.float32, [])) if self.fallback_quantization_range: self.add_output_graph_node( create_constant_node("fallback_quantization_min_value", self.fallback_quantization_range[0], dtypes.float32, [])) self.add_output_graph_node( create_constant_node("fallback_quantization_max_value", self.fallback_quantization_range[1], dtypes.float32, [])) if FLAGS.strip_redundant_quantization: self.output_graph = self.remove_redundant_quantization( self.output_graph) self.remove_dead_nodes(output_node_names) self.apply_final_node_renames() elif self.mode == "weights": self.output_graph = self.quantize_weights(self.input_graph, b"MIN_COMBINED") self.remove_dead_nodes(output_node_names) elif self.mode == "weights_rounded": self.output_graph = self.quantize_weights(self.input_graph, self.mode) self.remove_dead_nodes(output_node_names) else: print("Bad mode - " + self.mode + ".") return self.output_graph def round_nodes_recursively(self, current_node): if self.already_visited[current_node.name]: return self.already_visited[current_node.name] = True for input_node_name in current_node.input: input_node_name = node_name_from_input(input_node_name) input_node = self.nodes_map[input_node_name] self.round_nodes_recursively(input_node) nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"] if any(current_node.op in s for s in nodes_to_quantize): new_node = node_def_pb2.NodeDef() new_node.CopyFrom(current_node) new_node.name = current_node.name + "_original" self.add_output_graph_node(new_node) levels = 1 << FLAGS.bitdepth constant_name = current_node.name + "_round_depth" constant_tensor = constant_op.constant( levels, dtype=dtypes.int32, name=constant_name) constant_node = constant_tensor.op.node_def self.add_output_graph_node(constant_node) quantize_node = node_def_pb2.NodeDef() quantize_node.op = "RoundToSteps" quantize_node.name = current_node.name quantize_node.input.extend([current_node.name + "_original"]) quantize_node.input.extend([constant_node.name]) self.add_output_graph_node(quantize_node) else: new_node = node_def_pb2.NodeDef() new_node.CopyFrom(current_node) self.add_output_graph_node(new_node) def quantize_nodes_recursively(self, current_node): if self.already_visited[current_node.name]: return self.already_visited[current_node.name] = True for input_node_name in current_node.input: input_node_name = node_name_from_input(input_node_name) input_node = self.nodes_map[input_node_name] self.quantize_nodes_recursively(input_node) nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"] if any(current_node.op in s for s in nodes_to_quantize): for input_name in current_node.input: input_name = node_name_from_input(input_name) input_node = self.nodes_map[input_name] self.quantize_node(input_node) self.quantize_node(current_node) else: new_node = node_def_pb2.NodeDef() new_node.CopyFrom(current_node) self.add_output_graph_node(new_node) def quantize_node(self, input_node): input_name = input_node.name if input_name in self.already_quantized: return self.already_quantized[input_name] = True original_input_name = input_name + "_original" reshape_name = input_name + "_reshape" 
reshape_dims_name = input_name + "_reshape_dims" max_name = input_name + "_max" min_name = input_name + "_min" dims_name = input_name + "_dims" quantize_name = input_name + "_quantize" dequantize_name = input_name original_input_node = node_def_pb2.NodeDef() original_input_node.CopyFrom(input_node) original_input_node.name = original_input_name self.add_output_graph_node(original_input_node) reshape_dims_node = create_constant_node(reshape_dims_name, -1, dtypes.int32, [1]) self.add_output_graph_node(reshape_dims_node) reshape_node = create_node("Reshape", reshape_name, [original_input_name, reshape_dims_name]) set_attr_dtype(reshape_node, "T", dtypes.float32) self.add_output_graph_node(reshape_node) dims_node = create_constant_node(dims_name, 0, dtypes.int32, [1]) self.add_output_graph_node(dims_node) max_node = create_node("Max", max_name, [reshape_name, dims_name]) set_attr_dtype(max_node, "T", dtypes.float32) set_attr_bool(max_node, "keep_dims", False) self.add_output_graph_node(max_node) min_node = create_node("Min", min_name, [reshape_name, dims_name]) set_attr_dtype(min_node, "T", dtypes.float32) set_attr_bool(min_node, "keep_dims", False) self.add_output_graph_node(min_node) quantize_node = create_node("Quantize", quantize_name, [original_input_name, min_name, max_name]) set_attr_dtype(quantize_node, "T", dtypes.quint8) set_attr_string(quantize_node, "mode", b"MIN_FIRST") self.add_output_graph_node(quantize_node) dequantize_node = create_node("Dequantize", dequantize_name, [quantize_name, min_name, max_name]) set_attr_dtype(dequantize_node, "T", dtypes.quint8) set_attr_string(dequantize_node, "mode", b"MIN_FIRST") self.add_output_graph_node(dequantize_node) def should_merge_with_fake_quant_node(self): if not self.state.output_node_stack: return False top = self.state.output_node_stack[-1] return top[1] == 0 and top[0].op in ["FakeQuantWithMinMaxVars"] def should_quantize_const(self, node): if not self.state.output_node_stack: return False top = self.state.output_node_stack[-1] if not top[2]: return False dtype = dtypes.as_dtype(node.attr["dtype"].type) assert dtype == dtypes.float32, ( "Failed to quantized constant %s of type %s" % (node.name, dtype)) return True def eightbitize_nodes_recursively(self, current_node): if current_node.name in self.state.already_visited: if (self.should_merge_with_fake_quant_node() or current_node.name in self.state.merged_with_fake_quant): raise ValueError("Unsupported graph structure: output of node %s " "is processed by a FakeQuant* node and should have " "no other outputs.", current_node.name) return self.state.already_visited[current_node.name] = True for i, input_node_name in enumerate(current_node.input): quantize_input = False if current_node.op in ("MatMul", "Conv2D", "BiasAdd", "MaxPool", "AvgPool", "Relu", "Relu6", "BatchNormWithGlobalNormalization"): quantize_input = True elif current_node.op == "Concat" and i > 0: quantize_input = ( dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32) elif current_node.op == "Reshape" and i == 0: quantize_input = ( dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32) self.state.output_node_stack.append((current_node, i, quantize_input)) input_node_name = node_name_from_input(input_node_name) input_node = self.nodes_map[input_node_name] self.eightbitize_nodes_recursively(input_node) self.state.output_node_stack.pop() if current_node.op == "MatMul": self.eightbitize_mat_mul_node(current_node) elif current_node.op == "Conv2D": self.eightbitize_conv_node(current_node) elif current_node.op 
== "BiasAdd": self.eightbitize_bias_add_node(current_node) elif current_node.op == "MaxPool" or current_node.op == "AvgPool": self.eightbitize_single_input_tensor_node(current_node, self.add_pool_function) elif current_node.op == "Relu" or current_node.op == "Relu6": self.eightbitize_single_input_tensor_node(current_node, self.add_relu_function) elif (current_node.op == "Concat" and dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32): self.eightbitize_concat_node(current_node) elif current_node.op == "BatchNormWithGlobalNormalization": self.eightbitize_batch_norm_node(current_node) elif (current_node.op == "Reshape" and dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32): self.eightbitize_reshape_node(current_node) elif (self.input_range and current_node.op in ("Placeholder", "PlaceholderV2")): self.eightbitize_placeholder_node(current_node) elif current_node.op == "FakeQuantWithMinMaxVars": pass elif current_node.op == "Const": if self.should_quantize_const(current_node): for n in quantize_weight_eightbit(current_node, b"MIN_FIRST"): self.add_output_graph_node(n) else: new_node = node_def_pb2.NodeDef() new_node.CopyFrom(current_node) self.add_output_graph_node(new_node) else: new_node = node_def_pb2.NodeDef() new_node.CopyFrom(current_node) self.add_output_graph_node(new_node) if (self.should_merge_with_fake_quant_node() and current_node.name not in self.state.merged_with_fake_quant): raise ValueError( "FakeQuant* node %s failed to merge with node %s of type %s" % (self.state.output_node_stack[-1][0], current_node.name, current_node.op)) def add_eightbit_prologue_nodes(self, original_node): namespace_prefix = original_node.name + "_eightbit" reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes( namespace_prefix) input_names = [] min_max_names = [] for original_input_name in original_node.input: quantize_input_name, min_input_name, max_input_name = ( self.eightbitize_input_to_node(namespace_prefix, original_input_name, reshape_dims_name, reduction_dims_name)) input_names.append(quantize_input_name) min_max_names.append(min_input_name) min_max_names.append(max_input_name) all_input_names = [] all_input_names.extend(input_names) all_input_names.extend(min_max_names) return all_input_names def add_common_quantization_nodes(self, namespace_prefix): reshape_dims_name = namespace_prefix + "_reshape_dims" reduction_dims_name = namespace_prefix + "_reduction_dims" reshape_dims_node = create_constant_node(reshape_dims_name, -1, dtypes.int32, [1]) self.add_output_graph_node(reshape_dims_node) reduction_dims_node = create_constant_node(reduction_dims_name, 0, dtypes.int32, [1]) self.add_output_graph_node(reduction_dims_node) return reshape_dims_name, reduction_dims_name
Apache License 2.0
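eightbitize_input_to_node above builds a Reshape -> Min/Max -> QuantizeV2(mode="MIN_FIRST") subgraph so that each float input is quantized against its observed range at runtime. The numpy sketch below only illustrates the numeric idea of affine 8-bit quantization over an observed [min, max]; it is not byte-exact TensorFlow QuantizeV2 behaviour and the helper names are made up.

import numpy as np

def quantize_min_first(x):
    # observe the tensor's range (the Min/Max nodes), then map floats onto uint8
    lo, hi = float(x.min()), float(x.max())
    scale = 255.0 / (hi - lo) if hi > lo else 1.0
    q = np.round((x - lo) * scale).astype(np.uint8)
    return q, lo, hi

def dequantize(q, lo, hi):
    scale = (hi - lo) / 255.0 if hi > lo else 1.0
    return q.astype(np.float32) * scale + lo

x = np.array([-1.0, 0.0, 0.5, 2.0], dtype=np.float32)
q, lo, hi = quantize_min_first(x)
x_hat = dequantize(q, lo, hi)
assert np.allclose(x, x_hat, atol=(hi - lo) / 255.0)  # round-trip error bounded by one step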
naver/claf
claf/model/reading_comprehension/mixin.py
SQuADv1.make_metrics
python
def make_metrics(self, predictions): preds = {} for index, prediction in predictions.items(): _, _, (answer_start, answer_end) = self._dataset.get_ground_truths(index) qid = self._dataset.get_qid(index) preds[qid] = prediction["predict_text"] self.write_predictions(preds) squad_offical_metrics = self._make_metrics_with_official(preds) metrics = self._make_span_metrics(predictions) metrics.update(squad_offical_metrics) return metrics
Make metrics with prediction dictionary * Args: predictions: prediction dictionary consisting of - key: 'id' (question id) - value: (predict_text, pred_span_start, pred_span_end) * Returns: metrics: metric dictionary consisting of - 'em': exact_match (SQuAD v1.1 official evaluation) - 'f1': f1 (SQuAD v1.1 official evaluation) - 'start_acc': span_start accuracy - 'end_acc': span_end accuracy - 'span_acc': span accuracy (start and end)
https://github.com/naver/claf/blob/6f45b1ecca0aa2b3bcf99e79c9cb2c915ba0bf3b/claf/model/reading_comprehension/mixin.py#L221-L252
from collections import OrderedDict import numpy as np import torch import torch.nn.functional as F from claf.decorator import arguments_required from claf.metric import korquad_v1_official, squad_v1_official, squad_v2_official from claf.model.base import ModelBase class ReadingComprehension: def get_best_span(self, span_start_logits, span_end_logits, answer_maxlen=None): B = span_start_logits.size(0) best_word_span = span_start_logits.new_zeros((B, 2), dtype=torch.long) score_starts = F.softmax(span_start_logits, dim=-1) score_ends = F.softmax(span_end_logits, dim=-1) max_len = answer_maxlen or score_starts.size(1) for i in range(score_starts.size(0)): scores = torch.ger(score_starts[i], score_ends[i]) scores.triu_().tril_(max_len - 1) scores = scores.detach().cpu().numpy() scores_flat = scores.flatten() idx_sort = [np.argmax(scores_flat)] s_idx, e_idx = np.unravel_index(idx_sort, scores.shape) best_word_span[i, 0] = int(s_idx[0]) best_word_span[i, 1] = int(e_idx[0]) return best_word_span def _make_span_metrics(self, predictions): start_accuracy, end_accuracy, span_accuracy = 0, 0, 0 for index, preds in predictions.items(): _, _, (answer_start, answer_end) = self._dataset.get_ground_truths(index) start_acc = 1 if preds["pred_span_start"] == answer_start else 0 end_acc = 1 if preds["pred_span_end"] == answer_end else 0 span_acc = 1 if start_acc == 1 and end_acc == 1 else 0 start_accuracy += start_acc end_accuracy += end_acc span_accuracy += span_acc start_accuracy = 100.0 * start_accuracy / len(self._dataset) end_accuracy = 100.0 * end_accuracy / len(self._dataset) span_accuracy = 100.0 * span_accuracy / len(self._dataset) return {"start_acc": start_accuracy, "end_acc": end_accuracy, "span_acc": span_accuracy} def make_predictions(self, output_dict): data_indices = output_dict["data_idx"] best_word_span = output_dict["best_span"] return OrderedDict( [ ( index.item(), { "predict_text": self._dataset.get_text_with_index( index.item(), best_span[0], best_span[1] ), "pred_span_start": best_span[0], "pred_span_end": best_span[1], "start_logits": start_logits, "end_logits": end_logits, }, ) for index, best_span, start_logits, end_logits in zip( list(data_indices.data), list(best_word_span.data), list(output_dict["start_logits"].data), list(output_dict["end_logits"].data), ) ] ) @arguments_required(["context", "question"]) def predict(self, output_dict, arguments, helper): span_start, span_end = list(output_dict["best_span"][0].data) word_start = span_start.item() word_end = span_end.item() text_span = helper["text_span"] char_start = text_span[word_start][0] char_end = text_span[word_end][1] context_text = arguments["context"] answer_text = context_text[char_start:char_end] start_logit = output_dict["start_logits"][0] end_logit = output_dict["end_logits"][0] score = start_logit[span_start] + end_logit[span_end] score = score.item() return {"text": answer_text, "score": score} def print_examples(self, index, inputs, predictions): data_index = inputs["labels"]["data_idx"][index].item() qid = self._dataset.get_qid(data_index) if "#" in qid: qid = qid.split("#")[0] helper = self._dataset.helper context = helper["examples"][qid]["context"] question = helper["examples"][qid]["question"] answers = helper["examples"][qid]["answers"] predict_text = predictions[data_index]["predict_text"] print() print("- Context:", context) print("- Question:", question) print("- Answers:", answers) print("- Predict:", predict_text) print() def write_predictions(self, predictions, file_path=None, is_dict=True): pass 
class SQuADv1(ReadingComprehension):
MIT License
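make_metrics above merges the official SQuAD v1.1 scores with the span accuracies computed in _make_span_metrics. The toy loop below shows just the accuracy arithmetic on hand-written spans; the dict keys mirror the prediction entries, but nothing here touches claf's dataset or evaluation code.

predictions = {
    0: {"pred_span_start": 3, "pred_span_end": 5},
    1: {"pred_span_start": 7, "pred_span_end": 9},
}
ground_truths = {0: (3, 5), 1: (7, 8)}  # (answer_start, answer_end) per question index

start_acc = end_acc = span_acc = 0
for idx, pred in predictions.items():
    answer_start, answer_end = ground_truths[idx]
    s = pred["pred_span_start"] == answer_start
    e = pred["pred_span_end"] == answer_end
    start_acc += s
    end_acc += e
    span_acc += s and e   # span counts only when both ends match

n = len(predictions)
metrics = {"start_acc": 100.0 * start_acc / n,
           "end_acc": 100.0 * end_acc / n,
           "span_acc": 100.0 * span_acc / n}
assert metrics == {"start_acc": 100.0, "end_acc": 50.0, "span_acc": 50.0}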
stellarcn/py-stellar-base
stellar_sdk/operation/operation.py
Operation.to_xdr_amount
python
def to_xdr_amount(value: Union[str, Decimal]) -> int: return xdr_utils.to_xdr_amount(value)
Converts an amount to the appropriate value to send over the network as a part of an XDR object. Each asset amount is encoded as a signed 64-bit integer in the XDR structures. An asset amount unit (that which is seen by end users) is scaled down by a factor of ten million (10,000,000) to arrive at the native 64-bit integer representation. For example, the integer amount value 25,123,456 equals 2.5123456 units of the asset. This scaling allows for seven decimal places of precision in human-friendly amount units. This static method correctly multiplies the value by the scaling factor in order to come to the integer value used in XDR structures. See `Stellar's documentation on Asset Precision <https://www.stellar.org/developers/guides/concepts/assets.html#amount-precision-and-representation>`_ for more information. :param value: The amount to convert to an integer for XDR serialization.
https://github.com/stellarcn/py-stellar-base/blob/75f73de10517d87cd75f7038cbbbd6416d245103/stellar_sdk/operation/operation.py#L50-L73
from abc import ABCMeta, abstractmethod from decimal import Decimal from typing import Optional, Union from .. import xdr as stellar_xdr from ..muxed_account import MuxedAccount from ..xdr import utils as xdr_utils __all__ = ["Operation"] class Operation(metaclass=ABCMeta): def __init__(self, source: Optional[Union[MuxedAccount, str]] = None) -> None: if isinstance(source, str): source = MuxedAccount.from_account(source) self.source: Optional[MuxedAccount] = source @property @abstractmethod def _XDR_OPERATION_TYPE(self) -> stellar_xdr.OperationType: pass @staticmethod
Apache License 2.0
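The docstring above pins down the conversion: one asset unit equals 10,000,000 of the signed 64-bit integer units sent over the wire. A minimal Decimal-based sketch of that scaling follows; the real stellar_sdk.xdr.utils.to_xdr_amount additionally validates type, range, and precision, so this is only the core arithmetic.

from decimal import Decimal

ONE = Decimal(10) ** 7  # one asset unit == 10,000,000 integer units in XDR

def to_xdr_amount_sketch(value):
    # scale a human-friendly amount to the XDR integer representation
    return int(Decimal(value) * ONE)

assert to_xdr_amount_sketch("2.5123456") == 25123456      # the docstring's example
assert to_xdr_amount_sketch(Decimal("100")) == 1000000000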
molssi/qcfractal
qcfractal/procedures/optimization.py
OptimizationTasks.parse_input
python
def parse_input(self, data, duplicate_id="hash_index"): opt_spec = data.meta assert opt_spec.procedure.lower() == "optimization" tag = opt_spec.tag priority = opt_spec.priority opt_keywords = {} if opt_spec.keywords is None else opt_spec.keywords opt_keywords["program"] = opt_spec.qc_spec["program"] qc_spec_dict = data.meta.qc_spec qc_keywords = qc_spec_dict.get("keywords", None) if qc_keywords is not None: qc_keywords = self.storage.get_add_keywords_mixed([qc_keywords])["data"][0] if qc_keywords is None: raise KeyError("Could not find requested KeywordsSet from id key.") qc_spec_dict["keywords"] = qc_keywords.id qc_spec = QCSpecification(**qc_spec_dict) molecule_list = self.storage.get_add_molecules_mixed(data.data)["data"] valid_molecule_idx = [idx for idx, mol in enumerate(molecule_list) if mol is not None] valid_molecules = [x for x in molecule_list if x is not None] all_opt_records = [] for mol in valid_molecules: opt_data = { "initial_molecule": mol.id, "qc_spec": qc_spec, "keywords": opt_keywords, "program": opt_spec.program, } if hasattr(opt_spec, "protocols"): opt_data["protocols"] = data.meta.protocols opt_rec = OptimizationRecord(**opt_data) all_opt_records.append(opt_rec) ret = self.storage.add_procedures(all_opt_records) all_opt_ids = ret["data"] existing_ids = ret["meta"]["duplicates"] for idx in range(len(all_opt_records)): r = all_opt_records[idx].copy(update={"id": all_opt_ids[idx]}) all_opt_records[idx] = r new_opt_records = [o for o in all_opt_records if o.id not in existing_ids] new_molecules = [m for m, r in zip(valid_molecules, all_opt_records) if r.id not in existing_ids] self.create_tasks( new_opt_records, new_molecules, [qc_keywords] * len(new_molecules), tag=tag, priority=priority ) opt_ids = [None] * len(molecule_list) for idx, result_id in zip(valid_molecule_idx, all_opt_ids): opt_ids[idx] = result_id return opt_ids, existing_ids
Parse input json into internally appropriate format json_data = { "meta": { "procedure": "optimization", "option": "default", "program": "geometric", "qc_meta": { "driver": "energy", "method": "HF", "basis": "sto-3g", "keywords": "default", "program": "psi4" }, }, "data": ["mol_id_1", "mol_id_2", ...], } qc_schema_input = { "molecule": { "geometry": [ 0.0, 0.0, -0.6, 0.0, 0.0, 0.6, ], "symbols": ["H", "H"], "connectivity": [[0, 1, 1]] }, "driver": "gradient", "model": { "method": "HF", "basis": "sto-3g" }, "keywords": {}, } json_data = { "keywords": { "coordsys": "tric", "maxiter": 100, "program": "psi4" }, }
https://github.com/molssi/qcfractal/blob/de022c93f2931721fffa509bb61fb176ed27f993/qcfractal/procedures/optimization.py#L32-L170
from typing import List, Optional import qcelemental as qcel import qcengine as qcng from .base import BaseTasks from ..interface.models import Molecule, OptimizationRecord, QCSpecification, ResultRecord, TaskRecord, KeywordSet from ..interface.models.task_models import PriorityEnum from .procedures_util import parse_single_tasks, form_qcinputspec_schema class OptimizationTasks(BaseTasks): def verify_input(self, data): program = data.meta.program.lower() if program not in qcng.list_all_procedures(): return "Procedure '{}' not available in QCEngine.".format(program) program = data.meta.qc_spec["program"].lower() if program not in qcng.list_all_programs(): return "Program '{}' not available in QCEngine.".format(program) return True
BSD 3-Clause New or Revised License
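parse_input above expects the request meta to name the optimization program, optional optimizer keywords, and a nested qc_spec, with data holding molecule ids. A hypothetical payload shaped like the docstring's examples (using the qc_spec key the code actually reads) is shown below purely for orientation; the field values are invented, and the real service validates them against QCSpecification and the molecule store.

payload = {
    "meta": {
        "procedure": "optimization",
        "program": "geometric",
        "keywords": {"coordsys": "tric", "maxiter": 100},   # optimizer keywords
        "qc_spec": {
            "driver": "gradient",
            "method": "HF",
            "basis": "sto-3g",
            "keywords": None,
            "program": "psi4",
        },
    },
    "data": ["mol_id_1", "mol_id_2"],
}
# parse_input copies meta.qc_spec["program"] into the optimizer keywords:
assert payload["meta"]["qc_spec"]["program"] == "psi4"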
thu-coai/cotk
docs/autodoc/__init__.py
Documenter.get_real_modname
python
def get_real_modname(self) -> str: return self.get_attr(self.object, '__module__', None) or self.modname
Get the real module name of an object to document. It can differ from the name of the module through which the object was imported.
https://github.com/thu-coai/cotk/blob/cd87f602ce1c4c3ce51cb65e31b6822f80bf69ff/docs/autodoc/__init__.py#L385-L391
import importlib import re import warnings from types import ModuleType from typing import Any, Callable, Dict, Iterator, List, Sequence, Set, Tuple, Union from docutils.statemachine import StringList import sphinx from sphinx.application import Sphinx from sphinx.config import Config, ENUM from sphinx.deprecation import ( RemovedInSphinx30Warning, RemovedInSphinx40Warning, deprecated_alias ) from sphinx.environment import BuildEnvironment from .importer import import_object, get_module_members, get_object_members from .mock import mock from sphinx.locale import _, __ from sphinx.pycode import ModuleAnalyzer, PycodeError from sphinx.util import inspect from sphinx.util import logging from sphinx.util import rpartition from sphinx.util.docstrings import prepare_docstring from sphinx.util.inspect import getdoc, object_description, safe_getattr from .custom_typing import stringify as stringify_typehint from .custom_typing import stringify as stringify_annotation if False: from typing import Type from .autodoc.directive import DocumenterBridge logger = logging.getLogger(__name__) MethodDescriptorType = type(type.__subclasses__) from io import StringIO def stringify_signature(sig: inspect.Signature, show_annotation: bool = True, show_return_annotation: bool = True) -> str: args = [] last_kind = None for param in sig.parameters.values(): if param.kind != param.POSITIONAL_ONLY and last_kind == param.POSITIONAL_ONLY: args.append('/') elif param.kind == param.KEYWORD_ONLY and last_kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY, None): args.append('*') arg = StringIO() if param.kind == param.VAR_POSITIONAL: arg.write('*' + param.name) elif param.kind == param.VAR_KEYWORD: arg.write('**' + param.name) else: arg.write(param.name) if show_annotation and param.annotation is not param.empty: arg.write(': ') arg.write(stringify_annotation(param.annotation)) if param.default is not param.empty: if show_annotation and param.annotation is not param.empty: arg.write(' = ') else: arg.write('=') arg.write(object_description(param.default)) args.append(arg.getvalue()) last_kind = param.kind if last_kind == inspect.Parameter.POSITIONAL_ONLY: args.append('/') if (sig.return_annotation == inspect.Signature.empty or show_return_annotation is False): return '(%s)' % ', '.join(args) else: annotation = stringify_annotation(sig.return_annotation) return '(%s) -> %s' % (', '.join(args), annotation) py_ext_sig_re = re.compile( r'''^ ([\w.]+::)? # explicit module name ([\w.]+\.)? # module and/or class name(s) (\w+) \s* # thing name (?: \((.*)\) # optional: arguments (?:\s* -> \s* (.*))? # return annotation )? 
$ # and nothing more ''', re.VERBOSE) def identity(x: Any) -> Any: return x ALL = object() INSTANCEATTR = object() SLOTSATTR = object() def members_option(arg: Any) -> Union[object, List[str]]: if arg is None or arg is True: return ALL return [x.strip() for x in arg.split(',') if x.strip()] def members_set_option(arg: Any) -> Union[object, Set[str]]: if arg is None: return ALL return {x.strip() for x in arg.split(',') if x.strip()} SUPPRESS = object() def annotation_option(arg: Any) -> Any: if arg is None: return SUPPRESS else: return arg def bool_option(arg: Any) -> bool: return True def merge_special_members_option(options: Dict) -> None: if 'special-members' in options and options['special-members'] is not ALL: if options.get('members') is ALL: pass elif options.get('members'): for member in options['special-members']: if member not in options['members']: options['members'].append(member) else: options['members'] = options['special-members'] def cut_lines(pre: int, post: int = 0, what: str = None) -> Callable: def process(app: Sphinx, what_: str, name: str, obj: Any, options: Any, lines: List[str] ) -> None: if what and what_ not in what: return del lines[:pre] if post: if lines and not lines[-1]: lines.pop(-1) del lines[-post:] if lines and lines[-1]: lines.append('') return process def between(marker: str, what: Sequence[str] = None, keepempty: bool = False, exclude: bool = False) -> Callable: marker_re = re.compile(marker) def process(app: Sphinx, what_: str, name: str, obj: Any, options: Any, lines: List[str] ) -> None: if what and what_ not in what: return deleted = 0 delete = not exclude orig_lines = lines[:] for i, line in enumerate(orig_lines): if delete: lines.pop(i - deleted) deleted += 1 if marker_re.match(line): delete = not delete if delete: lines.pop(i - deleted) deleted += 1 if not lines and not keepempty: lines[:] = orig_lines if lines and lines[-1]: lines.append('') return process class Options(dict): def __getattr__(self, name: str) -> Any: try: return self[name.replace('_', '-')] except KeyError: return None class Documenter: objtype = 'object' content_indent = ' ' priority = 0 member_order = 0 titles_allowed = False option_spec = {'noindex': bool_option} def get_attr(self, obj: Any, name: str, *defargs: Any) -> Any: return autodoc_attrgetter(self.env.app, obj, name, *defargs) @classmethod def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any ) -> bool: raise NotImplementedError('must be implemented in subclasses') def __init__(self, directive: "DocumenterBridge", name: str, indent: str = '') -> None: self.directive = directive self.env = directive.env self.options = directive.genopt self.name = name self.indent = indent self.modname = None self.module = None self.objpath = None self.fullname = None self.args = None self.retann = None self.object = None self.object_name = None self.parent = None self.analyzer = None @property def documenters(self) -> Dict[str, "Type[Documenter]"]: return get_documenters(self.env.app) def add_line(self, line: str, source: str, *lineno: int) -> None: self.directive.result.append(self.indent + line, source, *lineno) def resolve_name(self, modname: str, parents: Any, path: str, base: Any ) -> Tuple[str, List[str]]: raise NotImplementedError('must be implemented in subclasses') def parse_name(self) -> bool: try: explicit_modname, path, base, args, retann = py_ext_sig_re.match(self.name).groups() except AttributeError: logger.warning(__('invalid signature for auto%s (%r)') % (self.objtype, self.name), 
type='autodoc') return False if explicit_modname is not None: modname = explicit_modname[:-2] parents = path.rstrip('.').split('.') if path else [] else: modname = None parents = [] self.modname, self.objpath = self.resolve_name(modname, parents, path, base) if not self.modname: return False self.args = args self.retann = retann self.fullname = (self.modname or '') + ('.' + '.'.join(self.objpath) if self.objpath else '') return True def import_object(self) -> bool: with mock(self.env.config.autodoc_mock_imports): try: ret = import_object(self.modname, self.objpath, self.objtype, attrgetter=self.get_attr, warningiserror=self.env.config.autodoc_warningiserror) self.module, self.parent, self.object_name, self.object = ret return True except ImportError as exc: logger.warning(exc.args[0], type='autodoc', subtype='import_object') self.env.note_reread() return False
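As a quick, hedged illustration of the stringify_signature helper above (it assumes the snippet's stringify_annotation and object_description imports resolve, so it would run inside this module's namespace), rendering an ordinary inspect.Signature looks like this:

import inspect

def example(a, b: int = 3, *args, key: str = 'x', **kwargs) -> bool:
    ...

# stringify_signature walks the parameters, inserting '/' and '*' markers as needed
print(stringify_signature(inspect.signature(example)))
# expected output along the lines of:
# (a, b: int = 3, *args, key: str = 'x', **kwargs) -> bool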
Apache License 2.0
vimeo/graph-explorer
graph_explorer/unitconv.py
parse_unitname
python
def parse_unitname(unitname, fold_scale_prefix=True): def copyfields(srcdict, nameprefix): fields = ('multiplier', 'unit_class', 'primary_unit', 'base_unit', 'scale_multiplier') for f in fields: try: unitstruct[nameprefix + f] = srcdict[f] except KeyError: pass parts = unitname.split('/', 2) if len(parts) > 2 or '' in parts: return {'multiplier': 1, 'unit_class': None, 'primary_unit': unitname, 'base_unit': unitname} unitstruct = parse_simple_unitname(parts[0], fold_scale_prefix=fold_scale_prefix) copyfields(unitstruct, 'numer_') if len(parts) == 2: denominator = parse_simple_unitname(parts[1], fold_scale_prefix=fold_scale_prefix) copyfields(denominator, 'denom_') unitstruct['multiplier'] /= denominator['multiplier'] if unitstruct['unit_class'] is None or denominator['unit_class'] is None: unitstruct['unit_class'] = None else: unitstruct['unit_class'] += '/' + denominator['unit_class'] unitstruct['primary_unit'] += '/' + denominator['primary_unit'] unitstruct['base_unit'] += '/' + denominator['base_unit'] if not fold_scale_prefix: unitstruct['scale_multiplier'] /= denominator['scale_multiplier'] return unitstruct
Parse a unit term with at most two parts separated by / (a numerator and denominator, or just a plain term). Returns a structure identical to that returned by parse_simple_unitname(), but with extra fields for the numerator and for the denominator, if one exists. If there is a denominator, the 'base_unit', 'unit_class', 'primary_unit', 'multiplier', and 'scale_multiplier' fields will be returned as combinations of the corresponding fields for the numerator and the denominator.

>>> parse_unitname('GB/h') == {
...     'numer_multiplier': 1e9 * 8, 'denom_multiplier': 3600,
...     'multiplier': 1e9 * 8 / 3600,
...     'numer_unit_class': 'datasize', 'denom_unit_class': 'time',
...     'unit_class': 'datasize/time',
...     'numer_primary_unit': 'b', 'denom_primary_unit': 's',
...     'primary_unit': 'b/s',
...     'numer_base_unit': 'B', 'denom_base_unit': 'h',
...     'base_unit': 'B/h'}
True
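A minimal usage sketch (the import path follows graph_explorer/unitconv.py above; the printed values are what the parsing rules in the context below imply):

from graph_explorer.unitconv import parse_unitname

info = parse_unitname('MB/s')
print(info['unit_class'])    # 'datasize/time'
print(info['primary_unit'])  # 'b/s'
print(info['multiplier'])    # 8000000.0 (1e6 bytes -> 8e6 bits, per second)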
https://github.com/vimeo/graph-explorer/blob/bc557a6190a99182ec7a1c96dfdd33208a8575cd/graph_explorer/unitconv.py#L177-L228
from __future__ import division si_multiplier_prefixes = ( ('k', 1000 ** 1), ('M', 1000 ** 2), ('G', 1000 ** 3), ('T', 1000 ** 4), ('P', 1000 ** 5), ) iec_multiplier_prefixes = ( ('Ki', 1024 ** 1), ('Mi', 1024 ** 2), ('Gi', 1024 ** 3), ('Ti', 1024 ** 4), ('Pi', 1024 ** 5), ) multiplier_prefixes = iec_multiplier_prefixes + si_multiplier_prefixes multiplier_prefixes_with_empty = multiplier_prefixes + (('', 1),) second = 1 minute = second * 60 hour = minute * 60 day = hour * 24 week = day * 7 month = day * 30 times = ( ('s', second), ('M', minute), ('h', hour), ('d', day), ('w', week), ('mo', month) ) bit = 1 byte = bit * 8 datasizes = (('b', bit), ('B', byte)) unit_classes = (('time', times), ('datasize', datasizes)) unit_classes_by_name = dict(unit_classes) def is_power_of_2(n): return n & (n - 1) == 0 def prefix_class_for(multiplier): if multiplier > 1 and (isinstance(multiplier, int) or multiplier.is_integer()) and is_power_of_2(int(multiplier)): return 'binary' return 'si' def identify_base_unit(unitname): for unitclassname, units in unit_classes: for unit_abbrev, multiplier in units: if unitname == unit_abbrev: return {'multiplier': multiplier, 'unit_class': unitclassname, 'primary_unit': units[0][0], 'base_unit': unitname} return {'multiplier': 1, 'unit_class': None, 'primary_unit': unitname, 'base_unit': unitname} def parse_simple_unitname(unitname, fold_scale_prefix=True): special_units = ['Pckt', 'Msg', 'Metric', 'Ticket'] for prefix, multiplier in multiplier_prefixes: if unitname.startswith(prefix) and unitname not in special_units and unitname != prefix: base = parse_simple_unitname(unitname[len(prefix):], fold_scale_prefix=fold_scale_prefix) if fold_scale_prefix: base['multiplier'] *= multiplier else: base['scale_multiplier'] *= multiplier return base base = identify_base_unit(unitname) if not fold_scale_prefix: base['scale_multiplier'] = 1 return base
Apache License 2.0
google/clusterfuzz
local/polymer_bundler.py
main
python
def main(): os.chdir(APPENGINE_DIRECTORY) bundled_change_times = get_file_modified_times('templates') first_bundled_time = min(bundled_change_times) if bundled_change_times else 0 latest_unbundled_time = max(get_file_modified_times('private')) if latest_unbundled_time < first_bundled_time: print('App Engine templates are up to date.') return print('Building templates for App Engine...') if not os.path.exists('templates'): os.mkdir('templates') template_names = os.listdir(os.path.join('private', 'templates')) pool = multiprocessing.Pool(max(multiprocessing.cpu_count() // 2, 1)) result = pool.map(build_file, template_names) if not all(result): print('Failed to build App Engine templates.') sys.exit(1) print('App Engine templates built successfully.')
Use polymer-bundler to compile templates.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/local/polymer_bundler.py#L49-L73
import multiprocessing import os import sys APPENGINE_DIRECTORY = os.path.join( os.path.dirname(__file__), os.pardir, 'src', 'appengine') def get_file_modified_times(directory): modified_times = [] for root, _, filenames in os.walk(directory): for filename in filenames: modified_times.append(os.path.getmtime(os.path.join(root, filename))) return modified_times def build_file(filename): input_filename = os.path.join('private', 'templates', filename) output_filename = os.path.join('templates', filename) os.system('polymer-bundler --inline-scripts --inline-css --strip-comments ' '--out-file={output_filename} {input_filename}'.format( output_filename=output_filename, input_filename=input_filename)) if os.path.exists(output_filename) and os.path.getsize(output_filename): return True print('Failed to build template: ' + output_filename) return False
Apache License 2.0
openassets/colorcore
colorcore/providers.py
AbstractBlockchainProvider.send_transaction
python
def send_transaction(self, transaction, *args, **kwargs): raise NotImplementedError
Sends a Bitcoin transaction to the network. :param CTransaction transaction: The transaction to send. :return: The hexadecimal representation of the transaction hash. :rtype: str
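Because send_transaction is abstract, concrete providers override it; the sketch below is hypothetical (ExampleProvider is not part of this module and the broadcasting step is omitted) and only shows the expected coroutine shape and return value:

import asyncio
import bitcoin.core

class ExampleProvider(AbstractBlockchainProvider):
    @asyncio.coroutine
    def send_transaction(self, transaction, *args, **kwargs):
        raw = transaction.serialize()
        # A real provider would broadcast `raw` to a node or HTTP API here (omitted).
        # The transaction hash is the double-SHA256 of the raw bytes, hex-encoded little-endian.
        return bitcoin.core.b2lx(bitcoin.core.Hash(raw))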
https://github.com/openassets/colorcore/blob/cac278ed4871b8027d7b30eeb138ca7282e6e05e/colorcore/providers.py#L68-L76
import aiohttp import asyncio import bitcoin.core import json class AbstractBlockchainProvider(object): @asyncio.coroutine def list_unspent(self, addresses, *args, **kwargs): raise NotImplementedError @asyncio.coroutine def get_transaction(self, transaction_hash, *args, **kwargs): raise NotImplementedError @asyncio.coroutine def sign_transaction(self, transaction, *args, **kwargs): raise NotImplementedError @asyncio.coroutine
MIT License
rustychris/stompy
stompy/spatial/join_features.py
vector_mag
python
def vector_mag(vectors): return np.sqrt(np.sum(vectors**2,axis=-1))
vectors: xy vectors, shape [..., 2]
return: magnitude (L2 norm), taken along the last axis
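For example (a small self-contained check of the norm over the last axis):

import numpy as np

pnts = np.array([[3.0, 4.0],
                 [0.0, 1.0]])
print(vector_mag(pnts))     # [5. 1.]
print(vector_mag(pnts[0]))  # 5.0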
https://github.com/rustychris/stompy/blob/ef04d8b3ee9c9af827c87c72c7b50d365e5e567d/stompy/spatial/join_features.py#L568-L575
from __future__ import print_function from optparse import OptionParser import numpy as np import shapely.wkb,shapely.geometry try: from osgeo import ogr except ImportError: import ogr import sys import os.path import six from numpy.linalg import norm from . import wkb2shp from .. import utils import logging logging.basicConfig(level=logging.INFO) log=logging.getLogger('join_features') try: from shapely.prepared import prep as prepare_geometry except ImportError: prepare_geometry=lambda x:x log.warning("Prepared geometries not available - tests will be slow") try: from shapely.strtree import STRtree except ImportError: class STRtree(object): def __init__(self,geoms): self.geoms=geoms def query(self,g): return self.geoms def progress_printer(str,steps_done=None,steps_total=None): if steps_done is not None and steps_total is not None: log.info( "%s -- %d%%"%(str,100.0*steps_done/steps_total) ) elif steps_done: log.info( "%s -- %d"%(str,steps_done) ) else: log.info(str) progress_message = progress_printer trust_prepared = True def merge_lines(layer=None,segments=None): endpoints = {} features = {} remapper = {} progress_message("Reading features") def seg_iter(): if not layer: for fid,seg in enumerate(segments): yield fid,seg else: layer.ResetReading() while 1: feat = layer.GetNextFeature() if not feat: return fid = feat.GetFID() geo = feat.GetGeometryRef() if geo is None: log.warning("Missing geometry - will skip") continue geom=shapely.wkb.loads(geo.ExportToWkb()) if geom.type == 'MultiLineString': geolist=geom.geoms else: geolist=[geom] for sub_idx,one_geom in enumerate(geolist): if one_geom.type != 'LineString': raise Exception("All (sub)features must be linestrings (fid=%s, %s)"%(fid,one_geom.type)) points = np.array(one_geom.coords) yield (fid,sub_idx),points for fid,points in seg_iter(): features[fid] = points start_point = tuple(points[0]) end_point = tuple(points[-1]) if start_point == end_point: continue if start_point not in endpoints: endpoints[start_point] = [] endpoints[start_point].append(fid) if end_point not in endpoints: endpoints[end_point] = [] endpoints[end_point].append(fid) remapper[fid] = fid progress_message("%i possible matched features"%len(endpoints)) endpoint_list = [] for k in endpoints: if len(endpoints[k]) == 2: endpoint_list.append(endpoints[k]) total_pairs = len(endpoint_list) pairs_processed = 0 for matched_pair in endpoint_list: fidA,fidB = [remapper[fid] for fid in matched_pair] if fidA==fidB: continue pairs_processed += 1 if pairs_processed%1000==0: progress_message("Merge lines exact",pairs_processed,total_pairs) coordsA = features[fidA] coordsB = features[fidB] if all(coordsA[0]==coordsB[0]): coordsC = np.concatenate((coordsA[::-1],coordsB[1:])) redirect = coordsB[-1] elif all(coordsA[-1]==coordsB[0]): coordsC = np.concatenate((coordsA,coordsB[1:])) redirect = coordsB[-1] elif all(coordsA[0]==coordsB[-1]): coordsC = np.concatenate((coordsB,coordsA[1:])) redirect = coordsB[0] elif all(coordsA[-1]==coordsB[-1]): coordsC = np.concatenate((coordsA[:-1],coordsB[::-1])) redirect = coordsB[0] else: log.error( "No match:" ) log.error( "%s %s"%( fidA,fidB) ) log.error( "%s"%( coordsA[0]) ) log.error( "%s"%( coordsA[-1]) ) log.error( "%s"%( coordsB[0] ) ) log.error( "%s"%( coordsB[-1] ) ) raise Exception("hash says we have a match, but no good match found") features[fidA] = coordsC for k in remapper.keys(): if remapper[k] == fidB: remapper[k] = fidA del features[fidB] progress_message("merge completed") return list(features.values()) def 
tolerant_merge_lines(features,tolerance): NO_MATCH =0 FIRST_FIRST=1 FIRST_LAST =2 LAST_FIRST =3 LAST_LAST =4 INIT_MATCH =5 closed_already = [ all(feat[0]==feat[-1]) for feat in features] def check_match(pntsA,pntsB): if norm(pntsA[0]-pntsB[0]) <= tolerance: return FIRST_FIRST elif norm(pntsA[0]-pntsB[-1]) <= tolerance: return FIRST_LAST elif norm(pntsA[-1]-pntsB[0]) <= tolerance: return LAST_FIRST elif norm(pntsA[-1]-pntsB[-1]) <= tolerance: return LAST_LAST else: return NO_MATCH for i in range(len(features)): if features[i] is None: continue if closed_already[i]: continue progress_message("Merge lines tolerant",i,len(features)) match = INIT_MATCH while match: match = NO_MATCH for j in range(i+1,len(features)): if features[j] is None: continue if closed_already[j]: continue match = check_match(features[i], features[j]) if match==FIRST_FIRST: features[i] = np.concatenate((features[i][::-1],features[j][1:])) elif match==FIRST_LAST: features[i] = np.concatenate((features[j],features[i][1:])) elif match==LAST_FIRST: features[i] = np.concatenate((features[i],features[j][1:])) elif match==LAST_LAST: features[i] = np.concatenate((features[i][:-1],features[j][::-1])) if match != NO_MATCH: features[j] = None break features = [f for f in features if f is not None] for feat in features: delta = norm(feat[0] - feat[-1]) if delta > 0.0 and delta <= tolerance: log.info("tolerant_merge: joining a loop - dist = %f"%delta) feat[-1] = feat[0] return features def clean_degenerate_rings(point_lists,degen_shpname='degenerate_rings.shp'): degen_lines = [] valid_lists = [] for i in range(len(point_lists)): point_list = point_lists[i] if all(point_list[0]!=point_list[-1]): valid_lists.append(point_list) else: poly = shapely.geometry.Polygon(point_list) try: a=poly.area valid_lists.append(point_list) except ValueError: log.error( "degenerate feature: %s"%i ) degen_line = shapely.geometry.LineString(point_list) degen_lines.append(degen_line) if degen_shpname is not None and len(degen_lines)>0: wkb2shp.wkb2shp(degen_shpname,degen_lines,srs_text='EPSG:26910',overwrite=True) return valid_lists def find_exterior_ring(point_lists): open_strings = [] max_area = 0 max_area_id = None for i in range(len(point_lists)): point_list = point_lists[i] if all(point_list[0]!=point_list[-1]): open_strings.append(i) else: poly = shapely.geometry.Polygon(point_list) a = poly.area if a > max_area: max_area = a max_area_id = i if len(open_strings) > 1: log.error( "Wanted exactly 0 or 1 open strings, got %i"%len(open_strings) ) for i in open_strings: log.error(" Open string: %s"%( point_lists[i] ) ) raise Exception("Can't figure out who is the exterior ring") if len(open_strings) == 1: log.error("Choosing exterior ring based on it being the only open ring") log.error( "Endpoints: %s"%( point_lists[open_strings[0]][0],point_lists[open_strings[0]][-1] ) ) return open_strings[0],False else: log.info( "No open linestrings, resorting to choosing exterior ring by area" ) print("Selected exterior ring with area %.0f"%max_area) return max_area_id,True def arc_to_close_line(points,n_arc_points=40): geo = shapely.geometry.Polygon(points) centroid = np.array(geo.centroid) arc_center = (points[0]+points[-1])/2.0 start_vector = points[-1] - arc_center arc_center_to_centroid = centroid - arc_center if cross(arc_center_to_centroid, start_vector) > 0: arc_dir = +1 else: arc_dir = -1 angles = np.linspace(0,arc_dir*np.pi,n_arc_points) arc_points = np.zeros((n_arc_points,2),np.float64) for i in range(n_arc_points): angle = angles[i] xx = np.cos(angle) xy = 
-np.sin(angle) yx = np.sin(angle) yy = np.cos(angle) new_x = start_vector[0]*xx + start_vector[1]*xy new_y = start_vector[0]*yx + start_vector[1]*yy arc_points[i] = arc_center + [new_x,new_y] return arc_points def lines_to_polygons_slow(new_features,close_arc=False,single_feature=True,force_orientation=True): assert single_feature new_features = [f for f in new_features if len(f) > 2] log.info("Finding exterior ring from %d linestrings"%len(new_features)) new_features = clean_degenerate_rings(new_features) exterior_id,closed_p = find_exterior_ring(new_features) if close_arc and not closed_p: closing_arc = arc_to_close_line(new_features[exterior_id]) new_features[exterior_id] = np.concatenate((new_features[exterior_id],closing_arc)) exterior = new_features[exterior_id] interiors = [new_features[i] for i in range(len(new_features)) if i!=exterior_id] ext_poly = shapely.geometry.Polygon(exterior) if prepared is not None: prep_ext_poly = prepared.prep(ext_poly) else: prep_ext_poly = None new_interiors = [] extras = [] for i in range(len(interiors)): interior = interiors[i] if i%300==0: progress_message("Checking for orphan interior features",i,len(interiors)) if force_orientation and (utils.signed_area(interior) > 0): interior=interior[::-1] int_poly = shapely.geometry.Polygon(interior) if prep_ext_poly is None or prep_ext_poly.contains(int_poly): if prep_ext_poly and trust_prepared: new_interiors.append(interior) else: if ext_poly.contains(int_poly): new_interiors.append(interior) else: if prep_ext_poly is not None: log.warning( "A feature got through the prepared query, but the real query says it's outside the exterior") else: log.debug("Removing a feature that was outside the exterior ring" ) extras.append(interior) else: log.debug("Removing a feature that the fast query said was outside the exterior ring") extras.append(interior) poly_geom = shapely.geometry.Polygon(exterior,new_interiors) return [poly_geom],extras def lines_to_polygons(new_features,close_arc=False,single_feature=True,force_orientation=True, return_open=False,min_area=0.0): new_features = [f for f in new_features if len(f) > 2] new_features = clean_degenerate_rings(new_features) open_strings=[] simple_polys=[] for i,point_list in enumerate(new_features): if np.any(point_list[0]!=point_list[-1]): open_strings.append(i) else: simple_polys.append(shapely.geometry.Polygon(point_list)) log.info("%d open strings, %d simple polygons"%(len(open_strings), len(simple_polys))) if len(open_strings): if not return_open: log.error("New version of lines_to_polygons is faster but intolerant. 
Cannot handle ") log.error("%d open strings"%len(open_strings)) log.error("First open string starts at %s"%(new_features[open_strings[0]][0])) raise Exception("No longer can handle open line strings") polys=[] areas=np.array([p.area for p in simple_polys]) if min_area>0: select=areas>=min_area simple_polys=[p for p,a in zip(simple_polys,areas) if a>=min_area] areas=areas[select] ordering=np.argsort(-areas) simple_polys=[simple_polys[i] for i in ordering] areas=areas[ordering] log.info("Building index") for i,p in enumerate(simple_polys): p.join_id=i index=STRtree(simple_polys) log.info("done building index") poly_geoms=[] assigned_p=[False]*len(simple_polys) unassigned_idxs=list(range(len(simple_polys))) while len(unassigned_idxs): ext_idx=unassigned_idxs.pop(0) if assigned_p[ext_idx]: continue assigned_p[ext_idx]=True ext_poly=simple_polys[ext_idx] log.info("Examining largest poly left with area=%f, %d potential interiors"% (ext_poly.area,len(unassigned_idxs))) prep_ext_poly = prepare_geometry(ext_poly) hits=index.query(ext_poly) hit_indexes=[p.join_id for p in hits] hit_indexes.sort() for i in utils.progress(hit_indexes): if assigned_p[i]: continue int_poly=simple_polys[i] if prep_ext_poly.contains(int_poly): ext_poly=shapely.geometry.Polygon(ext_poly.exterior, list(ext_poly.interiors)+[int_poly.exterior]) prep_ext_poly=prepare_geometry(ext_poly) assigned_p[i]=True poly_geoms.append(ext_poly) if single_feature: break extras=[p for p,is_assigned in zip(simple_polys,assigned_p) if not is_assigned] if return_open: return poly_geoms,extras,open_strings else: return poly_geoms,extras
MIT License
harmon758/harmonbot
Discord/cogs/random.py
Random.day
python
async def day(self, ctx): await ctx.embed_reply(random.choice(calendar.day_name))
Random day of week
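The command simply samples calendar.day_name; a standalone equivalent outside the discord.py command machinery is:

import calendar
import random

print(list(calendar.day_name))           # ['Monday', ..., 'Sunday'] (locale-dependent)
print(random.choice(calendar.day_name))  # e.g. 'Wednesday'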
https://github.com/harmon758/harmonbot/blob/def3849beabdaea5e0f9c594dcf6d6d8980782bd/Discord/cogs/random.py#L298-L300
import discord from discord.ext import commands import asyncio import calendar import concurrent.futures import csv import datetime import inspect import io import json import multiprocessing import random import string from typing import Optional import xml.etree.ElementTree from bs4 import BeautifulSoup import dice import emoji import pydealer import pyparsing from utilities import checks from utilities.converters import Maptype def setup(bot): bot.add_cog(Random(bot)) class Random(commands.Cog): def __init__(self, bot): self.bot = bot for name, command in inspect.getmembers(self): if isinstance(command, commands.Command) and command.parent is None and name != "random": self.bot.add_command(command) self.random.add_command(command) for command, parent in ((self.fact_cat, self.cat), (self.fact_date, self.date), (self.fact_number, self.number)): self.fact.add_command(commands.Command(command, name = parent.name, aliases = [parent.name + 's'], checks = [checks.not_forbidden().predicate])) parent.add_command(commands.Command(command, name = "fact", aliases = ["facts"], checks = [checks.not_forbidden().predicate])) self.random_commands = ( (self.blob, "Blobs", "blobs", []), (self.color, "Resources", "color", ["colour"]), (self.giphy, "Images", "giphy", []), (self.map, "Location", "map", []), (self.photo, "Images", "image", ["image"]), (self.streetview, "Location", "streetview", []), (self.time, "Location", "time", []), (self.uesp, "Search", "uesp", []), (self.user, "User", "user", ["member"]), (self.wikipedia, "Search", "wikipedia", ["wiki"]), (self.xkcd, "Entertainment", "xkcd", []) ) for command, cog_name, parent_name, aliases in self.random_commands: self.random.add_command(commands.Command(command, aliases = aliases, checks = [checks.not_forbidden().predicate])) if (cog := self.bot.get_cog(cog_name)) and (parent := getattr(cog, parent_name)): parent.add_command(commands.Command(command, name = "random", checks = [checks.not_forbidden().predicate])) self.jokes = [] try: with open(self.bot.data_path + "/jokes.csv", newline = "") as jokes_file: jokes_reader = csv.reader(jokes_file) for row in jokes_reader: self.jokes.append(row[0]) except FileNotFoundError: pass def cog_unload(self): for command, cog_name, parent_name, _ in self.random_commands: if (cog := self.bot.get_cog(cog_name)) and (parent := getattr(cog, parent_name)): parent.remove_command("random") async def cog_check(self, ctx): return await checks.not_forbidden().predicate(ctx) @commands.group(invoke_without_command = True, case_insensitive = True) async def random(self, ctx): await ctx.embed_reply(":grey_question: Random what?") async def blob(self, ctx): if "Blobs" in self.bot.cogs: record = await ctx.bot.db.fetchrow("SELECT * FROM blobs.blobs TABLESAMPLE BERNOULLI (1) LIMIT 1") await ctx.embed_reply(title = record["blob"], image_url = record["image"]) async def color(self, ctx): url = "http://www.colourlovers.com/api/colors/random" params = {"numResults": 1} if cog := self.bot.get_cog("Resources"): await cog.process_color(ctx, url, params) async def giphy(self, ctx): url = "http://api.giphy.com/v1/gifs/random" params = {"api_key": ctx.bot.GIPHY_API_KEY} async with ctx.bot.aiohttp_session.get(url, params = params) as resp: data = await resp.json() await ctx.embed_reply(image_url = data["data"]["image_url"]) async def map(self, ctx, zoom: Optional[int] = 13, maptype: Optional[Maptype] = "roadmap"): latitude = random.uniform(-90, 90) longitude = random.uniform(-180, 180) url = "https://maps.googleapis.com/maps/api/staticmap" 
params = {"center": f"{latitude},{longitude}", "zoom": zoom, "maptype": maptype, "size": "640x640", "key": ctx.bot.GOOGLE_API_KEY} async with ctx.bot.aiohttp_session.get(url, params = params) as resp: data = await resp.read() await ctx.embed_reply(fields = (("latitude", latitude), ("longitude", longitude)), image_url = "attachment://map.png", file = discord.File(io.BytesIO(data), filename = "map.png")) async def photo(self, ctx, *, query = ""): url = "https://api.unsplash.com/photos/random" headers = {"Accept-Version": "v1", "Authorization": f"Client-ID {ctx.bot.UNSPLASH_ACCESS_KEY}"} params = {"query": query} async with ctx.bot.aiohttp_session.get(url, headers = headers, params = params) as resp: data = await resp.json() if "errors" in data: errors = '\n'.join(data["errors"]) return await ctx.embed_reply(f":no_entry: Error:\n{errors}") await ctx.embed_reply(data["description"] or "", author_name = f"{data['user']['name']} on Unsplash", author_url = f"{data['user']['links']['html']}?utm_source=Harmonbot&utm_medium=referral", author_icon_url = data["user"]["profile_image"]["small"], image_url = data["urls"]["full"]) async def streetview(self, ctx, radius: int = 5_000_000): latitude = random.uniform(-90, 90) longitude = random.uniform(-180, 180) url = "https://maps.googleapis.com/maps/api/streetview" params = {"location": f"{latitude},{longitude}", "size": "640x640", "fov": 120, "radius": radius, "key": ctx.bot.GOOGLE_API_KEY} async with ctx.bot.aiohttp_session.get(url, params = params) as resp: data = await resp.read() await ctx.embed_reply(fields = (("latitude", latitude), ("longitude", longitude)), image_url = "attachment://streetview.png", file = discord.File(io.BytesIO(data), filename = "streetview.png")) async def time(self, ctx): await ctx.embed_reply(f"{random.randint(0, 23):02}:{random.randint(0, 59):02}") async def uesp(self, ctx): if cog := self.bot.get_cog("Search"): await cog.process_uesp(ctx, None, random = True) else: await ctx.embed_reply(title = "Random UESP page", title_url = "http://uesp.net/wiki/Special:Random") async def user(self, ctx): await ctx.embed_reply(random.choice(ctx.guild.members).mention) async def wikipedia(self, ctx): if cog := self.bot.get_cog("Search"): await cog.process_wikipedia(ctx, None, random = True) else: await ctx.embed_reply(title = "Random Wikipedia article", title_url = "https://wikipedia.org/wiki/Special:Random") async def xkcd(self, ctx): url = "http://xkcd.com/info.0.json" async with ctx.bot.aiohttp_session.get(url) as resp: data = await resp.json() number = random.randint(1, data['num']) url = f"http://xkcd.com/{number}/info.0.json" if cog := self.bot.get_cog("Entertainment"): await cog.process_xkcd(ctx, url) @commands.command(aliases = ["rabbit"]) async def bunny(self, ctx): url = "https://api.bunnies.io/v2/loop/random/?media=gif" async with ctx.bot.aiohttp_session.get(url) as resp: data = await resp.json() gif = data["media"]["gif"] await ctx.embed_reply(f"[:rabbit2:]({gif})", image_url = gif) @commands.command() async def card(self, ctx): await ctx.embed_reply(f":{random.choice(pydealer.const.SUITS).lower()}: {random.choice(pydealer.const.VALUES)}") @commands.group(invoke_without_command = True, case_insensitive = True) async def cat(self, ctx, category: Optional[str]): url = "http://thecatapi.com/api/images/get" params = {"format": "xml", "results_per_page": 1} if category: params["category"] = category async with ctx.bot.aiohttp_session.get(url, params = params) as resp: data = await resp.text() try: if (url := 
xml.etree.ElementTree.fromstring(data).find(".//url")) is None: return await ctx.embed_reply(":no_entry: Error: Category not found") except xml.etree.ElementTree.ParseError: await ctx.embed_reply(":no_entry: Error") else: await ctx.embed_reply(f"[\N{CAT FACE}]({url.text})", image_url = url.text) @cat.command(name = "categories", aliases = ["cats"]) async def cat_categories(self, ctx): async with ctx.bot.aiohttp_session.get("http://thecatapi.com/api/categories/list") as resp: data = await resp.text() try: categories = xml.etree.ElementTree.fromstring(data).findall(".//name") except xml.etree.ElementTree.ParseError: await ctx.embed_reply(":no_entry: Error") else: await ctx.embed_reply('\n'.join(sorted(category.text for category in categories))) @commands.command(aliases = ["choice", "pick"], require_var_positional = True) async def choose(self, ctx, *choices: str): await ctx.embed_reply(random.choice(choices)) @commands.command(aliases = ["flip"]) async def coin(self, ctx): await ctx.embed_reply(random.choice(("Heads!", "Tails!"))) @commands.command() async def command(self, ctx): await ctx.embed_reply(f"{ctx.prefix}{random.choice(tuple(set(command.name for command in self.bot.commands)))}") @commands.command(aliases = ["die", "roll"]) async def dice(self, ctx, *, input: str = '6'): if 'd' not in input: input = 'd' + input with multiprocessing.Pool(1) as pool: async_result = pool.apply_async(dice.roll, (input,)) future = self.bot.loop.run_in_executor(None, async_result.get, 10.0) try: result = await asyncio.wait_for(future, 10.0) if isinstance(result, int): await ctx.embed_reply(result) else: await ctx.embed_reply(", ".join(str(roll) for roll in result)) except discord.HTTPException: await ctx.embed_reply(":no_entry: Output too long") except pyparsing.ParseException: await ctx.embed_reply(":no_entry: Invalid input") except (concurrent.futures.TimeoutError, multiprocessing.context.TimeoutError): await ctx.embed_reply(":no_entry: Execution exceeded time limit") except dice.DiceFatalException as e: await ctx.embed_reply(f":no_entry: Error: {e}") @commands.group(invoke_without_command = True, case_insensitive = True) async def date(self, ctx): await ctx.embed_reply(datetime.date.fromordinal(random.randint(1, 365)).strftime("%B %d")) @commands.command()
MIT License
iter8-tools/iter8-analytics
iter8_analytics/api/v2/experiment.py
get_analytics_results
python
def get_analytics_results(expr: ExperimentResource): ana = Analysis() if expr.status.analysis is not None: ana.aggregated_builtin_hists = expr.status.analysis.aggregated_builtin_hists expr.status.analysis = ana expr.status.analysis.aggregated_metrics = get_aggregated_metrics(expr) expr.status.analysis.version_assessments = get_version_assessments(expr) expr.status.analysis.winner_assessment = get_winner_assessment(expr) expr.status.analysis.weights = get_weights(expr) return expr.status.analysis
Get analysis results using experiment resource and metric resources.
https://github.com/iter8-tools/iter8-analytics/blob/742e8d79f11e517b62b065383dc71ad956f0db26/iter8_analytics/api/v2/experiment.py#L372-L385
import logging import math import pprint from typing import Sequence import numpy as np from iter8_analytics.api.v2.types import ExperimentResource, VersionAssessmentsAnalysis, VersionWeight, VersionDetail, WinnerAssessmentAnalysis, WinnerAssessmentData, WeightsAnalysis, Analysis, Objective, TestingPattern, Reward, PreferredDirection from iter8_analytics.api.v2.metrics import get_aggregated_metrics from iter8_analytics.api.utils import gen_round from iter8_analytics.api.utils import Message, MessageLevel from iter8_analytics.advancedparams import AdvancedParameters logger = logging.getLogger('iter8_analytics') def get_version_assessments(experiment_resource: ExperimentResource): versions = [experiment_resource.spec.versionInfo.baseline] if experiment_resource.spec.versionInfo.candidates is not None: versions += experiment_resource.spec.versionInfo.candidates messages = [] def check_limits(obj: Objective, value: float) -> bool: if (obj.upper_limit is not None) and (value > float(obj.upper_limit)): return False if (obj.lower_limit is not None) and (value < float(obj.lower_limit)): return False return True aggregated_metric_data = experiment_resource.status.analysis.aggregated_metrics.data version_assessments = VersionAssessmentsAnalysis(data = {}) if experiment_resource.spec.criteria is None or experiment_resource.spec.criteria.objectives is None: return version_assessments for version in versions: version_assessments.data[version.name] = [False] * len(experiment_resource.spec.criteria.objectives) for ind, obj in enumerate(experiment_resource.spec.criteria.objectives): if obj.metric in aggregated_metric_data: versions_metric_data = aggregated_metric_data[obj.metric].data for version in versions: if version.name in versions_metric_data: if versions_metric_data[version.name].value is not None: version_assessments.data[version.name][ind] = check_limits(obj, float(versions_metric_data[version.name].value)) else: messages.append(Message(MessageLevel.WARNING, f"Value for {obj.metric} metric and {version.name} version is None.")) else: messages.append(Message(MessageLevel.WARNING, f"Value for {obj.metric} metric and {version.name} version is unavailable.")) else: messages.append(Message(MessageLevel.WARNING, f"Aggregated metric object for {obj.metric} metric is unavailable.")) version_assessments.message = Message.join_messages(messages) logger.debug("version assessments: %s", pprint.PrettyPrinter().pformat(version_assessments)) return version_assessments def get_feasible_versions(experiment_resource: ExperimentResource, versions: Sequence[VersionDetail]) -> Sequence[VersionDetail]: if experiment_resource.status.analysis.version_assessments.data is None or len(experiment_resource.status.analysis.version_assessments.data) == 0: feasible_versions = versions else: feasible_versions = list(filter(lambda version: all(experiment_resource.status.analysis.version_assessments.data[version.name]), versions)) return feasible_versions def get_winner_assessment_for_conformance(experiment_resource: ExperimentResource): was = WinnerAssessmentAnalysis() versions = [experiment_resource.spec.versionInfo.baseline] feasible_versions = get_feasible_versions(experiment_resource, versions) fvn = list(map(lambda version: version.name, feasible_versions)) if versions[0].name in fvn: was.data = WinnerAssessmentData(winnerFound = True, winner = versions[0].name, bestVersions = [versions[0].name]) was.message = Message.join_messages([Message(MessageLevel.INFO, "baseline satisfies all objectives")]) return was def 
get_winner_assessment_for_canarybg(experiment_resource: ExperimentResource): was = WinnerAssessmentAnalysis() versions = [experiment_resource.spec.versionInfo.baseline] versions += experiment_resource.spec.versionInfo.candidates feasible_versions = get_feasible_versions(experiment_resource, versions) fvn = list(map(lambda version: version.name, feasible_versions)) if versions[1].name in fvn: was.data = WinnerAssessmentData(winnerFound = True, winner = versions[1].name, bestVersions = [versions[1].name]) was.message = Message.join_messages([Message(MessageLevel.INFO, "candidate satisfies all objectives")]) elif versions[0].name in fvn: was.data = WinnerAssessmentData(winnerFound = True, winner = versions[0].name, bestVersions = [versions[0].name]) was.message = Message.join_messages([Message(MessageLevel.INFO, "baseline satisfies all objectives; candidate does not")]) return was def get_winner_assessment_for_abn(experiment_resource: ExperimentResource): was = WinnerAssessmentAnalysis() versions = [experiment_resource.spec.versionInfo.baseline] versions += experiment_resource.spec.versionInfo.candidates logger.info("Versions: %s", versions) feasible_versions = get_feasible_versions(experiment_resource, versions) logger.info("Feasible versions: %s", feasible_versions) fvn = list(map(lambda version: version.name, feasible_versions)) def get_inf_reward(reward: Reward): if reward.preferredDirection == PreferredDirection.HIGH: return -math.inf else: return math.inf def first_better_than_second(first: float, second: float, preferred_direction: PreferredDirection): if preferred_direction is None: err = "Metrics cannot be compared without preferred direction" logger.error(err) return False, err if preferred_direction is PreferredDirection.HIGH: return (first > second), None return (first < second), None aggregated_metric_data = experiment_resource.status.analysis.aggregated_metrics.data if experiment_resource.spec.criteria.rewards is not None: reward_metric = experiment_resource.spec.criteria.rewards[0].metric if reward_metric in aggregated_metric_data: reward_metric_data = aggregated_metric_data[reward_metric].data (top_reward, best_versions) = (get_inf_reward( experiment_resource.spec.criteria.rewards[0]), []) messages = [] if not fvn: messages.append(Message(MessageLevel.INFO, "no version satisfies all objectives")) for fver in fvn: if fver in reward_metric_data: if reward_metric_data[fver].value is not None: if reward_metric_data[fver].value == top_reward: best_versions.append(fver) else: is_better, err = first_better_than_second( float(reward_metric_data[fver].value), float(top_reward), experiment_resource.spec.criteria.rewards[0].preferredDirection) if err is None: if is_better: (top_reward, best_versions) = (reward_metric_data[fver].value, [fver]) else: was.message = Message.join_messages(Message(MessageLevel.ERROR, str(err))) return was else: messages.append(Message(MessageLevel.WARNING, f"reward value for feasible version {fver} is not available")) else: messages.append(Message(MessageLevel.WARNING, f"reward value for feasible version {fver} is not available")) was.data.bestVersions = best_versions if len(best_versions) == 1: was.data.winnerFound = True was.data.winner = best_versions[0] messages.append(Message(MessageLevel.INFO, "found unique winner")) elif len(best_versions) > 1: messages.append(Message(MessageLevel.INFO, "no unique winner; two or more feasible versions with same reward value")) was.message = Message.join_messages(messages) else: was.message = 
Message.join_messages([Message(MessageLevel.WARNING, "reward metric values are not available")]) else: was.message = Message.join_messages([Message(MessageLevel.WARNING, "No reward metric in experiment. Winner assessment cannot be computed for ab or abn experiments without reward metric.")]) return was def get_winner_assessment(experiment_resource: ExperimentResource): if experiment_resource.spec.strategy.testingPattern == TestingPattern.CONFORMANCE: return get_winner_assessment_for_conformance(experiment_resource) elif experiment_resource.spec.strategy.testingPattern == TestingPattern.CANARY: return get_winner_assessment_for_canarybg(experiment_resource) else: return get_winner_assessment_for_abn(experiment_resource) def get_weights(experiment_resource: ExperimentResource): if experiment_resource.spec.strategy.testingPattern == TestingPattern.CONFORMANCE: return WeightsAnalysis(data = [], message = "weight computation is not applicable to a conformance experiment") versions = [experiment_resource.spec.versionInfo.baseline] versions += experiment_resource.spec.versionInfo.candidates messages = [] exploration_weights = np.full((len(versions), ), 1.0 / len(versions)) def get_exploitation_weights(): exploitation_weights = np.full((len(versions), ), 0.0) try: bvs = experiment_resource.status.analysis.winner_assessment.data.bestVersions assert len(bvs) > 0 messages.append(Message(MessageLevel.INFO, "found best version(s)")) for i, version in enumerate(versions): if version.name in bvs: exploitation_weights[i] = 1/len(bvs) except (KeyError, AssertionError): exploitation_weights = np.full((len(versions), ), 0.0) exploitation_weights[0] = 1.0 messages.append(Message(MessageLevel.INFO, "no best version(s) found")) return exploitation_weights exploitation_weights = get_exploitation_weights() def get_constrained_weights(input_weights): old_weights = [100] + ([0]*(len(versions) - 1)) if experiment_resource.status.currentWeightDistribution is not None: old_weights = list(map(lambda x: x.value, experiment_resource.status.currentWeightDistribution)) logger.debug("Old weights: %s", old_weights) logger.debug("Input weights: %s", input_weights) constrained_weights = input_weights.copy() if experiment_resource.spec.strategy.weights is not None: for i in range(len(versions)): if i == 0: continue increase = input_weights[i] - old_weights[i] excess = max(0, increase - experiment_resource.spec.strategy.weights.maxCandidateWeightIncrement, input_weights[i] - experiment_resource.spec.strategy.weights.maxCandidateWeight) constrained_weights[i] -= excess constrained_weights[0] += excess logger.debug("Constrained weights: %s", constrained_weights) return constrained_weights ewf = AdvancedParameters.exploration_traffic_percentage / 100.0 mix_weights = (exploration_weights * ewf) + (exploitation_weights * (1 - ewf)) mix_weights *= 100.0 constrained_weights = get_constrained_weights(mix_weights) integral_weights = gen_round(constrained_weights, 100) data = [] for version in versions: data.append(VersionWeight(name = version.name, value = next(integral_weights))) _weights = WeightsAnalysis(data = data) _weights.message = Message.join_messages([Message(MessageLevel.INFO, "all ok")]) logger.debug("weights: %s", pprint.PrettyPrinter().pformat(_weights)) return _weights
Apache License 2.0
scikit-nano/scikit-nano
sknano/core/atoms/_poav_atoms.py
POAV.pyramidalization_angles
python
def pyramidalization_angles(self, value): if not isinstance(value, list): raise TypeError('Expected a list') self._pyramidalization_angles = value
Set list of :math:`\\theta_{P}` angles.
https://github.com/scikit-nano/scikit-nano/blob/ef9b24165ba37918b3f520657f7311ba139b3e7d/sknano/core/atoms/_poav_atoms.py#L317-L321
from __future__ import absolute_import, division, print_function from __future__ import unicode_literals __docformat__ = 'restructuredtext en' from collections import OrderedDict import functools import operator import warnings import numpy as np np.seterr(all='warn') from sknano.core.math import vector as vec __all__ = ['POAV', 'POAV1', 'POAV2', 'POAVR', 'POAVAtomMixin', 'POAVAtomsMixin'] class POAV: def __init__(self, sigma_bonds): self.bonds = sigma_bonds self.bond1 = self.bonds[0].vector self.bond2 = self.bonds[1].vector self.bond3 = self.bonds[2].vector self.bond_angles = self.bonds.angles self.bond_angle_pairs = self.bonds.bond_angle_pairs self.sigma_bond_angle12 = self.bond_angles[0] self.sigma_bond_angle23 = self.bond_angles[1] self.sigma_bond_angle31 = self.bond_angles[2] self.cosa12 = np.cos(self.bond_angles[0]) self.cosa23 = np.cos(self.bond_angles[1]) self.cosa31 = np.cos(self.bond_angles[2]) self._v1 = self.bond1 self._v2 = self.bond2 self._v3 = self.bond3 self._pyramidalization_angles = None self._sigma_pi_angles = None self._misalignment_angles = None def __str__(self): fmtstr = '{}\n=====\n'.format(self.__class__.__name__) for k, v in list(self.todict(rad2deg=True).items()): fmtstr += '{}: {}\n'.format(k, v) return fmtstr def __repr__(self): return '{}({bonds!r})'.format(self.__class__.__name__, **dict(bonds=self.bonds)) @property def v1(self): return self._v1 @property def v2(self): return self._v2 @property def v3(self): return self._v3 @property def Vv1v2v3(self): return np.abs(vec.scalar_triple_product(self.v1, self.v2, self.v3)) @property def vpi(self): return self.reciprocal_v1 + self.reciprocal_v2 + self.reciprocal_v3 @property def Vpi(self): return self.vpi.unit_vector @property def reciprocal_v1(self): with warnings.catch_warnings(): warnings.filterwarnings('error') try: return vec.cross(self.v2, self.v3) / self.Vv1v2v3 except Warning: return vec.cross(self.v2, self.v3) @property def reciprocal_v2(self): with warnings.catch_warnings(): warnings.filterwarnings('error') try: return vec.cross(self.v3, self.v1) / self.Vv1v2v3 except Warning: return vec.cross(self.v3, self.v1) @property def reciprocal_v3(self): with warnings.catch_warnings(): warnings.filterwarnings('error') try: return vec.cross(self.v1, self.v2) / self.Vv1v2v3 except Warning: return vec.cross(self.v1, self.v2) @property def V1(self): return self.bond1.unit_vector @property def V2(self): return self.bond2.unit_vector @property def V3(self): return self.bond3.unit_vector @property def R1(self): return self.bond1.length @property def R2(self): return self.bond2.length @property def R3(self): return self.bond3.length @property def t(self): return self.Vv1v2v3 / 6 @property def T(self): return np.abs(vec.scalar_triple_product(self.V1, self.V2, self.V3) / 6) @property def A(self): return self.vpi.magnitude @property def H(self): return 3 * self.T / self.A @property def sigma_pi_angles(self): return self._sigma_pi_angles @sigma_pi_angles.setter def sigma_pi_angles(self, value): if not isinstance(value, list): raise TypeError('Expected a list') self._sigma_pi_angles = value @property def pyramidalization_angles(self): return self._pyramidalization_angles @pyramidalization_angles.setter
BSD 2-Clause Simplified License
tensor46/tensormonk
tensormonk/data/utils.py
totensor
python
def totensor(input, t_size: tuple = None): if isinstance(input, torch.Tensor): if t_size is not None: if len(t_size) == input.dim() == 4: if t_size[2] != input.size(2) or t_size[3] != input.size(3): input = F.interpolate(input, size=t_size[2:]) return input if isinstance(input, str): if not os.path.isfile(input): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), input) input = ImPIL.open(input).convert("RGB") if ImPIL.isImageType(input): if t_size is not None: if t_size[1] == 1: input = input.convert("L") if t_size[2] != input.size[1] or t_size[3] != input.size[0]: input = input.resize((t_size[3], t_size[2]), ImPIL.BILINEAR) else: raise TypeError("totensor: input must be str/pil-imgage: " "{}".format(type(input).__name__)) tensor = _totensor(input) if tensor.dim() == 2: tensor.unsqueeze_(0) return tensor
r"""Converts image_file or PIL image to torch tensor. Args: input (str/pil-image): full path of image or pil-image t_size (list, optional): tensor_size in BCHW, used to resize the input
https://github.com/tensor46/tensormonk/blob/67617d3fdf8fde072ba9cab42de7d67c79b17494/tensormonk/data/utils.py#L15-L47
import os import errno import torch import torch.nn.functional as F from PIL import Image as ImPIL from torchvision import transforms import threading import requests DEBUG = False _totensor = transforms.ToTensor()
MIT License
hastagab/awesome-python-scripts
File-Sharing-Bot/bot.py
echo
python
def echo(bot, update):
    if update.message.document:
        file_id = update.message.document.file_id
        f = open(str(os.getcwd()) + "/file", "w")
        f.write(file_id)
        f.close()
        update.message.reply_text("Received. Now send file name and location to store using the /put command.")
    else:
        reply = "Invalid Input."
        update.message.reply_text(reply)
Echo the user message.
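For completeness, a hypothetical wiring of this handler into python-telegram-bot's dispatcher (the token placeholder and filter choice are assumptions consistent with the imports in the context below):

from telegram.ext import Updater, MessageHandler, Filters

updater = Updater("BOT_TOKEN")
dp = updater.dispatcher
dp.add_handler(MessageHandler(Filters.document | Filters.text, echo))
updater.start_polling()
updater.idle()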
https://github.com/hastagab/awesome-python-scripts/blob/743bfde7f24e52f1b53a12dae8744be9daad7cda/File-Sharing-Bot/bot.py#L108-L118
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters import logging import os import telegram import shutil logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) username_list = [] def start(bot, update): reply = "Welcome to World of Automation. \nI am a bot developed by a Lazy Programmer.\nSend /help command to see what i can do." update.message.reply_text(reply) def help(bot, update): admin = update.message.from_user.username if admin == username_list[0]: reply = '''Send /get folder_name/file_name.extension to receive a file. \nSend /ls folder_name to show list of files. \nSend /put folder_name/file_name.extension to upload last sent file. \nSend /mkdir folder_name to create a Folder. \nSend /remove folder_name/filename.extension to delete a file. \nSend /adduser username to give access. \nSend /removeuser username to revoke access. \nSend /showuser to show list of users ''' else: reply = '''Send /get folder_name/file_name.extension to receive a file. \nSend /ls folder_name to show list of files. \nSend /put folder_name/file_name.extension to upload last sent file. \nSend /mkdir folder_name to create a Folder. ''' update.message.reply_text(reply) def get(bot, update): username = update.message.from_user.username if(username not in username_list): update.message.reply_text("You are not Authorized.") return file = update.message.text.split(" ")[-1] if(file == "/send"): update.message.reply_text("Invalid File name.") else: reply = "Findind and Sending a requested file to you. Hold on..." update.message.reply_text(reply) path = os.getcwd()+'/'+file if (os.path.exists(path)): bot.send_document(chat_id=update.message.chat_id,document=open(path, 'rb'), timeout = 100) else: update.message.reply_text("File not Found.") def ls(bot, update): username = update.message.from_user.username if(username not in username_list): update.message.reply_text("You are not Authorized.") return file = update.message.text.split(" ")[-1] if(file == "/show"): update.message.reply_text("Invalid Directory name.") else: reply = "Findind and Sending a list of files to you. Hold on..." update.message.reply_text(reply) path = os.getcwd()+'/'+file if (os.path.exists(path)): update.message.reply_text(os.listdir(path)) else: update.message.reply_text("Directory not Found.") def put(bot, update): f = open(str(os.getcwd())+"/file", "r") file_id = f.read() f.close if file_id == "": update.message.reply_text("You didn't upload file.") else: new_file = bot.get_file(file_id) message = update.message.text.split(" ") path = message[-1] if len(path) < 1: update.message.reply_text("Enter Path correctly.") else: new_file.download(os.getcwd()+'/'+path) update.message.reply_text("File Stored.") def mkdir(bot, update): message = update.message.text.split(" ") if len(message) < 1 or message[-1] == "/mkdir": update.message.reply_text("Invalid Syntax. Refer syntax in help section.") return path = os.getcwd() + "/" + message[-1] os.mkdir(path) update.message.reply_text("Folder Created.")
MIT License
i-pan/kaggle-rsna18
models/DeformableConvNets/lib/nms/setup_windows.py
locate_cuda
python
def locate_cuda(): if 'CUDA_PATH' in os.environ: home = os.environ['CUDA_PATH'] print("home = %s\n" % home) nvcc = pjoin(home, 'bin', nvcc_bin) else: default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin') nvcc = find_in_path(nvcc_bin, os.environ['PATH'] + os.pathsep + default_path) if nvcc is None: raise EnvironmentError('The nvcc binary could not be ' 'located in your $PATH. Either add it to your path, or set $CUDA_PATH') home = os.path.dirname(os.path.dirname(nvcc)) print("home = %s, nvcc = %s\n" % (home, nvcc)) cudaconfig = {'home':home, 'nvcc':nvcc, 'include': pjoin(home, 'include'), 'lib64': pjoin(home, lib_dir)} for k, v in cudaconfig.iteritems(): if not os.path.exists(v): raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v)) return cudaconfig
Locate the CUDA environment on the system.

Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64' and values giving the absolute path to each directory.

Starts by looking for the CUDA_PATH env variable. If not found, everything is based on finding 'nvcc' in the PATH.
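Typical use inside a setup script looks like the following (the Windows paths are illustrative; on a machine without CUDA the call raises EnvironmentError):

CUDA = locate_cuda()
print(CUDA['nvcc'])     # e.g. C:\CUDA\bin\nvcc.exe
print(CUDA['include'])  # e.g. C:\CUDA\include
# The dict is then typically passed on, e.g. Extension(..., include_dirs=[CUDA['include']], ...)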
https://github.com/i-pan/kaggle-rsna18/blob/7c52dbc401c2ca60cab04d622faa21bb8bfd7825/models/DeformableConvNets/lib/nms/setup_windows.py#L36-L69
import numpy as np import os from os.path import join as pjoin from setuptools import setup from distutils.extension import Extension from Cython.Distutils import build_ext import subprocess nvcc_bin = 'nvcc.exe' lib_dir = 'lib/x64' import distutils.msvc9compiler distutils.msvc9compiler.VERSION = 14.0 def find_in_path(name, path): for dir in path.split(os.pathsep): binpath = pjoin(dir, name) if os.path.exists(binpath): return os.path.abspath(binpath) return None
MIT License
aws-samples/aws-device-farm-appium-python-tests-for-ios-sample-app
tests/pages/login_page.py
LoginPage.is_invalid_login_message_displayed
python
def is_invalid_login_message_displayed(self): permission_denied_message = self.driver.find_element_by_accessibility_id(self.PERMISSION_DENIED_MESSAGE_ID) return permission_denied_message.is_displayed()
Returns visibility of invalid login message as a boolean.
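A hypothetical Appium test using this page object could read as follows (the LoginPage constructor signature and the credentials are assumptions, not part of this module):

def test_invalid_login_shows_permission_denied(driver):
    login_page = LoginPage(driver)
    login_page.log_in("admin", "wrong-password")
    assert login_page.is_invalid_login_message_displayed()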
https://github.com/aws-samples/aws-device-farm-appium-python-tests-for-ios-sample-app/blob/f30cba8605eada6b947c931261b537e2479aa53b/tests/pages/login_page.py#L54-L57
from tests.pages.base_pages.base_page import BasePage class LoginPage(BasePage): LOGIN_BUTTON_ID = 'Login' LOGGED_IN_MESSAGE_ID = 'Logged in as admin' PERMISSION_DENIED_MESSAGE_ID = 'PERMISSION DENIED' LOGOUT_BUTTON_ID = 'log out' TRY_AGAIN_BUTTON_ID = 'try again' def get_element_center(self, element): element_mid_width = element.size['width'] / 2 element_mid_height = element.size['height'] / 2 return element.location['x'] + element_mid_width, element.location['y'] + element_mid_height def tap_button_center(self, element): button_center = self.get_element_center(element) self.driver.tap([button_center], 1) def log_in(self, username, password): username_field = self.driver.find_element_by_class_name(self.TEXT_FIELD_CLASS) password_field = self.driver.find_element_by_class_name(self.SECURE_TEXT_FIELD_CLASS) log_in_button = self.driver.find_element_by_accessibility_id(self.LOGIN_BUTTON_ID) username_field.send_keys(username) password_field.send_keys(password) log_in_button.click() def is_valid_login_message_displayed(self): logged_in_message = self.driver.find_element_by_accessibility_id(self.LOGGED_IN_MESSAGE_ID) return logged_in_message.is_displayed()
Apache License 2.0
biasvariancelabs/aitlas
aitlas/utils/segmentation_losses.py
DiceFocal.__init__
python
def __init__(self): super(DiceFocal, self).__init__([DiceLoss(), FocalLoss()])
Combination loss: DiceLoss() + FocalLoss()
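A minimal smoke test of the combined loss (the shapes and the sigmoid are assumptions about how a segmentation head would feed it, since both terms expect probabilities in [0, 1]):

import torch

criterion = DiceFocal()
probs = torch.sigmoid(torch.randn(2, 1, 8, 8))       # predicted probabilities
targets = torch.randint(0, 2, (2, 1, 8, 8)).float()  # binary masks

loss = criterion(probs, targets)                     # DiceLoss(...) + FocalLoss(...)
print(loss.item())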
https://github.com/biasvariancelabs/aitlas/blob/20473cdd8c46211444b8ee742b944b07200a7d43/aitlas/utils/segmentation_losses.py#L65-L69
import torch import torch.nn.functional as F from torch import nn class DiceLoss(nn.Module): def __init__(self): super(DiceLoss, self).__init__() def forward(self, inputs, targets, smooth=1): inputs = inputs.view(-1) targets = targets.view(-1) intersection = (inputs * targets).sum() dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum() + smooth) return 1 - dice class FocalLoss(nn.Module): ALPHA = 0.8 GAMMA = 2 def __init__(self): super(FocalLoss, self).__init__() def forward(self, inputs, targets, alpha=ALPHA, gamma=GAMMA): inputs = inputs.view(-1) targets = targets.view(-1) BCE = F.binary_cross_entropy(inputs, targets, reduction="mean") BCE_EXP = torch.exp(-BCE) focal_loss = alpha * (1 - BCE_EXP) ** gamma * BCE return focal_loss class ComboLoss(nn.Module): def __init__(self, loss_modules: list): super().__init__() self.losses = loss_modules def forward(self, *args, **kwargs): loss_values = [l(*args, **kwargs) for l in self.losses] return sum(loss_values) class DiceFocal(ComboLoss):
MIT License
rangilyu/nanodet
nanodet/model/loss/gfocal_loss.py
quality_focal_loss
python
def quality_focal_loss(pred, target, beta=2.0): assert ( len(target) == 2 ), """target for QFL must be a tuple of two elements, including category label and quality label, respectively""" label, score = target pred_sigmoid = pred.sigmoid() scale_factor = pred_sigmoid zerolabel = scale_factor.new_zeros(pred.shape) loss = F.binary_cross_entropy_with_logits( pred, zerolabel, reduction="none" ) * scale_factor.pow(beta) bg_class_ind = pred.size(1) pos = torch.nonzero((label >= 0) & (label < bg_class_ind), as_tuple=False).squeeze( 1 ) pos_label = label[pos].long() scale_factor = score[pos] - pred_sigmoid[pos, pos_label] loss[pos, pos_label] = F.binary_cross_entropy_with_logits( pred[pos, pos_label], score[pos], reduction="none" ) * scale_factor.abs().pow(beta) loss = loss.sum(dim=1, keepdim=False) return loss
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. Returns: torch.Tensor: Loss tensor with shape (N,).
https://github.com/rangilyu/nanodet/blob/d7caaca17b08731b9ed42441e09916902f735c69/nanodet/model/loss/gfocal_loss.py#L9-L54
import torch import torch.nn as nn import torch.nn.functional as F from .utils import weighted_loss @weighted_loss
Apache License 2.0
nacx/kahuna
kahuna/utils/tomcat.py
TomcatScripts.configure_abiquo_listener
python
def configure_abiquo_listener(self): return Statements.exec("sed -i -e " "'/GlobalResourcesLifecycleListener/a <Listener className=" "\"com.abiquo.listeners.AbiquoConfigurationListener\"/>' " "/etc/tomcat6/server.xml")
Adds the Abiquo listener to server.xml
https://github.com/nacx/kahuna/blob/77fb6d97841436c63650ad9f62157e6fc9ba43f6/kahuna/utils/tomcat.py#L99-L104
from __future__ import with_statement import git import hostname import nfs from org.jclouds.scriptbuilder.domain import Statements from org.jclouds.scriptbuilder.domain.chef import DataBag from org.jclouds.scriptbuilder.domain.chef import RunList from org.jclouds.scriptbuilder.statements.chef import ChefSolo class TomcatScripts: def __init__(self, boundary_org, boundary_key, newrelic_key): self.__templatedir = "kahuna/utils/templates" self.__abiquojar = "http://10.60.20.42/2.4/tomcat/abiquo-tomcat.jar" self.__mysqljar = "http://repo1.maven.org/maven2/mysql/" + "mysql-connector-java/5.1.10/" + "mysql-connector-java-5.1.10.jar" self._boundary_org = boundary_org self._boundary_key = boundary_key self._newrelic_key = newrelic_key def start(self): return Statements.exec("service tomcat6 start") def stop(self): return Statements.exec("service tomcat6 stop") def configure_context(self, module, dbhost, dbuser, dbpass, jndi): with open("%s/context.xml" % self.__templatedir, "r") as f: context_config = f.read() % { 'dbhost': dbhost, 'dbuser': dbuser, 'dbpass': dbpass, 'jndi': jndi } return Statements.createOrOverwriteFile( "/etc/tomcat6/Catalina/localhost/%s.xml" % module, [context_config]) def configure_logging(self, module, sysloghost): with open("%s/logback.xml" % self.__templatedir, "r") as f: log_config = f.read() % {'sysloghost': sysloghost} return Statements.createOrOverwriteFile( "/var/lib/tomcat6/webapps/%s/WEB-INF/classes/logback.xml" % module, [log_config]) def configure_user(self, user, group): script = [] script.append(Statements.exec( "sed -i s/TOMCAT6_USER=.*/TOMCAT6_USER=%s/ /etc/default/tomcat6" % user)) script.append(Statements.exec( "sed -i s/TOMCAT6_GROUP=.*/TOMCAT6_GROUP=%s/ /etc/default/tomcat6" % group)) return script def configure_abiquo_props(self, rabbit, redis, zookeeper, datacenter, nfs_share, nfs_directory, hypervisor_sessions): with open("%s/abiquo.properties" % self.__templatedir, "r") as f: abiquo_props = f.read() % { 'rabbit': rabbit, 'redis': redis, 'zookeeper': zookeeper, 'datacenter': datacenter, 'nfs': nfs_share, 'nfsmount': nfs_directory, 'hypervisorsessions': hypervisor_sessions } script = [] script.append(Statements.exec("{md} /opt/abiquo/config")) script.append(Statements.createOrOverwriteFile( "/opt/abiquo/config/abiquo.properties", [abiquo_props])) return script def upload_libs(self): script = [] script.append(Statements.exec( "ensure_cmd_or_install_package_apt wget wget")) script.append(Statements.exec( "wget -O /usr/share/tomcat6/lib/abiquo.jar %s" % self.__abiquojar)) script.append(Statements.exec( "wget -O /usr/share/tomcat6/lib/mysql.jar %s" % self.__mysqljar)) return script
MIT License
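The statement above only wraps a sed one-liner; as a rough illustration of the edit it performs (not code from the repository), the following sketch applies the same insertion to a local copy of server.xml. The file path is hypothetical.

listener = '<Listener className="com.abiquo.listeners.AbiquoConfigurationListener"/>'

def add_abiquo_listener(path="./server.xml"):  # hypothetical local path
    with open(path) as f:
        lines = f.readlines()
    out = []
    for line in lines:
        out.append(line)
        # Mirror the sed '/GlobalResourcesLifecycleListener/a ...' append rule.
        if "GlobalResourcesLifecycleListener" in line:
            out.append(listener + "\n")
    with open(path, "w") as f:
        f.writelines(out)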
dico-api/dico
dico/client.py
Client.run
python
def run(self, *, reconnect_on_unknown_disconnect: bool = False, compress: bool = False):
    try:
        self.loop.create_task(self.start(reconnect_on_unknown_disconnect, compress))
        self.loop.run_forever()
    except KeyboardInterrupt:
        print("Detected KeyboardInterrupt, exiting...", file=sys.stderr)
    except Exception as ex:
        print("Unexpected exception occurred, exiting...", file=sys.stderr)
        traceback.print_exc()
    finally:
        self.loop.run_until_complete(self.close())
Runs the client and clears every connection after stopping due to an error or KeyboardInterrupt.

.. warning::
    This must be placed at the end of the code.

:param bool reconnect_on_unknown_disconnect: Whether to reconnect on an unknown websocket error.
:param bool compress: Whether to enable zlib compression.
https://github.com/dico-api/dico/blob/d4dc8f022d0838017a3ae1d692dbf4b591b853a9/dico/client.py#L355-L374
import sys import typing import asyncio import traceback from contextlib import suppress from . import utils from .api import APIClient from .http.async_http import AsyncHTTPRequest from .ws.websocket import WebSocketClient from .cache import CacheContainer from .exception import WebsocketClosed from .handler import EventHandler from .model import Intents, AllowedMentions, Snowflake, Activity, Guild, Channel from .utils import get_shard_id class Client(APIClient): def __init__(self, token: str, *, intents: Intents = Intents.no_privileged(), default_allowed_mentions: typing.Optional[AllowedMentions] = None, loop: typing.Optional[asyncio.AbstractEventLoop] = None, cache: bool = True, application_id: typing.Optional[Snowflake.TYPING] = None, monoshard: bool = False, shard_count: typing.Optional[int] = None, shard_id: typing.Optional[int] = None, **cache_max_sizes: int): cache_max_sizes.setdefault("message", 1000) self.loop: asyncio.AbstractEventLoop = loop or asyncio.get_event_loop() super().__init__(token, base=AsyncHTTPRequest, default_allowed_mentions=default_allowed_mentions, loop=loop, application_id=application_id) self.token: str = token self.__use_cache = cache self.cache: typing.Optional[CacheContainer] = CacheContainer(**cache_max_sizes) if self.__use_cache else None self.__ws_class = WebSocketClient self.intents: Intents = intents self.ws: typing.Optional[WebSocketClient] = None self.events: EventHandler = EventHandler(self) self.__wait_futures = {} self.__ready_future = asyncio.Future() self.monoshard: bool = monoshard self.shard_count: typing.Optional[int] = shard_count self.__shards = {} if self.monoshard else None self.__shard_id = shard_id self.events.add("READY", self.__ready) self.events.add("VOICE_STATE_UPDATE", self.__voice_state_update) def __ready(self, ready): self.application_id = Snowflake(ready.application["id"]) if not self.__ready_future.done(): self.__ready_future.set_result(True) def __voice_state_update(self, voice_state): if self.has_cache: user = self.get(voice_state.user_id) if user: user.set_voice_state(voice_state) def on_(self, name: typing.Optional[str] = None, meth: typing.Optional[typing.Union[typing.Callable, typing.Coroutine]] = None) -> typing.Any: def wrap(func=None): func = func or meth self.events.add(name.upper() if name else func.__name__.upper().lstrip("ON_"), func) return func return wrap if meth is None else wrap() @property def on(self): return self.on_ def wait(self, event_name: str, timeout: typing.Optional[float] = None, check: typing.Optional[typing.Callable[[typing.Any], bool]] = None) -> typing.Any: async def wrap(): while not self.websocket_closed: future = asyncio.Future() if event_name.upper() not in self.__wait_futures: self.__wait_futures[event_name.upper()] = [] self.__wait_futures[event_name.upper()].append(future) res = await asyncio.wait_for(future, timeout=None) ret = res if len(res) > 1 else res[0] if check and check(*res): return ret elif not check: return ret raise WebsocketClosed return asyncio.wait_for(wrap(), timeout=timeout) def dispatch(self, name: str, *args: typing.Any): [self.loop.create_task(utils.safe_call(x(*args))) for x in self.events.get(name.upper())] """ for x in range(len(self.__wait_futures.get(name.upper(), []))): with suppress(IndexError): # temporary fix, we might need to use while instead fut: asyncio.Future = self.__wait_futures[name.upper()].pop(x) if not fut.cancelled(): fut.set_result(args) """ tgt = self.__wait_futures.get(name.upper(), []) while tgt: fut: asyncio.Future = tgt.pop(0) if not 
fut.cancelled(): fut.set_result(args) def get_shard_id(self, guild: Guild.TYPING) -> int: if self.__shards: return get_shard_id(int(guild), len(self.__shards)) def get_shard(self, guild: Guild.TYPING) -> typing.Optional[WebSocketClient]: if self.__shards: shard_id = get_shard_id(int(guild), len(self.__shards)) return self.__shards.get(shard_id) async def wait_ready(self): if not self.__ready_future.done(): await self.__ready_future @property def get(self): if self.has_cache: return self.cache.get async def start(self, reconnect_on_unknown_disconnect: bool = False, compress: bool = False): if self.monoshard: gateway = await self.request_gateway() shard_count = self.shard_count or gateway.shards if self.shard_count is None: self.shard_count = gateway.shards for x in range(shard_count): ws = await self.__ws_class.connect_without_request( gateway, self.http, self.intents, self.events, reconnect_on_unknown_disconnect, compress, shard=[x, shard_count] ) self.__shards[x] = ws await ws.receive_once() self.loop.create_task(ws.run()) else: maybe_shard = {"shard": [self.__shard_id, self.shard_count]} if self.__shard_id else {} self.ws = await self.__ws_class.connect(self.http, self.intents, self.events, reconnect_on_unknown_disconnect, compress, **maybe_shard) await self.ws.run() async def close(self): if self.ws: await self.ws.close() elif self.__shards: for x in self.__shards.values(): await x.close() await self.http.close() async def update_presence(self, *, since: typing.Optional[int] = None, activities: typing.List[typing.Union[Activity, dict]], status: str = "online", afk: bool = False): activities = [x.to_dict() if not isinstance(x, dict) else x for x in activities] if self.ws: await self.ws.update_presence(since, activities, status, afk) elif self.__shards: for x in self.__shards.values(): await x.update_presence(since, activities, status, afk) def update_voice_state(self, guild: Guild.TYPING, channel: typing.Optional[Channel.TYPING] = None, self_mute: bool = False, self_deaf: bool = False): if self.ws: ws = self.ws elif self.__shards: ws = self.get_shard(guild) if not ws: raise AttributeError(f"shard for guild {int(guild)} not found.") else: raise AttributeError(f"shard for guild {int(guild)} not found.") return ws.update_voice_state(str(int(guild)), str(int(channel)) if channel else None, self_mute, self_deaf) @property def has_cache(self) -> bool: return self.__use_cache @property def websocket_closed(self) -> bool: if self.ws: return self.ws.closed elif self.__shards: return any([x.closed for x in self.__shards.values()]) return True @property def shards_closed(self) -> typing.List[bool]: if not self.__shards: raise TypeError("unable to get shards closed status") return [x.closed for x in self.__shards.values()] @property def guild_count(self) -> typing.Optional[int]: if self.has_cache: return self.cache.get_size("guild") @property def ping(self) -> float: if self.ws: return self.ws.ping elif self.__shards: pings = [x.ping for x in self.__shards.values()] return sum(pings) / len(pings) else: return 0.0 @property def shards(self) -> typing.Optional[typing.Tuple[WebSocketClient]]: return tuple(self.__shards.values()) if self.__shards else None def __setattr__(self, key, value): if not key.lower().startswith("on_") or key.lower() in ["on", "on_"]: return super().__setattr__(key, value) event_name = key.lower().lstrip("on_") return self.on_(event_name, value) def __getattr__(self, item): if item.startswith("get_"): def wrap(snowflake_id): return self.get(snowflake_id, item[4:]) return wrap if 
not item.lower().startswith("on_") or item.lower() in ["on", "on_", "get"]: return super().__getattribute__(item) event_name = item.lower().lstrip("on_") def deco(func): return self.on_(event_name, func) return deco
MIT License
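A minimal usage sketch for the Client.run record above, based only on the constructor, the on_ decorator, and the run method visible in the context. The bot token and the event handler body are placeholders, and the top-level import assumes Client and Intents are re-exported by the dico package.

from dico import Client, Intents  # assumes these names are exported at package level

client = Client("YOUR_BOT_TOKEN", intents=Intents.no_privileged())

@client.on_("MESSAGE_CREATE")
async def on_message(message):
    print("got a message event")

# Blocks until interrupted, then closes the websocket and HTTP session.
client.run()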
revdotcom/revai-python-sdk
src/rev_ai/apiclient.py
RevAiAPIClient.get_captions_as_stream
python
def get_captions_as_stream(self, id_, content_type=CaptionType.SRT, channel_id=None):
    if not id_:
        raise ValueError('id_ must be provided')
    query = self._create_captions_query(channel_id)
    response = self._make_http_request(
        "GET",
        urljoin(self.base_url, 'jobs/{0}/captions{1}'.format(id_, query)),
        headers={'Accept': content_type.value},
        stream=True
    )
    return response
Get the captions output of a specific job and return it as a plain text stream.

:param id_: id of job to be requested
:param content_type: caption type which should be returned. Defaults to SRT
:param channel_id: id of speaker channel to be captioned, only matters for multichannel jobs
:returns: requests.models.Response HTTP response which can be used to stream the payload of the response
:raises: HTTPError
https://github.com/revdotcom/revai-python-sdk/blob/60296de355dffb5ae6c5ab1efc66886e41b08422/src/rev_ai/apiclient.py#L328-L349
import json from .models import Account, CaptionType, Job, Transcript from .baseclient import BaseClient from . import utils try: from urllib.parse import urljoin except ImportError: from urlparse import urljoin class RevAiAPIClient(BaseClient): rev_json_content_type = 'application/vnd.rev.transcript.v1.0+json' def __init__(self, access_token): BaseClient.__init__(self, access_token) def submit_job_url( self, media_url, metadata=None, callback_url=None, skip_diarization=False, skip_punctuation=False, speaker_channels_count=None, custom_vocabularies=None, filter_profanity=False, remove_disfluencies=False, delete_after_seconds=None, language=None, custom_vocabulary_id=None): if not media_url: raise ValueError('media_url must be provided') payload = self._create_job_options_payload(media_url, metadata, callback_url, skip_diarization, skip_punctuation, speaker_channels_count, custom_vocabularies, filter_profanity, remove_disfluencies, delete_after_seconds, language, custom_vocabulary_id) response = self._make_http_request( "POST", urljoin(self.base_url, 'jobs'), json=payload ) return Job.from_json(response.json()) def submit_job_local_file( self, filename, metadata=None, callback_url=None, skip_diarization=False, skip_punctuation=False, speaker_channels_count=None, custom_vocabularies=None, filter_profanity=False, remove_disfluencies=False, delete_after_seconds=None, language=None, custom_vocabulary_id=None): if not filename: raise ValueError('filename must be provided') payload = self._create_job_options_payload(None, metadata, callback_url, skip_diarization, skip_punctuation, speaker_channels_count, custom_vocabularies, filter_profanity, remove_disfluencies, delete_after_seconds, language, custom_vocabulary_id) with open(filename, 'rb') as f: files = { 'media': (filename, f), 'options': (None, json.dumps(payload, sort_keys=True)) } response = self._make_http_request( "POST", urljoin(self.base_url, 'jobs'), files=files ) return Job.from_json(response.json()) def get_job_details(self, id_): if not id_: raise ValueError('id_ must be provided') response = self._make_http_request( "GET", urljoin(self.base_url, 'jobs/{}'.format(id_)) ) return Job.from_json(response.json()) def get_list_of_jobs(self, limit=None, starting_after=None): params = [] if limit is not None: params.append('limit={}'.format(limit)) if starting_after is not None: params.append('starting_after={}'.format(starting_after)) query = '?{}'.format('&'.join(params)) response = self._make_http_request( "GET", urljoin(self.base_url, 'jobs{}'.format(query)) ) return [Job.from_json(job) for job in response.json()] def get_transcript_text(self, id_): if not id_: raise ValueError('id_ must be provided') response = self._make_http_request( "GET", urljoin(self.base_url, 'jobs/{}/transcript'.format(id_)), headers={'Accept': 'text/plain'} ) return response.text def get_transcript_text_as_stream(self, id_): if not id_: raise ValueError('id_ must be provided') response = self._make_http_request( "GET", urljoin(self.base_url, 'jobs/{}/transcript'.format(id_)), headers={'Accept': 'text/plain'}, stream=True ) return response def get_transcript_json(self, id_): if not id_: raise ValueError('id_ must be provided') response = self._make_http_request( "GET", urljoin(self.base_url, 'jobs/{}/transcript'.format(id_)), headers={'Accept': self.rev_json_content_type} ) return response.json() def get_transcript_json_as_stream(self, id_): if not id_: raise ValueError('id_ must be provided') response = self._make_http_request( "GET", urljoin(self.base_url, 
'jobs/{}/transcript'.format(id_)), headers={'Accept': self.rev_json_content_type}, stream=True ) return response def get_transcript_object(self, id_): if not id_: raise ValueError('id_ must be provided') response = self._make_http_request( "GET", urljoin(self.base_url, 'jobs/{}/transcript'.format(id_)), headers={'Accept': self.rev_json_content_type} ) return Transcript.from_json(response.json()) def get_captions(self, id_, content_type=CaptionType.SRT, channel_id=None): if not id_: raise ValueError('id_ must be provided') query = self._create_captions_query(channel_id) response = self._make_http_request( "GET", urljoin(self.base_url, 'jobs/{0}/captions{1}'.format(id_, query)), headers={'Accept': content_type.value} ) return response.text
MIT License
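A short usage sketch for get_captions_as_stream, assuming only the documented signature and the standard requests streaming API; the access token and job id are placeholders.

from rev_ai.apiclient import RevAiAPIClient
from rev_ai.models import CaptionType

client = RevAiAPIClient("YOUR_ACCESS_TOKEN")
response = client.get_captions_as_stream("your-job-id", content_type=CaptionType.SRT)

# The return value is a requests Response opened with stream=True,
# so the payload can be written out chunk by chunk.
with open("captions.srt", "wb") as f:
    for chunk in response.iter_content(chunk_size=8192):
        f.write(chunk)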
wikimedia/pywikibot
pywikibot/family.py
Family.obsolete
python
def obsolete(self):
    data = {code: None for code in self.interwiki_removals}
    data.update(self.interwiki_replacements)
    return types.MappingProxyType(data)
Old codes that are not part of the family.

Interwiki replacements override removals for the same code.

:return: mapping of old codes to new codes (or None)
:rtype: dict
https://github.com/wikimedia/pywikibot/blob/5097f5b9a7ef9d39f35f17edd11faf3086a01d1d/pywikibot/family.py#L946-L957
import collections import logging import re import string import sys import types import urllib.parse as urlparse import warnings from importlib import import_module from itertools import chain from os.path import basename, dirname, splitext from typing import Optional import pywikibot from pywikibot import config from pywikibot.backports import Dict, List, Tuple from pywikibot.exceptions import FamilyMaintenanceWarning, UnknownFamilyError from pywikibot.tools import classproperty, deprecated logger = logging.getLogger('pywiki.wiki.family') NAME_CHARACTERS = string.ascii_letters + string.digits CODE_CHARACTERS = string.ascii_lowercase + string.digits + '_-' class Family: def __new__(cls): if cls in globals().values(): raise TypeError( 'Abstract Family class {} cannot be instantiated; ' 'subclass it instead'.format(cls.__name__)) cls.instance = super().__new__(cls) cls.__new__ = lambda cls: cls.instance if '__init__' in cls.__dict__: cls.__init__ = deprecated(cls.__init__) cls.instance.__init__() cls.__init__ = lambda self: None return cls.instance @classproperty def instance(cls): return cls() name = None langs = {} alphabetic = [ 'ace', 'kbd', 'ady', 'af', 'ak', 'als', 'alt', 'am', 'smn', 'ang', 'ab', 'ar', 'an', 'arc', 'roa-rup', 'frp', 'as', 'ast', 'atj', 'awa', 'gn', 'av', 'ay', 'az', 'ban', 'bm', 'bn', 'bjn', 'zh-min-nan', 'nan', 'map-bms', 'ba', 'be', 'be-tarask', 'mnw', 'bh', 'bcl', 'bi', 'bg', 'bar', 'bo', 'bs', 'br', 'bxr', 'ca', 'cv', 'ceb', 'cs', 'ch', 'cbk-zam', 'ny', 'sn', 'tum', 'cho', 'co', 'cy', 'dag', 'da', 'dk', 'ary', 'pdc', 'de', 'dv', 'nv', 'dsb', 'dty', 'dz', 'mh', 'et', 'el', 'eml', 'en', 'myv', 'es', 'eo', 'ext', 'eu', 'ee', 'fa', 'hif', 'fo', 'fr', 'fy', 'ff', 'fur', 'ga', 'gv', 'gag', 'gd', 'gl', 'gan', 'ki', 'glk', 'gu', 'gor', 'got', 'hak', 'xal', 'ko', 'ha', 'haw', 'hy', 'hi', 'ho', 'hsb', 'hr', 'hyw', 'io', 'ig', 'ilo', 'inh', 'bpy', 'id', 'ia', 'ie', 'iu', 'ik', 'os', 'xh', 'zu', 'is', 'it', 'he', 'jv', 'kbp', 'kl', 'kn', 'kr', 'pam', 'krc', 'ka', 'ks', 'csb', 'kk', 'kw', 'rw', 'rn', 'sw', 'kv', 'kg', 'gom', 'avk', 'ht', 'gcr', 'ku', 'kj', 'ky', 'mrj', 'lld', 'lad', 'lbe', 'lo', 'ltg', 'la', 'lv', 'lb', 'lez', 'lfn', 'lt', 'lij', 'li', 'ln', 'olo', 'jbo', 'lg', 'lmo', 'lrc', 'mad', 'hu', 'mai', 'mk', 'mg', 'ml', 'mt', 'mi', 'mr', 'xmf', 'arz', 'mzn', 'mni', 'ms', 'min', 'cdo', 'mwl', 'mdf', 'mo', 'mn', 'mus', 'my', 'nah', 'na', 'fj', 'nl', 'nds-nl', 'cr', 'ne', 'new', 'nia', 'ja', 'nqo', 'nap', 'ce', 'frr', 'pih', 'no', 'nb', 'nn', 'nrm', 'nov', 'ii', 'oc', 'mhr', 'or', 'om', 'ng', 'hz', 'uz', 'pa', 'pi', 'pfl', 'pag', 'pnb', 'pap', 'ps', 'jam', 'koi', 'km', 'pcd', 'pms', 'tpi', 'nds', 'pl', 'pnt', 'pt', 'aa', 'kaa', 'crh', 'ty', 'ksh', 'ro', 'rmy', 'rm', 'qu', 'rue', 'ru', 'sah', 'szy', 'se', 'sm', 'sa', 'sg', 'sat', 'skr', 'sc', 'sco', 'trv', 'stq', 'st', 'nso', 'tn', 'sq', 'scn', 'si', 'simple', 'sd', 'ss', 'sk', 'sl', 'cu', 'szl', 'so', 'ckb', 'srn', 'sr', 'sh', 'su', 'fi', 'sv', 'shi', 'tl', 'shn', 'ta', 'kab', 'roa-tara', 'tt', 'tay', 'te', 'tet', 'th', 'ti', 'tg', 'to', 'chr', 'chy', 've', 'tcy', 'tr', 'azb', 'tk', 'tw', 'tyv', 'din', 'udm', 'bug', 'uk', 'ur', 'ug', 'za', 'vec', 'vep', 'vi', 'vo', 'fiu-vro', 'wa', 'zh-classical', 'vls', 'war', 'wo', 'wuu', 'ts', 'yi', 'yo', 'zh-yue', 'diq', 'zea', 'bat-smg', 'zh', 'zh-tw', 'zh-cn', ] alphabetic_revised = [ 'ace', 'ady', 'kbd', 'af', 'ak', 'als', 'alt', 'am', 'smn', 'ang', 'ab', 'ar', 'an', 'arc', 'roa-rup', 'frp', 'as', 'ast', 'atj', 'awa', 'gn', 'av', 'ay', 'az', 'bjn', 'id', 'ms', 'ban', 'bm', 'bn', 
'zh-min-nan', 'nan', 'map-bms', 'jv', 'su', 'ba', 'min', 'be', 'be-tarask', 'mnw', 'mad', 'bh', 'bcl', 'bi', 'bar', 'bo', 'bs', 'br', 'bug', 'bg', 'bxr', 'ca', 'ceb', 'cv', 'cs', 'ch', 'cbk-zam', 'ny', 'sn', 'tum', 'cho', 'co', 'cy', 'dag', 'da', 'dk', 'ary', 'pdc', 'de', 'dv', 'nv', 'dsb', 'na', 'dty', 'dz', 'mh', 'et', 'el', 'eml', 'en', 'myv', 'es', 'eo', 'ext', 'eu', 'ee', 'fa', 'hif', 'fo', 'fr', 'fy', 'ff', 'fur', 'ga', 'gv', 'sm', 'gag', 'gd', 'gl', 'gan', 'ki', 'glk', 'gu', 'got', 'hak', 'xal', 'ko', 'ha', 'haw', 'hy', 'hi', 'ho', 'hsb', 'hr', 'hyw', 'io', 'ig', 'ilo', 'inh', 'bpy', 'ia', 'ie', 'iu', 'ik', 'os', 'xh', 'zu', 'is', 'it', 'he', 'kl', 'kn', 'kr', 'pam', 'ka', 'ks', 'csb', 'kk', 'kw', 'rw', 'ky', 'rn', 'mrj', 'sw', 'kv', 'kg', 'gom', 'avk', 'gor', 'ht', 'gcr', 'ku', 'shn', 'kj', 'lld', 'lad', 'lbe', 'lez', 'lfn', 'lo', 'la', 'ltg', 'lv', 'to', 'lb', 'lt', 'lij', 'li', 'ln', 'nia', 'olo', 'jbo', 'lg', 'lmo', 'lrc', 'hu', 'mai', 'mk', 'mg', 'ml', 'krc', 'mt', 'mi', 'mr', 'xmf', 'arz', 'mzn', 'mni', 'cdo', 'mwl', 'koi', 'mdf', 'mo', 'mn', 'mus', 'my', 'nah', 'fj', 'nl', 'nds-nl', 'cr', 'ne', 'new', 'ja', 'nqo', 'nap', 'ce', 'frr', 'pih', 'no', 'nb', 'nn', 'nrm', 'nov', 'ii', 'oc', 'mhr', 'or', 'om', 'ng', 'hz', 'uz', 'pa', 'pi', 'pfl', 'pag', 'pnb', 'pap', 'ps', 'jam', 'km', 'pcd', 'pms', 'nds', 'pl', 'pnt', 'pt', 'aa', 'kaa', 'crh', 'ty', 'ksh', 'ro', 'rmy', 'rm', 'qu', 'ru', 'rue', 'sah', 'szy', 'se', 'sa', 'sg', 'sat', 'skr', 'sc', 'sco', 'trv', 'stq', 'st', 'nso', 'tn', 'sq', 'scn', 'si', 'simple', 'sd', 'ss', 'sk', 'sl', 'cu', 'szl', 'so', 'ckb', 'srn', 'sr', 'sh', 'fi', 'sv', 'shi', 'tl', 'ta', 'kab', 'kbp', 'roa-tara', 'tt', 'tay', 'te', 'tet', 'th', 'vi', 'ti', 'tg', 'tpi', 'chr', 'chy', 've', 'tcy', 'tr', 'azb', 'tk', 'tw', 'tyv', 'din', 'udm', 'uk', 'ur', 'ug', 'za', 'vec', 'vep', 'vo', 'fiu-vro', 'wa', 'zh-classical', 'vls', 'war', 'wo', 'wuu', 'ts', 'yi', 'yo', 'zh-yue', 'diq', 'zea', 'bat-smg', 'zh', 'zh-tw', 'zh-cn', ] fyinterwiki = alphabetic[:] fyinterwiki.remove('nb') fyinterwiki.sort(key=lambda x: x.replace('y', 'i') + x.count('y') * '!') linktrails = { '_default': '[a-z]*', 'ab': '[a-zабвгҕдежзӡикқҟлмнопҧрстҭуфхҳцҵчҷҽҿшыҩџьә]*', 'als': '[äöüßa-z]*', 'alt': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'an': '[a-záéíóúñ]*', 'ar': '[a-zء-يؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ۭ]*', 'ary': '[a-zء-يؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ۭ]*', 'arz': '[a-zء-يؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ۭ]*', 'ast': '[a-záéíóúñ]*', 'atj': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'av': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'avk': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'awa': '[a-zऀ-ॣ०-꣠-ꣿ]*', 'ay': '[a-záéíóúñ]*', 'az': '[a-zçəğıöşü]*', 'azb': '[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة‌]*', 'ba': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюяәөүғҡңҙҫһ“»]*', 'bar': '[äöüßa-z]*', 'bat-smg': '[a-ząčęėįšųūž]*', 'be': '[абвгґджзеёжзійклмнопрстуўфхцчшыьэюяćčłńśšŭźža-z]*', 'be-tarask': '[абвгґджзеёжзійклмнопрстуўфхцчшыьэюяćčłńśšŭźža-z]*', 'bg': '[a-zабвгдежзийклмнопрстуфхцчшщъыьэюя]*', 'bm': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'bn': '[ঀ-৿]*', 'bpy': '[ঀ-৿]*', 'br': "(?:[a-zA-ZàâçéèêîôûäëïöüùñÇÉÂÊÎÔÛÄËÏÖÜÀÈÙÑ]|[cC]['’]h|C['’]H)*", 'bs': '[a-zćčžšđž]*', 'bxr': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'ca': "(?:[a-zàèéíòóúç·ïü]|'(?!'))*", 'cbk-zam': '[a-záéíóúñ]*', 'ce': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'ckb': '[ئابپتجچحخدرڕزژسشعغفڤقکگلڵمنوۆهھەیێ‌]*', 'co': '[a-zàéèíîìóòúù]*', 'crh': '[a-zâçğıñöşüа-яёʺʹ“»]*', 'cs': '[a-záčďéěíňóřšťúůýž]*', 'csb': '[a-zęóąśłżźćńĘÓĄŚŁŻŹĆŃ]*', 'cu': '[a-zабвгдеєжѕзїіıићклмнопсстѹфхѡѿцчш' 
'щъыьѣюѥѧѩѫѭѯѱѳѷѵґѓђёјйљњќуўџэ҄я“»]*', 'cv': '[a-zа-яĕçăӳ"»]*', 'cy': '[àáâèéêìíîïòóôûŵŷa-z]*', 'da': '[a-zæøå]*', 'dag': '[ɛɣŋɔʒƐƔŊƆƷa-z]*', 'de': '[äöüßa-z]*', 'din': '[äëɛɛ̈éɣïŋöɔɔ̈óa-z]*', 'dsb': '[äöüßa-z]*', 'el': '[a-zαβγδεζηθικλμνξοπρστυφχψωςΑΒΓΔΕΖΗΘ' 'ΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίόύώϊϋΐΰΆΈΉΊΌΎΏΪΫ]*', 'eml': '[a-zàéèíîìóòúù]*', 'es': '[a-záéíóúñ]*', 'et': '[äöõšüža-z]*', 'ext': '[a-záéíóúñ]*', 'fa': '[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة‌]*', 'ff': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'fi': '[a-zäö]*', 'fiu-vro': '[äöõšüža-z]*', 'fo': '[áðíóúýæøa-z]*', 'fr': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'frp': '[a-zàâçéèêîœôû·’æäåāăëēïīòöōùü‘]*', 'frr': '[a-zäöüßåāđē]*', 'fur': '[a-zàéèíîìóòúù]*', 'fy': '[a-zàáèéìíòóùúâêîôûäëïöü]*', 'gag': '[a-zÇĞçğİıÖöŞşÜüÂâÎîÛû]*', 'gan': '', 'gcr': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'gl': '[áâãàéêẽçíòóôõq̃úüűũa-z]*', 'glk': '[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة‌]*', 'gn': '[a-záéíóúñ]*', 'gu': '[઀-૿]*', 'he': '[a-zא-ת]*', 'hi': '[a-zऀ-ॣ०-꣠-ꣿ]*', 'hr': '[čšžćđßa-z]*', 'hsb': '[äöüßa-z]*', 'ht': '[a-zàèòÀÈÒ]*', 'hu': '[a-záéíóúöüőűÁÉÍÓÚÖÜŐŰ]*', 'hy': '[a-zաբգդեզէըթժիլխծկհձղճմյնշոչպջռսվտրցւփքօֆև«»]*', 'hyw': '[a-zաբգդեզէըթժիլխծկհձղճմյնշոչպջռսվտրցւփքօֆև«»]*', 'ii': '', 'inh': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'is': '[áðéíóúýþæöa-z-–]*', 'it': '[a-zàéèíîìóòúù]*', 'ka': '[a-zაბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰ“»]*', 'kaa': "(?:[a-zıʼ’“»]|'(?!'))*", 'kab': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'kbp': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'kk': '[a-zäçéğıïñöşüýʺʹа-яёәғіқңөұүһٴ' 'ابپتجحدرزسشعفقكلمنڭەوۇۋۆىيچھ“»]*', 'kl': '[a-zæøå]*', 'koi': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'krc': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'ksh': '[äöüėëijßəğåůæœça-z]*', 'ku': '[a-zçêîşûẍḧÇÊÎŞÛẌḦ]*', 'kv': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'lad': '[a-záéíóúñ]*', 'lb': '[äöüßa-z]*', 'lbe': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюяӀ1“»]*', 'lez': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'li': '[a-zäöüïëéèà]*', 'lij': '[a-zàéèíîìóòúù]*', 'lld': '[a-zàéèíîìóòúù]*', 'lmo': '[a-zàéèíîìóòúù]*', 'ln': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'lrc': '[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة‌]*', 'lt': '[a-ząčęėįšųūž]*', 'ltg': '[a-zA-ZĀāČčĒēĢģĪīĶķĻļŅņŠšŪūŽž]*', 'lv': '[a-zA-ZĀāČčĒēĢģĪīĶķĻļŅņŠšŪūŽž]*', 'mai': '[a-zऀ-ॣ०-꣠-ꣿ]*', 'mdf': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'mg': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'mhr': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'mk': '[a-zабвгдѓежзѕијклљмнњопрстќуфхцчџш]*', 'ml': '[a-zം-ൿ]*', 'mn': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя“»]*', 'mr': '[ऀ-ॣॱ-ॿ‍]*', 'mrj': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'mwl': '[áâãàéêẽçíòóôõq̃úüűũa-z]*', 'myv': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'mzn': '[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة‌]*', 'nah': '[a-záéíóúñ]*', 'nap': '[a-zàéèíîìóòúù]*', 'nds': '[äöüßa-z]*', 'nds-nl': '[a-zäöüïëéèà]*', 'nl': '[a-zäöüïëéèà]*', 'nn': '[æøåa-z]*', 'no': '[æøåa-z]*', 'nrm': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'oc': '[a-zàâçéèêîôû]*', 'olo': '[a-zčČšŠžŽäÄöÖ]*', 'or': '[a-z଀-୿]*', 'os': '[a-zаæбвгдеёжзийклмнопрстуфхцчшщъыьэюя“»]*', 'pa': '[ਁਂਃਅਆਇਈਉਊਏਐਓਔਕਖਗਘਙਚਛਜਝਞਟਠਡਢਣਤਥਦਧਨਪਫਬਭਮ' 'ਯਰਲਲ਼ਵਸ਼ਸਹ਼ਾਿੀੁੂੇੈੋੌ੍ਖ਼ਗ਼ਜ਼ੜਫ਼ੰੱੲੳa-z]*', 'pcd': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'pdc': '[äöüßa-z]*', 'pfl': '[äöüßa-z]*', 'pl': '[a-zęóąśłżźćńĘÓĄŚŁŻŹĆŃ]*', 'pms': '[a-zàéèíîìóòúù]*', 'pnt': '[a-zαβγδεζηθικλμνξοπρστυφχψωςΑΒΓΔΕΖΗΘ' 'ΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίόύώϊϋΐΰΆΈΉΊΌΎΏΪΫ]*', 'pt': '[áâãàéêẽçíòóôõq̃úüűũa-z]*', 'qu': '[a-záéíóúñ]*', 'rmy': '[a-zăâîşţșțĂÂÎŞŢȘȚ]*', 'ro': '[a-zăâîşţșțĂÂÎŞŢȘȚ]*', 
'roa-rup': '[a-zăâîşţșțĂÂÎŞŢȘȚ]*', 'roa-tara': '[a-zàéèíîìóòúù]*', 'ru': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'rue': '[a-zабвгґдеєжзиіїйклмнопрстуфхцчшщьєюяёъы“»]*', 'sa': '[a-zऀ-ॣ०-꣠-ꣿ]*', 'sah': '[a-zабвгҕдеёжзийклмнҥоөпрсһтуүфхцчшщъыьэюя]*', 'scn': '[a-zàéèíîìóòúù]*', 'se': '[a-zàáâçčʒǯđðéèêëǧǥȟíìîïıǩŋñóòôõßšŧúùûýÿüžþæøåäö]*', 'sg': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'sh': '[a-zčćđžš]*', 'shi': '[ⴰ-ⵯa-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙḍḥɛṛɣṣṭẓḌḤƐṚƔṢṬẒʷ]*', 'sk': '[a-záäčďéíľĺňóôŕšťúýž]*', 'skr': '[ابپتٹثجچحخدڈذرڑزژسشصضطظعغفقکگلمنںوؤہھیئےآأءۃٻڄݙڋڰڳݨ]*', 'sl': '[a-zčćđžš]*', 'smn': '[a-zâčđŋšžäá]*', 'sr': '[abvgdđežzijklljmnnjoprstćufhcčdž' 'šабвгдђежзијклљмнњопрстћуфхцчџш]*', 'srn': '[a-zäöüïëéèà]*', 'stq': '[äöüßa-z]*', 'sv': '[a-zåäöéÅÄÖÉ]*', 'szl': '[a-zęóąśłżźćńĘÓĄŚŁŻŹĆŃ]*', 'szy': '', 'ta': '[஀-௿]*', 'tay': '', 'te': '[ఁ-౯]*', 'tet': '[áâãàéêẽçíòóôõq̃úüűũa-z]*', 'tg': '[a-zабвгдеёжзийклмнопрстуфхчшъэюяғӣқўҳҷцщыь]*', 'tk': '[a-zÄäÇçĞğŇňÖöŞşÜüÝýŽž]*', 'tr': '[a-zÇĞçğİıÖöŞşÜüÂâÎîÛû]*', 'trv': '', 'tt': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюяӘәӨөҮүҖҗҢңҺһ]*', 'ty': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'tyv': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'udm': '[a-zа-яёӝӟӥӧӵ]*', 'uk': '[a-zабвгґдеєжзиіїйклмнопрстуфхцчшщьєюяёъы“»]*', 'ur': '[ابپتٹثجچحخدڈذر​ڑ​زژسشصضطظعغفقکگل​م​نںوؤہھیئےآأءۃ]*', 'uz': '[a-zʻʼ“»]*', 'vec': '[a-zàéèíîìóòúù]*', 'vep': '[äöõšüža-z]*', 'vi': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'vls': '[a-zäöüïëéèà]*', 'wa': '[a-zåâêîôûçéè]*', 'wo': '[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*', 'wuu': '', 'xal': '[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*', 'xmf': '[a-zაბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰ“»]*', 'yi': '[a-zא-ת]*', 'za': '', 'zea': '[a-zäöüïëéèà]*', 'zh': '', } category_redirect_templates = { '_default': [] } use_hard_category_redirects = [] disambiguationTemplates = { '_default': [] } edit_restricted_templates = {} archived_page_templates = {} cross_projects = [] cross_projects_cookies = ['centralauth_Session', 'centralauth_Token', 'centralauth_User'] cross_projects_cookie_username = 'centralauth_User' cross_allowed = [] disambcatname = {} interwiki_attop = [] interwiki_on_one_line = [] interwiki_text_separator = '\n\n' category_attop = [] category_on_one_line = [] category_text_separator = '\n\n' categories_last = [] interwiki_putfirst = {} interwiki_forward = None interwiki_replacements = {} interwiki_removals = [] languages_by_size = [] language_groups = { 'arab': [ 'ar', 'ary', 'arz', 'azb', 'ckb', 'fa', 'glk', 'ks', 'lrc', 'mzn', 'ps', 'sd', 'ur', 'ha', 'kk', 'ku', 'pnb', 'ug' ], 'chinese': [ 'wuu', 'zh', 'zh-classical', 'zh-yue', 'gan', 'ii', 'ja', 'za' ], 'cyril': [ 'ab', 'av', 'ba', 'be', 'be-tarask', 'bg', 'bxr', 'ce', 'cu', 'cv', 'kbd', 'koi', 'kv', 'ky', 'mk', 'lbe', 'mdf', 'mn', 'mo', 'myv', 'mhr', 'mrj', 'os', 'ru', 'rue', 'sah', 'tg', 'tk', 'udm', 'uk', 'xal', 'ha', 'kk', 'sh', 'sr', 'tt' ], 'grec': [ 'el', 'grc', 'pnt' ], 'latin': [ 'aa', 'ace', 'af', 'ak', 'als', 'an', 'ang', 'ast', 'ay', 'bar', 'bat-smg', 'bcl', 'bi', 'bm', 'br', 'bs', 'ca', 'cbk-zam', 'cdo', 'ceb', 'ch', 'cho', 'chy', 'co', 'crh', 'cs', 'csb', 'cy', 'da', 'de', 'diq', 'dsb', 'ee', 'eml', 'en', 'eo', 'es', 'et', 'eu', 'ext', 'ff', 'fi', 'fiu-vro', 'fj', 'fo', 'fr', 'frp', 'frr', 'fur', 'fy', 'ga', 'gag', 'gd', 'gl', 'gn', 'gv', 'hak', 'haw', 'hif', 'ho', 'hr', 'hsb', 'ht', 'hu', 'hz', 'ia', 'id', 'ie', 'ig', 'ik', 'ilo', 'io', 'is', 'it', 'jbo', 'jv', 'kaa', 'kab', 'kg', 'ki', 'kj', 'kl', 'kr', 'ksh', 'kw', 'la', 'lad', 'lb', 'lg', 'li', 'lij', 'lmo', 'ln', 'lt', 'ltg', 'lv', 
'map-bms', 'mg', 'mh', 'mi', 'ms', 'mt', 'mus', 'mwl', 'na', 'nah', 'nap', 'nds', 'nds-nl', 'ng', 'nl', 'nn', 'no', 'nov', 'nrm', 'nv', 'ny', 'oc', 'om', 'pag', 'pam', 'pap', 'pcd', 'pdc', 'pfl', 'pih', 'pl', 'pms', 'pt', 'qu', 'rm', 'rn', 'ro', 'roa-rup', 'roa-tara', 'rw', 'sc', 'scn', 'sco', 'se', 'sg', 'simple', 'sk', 'sl', 'sm', 'sn', 'so', 'sq', 'srn', 'ss', 'st', 'stq', 'su', 'sv', 'sw', 'szl', 'tet', 'tl', 'tn', 'to', 'tpi', 'tr', 'ts', 'tum', 'tw', 'ty', 'uz', 've', 'vec', 'vi', 'vls', 'vo', 'wa', 'war', 'wo', 'xh', 'yo', 'zea', 'zh-min-nan', 'zu', 'az', 'chr', 'ckb', 'ha', 'iu', 'kk', 'ku', 'rmy', 'sh', 'sr', 'tt', 'ug', 'za' ], 'scand': [ 'da', 'fo', 'is', 'nb', 'nn', 'no', 'sv' ], } ldapDomain = () crossnamespace = collections.defaultdict(dict) shared_urlshortner_wiki = None _families = {} @staticmethod def load(fam: Optional[str] = None): if fam is None: fam = config.family assert all(x in NAME_CHARACTERS for x in fam), 'Name of family "{}" must be ASCII letters and digits ' '[a-zA-Z0-9]'.format(fam) if fam in Family._families: return Family._families[fam] if fam in config.family_files: family_file = config.family_files[fam] if family_file.startswith(('http://', 'https://')): myfamily = AutoFamily(fam, family_file) Family._families[fam] = myfamily return Family._families[fam] else: raise UnknownFamilyError('Family {} does not exist'.format(fam)) try: with warnings.catch_warnings(): warnings.simplefilter('ignore', RuntimeWarning) sys.path.append(dirname(family_file)) mod = import_module(splitext(basename(family_file))[0]) except ImportError: raise UnknownFamilyError('Family {} does not exist'.format(fam)) cls = mod.Family.instance if cls.name != fam: warnings.warn('Family name {} does not match family module name {}' .format(cls.name, fam), FamilyMaintenanceWarning) if not all(x in NAME_CHARACTERS for x in cls.name): warnings.warn('Name of family {} must be ASCII letters ' 'and digits [a-zA-Z0-9]' .format(cls.name), FamilyMaintenanceWarning) for code in cls.langs.keys(): if not all(x in CODE_CHARACTERS for x in code): warnings.warn('Family {} code {} must be ASCII lowercase ' 'letters and digits [a-z0-9] or ' 'underscore/dash [_-]' .format(cls.name, code), FamilyMaintenanceWarning) Family._families[fam] = cls return cls def linktrail(self, code, fallback='_default'): if code in self.linktrails: return self.linktrails[code] if fallback: return self.linktrails[fallback] raise KeyError( 'ERROR: linktrail in language {language_code} unknown' .format(language_code=code)) def category_redirects(self, code, fallback='_default'): if not hasattr(self, '_catredirtemplates') or code not in self._catredirtemplates: self._get_cr_templates(code, fallback) return self._catredirtemplates[code] def _get_cr_templates(self, code, fallback): if not hasattr(self, '_catredirtemplates'): self._catredirtemplates = {} if code in self.category_redirect_templates: cr_template_tuple = self.category_redirect_templates[code] elif fallback and fallback in self.category_redirect_templates: cr_template_tuple = self.category_redirect_templates[fallback] else: self._catredirtemplates[code] = [] return cr_set = set() site = pywikibot.Site(code, self) tpl_ns = site.namespaces.TEMPLATE for cr_template in cr_template_tuple: cr_page = pywikibot.Page(site, cr_template, ns=tpl_ns) for t in cr_page.backlinks(filter_redirects=True, namespaces=tpl_ns): newtitle = t.title(with_ns=False) if newtitle not in cr_template_tuple: cr_set.add(newtitle) self._catredirtemplates[code] = list(cr_template_tuple) + list(cr_set) 
@deprecated('site.category_redirects()', since='20170608') def get_cr_templates(self, code, fallback): self._get_cr_templates(code, fallback) def get_edit_restricted_templates(self, code): return self.edit_restricted_templates.get(code, ()) def get_archived_page_templates(self, code): return self.archived_page_templates.get(code, ()) def disambig(self, code, fallback='_default'): if code in self.disambiguationTemplates: return self.disambiguationTemplates[code] if fallback: return self.disambiguationTemplates[fallback] raise KeyError( 'ERROR: title for disambig template in language {} unknown' .format(code)) def protocol(self, code: str) -> str: return 'http' def verify_SSL_certificate(self, code: str) -> bool: return True def hostname(self, code): return self.langs[code] def ssl_hostname(self, code): return self.hostname(code) def scriptpath(self, code: str) -> str: return '/w' def ssl_pathprefix(self, code): return '' def _hostname(self, code, protocol=None): if protocol is None: protocol = self.protocol(code) if protocol == 'https': host = self.ssl_hostname(code) else: host = self.hostname(code) return protocol, host def base_url(self, code: str, uri: str, protocol=None) -> str: protocol, host = self._hostname(code, protocol) if protocol == 'https': uri = self.ssl_pathprefix(code) + uri return urlparse.urljoin('{}://{}'.format(protocol, host), uri) def path(self, code): return '{}/index.php'.format(self.scriptpath(code)) def querypath(self, code): return '{}/query.php'.format(self.scriptpath(code)) def apipath(self, code): return '{}/api.php'.format(self.scriptpath(code)) def eventstreams_host(self, code): raise NotImplementedError('This family does not support EventStreams') def eventstreams_path(self, code): raise NotImplementedError('This family does not support EventStreams') def get_address(self, code, title): return '{}?title={}&redirect=no'.format(self.path(code), title) def interface(self, code): if code in self.interwiki_removals: if code in self.codes: pywikibot.warn('Interwiki removal {} is in {} codes' .format(code, self)) if code in self.closed_wikis: return 'ClosedSite' if code in self.removed_wikis: return 'RemovedSite' return config.site_interface def from_url(self, url: str) -> Optional[str]: parsed = urlparse.urlparse(url) if not re.match('(https?)?$', parsed.scheme): return None path = parsed.path if parsed.query: path += '?' 
+ parsed.query path, _, suffix = path.partition('$1') if suffix: raise ValueError('Url: {}\nText {} after the $1 placeholder is ' 'not supported (T111513).'.format(url, suffix)) for domain in self.domains: if domain in parsed.netloc: break else: return None matched_sites = set() for code in chain(self.codes, getattr(self, 'test_codes', ()), getattr(self, 'closed_wikis', ()), ): if self._hostname(code)[1] == parsed.netloc: site = pywikibot.Site(code, self.name) pywikibot.log('Found candidate {}'.format(site)) for iw_url in site._interwiki_urls(): if path.startswith(iw_url): matched_sites.add(site) break if len(matched_sites) == 1: return matched_sites.pop().code if not matched_sites: return None raise RuntimeError( 'Found multiple matches for URL "{}": {}' .format(url, ', '.join(str(s) for s in matched_sites))) def maximum_GET_length(self, code): return config.maximum_GET_length def dbName(self, code): return '{}{}'.format(code, self.name) def encoding(self, code): return 'utf-8' def encodings(self, code): return (self.encoding(code), ) def __eq__(self, other): if not isinstance(other, Family): other = self.load(other) return self is other def __ne__(self, other): try: return not self.__eq__(other) except UnknownFamilyError: return False def __hash__(self): return hash(self.name) def __str__(self): return self.name def __repr__(self): return 'Family("{}")'.format(self.name) def shared_image_repository(self, code): return (None, None) def isPublic(self, code): return True def post_get_convert(self, site, getText): return getText def pre_put_convert(self, site, putText): return putText @property
MIT License
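To make the merge semantics of obsolete concrete ("replacements override removals"), here is a standalone sketch with hypothetical language codes; it reproduces only the three lines of the property body, not the Family machinery.

import types

interwiki_removals = ['dk', 'zh-tw']   # hypothetical old codes that were dropped
interwiki_replacements = {'dk': 'da'}  # hypothetical old -> new mapping

data = {code: None for code in interwiki_removals}
data.update(interwiki_replacements)    # 'dk' maps to 'da', 'zh-tw' stays None
obsolete = types.MappingProxyType(data)

print(obsolete['dk'])     # 'da'  (replacement wins over removal)
print(obsolete['zh-tw'])  # None  (removed with no successor)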
brython-dev/brython
www/src/Lib/inspect.py
isabstract
python
def isabstract(object):
    if not isinstance(object, type):
        return False
    if object.__flags__ & TPFLAGS_IS_ABSTRACT:
        return True
    if not issubclass(type(object), abc.ABCMeta):
        return False
    if hasattr(object, '__abstractmethods__'):
        # ABCMeta.__new__ has finished, so TPFLAGS_IS_ABSTRACT was accurate.
        return False
    # ABCMeta.__new__ has not finished yet (e.g. we are in __init_subclass__),
    # so look for abstract methods manually.
    for name, value in object.__dict__.items():
        if getattr(value, "__isabstractmethod__", False):
            return True
    for base in object.__bases__:
        for name in getattr(base, "__abstractmethods__", ()):
            value = getattr(object, name, None)
            if getattr(value, "__isabstractmethod__", False):
                return True
    return False
Return true if the object is an abstract base class (ABC).
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/inspect.py#L420-L442
__author__ = ('Ka-Ping Yee <ping@lfw.org>', 'Yury Selivanov <yselivanov@sprymix.com>') import abc import ast import dis import collections.abc import enum import importlib.machinery import itertools import linecache import os import re import sys import tokenize import token import types import warnings import functools import builtins from operator import attrgetter from collections import namedtuple, OrderedDict mod_dict = globals() for k, v in dis.COMPILER_FLAG_NAMES.items(): mod_dict["CO_" + v] = k TPFLAGS_IS_ABSTRACT = 1 << 20 def get_annotations(obj, *, globals=None, locals=None, eval_str=False): if isinstance(obj, type): obj_dict = getattr(obj, '__dict__', None) if obj_dict and hasattr(obj_dict, 'get'): ann = obj_dict.get('__annotations__', None) if isinstance(ann, types.GetSetDescriptorType): ann = None else: ann = None obj_globals = None module_name = getattr(obj, '__module__', None) if module_name: module = sys.modules.get(module_name, None) if module: obj_globals = getattr(module, '__dict__', None) obj_locals = dict(vars(obj)) unwrap = obj elif isinstance(obj, types.ModuleType): ann = getattr(obj, '__annotations__', None) obj_globals = getattr(obj, '__dict__') obj_locals = None unwrap = None elif callable(obj): ann = getattr(obj, '__annotations__', None) obj_globals = getattr(obj, '__globals__', None) obj_locals = None unwrap = obj else: raise TypeError(f"{obj!r} is not a module, class, or callable.") if ann is None: return {} if not isinstance(ann, dict): raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None") if not ann: return {} if not eval_str: return dict(ann) if unwrap is not None: while True: if hasattr(unwrap, '__wrapped__'): unwrap = unwrap.__wrapped__ continue if isinstance(unwrap, functools.partial): unwrap = unwrap.func continue break if hasattr(unwrap, "__globals__"): obj_globals = unwrap.__globals__ if globals is None: globals = obj_globals if locals is None: locals = obj_locals return_value = {key: value if not isinstance(value, str) else eval(value, globals, locals) for key, value in ann.items() } return return_value def ismodule(object): return isinstance(object, types.ModuleType) def isclass(object): return isinstance(object, type) def ismethod(object): return isinstance(object, types.MethodType) def ismethoddescriptor(object): if isclass(object) or ismethod(object) or isfunction(object): return False tp = type(object) return hasattr(tp, "__get__") and not hasattr(tp, "__set__") def isdatadescriptor(object): if isclass(object) or ismethod(object) or isfunction(object): return False tp = type(object) return hasattr(tp, "__set__") or hasattr(tp, "__delete__") if hasattr(types, 'MemberDescriptorType'): def ismemberdescriptor(object): return isinstance(object, types.MemberDescriptorType) else: def ismemberdescriptor(object): return False if hasattr(types, 'GetSetDescriptorType'): def isgetsetdescriptor(object): return isinstance(object, types.GetSetDescriptorType) else: def isgetsetdescriptor(object): return False def isfunction(object): return isinstance(object, types.FunctionType) def _has_code_flag(f, flag): while ismethod(f): f = f.__func__ f = functools._unwrap_partial(f) if not isfunction(f): return False return bool(f.__code__.co_flags & flag) def isgeneratorfunction(obj): return _has_code_flag(obj, CO_GENERATOR) def iscoroutinefunction(obj): return _has_code_flag(obj, CO_COROUTINE) def isasyncgenfunction(obj): return _has_code_flag(obj, CO_ASYNC_GENERATOR) def isasyncgen(object): return isinstance(object, types.AsyncGeneratorType) def 
isgenerator(object): return isinstance(object, types.GeneratorType) def iscoroutine(object): return isinstance(object, types.CoroutineType) def isawaitable(object): return (isinstance(object, types.CoroutineType) or isinstance(object, types.GeneratorType) and bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE) or isinstance(object, collections.abc.Awaitable)) def istraceback(object): return isinstance(object, types.TracebackType) def isframe(object): return isinstance(object, types.FrameType) def iscode(object): return isinstance(object, types.CodeType) def isbuiltin(object): return isinstance(object, types.BuiltinFunctionType) def isroutine(object): return (isbuiltin(object) or isfunction(object) or ismethod(object) or ismethoddescriptor(object))
BSD 3-Clause New or Revised License
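A quick usage check for isabstract, using only the standard library; the class names are illustrative.

import abc
import inspect

class Base(abc.ABC):
    @abc.abstractmethod
    def render(self): ...

class Concrete(Base):
    def render(self):
        return "ok"

print(inspect.isabstract(Base))      # True: TPFLAGS_IS_ABSTRACT is set by ABCMeta
print(inspect.isabstract(Concrete))  # False: all abstract methods are overridden
print(inspect.isabstract(object()))  # False: not a class at all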
ldv-klever/klever
klever/core/vtg/emg/translation/__init__.py
translate_intermediate_model
python
def translate_intermediate_model(logger, conf, avt, source, collection): logger.info(f"Translate '{collection.attributed_name}' with an identifier {collection.name}") conf['translation options'].setdefault('entry point', 'main') conf['translation options'].setdefault('environment model file', 'environment_model.c') conf['translation options'].setdefault('nested automata', True) conf['translation options'].setdefault('direct control functions calls', True) conf['translation options'].setdefault('code additional aspects', list()) conf['translation options'].setdefault('additional headers', DEFAULT_INCLUDE_HEADERS) conf['translation options'].setdefault('self parallel processes', False) conf['translation options'].setdefault('ignore missing program files', False) model_path = str(collection.name) assert model_path, 'Each environment model should have a unique name' assert not os.path.isdir(model_path), f"Model name '{model_path}' is used twice" if os.path.isdir(model_path): logger.info(f"Clean workdir for translation '{model_path}'") shutil.rmtree(model_path) os.makedirs(model_path) if collection.attributed_name != collection.name: os.symlink(model_path, collection.attributed_name, target_is_directory=True) model_file = os.path.join(model_path, 'input model.json') with open(model_file, mode='w', encoding='utf-8') as fp: json.dump(collection, fp, cls=CollectionEncoder, sort_keys=True, indent=2) collection.save_digraphs(os.path.join(model_path, 'images')) if not collection.entry: raise RuntimeError("It is impossible to generate an environment model without main process") if conf['translation options'].get('ignore missing function models'): for name in list(collection.models.keys()): fs = source.get_source_functions(name) if not fs: logger.info("Ignore function model {!r} since there is no such function in the code".format(name)) del collection.models[name] if conf['translation options'].get('implicit signal peers'): collection.establish_peers() logger.info("Determine entry point file and function name") entry_file = os.path.join(model_path, conf['translation options'].get('environment model file', 'environment_model.c')) entry_point_name = get_or_die(conf['translation options'], 'entry point') files = source.c_full_paths if entry_file not in files: files.add(entry_file) try: entry_file_realpath = find_file_or_dir(logger, conf['main working directory'], entry_file) except FileNotFoundError: entry_file_realpath = os.path.relpath(entry_file, conf['main working directory']) avt['environment model'] = entry_file_realpath additional_code = dict() for process in list(collection.models.values()) + list(collection.environment.values()) + [collection.entry]: for att in ('declarations', 'definitions'): for file in getattr(process, att): additional_code.setdefault(file, {'declarations': sortedcontainers.SortedDict(), 'definitions': sortedcontainers.SortedDict()}) additional_code[file][att].update(getattr(process, att)[file]) if process.file == 'environment model': process.file = entry_file cmodel = CModel(logger, conf, conf['main working directory'], files, entry_point_name, entry_file) for file in additional_code: additional_code[file]['declarations'] = [val if val.endswith('\n') else val + '\n' for val in additional_code[file]['declarations'].values()] val = additional_code[file]['definitions'] additional_code[file]['definitions'] = list() for name, item in val.items(): if isinstance(item, list): additional_code[file]['definitions'].extend(item) elif isinstance(item, str): pth = find_file_or_dir(logger, 
conf['main working directory'], item) with open(pth, 'r', encoding='utf-8') as fp: additional_code[file]['definitions'].extend(fp.readlines() + ["\n"]) elif isinstance(item, dict): func = cmodel.create_wrapper(name, item['wrapper'], item['declaration']) additional_code[file]['definitions'].extend(func.define() + ["\n"]) if isinstance(additional_code['environment model']['declarations'], list): additional_code['environment model']['declarations'].append(func.declare(extern=True)[0] + "\n") elif func.name not in additional_code['environment model']['declarations']: additional_code['environment model']['declarations'][func.name] = func.declare(extern=True)[0] + "\n" else: raise ValueError("Expect either a list of string as a definition in intermediate model specification of" " a path name but got {!r}".format(item)) if 'environment model' in additional_code: additional_code[entry_file] = additional_code['environment model'] del additional_code['environment model'] for file in files: cmodel.add_headers(file, get_or_die(conf['translation options'], "additional headers")) logger.info("Generate finite state machine on each process") entry_fsa = Automaton(collection.entry, 1) identifiers = id_generator(start_from=2, cast=int) model_fsa = [] main_fsa = [] for process in collection.models.values(): model_fsa.append(Automaton(process, next(identifiers))) for process in collection.environment.values(): main_fsa.append(Automaton(process, next(identifiers))) sp_ids = conf["translation options"].get('not self parallel processes') if sp_ids and isinstance(sp_ids, list): for amtn in (a for a in model_fsa + main_fsa + [entry_fsa] if str(a.process) in sp_ids): amtn.self_parallelism = False sp_categories = conf["translation options"].get("not self parallel processes from categories") sp_scenarios = conf["translation options"].get("not self parallel processes from scenarios") if sp_categories and isinstance(sp_categories, list): for amtn in (a for a in model_fsa + main_fsa + [entry_fsa] if a.process.category in sp_categories): amtn.self_parallelism = False if sp_scenarios and isinstance(sp_scenarios, list): for amtn in (a for a in model_fsa + main_fsa + [entry_fsa] if a.process.name in sp_scenarios): amtn.self_parallelism = False logger.info("Translate finite state machines into C code") if conf['translation options'].get("simple control functions calls", True): SimplestTranslator(logger, conf['translation options'], source, collection, cmodel, entry_fsa, model_fsa, main_fsa) elif get_or_die(conf['translation options'], "nested automata"): LabelTranslator(logger, conf['translation options'], source, collection, cmodel, entry_fsa, model_fsa, main_fsa) else: StateTranslator(logger, conf['translation options'], source, collection, cmodel, entry_fsa, model_fsa, main_fsa) logger.info("Print generated source code") addictions = cmodel.print_source_code(model_path, additional_code) logger.info("Add an entry point function name to the abstract verification task") avt["entry points"] = [cmodel.entry_name] if conf['translation options'].get("code additional aspects"): additional_aspects = [os.path.abspath(find_file_or_dir(logger, conf["main working directory"], f)) for f in conf['translation options'].get("code additional aspects")] else: additional_aspects = [] for grp in avt['grps']: logger.info('Add aspects to C files of group {!r}'.format(grp['id'])) for cc_extra_full_desc_file in [f for f in grp['Extra CCs'] if 'in file' in f]: if cc_extra_full_desc_file["in file"] in addictions: if 'plugin aspects' not in 
cc_extra_full_desc_file: cc_extra_full_desc_file['plugin aspects'] = [] cc_extra_full_desc_file['plugin aspects'].append( { "plugin": "EMG", "aspects": [addictions[cc_extra_full_desc_file["in file"]]] + additional_aspects } ) extra_c_files = {f for p in list(collection.models.values()) + list(collection.environment.values()) + [collection.entry] for f in p.cfiles} avt.setdefault('extra C files', list()) avt['extra C files'].extend([ {"C file": os.path.realpath(find_file_or_dir(logger, get_or_die(conf, "main working directory"), f))} for f in extra_c_files]) return avt
This is the main translator function. It first generates automata for all given processes of the environment model and then gives them to the particular translator chosen by the user-defined configuration. At the end it triggers code printing and adds the necessary information to the (abstract) verification task description.

:param logger: Logger object.
:param conf: Configuration dictionary for the whole EMG.
:param avt: Verification task dictionary.
:param source: Source object.
:param collection: ProcessCollection object.
:return: The updated verification task dictionary (avt).
https://github.com/ldv-klever/klever/blob/160b1fe0a73dd5b2b0c220235f6c663045610edd/klever/core/vtg/emg/translation/__init__.py#L44-L235
import os import shutil import json import sortedcontainers from klever.core.vtg.utils import find_file_or_dir from klever.core.vtg.emg.translation.code import CModel from klever.core.vtg.emg.translation.automaton import Automaton from klever.core.vtg.emg.common import id_generator, get_or_die from klever.core.vtg.emg.common.process.serialization import CollectionEncoder from klever.core.vtg.emg.translation.fsa_translator.label_fsa_translator import LabelTranslator from klever.core.vtg.emg.translation.fsa_translator.state_fsa_translator import StateTranslator from klever.core.vtg.emg.translation.fsa_translator.simplest_fsa_translator import SimplestTranslator DEFAULT_INCLUDE_HEADERS = ( "ldv/linux/common.h", "ldv/linux/err.h", "ldv/verifier/common.h", "ldv/verifier/gcc.h", "ldv/verifier/nondet.h", "ldv/verifier/memory.h", "ldv/verifier/thread.h" )
Apache License 2.0
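The translator choice at the heart of translate_intermediate_model is a three-way dispatch on two configuration flags. The following standalone sketch only mirrors that control flow with string stand-ins, since the real translator classes need the full EMG pipeline; it is not code from the repository.

def pick_translator(options):
    # Mirrors the dispatch order in translate_intermediate_model:
    # 'simple control functions calls' wins, then 'nested automata'.
    if options.get("simple control functions calls", True):
        return "SimplestTranslator"
    if options.get("nested automata", True):
        return "LabelTranslator"
    return "StateTranslator"

print(pick_translator({}))                                         # SimplestTranslator
print(pick_translator({"simple control functions calls": False}))  # LabelTranslator
print(pick_translator({"simple control functions calls": False,
                       "nested automata": False}))                 # StateTranslator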
crohr/rpm-s3
vendor/createrepo/createrepo/yumbased.py
CreateRepoPackage.do_primary_sqlite_dump
python
def do_primary_sqlite_dump(self, cur):
    if self.crp_reldir and self.localpath.startswith(self.crp_reldir):
        relpath = self.localpath.replace(self.crp_reldir, '')
        if relpath[0] == '/':
            relpath = relpath[1:]
    else:
        relpath = self.localpath

    p = (self.crp_packagenumber, self.checksum, self.name, self.arch,
         self.version, self.epoch, self.release, self.summary.strip(),
         self.description.strip(), self._sqlite_null(self.url), self.filetime,
         self.buildtime, self._sqlite_null(self.license),
         self._sqlite_null(self.vendor), self._sqlite_null(self.group),
         self._sqlite_null(self.buildhost), self._sqlite_null(self.sourcerpm),
         self.hdrstart, self.hdrend, self._sqlite_null(self.packager),
         self.packagesize, self.size, self.archivesize, relpath,
         self.crp_baseurl, self.checksum_type)
    q = """insert into packages values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?, ?, ?, ?, ?, ?, ?)"""
    cur.execute(q, p)

    for pco in ('obsoletes', 'provides', 'conflicts'):
        thispco = []
        for (name, flag, (epoch, ver, rel)) in getattr(self, pco):
            thispco.append((name, flag, epoch, ver, rel, self.crp_packagenumber))
        q = "insert into %s values (?, ?, ?, ?, ?, ?)" % pco
        cur.executemany(q, thispco)

    reqs = []
    for (name, flag, (epoch, ver, rel), pre) in self._requires_with_pre():
        if name.startswith('rpmlib('):
            continue
        pre_bool = 'FALSE'
        if pre == 1:
            pre_bool = 'TRUE'
        reqs.append((name, flag, epoch, ver, rel, self.crp_packagenumber, pre_bool))
    q = "insert into requires values (?, ?, ?, ?, ?, ?, ?)"
    cur.executemany(q, reqs)

    p = []
    for f in self._return_primary_files():
        p.append((f,))
    if p:
        q = "insert into files values (?, 'file', %s)" % self.crp_packagenumber
        cur.executemany(q, p)

    p = []
    for f in self._return_primary_dirs():
        p.append((f,))
    if p:
        q = "insert into files values (?, 'dir', %s)" % self.crp_packagenumber
        cur.executemany(q, p)

    p = []
    for f in self._return_primary_files(list_of_files=self.returnFileEntries('ghost')):
        p.append((f,))
    if p:
        q = "insert into files values (?, 'ghost', %s)" % self.crp_packagenumber
        cur.executemany(q, p)
Insert primary data in place; this assumes the tables exist.
https://github.com/crohr/rpm-s3/blob/db3ef54a6c55e5812ef41a338c7905461c2c8612/vendor/createrepo/createrepo/yumbased.py#L113-L189
import os def _get_umask(): oumask = os.umask(0) os.umask(oumask) return oumask _b4rpm_oumask = _get_umask() import rpm import types from yum.packages import YumLocalPackage from yum.Errors import * from yum import misc import utils import tempfile class CreateRepoPackage(YumLocalPackage): def __init__(self, ts, package, sumtype=None, external_data={}): YumLocalPackage.__init__(self, ts, package) if sumtype: self.checksum_type = sumtype if external_data: for (key, val) in external_data.items(): setattr(self, key, val) def _do_checksum(self): if self._checksum: return self._checksum if not hasattr(self, '_cachedir') or not self._cachedir: self._checksum = misc.checksum(self.checksum_type, self.localpath) self._checksums = [(self.checksum_type, self._checksum, 1)] return self._checksum t = [] if type(self.hdr[rpm.RPMTAG_SIGGPG]) is not types.NoneType: t.append("".join(self.hdr[rpm.RPMTAG_SIGGPG])) if type(self.hdr[rpm.RPMTAG_SIGPGP]) is not types.NoneType: t.append("".join(self.hdr[rpm.RPMTAG_SIGPGP])) if type(self.hdr[rpm.RPMTAG_HDRID]) is not types.NoneType: t.append("".join(self.hdr[rpm.RPMTAG_HDRID])) kcsum = misc.Checksums(checksums=[self.checksum_type]) kcsum.update("".join(t)) key = kcsum.hexdigest() csumtag = '%s-%s-%s-%s' % (os.path.basename(self.localpath), key, self.size, self.filetime) csumfile = '%s/%s' % (self._cachedir, csumtag) if os.path.exists(csumfile) and float(self.filetime) <= float(os.stat(csumfile)[-2]): csumo = open(csumfile, 'r') checksum = csumo.readline() csumo.close() else: checksum = misc.checksum(self.checksum_type, self.localpath) try: (csumo, tmpfilename) = tempfile.mkstemp(dir=self._cachedir) csumo = os.fdopen(csumo, 'w', -1) csumo.write(checksum) csumo.close() os.chmod(tmpfilename, 0666 ^ _b4rpm_oumask) os.rename(tmpfilename, csumfile) except: pass self._checksum = checksum self._checksums = [(self.checksum_type, checksum, 1)] return self._checksum def _sqlite_null(self, item): if not item: return None return item
BSD 2-Clause Simplified License
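The bulk of do_primary_sqlite_dump is parameterized executemany calls. This standalone sqlite3 sketch shows the same pattern for a provides-style table with made-up values; the table layout is simplified and is not createrepo's real schema.

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("create table provides (name text, flag text, epoch text, "
            "version text, release text, pkgKey integer)")

pkg_key = 1  # stands in for self.crp_packagenumber
rows = [("libfoo.so.1", None, None, None, None, pkg_key),
        ("foo", "EQ", "0", "1.2", "3", pkg_key)]

# Same shape as: cur.executemany("insert into provides values (?, ?, ?, ?, ?, ?)", thispco)
cur.executemany("insert into provides values (?, ?, ?, ?, ?, ?)", rows)
conn.commit()
print(cur.execute("select count(*) from provides").fetchone()[0])  # 2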
mantl/terraform.py
src/ati/terraform.py
iterhosts
python
def iterhosts(resources, args):
    for module_name, key, resource in resources:
        resource_type, name = key.split('.', 1)
        try:
            parser = PARSERS[resource_type]
        except KeyError:
            continue
        yield parser(resource, module_name, args=args)
yield host tuples of (name, attributes, groups)
https://github.com/mantl/terraform.py/blob/c9276d710585e2db0925ac83ad9d7145dfc3d723/src/ati/terraform.py#L111-L120
from collections import defaultdict from functools import wraps import json import os import re import sh try: unicode STRING_TYPES = [unicode, str] except NameError: STRING_TYPES = [str] def tfstates(root=None): root = root or os.getcwd() for dirpath, _, filenames in os.walk(root): for name in filenames: if os.path.splitext(name)[-1] == '.tfstate': yield os.path.join(dirpath, name) def iter_states(root=None): root = root or os.getcwd() curdir = os.getcwd() for dpath, dnames, fnames in os.walk(root): if '.terraform' in dnames: try: os.chdir(dpath) output = sh.terraform("state", "pull").stdout.decode('utf-8') start_index = output.find('{') if start_index < 0: start_index = 0 yield json.loads(output[start_index:]) finally: os.chdir(curdir) def iterresources(sources): for source in sources: if type(source) in STRING_TYPES: with open(source, 'r') as json_file: state = json.load(json_file) else: state = source for module in state['modules']: name = module['path'][-1] for key, resource in list(module['resources'].items()): yield name, key, resource def get_stage_root(tf_dirname=None, root=None): root = root or os.getcwd() ansible_dir = os.getcwd() tf_dirname = tf_dirname or 'terraform' inv_name = root.split(os.path.sep)[-1] try: terraform_base = os.path.join(ansible_dir, tf_dirname) if inv_name in os.listdir(terraform_base): return os.path.join(terraform_base, inv_name) else: return root except OSError: return root PARSERS = {} def _clean_dc(dcname): return re.sub('[^\w_\-]', '-', dcname)
Apache License 2.0
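iterhosts only dispatches on the resource-type prefix of each key through the PARSERS registry, so a stub parser is enough to exercise it. The resource payload, group names, and the import path are illustrative assumptions (the parser is registered directly into the PARSERS dict shown in the context for brevity).

from ati.terraform import iterhosts, PARSERS  # assumes the src/ati package layout above

def aws_instance(resource, module_name, **kwargs):
    attrs = resource["primary"]["attributes"]
    return attrs["tags.Name"], attrs, ["module_" + module_name]

PARSERS["aws_instance"] = aws_instance

resources = [("core", "aws_instance.web", {
    "primary": {"attributes": {"tags.Name": "web-0", "private_ip": "10.0.0.5"}}})]

for name, attributes, groups in iterhosts(resources, args=None):
    print(name, groups)  # web-0 ['module_core']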
brianhie/trajectorama
trajectorama/pan_dag.py
PanDAG.sketch
python
def sketch(self, X):
    n_samples = X.shape[0]

    if self.verbose > 1:
        tprint('Sketching...')

    if self.sketch_method == 'geometric':
        from geosketch import gs
        sketch_idx = gs(X, self.sketch_size, replace=False)
    elif self.sketch_method == 'uniform':
        sketch_idx = sorted(np.random.choice(
            n_samples, size=self.sketch_size, replace=False
        ))
    else:
        return X

    X_sketch = X[sketch_idx]
    self.sketch_neighbors = nearest_approx(X, X_sketch)

    return X[sketch_idx]
Actually sketches the dataset and saves nearest neighbor mappings from
sketch elements to sample observations in the full dataset in the
`self.sketch_neighbors` variable.

Parameters
----------
X: `numpy.ndarray` or `scipy.sparse.csr_matrix`
    Dataset to be sketched.

Returns
-------
X_sketch
    Sketched version of dataset `X`.
https://github.com/brianhie/trajectorama/blob/5a4995b916cd80ba4310111ce4a5776f58517b39/trajectorama/pan_dag.py#L114-L149
from anndata import AnnData from joblib import Parallel, delayed import numpy as np import scanpy as sc from sklearn.preprocessing import normalize import os import sys import uuid import warnings from ._louvain import louvain from .utils import * def louvain_worker(X, resolution): log_uuid = str(uuid.uuid4()) tmp_log_fname = 'target/tmp/{}_louvain.log'.format(log_uuid) adata = AnnData(X=X) sc.pp.neighbors(adata, use_rep='X') louvain(adata, resolution=resolution, key_added='louvain', log_fname=tmp_log_fname) return tmp_log_fname class PanDAG(object): def __init__( self, cluster_method='louvain', sketch_size='auto', sketch_method='auto', reduce_dim=None, verbose=False, ): self.cluster_method = cluster_method self.sketch_size = sketch_size self.sketch_method = sketch_method self.sketch_neighbors = None self.reduce_dim = reduce_dim self.verbose = verbose self.features = None self.children = [] self.sample_idx = [] self.n_leaves = len(self.sample_idx) self.nodes = [ self ] def check_and_sketch(self, X): n_samples = X.shape[0] if self.sketch_size is None: return X if self.sketch_method not in set([ 'auto', 'geometric', 'uniform' ]): raise ValueError('Invalid sketching method {}' .format(self.sketch_method)) if self.sketch_size == 'auto': if self.cluster_method == 'agg_ward': if n_samples > 5000: self.sketch_size = 5000 if self.sketch_method == 'auto': self.sketch_method = 'geometric' else: return X elif self.cluster_method == 'louvain': if n_samples > 1000000: self.sketch_size = 1000000 if self.sketch_method == 'auto': self.sketch_method = 'uniform' else: return X else: self.sketch_size = 20000 if self.sketch_method == 'auto': self.sketch_method = 'geometric' elif self.sketch_method == 'auto': self.sketch_method = 'geometric' if self.sketch_method == 'geometric' and self.reduce_dim is None: X = reduce_dimensionality(normalize(X), dim_red_k=100) return self.sketch(X)
MIT License
gmr/mikkoo
mikkoo/state.py
State.set_state
python
def set_state(self, new_state):
    if new_state not in self.STATES:
        raise ValueError('Invalid state value: %r' % new_state)
    LOGGER.debug('State changing from %s to %s',
                 self.STATES[self.state], self.STATES[new_state])
    self.state = new_state
    self.state_start = time.time()
Assign the specified state to this consumer object.

:param int new_state: The new state of the object
:raises: ValueError
https://github.com/gmr/mikkoo/blob/1809528f61ca70f222bd0785e7d85a866d27b0a6/mikkoo/state.py#L47-L62
import logging import time LOGGER = logging.getLogger(__name__) class State(object): STATE_INITIALIZING = 0x01 STATE_CONNECTING = 0x02 STATE_IDLE = 0x03 STATE_ACTIVE = 0x04 STATE_SLEEPING = 0x05 STATE_STOP_REQUESTED = 0x06 STATE_SHUTTING_DOWN = 0x07 STATE_STOPPED = 0x08 STATE_RECONNECTING = 0x09 STATE_BLOCKED = 0x10 STATES = { 0x01: 'Initializing', 0x02: 'Connecting', 0x03: 'Idle', 0x04: 'Active', 0x05: 'Sleeping', 0x06: 'Stop Requested', 0x07: 'Shutting down', 0x08: 'Stopped', 0x09: 'Reconnecting', 0x10: 'Blocked' } def __init__(self): self.state = self.STATE_INITIALIZING self.state_start = time.time()
BSD 3-Clause New or Revised License
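A minimal usage sketch for the `State.set_state` record above, assuming only the `State` class shown in the record's context; the import path and the exact log text are illustrative, not quoted from the project.

import logging
logging.basicConfig(level=logging.DEBUG)

from mikkoo.state import State  # assumed from the record's function_path (mikkoo/state.py)

consumer_state = State()                       # starts in STATE_INITIALIZING
consumer_state.set_state(State.STATE_ACTIVE)   # DEBUG log: state changing from Initializing to Active
print(consumer_state.state_start)              # timestamp of the last transition

# Values outside State.STATES are rejected:
# consumer_state.set_state(0x99)  ->  ValueError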
xanaduai/qmlt
qmlt/numerical/losses.py
square_loss
python
def square_loss(outputs, targets):
    outputs = np.array(outputs)
    targets = np.array(targets)

    if outputs.shape != targets.shape:
        raise ValueError("Cannot compute squared loss if outputs and targets have"
                         " different shapes {} and {}".format(outputs.shape, targets.shape))
    if outputs.ndim > 2:
        raise ValueError("Mean squared loss expects 1-d outputs, dimension of current outputs"
                         " is {}.".format(outputs.ndim - 1))

    diff = outputs - targets
    res = 0.5*sum(np.dot(d, d) for d in diff)
    return res
r"""Mean squared loss :math:`0.5 \sum\limits_{m=1}^M |y^m - t^m|^2` between
outputs :math:`y^m` and targets :math:`t^m` for :math:`m = 1,...,M`.

Args:
    outputs (ndarray or list): array of dimension M x 1 containing the 1-dimensional outputs.
    targets (ndarray or list): array of the same dimension and type as outputs, containing the targets.

Returns:
    float: Scalar mean squared loss.
https://github.com/xanaduai/qmlt/blob/fc9487f89bf894576d2001abe5a5d07f35f19d7a/qmlt/numerical/losses.py#L105-L131
import numpy as np def trace_distance(rho, sigma): rho = np.array(rho) sigma = np.array(sigma) if rho.shape != sigma.shape: raise ValueError("Cannot compute the trace distance if inputs have" " different shapes {} and {}".format(rho.shape, sigma.shape)) if rho.ndim != 2: raise ValueError("Trace distance loss expects 2-d arrays representing density matrices.") diffs = rho - sigma eigvals = np.linalg.eigvals(diffs) return 0.5 * sum(np.absolute(eigvals)) def expectation(rho, operator): rho = np.array(rho) operator = np.array(operator) if rho.shape != operator.shape: raise ValueError("Cannot compute expectation value if rho and operator have" " different shapes {} and {}".format(rho.shape, operator.shape)) if rho.ndim != 2: raise ValueError("Expectation loss expects a 2-d array representing a density matrix.") exp = np.trace(rho@operator) if np.imag(exp) > 1e-5: raise ValueError("Expectation value has a non-negligible imaginary contribution." "Something went wrong.") return exp
Apache License 2.0
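A short usage sketch for the `square_loss` record above; the arrays are made up, and the printed result simply follows the 0.5 * sum |y^m - t^m|^2 definition in the docstring (the import path is taken from the record's function_path).

import numpy as np
from qmlt.numerical.losses import square_loss

outputs = np.array([1.0, 2.0])
targets = np.array([0.0, 0.0])

# 0.5 * (|1 - 0|^2 + |2 - 0|^2) = 0.5 * (1 + 4) = 2.5
print(square_loss(outputs, targets))   # 2.5

# Mismatched shapes are rejected:
# square_loss([1.0, 2.0], [0.0])  ->  ValueError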
reticulatingspline/scores
plugin.py
Scores._formatstatus
python
def _formatstatus(self, string):
    if string.startswith('F'):
        string = string.replace('FINAL', 'F')
        string = string.replace('Final', 'F')
        string = string.replace(' ', '')
        string = self._red(string)
    elif string.startswith('Top ') or string.startswith('Bot ') or string.startswith('End') or string.startswith('Mid') or string.startswith('Bottom '):
        string = string.replace('Top ', 'T')
        string = string.replace('Bottom ', 'B')
        string = string.replace('Bot ', 'B')
        string = string.replace('End ', 'E')
        string = string.replace('Mid ', 'M')
        string = string.replace('th', '').replace('nd', '').replace('rd', '').replace('st', '')
        string = self._green(string)
    elif string.startswith('Dly') or string.startswith('Ppd.') or string.startswith('Del') or string.startswith('Susp'):
        if string == "Ppd.":
            string = self._yellow('PPD')
        else:
            string = self._yellow('DLY')
    return string
Handle status here.
https://github.com/reticulatingspline/scores/blob/f32aa2b9431cfdc60a4e48c5ab5544642d345e4a/plugin.py#L139-L164
import requests from bs4 import BeautifulSoup import re import sys import datetime from time import time import re import supybot.utils as utils from supybot.commands import * import supybot.plugins as plugins import supybot.ircutils as ircutils import supybot.callbacks as callbacks try: from supybot.i18n import PluginInternationalization _ = PluginInternationalization('Scores') except ImportError: _ = lambda x: x class Scores(callbacks.Plugin): threaded = True def __init__(self, irc): self.__parent = super(Scores, self) self.__parent.__init__(irc) self.DAYS = ['yesterday', 'tonight', 'today', 'tomorrow'] def _red(self, string): return ircutils.mircColor(string, 'red') def _yellow(self, string): return ircutils.mircColor(string, 'yellow') def _green(self, string): return ircutils.mircColor(string, 'green') def _bold(self, string): return ircutils.bold(string) def _ul(self, string): return ircutils.underline(string) def _bu(self, string): return ircutils.bold(ircutils.underline(string)) def _sf(self, string): return ircutils.stripFormatting(string) def _datetodatetime(self, optdate): if optdate == "lastweek": datedelta = -7 elif optdate == "yesterday": datedelta = -1 elif optdate == "today" or optdate =="tonight": datedelta = 0 elif optdate == "tomorrow": datedelta = 1 elif optdate == "nextweek": datedelta = 7 elif optdate in self.DAYS: weekdaynum = self.DAYS.index(optdate) dayoftheweek = datetime.datetime.now().isoweekday() if weekdaynum >= dayoftheweek: datedelta = weekdaynum - dayoftheweek else: datedelta = 7+(weekdaynum-dayoftheweek) datestr = (datetime.date.today() + datetime.timedelta(days=datedelta)).strftime('%Y-%m-%d') return datestr def _boldleader(self, atm, asc, htm, hsc): if int(asc) > int(hsc): return("{0} {1}".format(self._bold(atm + " " + asc), self._sf(htm + " " + hsc))) elif int(hsc) > int(asc): return("{0} {1}".format(self._sf(atm + " " + asc), self._bold(htm + " " + hsc))) else: return("{0} {1} {2} {3}".format(atm, asc, htm, hsc)) def _fetch(self, optsport, date=None): if date: url = "http://m.yahoo.com/w/sports/%s/scores?date=%s&.ts=%s&.intl=us&.lang=en" % (date, optsport, time.time()) else: url = "http://m.yahoo.com/w/sports/%s/scores?.ts=%s&.intl=us&.lang=en" % (optsport, time.time()) try: headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/17.0 Firefox/17.0"} page = requests.get(url, headers=headers, verify=False) return page except Exception as e: self.log.error("ERROR. Could not open {0} message: {1}".format(url, e)) return None def _urlfetch(self, url): try: headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/17.0 Firefox/17.0"} page = requests.get(url, headers=headers, verify=False) return page except Exception as e: self.log.error("ERROR. Could not open {0} message: {1}".format(url, e)) return None
MIT License
rucio/rucio
lib/rucio/db/sqla/migrate_repo/versions/b8caac94d7f0_add_comments_column_for_subscriptions_.py
upgrade
python
def upgrade():
    if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
        schema = context.get_context().version_table_schema if context.get_context().version_table_schema else ''
        add_column('subscriptions_history', sa.Column('comments', sa.String(4000)), schema=schema)
Upgrade the database to this revision
https://github.com/rucio/rucio/blob/6a6092798bb8220dec07328d0e3f7f42d1b931cd/lib/rucio/db/sqla/migrate_repo/versions/b8caac94d7f0_add_comments_column_for_subscriptions_.py#L32-L39
import sqlalchemy as sa

from alembic import context
from alembic.op import add_column, drop_column

revision = 'b8caac94d7f0'
down_revision = '8523998e2e76'
Apache License 2.0
alvinctk/google-tech-dev-guide
Data_Structures/avl_tree.py
AVL_Tree.rotate_with_left_child
python
def rotate_with_left_child(self, k2: Node):
    k1 = k2.left
    k2.left = k1.right
    k1.right = k2
    self.set_height(k2)
    self.set_height(k1)
    return k1
Case 1: An insertion onto left subtree of left child of k2 To fix case 1: Rotate with left child (or single right rotation) k2 Right k1 / \ Rotation / \ k1 Z ----> k2 /\ / /\ / Y X Y Z X
https://github.com/alvinctk/google-tech-dev-guide/blob/9d7759bea1f44673c2de4f25a94b27368928a59f/Data_Structures/avl_tree.py#L198-L215
class Node: def __init__(self, data, left, right): self.data = data self.left = left self.right = right self.height = -1 def __str__(self): return str(self.data) class AVL_Tree: ALLOWED_IMBALANCE = 1 def __init__(self): self.root = None self.height = -1 def get_height(self, t: Node): return -1 if t is None else t.height def set_height(self, t: Node): h = self.get_height t.height = max(h(t.left), h(t.right)) + 1 def find_max(self): return self.find_max_value(self.root) def find_max_value(self, t: Node): if t is None: return None current = t while (current.right is not None): current = current.right return current.data def find_min(self): return self.find_min_value(self.root) def find_min_value(self, t: Node): if t is None: return None current = t while (current.left is not None): current = current.left return current.data def contains(self, value): return self.contains_value(value, self.root) def contains_value(self, value, node): if node is None: return False if node.data < value: return self.contains_value(value, node.left) elif node.data > value: return self.contains_value(value, node.right) else: return True def insert(self, value): if self.root is None: self.root = Node(value, None, None) else: self.root = self.insert_value(value, self.root) def insert_value(self, value, t: Node): if t is None: t = Node(value, None, None) elif value < t.data: t.left = self.insert_value(value, t.left) elif value > t.data: t.right = self.insert_value(value, t.right) return self.balance(t) def remove(self, value): self.root = self.remove_value(value, self.root) def remove_value(self, value, t: Node): if t is None: return t if value < t.data: t.left = self.remove_value(value, t.left) elif value > t.data: t.right = self.remove_value(value, t.right) elif t.left is not None and t.right is not None: t.data = self.find_min_value(t.right) t.data = self.find_min_value(t.right) t.right = self.remove_value(t.data, t.right) else: if t.left is not None: t = t.left else: t = t.right return self.balance(t) def not_balance(self, x: Node, y: Node): h = self.get_height return (h(x) - h(y)) > self.ALLOWED_IMBALANCE def balance(self, t: Node): if t is None: return t h = self.get_height if self.not_balance(t.left, t.right): if h(t.left.left) >= h(t.left.right): t = self.rotate_with_left_child(t) else: t = self.double_with_left_child(t) elif self.not_balance(t.right, t.left): if h(t.right.right) >= h(t.right.left): t = self.rotate_with_right_child(t) else: t = self.double_with_right_child(t) self.set_height(t) return t
Apache License 2.0
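An illustrative sketch for the `rotate_with_left_child` record above: inserting descending keys into the `AVL_Tree` from the record's context creates the left-left (case 1) imbalance that this single right rotation repairs. The import path is an assumption based on the record's function_path.

from avl_tree import AVL_Tree  # hypothetical module name taken from Data_Structures/avl_tree.py

tree = AVL_Tree()
for value in (30, 20, 10):   # 10 lands in the left subtree of the left child of 30
    tree.insert(value)

# balance() detects the case-1 imbalance and calls rotate_with_left_child on the node holding 30,
# so 20 becomes the new root with 10 and 30 as its children.
print(tree.root.data)                             # 20
print(tree.root.left.data, tree.root.right.data)  # 10 30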
salesforce/pomgen
common/version.py
parse_build_pom_released_version
python
def parse_build_pom_released_version(build_pom_released_content):
    return parse_build_pom_version(build_pom_released_content)
Returns the value of released_maven_artifact.version.
https://github.com/salesforce/pomgen/blob/4fb427c95c9dc35bfcf47f921e85d6be3876ef6c/common/version.py#L60-L64
from . import code from collections import namedtuple import re version_re = re.compile("(^.*version *= *[\"'])(.*?)([\"'].*)$", re.S) def get_version_increment_strategy(build_pom_content, path): maven_art_up = _parse_maven_artifact_update(build_pom_content, path) if maven_art_up.version_increment_strategy == "major": incr_strat = _get_major_version_increment_strategy() elif maven_art_up.version_increment_strategy == "minor": incr_strat = _get_minor_version_increment_strategy() elif maven_art_up.version_increment_strategy == "patch": incr_strat = _get_patch_version_increment_strategy() else: raise Exception("Unknown version increment strategy: %s" % maven_art_up.version_increment_strategy) return lambda version: version_update_handler(version, incr_strat) def parse_build_pom_version(build_pom_content): m = version_re.search(build_pom_content) if m is None: return None else: return m.group(2).strip()
BSD 3-Clause New or Revised License
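A hedged sketch of calling `parse_build_pom_released_version` from the record above. The BUILD.pom.released snippet is hypothetical; the only assumption is that it contains a version assignment of the form the regex in the record's context extracts.

from common.version import parse_build_pom_released_version  # path from the record's function_path

# Hypothetical BUILD.pom.released content.
build_pom_released_content = '''
released_maven_artifact(
    version = "1.2.3",
)
'''

print(parse_build_pom_released_version(build_pom_released_content))  # 1.2.3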
jordanisaacs/fastapi-sessions
fastapi_sessions/backends/session_backend.py
SessionBackend.update
python
async def update(self, session_id: ID, data: SessionModel) -> None:
    raise NotImplementedError()
Update session data to the storage
https://github.com/jordanisaacs/fastapi-sessions/blob/be4126938011abd709fa18e9d9fc8a54c66a2130/fastapi_sessions/backends/session_backend.py#L31-L33
from abc import ABC, abstractmethod from typing import Generic, Optional, TypeVar from fastapi_sessions.frontends.session_frontend import ID from pydantic.main import BaseModel SessionModel = TypeVar("SessionModel", bound=BaseModel) class BackendError(Exception): pass class SessionBackend(ABC, Generic[ID, SessionModel]): @abstractmethod async def create(self, session_id: ID, data: SessionModel) -> None: raise NotImplementedError() @abstractmethod async def read(self, session_id: ID) -> Optional[SessionModel]: raise NotImplementedError() @abstractmethod
MIT License
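`SessionBackend.update` above is abstract, so a concrete backend has to supply it. Below is a minimal in-memory sketch (not part of the library) that mirrors the create/read/update signatures; the `SessionData` model and dictionary storage are assumptions for illustration only.

from typing import Dict, Optional
from uuid import UUID

from pydantic import BaseModel


class SessionData(BaseModel):
    username: str


class InMemoryBackend:
    """Toy stand-in that mirrors the SessionBackend create/read/update methods."""

    def __init__(self) -> None:
        self._store: Dict[UUID, SessionData] = {}

    async def create(self, session_id: UUID, data: SessionData) -> None:
        self._store[session_id] = data.copy(deep=True)

    async def read(self, session_id: UUID) -> Optional[SessionData]:
        return self._store.get(session_id)

    async def update(self, session_id: UUID, data: SessionData) -> None:
        # Overwrite whatever is stored for this session id.
        self._store[session_id] = data.copy(deep=True)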
wuher/devil
devil/resource.py
Resource._input_validation_failed
python
def _input_validation_failed(self, error, data, request):
    raise errors.BadRequest(str(error))
Always raises HttpStatusCodeError.

Override to raise different status code when request data doesn't pass
validation.

todo: should format the content using the datamapper
https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/resource.py#L296-L305
from django.contrib.auth.models import AnonymousUser from django.core.exceptions import ValidationError from django.http import HttpResponse from django.conf import settings import errors import datamapper import util from http import codes, Response import logging REALM = 'devil' def coerce_put_post(request): if request.method.upper() == "PUT": if hasattr(request, '_post'): del request._post del request._files try: request.method = "POST" request._load_post_and_files() request.method = "PUT" except AttributeError: request.META['REQUEST_METHOD'] = 'POST' request._load_post_and_files() request.META['REQUEST_METHOD'] = 'PUT' request.PUT = request.POST class Resource(object): access_controller = None allow_anonymous = True authentication = None representation = None post_representation = None factory = None post_factory = None default_mapper = None mapper = None def __call__(self, request, *args, **kw): coerce_put_post(request) try: return self.__handle_request(request, *args, **kw) except errors.HttpStatusCodeError, exc: return self._get_error_response(exc) except Exception, exc: return self._get_unknown_error_response(request, exc) def name(self): return util.camelcase_to_slash(self.__class__.__name__) def __handle_request(self, request, *args, **kw): self._authenticate(request) self._check_permission(request) method = self._get_method(request) data = self._get_input_data(request) data = self._clean_input_data(data, request) response = self._exec_method(method, request, data, *args, **kw) return self._process_response(response, request) def _exec_method(self, method, request, data, *args, **kw): if self._is_data_method(request): return method(data, request, *args, **kw) else: return method(request, *args, **kw) def _process_response(self, response, request): def coerce_response(): if not isinstance(response, Response): return Response(0, response) return response if isinstance(response, HttpResponse): return response devil_res = coerce_response() if devil_res.content and devil_res.get_code_num() in (0, 200, 201): serialized_res = devil_res.content = self._serialize_object(devil_res.content, request) formatted_res = self._format_response(request, devil_res) self._validate_output_data(response, serialized_res, formatted_res, request) else: formatted_res = self._format_response(request, devil_res) return formatted_res def _format_response(self, request, response): res = datamapper.format(request, response, self) if res.status_code is 0: res.status_code = 200 self._add_resposne_headers(res, response) return res def _add_resposne_headers(self, django_response, devil_response): try: headers = devil_response.headers except AttributeError: pass else: for k, v in headers.items(): django_response[k] = v return django_response def _get_input_data(self, request): if not self._is_data_method(request): return None content = [row for row in request.read()] content = ''.join(content) if content else None return self._parse_input_data(content, request) if content else None def _parse_input_data(self, data, request): return datamapper.parse(data, request, self) def _clean_input_data(self, data, request): if not self._is_data_method(request): return data try: if self.representation: self._validate_input_data(data, request) if self.factory: return self._create_object(data, request) else: return data except ValidationError, exc: return self._input_validation_failed(exc, data, request) def _get_input_validator(self, request): method = request.method.upper() if method != 'POST': return self.representation elif 
self.post_representation: return self.post_representation else: return self.representation def _validate_input_data(self, data, request): validator = self._get_input_validator(request) if isinstance(data, (list, tuple)): return map(validator.validate, data) else: return validator.validate(data) def _validate_output_data( self, original_res, serialized_res, formatted_res, request): validator = self.representation if not validator: return try: if isinstance(serialized_res, (list, tuple)): map(validator.validate, serialized_res) else: validator.validate(serialized_res) except ValidationError, exc: self._output_validation_failed(exc, serialized_res, request)
MIT License
jkibele/opticalrs
OpticalRS/Sagawa2010.py
reflectance_index
python
def reflectance_index(bandarr, deptharr, Kgarr, deep_water_means, band_list=None):
    arrlist = []
    if not band_list:
        band_list = range(bandarr.shape[-1])
    for i in band_list:
        RI = single_band_reflectance_index(bandarr[..., i], deptharr,
                                           Kgarr[i], deep_water_means[i])
        arrlist.append(RI)
    return np.ma.dstack(arrlist)
Produce a reflectance index image for each band of an image and return it
as a (Row, Column, Band) shaped array. For more information see the
docstring for `single_band_reflectance_index`. This method simply applies
`single_band_reflectance_index` to multiple bands.

Parameters
----------
bandarr : numpy.array
    A (Rows, Columns, Bands) shaped array of the multispectral image for
    which a reflectance index image is to be created. Sagawa et al. used
    radiance values but you may be able to use DN or reflectance values as
    well.
deptharr : numpy.array
    An array of (Rows, Columns) shape with the same number of rows and
    columns as bandarr. It should contain depths in meters.
Kgarr : array-like of float
    The K*g values for the bands, otherwise known as the two-way diffuse
    attenuation coefficients or as the effective diffuse attenuation
    coefficients (K) times the geometric factor (g). This is the slope value
    returned from the `band_attenuation_geometric` method multiplied by -1.
    See the docstring for that method for more information or use the
    `OpticalRS.ParameterEstimator` module.
deep_water_means : array-like of floats
    The mean signal over deep water for each band in `bandarr`. See the
    `OpticalRS.ParameterEstimator` module for ways to calculate this. `Rinf`
    values (essentially the same thing) can be used as well.
band_list : list of ints, optional
    A subset of the bands in bandarr. If supplied, only the values for the
    bands in the list will be calculated and returned. If left as `None`,
    all bands will be calculated and returned.

Returns
-------
numpy.array
    An array of the same row and column dimensions as the input containing
    reflectance index values. Array dimensions will be (Rows, Columns,
    Bands). The number of bands will be equal to the number of bands in
    `bandarr` unless `band_list` has been specified; in that case the number
    of bands will be equal to the length of `band_list`. The index values
    are (or should be) linearly related to bottom reflectance.
https://github.com/jkibele/opticalrs/blob/20d73aec1cbabfa54e62214ae3179e3ba375dff9/OpticalRS/Sagawa2010.py#L235-L285
from scipy.stats import linregress import numpy as np def band_attenuation_geometric(bandarr,deptharr): if np.ma.is_masked(deptharr): X = deptharr.compressed() else: X = deptharr.ravel() if np.ma.is_masked(bandarr): Y = bandarr.compressed() else: Y = bandarr.ravel() slope, intercept, r_value, p_value, std_err = linregress(X,np.log(Y)) return slope, intercept, r_value def single_band_reflectance_index(single_band_arr,depth_arr,Kg,deep_water_mean): RI = (single_band_arr - deep_water_mean) / np.exp(-1 * Kg * depth_arr) return RI def negKg_regression_array(bandarr,deptharr,band_list=None): if not band_list: band_list = range(bandarr.shape[-1]) outlist = [] for i in band_list: negKg = band_attenuation_geometric(bandarr[...,i],deptharr) outlist.append(negKg) return np.array(outlist) def negKg_array(bandarr,deptharr,band_list=None): if not band_list: band_list = range(bandarr.shape[-1]) nra = negKg_regression_array(bandarr,deptharr,band_list=band_list) return nra[:,0]
BSD 3-Clause New or Revised License
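A small synthetic example for the `reflectance_index` record above; the arrays are invented purely to show the expected shapes, not real radiance or depth data, and the import path follows the record's function_path.

import numpy as np
from OpticalRS.Sagawa2010 import reflectance_index

rows, cols, n_bands = 4, 5, 2
bandarr = np.full((rows, cols, n_bands), 0.4)   # fake radiance image
deptharr = np.full((rows, cols), 2.0)           # fake depths in meters
Kgarr = [0.1, 0.2]                              # two-way attenuation times geometric factor, per band
deep_water_means = [0.05, 0.08]                 # deep-water signal per band

RI = reflectance_index(bandarr, deptharr, Kgarr, deep_water_means)
print(RI.shape)      # (4, 5, 2)
# Each band is (band - deep_water_mean) / exp(-Kg * depth); for band 0:
print(RI[0, 0, 0])   # (0.4 - 0.05) / exp(-0.1 * 2.0) ~ 0.4275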
googlecloudplatform/gsutil
gslib/wildcard_iterator.py
FileWildcardIterator.__init__
python
def __init__(self, wildcard_url, ignore_symlinks=False, logger=None):
    self.wildcard_url = wildcard_url
    self.ignore_symlinks = ignore_symlinks
    self.logger = logger or logging.getLogger()
Instantiates an iterator over BucketListingRefs matching wildcard URL.

Args:
  wildcard_url: FileUrl that contains the wildcard to iterate.
  ignore_symlinks: If True, ignore symlinks during iteration.
  logger: logging.Logger used for outputting debug messages during iteration.
      If None, the root logger will be used.
https://github.com/googlecloudplatform/gsutil/blob/b1361dd5e9c2a246b328e871603f3a2b0d5fd5fa/gslib/wildcard_iterator.py#L586-L597
from __future__ import absolute_import from __future__ import print_function from __future__ import division from __future__ import unicode_literals import fnmatch import glob import logging import os import re import sys import textwrap import six from gslib.bucket_listing_ref import BucketListingBucket from gslib.bucket_listing_ref import BucketListingObject from gslib.bucket_listing_ref import BucketListingPrefix from gslib.cloud_api import AccessDeniedException from gslib.cloud_api import CloudApi from gslib.cloud_api import NotFoundException from gslib.exception import CommandException from gslib.storage_url import ContainsWildcard from gslib.storage_url import GenerationFromUrlAndString from gslib.storage_url import StorageUrlFromString from gslib.storage_url import StripOneSlash from gslib.storage_url import WILDCARD_REGEX from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages from gslib.utils.constants import UTF8 from gslib.utils.text_util import FixWindowsEncodingIfNeeded from gslib.utils.text_util import PrintableStr if six.PY3: StandardError = Exception FLAT_LIST_REGEX = re.compile(r'(?P<before>.*?)\*\*(?P<after>.*)') _UNICODE_EXCEPTION_TEXT = ( 'Invalid Unicode path encountered (%s). gsutil cannot proceed ' 'with such files present. Please remove or rename this file and ' 'try again. NOTE: the path printed above replaces the ' 'problematic characters with a hex-encoded printable ' 'representation. For more details (including how to convert to a ' 'gsutil-compatible encoding) see `gsutil help encoding`.') class WildcardIterator(object): def __repr__(self): return 'WildcardIterator(%s)' % self.wildcard_url.url_string class CloudWildcardIterator(WildcardIterator): def __init__(self, wildcard_url, gsutil_api, all_versions=False, project_id=None, logger=None): self.wildcard_url = wildcard_url self.all_versions = all_versions self.gsutil_api = gsutil_api self.project_id = project_id self.logger = logger or logging.getLogger() def __iter__(self, bucket_listing_fields=None, expand_top_level_buckets=False): single_version_request = self.wildcard_url.HasGeneration() get_fields = None if bucket_listing_fields: get_fields = set() for field in bucket_listing_fields: get_fields.add(field) bucket_listing_fields = self._GetToListFields( get_fields=bucket_listing_fields) bucket_listing_fields.update(['items/name', 'prefixes']) get_fields.update(['name']) if single_version_request or self.all_versions: bucket_listing_fields.update( ['items/generation', 'items/metageneration']) get_fields.update(['generation', 'metageneration']) for bucket_listing_ref in self._ExpandBucketWildcards(bucket_fields=['id']): bucket_url_string = bucket_listing_ref.url_string if self.wildcard_url.IsBucket(): if expand_top_level_buckets: url = StorageUrlFromString(bucket_url_string) for obj_or_prefix in self.gsutil_api.ListObjects( url.bucket_name, delimiter='/', all_versions=self.all_versions, provider=self.wildcard_url.scheme, fields=bucket_listing_fields): if obj_or_prefix.datatype == CloudApi.CsObjectOrPrefixType.OBJECT: yield self._GetObjectRef(bucket_url_string, obj_or_prefix.data, with_version=self.all_versions) else: yield self._GetPrefixRef(bucket_url_string, obj_or_prefix.data) else: yield bucket_listing_ref else: if (not ContainsWildcard(self.wildcard_url.url_string) and self.wildcard_url.IsObject() and not self.all_versions): try: get_object = self.gsutil_api.GetObjectMetadata( self.wildcard_url.bucket_name, self.wildcard_url.object_name, 
generation=self.wildcard_url.generation, provider=self.wildcard_url.scheme, fields=get_fields) yield self._GetObjectRef(self.wildcard_url.bucket_url_string, get_object, with_version=(self.all_versions or single_version_request)) return except (NotFoundException, AccessDeniedException): pass if single_version_request: url_string = '%s%s#%s' % (bucket_url_string, self.wildcard_url.object_name, self.wildcard_url.generation) else: url_string = '%s%s' % ( bucket_url_string, StripOneSlash(self.wildcard_url.object_name) or '/') urls_needing_expansion = [url_string] while urls_needing_expansion: url = StorageUrlFromString(urls_needing_expansion.pop(0)) (prefix, delimiter, prefix_wildcard, suffix_wildcard) = (self._BuildBucketFilterStrings(url.object_name)) regex_patterns = self._GetRegexPatterns(prefix_wildcard) listing_fields = (set(['prefixes']) if suffix_wildcard else bucket_listing_fields) for obj_or_prefix in self.gsutil_api.ListObjects( url.bucket_name, prefix=prefix, delimiter=delimiter, all_versions=self.all_versions or single_version_request, provider=self.wildcard_url.scheme, fields=listing_fields): for pattern in regex_patterns: if obj_or_prefix.datatype == CloudApi.CsObjectOrPrefixType.OBJECT: gcs_object = obj_or_prefix.data if pattern.match(gcs_object.name): if not suffix_wildcard or (StripOneSlash(gcs_object.name) == suffix_wildcard): if not single_version_request or ( self._SingleVersionMatches(gcs_object.generation)): yield self._GetObjectRef( bucket_url_string, gcs_object, with_version=(self.all_versions or single_version_request)) break else: prefix = obj_or_prefix.data if ContainsWildcard(prefix): raise CommandException( 'Cloud folder %s%s contains a wildcard; gsutil does ' 'not currently support objects with wildcards in their ' 'name.' % (bucket_url_string, prefix)) rstripped_prefix = StripOneSlash(prefix) if pattern.match(rstripped_prefix): if suffix_wildcard and rstripped_prefix != suffix_wildcard: url_append_string = '%s%s' % (bucket_url_string, rstripped_prefix + '/' + suffix_wildcard) urls_needing_expansion.append(url_append_string) else: yield self._GetPrefixRef(bucket_url_string, prefix) break def _GetRegexPatterns(self, wildcard_pattern): wildcard_patterns = [wildcard_pattern] if '/**/' in wildcard_pattern: updated_pattern = wildcard_pattern.replace('/**/', '/') wildcard_patterns.append(updated_pattern) else: updated_pattern = wildcard_pattern for pattern in (wildcard_pattern, updated_pattern): if pattern.startswith('**/'): wildcard_patterns.append(pattern[3:]) return [re.compile(fnmatch.translate(p)) for p in wildcard_patterns] def _BuildBucketFilterStrings(self, wildcard): match = WILDCARD_REGEX.search(wildcard) if not match: prefix = wildcard delimiter = '/' prefix_wildcard = wildcard suffix_wildcard = '' else: if match.start() > 0: prefix = wildcard[:match.start()] wildcard_part = wildcard[match.start():] else: prefix = None wildcard_part = wildcard end = wildcard_part.find('/') if end != -1: wildcard_part = wildcard_part[:end + 1] prefix_wildcard = (prefix or '') + wildcard_part if not prefix_wildcard.endswith('**/'): prefix_wildcard = StripOneSlash(prefix_wildcard) suffix_wildcard = wildcard[match.end():] end = suffix_wildcard.find('/') if end == -1: suffix_wildcard = '' else: suffix_wildcard = suffix_wildcard[end + 1:] if prefix_wildcard.find('**') != -1: delimiter = None prefix_wildcard += suffix_wildcard suffix_wildcard = '' else: delimiter = '/' self.logger.debug( 'wildcard=%s, prefix=%s, delimiter=%s, ' 'prefix_wildcard=%s, suffix_wildcard=%s\n', 
PrintableStr(wildcard), PrintableStr(prefix), PrintableStr(delimiter), PrintableStr(prefix_wildcard), PrintableStr(suffix_wildcard)) return (prefix, delimiter, prefix_wildcard, suffix_wildcard) def _SingleVersionMatches(self, listed_generation): decoded_generation = GenerationFromUrlAndString(self.wildcard_url, listed_generation) return str(self.wildcard_url.generation) == str(decoded_generation) def _ExpandBucketWildcards(self, bucket_fields=None): bucket_url = StorageUrlFromString(self.wildcard_url.bucket_url_string) if (bucket_fields and set(bucket_fields) == set(['id']) and not ContainsWildcard(self.wildcard_url.bucket_name)): yield BucketListingBucket(bucket_url) elif (self.wildcard_url.IsBucket() and not ContainsWildcard(self.wildcard_url.bucket_name)): yield BucketListingBucket(bucket_url, root_object=self.gsutil_api.GetBucket( self.wildcard_url.bucket_name, provider=self.wildcard_url.scheme, fields=bucket_fields)) else: regex = fnmatch.translate(self.wildcard_url.bucket_name) prog = re.compile(regex) fields = self._GetToListFields(bucket_fields) if fields: fields.add('items/id') for bucket in self.gsutil_api.ListBuckets( fields=fields, project_id=self.project_id, provider=self.wildcard_url.scheme): if prog.match(bucket.id): url = StorageUrlFromString('%s://%s/' % (self.wildcard_url.scheme, bucket.id)) yield BucketListingBucket(url, root_object=bucket) def _GetToListFields(self, get_fields=None): if get_fields: list_fields = set() for field in get_fields: list_fields.add('items/' + field) return list_fields def _GetObjectRef(self, bucket_url_string, gcs_object, with_version=False): if with_version and gcs_object.generation is not None: generation_str = GenerationFromUrlAndString(self.wildcard_url, gcs_object.generation) object_string = '%s%s#%s' % (bucket_url_string, gcs_object.name, generation_str) else: object_string = '%s%s' % (bucket_url_string, gcs_object.name) object_url = StorageUrlFromString(object_string) return BucketListingObject(object_url, root_object=gcs_object) def _GetPrefixRef(self, bucket_url_string, prefix): prefix_url = StorageUrlFromString('%s%s' % (bucket_url_string, prefix)) return BucketListingPrefix(prefix_url, root_object=prefix) def IterBuckets(self, bucket_fields=None): for blr in self._ExpandBucketWildcards(bucket_fields=bucket_fields): yield blr def IterAll(self, bucket_listing_fields=None, expand_top_level_buckets=False): for blr in self.__iter__(bucket_listing_fields=bucket_listing_fields, expand_top_level_buckets=expand_top_level_buckets): yield blr def IterObjects(self, bucket_listing_fields=None): for blr in self.__iter__(bucket_listing_fields=bucket_listing_fields, expand_top_level_buckets=True): if blr.IsObject(): yield blr def _GetFileObject(filepath): return apitools_messages.Object(size=os.path.getsize(filepath)) class FileWildcardIterator(WildcardIterator):
Apache License 2.0
xnuinside/simple-ddl-parser
simple_ddl_parser/dialects/sql.py
BaseSQL.p_seq_name
python
def p_seq_name(self, p: List) -> None:
    p_list = list(p)
    schema = None
    if len(p) > 4:
        if "." in p:
            schema = p_list[-3]
        seq_name = p_list[-1]
    else:
        seq_name = p_list[-1]
    p[0] = {"schema": schema, "sequence_name": seq_name}
seq_name : create_seq ID DOT ID
           | create_seq ID
https://github.com/xnuinside/simple-ddl-parser/blob/b476f0d74e9cbab59c87e245743b7caa2d4b266a/simple_ddl_parser/dialects/sql.py#L738-L751
import re from copy import deepcopy from typing import Dict, List, Tuple from simple_ddl_parser.utils import check_spec, remove_par class AfterColumns: def p_expression_partition_by(self, p: List) -> None: p[0] = p[1] p_list = list(p) _type = None if isinstance(p[4], list): columns = p[4] else: columns = p_list[-2] if isinstance(p[4], str) and p[4].lower() != "(": _type = p[4] p[0]["partition_by"] = {"columns": columns, "type": _type} class Database: def p_database_base(self, p: List) -> None: p[0] = p[1] p_list = list(p) if isinstance(p_list[-1], dict): p[0].update(p_list[-1]) else: p[0]["database_name"] = p_list[-1] def p_expression_create_database(self, p: List) -> None: p[0] = p[1] p_list = list(p) p[0].update(p_list[-1]) class TableSpaces: @staticmethod def get_tablespace_data(p_list): if p_list[1] == "TABLESPACE": _type = None temp = False else: if p_list[1].upper() == "TEMPORARY": _type = None temp = True else: _type = p_list[1] if p_list[2].upper() == "TEMPORARY": temp = True else: temp = False if isinstance(p_list[-1], dict): properties = p_list[-1] tablespace_name = p_list[-2] else: properties = None tablespace_name = p_list[-1] result = { "tablespace_name": tablespace_name, "properties": properties, "type": _type, "temporary": temp, } return result def p_expression_create_tablespace(self, p: List) -> None: p_list = list(p) p[0] = self.get_tablespace_data(p_list[1:]) def p_properties(self, p: List) -> None: p_list = list(p) if len(p_list) == 3: p[0] = p[1] p[0].update(p[2]) else: p[0] = p[1] def p_property(self, p: List) -> None: p[0] = {p[1]: p[2]} class Table: def p_create_table(self, p: List): p[0] = {} if p[2].upper() == "EXTERNAL": p[0] = {"external": True} if p[2].upper() == "TEMP" or p[2].upper() == "TEMPORARY": p[0] = {"temp": True} class Column: def p_column_property(self, p: List): p_list = list(p) p[0] = {"property": {p_list[1]: p_list[-1]}} def set_base_column_propery(self, p: List) -> Dict: if "." in list(p): type_str = f"{p[2]}.{p[4]}" else: type_str = p[2] if isinstance(p[1], dict): p[0] = p[1] else: size = None p[0] = {"name": p[1], "type": type_str, "size": size} return p[0] @staticmethod def parse_complex_type(p_list: List[str]) -> str: start_index = 1 _type = "" if isinstance(p_list[1], dict): _type = p_list[1]["type"] start_index = 2 for elem in p_list[start_index:]: if isinstance(elem, list): for _elem in elem: _type += f" {_elem.rstrip()}" elif "ARRAY" in elem and elem != "ARRAY": _type += elem else: _type += f" {elem}" return _type def p_c_type(self, p: List) -> None: p[0] = {} p_list = remove_par(list(p)) _type = None if len(p_list) == 2: _type = p_list[-1] elif isinstance(p[1], str) and p[1].lower() == "encode": p[0] = {"property": {"encode": p[2]}} else: _type = self.parse_complex_type(p_list) if _type: _type = self.process_type(_type, p_list, p) p[0]["type"] = _type @staticmethod def process_type(_type: str, p_list: List, p: List) -> str: if isinstance(p_list[-1], str) and p_list[-1].lower() == "distkey": p[0] = {"property": {"distkey": True}} _type = _type.split("distkey")[0] _type = _type.strip().replace('" . 
"', '"."') if "<" not in _type and "ARRAY" in _type: if "[" not in p_list[-1]: _type = _type.replace(" ARRAY", "[]").replace("ARRAY", "[]") else: _type = _type.replace("ARRAY", "") elif "<" in _type and "[]" in _type: _type = _type.replace("[]", "ARRAY") return _type @staticmethod def get_size(p_list: List): if p_list[-1].isnumeric(): size = int(p_list[-1]) else: size = p_list[-1] if len(p_list) != 3: size = (int(p_list[-3]), int(p_list[-1])) return size @staticmethod def get_column_details(p_list: List, p: List): if p_list[-1].get("type"): p[0]["type"] += f"{p_list[-1]['type'].strip()}" elif p_list[-1].get("comment"): p[0].update(p_list[-1]) elif p_list[-1].get("property"): for key, value in p_list[-1]["property"].items(): p[0][key] = value p_list.pop(-1) def p_column(self, p: List) -> None: p[0] = self.set_base_column_propery(p) p_list = remove_par(list(p)) if isinstance(p_list[-1], dict) and "type" in p_list[-1] and len(p_list) <= 3: p[0]["type"] = p_list[-1]["type"] if p_list[-1].get("property"): for key, value in p_list[-1]["property"].items(): p[0][key] = value elif isinstance(p_list[-1], dict): self.get_column_details(p_list, p) self.set_column_size(p_list, p) def set_column_size(self, p_list: List, p: List): if ( not isinstance(p_list[-1], dict) and bool(re.match(r"[0-9]+", p_list[-1])) or p_list[-1] == "max" ): p[0]["size"] = self.get_size(p_list) @staticmethod def set_property(p: List) -> List: for item in p[1:]: if isinstance(item, dict): if "property" in item: for key, value in item["property"].items(): p[0][key] = value del item["property"] p[0].update(item) return p @staticmethod def get_column_properties(p_list: List) -> Tuple: pk = False nullable = True default = None unique = False references = None if isinstance(p_list[-1], str): if p_list[-1].upper() == "KEY": pk = True nullable = False elif p_list[-1].upper() == "UNIQUE": unique = True elif isinstance(p_list[-1], dict) and "references" in p_list[-1]: p_list[-1]["references"]["column"] = p_list[-1]["references"]["columns"][0] del p_list[-1]["references"]["columns"] references = p_list[-1]["references"] return pk, default, unique, references, nullable def p_defcolumn(self, p: List) -> None: p[0] = p[1] p_list = list(p) pk, default, unique, references, nullable = self.get_column_properties(p_list) self.set_property(p) p[0]["references"] = p[0].get("references", references) p[0]["unique"] = unique or p[0].get("unique", unique) p[0]["primary_key"] = pk or p[0].get("primary_key", pk) p[0]["nullable"] = ( nullable if nullable is not True else p[0].get("nullable", nullable) ) p[0]["default"] = p[0].get("default", default) p[0]["check"] = p[0].get("check", None) if isinstance(p_list[-1], dict) and p_list[-1].get("encode"): p[0]["encode"] = p[0].get("encode", p_list[-1]["encode"]) if p[0]["check"]: p[0]["check"] = " ".join(p[0]["check"]) def p_check_ex(self, p: List) -> None: name = None if isinstance(p[1], dict): if "constraint" in p[1]: p[0] = { "check": { "constraint_name": p[1]["constraint"]["name"], "statement": " ".join(p[2]["check"]), } } elif "check" in p[1]: p[0] = p[1] if isinstance(p[1], list): p[0] = { "check": {"constraint_name": name, "statement": p[1]["check"]} } if len(p) >= 3: for item in list(p)[2:]: p[0]["check"]["statement"].append(item) else: p[0] = {"check": {"statement": [p[2]], "constraint_name": name}} class Schema: def p_expression_schema(self, p: List) -> None: p[0] = p[1] p_list = list(p) if isinstance(p_list[-1], dict): p[0].update(p_list[-1]) elif len(p) > 2: p[0]["authorization"] = p[2] def 
p_create(self, p: List) -> None: p_list = list(p) auth = "AUTHORIZATION" if isinstance(p_list[1], dict): p[0] = p_list[1] if not p[0].get("properties"): p[0]["properties"] = {p_list[-3]: p_list[-1]} else: p[0]["properties"].update({p_list[-3]: p_list[-1]}) elif auth in p_list: if p_list[3] != auth: p[0] = {f"{p[2].lower()}_name": p_list[3], auth.lower(): p_list[-1]} elif p_list[3] == auth: p[0] = {f"{p[2].lower()}_name": p_list[4], auth.lower(): p_list[4]} else: p[0] = {f"{p[2].lower()}_name": p_list[-1]} class Drop: def p_expression_drop_table(self, p: List) -> None: p_list = list(p) schema = None if len(p) > 4: if "." in p: schema = p_list[-3] table_name = p_list[-1] else: table_name = p_list[-1] p[0] = {"schema": schema, "table_name": table_name} class Type: def p_multiple_column_names(self, p: List) -> None: p_list = list(p) if isinstance(p[1], dict): p[0] = [p[1]] else: p[0] = p[1] if p_list[-1] != ",": p[0].append(p_list[-1]) def p_type_definition(self, p: List) -> None: p_list = remove_par(list(p)) p[0] = p[1] if not p[0].get("properties"): p[0]["properties"] = {} if "TABLE" in p_list or isinstance(p_list[-1], dict) and p_list[-1].get("name"): if not p[0]["properties"].get("columns"): p[0]["properties"]["columns"] = [] p[0]["properties"]["columns"].append(p_list[-1]) if len(p_list) > 3: p[0]["base_type"] = p_list[2] else: p[0]["base_type"] = None if isinstance(p[0]["base_type"], str): base_type = p[0]["base_type"].upper() if base_type == "ENUM": p[0]["properties"]["values"] = p_list[3] elif p[0]["base_type"] == "OBJECT": if "type" in p_list[3][0]: p[0]["properties"]["attributes"] = p_list[3] else: if isinstance(p_list[-1], list): for item in p_list[-1]: p[0]["properties"].update(item) def p_expression_type_as(self, p: List) -> None: p[0] = p[1] def p_type_name(self, p: List) -> None: p_list = list(p) p[0] = {} if "." not in p_list: p[0]["schema"] = None p[0]["type_name"] = p_list[2] else: p[0]["schema"] = p[2] p[0]["type_name"] = p_list[4] def p_type_create(self, p: List) -> None: p[0] = None class Domain: def p_expression_domain_as(self, p: List) -> None: p_list = list(p) p[0] = p[1] p[0]["base_type"] = p[2] p[0]["properties"] = {} if p[0]["base_type"] == "ENUM": p[0]["properties"]["values"] = p_list[4] def p_domain_name(self, p: List) -> None: p_list = list(p) p[0] = {} if "." not in p_list: p[0]["schema"] = None else: p[0]["schema"] = p[3] p[0]["domain_name"] = p_list[-2] class BaseSQL( Database, Table, Drop, Domain, Column, AfterColumns, Type, Schema, TableSpaces ): def p_id_equals(self, p: List) -> None: p_list = list(p) if "=" == p_list[-2]: property = {p_list[-3]: p_list[-1]} if not isinstance(p[1], list): p[0] = [property] else: p[0] = p[1] p[0].append(property) def p_expression_index(self, p: List) -> None: p_list = remove_par(list(p)) p[0] = p[1] for item in ["detailed_columns", "columns"]: if item not in p[0]: p[0][item] = p_list[-1][item] else: p[0][item].extend(p_list[-1][item]) def p_index_table_name(self, p: List) -> None: p[0] = p[1] p_list = list(p) schema = None if "." 
in p_list: schema = p_list[-3] table_name = p_list[-1] else: table_name = p_list[-1] p[0].update({"schema": schema, "table_name": table_name}) def p_create_index(self, p: List) -> None: p_list = list(p) if "CLUSTERED" in p_list: clustered = True else: clustered = False if isinstance(p[1], dict): p[0] = p[1] else: p[0] = { "schema": None, "index_name": p_list[-1], "unique": "UNIQUE" in p_list, "clustered": clustered, } def extract_check_data(self, p, p_list): if isinstance(p_list[-1]["check"], list): check = " ".join(p_list[-1]["check"]) if isinstance(check, str): check = {"constraint_name": None, "statement": check} else: check = p_list[-1]["check"] p[0] = self.set_constraint(p[0], "checks", check, check["constraint_name"]) p[0]["checks"].append(check) return p[0] def p_expression_table(self, p: List) -> None: p[0] = p[1] p_list = list(p) if p_list[-1] != "," and p_list[-1] != ")": if "type" in p_list[-1] and "name" in p_list[-1]: p[0]["columns"].append(p_list[-1]) elif "check" in p_list[-1]: p[0] = self.extract_check_data(p, p_list) elif "enforced" in p_list[-1]: p_list[-2].update(p_list[-1]) p[0].update({"primary_key_enforced": p_list[-1]["enforced"]}) else: p[0].update(p_list[-1]) if isinstance(p_list[-1], dict): if "constraint" in p_list[-2]: if p_list[-1].get("unique_statement"): p[0] = self.set_constraint( p[0], "uniques", {"columns": p_list[-1]["unique_statement"]}, p_list[-2]["constraint"]["name"], ) else: p[0] = self.set_constraint( p[0], "primary_keys", {"columns": p_list[-1]["primary_key"]}, p_list[-2]["constraint"]["name"], ) elif ( len(p_list) >= 4 and isinstance(p_list[3], dict) and p_list[3].get("constraint") and p_list[3]["constraint"].get("primary_key") ): del p_list[3]["constraint"]["primary_key"] p[0] = self.set_constraint( target_dict=p[0], _type="primary_keys", constraint=p_list[3]["constraint"], constraint_name=p_list[3]["constraint"]["name"], ) del p[0]["constraint"] elif p_list[-1].get("references"): p[0] = self.add_ref_information_to_table(p, p_list) def add_ref_information_to_table(self, p, p_list): if len(p_list) > 4 and "constraint" in p_list[3]: p[0] = self.set_constraint( p[0], "references", p_list[-1]["references"], p_list[3]["constraint"]["name"], ) elif isinstance(p_list[-2], list): if "ref_columns" not in p[0]: p[0]["ref_columns"] = [] for num, column in enumerate(p_list[-2]): ref = deepcopy(p_list[-1]["references"]) ref["column"] = ref["columns"][num] del ref["columns"] ref["name"] = column p[0]["ref_columns"].append(ref) return p[0] @staticmethod def set_constraint( target_dict: Dict, _type: str, constraint: Dict, constraint_name: str ) -> Dict: if not target_dict.get("constraints"): target_dict["constraints"] = {} if not target_dict["constraints"].get(_type): target_dict["constraints"][_type] = [] if "name" in constraint: del constraint["name"] constraint.update({"constraint_name": constraint_name}) target_dict["constraints"][_type].append(constraint) return target_dict def p_likke(self, p: List) -> None: p[0] = None def p_expression_like_table(self, p: List) -> None: p_list = remove_par(list(p)) if len(p_list) > 4: if "." in p: schema = p_list[-3] table_name = p_list[-1] else: table_name = p_list[-1] schema = None p[0] = p[1] p[0].update({"like": {"schema": schema, "table_name": table_name}}) def p_table_name(self, p: List) -> None: p_list = list(p) p[0] = p[1] if len(p) > 4: if "." 
in p: schema = p_list[-3] table_name = p_list[-1] else: table_name = p_list[-1] schema = None p[0].update( {"schema": schema, "table_name": table_name, "columns": [], "checks": []} ) def p_expression_seq(self, p: List) -> None: p_list = list(p) p[0] = p[1] if len(p) == 4: p[0].update({p[2].lower(): int(p_list[-1])}) if len(p) == 3: p[0].update({p[2].lower(): True}) elif len(p) == 5: p[0].update({f"{p[2].lower()}_{p[3].lower()}": int(p_list[-1])})
MIT License
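A hedged end-to-end sketch of where `p_seq_name` fits: the library's public `DDLParser` entry point drives the grammar rules shown above. The DDL string is an example, and the exact shape of the parsed output is not asserted here, only that the schema and sequence name are split apart by `p_seq_name`.

from simple_ddl_parser import DDLParser  # public entry point of the library

ddl = "CREATE SEQUENCE dev.incremental_ids INCREMENT 1 START 1;"
parsed = DDLParser(ddl).run()
print(parsed)
# p_seq_name is the rule that turns "dev.incremental_ids" into
# {"schema": "dev", "sequence_name": "incremental_ids"} inside the result.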
ethanc/callofduty.py
callofduty/feed.py
FeedItem.react
python
async def react(self, reaction: Reaction) -> None:
    await self._client.SetFeedReaction(
        reaction,
        self.player.platform,
        self.player.username,
        self.title,
        (self.date.timestamp() * 1000),
        self.category,
    )
Set a Reaction to the Call of Duty Friend Feed item.

Parameters
----------
reaction : callofduty.Reaction
    Reaction to add to the feed item.

Returns
-------
None
https://github.com/ethanc/callofduty.py/blob/6fca2d13804bdda4989995bca8ae3b01fa966e6d/callofduty/feed.py#L60-L81
import logging from datetime import datetime from typing import List, Optional from .enums import Reaction, Title from .match import Match from .object import Object from .player import Player from .utils import StripHTML log: logging.Logger = logging.getLogger(__name__) class FeedItem(Object): _type: str = "FeedItem" def __init__(self, client, data: dict): super().__init__(client) self.player: Player = Player( self, {"platform": data.pop("platform"), "username": data.pop("username")} ) self.title: Title = Title(data.pop("title")) self.match: Optional[Match] = None self.category: str = data.pop("category") self.date: datetime = datetime.fromtimestamp((data.pop("date") / 1000)) self.html: str = data.pop("rendered") self.text: str = StripHTML(self.html) self.favorited: bool = data.pop("favorited") if (_matchId := data["meta"].get("matchId")) is not None: self.match: Optional[Match] = Match( client, {"id": _matchId, "platform": self.player.platform, "title": self.title}, )
MIT License
dcos/dcos-e2e
src/dcos_e2e_cli/_vendor/dcos_test_utils/package.py
Cosmos.uninstall_package
python
def uninstall_package(self, package_name, app_id=None):
    self._update_headers('uninstall')
    package = {
        'packageName': package_name
    }
    if app_id is not None:
        package.update({'appId': app_id})
    return self._post('/uninstall', package)
Uninstall a package using the cosmos packaging API

Args:
    package_name: str
    app_id: str, should have leading slash

Returns:
    requests.response object
https://github.com/dcos/dcos-e2e/blob/ab7c4bfd58872f458e5766fff01ca74322441065/src/dcos_e2e_cli/_vendor/dcos_test_utils/package.py#L76-L92
import logging from ..dcos_test_utils import helpers log = logging.getLogger(__name__) class Cosmos(helpers.RetryCommonHttpErrorsMixin, helpers.ApiClientSession): def __init__(self, default_url: helpers.Url, session=None): super().__init__(default_url) if session is not None: self.session = session def _update_headers(self, endpoint, request_version='1', response_version='1'): media_type = "application/vnd.dcos.package." + endpoint + "-{action}+json;charset=utf-8;" + "version=v{version}" self.session.headers.update({ 'Content-type': media_type.format(action="request", version=request_version), 'Accept': media_type.format(action="response", version=response_version) }) def _post(self, endpoint, data): response = self.post(endpoint, json=data) log.info('Response from cosmos: {0}'.format(repr(response.text))) response.raise_for_status() return response def install_package(self, package_name, package_version=None, options=None, app_id=None): self._update_headers('install', response_version='2') package = { 'packageName': package_name } if package_version is not None: package.update({'packageVersion': package_version}) if options is not None: package.update({'options': options}) if app_id is not None: package.update({'appId': app_id}) return self._post('/install', package)
Apache License 2.0
morsecorp/snappiershot
snappiershot/serializers/json.py
JsonSerializer.encode_numeric
python
def encode_numeric(value: Number) -> JsonType:
    if isinstance(value, (bool, int, float)):
        return value
    if isinstance(value, complex):
        encoded_value: List[float] = [value.real, value.imag]
        return CustomEncodedNumericTypes.complex.json_encoding(encoded_value)
    if isinstance(value, Decimal):
        encode_value: Dict[str, Any] = value.as_tuple()._asdict()
        return CustomEncodedNumericTypes.decimal.json_encoding(encode_value)
    raise NotImplementedError(
        f"No encoding implemented for the following numeric type: {value} ({type(value)})"
    )
Encoding for numeric types.

This will do nothing to naturally serializable types (bool, int, float)
but will perform custom encoding for non-supported types (complex).
This will convert Decimal values to their tuple encodings.
  See: https://docs.python.org/3.9/library/decimal.html#decimal.Decimal.as_tuple

The custom encoding follows the template:
    {
      NUMERIC_KEY: <type-as-a-string>,
      NUMERIC_VALUE_KEY: <value>
    }
The values for the NUMERIC_KEY and NUMERIC_VALUE_KEY constants are attributes
of the `snappiershot.serializers.constants.CustomEncodedNumericTypes` class.

Raises:
    NotImplementedError - If encoding is not implemented for the given numeric type.
https://github.com/morsecorp/snappiershot/blob/acb6a8d01d4496abe0f2fe83c7e7af9cf77aac8e/snappiershot/serializers/json.py#L102-L131
import datetime import json from decimal import Decimal, DecimalTuple from numbers import Number from pathlib import Path, PurePath, PurePosixPath, PureWindowsPath from typing import Any, Collection, Dict, Iterator, List from .constants import ( COLLECTION_TYPES, DATETIME_TYPES, PATH_TYPES, CustomEncodedCollectionTypes, CustomEncodedDatetimeTypes, CustomEncodedNumericTypes, CustomEncodedPathTypes, JsonType, ) class JsonSerializer(json.JSONEncoder): @classmethod def _hint_tuples(cls, obj: Any) -> Any: if isinstance(obj, list): return [cls._hint_tuples(item) for item in obj] if isinstance(obj, COLLECTION_TYPES): return cls._hint_tuples(cls.encode_collection(obj)) if isinstance(obj, dict): return {key: cls._hint_tuples(value) for key, value in obj.items()} return obj def encode(self, obj: Any) -> str: return super().encode(self._hint_tuples(obj)) def iterencode(self, obj: Any, _one_shot: bool = False) -> Iterator[str]: return super().iterencode(self._hint_tuples(obj), _one_shot) def default(self, value: Any) -> Any: if isinstance(value, Number): return self.encode_numeric(value) if isinstance(value, DATETIME_TYPES): return self.encode_datetime(value) if isinstance(value, PATH_TYPES): return self.encode_path(value) raise NotImplementedError( f"Encoding for this object is not yet implemented: {value} ({type(value)})" ) @staticmethod
Apache License 2.0
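A hedged sketch of `JsonSerializer.encode_numeric` from the record above: supported built-ins pass through unchanged, while complex and Decimal values come back wrapped in the custom encoding described in the docstring. The exact key names live in `CustomEncodedNumericTypes`, which is not shown in this record, so they are not asserted here.

from decimal import Decimal

from snappiershot.serializers.json import JsonSerializer  # path from the record's function_path

# bool / int / float are naturally serializable and are returned unchanged.
print(JsonSerializer.encode_numeric(3.14))            # 3.14

# complex and Decimal are wrapped in the custom {type, value} template.
print(JsonSerializer.encode_numeric(1 + 2j))          # custom dict built from [1.0, 2.0]
print(JsonSerializer.encode_numeric(Decimal("1.5")))  # custom dict built from Decimal("1.5").as_tuple()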
mara/mara-db
mara_db/bigquery.py
create_bigquery_table_from_postgresql_query
python
def create_bigquery_table_from_postgresql_query(
        postgresql_query: str, postgresql_db_alias: str, bigquery_db_alias: str,
        bigquery_dataset_id: str, bigquery_table_name: str):
    from mara_db.postgresql import postgres_cursor_context

    with mara_db.postgresql.postgres_cursor_context(postgresql_db_alias) as cursor:
        cursor.execute('SELECT oid, typname FROM pg_type;')
        pg_types = {}
        for oid, type_name in cursor.fetchall():
            pg_types[oid] = type_name

        pg_to_bigquery_type_mapping = {
            'bool': 'BOOL',
            'bytea': 'BYTES',
            'date': 'DATE',
            'int2': 'INT64',
            'int4': 'INT64',
            'int8': 'INT64',
            'json': 'STRING',
            'jsonb': 'STRING',
            'numeric': 'NUMERIC',
            'float4': 'FLOAT64',
            'float8': 'FLOAT64',
            'varchar': 'STRING',
            'text': 'STRING',
            'time': 'TIME',
            'timestamp': 'DATETIME',
            'timestamptz': 'TIMESTAMP',
        }

        cursor.execute(postgresql_query + ' LIMIT 0')

        column_specs = []
        for column in cursor.description:
            pg_type = pg_types[column.type_code]
            assert pg_type in pg_to_bigquery_type_mapping, f"Unmapped type '{pg_type}'"
            column_specs.append(f'`{column.name}` {pg_to_bigquery_type_mapping[pg_type]}')

        query = f"""
CREATE OR REPLACE TABLE `{bigquery_dataset_id}`.`{bigquery_table_name}` (
""" + ',\n '.join(column_specs) + "\n)"
        print(query)

        client = bigquery_client(bigquery_db_alias)
        client.query(query)
Creates a table for bigquery from a Postgresql SELECT query.
Will print the query.

Useful for copying PostgreSQL tables to BigQuery (create table first and then copy)

Example:
    >>> create_bigquery_table_from_postgresql_query(
    >>>     postgresql_db_alias='dwh',
    >>>     postgresql_query='SELECT 1::SMALLINT AS a, now() as b',
    >>>     bigquery_db_alias='reporting',
    >>>     bigquery_dataset_id='foo',
    >>>     bigquery_table_name='bar')

    CREATE OR REPLACE TABLE `foo`.`bar` (
     `a` INT64,
     `b` TIMESTAMP
    )

Args:
    postgresql_query: The query to execute in PostgreSQL, must not end with a semicolon
    postgresql_db_alias: The postgresql database to execute the query in
    bigquery_db_alias: The mara db alias of the bigquery connection
    bigquery_dataset_id: The id of the bigquery dataset in which the table is to be created
    bigquery_table_name: The name of the table to be created
https://github.com/mara/mara-db/blob/6cd79f0c050e63504c5c43a8c804b1c91c1990a0/mara_db/bigquery.py#L52-L121
import contextlib
import typing

import mara_db.dbs

import sys
import time

from google.api_core.exceptions import BadRequest


def bigquery_credentials(db: typing.Union[str, mara_db.dbs.BigQueryDB]) -> 'google.oauth2.service_account.Credentials':
    from google.oauth2.service_account import Credentials

    if isinstance(db, str):
        db = mara_db.dbs.db(db)

    return Credentials.from_service_account_file(db.service_account_json_file_name)


def bigquery_client(db: typing.Union[str, mara_db.dbs.BigQueryDB]) -> 'google.cloud.bigquery.client.Client':
    from google.cloud.bigquery.client import Client

    if isinstance(db, str):
        db = mara_db.dbs.db(db)

    credentials = bigquery_credentials(db)
    return Client(project=credentials.project_id, credentials=credentials, location=db.location)


@contextlib.contextmanager
def bigquery_cursor_context(db: typing.Union[str, mara_db.dbs.BigQueryDB]) -> 'google.cloud.bigquery.dbapi.cursor.Cursor':
    client = bigquery_client(db)

    from google.cloud.bigquery.dbapi.connection import Connection

    connection = Connection(client)
    cursor = connection.cursor()
    try:
        yield cursor
        connection.commit()
    except Exception as e:
        connection.close()
        raise e
MIT License
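A usage sketch for create_bigquery_table_from_postgresql_query, assuming mara db aliases named 'dwh' (PostgreSQL) and 'reporting' (BigQuery) are already configured; the aliases, dataset and table names are placeholders, and bigquery_cursor_context comes from the same module's context shown above.

import mara_db.bigquery

# Create the (empty) target table in BigQuery from the column types of a PostgreSQL query.
mara_db.bigquery.create_bigquery_table_from_postgresql_query(
    postgresql_db_alias='dwh',
    postgresql_query='SELECT 1::SMALLINT AS a, now() AS b',
    bigquery_db_alias='reporting',
    bigquery_dataset_id='foo',
    bigquery_table_name='bar')

# The module's context above also provides a DB-API cursor for follow-up queries,
# e.g. to confirm that the freshly created table is still empty.
with mara_db.bigquery.bigquery_cursor_context('reporting') as cursor:
    cursor.execute('SELECT COUNT(*) FROM `foo`.`bar`')
    print(cursor.fetchone())  # (0,)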
azure/autorest.python
test/vanilla/low-level/Expected/AcceptanceTests/BodyDateTimeRfc1123LowLevel/bodydatetimerfc1123lowlevel/rest/datetimerfc1123/_request_builders_py3.py
build_put_utc_max_date_time_request
python
def build_put_utc_max_date_time_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
    content_type = kwargs.pop("content_type", None)
    accept = "application/json"

    url = kwargs.pop("template_url", "/datetimerfc1123/max")

    header_parameters = kwargs.pop("headers", {})
    if content_type is not None:
        header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, headers=header_parameters, json=json, content=content, **kwargs)
Put max datetime value Fri, 31 Dec 9999 23:59:59 GMT.

See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.

:keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in our
 example to find the input shape. datetime body.
:paramtype json: any
:keyword content: Pass in binary content you want in the body of the request (typically bytes, a
 byte iterator, or stream input). datetime body.
:paramtype content: any
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
 `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
 incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest

Example:
    .. code-block:: python

        # JSON input template you can fill out and use as your body input.
        json = "2020-02-20 00:00:00"  # Optional.
https://github.com/azure/autorest.python/blob/90d60a965788e3b4c0809e6686bdc3525acac89c/test/vanilla/low-level/Expected/AcceptanceTests/BodyDateTimeRfc1123LowLevel/bodydatetimerfc1123lowlevel/rest/datetimerfc1123/_request_builders_py3.py#L109-L145
import datetime
from typing import Any, Optional

from azure.core.rest import HttpRequest
from msrest import Serializer

_SERIALIZER = Serializer()


def build_get_null_request(**kwargs: Any) -> HttpRequest:
    accept = "application/json"

    url = kwargs.pop("template_url", "/datetimerfc1123/null")

    header_parameters = kwargs.pop("headers", {})
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)


def build_get_invalid_request(**kwargs: Any) -> HttpRequest:
    accept = "application/json"

    url = kwargs.pop("template_url", "/datetimerfc1123/invalid")

    header_parameters = kwargs.pop("headers", {})
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)


def build_get_overflow_request(**kwargs: Any) -> HttpRequest:
    accept = "application/json"

    url = kwargs.pop("template_url", "/datetimerfc1123/overflow")

    header_parameters = kwargs.pop("headers", {})
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)


def build_get_underflow_request(**kwargs: Any) -> HttpRequest:
    accept = "application/json"

    url = kwargs.pop("template_url", "/datetimerfc1123/underflow")

    header_parameters = kwargs.pop("headers", {})
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
MIT License
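A sketch of how this low-level request builder is typically combined with a generated client's send_request; the import path follows the package layout in the record's path, the body value is illustrative, and client construction is omitted.

from bodydatetimerfc1123lowlevel.rest import datetimerfc1123

# Build the PUT request for the maximum RFC 1123 datetime value.
request = datetimerfc1123.build_put_utc_max_date_time_request(
    json="Fri, 31 Dec 9999 23:59:59 GMT",
    content_type="application/json",
)
print(request.method, request.url)  # PUT /datetimerfc1123/max

# A generated low-level client (constructed elsewhere as `client`) would send it like this:
# response = client.send_request(request)
# response.raise_for_status()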
markusschmitt/vmc_jax
jVMC/mpi_wrapper.py
global_mean
python
def global_mean(data, p=None):
    jit_my_stuff()

    if p is not None:
        return global_sum(mean_helper(data, p))

    global globNumSamples

    return global_sum(data) / globNumSamples
Computes the mean of input data across MPI processes and device/batch dimensions.

On each MPI process the input data is assumed to be a ``jax.numpy.array`` with a leading device
dimension followed by a batch dimension. The data is reduced by computing the mean along device
and batch dimensions as well as across MPI processes. Hence, the result is an array of shape
``data.shape[2:]``.

If no probabilities ``p`` are given, the empirical mean is computed, i.e.,
:math:`\\langle X\\rangle=\\frac{1}{N_S}\sum_{j=1}^{N_S} X_j`

Otherwise, the mean is computed using the given probabilities, i.e.,
:math:`\\langle X\\rangle=\sum_{j=1}^{N_S} p_jX_j`

Arguments:
    * ``data``: Array of input data.
    * ``p``: Probabilities associated with the given data.

Returns:
    Mean of data across MPI processes and device/batch dimensions.
https://github.com/markusschmitt/vmc_jax/blob/53eb70989ac39033388b03633142d34ccdbd4bb0/jVMC/mpi_wrapper.py#L178-L209
from mpi4py import MPI
import jax
import jax.numpy as jnp
import numpy as np

import sys
sys.path.append(sys.path[0] + "/..")
import jVMC.global_defs as global_defs

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
commSize = comm.Get_size()

globNumSamples = 0
myNumSamples = 0

from functools import partial

import time
communicationTime = 0.


def _cov_helper_with_p(data, p):
    return jnp.expand_dims(
        jnp.matmul(jnp.conj(jnp.transpose(data)), jnp.multiply(p[:, None], data)),
        axis=0
    )


def _cov_helper_without_p(data):
    return jnp.expand_dims(
        jnp.matmul(jnp.conj(jnp.transpose(data)), data),
        axis=0
    )


_sum_up_pmapd = None
_sum_sq_pmapd = None
_sum_sq_withp_pmapd = None
mean_helper = None
cov_helper_with_p = None
cov_helper_without_p = None
pmapDevices = None

import collections


def pmap_devices_updated():
    if collections.Counter(pmapDevices) == collections.Counter(global_defs.myPmapDevices):
        return False
    return True


def jit_my_stuff():
    global _sum_up_pmapd
    global _sum_sq_pmapd
    global _sum_sq_withp_pmapd
    global mean_helper
    global cov_helper_with_p
    global cov_helper_without_p
    global pmapDevices

    if pmap_devices_updated():
        _sum_up_pmapd = global_defs.pmap_for_my_devices(lambda x: jax.lax.psum(jnp.sum(x, axis=0), 'i'), axis_name='i')
        _sum_sq_pmapd = global_defs.pmap_for_my_devices(lambda data, mean: jax.lax.psum(jnp.sum(jnp.conj(data - mean) * (data - mean), axis=0), 'i'), axis_name='i', in_axes=(0, None))
        _sum_sq_withp_pmapd = global_defs.pmap_for_my_devices(lambda data, mean, p: jax.lax.psum(jnp.conj(data - mean).dot(p * (data - mean)), 'i'), axis_name='i', in_axes=(0, None, 0))
        mean_helper = global_defs.pmap_for_my_devices(lambda data, p: jnp.expand_dims(jnp.dot(p, data), axis=0), in_axes=(0, 0))
        cov_helper_with_p = global_defs.pmap_for_my_devices(_cov_helper_with_p, in_axes=(0, 0))
        cov_helper_without_p = global_defs.pmap_for_my_devices(_cov_helper_without_p)

        pmapDevices = global_defs.myPmapDevices


def distribute_sampling(numSamples, localDevices=None, numChainsPerDevice=1):
    global globNumSamples

    samplesPerProcess = numSamples // commSize
    if rank < numSamples % commSize:
        samplesPerProcess += 1

    if localDevices is None:
        globNumSamples = numSamples
        return samplesPerProcess

    numChainsPerProcess = localDevices * numChainsPerDevice

    def spc(spp):
        return (spp + numChainsPerProcess - 1) // numChainsPerProcess

    a = numSamples % commSize
    globNumSamples = (a * spc(1 + numSamples // commSize) + (commSize - a) * spc(numSamples // commSize)) * numChainsPerProcess

    return spc(samplesPerProcess)


def first_sample_id():
    global globNumSamples

    mySamples = globNumSamples // commSize

    firstSampleId = rank * mySamples
    if rank < globNumSamples % commSize:
        firstSampleId += rank
    else:
        firstSampleId += globNumSamples % commSize

    return firstSampleId


def global_sum(data):
    jit_my_stuff()

    data.block_until_ready()
    t0 = time.perf_counter()

    localSum = np.array(_sum_up_pmapd(data)[0])

    res = np.empty_like(localSum, dtype=localSum.dtype)
    comm.Allreduce(localSum, res, op=MPI.SUM)

    global communicationTime
    communicationTime += time.perf_counter() - t0

    return jax.device_put(res, global_defs.myDevice)
MIT License
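A sketch of global_mean on a toy observable, assuming a single MPI rank and a single local JAX device so the (device, batch) shape is (1, 4); distribute_sampling is called first because global_mean divides by the globNumSamples it sets.

import jax.numpy as jnp

import jVMC.mpi_wrapper as mpi

# Toy observable: one local device, four samples, one scalar per sample.
data = jnp.array([[1.0, 2.0, 3.0, 4.0]])

# distribute_sampling sets globNumSamples, which global_mean uses as the sample count.
mpi.distribute_sampling(4, localDevices=1, numChainsPerDevice=4)

print(mpi.global_mean(data))           # empirical mean, about 2.5 on a single rank
p = jnp.array([[0.1, 0.2, 0.3, 0.4]])  # normalized per-sample probabilities
print(mpi.global_mean(data, p))        # probability-weighted mean, about 3.0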
oemof/tespy
src/tespy/components/turbomachinery/turbine.py
Turbine.eta_s_char_func
python
def eta_s_char_func(self):
    p = self.eta_s_char.param
    expr = self.get_char_expr(p)
    if not expr:
        msg = ('Please choose a valid parameter, you want to link the '
               'isentropic efficiency to at component ' + self.label + '.')
        logging.error(msg)
        raise ValueError(msg)

    i = self.inl[0]
    o = self.outl[0]
    return (
        -(o.h.val_SI - i.h.val_SI) + self.eta_s.design *
        self.eta_s_char.char_func.evaluate(expr) * (
            isentropic(
                i.get_flow(), o.get_flow(),
                T0=self.inl[0].T.val_SI) - i.h.val_SI))
Equation for given isentropic efficiency characteristic.

Returns
-------
residual : float
    Residual value of equation.

    .. math::

        0 = - \left( h_\mathrm{out} - h_\mathrm{in} \right) +
        \eta_\mathrm{s,design} \cdot f\left( expr \right) \cdot
        \left(h_\mathrm{out,s}-h_\mathrm{in}\right)
https://github.com/oemof/tespy/blob/70bf8da9fd8521a1177613a894829cd1fa78c663/src/tespy/components/turbomachinery/turbine.py#L304-L333
import logging

import numpy as np

from tespy.components.turbomachinery.turbomachine import Turbomachine
from tespy.tools.data_containers import ComponentCharacteristics as dc_cc
from tespy.tools.data_containers import ComponentProperties as dc_cp
from tespy.tools.data_containers import DataContainerSimple as dc_simple
from tespy.tools.document_models import generate_latex_eq
from tespy.tools.fluid_properties import isentropic
from tespy.tools.fluid_properties import v_mix_ph


class Turbine(Turbomachine):

    @staticmethod
    def component():
        return 'turbine'

    def get_variables(self):
        return {
            'P': dc_cp(
                max_val=0, num_eq=1,
                deriv=self.energy_balance_deriv,
                func=self.energy_balance_func,
                latex=self.energy_balance_func_doc),
            'eta_s': dc_cp(
                min_val=0, max_val=1, num_eq=1,
                deriv=self.eta_s_deriv,
                func=self.eta_s_func,
                latex=self.eta_s_func_doc),
            'eta_s_char': dc_cc(
                param='m', num_eq=1,
                deriv=self.eta_s_char_deriv,
                func=self.eta_s_char_func,
                latex=self.eta_s_char_func_doc),
            'pr': dc_cp(
                min_val=0, max_val=1, num_eq=1,
                deriv=self.pr_deriv,
                func=self.pr_func, func_params={'pr': 'pr'},
                latex=self.pr_func_doc),
            'cone': dc_simple(
                deriv=self.cone_deriv, num_eq=1,
                func=self.cone_func, latex=self.cone_func_doc)
        }

    def eta_s_func(self):
        return (
            -(self.outl[0].h.val_SI - self.inl[0].h.val_SI) + (
                isentropic(
                    self.inl[0].get_flow(), self.outl[0].get_flow(),
                    T0=self.inl[0].T.val_SI) - self.inl[0].h.val_SI) *
            self.eta_s.val)

    def eta_s_func_doc(self, label):
        latex = (
            r'0=-\left(h_\mathrm{out}-h_\mathrm{in}\right)+\left('
            r'h_\mathrm{out,s}-h_\mathrm{in}\right)\cdot\eta_\mathrm{s}')
        return generate_latex_eq(self, latex, label)

    def eta_s_deriv(self, increment_filter, k):
        f = self.eta_s_func
        if not increment_filter[0, 1]:
            self.jacobian[k, 0, 1] = self.numeric_deriv(f, 'p', 0)
        if not increment_filter[1, 1]:
            self.jacobian[k, 1, 1] = self.numeric_deriv(f, 'p', 1)
        if not increment_filter[0, 2]:
            self.jacobian[k, 0, 2] = self.numeric_deriv(f, 'h', 0)
        self.jacobian[k, 1, 2] = -1

    def cone_func(self):
        n = 1
        i = self.inl[0]
        o = self.outl[0]
        vol = v_mix_ph(i.get_flow(), T0=self.inl[0].T.val_SI)
        return (
            - i.m.val_SI + i.m.design * i.p.val_SI / i.p.design *
            np.sqrt(i.p.design * i.vol.design / (i.p.val_SI * vol)) *
            np.sqrt(abs((1 - (o.p.val_SI / i.p.val_SI) ** ((n + 1) / n)) /
                        (1 - (self.pr.design) ** ((n + 1) / n)))))

    def cone_func_doc(self, label):
        latex = (
            r'0 = \frac{\dot{m}_\mathrm{in,design}\cdot p_\mathrm{in}}'
            r'{p_\mathrm{in,design}}\cdot\sqrt{\frac{p_\mathrm{in,design}'
            r'\cdot v_\mathrm{in}}{p_\mathrm{in}\cdot '
            r'v_\mathrm{in,design}}\cdot\frac{1-\left('
            r'\frac{p_\mathrm{out}}{p_\mathrm{in}} \right)^{2}}'
            r'{1-\left(\frac{p_\mathrm{out,design}}{p_\mathrm{in,design}}'
            r'\right)^{2}}} -\dot{m}_\mathrm{in}')
        return generate_latex_eq(self, latex, label)

    def cone_deriv(self, increment_filter, k):
        f = self.cone_func
        self.jacobian[k, 0, 0] = -1
        if not increment_filter[0, 1]:
            self.jacobian[k, 0, 1] = self.numeric_deriv(f, 'p', 0)
        if not increment_filter[0, 2]:
            self.jacobian[k, 0, 2] = self.numeric_deriv(f, 'h', 0)
        if not increment_filter[1, 2]:
            self.jacobian[k, 1, 2] = self.numeric_deriv(f, 'p', 1)
MIT License
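A sketch of the typical design/offdesign workflow in which eta_s_char_func comes into play: eta_s is fixed at the design point and replaced off-design by the characteristic line with parameter 'm'. Component and connection values are illustrative and the setup follows the TESPy API used by this source version.

from tespy.networks import Network
from tespy.components import Sink, Source, Turbine
from tespy.connections import Connection

nw = Network(fluids=['water'], p_unit='bar', T_unit='C')

so = Source('steam source')
si = Sink('condenser')
turbine = Turbine('turbine')

inlet = Connection(so, 'out1', turbine, 'in1')
outlet = Connection(turbine, 'out1', si, 'in1')
nw.add_conns(inlet, outlet)

# Fix eta_s at the design point; off-design it is replaced by the characteristic
# (parameter 'm'), which is exactly what eta_s_char_func evaluates.
turbine.set_attr(eta_s=0.9, design=['eta_s'], offdesign=['eta_s_char', 'cone'])
inlet.set_attr(fluid={'water': 1}, m=10, T=500, p=100)
outlet.set_attr(p=1)

nw.solve('design')
nw.save('turbine_design')
# nw.solve('offdesign', design_path='turbine_design')  # now uses eta_s_char_func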
twidi/py-dataql
dataql/parsers/base.py
BaseParser.visit_nb
python
def visit_nb(self, node, _):
    return self.convert_nb(node.text)
Return an int or float from the given number.

Also works with scientific notation like 1e+50.

Arguments
---------
node : parsimonious.nodes.Node.
_ (children) : list, unused

Result
------
int or float

Example
-------

>>> BaseParser('1', default_rule='NB').data
1
>>> BaseParser('-3', default_rule='NB').data
-3
>>> BaseParser('0', default_rule='NB').data
0
>>> BaseParser('0.0', default_rule='NB').data
0.0
>>> BaseParser('1.0', default_rule='NB').data
1.0
>>> BaseParser('9999999999', default_rule='NB').data
9999999999
>>> BaseParser('1e+50', default_rule='NB').data
1e+50
>>> BaseParser('-2.5e33', default_rule='NB').data
-2.5e+33
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/base.py#L449-L485
from abc import ABCMeta
from inspect import isfunction
import re
import sys

from parsimonious import Grammar, NodeVisitor
from parsimonious.exceptions import ParseError, UndefinedLabel, VisitationError
from parsimonious.nodes import RuleDecoratorMeta as BaseRuleDecoratorMeta

from dataql import resources
from dataql.parsers.exceptions import ParserError


def rule(rule_string):
    def decorator(method):
        method.rule = rule_string
        return method
    return decorator


class RuleDecoratorMeta(BaseRuleDecoratorMeta, ABCMeta):

    grammar_simple_parser = re.compile(r'^\s*([a-zA-Z_][a-zA-Z_0-9]*)\s+=')
    ident_simple_parser = re.compile(r'^\s*([a-zA-Z_][a-zA-Z_0-9]*)(\s*(?:#.*)?)$')

    def __new__(mcs, name, bases, namespace):

        def get_rule_name_from_method(method_name):
            result = method_name[6:] if method_name.startswith('visit_') else method_name
            return result.upper()

        grammar_parts = [getattr(b, 'grammar_str', '') for b in reversed(bases)]
        grammar_parts.append(namespace.get('base_grammar', ''))

        methods = [v for k, v in namespace.items() if hasattr(v, 'rule') and isfunction(v)]
        if methods:
            methods.sort(key=lambda x: x.__code__.co_firstlineno)
            for method in methods:
                method_rule = method.rule
                match = mcs.ident_simple_parser.match(method_rule)
                if match:
                    method_rule = '%s NOOP %s' % match.groups()
                grammar_parts.append('%s = %s' % (get_rule_name_from_method(method.__name__), method_rule))

        grammar_str = '\n'.join(grammar_parts)
        namespace['grammar'] = Grammar(grammar_str)

        default_rule = namespace.get('default_rule')
        if not default_rule:
            for base in bases:
                default_rule = getattr(base, 'default_rule', '')
                if default_rule:
                    break
        if default_rule:
            namespace['grammar'] = namespace['grammar'].default(default_rule)

        grammar_dict = {}
        for line in grammar_str.split('\n'):
            match = mcs.grammar_simple_parser.match(line)
            if match:
                grammar_dict[match.group(1)] = line.strip()
        namespace['grammar_str'] = '\n'.join(grammar_dict.values())

        return super().__new__(mcs, name, bases, namespace)


class BaseParser(NodeVisitor, metaclass=RuleDecoratorMeta):

    base_grammar = r"""
        WS = ~"\s*"
        NOOP = ""
        PAR_O = WS "(" WS
        PAR_C = WS ")" WS
        CUR_O = WS "{" WS
        CUR_C = WS "}" WS
        BRA_O = WS "[" WS
        BRA_C = WS "]" WS
        DOT = WS "." WS
        COM = WS "," WS
        COL = ":"
        EQ = "="
        COL_OR_EQ = COL / EQ
    """

    Field = resources.Field
    List = resources.List
    Object = resources.Object
    Filter = resources.Filter
    SliceFilter = resources.SliceFilter
    NamedArg = resources.NamedArg
    PosArg = resources.PosArg

    def __init__(self, text, default_rule=None):
        if default_rule:
            self.grammar = self.grammar.default(default_rule)

        try:
            self.data = self.parse(text)
        except ParseError as ex:
            raise ParserError(ex)

    def visit(self, node):
        try:
            method = getattr(self, 'visit_%s' % node.expr_name.lower())
        except AttributeError:
            return

        try:
            return method(node, [self.visit(n) for n in node])
        except (VisitationError, UndefinedLabel):
            raise
        except self.unwrapped_exceptions:
            raise
        except Exception:
            exc_class, exc, trace = sys.exc_info()
            raise VisitationError(exc, exc_class, node).with_traceback(trace)

    def generic_visit(self, node, children):
        pass

    @rule('~"[_A-Z][_A-Z0-9]*"i')
    def visit_ident(self, node, _):
        return node.text

    @rule('COL_OR_EQ')
    def visit_oper(self, node, _):
        oper = node.text
        if oper == ':':
            oper = '='
        return oper

    @rule('STR / NB / NULL / FALSE / TRUE')
    def visit_value(self, _, children):
        return children[0]

    @rule(r'~"([\'\"])(?:[^\\1\\\\]|\\\\.)*?\\1"')
    def visit_str(self, node, _):
        return self.visit_str.re_single_backslash.sub('', node.text[1:-1])
    visit_str.re_single_backslash = re.compile(r'(?<!\\)\\')

    @rule(r'~"[-+]?\d*\.?\d+([eE][-+]?\d+)?"')
BSD 2-Clause Simplified License
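A short sketch of driving BaseParser with different default rules from the grammar above; the inputs are illustrative and the expected values follow the doctest behaviour quoted in the docstring.

from dataql.parsers.base import BaseParser

# Parse a bare value with different start rules from the grammar above.
print(BaseParser('1e+50', default_rule='NB').data)       # 1e+50
print(BaseParser("'hello'", default_rule='STR').data)    # hello
print(BaseParser('-2.5e33', default_rule='VALUE').data)  # -2.5e+33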
xuru/pyvisdk
pyvisdk/mo/performance_manager.py
PerformanceManager.RemovePerfInterval
python
def RemovePerfInterval(self, samplePeriod):
    return self.delegate("RemovePerfInterval")(samplePeriod)
<b>Deprecated.</b> <i>As of API 2.5, use UpdatePerfInterval. Historical intervals cannot be
removed.</i>

Removes an interval from the list.

:param samplePeriod: The sampling period, in seconds, for the specified interval being removed.
https://github.com/xuru/pyvisdk/blob/de24eb4426eb76233dc2e57640d3274ffd304eb3/pyvisdk/mo/performance_manager.py#L160-L167
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity

import logging

log = logging.getLogger(__name__)


class PerformanceManager(BaseEntity):

    def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.PerformanceManager):
        super(PerformanceManager, self).__init__(core, name=name, ref=ref, type=type)

    @property
    def description(self):
        return self.update('description')

    @property
    def historicalInterval(self):
        return self.update('historicalInterval')

    @property
    def perfCounter(self):
        return self.update('perfCounter')

    def CreatePerfInterval(self, intervalId):
        return self.delegate("CreatePerfInterval")(intervalId)

    def QueryAvailablePerfMetric(self, entity, beginTime=None, endTime=None, intervalId=None):
        return self.delegate("QueryAvailablePerfMetric")(entity, beginTime, endTime, intervalId)

    def QueryPerf(self, querySpec):
        return self.delegate("QueryPerf")(querySpec)

    def QueryPerfComposite(self, querySpec):
        return self.delegate("QueryPerfComposite")(querySpec)

    def QueryPerfCounter(self, counterId):
        return self.delegate("QueryPerfCounter")(counterId)

    def QueryPerfCounterByLevel(self, level):
        return self.delegate("QueryPerfCounterByLevel")(level)

    def QueryPerfProviderSummary(self, entity):
        return self.delegate("QueryPerfProviderSummary")(entity)
MIT License
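A sketch of calling RemovePerfInterval through a PerformanceManager, assuming `core` is an already-connected pyvisdk service object (connection setup omitted) and that the historical interval entries follow the vSphere PerfInterval shape.

from pyvisdk.mo.performance_manager import PerformanceManager

# `core` is an already-connected pyvisdk service object; obtaining it is out of scope here.
perf_mgr = PerformanceManager(core)

# List the configured historical intervals, then drop the 300 s (5 minute) one.
for interval in perf_mgr.historicalInterval:
    print(interval.samplingPeriod, interval.name)

perf_mgr.RemovePerfInterval(300)  # deprecated as of API 2.5 -- prefer UpdatePerfInterval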