Dataset schema (per-record fields, with value statistics from the dataset viewer):

- repository_name: string, length 7–107
- function_path: string, length 4–190
- function_identifier: string, length 1–236
- language: string, 1 distinct value
- function: string, length 9–647k
- docstring: string, length 5–488k
- function_url: string, length 71–285
- context: string, length 0–2.51M
- license: string, 5 distinct values

Sample records follow, one field per line:
unofficial-memsource/memsource-cli-client
memsource_cli/models/search_tm_trans_memory_dto_v3.py
SearchTMTransMemoryDtoV3.__eq__
python
def __eq__(self, other):
    if not isinstance(other, SearchTMTransMemoryDtoV3):
        return False

    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/search_tm_trans_memory_dto_v3.py#L158-L163
import pprint import re import six class SearchTMTransMemoryDtoV3(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'id': 'str', 'name': 'str', 'reverse': 'bool' } attribute_map = { 'id': 'id', 'name': 'name', 'reverse': 'reverse' } def __init__(self, id=None, name=None, reverse=None): self._id = None self._name = None self._reverse = None self.discriminator = None if id is not None: self.id = id if name is not None: self.name = name if reverse is not None: self.reverse = reverse @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def reverse(self): return self._reverse @reverse.setter def reverse(self, reverse): self._reverse = reverse def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(SearchTMTransMemoryDtoV3, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
Apache License 2.0
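A minimal sketch of how the __eq__ above behaves, using only the class shown in the record's context. Equality is attribute-wise because the two instances' __dict__ contents are compared:

# Usage sketch: DTOs with identical attributes compare equal, because
# __eq__ compares the instances' __dict__ contents; a non-DTO operand
# simply yields False rather than raising.
a = SearchTMTransMemoryDtoV3(id='tm1', name='Main TM', reverse=False)
b = SearchTMTransMemoryDtoV3(id='tm1', name='Main TM', reverse=False)
c = SearchTMTransMemoryDtoV3(id='tm2', name='Main TM', reverse=False)

assert a == b            # same attribute values -> equal
assert not (a == c)      # differing 'id' -> not equal
assert not (a == 'tm1')  # non-DTO operand -> False, not a TypeError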
siddhi/test_driven_python
stock_alerter/stock.py
Stock.update
python
def update(self, timestamp, price):
    if price < 0:
        raise ValueError("price should not be negative")
    self.history.update(timestamp, price)
    self.updated.fire(self)
Updates the stock with the price at the given timestamp

>>> stock.update(datetime(2014, 10, 2), 10)
>>> stock.price
10

The method raises a ValueError exception if the price is negative

>>> stock.update(datetime(2014, 10, 2), -1)
Traceback (most recent call last):
    ...
ValueError: price should not be negative
https://github.com/siddhi/test_driven_python/blob/751e204b0b32b2780db598297217ff2758a9f39f/stock_alerter/stock.py#L58-L75
from datetime import timedelta from enum import Enum from .event import Event from .timeseries import TimeSeries, MovingAverage, NotEnoughDataException class StockSignal(Enum): buy = 1 neutral = 0 sell = -1 class Stock: LONG_TERM_TIMESPAN = 10 SHORT_TERM_TIMESPAN = 5 def __init__(self, symbol): self.symbol = symbol self.history = TimeSeries() self.updated = Event() @property def price(self): try: return self.history[-1].value except IndexError: return None
MIT License
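The doctests above rely on TimeSeries and Event collaborators that are imported but not defined in the record's context. A self-contained sketch with hypothetical minimal stand-ins illustrates the update flow:

# Self-contained sketch of the update flow. StubTimeSeries and StubEvent
# are hypothetical stand-ins, not the project's real implementations.
from datetime import datetime

class StubTimeSeries:
    def __init__(self):
        self.entries = []
    def update(self, timestamp, value):
        self.entries.append((timestamp, value))

class StubEvent:
    def __init__(self):
        self.listeners = []
    def connect(self, listener):
        self.listeners.append(listener)
    def fire(self, *args):
        for listener in self.listeners:
            listener(*args)

class Stock:
    def __init__(self, symbol):
        self.symbol = symbol
        self.history = StubTimeSeries()
        self.updated = StubEvent()
    def update(self, timestamp, price):
        if price < 0:
            raise ValueError("price should not be negative")
        self.history.update(timestamp, price)  # record the new price point
        self.updated.fire(self)                # notify subscribers

stock = Stock("GOOG")
stock.updated.connect(lambda s: print("updated:", s.symbol))
stock.update(datetime(2014, 10, 2), 10)  # prints "updated: GOOG"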
tencent/pysc2tencentextension
pysc2/env/sc2_env.py
SC2Env.reset
python
def reset(self):
    self._episode_steps = 0
    if self._episode_count:
        self._restart()
    self._episode_count += 1
    logging.info("Starting episode: %s", self._episode_count)
    self._metrics.increment_episode()
    self._last_score = [0] * self._num_agents
    self._state = environment.StepType.FIRST
    self._game_info = self._parallel.run(c.game_info for c in self._controllers)
    return self._observe(target_game_loop=0)
Start a new episode.
https://github.com/tencent/pysc2tencentextension/blob/1cb01427f720381125aef067ec14e8602777ee8a/pysc2/env/sc2_env.py#L496-L510
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from absl import logging import time import enum import numpy as np import portpicker from pysc2 import maps from pysc2 import run_configs from pysc2.env import environment from pysc2.lib import actions as actions_lib from pysc2.lib import features from pysc2.lib import metrics from pysc2.lib import named_array from pysc2.lib import protocol from pysc2.lib import renderer_human from pysc2.lib import run_parallel from pysc2.lib import stopwatch from s2clientprotocol import common_pb2 as sc_common from s2clientprotocol import sc2api_pb2 as sc_pb sw = stopwatch.sw possible_results = { sc_pb.Victory: 1, sc_pb.Defeat: -1, sc_pb.Tie: 0, sc_pb.Undecided: 0, } class Race(enum.IntEnum): random = sc_common.Random protoss = sc_common.Protoss terran = sc_common.Terran zerg = sc_common.Zerg class Difficulty(enum.IntEnum): very_easy = sc_pb.VeryEasy easy = sc_pb.Easy medium = sc_pb.Medium medium_hard = sc_pb.MediumHard hard = sc_pb.Hard harder = sc_pb.Harder very_hard = sc_pb.VeryHard cheat_vision = sc_pb.CheatVision cheat_money = sc_pb.CheatMoney cheat_insane = sc_pb.CheatInsane ActionSpace = actions_lib.ActionSpace Dimensions = features.Dimensions AgentInterfaceFormat = features.AgentInterfaceFormat parse_agent_interface_format = features.parse_agent_interface_format Agent = collections.namedtuple("Agent", ["race"]) Bot = collections.namedtuple("Bot", ["race", "difficulty"]) def ext_score(obs): score_details = obs.observation.score.score_details return named_array.NamedNumpyArray([ obs.observation.score.score, score_details.idle_production_time, score_details.idle_worker_time, score_details.total_value_units, score_details.total_value_structures, score_details.killed_value_units, score_details.killed_value_structures, score_details.collected_minerals, score_details.collected_vespene, score_details.collection_rate_minerals, score_details.collection_rate_vespene, score_details.spent_minerals, score_details.spent_vespene, ], names=features.ScoreCumulative, dtype=np.int32) def _pick_unused_ports(num_ports, retry_interval_secs=3, retry_attempts=5): ports = set() for _ in range(retry_attempts): ports.update( portpicker.pick_unused_port() for _ in range(num_ports - len(ports))) ports.discard(None) if len(ports) == num_ports: return list(ports) time.sleep(retry_interval_secs) for port in ports: portpicker.return_port(port) raise RuntimeError("Unable to obtain %d unused ports." % num_ports) class SC2Env(environment.Base): def __init__(self, _only_use_kwargs=None, map_name=None, players=None, agent_names=None, agent_race=None, bot_race=None, difficulty=None, screen_size_px=None, minimap_size_px=None, agent_interface_format=None, discount=1., visualize=False, step_mul=None, realtime=False, save_replay_episodes=0, replay_dir=None, game_steps_per_episode=None, score_index=None, score_multiplier=None, random_seed=None, disable_fog=False, crop_to_playable_area=False, show_cloaked=False, show_burrowed_shadows=False, show_placeholders=False, raw_affects_selection=True, update_game_info=False, use_pysc2_feature=True, version=None): if _only_use_kwargs: raise ValueError("All arguments must be passed as keyword arguments.") if screen_size_px or minimap_size_px: raise DeprecationWarning( "screen_size_px and minimap_size_px are deprecated. Use the feature " "or rgb variants instead. 
Make sure to check your observations too " "since they also switched from screen/minimap to feature and rgb " "variants.") if agent_race or bot_race or difficulty: raise DeprecationWarning( "Explicit agent and bot races are deprecated. Pass an array of " "sc2_env.Bot and sc2_env.Agent instances instead.") map_inst = maps.get(map_name) self._map_name = map_name if not players: players = list() players.append(Agent(Race.random)) if not map_inst.players or map_inst.players >= 2: players.append(Bot(Race.random, Difficulty.very_easy)) for p in players: if not isinstance(p, (Agent, Bot)): raise ValueError( "Expected players to be of type Agent or Bot. Got: %s." % p) num_players = len(players) self._num_agents = sum(1 for p in players if isinstance(p, Agent)) self._players = players if agent_names is None: self._agent_names = [None] * self._num_agents else: assert len(agent_names) == self._num_agents self._agent_names = agent_names if not 1 <= num_players <= 2 or not self._num_agents: raise ValueError( "Only 1 or 2 players with at least one agent is " "supported at the moment.") if save_replay_episodes and not replay_dir: raise ValueError("Missing replay_dir") if map_inst.players and num_players > map_inst.players: raise ValueError( "Map only supports %s players, but trying to join with %s" % ( map_inst.players, num_players)) self._discount = discount self._step_mul = step_mul or map_inst.step_mul self._realtime = realtime self._save_replay_episodes = save_replay_episodes self._replay_dir = replay_dir self._random_seed = random_seed self._disable_fog = disable_fog self._version = version self._update_game_info = update_game_info if score_index is None: self._score_index = map_inst.score_index else: self._score_index = score_index if score_multiplier is None: self._score_multiplier = map_inst.score_multiplier else: self._score_multiplier = score_multiplier self._episode_length = game_steps_per_episode if self._episode_length is None: self._episode_length = map_inst.game_steps_per_episode self._run_config = run_configs.get() self._parallel = run_parallel.RunParallel() if agent_interface_format is None: raise ValueError("Please specify agent_interface_format.") if isinstance(agent_interface_format, AgentInterfaceFormat): agent_interface_format = [agent_interface_format] * self._num_agents if len(agent_interface_format) != self._num_agents: raise ValueError( "The number of entries in agent_interface_format should " "correspond 1-1 with the number of agents.") self.raw = not use_pysc2_feature interfaces = [] for i, interface_format in enumerate(agent_interface_format): interfaces.append(self._get_interface(interface_format, self.raw, crop_to_playable_area, show_cloaked, show_burrowed_shadows, show_placeholders, raw_affects_selection)) if self._num_agents == 1: self._launch_sp(map_inst, interfaces[0]) else: self._launch_mp(map_inst, interfaces) self._finalize(agent_interface_format, interfaces, visualize) def _finalize(self, agent_interface_formats, interfaces, visualize): game_info = self._parallel.run(c.game_info for c in self._controllers) self._game_info = game_info if not self._map_name: self._map_name = game_info[0].map_name for g, interface in zip(game_info, interfaces): if g.options.render != interface.render: logging.warning( "Actual interface options don't match requested options:\n" "Requested:\n%s\n\nActual:\n%s", interface, g.options) self._features = [None for c in self._controllers] if not self.raw: self._features = [ features.features_from_game_info( game_info=g, 
use_feature_units=agent_interface_format.use_feature_units, action_space=agent_interface_format.action_space, hide_specific_actions=agent_interface_format.hide_specific_actions) for g, agent_interface_format in zip(game_info, agent_interface_formats) ] if visualize: static_data = self._controllers[0].data() self._renderer_human = renderer_human.RendererHuman() self._renderer_human.init(game_info[0], static_data) else: self._renderer_human = None self._metrics = metrics.Metrics(self._map_name) self._metrics.increment_instance() self._last_score = None self._total_steps = 0 self._episode_steps = 0 self._episode_count = 0 self._obs = None self._state = environment.StepType.LAST logging.info("Environment is ready on map: %s", self._map_name) @staticmethod def _get_interface(agent_interface_format, require_raw, crop_to_playable_area=False, show_cloaked=False, show_burrowed_shadows=False, show_placeholders=False, raw_affects_selection=True): interface = sc_pb.InterfaceOptions( raw=(agent_interface_format.use_feature_units or require_raw), score=True, raw_affects_selection=raw_affects_selection, raw_crop_to_playable_area=crop_to_playable_area, show_cloaked=show_cloaked, show_burrowed_shadows=show_burrowed_shadows, show_placeholders=show_placeholders, ) if agent_interface_format is not None: if agent_interface_format.feature_dimensions: interface.feature_layer.width = ( agent_interface_format.camera_width_world_units) agent_interface_format.feature_dimensions.screen.assign_to( interface.feature_layer.resolution) agent_interface_format.feature_dimensions.minimap.assign_to( interface.feature_layer.minimap_resolution) interface.feature_layer.crop_to_playable_area = crop_to_playable_area if agent_interface_format.rgb_dimensions: agent_interface_format.rgb_dimensions.screen.assign_to( interface.render.resolution) agent_interface_format.rgb_dimensions.minimap.assign_to( interface.render.minimap_resolution) interface.render.crop_to_playable_area = crop_to_playable_area return interface def _launch_sp(self, map_inst, interface): self._sc2_procs = [self._run_config.start(version=self._version)] self._controllers = [p.controller for p in self._sc2_procs] create = sc_pb.RequestCreateGame( local_map=sc_pb.LocalMap( map_path=map_inst.path, map_data=map_inst.data(self._run_config)), disable_fog=self._disable_fog, realtime=self._realtime) agent_race = Race.random for p in self._players: if isinstance(p, Agent): create.player_setup.add(type=sc_pb.Participant) agent_race = p.race else: create.player_setup.add(type=sc_pb.Computer, race=p.race, difficulty=p.difficulty) if self._random_seed is not None: create.random_seed = self._random_seed self._controllers[0].create_game(create) join = sc_pb.RequestJoinGame(race=agent_race, options=interface, player_name=self._agent_names[0]) self._controllers[0].join_game(join) def _launch_mp(self, map_inst, interfaces): self._ports = _pick_unused_ports(self._num_agents * 2) self._sc2_procs = [self._run_config.start(version=self._version, extra_ports=self._ports) for _ in range(self._num_agents)] self._controllers = [p.controller for p in self._sc2_procs] for c in self._controllers: c.save_map(map_inst.path, map_inst.data(self._run_config)) create = sc_pb.RequestCreateGame( local_map=sc_pb.LocalMap( map_path=map_inst.path), disable_fog=self._disable_fog, realtime=self._realtime) if self._random_seed is not None: create.random_seed = self._random_seed for p in self._players: if isinstance(p, Agent): create.player_setup.add(type=sc_pb.Participant) else: 
create.player_setup.add(type=sc_pb.Computer, race=p.race, difficulty=p.difficulty) self._controllers[0].create_game(create) agent_players = (p for p in self._players if isinstance(p, Agent)) join_reqs = [] for agent_index, p in enumerate(agent_players): ports = self._ports[:] join = sc_pb.RequestJoinGame(options=interfaces[agent_index], player_name=self._agent_names[agent_index]) join.shared_port = 0 join.server_ports.game_port = ports.pop(0) join.server_ports.base_port = ports.pop(0) for _ in range(self._num_agents - 1): join.client_ports.add(game_port=ports.pop(0), base_port=ports.pop(0)) join.race = p.race join_reqs.append(join) self._parallel.run((c.join_game, join) for c, join in zip(self._controllers, join_reqs)) self._create_req = create self._join_reqs = join_reqs def observation_spec(self): if not self.raw: return tuple(f.observation_spec() for f in self._features) else: return [None] * self._num_agents def action_spec(self): if not self.raw: return tuple(f.action_spec() for f in self._features) else: return [None] * self._num_agents def _restart(self): if len(self._controllers) == 1: self._controllers[0].restart() else: self._parallel.run(c.leave for c in self._controllers) self._controllers[0].create_game(self._create_req) self._parallel.run((c.join_game, j) for c, j in zip(self._controllers, self._join_reqs)) @sw.decorate
Apache License 2.0
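A hedged usage sketch of how this environment is typically constructed and reset. The map name and screen sizes are illustrative assumptions, and a local StarCraft II installation is assumed; the constructor keywords follow the __init__ shown in the record's context:

# Usage sketch (illustrative values, SC2 install assumed).
from pysc2.env import sc2_env

env = sc2_env.SC2Env(
    map_name="Simple64",
    players=[sc2_env.Agent(sc2_env.Race.terran),
             sc2_env.Bot(sc2_env.Race.zerg, sc2_env.Difficulty.very_easy)],
    agent_interface_format=sc2_env.AgentInterfaceFormat(
        feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64)),
    step_mul=8)

# The first reset() only initializes counters; every later call also
# restarts the game via _restart() before returning the FIRST timestep.
timesteps = env.reset()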
mfouesneau/tap
examples/ipython_notebook.py
NBPbar.update
python
def update(self, n, desc=None, total=None):
    if total is None:
        total = self._maxval
    if desc is not None:
        self.desc = desc
    cur_t = _time.time()
    self.print_status(n, total, cur_t - self._start_t)
Kept for backward compatibility and the decorator feature
https://github.com/mfouesneau/tap/blob/dc304f2981ecd2333d9b5784a10e556187849203/examples/ipython_notebook.py#L874-L881
from IPython.display import display, Markdown try: from nbconvert.filters.markdown import markdown2latex, markdown2html except ImportError: from IPython.nbconvert.filters.markdown import markdown2latex, markdown2html from IPython.display import DisplayObject import time as _time import sys class Caption(Markdown): def __init__(self, s, center=False, **kwargs): Markdown.__init__(self, s, **kwargs) self._center = center def _repr_html_(self): txt = markdown2html(self.data) if self._center: return '<center>{0}</center>'.format(txt) else: return '{0}'.format(txt) def _repr_latex_(self): txt = markdown2latex(self.data) if self._center: return '\\begin{center}\n' + txt + '\n\\end{center}' else: return txt def display(self): display(self) def __str__(self): return self._repr_latex_() class Matrix(object): def __init__(self,s, fmt='%0.4g'): self.s = s self._fmt = fmt def _repr_(self): text = r"""\begin{bmatrix}""" t = [] for k in self.s: t.append( ' & '.join([self._fmt % v for v in k] ) + r'\\' ) text += ''.join(t) text += r"""\end{bmatrix}""" return Markdown(text) def _repr_latex_(self): text = r"""\begin{bmatrix}""" t = [] for k in self.s: t.append( ' & '.join([self._fmt % v for v in k] ) + r'\\' ) text += ''.join(t) text += r"""\end{bmatrix}""" return text def __str__(self): return self._repr_latex_() def display(self): display(self) def disp_markdown(*args): return display(Markdown(*args)) def load_latex_macros(): return disp_markdown(open('notebook_macros').read()) def add_input_toggle(): from IPython.display import HTML, display r = HTML(''' <script> $( document ).ready(function () { IPython.CodeCell.options_default['cm_config']['lineWrapping'] = true; IPython.notebook.get_selected_cell() IPython.toolbar.add_buttons_group([ { 'label' : 'toggle all input cells', 'icon' : 'fa-eye-slash', 'callback': function(){ $('div.input').slideToggle(); } } ]); }); </script> ''') display(r) return r def add_citation_button(): from IPython.display import HTML, display r = HTML(""" <script> function insert_citn() { // Build paragraphs of cell type and count var entry_box = $('<input type="text"/>'); var body = $('<div><p> Enter the Bibtex reference to insert </p><form>').append(entry_box) .append('</form></div>'); // Show a modal dialog with the stats IPython.dialog.modal({ notebook: IPython.notebook, keyboard_manager: IPython.notebook.keyboard_manager, title: "Bibtex reference insertion", body: body, open: function() { // Submit on pressing enter var that = $(this); that.find('form').submit(function () { that.find('.btn-primary').first().click(); return false; }); entry_box.focus(); }, buttons : { "Cancel" : {}, "Insert" : { "class" : "btn-primary", "click" : function() { // Retrieve the selected citation, add to metadata, var citation = entry_box.val(); // if (!citation) {return;} var citn_html = '<cite data-cite="' + citation + '">' + citation + '</cite>'; var cell = IPython.notebook.get_selected_cell(); cell.code_mirror.replaceSelection(citn_html); } } } }); }; $( document ).ready(function () { IPython.toolbar.add_buttons_group([ { 'label' : 'insert bibtex reference in markdown', 'icon' : 'fa-graduation-cap', // http://fontawesome.io/icons/ 'callback': insert_citn, } ]); }); </script> <style> cite { font-style: normal; color: #45749e; } </style> """) display(r) return r class PDF(object): def __init__(self,url): self.url = url def _repr_html_(self): return '<iframe src=%s></iframe>' % self.url def _repr_latex_(self): return r'\begin{center} \adjustimage{max 
size={0.9\linewidth}{0.9\paperheight}}{%s}\end{center}' % self.url class Table(DisplayObject): VDOTS = object() def __init__(self, data, headings=None, formats=None, caption=None, label=None, position='h', subtables=1): if len(data) == 0: raise ValueError("data is empty") if label is None != caption is None: raise ValueError("specify neither or both of label & caption") self.columns = len(data[0]) if self.columns == 0: raise ValueError("no columns") if headings and len(headings) != self.columns: raise ValueError("bad headings length") if isinstance(formats, str): formats = [formats.format] * self.columns elif callable(formats): formats = [formats] * self.columns elif formats: if len(formats) != self.columns: raise ValueError("bad formats length") def maybe_string_format(f): if isinstance(f, str): return f.format else: assert callable(f) return f formats = list(map(maybe_string_format, formats)) else: formats = [self._default_format] * self.columns for i, row in enumerate(data): if row is not self.VDOTS and len(row) != self.columns: raise ValueError("bad row length", i) self.headings = headings self.data = data self.formats = formats self.caption = caption self.label = label self.position = position self.subtables = subtables @staticmethod def _default_format(what): if isinstance(what, float): return "{0:.5f}".format(what) else: return str(what) def _format_rows(self): for row in self.data: if row is self.VDOTS: yield self.VDOTS else: yield (f(x) for f, x in zip(self.formats, row)) def _subtables_split(self): assert self.subtables > 1 rows = list(self._format_rows()) nominal_height = len(rows) // self.subtables remainder = len(rows) % self.subtables heights = [nominal_height] * self.subtables for i in range(remainder): heights[i] += 1 slices = [] acc = 0 for l in heights: slices.append((acc, acc + l)) acc += l assert slices[-1][1] == len(rows) subtables = [rows[a:b] for a, b in slices] return subtables def _repr_latex_(self): strings = [] strings.append(r""" \begin{table}[""" + self.position + r"""] \centering """) if self.label: strings.append(r"\caption{" + self.caption + "}") strings.append(r"\label{tab:" + self.label + "}") if self.subtables > 1: subtables = self._subtables_split() width = "{:.3f}\linewidth".format(0.95 / self.subtables) for i, rows in enumerate(subtables): strings.append(r"\begin{{subtable}}[t]{{{0}}}%".format(width)) strings.append(r""" \centering \vspace{0pt} """) self._latex_tabular(strings, rows) strings.append(r"\end{subtable}%") if i != len(subtables) - 1: strings.append("\hfill%") else: rows = self._format_rows() self._latex_tabular(strings, rows) strings.append(r""" \end{table} """) return "\n".join(strings) def _latex_tabular(self, strings, rows): x = "|".join(["c"] * self.columns) strings.append(r"\begin{tabular}{|" + x + "|}") strings.append(r"\hline") if self.headings: latex = " & ".join(str(x) for x in self.headings) strings.append(latex + r" \\") strings.append(r"\hline") for row in rows: if row is self.VDOTS: row = [r"\vdots"] * self.columns latex = " & ".join(row) strings.append(latex + r" \\") strings.append(r""" \hline \end{tabular}%""") def _repr_html_(self): strings = [] strings.append(""" <style type="text/css"> .util_Table td { text-align: center; } .util_Table tbody tr, .util_Table tbody td { border-bottom: 0; border-top: 0; } .util_Table_subtable { float: left; } </style> """) if self.label: c = self.caption l = "<code>[{}]</code>".format(self.label) strings.append(""" <h3>{1} {2}</h3> """.format(self.columns, c, l)) if self.subtables > 1: 
subtables = self._subtables_split() strings.append("<div class='clearfix'>") for rows in subtables: strings.append("<div class='util_Table_subtable'>") self._html_table(strings, rows) strings.append("</div>") strings.append("</div>") else: rows = self._format_rows() self._html_table(strings, rows) return "\n".join(strings) def _html_table(self, strings, rows): strings.append("<table class='util_Table'>") if self.headings: strings.append("<thead>") strings.append("<tr>") headings = map("<th>{0}</th>".format, self.headings) strings.append("\n".join(headings)) strings.append("</tr>") strings.append("</thead>") strings.append("<tbody>") for row in rows: if row is self.VDOTS: row = ["\u22ee"] * self.columns strings.append("<tr>") row = map("<td>{0}</td>".format, row) strings.append("\n".join(row)) strings.append("</tr>") strings.append("</tbody>") strings.append("</table>") def __repr__(self): if self.headings: widths = [len(x) for x in self.headings] data = [self.headings] else: widths = None data = [] for row in self._format_rows(): if row is self.VDOTS: continue r = list(row) w = [len(x) for x in r] if widths is None: widths = w else: widths = [max(a, b) for a, b in zip(widths, w)] data.append(list(r)) strings = [] if self.label: c = self.caption.replace("\n", " ") strings.append('Table: {0} ({1})'.format(self.label, c)) for row in data: if row is self.VDOTS: strings.append('...') else: r = [x.ljust(b + 4) for x, b in zip(row, widths)] strings.append(''.join(r)) return '\n'.join(strings) def __html__(self): return self._repr_html_() class LatexFigure(object): extension = 'pdf' def __init__(self, label, caption, fig=None, position="", star=False, options='width=\columnwidth', margin=False): if fig is None: from matplotlib.pyplot import gcf fig = gcf() self.label = label self.caption = caption self.fig = fig self.position = position self.options = options self.star = star self.margin = margin self.filename = "figure_{0:s}.{1:s}".format(label, self.__class__.extension) import pylab as plt try: plt.savefig(self.filename, bbox_inches='tight') except: plt.savefig(self.filename) def _repr_html_(self): return markdown2html('> **Figure (<a name="fig:{label:s}">{label:s}</a>)**: {caption:s}'.format( label=self.label, caption=self.caption)) def _repr_latex_(self, subfigure=None): if subfigure: environment = "subfigure" args = "[{position}]{{{width}}}".format(**subfigure) else: environment = "figure" args = "[{0}]".format(self.position) args = args.replace('[]', '') if self.star: environment += '*' elif self.margin & (not subfigure): environment = "margin" + environment return r"""\begin{{{env:s}}}{args:s} \centering \includegraphics[{options:s}]{{{fname:s}}} \caption{{{caption:s}}} \label{{fig:{label:s}}} \end{{{env:s}}} """.format(env=environment, args=args, options=self.options, fname=self.filename, caption=self.caption, label=self.label) def __repr__(self): c = self.caption.replace("\n", " ") return "Figure: {0} ({1})".format(self.label, c) def __html__(self): return "" class LatexSubfigures(object): def __init__(self, label, caption, figures, position='h', subfigure_position='b', star=False): self.label = label self.caption = caption self.figures = figures self.position = position self.subfigure_position = subfigure_position self.star = star def _repr_html_(self): return markdown2html('> **Figure (<a name="fig:{label:s}">{label:s}</a>)**: {caption:s}'.format( label=self.label, caption=self.caption)) def _repr_latex_(self): strings = [] environment = "figure" if self.star: environment += '*' 
strings.append(r"""\begin{""" + environment + """}[""" + self.position + r"""] \centering """) opts = {"position": self.subfigure_position, "width": "{0:0.2f}\linewidth".format((1 - len(self.figures) * 0.01) / len(self.figures))} for f in self.figures: latex = f._repr_latex_(subfigure=opts).strip() strings.append(latex) strings.append(r""" \caption{""" + self.caption + r"""} \label{fig:""" + self.label + r"""} \end{""" + environment + """} """) return "\n".join(strings) def __repr__(self): c = self.caption.replace("\n", " ") strings = ["Figure group: {0} ({1})".format(self.label, c)] strings += [repr(x) for x in self.figures] return "\n".join(strings) def __html__(self): return "" class LatexNumberFormatter(object): def __init__(self, sf=10): self.sf = sf self.s_fmt = "{{:.{0}e}}".format(self.sf) def __call__(self, n): n = self.s_fmt.format(n) n, e, exp = n.partition("e") if e == "e": exp = int(exp) if not n.startswith("-"): n = r"\phantom{-}" + n return r"${} \times 10^{{{}}}$".format(n, exp) else: return "${}$".format(n) class NBPbar(object): def __init__(self, desc=None, maxval=None, time=True, eta=True, rate=True, length=None, file=None, keep=True, mininterval=0.5, miniters=1, units='iters', **kwargs): self.time = time self.eta = eta self.rate = rate self.desc = desc or '' self.units = units self.file = file or sys.stdout self._last_print_len = 0 self.keep = keep self.mininterval = mininterval self.miniters = miniters self._auto_width = True self.length = 10 if length is not None: self.length = length self._auto_width = False self._start_t = _time.time() self._maxval = maxval if 'txt' in kwargs: self.desc = kwargs['txt'] self._F = None @staticmethod def format_interval(t): mins, s = divmod(int(t), 60) h, m = divmod(mins, 60) d, h = divmod(h, 24) txt = '{m:02d}:{s:02d}' if h: txt = '{h:02d}:' + txt if d: txt = '{d:d}d ' + txt return txt.format(d=d, h=h, m=m, s=s) def build_str_meter(self, n, total, elapsed): if n > total: total = None vals = {'n': n} vals['elapsed'] = self.format_interval(elapsed) vals['rate'] = '{0:5.2f}'.format((n / elapsed)) if elapsed else '?' vals['units'] = self.units if not total: txt = '{desc:s} {n:d}' else: txt = '{desc:s} {n:d}/{total:d} {percent:s}' if self.time or self.eta or self.rate: txt += ' [' info = [] if self.time: info.append('time: {elapsed:s}') if self.eta and total: info.append('eta: {left:s}') if self.rate: info.append('{rate:s} {units:s}/sec') txt += ', '.join(info) + ']' if not total: return txt.format(**vals) frac = float(n) / total vals['desc'] = self.desc vals['percent'] = '{0:3.0%}'.format(frac) vals['left'] = self.format_interval(elapsed / n * (total - n)) if n else '?' 
vals['total'] = total return txt.format(**vals) def print_status(self, n, total, elapsed): from IPython.html.widgets import FloatProgress desc = self.build_str_meter(n, total, elapsed) if self._F is None: self._F = FloatProgress(min=0, max=total, description=desc) display(self._F) self._F.value = n self._F.description = desc def iterover(self, iterable, total=None): if total is None: try: total = len(iterable) except TypeError: total = self._maxval self.print_status(0, total, 0) last_print_n = 0 start_t = last_print_t = _time.time() for n, obj in enumerate(iterable): yield obj if n - last_print_n >= self.miniters: cur_t = _time.time() if cur_t - last_print_t >= self.mininterval: self.print_status(n, total, cur_t - start_t) last_print_n = n last_print_t = cur_t if self.keep: if last_print_n < n: cur_t = _time.time() self.print_status(n, total, cur_t - start_t) self.file.write('\n') def __enter__(self): return self def __exit__(self, *args, **kwargs): return False
MIT License
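A hypothetical usage sketch for the progress bar above. It assumes a Jupyter/IPython notebook session, since print_status renders through the legacy IPython.html.widgets FloatProgress widget:

# Usage sketch (notebook environment assumed).
import time

pbar = NBPbar(desc="processing", maxval=100)
for i in range(100):
    time.sleep(0.01)    # stand-in for real work
    pbar.update(i + 1)  # recomputes elapsed time and redraws the widget

# iterover() wraps an iterable and updates the bar automatically:
for item in NBPbar(desc="items").iterover(range(100)):
    pass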
jahjajaka/afternoon_cleaner
object_detection/predictors/heads/keras_box_head.py
ConvolutionalBoxHead._predict
python
def _predict(self, features):
    box_encodings = features
    for layer in self._box_encoder_layers:
        box_encodings = layer(box_encodings)
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
        batch_size = tf.shape(features)[0]
    if self._box_encodings_clip_range is not None:
        box_encodings = tf.clip_by_value(
            box_encodings,
            self._box_encodings_clip_range.min,
            self._box_encodings_clip_range.max)
    box_encodings = tf.reshape(box_encodings,
                               [batch_size, -1, 1, self._box_code_size])
    return box_encodings
Predicts boxes.

Args:
  features: A float tensor of shape [batch_size, height, width, channels]
    containing image features.

Returns:
  box_encodings: A float tensor of shape
    [batch_size, num_anchors, q, code_size] representing the location of
    the objects, where q is 1 or the number of classes.
https://github.com/jahjajaka/afternoon_cleaner/blob/590bdf58a216cbc6cfc47ef8f49d7af3df3703b7/object_detection/predictors/heads/keras_box_head.py#L107-L132
import tensorflow as tf from object_detection.predictors.heads import head class ConvolutionalBoxHead(head.KerasHead): def __init__(self, is_training, box_code_size, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, use_depthwise=False, box_encodings_clip_range=None, name=None): super(ConvolutionalBoxHead, self).__init__(name=name) self._is_training = is_training self._box_code_size = box_code_size self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._box_encodings_clip_range = box_encodings_clip_range self._box_encoder_layers = [] if self._use_depthwise: self._box_encoder_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='BoxEncodingPredictor_depthwise', **conv_hyperparams.params())) self._box_encoder_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='BoxEncodingPredictor_depthwise_batchnorm')) self._box_encoder_layers.append( conv_hyperparams.build_activation_layer( name='BoxEncodingPredictor_depthwise_activation')) self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [1, 1], name='BoxEncodingPredictor', **conv_hyperparams.params(use_bias=True))) else: self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], padding='SAME', name='BoxEncodingPredictor', **conv_hyperparams.params(use_bias=True)))
MIT License
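A shape sketch of the final reshape in _predict, in plain NumPy so it runs without TensorFlow. The dimensions are illustrative; the point is how the A * code_size conv channels regroup into per-anchor encodings:

# With A predictions per location, the conv stack emits A * code_size
# channels, which are regrouped into [batch, num_anchors, 1, code_size].
import numpy as np

batch, height, width = 2, 4, 4
num_predictions_per_location, box_code_size = 3, 4

conv_output = np.zeros(
    (batch, height, width, num_predictions_per_location * box_code_size))
box_encodings = conv_output.reshape(batch, -1, 1, box_code_size)

# num_anchors == height * width * num_predictions_per_location
assert box_encodings.shape == (2, 4 * 4 * 3, 1, 4)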
frank-qlu/recruit
招聘爬虫/zlzpView/static/zlzpView/venv/Lib/site-packages/numpy/lib/histograms.py
_unsigned_subtract
python
def _unsigned_subtract(a, b):
    signed_to_unsigned = {
        np.byte: np.ubyte,
        np.short: np.ushort,
        np.intc: np.uintc,
        np.int_: np.uint,
        np.longlong: np.ulonglong
    }
    dt = np.result_type(a, b)
    try:
        dt = signed_to_unsigned[dt.type]
    except KeyError:
        return np.subtract(a, b, dtype=dt)
    else:
        return np.subtract(a, b, casting='unsafe', dtype=dt)
Subtract two values where a >= b, and produce an unsigned result.

This is needed when finding the difference between the upper and lower
bound of an int16 histogram.
https://github.com/frank-qlu/recruit/blob/0875fb1d2cfb581aaa8abc7a97880c0ce5bf6147/招聘爬虫/zlzpView/static/zlzpView/venv/Lib/site-packages/numpy/lib/histograms.py#L325-L348
from __future__ import division, absolute_import, print_function import functools import operator import warnings import numpy as np from numpy.compat.py3k import basestring from numpy.core import overrides __all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') _range = range def _hist_bin_sqrt(x, range): del range return x.ptp() / np.sqrt(x.size) def _hist_bin_sturges(x, range): del range return x.ptp() / (np.log2(x.size) + 1.0) def _hist_bin_rice(x, range): del range return x.ptp() / (2.0 * x.size ** (1.0 / 3)) def _hist_bin_scott(x, range): del range return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) def _hist_bin_stone(x, range): n = x.size ptp_x = np.ptp(x) if n <= 1 or ptp_x == 0: return 0 def jhat(nbins): hh = ptp_x / nbins p_k = np.histogram(x, bins=nbins, range=range)[0] / n return (2 - (n + 1) * p_k.dot(p_k)) / hh nbins_upper_bound = max(100, int(np.sqrt(n))) nbins = min(_range(1, nbins_upper_bound + 1), key=jhat) if nbins == nbins_upper_bound: warnings.warn("The number of bins estimated may be suboptimal.", RuntimeWarning, stacklevel=2) return ptp_x / nbins def _hist_bin_doane(x, range): del range if x.size > 2: sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) sigma = np.std(x) if sigma > 0.0: temp = x - np.mean(x) np.true_divide(temp, sigma, temp) np.power(temp, 3, temp) g1 = np.mean(temp) return x.ptp() / (1.0 + np.log2(x.size) + np.log2(1.0 + np.absolute(g1) / sg1)) return 0.0 def _hist_bin_fd(x, range): del range iqr = np.subtract(*np.percentile(x, [75, 25])) return 2.0 * iqr * x.size ** (-1.0 / 3.0) def _hist_bin_auto(x, range): fd_bw = _hist_bin_fd(x, range) sturges_bw = _hist_bin_sturges(x, range) del range if fd_bw: return min(fd_bw, sturges_bw) else: return sturges_bw _hist_bin_selectors = {'stone': _hist_bin_stone, 'auto': _hist_bin_auto, 'doane': _hist_bin_doane, 'fd': _hist_bin_fd, 'rice': _hist_bin_rice, 'scott': _hist_bin_scott, 'sqrt': _hist_bin_sqrt, 'sturges': _hist_bin_sturges} def _ravel_and_check_weights(a, weights): a = np.asarray(a) if a.dtype == np.bool_: warnings.warn("Converting input from {} to {} for compatibility." .format(a.dtype, np.uint8), RuntimeWarning, stacklevel=2) a = a.astype(np.uint8) if weights is not None: weights = np.asarray(weights) if weights.shape != a.shape: raise ValueError( 'weights should have the same shape as a.') weights = weights.ravel() a = a.ravel() return a, weights def _get_outer_edges(a, range): if range is not None: first_edge, last_edge = range if first_edge > last_edge: raise ValueError( 'max must be larger than min in range parameter.') if not (np.isfinite(first_edge) and np.isfinite(last_edge)): raise ValueError( "supplied range of [{}, {}] is not finite".format(first_edge, last_edge)) elif a.size == 0: first_edge, last_edge = 0, 1 else: first_edge, last_edge = a.min(), a.max() if not (np.isfinite(first_edge) and np.isfinite(last_edge)): raise ValueError( "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge)) if first_edge == last_edge: first_edge = first_edge - 0.5 last_edge = last_edge + 0.5 return first_edge, last_edge
Apache License 2.0
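A short sketch of why the unsigned cast matters, using int8 inputs so the overflow is easy to see (the dtype is illustrative; the function handles all signed integer widths in its lookup table):

import numpy as np

hi = np.array([127], dtype=np.int8)
lo = np.array([-128], dtype=np.int8)

# The true difference 127 - (-128) = 255 does not fit in int8 and wraps:
print(np.subtract(hi, lo))         # [-1]  (wrapped in int8)
print(_unsigned_subtract(hi, lo))  # [255] (exact, as uint8)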
sarcasm/compdb
tests/unit/test_headerdb.py
HeaderDB.complement
python
def complement(self, compile_commands):
    database = InMemoryCompilationDatabase(compile_commands)
    result = list(Complementer().complement([[database]])[0]
                  .get_all_compile_commands())
    result.sort(key=operator.attrgetter('file', 'directory', 'arguments'))
    return result
The output is returned sorted in the following order: file, directory, arguments.
https://github.com/sarcasm/compdb/blob/62b4c0f6daa0654256a0ae9036d59cd5026f280f/tests/unit/test_headerdb.py#L47-L56
from __future__ import print_function, unicode_literals, absolute_import import operator import os import unittest from compdb.backend.memory import InMemoryCompilationDatabase from compdb.complementer.headerdb import ( Complementer, subword_split, ) from compdb.models import CompileCommand class Utils(unittest.TestCase): def test_subword_split(self): self.assertEqual(["Camel", "Case"], subword_split("CamelCase")) self.assertEqual(["camel", "Back"], subword_split("camelBack")) self.assertEqual(["String", "Ref"], subword_split("StringRef")) self.assertEqual(["Gst", "Buffer"], subword_split("GstBuffer")) self.assertEqual(["NS", "String"], subword_split("NSString")) self.assertEqual(["ALLCAP"], subword_split("ALLCAP")) self.assertEqual(["nocap"], subword_split("nocap")) self.assertEqual(["One", "Two", "Three", "Four"], subword_split("OneTwoThreeFour")) self.assertEqual(["Foo1", "Bar2"], subword_split("Foo1Bar2")) self.assertEqual(["123"], subword_split("123")) self.assertEqual(["lowercase", "underscore"], subword_split("lowercase_underscore")) self.assertEqual(["Funny", "Case", "dash"], subword_split("FunnyCase-dash")) self.assertEqual(["underscore"], subword_split("_underscore_")) self.assertEqual(["with", "dot"], subword_split("with.dot")) self.assertEqual(["with", "space"], subword_split("with space")) class HeaderDB(unittest.TestCase): LOCAL_PATH = os.path.abspath(os.path.dirname(__file__)) TEST_DIR = os.path.join(LOCAL_PATH, 'headerdb') def srcdir(self, dirname): return os.path.join(self.TEST_DIR, dirname)
MIT License
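A sketch of the multi-key sort used by the helper above, on hypothetical stand-in records: operator.attrgetter with several names yields a tuple key, so commands are ordered by file, then directory, then arguments:

import operator
from collections import namedtuple

# Hypothetical stand-in for the project's CompileCommand model.
Cmd = namedtuple('Cmd', ['file', 'directory', 'arguments'])
cmds = [
    Cmd('b.cpp', '/src', ['c++', '-c', 'b.cpp']),
    Cmd('a.cpp', '/src', ['c++', '-c', 'a.cpp']),
    Cmd('a.cpp', '/lib', ['c++', '-c', 'a.cpp']),
]
cmds.sort(key=operator.attrgetter('file', 'directory', 'arguments'))
assert [(c.file, c.directory) for c in cmds] == [
    ('a.cpp', '/lib'), ('a.cpp', '/src'), ('b.cpp', '/src')]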
jarryshaw/pypcapkit
pcapkit/vendor/ftp/return_code.py
ReturnCode.count
python
def count(self, soup):
Count field records.
https://github.com/jarryshaw/pypcapkit/blob/cfa778f606a111b2dc6eb57d1af054ba2689b578/pcapkit/vendor/ftp/return_code.py#L133-L148
import bs4 from pcapkit.vendor.default import Vendor __all__ = ['ReturnCode'] LINE = lambda NAME, DOCS, FLAG, ENUM: f'''\ # -*- coding: utf-8 -*- # pylint: disable=line-too-long """{DOCS}""" from aenum import IntEnum, extend_enum __all__ = ['{NAME}'] #: Response kind; whether the response is good, bad or incomplete. KIND = {{ '1': 'Positive Preliminary', '2': 'Positive Completion', '3': 'Positive Intermediate', '4': 'Transient Negative Completion', '5': 'Permanent Negative Completion', '6': 'Protected', }} #: Grouping information. INFO = {{ '0': 'Syntax', '1': 'Information', '2': 'Connections', '3': 'Authentication and accounting', '4': 'Unspecified', # [RFC 959] '5': 'File system', }} class {NAME}(IntEnum): """[{NAME}] {DOCS}""" {ENUM} @staticmethod def get(key, default=-1): """Backport support for original codes.""" if isinstance(key, int): return {NAME}(key) if key not in {NAME}._member_map_: # pylint: disable=no-member extend_enum({NAME}, key, default) return {NAME}[key] @classmethod def _missing_(cls, value): """Lookup function used when value is not found.""" if not ({FLAG}): raise ValueError('%r is not a valid %s' % (value, cls.__name__)) code = str(value) kind = KIND.get(code[0], 'Reserved') info = INFO.get(code[1], 'Reserved') extend_enum(cls, '%s - %s [%s]' % (kind, info, value), value) return cls(value) ''' class ReturnCode(Vendor): FLAG = 'isinstance(value, int) and 100 <= value <= 659' LINK = 'https://en.wikipedia.org/wiki/List_of_FTP_server_return_codes' def request(self, text): return bs4.BeautifulSoup(text, 'html5lib') def context(self, soup): enum = self.process(soup) ENUM = '\n\n '.join(map(lambda s: s.rstrip(), enum)) return LINE(self.NAME, self.DOCS, self.FLAG, ENUM) def process(self, soup): table = soup.find_all('table', class_='wikitable')[2] content = filter(lambda item: isinstance(item, bs4.element.Tag), table.tbody) next(content) enum = list() for item in content: line = item.find_all('td') code = ' '.join(line[0].stripped_strings) if len(code) != 3: continue sufs = self.wrap_comment('. '.join(map(lambda s: s.strip(), ' '.join(line[1].stripped_strings).split('.')))) pref = f"CODE_{code} = {code}" enum.append(f'#: {sufs}\n {pref}') return enum
BSD 3-Clause New or Revised License
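The count body itself is truncated in this record, but the generated enum's _missing_ hook is shown in full in the context. A sketch of its label construction: the first digit of a return code selects the response kind, the second the grouping info:

# Replicates the kind/info lookup from the generated _missing_ hook above.
KIND = {'1': 'Positive Preliminary', '2': 'Positive Completion',
        '3': 'Positive Intermediate', '4': 'Transient Negative Completion',
        '5': 'Permanent Negative Completion', '6': 'Protected'}
INFO = {'0': 'Syntax', '1': 'Information', '2': 'Connections',
        '3': 'Authentication and accounting', '4': 'Unspecified',
        '5': 'File system'}

def label(value):
    code = str(value)
    kind = KIND.get(code[0], 'Reserved')
    info = INFO.get(code[1], 'Reserved')
    return '%s - %s [%s]' % (kind, info, value)

assert label(227) == 'Positive Completion - Connections [227]'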
mcedit/pymclevel
gprof2dot.py
Profile.integrate
python
def integrate(self, outevent, inevent):
    assert outevent not in self
    for function in self.functions.itervalues():
        assert outevent not in function
        assert inevent in function
        for call in function.calls.itervalues():
            assert outevent not in call
            if call.callee_id != function.id:
                assert call.ratio is not None
    for cycle in self.cycles:
        total = inevent.null()
        for function in self.functions.itervalues():
            total = inevent.aggregate(total, function[inevent])
        self[inevent] = total
    total = inevent.null()
    for function in self.functions.itervalues():
        total = inevent.aggregate(total, function[inevent])
        self._integrate_function(function, outevent, inevent)
    self[outevent] = total
Propagate function time ratio along the function calls.

Must be called after finding the cycles.

See also:
- http://citeseer.ist.psu.edu/graham82gprof.html
https://github.com/mcedit/pymclevel/blob/8bf7b3d76479e007a51f3055198a8bcddb626c84/gprof2dot.py#L319-L350
__author__ = "Jose Fonseca" __version__ = "1.0" import sys import math import os.path import re import textwrap import optparse import xml.parsers.expat def times(x): return u"%u\xd7" % (x,) def percentage(p): return "%.02f%%" % (p * 100.0,) def add(a, b): return a + b def equal(a, b): if a == b: return a else: return None def fail(a, b): assert False tol = 2 ** -23 def ratio(numerator, denominator): try: ratio = float(numerator) / float(denominator) except ZeroDivisionError: return 1.0 if ratio < 0.0: if ratio < -tol: sys.stderr.write('warning: negative ratio (%s/%s)\n' % (numerator, denominator)) return 0.0 if ratio > 1.0: if ratio > 1.0 + tol: sys.stderr.write('warning: ratio greater than one (%s/%s)\n' % (numerator, denominator)) return 1.0 return ratio class UndefinedEvent(Exception): def __init__(self, event): Exception.__init__(self) self.event = event def __str__(self): return 'unspecified event %s' % self.event.name class Event(object): def __init__(self, name, null, aggregator, formatter=str): self.name = name self._null = null self._aggregator = aggregator self._formatter = formatter def __eq__(self, other): return self is other def __hash__(self): return id(self) def null(self): return self._null def aggregate(self, val1, val2): assert val1 is not None assert val2 is not None return self._aggregator(val1, val2) def format(self, val): assert val is not None return self._formatter(val) CALLS = Event("Calls", 0, add, times) SAMPLES = Event("Samples", 0, add) SAMPLES2 = Event("Samples", 0, add) TIME = Event("Time", 0.0, add, lambda x: '(' + str(x) + ')') TIME_RATIO = Event("Time ratio", 0.0, add, lambda x: '(' + percentage(x) + ')') TOTAL_TIME = Event("Total time", 0.0, fail) TOTAL_TIME_RATIO = Event("Total time ratio", 0.0, fail, percentage) class Object(object): def __init__(self, events=None): if events is None: self.events = {} else: self.events = events def __hash__(self): return id(self) def __eq__(self, other): return self is other def __contains__(self, event): return event in self.events def __getitem__(self, event): try: return self.events[event] except KeyError: raise UndefinedEvent(event) def __setitem__(self, event, value): if value is None: if event in self.events: del self.events[event] else: self.events[event] = value class Call(Object): def __init__(self, callee_id): Object.__init__(self) self.callee_id = callee_id self.ratio = None self.weight = None class Function(Object): def __init__(self, id, name): Object.__init__(self) self.id = id self.name = name self.module = None self.process = None self.calls = {} self.called = None self.weight = None self.cycle = None def add_call(self, call): if call.callee_id in self.calls: sys.stderr.write('warning: overwriting call from function %s to %s\n' % (str(self.id), str(call.callee_id))) self.calls[call.callee_id] = call def __repr__(self): return self.name class Cycle(Object): def __init__(self): Object.__init__(self) self.functions = set() def add_function(self, function): assert function not in self.functions self.functions.add(function) if function.cycle is not None: for other in function.cycle.functions: if function not in self.functions: self.add_function(other) function.cycle = self class Profile(Object): def __init__(self): Object.__init__(self) self.functions = {} self.cycles = [] def add_function(self, function): if function.id in self.functions: sys.stderr.write('warning: overwriting function %s (id %s)\n' % (function.name, str(function.id))) self.functions[function.id] = function def add_cycle(self, cycle): 
self.cycles.append(cycle) def validate(self): for function in self.functions.itervalues(): for callee_id in function.calls.keys(): assert function.calls[callee_id].callee_id == callee_id if callee_id not in self.functions: sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name)) del function.calls[callee_id] def find_cycles(self): visited = set() for function in self.functions.itervalues(): if function not in visited: self._tarjan(function, 0, [], {}, {}, visited) cycles = [] for function in self.functions.itervalues(): if function.cycle is not None and function.cycle not in cycles: cycles.append(function.cycle) self.cycles = cycles if 0: for cycle in cycles: sys.stderr.write("Cycle:\n") for member in cycle.functions: sys.stderr.write("\tFunction %s\n" % member.name) def _tarjan(self, function, order, stack, orders, lowlinks, visited): visited.add(function) orders[function] = order lowlinks[function] = order order += 1 pos = len(stack) stack.append(function) for call in function.calls.itervalues(): callee = self.functions[call.callee_id] if callee not in orders: order = self._tarjan(callee, order, stack, orders, lowlinks, visited) lowlinks[function] = min(lowlinks[function], lowlinks[callee]) elif callee in stack: lowlinks[function] = min(lowlinks[function], orders[callee]) if lowlinks[function] == orders[function]: members = stack[pos:] del stack[pos:] if len(members) > 1: cycle = Cycle() for member in members: cycle.add_function(member) return order def call_ratios(self, event): cycle_totals = {} for cycle in self.cycles: cycle_totals[cycle] = 0.0 function_totals = {} for function in self.functions.itervalues(): function_totals[function] = 0.0 for function in self.functions.itervalues(): for call in function.calls.itervalues(): if call.callee_id != function.id: callee = self.functions[call.callee_id] function_totals[callee] += call[event] if callee.cycle is not None and callee.cycle is not function.cycle: cycle_totals[callee.cycle] += call[event] for function in self.functions.itervalues(): for call in function.calls.itervalues(): assert call.ratio is None if call.callee_id != function.id: callee = self.functions[call.callee_id] if callee.cycle is not None and callee.cycle is not function.cycle: total = cycle_totals[callee.cycle] else: total = function_totals[callee] call.ratio = ratio(call[event], total)
ISC License
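The integration builds on the clamped ratio() helper defined earlier in this module (Python 2 era code, but the arithmetic is version-neutral). A small sketch of its behavior:

print(ratio(25, 100))   # 0.25
print(ratio(5, 0))      # 1.0 (ZeroDivisionError treated as "all of it")
print(ratio(101, 100))  # 1.0 (clamped; warns if beyond float tolerance)
print(ratio(-1, 100))   # 0.0 (clamped; warns if beyond float tolerance)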
samschott/maestral
src/maestral/daemon.py
start_maestral_daemon_process
python
def start_maestral_daemon_process(
    config_name: str = "maestral", timeout: float = 20
) -> Start:
    if is_running(config_name):
        return Start.AlreadyRunning

    cc = quote(config_name).strip("'")
    script = f'import maestral.daemon; maestral.daemon.start_maestral_daemon("{cc}")'

    env = os.environ.copy()
    env.update(ENV)

    pid = os.spawnve(os.P_NOWAIT, sys.executable, [sys.executable, "-c", script], env)

    try:
        wait_for_startup(config_name, timeout)
    except Exception as exc:
        from .logging import scoped_logger, setup_logging

        setup_logging(config_name, log_to_stderr=False)
        clogger = scoped_logger(__name__, config_name)
        clogger.error("Could not communicate with daemon", exc_info=exc_info_tuple(exc))

        try:
            os.kill(pid, 0)
            clogger.error("Daemon is running but not responsive, killing now")
        except OSError:
            clogger.error("Daemon quit unexpectedly")
        else:
            os.kill(pid, signal.SIGTERM)

        return Start.Failed
    else:
        return Start.Ok
Starts the Maestral daemon in a new process by calling :func:`start_maestral_daemon`.

Startup is race free: there will never be more than one daemon running for the same
config name. This function will use :obj:`sys.executable` as a Python executable to
start the daemon. Environment variables from the current process will be preserved
and updated with the environment variables defined in :const:`constants.ENV`.

:param config_name: The name of the Maestral configuration to use.
:param timeout: Time in sec to wait for daemon to start.
:returns: :attr:`Start.Ok` if successful, :attr:`Start.AlreadyRunning` if the daemon
    was already running or :attr:`Start.Failed` if startup failed. It is possible
    that :attr:`Start.Ok` may be returned instead of :attr:`Start.AlreadyRunning`
    in case of a race but the daemon is nevertheless started only once.
https://github.com/samschott/maestral/blob/a0cd0ebbfecae65d71337fc35a54d1f3fab7ab5a/src/maestral/daemon.py#L499-L554
import sys import os import time import signal import enum import threading import fcntl import struct import warnings import argparse import re from pprint import pformat from shlex import quote from typing import Optional, Any, Union, Dict, Iterable, Type, TYPE_CHECKING from types import TracebackType import Pyro5 from Pyro5.errors import CommunicationError from Pyro5.api import Daemon, Proxy, expose, register_dict_to_class import sdnotify from fasteners import InterProcessLock from .errors import SYNC_ERRORS, GENERAL_ERRORS, MaestralApiError from .utils import exc_info_tuple from .utils.appdirs import get_runtime_path from .constants import IS_MACOS, ENV if TYPE_CHECKING: from .main import Maestral __all__ = [ "Stop", "Start", "Lock", "maestral_lock", "get_maestral_pid", "sockpath_for_config", "lockpath_for_config", "wait_for_startup", "is_running", "freeze_support", "start_maestral_daemon", "start_maestral_daemon_process", "stop_maestral_daemon_process", "MaestralProxy", "CommunicationError", ] INVOCATION_ID = os.getenv("INVOCATION_ID") NOTIFY_SOCKET = os.getenv("NOTIFY_SOCKET") WATCHDOG_PID = os.getenv("WATCHDOG_PID") WATCHDOG_USEC = os.getenv("WATCHDOG_USEC") IS_WATCHDOG = WATCHDOG_USEC and ( WATCHDOG_PID is None or int(WATCHDOG_PID) == os.getpid() ) URI = "PYRO:maestral.{0}@{1}" Pyro5.config.THREADPOOL_SIZE_MIN = 2 def freeze_support() -> None: parser = argparse.ArgumentParser(add_help=False) parser.add_argument("-c") parsed_args, _ = parser.parse_known_args() if parsed_args.c: template = r'.*start_maestral_daemon\("(?P<config_name>\S+)"\).*' match = re.match(template, parsed_args.c) if match: start_maestral_daemon(match["config_name"]) sys.exit() class Stop(enum.Enum): Ok = 0 Killed = 1 NotRunning = 2 Failed = 3 class Start(enum.Enum): Ok = 0 AlreadyRunning = 1 Failed = 2 def serpent_deserialize_api_error(class_name: str, d: dict) -> MaestralApiError: import maestral.errors cls = eval(class_name) err = cls(*d["args"]) for a_name, a_value in d["attributes"].items(): setattr(err, a_name, a_value) return err for err_cls in (*SYNC_ERRORS, *GENERAL_ERRORS): register_dict_to_class( err_cls.__module__ + "." 
+ err_cls.__name__, serpent_deserialize_api_error ) class Lock: _instances: Dict[str, "Lock"] = {} _singleton_lock = threading.Lock() @classmethod def singleton(cls, path: str) -> "Lock": with cls._singleton_lock: path = os.path.abspath(path) if path not in cls._instances: cls._instances[path] = cls(path) return cls._instances[path] def __init__(self, path: str) -> None: self.path = path self.pid = os.getpid() self._external_lock = InterProcessLock(self.path) self._lock = threading.RLock() def acquire(self) -> bool: with self._lock: if self._external_lock.acquired: return False return self._external_lock.acquire(blocking=False) def release(self) -> None: with self._lock: if not self._external_lock.acquired: raise RuntimeError( "Cannot release a lock, it was acquired by a different process" ) self._external_lock.release() def locked(self) -> bool: with self._lock: gotten = self.acquire() if gotten: self.release() return not gotten def locking_pid(self) -> Optional[int]: with self._lock: if self._external_lock.acquired: return self.pid try: fh = open(self._external_lock.path, "a") except OSError: return None if IS_MACOS: fmt = "qqihh" pid_index = 2 flock = struct.pack(fmt, 0, 0, 0, fcntl.F_WRLCK, 0) else: fmt = "hhqqih" pid_index = 4 flock = struct.pack(fmt, fcntl.F_WRLCK, 0, 0, 0, 0, 0) lockdata = fcntl.fcntl(fh.fileno(), fcntl.F_GETLK, flock) lockdata_list = struct.unpack(fmt, lockdata) pid = lockdata_list[pid_index] if pid > 0: return pid return None def _send_signal(pid: int, sig: int) -> None: try: os.kill(pid, sig) except ProcessLookupError: pass def maestral_lock(config_name: str) -> Lock: name = f"{config_name}.lock" path = get_runtime_path("maestral") return Lock.singleton(os.path.join(path, name)) def sockpath_for_config(config_name: str) -> str: return get_runtime_path("maestral", f"{config_name}.sock") def lockpath_for_config(config_name: str) -> str: return get_runtime_path("maestral", f"{config_name}.lock") def get_maestral_pid(config_name: str) -> Optional[int]: return maestral_lock(config_name).locking_pid() def is_running(config_name: str) -> bool: return maestral_lock(config_name).locked() def wait_for_startup(config_name: str, timeout: float = 20) -> None: sock_name = sockpath_for_config(config_name) maestral_daemon = Proxy(URI.format(config_name, "./u:" + sock_name)) t0 = time.time() while True: try: maestral_daemon._pyroBind() return except Exception as exc: if time.time() - t0 > timeout: raise exc else: time.sleep(0.2) finally: maestral_daemon._pyroRelease() def start_maestral_daemon( config_name: str = "maestral", log_to_stderr: bool = False ) -> None: import asyncio from .main import Maestral from .logging import scoped_logger, setup_logging setup_logging(config_name, log_to_stderr) dlogger = scoped_logger(__name__, config_name) sd_notifier = sdnotify.SystemdNotifier() dlogger.info("Starting daemon") lock = maestral_lock(config_name) try: if threading.current_thread() is not threading.main_thread(): dlogger.error("Must run daemon in main thread") raise RuntimeError("Must run daemon in main thread") dlogger.debug("Environment:\n%s", pformat(os.environ.copy())) if lock.acquire(): dlogger.debug("Acquired daemon lock: %r", lock.path) else: dlogger.error("Could not acquire lock, daemon is already running") return os.nice(10) if IS_MACOS: dlogger.debug("Integrating with CFEventLoop") from rubicon.objc.eventloop import EventLoopPolicy asyncio.set_event_loop_policy(EventLoopPolicy()) loop = asyncio.get_event_loop() if NOTIFY_SOCKET: dlogger.debug("Running as systemd notify 
service") dlogger.debug("NOTIFY_SOCKET = %s", NOTIFY_SOCKET) sd_notifier.notify("READY=1") if IS_WATCHDOG and WATCHDOG_USEC: async def periodic_watchdog() -> None: if WATCHDOG_USEC: sleep = int(WATCHDOG_USEC) while True: sd_notifier.notify("WATCHDOG=1") await asyncio.sleep(sleep / (2 * 10 ** 6)) dlogger.debug("Running as systemd watchdog service") dlogger.debug("WATCHDOG_USEC = %s", WATCHDOG_USEC) dlogger.debug("WATCHDOG_PID = %s", WATCHDOG_PID) loop.create_task(periodic_watchdog()) sockpath = sockpath_for_config(config_name) dlogger.debug("Socket path: %r", sockpath) try: os.remove(sockpath) except (FileNotFoundError, NotADirectoryError): pass dlogger.debug("Creating Pyro daemon") maestral_daemon = expose(Maestral)(config_name, log_to_stderr=log_to_stderr) dlogger.debug("Starting event loop") with Daemon(unixsocket=sockpath) as daemon: daemon.register(maestral_daemon, f"maestral.{config_name}") if daemon.transportServer.housekeeper: daemon.transportServer.housekeeper.waittime = 20 for socket in daemon.sockets: loop.add_reader(socket.fileno(), daemon.events, daemon.sockets) signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT) for s in signals: loop.add_signal_handler(s, maestral_daemon.shutdown_daemon) loop.run_until_complete(maestral_daemon.shutdown_complete) for socket in daemon.sockets: loop.remove_reader(socket.fileno()) daemon.transportServer.housekeeper = None except Exception as exc: dlogger.error(exc.args[0], exc_info=True) finally: if NOTIFY_SOCKET: sd_notifier.notify("STOPPING=1") lock.release()
MIT License
getnikola/nikola
nikola/nikola.py
Nikola._filter_duplicate_plugins
python
def _filter_duplicate_plugins(self, plugin_list):
    def plugin_position_in_places(plugin):
        for i, place in enumerate(self._plugin_places):
            if plugin[0].startswith(place):
                return i
        utils.LOGGER.warn("Duplicate plugin found in unexpected location: {}".format(plugin[0]))
        return len(self._plugin_places)

    plugin_dict = defaultdict(list)
    for data in plugin_list:
        plugin_dict[data[2].name].append(data)
    result = []
    for _, plugins in plugin_dict.items():
        if len(plugins) > 1:
            plugins.sort(key=plugin_position_in_places)
            utils.LOGGER.debug("Plugin {} exists in multiple places, using {}".format(
                plugins[-1][2].name, plugins[-1][0]))
        result.append(plugins[-1])
    return result
Find repeated plugins and discard the less local copy.
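A minimal standalone sketch of the dedup rule above, with hypothetical plugin tuples of the form (path, module, info) and an assumed _plugin_places ordering from built-in to site-local (the sort plus [-1] keeps the entry from the latest-listed place):

    from collections import defaultdict, namedtuple

    PluginInfo = namedtuple("PluginInfo", "name")

    # Hypothetical candidate tuples mirroring yapsy's (path, module, info) triples.
    plugin_places = ["/usr/share/nikola/plugins", "/site/plugins"]
    candidates = [
        ("/usr/share/nikola/plugins/task_x", None, PluginInfo("task_x")),
        ("/site/plugins/task_x", None, PluginInfo("task_x")),
    ]

    def position(plugin):
        for i, place in enumerate(plugin_places):
            if plugin[0].startswith(place):
                return i
        return len(plugin_places)

    by_name = defaultdict(list)
    for cand in candidates:
        by_name[cand[2].name].append(cand)

    kept = [sorted(plugins, key=position)[-1] for plugins in by_name.values()]
    print(kept[0][0])  # -> /site/plugins/task_x (the more local copy wins)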
https://github.com/getnikola/nikola/blob/334000e049c42fff52170563e94592d0c886acc4/nikola/nikola.py#L997-L1019
import datetime import io import json import functools import logging import operator import os import sys import mimetypes from collections import defaultdict from copy import copy from urllib.parse import urlparse, urlsplit, urlunsplit, urljoin, unquote, parse_qs import dateutil.tz import lxml.etree import lxml.html import natsort import PyRSS2Gen as rss from pkg_resources import resource_filename from blinker import signal from yapsy.PluginManager import PluginManager from . import DEBUG, SHOW_TRACEBACKS, filters, utils, hierarchy_utils, shortcodes from . import metadata_extractors from .metadata_extractors import default_metadata_extractors_by from .post import Post from .plugin_categories import ( Command, LateTask, PageCompiler, CompilerExtension, MarkdownExtension, RestExtension, MetadataExtractor, ShortcodePlugin, Task, TaskMultiplier, TemplateSystem, SignalHandler, ConfigPlugin, CommentSystem, PostScanner, Taxonomy, ) from .state import Persistor try: import pyphen except ImportError: pyphen = None if DEBUG: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.ERROR) DEFAULT_INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>' DEFAULT_FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>' config_changed = utils.config_changed __all__ = ('Nikola',) LEGAL_VALUES = { 'DEFAULT_THEME': 'bootblog4', 'COMMENT_SYSTEM': [ 'disqus', 'facebook', 'intensedebate', 'isso', 'muut', 'commento', 'utterances', ], 'TRANSLATIONS': { 'af': 'Afrikaans', 'ar': 'Arabic', 'az': 'Azerbaijani', 'bg': 'Bulgarian', 'bs': 'Bosnian', 'ca': 'Catalan', ('cs', 'cz'): 'Czech', 'da': 'Danish', 'de': 'German', ('el', '!gr'): 'Greek', 'en': 'English', 'eo': 'Esperanto', 'es': 'Spanish', 'et': 'Estonian', 'eu': 'Basque', 'fa': 'Persian', 'fi': 'Finnish', 'fr': 'French', 'fur': 'Friulian', 'gl': 'Galician', 'he': 'Hebrew', 'hi': 'Hindi', 'hr': 'Croatian', 'hu': 'Hungarian', 'ia': 'Interlingua', 'id': 'Indonesian', 'it': 'Italian', ('ja', '!jp'): 'Japanese', 'ko': 'Korean', 'lt': 'Lithuanian', 'mi': 'Maori', 'ml': 'Malayalam', 'mr': 'Marathi', 'nb': 'Norwegian (Bokmål)', 'nl': 'Dutch', 'oc': 'Occitan', 'pa': 'Punjabi', 'pl': 'Polish', 'pt': 'Portuguese', 'pt_br': 'Portuguese (Brazil)', 'ru': 'Russian', 'sk': 'Slovak', 'sl': 'Slovene', 'sq': 'Albanian', 'sr': 'Serbian (Cyrillic)', 'sr_latin': 'Serbian (Latin)', 'sv': 'Swedish', 'te': 'Telugu', 'th': 'Thai', ('tr', '!tr_TR'): 'Turkish', 'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese', 'zh_cn': 'Chinese (Simplified)', 'zh_tw': 'Chinese (Traditional)' }, '_TRANSLATIONS_WITH_COUNTRY_SPECIFIERS': { }, 'LOCALES_BASE': { 'sr_latin': 'sr_Latn', }, 'RTL_LANGUAGES': ('ar', 'fa', 'he', 'ur'), 'LUXON_LOCALES': defaultdict(lambda: 'en', **{ 'af': 'af', 'ar': 'ar', 'az': 'az', 'bg': 'bg', 'bn': 'bn', 'bs': 'bs', 'ca': 'ca', 'cs': 'cs', 'cz': 'cs', 'da': 'da', 'de': 'de', 'el': 'el', 'en': 'en', 'eo': 'eo', 'es': 'es', 'et': 'et', 'eu': 'eu', 'fa': 'fa', 'fi': 'fi', 'fr': 'fr', 'fur': 'fur', 'gl': 'gl', 'hi': 'hi', 'he': 'he', 'hr': 'hr', 'hu': 'hu', 'ia': 'ia', 'id': 'id', 'it': 'it', 'ja': 'ja', 'ko': 'ko', 'lt': 'lt', 'mi': 'mi', 'ml': 'ml', 'mr': 'mr', 'nb': 'nb', 'nl': 'nl', 'oc': 'oc', 'pa': 'pa', 'pl': 'pl', 'pt': 'pt', 'pt_br': 'pt-BR', 'ru': 'ru', 'sk': 'sk', 'sl': 'sl', 'sq': 'sq', 'sr': 'sr-Cyrl', 'sr_latin': 'sr-Latn', 'sv': 'sv', 'te': 'te', 'tr': 'tr', 'th': 'th', 'uk': 'uk', 'ur': 'ur', 'vi': 'vi', 'zh_cn': 'zh-CN', 'zh_tw': 'zh-TW' }), 'MOMENTJS_LOCALES': defaultdict(lambda: 'en', 
**{ 'af': 'af', 'ar': 'ar', 'az': 'az', 'bg': 'bg', 'bn': 'bn', 'bs': 'bs', 'ca': 'ca', 'cs': 'cs', 'cz': 'cs', 'da': 'da', 'de': 'de', 'el': 'el', 'en': 'en', 'eo': 'eo', 'es': 'es', 'et': 'et', 'eu': 'eu', 'fa': 'fa', 'fi': 'fi', 'fr': 'fr', 'gl': 'gl', 'hi': 'hi', 'he': 'he', 'hr': 'hr', 'hu': 'hu', 'id': 'id', 'it': 'it', 'ja': 'ja', 'ko': 'ko', 'lt': 'lt', 'ml': 'ml', 'mr': 'mr', 'nb': 'nb', 'nl': 'nl', 'pa': 'pa-in', 'pl': 'pl', 'pt': 'pt', 'pt_br': 'pt-br', 'ru': 'ru', 'sk': 'sk', 'sl': 'sl', 'sq': 'sq', 'sr': 'sr-cyrl', 'sr_latin': 'sr', 'sv': 'sv', 'te': 'te', 'tr': 'tr', 'th': 'th', 'uk': 'uk', 'ur': 'ur', 'vi': 'vi', 'zh_cn': 'zh-cn', 'zh_tw': 'zh-tw' }), 'PYPHEN_LOCALES': { 'af': 'af', 'bg': 'bg', 'ca': 'ca', 'cs': 'cs', 'cz': 'cs', 'da': 'da', 'de': 'de', 'el': 'el', 'en': 'en_US', 'es': 'es', 'et': 'et', 'fr': 'fr', 'hr': 'hr', 'hu': 'hu', 'it': 'it', 'lt': 'lt', 'nb': 'nb', 'nl': 'nl', 'pl': 'pl', 'pt': 'pt', 'pt_br': 'pt_BR', 'ru': 'ru', 'sk': 'sk', 'sl': 'sl', 'sr': 'sr', 'sv': 'sv', 'te': 'te', 'uk': 'uk', }, 'DOCUTILS_LOCALES': { 'af': 'af', 'ca': 'ca', 'da': 'da', 'de': 'de', 'en': 'en', 'eo': 'eo', 'es': 'es', 'fa': 'fa', 'fi': 'fi', 'fr': 'fr', 'gl': 'gl', 'he': 'he', 'it': 'it', 'ja': 'ja', 'lt': 'lt', 'nl': 'nl', 'pl': 'pl', 'pt': 'pt_br', 'pt_br': 'pt_br', 'ru': 'ru', 'sk': 'sk', 'sv': 'sv', 'zh_cn': 'zh_cn', 'zh_tw': 'zh_tw' }, "METADATA_MAPPING": ["yaml", "toml", "rest_docinfo", "markdown_metadata"], } TAXONOMY_COMPATIBILITY_PLUGIN_NAME_MAP = { "render_archive": ["classify_archive"], "render_authors": ["classify_authors"], "render_indexes": ["classify_page_index", "classify_sections"], "render_tags": ["classify_categories", "classify_tags"], } DEFAULT_TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' def _enclosure(post, lang): enclosure = post.meta('enclosure', lang) if enclosure: try: length = int(post.meta('enclosure_length', lang) or 0) except KeyError: length = 0 except ValueError: utils.LOGGER.warning("Invalid enclosure length for post {0}".format(post.source_path)) length = 0 url = enclosure mime = mimetypes.guess_type(url)[0] return url, length, mime class Nikola(object): def __init__(self, **config): self.path_handlers = { 'slug': self.slug_path, 'post_path': self.post_path, 'root': self.root_path, 'filename': self.filename_path, } self.strict = False self.posts = [] self.all_posts = [] self.posts_per_year = defaultdict(list) self.posts_per_month = defaultdict(list) self.posts_per_tag = defaultdict(list) self.posts_per_category = defaultdict(list) self.tags_per_language = defaultdict(list) self.post_per_file = {} self.timeline = [] self.pages = [] self._scanned = False self._template_system = None self._THEMES = None self._MESSAGES = None self.filters = {} self.debug = DEBUG self.show_tracebacks = SHOW_TRACEBACKS self.colorful = config.pop('__colorful__', False) self.invariant = config.pop('__invariant__', False) self.quiet = config.pop('__quiet__', False) self._doit_config = config.pop('DOIT_CONFIG', {}) self.original_cwd = config.pop('__cwd__', False) self.configuration_filename = config.pop('__configuration_filename__', False) self.configured = bool(config) self.injected_deps = defaultdict(list) self.shortcode_registry = {} self.metadata_extractors_by = default_metadata_extractors_by() self.registered_auto_watched_folders = set() self.rst_transforms = [] self.template_hooks = { 'extra_head': utils.TemplateHookRegistry('extra_head', self), 'body_end': utils.TemplateHookRegistry('body_end', self), 'page_header': utils.TemplateHookRegistry('page_header', 
self), 'menu': utils.TemplateHookRegistry('menu', self), 'menu_alt': utils.TemplateHookRegistry('menu_alt', self), 'page_footer': utils.TemplateHookRegistry('page_footer', self), } utils.generic_rss_renderer = self.generic_rss_renderer self.config = { 'ARCHIVE_PATH': "", 'ARCHIVE_FILENAME': "archive.html", 'ARCHIVES_ARE_INDEXES': False, 'AUTHOR_PATH': 'authors', 'AUTHOR_PAGES_ARE_INDEXES': False, 'AUTHOR_PAGES_DESCRIPTIONS': {}, 'AUTHORLIST_MINIMUM_POSTS': 1, 'BLOG_AUTHOR': 'Default Author', 'BLOG_TITLE': 'Default Title', 'BLOG_EMAIL': '', 'BLOG_DESCRIPTION': 'Default Description', 'BODY_END': "", 'CACHE_FOLDER': 'cache', 'CATEGORIES_INDEX_PATH': '', 'CATEGORY_PATH': None, 'CATEGORY_PAGES_ARE_INDEXES': None, 'CATEGORY_DESCRIPTIONS': {}, 'CATEGORY_TITLES': {}, 'CATEGORY_PREFIX': 'cat_', 'CATEGORY_ALLOW_HIERARCHIES': False, 'CATEGORY_OUTPUT_FLAT_HIERARCHY': False, 'CATEGORY_DESTPATH_AS_DEFAULT': False, 'CATEGORY_DESTPATH_TRIM_PREFIX': False, 'CATEGORY_DESTPATH_FIRST_DIRECTORY_ONLY': True, 'CATEGORY_DESTPATH_NAMES': {}, 'CATEGORY_PAGES_FOLLOW_DESTPATH': False, 'CATEGORY_TRANSLATIONS': [], 'CATEGORY_TRANSLATIONS_ADD_DEFAULTS': False, 'CODE_COLOR_SCHEME': 'default', 'COMMENT_SYSTEM': 'disqus', 'COMMENTS_IN_GALLERIES': False, 'COMMENTS_IN_PAGES': False, 'COMPILERS': { "rest": ('.txt', '.rst'), "markdown": ('.md', '.mdown', '.markdown'), "textile": ('.textile',), "txt2tags": ('.t2t',), "bbcode": ('.bb',), "wiki": ('.wiki',), "ipynb": ('.ipynb',), "html": ('.html', '.htm') }, 'CONTENT_FOOTER': '', 'CONTENT_FOOTER_FORMATS': {}, 'RSS_COPYRIGHT': '', 'RSS_COPYRIGHT_PLAIN': '', 'RSS_COPYRIGHT_FORMATS': {}, 'COPY_SOURCES': True, 'CREATE_ARCHIVE_NAVIGATION': False, 'CREATE_MONTHLY_ARCHIVE': False, 'CREATE_SINGLE_ARCHIVE': False, 'CREATE_FULL_ARCHIVES': False, 'CREATE_DAILY_ARCHIVE': False, 'DATE_FORMAT': 'yyyy-MM-dd HH:mm', 'DISABLE_INDEXES': False, 'DISABLE_MAIN_ATOM_FEED': False, 'DISABLE_MAIN_RSS_FEED': False, 'MOMENTJS_DATE_FORMAT': 'YYYY-MM-DD HH:mm', 'LUXON_DATE_FORMAT': {}, 'DATE_FANCINESS': 0, 'DEFAULT_LANG': "en", 'DEPLOY_COMMANDS': {'default': []}, 'DISABLED_PLUGINS': [], 'EXTRA_PLUGINS_DIRS': [], 'EXTRA_THEMES_DIRS': [], 'COMMENT_SYSTEM_ID': 'nikolademo', 'ENABLE_AUTHOR_PAGES': True, 'EXIF_WHITELIST': {}, 'EXTRA_HEAD_DATA': '', 'FAVICONS': (), 'FEED_LENGTH': 10, 'FILE_METADATA_REGEXP': None, 'FILE_METADATA_UNSLUGIFY_TITLES': True, 'ADDITIONAL_METADATA': {}, 'FILES_FOLDERS': {'files': ''}, 'FILTERS': {}, 'FORCE_ISO8601': False, 'FRONT_INDEX_HEADER': '', 'GALLERY_FOLDERS': {'galleries': 'galleries'}, 'GALLERY_SORT_BY_DATE': True, 'GALLERIES_USE_THUMBNAIL': False, 'GALLERIES_DEFAULT_THUMBNAIL': None, 'GLOBAL_CONTEXT_FILLER': [], 'GZIP_COMMAND': None, 'GZIP_FILES': False, 'GZIP_EXTENSIONS': ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml'), 'HIDDEN_AUTHORS': [], 'HIDDEN_TAGS': [], 'HIDE_REST_DOCINFO': False, 'HIDDEN_CATEGORIES': [], 'HYPHENATE': False, 'IMAGE_FOLDERS': {'images': ''}, 'INDEX_DISPLAY_POST_COUNT': 10, 'INDEX_FILE': 'index.html', 'INDEX_TEASERS': False, 'IMAGE_THUMBNAIL_SIZE': 400, 'IMAGE_THUMBNAIL_FORMAT': '{name}.thumbnail{ext}', 'INDEXES_TITLE': "", 'INDEXES_PAGES': "", 'INDEXES_PAGES_MAIN': False, 'INDEXES_PRETTY_PAGE_URL': False, 'INDEXES_STATIC': True, 'INDEX_PATH': '', 'IPYNB_CONFIG': {}, 'KATEX_AUTO_RENDER': '', 'LICENSE': '', 'LINK_CHECK_WHITELIST': [], 'LISTINGS_FOLDERS': {'listings': 'listings'}, 'LOGO_URL': '', 'DEFAULT_PREVIEW_IMAGE': None, 'NAVIGATION_LINKS': {}, 'NAVIGATION_ALT_LINKS': {}, 'MARKDOWN_EXTENSIONS': ['fenced_code', 'codehilite', 'extra'], 
'MARKDOWN_EXTENSION_CONFIGS': {}, 'MAX_IMAGE_SIZE': 1280, 'MATHJAX_CONFIG': '', 'METADATA_FORMAT': 'nikola', 'METADATA_MAPPING': {}, 'MULTIPLE_AUTHORS_PER_POST': False, 'NEW_POST_DATE_PATH': False, 'NEW_POST_DATE_PATH_FORMAT': '%Y/%m/%d', 'OLD_THEME_SUPPORT': True, 'OUTPUT_FOLDER': 'output', 'POSTS': (("posts/*.txt", "posts", "post.tmpl"),), 'PRESERVE_EXIF_DATA': False, 'PRESERVE_ICC_PROFILES': False, 'PAGES': (("pages/*.txt", "pages", "page.tmpl"),), 'PANDOC_OPTIONS': [], 'PRETTY_URLS': True, 'FUTURE_IS_NOW': False, 'INDEX_READ_MORE_LINK': DEFAULT_INDEX_READ_MORE_LINK, 'REDIRECTIONS': [], 'ROBOTS_EXCLUSIONS': [], 'GENERATE_ATOM': False, 'ATOM_EXTENSION': '.atom', 'ATOM_PATH': '', 'ATOM_FILENAME_BASE': 'index', 'FEED_TEASERS': True, 'FEED_PLAIN': False, 'FEED_READ_MORE_LINK': DEFAULT_FEED_READ_MORE_LINK, 'FEED_LINKS_APPEND_QUERY': False, 'GENERATE_RSS': True, 'RSS_EXTENSION': '.xml', 'RSS_LINK': None, 'RSS_PATH': '', 'RSS_FILENAME_BASE': 'rss', 'SEARCH_FORM': '', 'SHOW_BLOG_TITLE': True, 'SHOW_INDEX_PAGE_NAVIGATION': False, 'SHOW_SOURCELINK': True, 'SHOW_UNTRANSLATED_POSTS': True, 'SLUG_AUTHOR_PATH': True, 'SLUG_TAG_PATH': True, 'SOCIAL_BUTTONS_CODE': '', 'SITE_URL': 'https://example.com/', 'PAGE_INDEX': False, 'SECTION_PATH': '', 'STRIP_INDEXES': True, 'TAG_PATH': 'categories', 'TAG_PAGES_ARE_INDEXES': False, 'TAG_DESCRIPTIONS': {}, 'TAG_TITLES': {}, 'TAG_TRANSLATIONS': [], 'TAG_TRANSLATIONS_ADD_DEFAULTS': False, 'TAGS_INDEX_PATH': '', 'TAGLIST_MINIMUM_POSTS': 1, 'TEMPLATE_FILTERS': {}, 'THEME': LEGAL_VALUES['DEFAULT_THEME'], 'THEME_COLOR': '#5670d4', 'THEME_CONFIG': {}, 'THUMBNAIL_SIZE': 180, 'TRANSLATIONS_PATTERN': DEFAULT_TRANSLATIONS_PATTERN, 'URL_TYPE': 'rel_path', 'USE_BUNDLES': True, 'USE_CDN': False, 'USE_CDN_WARNING': True, 'USE_REST_DOCINFO_METADATA': False, 'USE_FILENAME_AS_TITLE': True, 'USE_KATEX': False, 'USE_SLUGIFY': True, 'USE_TAG_METADATA': True, 'TIMEZONE': 'UTC', 'WARN_ABOUT_TAG_METADATA': True, 'DEPLOY_DRAFTS': True, 'DEPLOY_FUTURE': False, 'SCHEDULE_ALL': False, 'SCHEDULE_RULE': '', 'DEMOTE_HEADERS': 1, 'GITHUB_SOURCE_BRANCH': 'master', 'GITHUB_DEPLOY_BRANCH': 'gh-pages', 'GITHUB_REMOTE_NAME': 'origin', 'GITHUB_COMMIT_SOURCE': False, 'META_GENERATOR_TAG': True, 'REST_FILE_INSERTION_ENABLED': True, 'TYPES_TO_HIDE_TITLE': [], } self._GLOBAL_CONTEXT = {} self.ALL_PAGE_DEPS = {} self.config.update(config) if '__builtins__' in self.config: try: del self.config['__builtins__'] except KeyError: del self.config[b'__builtins__'] self.config['__colorful__'] = self.colorful self.config['__invariant__'] = self.invariant self.config['__quiet__'] = self.quiet self.config['ATOM_PATH'] = self.config['ATOM_PATH'] or self.config['INDEX_PATH'] if not self.config['NAVIGATION_LINKS']: self.config['NAVIGATION_LINKS'] = {self.config['DEFAULT_LANG']: ()} if not self.config['NAVIGATION_ALT_LINKS']: self.config['NAVIGATION_ALT_LINKS'] = {self.config['DEFAULT_LANG']: ()} self.config['TRANSLATIONS'] = self.config.get('TRANSLATIONS', {self.config['DEFAULT_LANG']: ''}) for k, v in self.config['TRANSLATIONS'].items(): if os.path.isabs(v): self.config['TRANSLATIONS'][k] = os.path.relpath(v, '/') utils.TranslatableSetting.default_lang = self.config['DEFAULT_LANG'] self.TRANSLATABLE_SETTINGS = ('BLOG_AUTHOR', 'BLOG_TITLE', 'BLOG_DESCRIPTION', 'LICENSE', 'CONTENT_FOOTER', 'SOCIAL_BUTTONS_CODE', 'SEARCH_FORM', 'BODY_END', 'EXTRA_HEAD_DATA', 'NAVIGATION_LINKS', 'NAVIGATION_ALT_LINKS', 'FRONT_INDEX_HEADER', 'INDEX_READ_MORE_LINK', 'FEED_READ_MORE_LINK', 'INDEXES_TITLE', 'CATEGORY_DESTPATH_NAMES', 
'INDEXES_PAGES', 'INDEXES_PRETTY_PAGE_URL', 'THEME_CONFIG', 'ARCHIVE_PATH', 'ARCHIVE_FILENAME', 'TAG_PATH', 'TAGS_INDEX_PATH', 'CATEGORY_PATH', 'CATEGORIES_INDEX_PATH', 'SECTION_PATH', 'INDEX_PATH', 'ATOM_PATH', 'RSS_PATH', 'RSS_FILENAME_BASE', 'ATOM_FILENAME_BASE', 'AUTHOR_PATH', 'DATE_FORMAT', 'LUXON_DATE_FORMAT', 'MOMENTJS_DATE_FORMAT', 'RSS_COPYRIGHT', 'RSS_COPYRIGHT_PLAIN', 'MARKDOWN_EXTENSION_CONFIGS', ) self._GLOBAL_CONTEXT_TRANSLATABLE = ('blog_author', 'blog_title', 'blog_description', 'license', 'content_footer', 'social_buttons_code', 'search_form', 'body_end', 'extra_head_data', 'date_format', 'js_date_format', 'luxon_date_format', 'front_index_header', 'theme_config', ) self._ALL_PAGE_DEPS_TRANSLATABLE = ('atom_path', 'rss_path', 'rss_filename_base', 'atom_filename_base', ) if not self.config['LUXON_DATE_FORMAT']: self.config['LUXON_DATE_FORMAT'] = {self.config['DEFAULT_LANG']: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}} if 'JS_DATE_FORMAT' in self.config: utils.LOGGER.warning("Moment.js was replaced by Luxon in the default themes, which uses different date formats.") utils.LOGGER.warning("If you’re using a built-in theme, set LUXON_DATE_FORMAT. If your theme uses Moment.js, you can silence this warning by renaming JS_DATE_FORMAT to MOMENTJS_DATE_FORMAT.") utils.LOGGER.warning("Sample Luxon config: LUXON_DATE_FORMAT = " + str(self.config['LUXON_DATE_FORMAT'])) self.config['MOMENTJS_DATE_FORMAT'] = self.config['LUXON_DATE_FORMAT'] if 'MOMENTJS_DATE_FORMAT' in self.config: if isinstance(self.config['MOMENTJS_DATE_FORMAT'], dict): for k in self.config['MOMENTJS_DATE_FORMAT']: self.config['MOMENTJS_DATE_FORMAT'][k] = json.dumps(self.config['MOMENTJS_DATE_FORMAT'][k]) else: self.config['MOMENTJS_DATE_FORMAT'] = json.dumps(self.config['MOMENTJS_DATE_FORMAT']) if 'LUXON_DATE_FORMAT' in self.config: for k in self.config['LUXON_DATE_FORMAT']: self.config['LUXON_DATE_FORMAT'][k] = json.dumps(self.config['LUXON_DATE_FORMAT'][k]) for i in self.TRANSLATABLE_SETTINGS: try: self.config[i] = utils.TranslatableSetting(i, self.config[i], self.config['TRANSLATIONS']) except KeyError: pass if self.config['EXIF_WHITELIST'] and not self.config['PRESERVE_EXIF_DATA']: utils.LOGGER.warning('Setting EXIF_WHITELIST implies PRESERVE_EXIF_DATA is set to True') self.config['PRESERVE_EXIF_DATA'] = True if self.config['PRESERVE_EXIF_DATA'] and not self.config['EXIF_WHITELIST']: utils.LOGGER.warning('You are setting PRESERVE_EXIF_DATA and not EXIF_WHITELIST so EXIF data is not really kept.') if 'UNSLUGIFY_TITLES' in self.config: utils.LOGGER.warning('The UNSLUGIFY_TITLES setting was renamed to FILE_METADATA_UNSLUGIFY_TITLES.') self.config['FILE_METADATA_UNSLUGIFY_TITLES'] = self.config['UNSLUGIFY_TITLES'] if 'TAG_PAGES_TITLES' in self.config: utils.LOGGER.warning('The TAG_PAGES_TITLES setting was renamed to TAG_TITLES.') self.config['TAG_TITLES'] = self.config['TAG_PAGES_TITLES'] if 'TAG_PAGES_DESCRIPTIONS' in self.config: utils.LOGGER.warning('The TAG_PAGES_DESCRIPTIONS setting was renamed to TAG_DESCRIPTIONS.') self.config['TAG_DESCRIPTIONS'] = self.config['TAG_PAGES_DESCRIPTIONS'] if 'CATEGORY_PAGES_TITLES' in self.config: utils.LOGGER.warning('The CATEGORY_PAGES_TITLES setting was renamed to CATEGORY_TITLES.') self.config['CATEGORY_TITLES'] = self.config['CATEGORY_PAGES_TITLES'] if 'CATEGORY_PAGES_DESCRIPTIONS' in self.config: utils.LOGGER.warning('The CATEGORY_PAGES_DESCRIPTIONS setting was renamed to CATEGORY_DESCRIPTIONS.') self.config['CATEGORY_DESCRIPTIONS'] = 
self.config['CATEGORY_PAGES_DESCRIPTIONS'] if 'DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED' in self.config: utils.LOGGER.warning('The DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED setting was renamed and split to DISABLE_INDEXES and DISABLE_MAIN_ATOM_FEED.') self.config['DISABLE_INDEXES'] = self.config['DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED'] self.config['DISABLE_MAIN_ATOM_FEED'] = self.config['DISABLE_INDEXES_PLUGIN_INDEX_AND_ATOM_FEED'] if 'DISABLE_INDEXES_PLUGIN_RSS_FEED' in self.config: utils.LOGGER.warning('The DISABLE_INDEXES_PLUGIN_RSS_FEED setting was renamed to DISABLE_MAIN_RSS_FEED.') self.config['DISABLE_MAIN_RSS_FEED'] = self.config['DISABLE_INDEXES_PLUGIN_RSS_FEED'] for val in self.config['DATE_FORMAT'].values.values(): if '%' in val: utils.LOGGER.error('The DATE_FORMAT setting needs to be upgraded.') utils.LOGGER.warning("Nikola now uses CLDR-style date strings. http://cldr.unicode.org/translation/date-time-1/date-time") utils.LOGGER.warning("Example: %Y-%m-%d %H:%M ==> yyyy-MM-dd HH:mm") utils.LOGGER.warning("(note it’s different to what moment.js uses!)") sys.exit(1) locales = LEGAL_VALUES['LOCALES_BASE'] if 'LOCALES' in self.config: for k, v in self.config['LOCALES'].items(): self.config['LOCALES'][k] = v.split('.')[0] locales.update(self.config['LOCALES']) self.config['LOCALES'] = locales if self.config.get('POSTS_SECTIONS'): utils.LOGGER.warning("The sections feature has been removed and its functionality has been merged into categories.") utils.LOGGER.warning("For more information on how to migrate, please read: https://getnikola.com/blog/upgrading-to-nikola-v8.html#sections-were-replaced-by-categories") for section_config_suffix, cat_config_suffix in ( ('DESCRIPTIONS', 'DESCRIPTIONS'), ('TITLE', 'TITLES'), ('TRANSLATIONS', 'TRANSLATIONS') ): section_config = 'POSTS_SECTION_' + section_config_suffix cat_config = 'CATEGORY_' + cat_config_suffix if section_config in self.config: self.config[section_config].update(self.config[cat_config]) self.config[cat_config] = self.config[section_config] self.config['CATEGORY_DESTPATH_NAMES'] = self.config.get('POSTS_SECTION_NAME', {}) self.config['CATEGORY_DESTPATH_NAMES'] = utils.TranslatableSetting('CATEGORY_DESTPATH_NAMES', self.config['CATEGORY_DESTPATH_NAMES'], self.config['TRANSLATIONS']) self.config['CATEGORY_DESTPATH_AS_DEFAULT'] = not self.config.get('POSTS_SECTION_FROM_META') utils.LOGGER.info("Setting CATEGORY_DESTPATH_AS_DEFAULT = " + str(self.config['CATEGORY_DESTPATH_AS_DEFAULT'])) if self.config.get('CATEGORY_PAGES_FOLLOW_DESTPATH') and (not self.config.get('CATEGORY_ALLOW_HIERARCHIES') or self.config.get('CATEGORY_OUTPUT_FLAT_HIERARCHY')): utils.LOGGER.error('CATEGORY_PAGES_FOLLOW_DESTPATH requires CATEGORY_ALLOW_HIERARCHIES = True, CATEGORY_OUTPUT_FLAT_HIERARCHY = False.') sys.exit(1) if self.config.get('COMMENT_SYSTEM') == 'utterances': utterances_config = self.config.get('GLOBAL_CONTEXT', {}).get('utterances_config', {}) if not ('issue-term' in utterances_config or 'issue-number' in utterances_config): utils.LOGGER.error("COMMENT_SYSTEM = 'utterances' must have either GLOBAL_CONTEXT['utterances_config']['issue-term'] or GLOBAL_CONTEXT['utterances_config']['issue-term'] defined.") self.config['CONTENT_FOOTER'].langformat(self.config['CONTENT_FOOTER_FORMATS']) self.config['RSS_COPYRIGHT'].langformat(self.config['RSS_COPYRIGHT_FORMATS']) self.config['RSS_COPYRIGHT_PLAIN'].langformat(self.config['RSS_COPYRIGHT_FORMATS']) utils.USE_SLUGIFY = self.config['USE_SLUGIFY'] if self.config.get('HYPHENATE') and pyphen is None: 
utils.LOGGER.warning('To use the hyphenation, you have to install ' 'the "pyphen" package.') utils.LOGGER.warning('Setting HYPHENATE to False.') self.config['HYPHENATE'] = False self.config['post_pages'] = [] for i1, i2, i3 in self.config['POSTS']: self.config['post_pages'].append([i1, i2, i3, True]) for i1, i2, i3 in self.config['PAGES']: self.config['post_pages'].append([i1, i2, i3, False]) for old_plugin_name, new_plugin_names in TAXONOMY_COMPATIBILITY_PLUGIN_NAME_MAP.items(): if old_plugin_name in self.config['DISABLED_PLUGINS']: missing_plugins = [] for plugin_name in new_plugin_names: if plugin_name not in self.config['DISABLED_PLUGINS']: missing_plugins.append(plugin_name) if missing_plugins: utils.LOGGER.warning('The "{}" plugin was replaced by several taxonomy plugins (see PR #2535): {}'.format(old_plugin_name, ', '.join(new_plugin_names))) utils.LOGGER.warning('You are currently disabling "{}", but not the following new taxonomy plugins: {}'.format(old_plugin_name, ', '.join(missing_plugins))) utils.LOGGER.warning('Please also disable these new plugins or remove "{}" from the DISABLED_PLUGINS list.'.format(old_plugin_name)) self.config['DISABLED_PLUGINS'].extend(missing_plugins) if 'render_indexes' in self.config['DISABLED_PLUGINS']: if 'generate_rss' in self.config['DISABLED_PLUGINS'] or self.config['GENERATE_RSS'] is False: if 'classify_indexes' not in self.config['DISABLED_PLUGINS']: utils.LOGGER.warning('You are disabling the "render_indexes" plugin, as well as disabling the "generate_rss" plugin or setting GENERATE_RSS to False. To achieve the same effect, please disable the "classify_indexes" plugin in the future.') self.config['DISABLED_PLUGINS'].append('classify_indexes') else: if not self.config['DISABLE_INDEXES']: utils.LOGGER.warning('You are disabling the "render_indexes" plugin, but not the generation of RSS feeds. Please put "DISABLE_INDEXES = True" into your configuration instead.') self.config['DISABLE_INDEXES'] = True if 'generate_rss' in self.config['DISABLED_PLUGINS'] and self.config['GENERATE_RSS'] is True: utils.LOGGER.warning('Please use GENERATE_RSS to disable RSS feed generation, instead of mentioning generate_rss in DISABLED_PLUGINS.') self.config['GENERATE_RSS'] = False self.config['DISABLE_MAIN_RSS_FEED'] = True if self.config.get('PRETTY_URLS') and 'STRIP_INDEXES' not in config: self.config['STRIP_INDEXES'] = True if not self.config.get('COPY_SOURCES'): self.config['SHOW_SOURCELINK'] = False if self.config['CATEGORY_PATH']._inp is None: self.config['CATEGORY_PATH'] = self.config['TAG_PATH'] if self.config['CATEGORY_PAGES_ARE_INDEXES'] is None: self.config['CATEGORY_PAGES_ARE_INDEXES'] = self.config['TAG_PAGES_ARE_INDEXES'] self.default_lang = self.config['DEFAULT_LANG'] self.translations = self.config['TRANSLATIONS'] utils.LocaleBorg.initialize(self.config.get('LOCALES', {}), self.default_lang) if 'BASE_URL' not in self.config: self.config['BASE_URL'] = self.config.get('SITE_URL') if self.config['BASE_URL'] and self.config['BASE_URL'][-1] != '/': utils.LOGGER.warning("Your BASE_URL doesn't end in / -- adding it, but please fix it in your config file!") self.config['BASE_URL'] += '/' try: _bnl = urlsplit(self.config['BASE_URL']).netloc _bnl.encode('ascii') urlsplit(self.config['SITE_URL']).netloc.encode('ascii') except (UnicodeEncodeError, UnicodeDecodeError): utils.LOGGER.error("Your BASE_URL or SITE_URL contains an IDN expressed in Unicode. 
Please convert it to Punycode.") utils.LOGGER.error("Punycode of {}: {}".format(_bnl, _bnl.encode('idna'))) sys.exit(1) metadata_extractors.load_defaults(self, self.metadata_extractors_by) if metadata_extractors.DEFAULT_EXTRACTOR is None: utils.LOGGER.error("Could not find default meta extractor ({})".format( metadata_extractors.DEFAULT_EXTRACTOR_NAME)) sys.exit(1) if config.get('METADATA_FORMAT', 'nikola').lower() == 'pelican': if 'markdown.extensions.meta' not in config.get('MARKDOWN_EXTENSIONS', []) and 'markdown' in self.config['COMPILERS']: utils.LOGGER.warning( 'To use the Pelican metadata format, you need to add ' '"markdown.extensions.meta" to your MARKDOWN_EXTENSIONS setting.') try: self.tzinfo = dateutil.tz.gettz(self.config['TIMEZONE']) except Exception as exc: utils.LOGGER.warning("Error getting TZ: {}", exc) self.tzinfo = dateutil.tz.gettz() self.config['__tzinfo__'] = self.tzinfo self.config['_COMPILERS_RAW'] = {} for k, v in self.config['COMPILERS'].items(): self.config['_COMPILERS_RAW'][k] = list(v) self.themes_dirs = ['themes'] + self.config['EXTRA_THEMES_DIRS'] filter_name_format = 'filters.{0}' for filter_name, filter_definition in filters.__dict__.items(): if filter_name.startswith('_') or not callable(filter_definition): continue self.register_filter(filter_name_format.format(filter_name), filter_definition) self._set_global_context_from_config() self._set_all_page_deps_from_config() if self.configured: self._set_global_context_from_data() self.state = Persistor('state_data.json') self.cache = Persistor(os.path.join(self.config['CACHE_FOLDER'], 'cache_data.json')) if self.configured: self.state._set_site(self) self.cache._set_site(self)
MIT License
tensorflow/tensor2tensor
tensor2tensor/models/research/vqa_self_attention.py
question_encoder
python
def question_encoder(question,
                     question_self_attention_bias,
                     hparams,
                     name="question_encoder",
                     save_weights_to=None,
                     make_image_summary=True):
  x = question
  with tf.variable_scope(name):
    for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("self_attention"):
          y = vqa_layers.multihead_attention(
              common_layers.layer_preprocess(x, hparams),
              None,
              question_self_attention_bias,
              hparams.attention_key_channels or hparams.hidden_size,
              hparams.attention_value_channels or hparams.hidden_size,
              hparams.hidden_size,
              hparams.num_heads,
              hparams.attention_dropout,
              attention_type=hparams.question_self_attention_type,
              block_length=hparams.block_length,
              save_weights_to=save_weights_to,
              make_image_summary=make_image_summary,
              scale_dotproduct=hparams.scale_dotproduct,
          )
          utils.collect_named_outputs(
              "norms", "query_self_attention_%d" % (layer),
              tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs(
              "norms", "query_self_attention_postprocess_%d" % (layer),
              tf.norm(x, axis=-1))
        with tf.variable_scope("ffn"):
          y = common_layers.dense_relu_dense(
              common_layers.layer_preprocess(x, hparams),
              hparams.filter_size,
              hparams.hidden_size,
              dropout=hparams.relu_dropout,
          )
          utils.collect_named_outputs(
              "norms", "query_ffn_%d" % (layer), tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs(
              "norms", "query_ffn_postprocess_%d" % (layer),
              tf.norm(x, axis=-1))
    return common_layers.layer_preprocess(x, hparams)
A stack of self-attention layers.
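The call pattern, excerpted from VqaSelfAttention.body in the context below (hp is the model's hparams object, features["question"] a 4-D tensor; not standalone-runnable without tensor2tensor):

    question = common_layers.flatten4d3d(features["question"])  # [batch, length, hidden]
    question, question_self_attention_bias = prepare_question_encoder(question, hp)
    question = tf.nn.dropout(
        question, keep_prob=1. - hp.layer_prepostprocess_dropout)
    query = question_encoder(question, question_self_attention_bias, hp)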
https://github.com/tensorflow/tensor2tensor/blob/c22a226704e5887862bf9edd9f269892c9016ad4/tensor2tensor/models/research/vqa_self_attention.py#L342-L392
from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import range from tensor2tensor.layers import common_attention from tensor2tensor.layers import common_hparams from tensor2tensor.layers import common_layers from tensor2tensor.layers import vqa_layers from tensor2tensor.models.research import vqa_attention from tensor2tensor.utils import registry import tensorflow.compat.v1 as tf from tensorflow.contrib.layers.python.layers import utils @registry.register_model class VqaSelfAttention(vqa_attention.VqaAttentionBaseline): def body(self, features): hp = self.hparams if hp.image_input_type == "image": image_feat = vqa_layers.image_embedding( features["inputs"], model_fn=eval(hp.image_model_fn), trainable=hp.train_resnet, is_training=hp.mode == tf.estimator.ModeKeys.TRAIN) else: image_feat = features["inputs"] image_feat = common_layers.flatten4d3d(image_feat) image_hidden_size = hp.image_hidden_size or hp.hidden_size if hp.image_feat_preprocess_proj: image_feat = common_layers.dense(image_feat, image_hidden_size) utils.collect_named_outputs("norms", "image_feat_after_proj", tf.norm(image_feat, axis=-1)) else: assert image_hidden_size == 2048 image_feat = tf.nn.dropout( image_feat, keep_prob=1.-hp.layer_prepostprocess_dropout) if hp.image_feat_encode: image_feat = image_encoder(image_feat, hp) utils.collect_named_outputs("norms", "image_feat_encoded", tf.norm(image_feat, axis=-1)) else: image_feat = common_layers.layer_norm(image_feat) utils.collect_named_outputs("norms", "image_feat_after_layer", tf.norm(image_feat, axis=-1)) question = common_layers.flatten4d3d(features["question"]) utils.collect_named_outputs("norms", "question_embedding", tf.norm(question, axis=-1)) question, question_self_attention_bias = prepare_question_encoder( question, hp) question = tf.nn.dropout( question, keep_prob=1.-hp.layer_prepostprocess_dropout) query = question_encoder(question, question_self_attention_bias, hp) utils.collect_named_outputs( "norms", "query_encode", tf.norm(query, axis=-1)) query = (query + tf.expand_dims( tf.squeeze(question_self_attention_bias, [1, 2]), axis=2)) query = tf.reduce_max(query, axis=1) utils.collect_named_outputs( "norms", "query_maxpool", tf.norm(query, axis=-1)) image_ave = attn(image_feat, query, hp) utils.collect_named_outputs("norms", "image_ave", tf.norm(image_ave, axis=-1)) if hp.multimodal_combine == "concat": image_question = tf.concat([image_ave, query], axis=1) elif hp.multimodal_combine == "sum": image_question = image_ave + query elif hp.multimodal_combine == "product": image_question = image_ave * query utils.collect_named_outputs("norms", "image_question", tf.norm(image_question, axis=-1)) image_question = tf.nn.dropout(image_question, 1. 
- hp.dropout) output = mlp(image_question, hp) utils.collect_named_outputs("norms", "output", tf.norm(output, axis=-1)) norm_tensors = utils.convert_collection_to_dict("norms") vqa_layers.summarize_tensors(norm_tensors, tag="norms/") return tf.expand_dims(tf.expand_dims(output, axis=1), axis=2) @registry.register_model class VqaCombinedSelfAttention(VqaSelfAttention): def body(self, features): hp = self.hparams if hp.image_input_type == "image": image_feat = vqa_layers.image_embedding( features["inputs"], model_fn=eval(hp.image_model_fn), trainable=hp.train_resnet, is_training=hp.mode == tf.estimator.ModeKeys.TRAIN) else: image_feat = features["inputs"] image_feat = common_layers.flatten4d3d(image_feat) image_hidden_size = hp.hidden_size image_feat = common_layers.dense(image_feat, image_hidden_size) utils.collect_named_outputs("norms", "image_feat_after_proj", tf.norm(image_feat, axis=-1)) question = common_layers.flatten4d3d(features["question"]) utils.collect_named_outputs("norms", "question_embedding", tf.norm(question, axis=-1)) (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias) = prepare_image_question_encoder( image_feat, question, hp) encoder_input = tf.nn.dropout( encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout) encoder_output = image_question_encoder( encoder_input, encoder_self_attention_bias, hp) utils.collect_named_outputs( "norms", "encoder_output", tf.norm(encoder_output, axis=-1)) query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5 query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0) batch_size = common_layers.shape_list(encoder_input)[0] query = tf.tile(query, [batch_size, 1, 1]) query = tf.nn.dropout( query, keep_prob=1.-hp.layer_prepostprocess_dropout) decoder_output = decoder( query, encoder_output, None, encoder_decoder_attention_bias, hp) utils.collect_named_outputs("norms", "decoder_output", tf.norm(decoder_output, axis=-1)) norm_tensors = utils.convert_collection_to_dict("norms") vqa_layers.summarize_tensors(norm_tensors, tag="norms/") return tf.expand_dims(decoder_output, axis=1) @registry.register_model class VqaIterativeCombinedSelfAttention(VqaSelfAttention): def body(self, features): hp = self.hparams if hp.image_input_type == "image": image_feat = vqa_layers.image_embedding( features["inputs"], model_fn=eval(hp.image_model_fn), trainable=hp.train_resnet, is_training=hp.mode == tf.estimator.ModeKeys.TRAIN) else: image_feat = features["inputs"] image_feat = common_layers.flatten4d3d(image_feat) image_hidden_size = hp.hidden_size image_feat = common_layers.dense(image_feat, image_hidden_size) utils.collect_named_outputs("norms", "image_feat_after_proj", tf.norm(image_feat, axis=-1)) question = common_layers.flatten4d3d(features["question"]) utils.collect_named_outputs("norms", "question_embedding", tf.norm(question, axis=-1)) (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias) = prepare_image_question_encoder( image_feat, question, hp) encoder_input = tf.nn.dropout( encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout) query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5 query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0) batch_size = common_layers.shape_list(encoder_input)[0] query = tf.tile(query, [batch_size, 1, 1]) query = tf.nn.dropout( query, keep_prob=1.-hp.layer_prepostprocess_dropout) decoder_output = iterative_encoder_decoder( encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias, query, hp) 
utils.collect_named_outputs("norms", "decoder_output", tf.norm(decoder_output, axis=-1)) norm_tensors = utils.convert_collection_to_dict("norms") vqa_layers.summarize_tensors(norm_tensors, tag="norms/") return tf.expand_dims(decoder_output, axis=1) def image_encoder(image_feat, hparams, name="image_encoder", save_weights_to=None, make_image_summary=True): x = image_feat image_hidden_size = hparams.image_hidden_size or hparams.hidden_size image_filter_size = hparams.image_filter_size or hparams.filter_size with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, None, hparams.attention_key_channels or image_hidden_size, hparams.attention_value_channels or image_hidden_size, image_hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.image_self_attention_type, save_weights_to=save_weights_to, make_image_summary=make_image_summary, scale_dotproduct=hparams.scale_dotproduct, ) utils.collect_named_outputs( "norms", "image_feat_self_attention_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "image_feat_self_attention_postprocess_%d"%(layer), tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), image_filter_size, image_hidden_size, dropout=hparams.relu_dropout, ) utils.collect_named_outputs( "norms", "image_feat_ffn_%d"%(layer), tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "image_feat_ffn_postprocess_%d"%(layer), tf.norm(x, axis=-1)) return common_layers.layer_preprocess(x, hparams) def prepare_question_encoder(inputs, hparams): encoder_input = inputs encoder_padding = common_attention.embedding_to_padding(encoder_input) ignore_padding = common_attention.attention_bias_ignore_padding( encoder_padding) encoder_self_attention_bias = ignore_padding if hparams.pos == "timing": encoder_input = common_attention.add_timing_signal_1d(encoder_input) elif hparams.pos == "emb": encoder_input = common_attention.add_positional_embedding( encoder_input, hparams.max_length, "inputs_positional_embedding", None) return (encoder_input, encoder_self_attention_bias)
Apache License 2.0
netrack/tensorcraft
tensorcraft/client.py
Session.close
python
async def close(self) -> None:
    await self.session.close()
Close the session and interrupt communication with the remote server.
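A minimal usage sketch; the service URL and the "status" path are hypothetical, and the underlying aiohttp client is created in Session.__init__ (see context below):

    import asyncio

    async def main():
        session = Session("http://localhost:5678")  # hypothetical service URL
        try:
            resp = await session.session.get(session.url("status"))  # any request
        finally:
            await session.close()  # always release the aiohttp connector

    asyncio.run(main())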
https://github.com/netrack/tensorcraft/blob/15e0c54b795f4ce527cc5e2c46bbb7da434ac036/tensorcraft/client.py#L54-L56
import aiohttp
import aiohttp.web
import numpy
import ssl

import tensorcraft
import tensorcraft.asynclib

from tensorcraft import arglib
from tensorcraft import errors
from tensorcraft import tlslib
from types import TracebackType
from typing import Dict, IO, NamedTuple, Optional, Sequence, Union, Type
from urllib.parse import urlparse, urlunparse


class Session:

    default_headers = {"Accept-Version": ">={0}".format(tensorcraft.__apiversion__)}

    def __init__(self, service_url: str,
                 ssl_context: Union[ssl.SSLContext, None] = None):
        if ssl_context:
            url = urlparse(service_url)
            _, *parts = url
            service_url = urlunparse(["https"] + parts)

        self.service_url = service_url
        self.session = aiohttp.ClientSession(
            connector=aiohttp.TCPConnector(ssl_context=ssl_context),
            headers=self.default_headers,
        )

    @property
    def default_headers(self) -> Dict:
        return {"Accept-Version": f">={tensorcraft.__apiversion__}"}

    async def __aenter__(self) -> aiohttp.ClientSession:
        return await self.session.__aenter__()

    async def __aexit__(self,
                        exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> None:
        await self.session.__aexit__(exc_type, exc_val, exc_tb)

    def url(self, path: str) -> str:
        return f"{self.service_url}/{path}"
Apache License 2.0
luckydonald/pytgbot
code_generation/output/pytgbot/api_types/sendable/command.py
BotCommandScopeAllGroupChats.validate_array
python
def validate_array(array):
    assert_type_or_raise(array, dict, parameter_name="array")
    data = BotCommandScope.validate_array(array)
    return data
Builds a new array with valid values for the BotCommandScopeAllGroupChats constructor.

:return: new array with valid values
:rtype: dict
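A short usage sketch mirroring the from_array pattern in the context below; it assumes Sendable.validate_array returns an empty mapping for scope types that carry no extra fields:

    array = {"type": "all_group_chats"}           # as received from the Telegram API
    data = BotCommandScopeAllGroupChats.validate_array(array)
    scope = BotCommandScopeAllGroupChats(**data)  # same pattern as from_array uses
    assert scope.type == "all_group_chats"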
https://github.com/luckydonald/pytgbot/blob/e29a0b5f8f8331bd347c8e2b8e75af19b12d1bc5/code_generation/output/pytgbot/api_types/sendable/command.py#L520-L530
from luckydonaldUtils.encoding import unicode_type, to_unicode as u
from luckydonaldUtils.exceptions import assert_type_or_raise
from . import Sendable

__author__ = 'luckydonald'
__all__ = [
    'BotCommand',
    'BotCommandScope',
    'BotCommandScopeDefault',
    'BotCommandScopeAllPrivateChats',
    'BotCommandScopeAllGroupChats',
    'BotCommandScopeAllChatAdministrators',
    'BotCommandScopeChat',
    'BotCommandScopeChatAdministrators',
    'BotCommandScopeChatMember',
]


class BotCommand(Sendable):
    def __init__(self, command, description):
        super(BotCommand, self).__init__()
        assert_type_or_raise(command, unicode_type, parameter_name="command")
        self.command = command
        assert_type_or_raise(description, unicode_type, parameter_name="description")
        self.description = description

    def to_array(self, prefer_original=False):
        if prefer_original and self._raw:
            return self._raw
        array = super(BotCommand, self).to_array()
        array['command'] = u(self.command)
        array['description'] = u(self.description)
        return array

    @staticmethod
    def validate_array(array):
        assert_type_or_raise(array, dict, parameter_name="array")
        data = Sendable.validate_array(array)
        data['command'] = u(array.get('command'))
        data['description'] = u(array.get('description'))
        return data

    @staticmethod
    def from_array(array):
        if not array:
            return None
        data = BotCommand.validate_array(array)
        instance = BotCommand(**data)
        instance._raw = array
        return instance

    def __str__(self):
        return "BotCommand(command={self.command!r}, description={self.description!r})".format(self=self)

    def __repr__(self):
        if self._raw:
            return "BotCommand.from_array({self._raw})".format(self=self)
        return "BotCommand(command={self.command!r}, description={self.description!r})".format(self=self)

    def __contains__(self, key):
        return (
            key in ["command", "description"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )


class BotCommandScope(Sendable):
    def __init__(self):
        super(BotCommandScope, self).__init__()

    def to_array(self, prefer_original=False):
        if prefer_original and self._raw:
            return self._raw
        array = super(BotCommandScope, self).to_array()
        return array

    @staticmethod
    def validate_array(array):
        assert_type_or_raise(array, dict, parameter_name="array")
        data = Sendable.validate_array(array)
        return data

    @staticmethod
    def from_array(array):
        if not array:
            return None
        data = BotCommandScope.validate_array(array)
        instance = BotCommandScope(**data)
        instance._raw = array
        return instance

    def __str__(self):
        return "BotCommandScope()".format(self=self)

    def __repr__(self):
        if self._raw:
            return "BotCommandScope.from_array({self._raw})".format(self=self)
        return "BotCommandScope()".format(self=self)

    def __contains__(self, key):
        return (
            key in []
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )


class BotCommandScopeDefault(BotCommandScope):
    def __init__(self):
        super(BotCommandScopeDefault, self).__init__()
        self.type = 'default'

    def to_array(self, prefer_original=False):
        if prefer_original and self._raw:
            return self._raw
        array = super(BotCommandScopeDefault, self).to_array()
        array['type'] = u(self.type)
        return array

    @staticmethod
    def validate_array(array):
        assert_type_or_raise(array, dict, parameter_name="array")
        data = BotCommandScope.validate_array(array)
        return data

    @staticmethod
    def from_array(array):
        if not array:
            return None
        data = BotCommandScopeDefault.validate_array(array)
        instance = BotCommandScopeDefault(**data)
        instance._raw = array
        return instance

    def __str__(self):
        return "BotCommandScopeDefault(type={self.type!r})".format(self=self)

    def __repr__(self):
        if self._raw:
            return "BotCommandScopeDefault.from_array({self._raw})".format(self=self)
        return "BotCommandScopeDefault(type={self.type!r})".format(self=self)

    def __contains__(self, key):
        return (
            key in ["type"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )


class BotCommandScopeAllPrivateChats(BotCommandScope):
    def __init__(self):
        super(BotCommandScopeAllPrivateChats, self).__init__()
        self.type = 'all_private_chats'

    def to_array(self, prefer_original=False):
        if prefer_original and self._raw:
            return self._raw
        array = super(BotCommandScopeAllPrivateChats, self).to_array()
        array['type'] = u(self.type)
        return array

    @staticmethod
    def validate_array(array):
        assert_type_or_raise(array, dict, parameter_name="array")
        data = BotCommandScope.validate_array(array)
        return data

    @staticmethod
    def from_array(array):
        if not array:
            return None
        data = BotCommandScopeAllPrivateChats.validate_array(array)
        instance = BotCommandScopeAllPrivateChats(**data)
        instance._raw = array
        return instance

    def __str__(self):
        return "BotCommandScopeAllPrivateChats(type={self.type!r})".format(self=self)

    def __repr__(self):
        if self._raw:
            return "BotCommandScopeAllPrivateChats.from_array({self._raw})".format(self=self)
        return "BotCommandScopeAllPrivateChats(type={self.type!r})".format(self=self)

    def __contains__(self, key):
        return (
            key in ["type"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )


class BotCommandScopeAllGroupChats(BotCommandScope):
    def __init__(self):
        super(BotCommandScopeAllGroupChats, self).__init__()
        self.type = 'all_group_chats'

    def to_array(self, prefer_original=False):
        if prefer_original and self._raw:
            return self._raw
        array = super(BotCommandScopeAllGroupChats, self).to_array()
        array['type'] = u(self.type)
        return array

    @staticmethod
MIT License
carla-rl-gym/carla-rl
client/carla/planner/city_track.py
CityTrack.project_node
python
def project_node(self, position):
    node = self._map.convert_to_node(position)
    node = tuple([int(x) for x in node])
    node = (max(0, node[0]), max(0, node[1]))
    node = (min(self._map.get_graph_resolution()[0] - 1, node[0]),
            min(self._map.get_graph_resolution()[1] - 1, node[1]))
    node = self._map.search_on_grid(node)
    return node
Project a world position onto the nearest graph node that lies on a city road.
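A usage sketch with hypothetical values; it assumes the CARLA planner package and the "Town01" map data are available locally:

    track = CityTrack("Town01")                    # loads the CarlaMap for that town
    node = track.project_node((92.1, 16.5, 0.0))   # world position -> clamped grid node on a road
    print(node)                                    # a (col, row)-style tuple on the graph grid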
https://github.com/carla-rl-gym/carla-rl/blob/e3ea0df450fe9716c6f1d2e6fbaec05009fb7da8/client/carla/planner/city_track.py#L31-L50
from carla.planner.graph import sldist
from carla.planner.astar import AStar
from carla.planner.map import CarlaMap


class CityTrack(object):

    def __init__(self, city_name):
        self._node_density = 50.0
        self._pixel_density = 0.1643
        self._map = CarlaMap(city_name, self._pixel_density, self._node_density)
        self._astar = AStar()
        self._previous_node = []
        self._route = None
MIT License
zakkg3/clustersecret
src/handlers.py
create_secret
python
def create_secret(logger, namespace, body, v1=None):
    if v1 is None:
        v1 = client.CoreV1Api()
        logger.debug('new client - fn create secret')
    try:
        name = body['metadata']['name']
    except KeyError:
        logger.debug("No name in body ?")
        raise kopf.TemporaryError("can not get the name.")
    try:
        data = body.get('data')
    except KeyError:
        data = ''
        logger.error("Empty secret?? could not get the data.")
    secret_type = 'Opaque'
    if 'type' in body:
        secret_type = body['type']
    metadata = {'name': name, 'namespace': namespace}
    api_version = 'v1'
    kind = 'Secret'
    body = client.V1Secret(api_version, data, kind, metadata, type=secret_type)
    logger.info(f"cloning secret in namespace {namespace}")
    try:
        api_response = v1.create_namespaced_secret(namespace, body)
    except client.rest.ApiException as e:
        if e.reason == 'Conflict':
            logger.warning(f"secret `{name}` already exist in namesace '{namespace}'")
            return 0
        logger.error(f'Can not create a secret, it is base64 encoded? data: {data}')
        logger.error(f'Kube exception {e}')
        return 1
    return 0
Creates a given secret in a given namespace
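A usage sketch; it assumes kube credentials are already available and uses a hypothetical secret body (values base64-encoded, as the function's error path suggests):

    import base64
    import logging
    from kubernetes import client, config

    config.load_kube_config()  # or config.load_incluster_config() inside a pod
    logger = logging.getLogger("clustersecret")

    body = {
        "metadata": {"name": "demo-secret"},
        "type": "Opaque",
        "data": {"password": base64.b64encode(b"hunter2").decode()},
    }
    rc = create_secret(logger, "default", body, v1=client.CoreV1Api())
    # rc == 0 on success or if the secret already exists; 1 on any other API error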
https://github.com/zakkg3/clustersecret/blob/c7fb2054eb419d4e1f18dfea9c773a25b1daf16f/src/handlers.py#L116-L152
import kopf
import re
from kubernetes import client, config


@kopf.on.delete('clustersecret.io', 'v1', 'clustersecrets')
def on_delete(spec, uid, body, name, logger=None, **_):
    syncedns = body['status']['create_fn']['syncedns']
    v1 = client.CoreV1Api()
    for ns in syncedns:
        logger.info(f'deleting secret {name} from namespace {ns}')
        try:
            v1.delete_namespaced_secret(name, ns)
        except client.rest.ApiException as e:
            if e.status == 404:
                logger.warning(f"The namespace {ns} may not exist anymore: Not found")
            else:
                logger.warning(f" Something wierd deleting the secret: {e}")
    try:
        csecs.pop(uid)
        logger.debug(f"csec {uid} deleted from memory ok")
    except KeyError as k:
        logger.info(f" This csec were not found in memory, maybe it was created in another run: {k}")


@kopf.on.field('clustersecret.io', 'v1', 'clustersecrets', field='data')
def on_field_data(old, new, body, name, logger=None, **_):
    logger.debug(f'Data changed: {old} -> {new}')
    if old is not None:
        syncedns = body['status']['create_fn']['syncedns']
        v1 = client.CoreV1Api()
        secret_type = 'Opaque'
        if 'type' in body:
            secret_type = body['type']
        for ns in syncedns:
            logger.info(f'Re Syncing secret {name} in ns {ns}')
            metadata = {'name': name, 'namespace': ns}
            api_version = 'v1'
            kind = 'Secret'
            data = new
            body = client.V1Secret(api_version, data, kind, metadata, type=secret_type)
            response = v1.replace_namespaced_secret(name, ns, body)
            logger.debug(response)
    else:
        logger.debug('This is a new object')


csecs = {}


@kopf.on.resume('clustersecret.io', 'v1', 'clustersecrets')
@kopf.on.create('clustersecret.io', 'v1', 'clustersecrets')
async def create_fn(spec, uid, logger=None, body=None, **kwargs):
    v1 = client.CoreV1Api()
    matchedns = get_ns_list(logger, body, v1)
    logger.info(f'Syncing on Namespaces: {matchedns}')
    for namespace in matchedns:
        create_secret(logger, namespace, body, v1)
    csecs[uid] = {}
    csecs[uid]['body'] = body
    csecs[uid]['syncedns'] = matchedns
    return {'syncedns': matchedns}


def get_ns_list(logger, body, v1=None):
    if v1 is None:
        v1 = client.CoreV1Api()
        logger.debug('new client - fn get_ns_list')
    try:
        matchNamespace = body.get('matchNamespace')
    except KeyError:
        matchNamespace = '*'
        logger.debug("matching all namespaces.")
    logger.debug(f'Matching namespaces: {matchNamespace}')
    try:
        avoidNamespaces = body.get('avoidNamespaces')
    except KeyError:
        avoidNamespaces = ''
        logger.debug("not avoiding namespaces")
    nss = v1.list_namespace().items
    matchedns = []
    avoidedns = []
    for matchns in matchNamespace:
        for ns in nss:
            if re.match(matchns, ns.metadata.name):
                matchedns.append(ns.metadata.name)
                logger.debug(f'Matched namespaces: {ns.metadata.name} matchpathern: {matchns}')
    if avoidNamespaces:
        for avoidns in avoidNamespaces:
            for ns in nss:
                if re.match(avoidns, ns.metadata.name):
                    avoidedns.append(ns.metadata.name)
                    logger.debug(f'Skipping namespaces: {ns.metadata.name} avoidpatrn: {avoidns}')
    for ns in matchedns.copy():
        if ns in avoidedns:
            matchedns.remove(ns)
    return matchedns
Apache License 2.0
apache/incubator-spot
spot-ingest/common/file_watcher.py
FileWatcher.is_empty
python
def is_empty(self):
    return self._queue == []
Return ``True`` if there is no file in the queue, otherwise ``False``.
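A usage sketch with a hypothetical path and pattern; note the trailing @property in the context below, which suggests is_empty is read as an attribute rather than called:

    watcher = FileWatcher("/tmp/incoming", [r".*\.csv$"], recursive=False)
    watcher.start()
    ...
    if not watcher.is_empty:           # property access, no parentheses
        filename = watcher.dequeue     # pops the most recently queued file, or None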
https://github.com/apache/incubator-spot/blob/2d60a2adae7608b43e90ce1b9ec0adf24f6cc8eb/spot-ingest/common/file_watcher.py#L72-L76
import logging
import re

from os.path import basename
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class FileWatcher(Observer):

    def __init__(self, path, supported_files, recursive):
        self._logger = logging.getLogger('SPOT.INGEST.COMMON.FILE_WATCHER')
        self._queue = []

        super(FileWatcher, self).__init__()
        self._logger.info('Schedule watching "{0}" directory.'.format(path))
        super(FileWatcher, self).schedule(NewFileEventHandler(self), path, recursive)

        self._regexs = [re.compile(x) for x in supported_files]
        pattern_names = ', '.join(['"%s"' % x for x in supported_files])
        self._logger.info('Supported filenames: {0}'.format(pattern_names))
        self._logger.info('The search in sub-directories is {0}.'
                          .format('enabled' if recursive else 'disabled'))

    def __str__(self):
        return '"{0}({1})"'.format(self.__class__.__name__, self.name)

    @property
    def dequeue(self):
        return None if self._queue == [] else self._queue.pop()

    @property
Apache License 2.0
theislab/batchglm
batchglm/data.py
constraint_matrix_from_dict
python
def constraint_matrix_from_dict(
        sample_description: pd.DataFrame,
        formula: str,
        as_categorical: Union[bool, list] = True,
        constraints: dict = {},
        return_type: str = "patsy"
) -> Tuple:
    assert len(constraints) > 0, "supply constraints"
    sample_description: pd.DataFrame = sample_description.copy()
    if type(as_categorical) is not bool or as_categorical:
        if type(as_categorical) is bool and as_categorical:
            as_categorical = np.repeat(True, sample_description.columns.size)
        for to_cat, col in zip(as_categorical, sample_description):
            if to_cat:
                sample_description[col] = sample_description[col].astype("category")

    formula_unconstrained = formula.split("+")
    formula_unconstrained = [x for x in formula_unconstrained if x.strip(" ") not in constraints.keys()]
    formula_unconstrained = "+".join(formula_unconstrained)

    dmat = patsy.dmatrix(formula_unconstrained, sample_description)
    coef_names = dmat.design_info.column_names
    term_names = dmat.design_info.term_names

    constraints_ls = string_constraints_from_dict(
        sample_description=sample_description,
        constraints=constraints
    )
    for i, x in enumerate(constraints.keys()):
        assert isinstance(x, str), "constrained should contain strings"
        dmat_constrained_temp = patsy.highlevel.dmatrix("0+" + x, sample_description)
        dmat = np.hstack([dmat, dmat_constrained_temp])
        coef_names.extend(dmat_constrained_temp.design_info.column_names)
        term_names.extend(dmat_constrained_temp.design_info.term_names)

    constraints_ar = constraint_matrix_from_string(
        dmat=dmat,
        coef_names=coef_names,
        constraints=constraints_ls
    )

    if return_type == "dataframe":
        dmat = pd.DataFrame(dmat, columns=coef_names)

    return dmat, coef_names, constraints_ar, term_names
Create a design matrix from some sample description and a constraint matrix
based on factor encoding of constrained parameter sets.

Note that we build a dataframe instead of a patsy.DesignMatrix here if
constraints are used. This is done because we were not able to build a
patsy.DesignMatrix of the constrained form required in this context. In those
cases in which the return type cannot be patsy, we encourage the use of the
returned term_names to perform term-wise slicing, which is not supported by
other design matrix return types.

:param sample_description: pandas.DataFrame of length "num_observations"
    containing explanatory variables as columns
:param formula: model formula as string, describing the relations of the
    explanatory variables. E.g. '~ 1 + batch + confounder'
:param as_categorical: boolean or list of booleans corresponding to the
    columns in 'sample_description'

    If True, all values in 'sample_description' will be treated as
    categorical values. If list of booleans, each column will be changed to
    categorical if the corresponding value in 'as_categorical' is True. Set
    to false, if columns should not be changed.
:param constraints: Grouped factors to enforce equality constraints on.
    Every element of the dictionary corresponds to one set of equality
    constraints. Each set has to be an entry of the form {..., x: y, ...}
    where x is the factor to be constrained and y is a factor by which
    levels of x are grouped and then constrained. Set y="1" to constrain
    all levels of x to sum to one, a single equality constraint.

    E.g.: {"batch": "condition"} Batch levels within each condition are
    constrained to sum to zero. This is applicable if repeats of an
    experiment within each condition are independent so that the set-up
    ~1+condition+batch is perfectly confounded.

    Can only group by non-constrained effects right now, use
    constraint_matrix_from_string for other cases.
:return:
    - model design matrix
    - term_names to allow slicing by factor if return type cannot be
      patsy.DesignMatrix
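A sketch of the docstring's own example, {"batch": "condition"}, on a toy sample description (data values hypothetical):

    import pandas as pd

    sample_description = pd.DataFrame({
        "condition": ["a", "a", "b", "b"],
        "batch": ["1", "2", "3", "4"],   # batches nested within conditions
    })
    dmat, coef_names, cmat, term_names = constraint_matrix_from_dict(
        sample_description=sample_description,
        formula="~ 1 + condition + batch",
        constraints={"batch": "condition"},  # batch levels sum to zero per condition
    )
    # dmat: design matrix with all batch levels one-hot encoded ("0+batch" block)
    # cmat: constraint matrix mapping constrained parameters to free parameters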
https://github.com/theislab/batchglm/blob/31b905b99b6baa7c94b82550d6a74f00d81966ea/batchglm/data.py#L264-L351
import logging

import patsy
import pandas as pd
import numpy as np
from typing import Union, Tuple, List

try:
    import anndata
    try:
        from anndata.base import Raw
    except ImportError:
        from anndata import Raw
except ImportError:
    anndata = None
    Raw = None


def design_matrix(
        sample_description: Union[pd.DataFrame, None] = None,
        formula: Union[str, None] = None,
        as_categorical: Union[bool, list] = True,
        dmat: Union[pd.DataFrame, None] = None,
        return_type: str = "patsy",
) -> Tuple[Union[patsy.design_info.DesignMatrix, pd.DataFrame], List[str]]:
    if (dmat is None and sample_description is None) or (dmat is not None and sample_description is not None):
        raise ValueError("supply either dmat or sample_description")

    if dmat is None:
        sample_description: pd.DataFrame = sample_description.copy()
        if type(as_categorical) is not bool or as_categorical:
            if type(as_categorical) is bool and as_categorical:
                as_categorical = np.repeat(True, sample_description.columns.size)
            for to_cat, col in zip(as_categorical, sample_description):
                if to_cat:
                    sample_description[col] = sample_description[col].astype("category")
        dmat = patsy.dmatrix(formula, sample_description)
        coef_names = dmat.design_info.column_names
        if return_type == "dataframe":
            df = pd.DataFrame(dmat, columns=dmat.design_info.column_names)
            df = pd.concat([df, sample_description], axis=1)
            df.set_index(list(sample_description.columns), inplace=True)
            return df
        elif return_type == "patsy":
            return dmat, coef_names
        else:
            raise ValueError("return type %s not recognized" % return_type)
    else:
        if return_type == "dataframe":
            return dmat, dmat.columns
        elif return_type == "patsy":
            raise ValueError("return type 'patsy' not supported for input (dmat is not None)")
        else:
            raise ValueError("return type %s not recognized" % return_type)


def view_coef_names(
        dmat: Union[patsy.design_info.DesignMatrix, pd.DataFrame]
) -> np.ndarray:
    if isinstance(dmat, pd.DataFrame):
        return np.asarray(dmat.columns)
    elif isinstance(dmat, patsy.design_info.DesignMatrix):
        return np.asarray(dmat.design_info.column_names)
    else:
        raise ValueError("dmat type %s not recognized" % type(dmat))


def preview_coef_names(
        sample_description: pd.DataFrame,
        formula: str,
        as_categorical: Union[bool, list] = True
) -> List[str]:
    _, coef_names = design_matrix(
        sample_description=sample_description,
        formula=formula,
        as_categorical=as_categorical,
        return_type="patsy"
    )
    return coef_names


def constraint_system_from_star(
        dmat: Union[None, patsy.design_info.DesignMatrix, pd.DataFrame] = None,
        sample_description: Union[None, pd.DataFrame] = None,
        formula: Union[None, str] = None,
        as_categorical: Union[bool, list] = True,
        constraints: Union[None, List[str], Tuple[str], dict, np.ndarray] = None,
        return_type: str = "patsy"
) -> Tuple:
    if sample_description is None and dmat is None:
        raise ValueError("supply either sample_description or dmat")
    if dmat is None and not isinstance(constraints, dict):
        dmat, coef_names = design_matrix(
            sample_description=sample_description,
            formula=formula,
            as_categorical=as_categorical,
            dmat=None,
            return_type=return_type
        )
    elif dmat is not None and isinstance(constraints, dict):
        raise ValueError("dmat was supplied even though constraints were given as dict")

    if isinstance(constraints, dict):
        dmat, coef_names, cmat, term_names = constraint_matrix_from_dict(
            sample_description=sample_description,
            formula=formula,
            as_categorical=as_categorical,
            constraints=constraints,
            return_type="patsy"
        )
    elif isinstance(constraints, tuple) or isinstance(constraints, list):
        cmat, coef_names = constraint_matrix_from_string(
            dmat=dmat,
            coef_names=dmat.design_info.column_names,
            constraints=constraints
        )
        term_names = None
    elif isinstance(constraints, np.ndarray):
        cmat = constraints
        term_names = None
        if isinstance(dmat, pd.DataFrame):
            coef_names = dmat.columns
            dmat = dmat.values
    elif constraints is None:
        cmat = None
        term_names = None
        if isinstance(dmat, pd.DataFrame):
            coef_names = dmat.columns
            dmat = dmat.values
    else:
        raise ValueError("constraint format %s not recognized" % type(constraints))

    if cmat is None:
        if np.linalg.matrix_rank(dmat) != dmat.shape[1]:
            raise ValueError(
                "constrained design matrix is not full rank: %i %i" %
                (np.linalg.matrix_rank(dmat), dmat.shape[1])
            )
    else:
        if np.linalg.matrix_rank(np.matmul(dmat, cmat)) != cmat.shape[1]:
            raise ValueError(
                "constrained design matrix is not full rank: %i %i" %
                (np.linalg.matrix_rank(np.matmul(dmat, cmat)), cmat.shape[1])
            )

    return dmat, coef_names, cmat, term_names
BSD 3-Clause New or Revised License
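A minimal usage sketch for the design_matrix helper recorded above (the sample table, formula, and printed names are hypothetical; it assumes pandas and patsy are installed):

import pandas as pd
from batchglm.data import design_matrix

# Hypothetical sample description: four samples in two conditions.
sample_description = pd.DataFrame({"condition": ["ctrl", "ctrl", "treat", "treat"]})

# Columns are cast to categoricals (as_categorical=True by default), then
# patsy builds the matrix: an intercept plus one dummy column for "treat".
dmat, coef_names = design_matrix(
    sample_description=sample_description,
    formula="~ 1 + condition",
    return_type="patsy",
)
print(coef_names)  # e.g. ['Intercept', 'condition[T.treat]']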
lithops-cloud/lithops
lithops/storage/backends/infinispan/infinispan.py
InfinispanBackend.head_bucket
python
def head_bucket(self, bucket_name):
        raise NotImplementedError
Head bucket from COS with a name.
Throws StorageNoSuchKeyError if the given bucket does not exist.

:param bucket_name: name of the bucket
:return: Metadata of the bucket
:rtype: str/bytes
https://github.com/lithops-cloud/lithops/blob/a274a0bc423e22b9a68834cac5d63130666a4ee8/lithops/storage/backends/infinispan/infinispan.py#L141-L148
import logging import requests import json import base64 from requests.auth import HTTPBasicAuth from lithops.constants import STORAGE_CLI_MSG logger = logging.getLogger(__name__) class InfinispanBackend: def __init__(self, infinispan_config): logger.debug("Creating Infinispan storage client") self.infinispan_config = infinispan_config self.basicAuth = HTTPBasicAuth(infinispan_config.get('username'), infinispan_config.get('password')) self.endpoint = infinispan_config.get('endpoint') self.cache_name = infinispan_config.get('cache_name', 'default') self.cache_type = infinispan_config.get('cache_type', 'org.infinispan.DIST_SYNC') self.infinispan_client = requests.session() self.__is_server_version_supported() self.__create_cache(self.cache_name, self.cache_type) self.headers = {"Content-Type": "application/octet-stream", "Key-Content-Type": "application/octet-stream;encoding=base64"} msg = STORAGE_CLI_MSG.format('Infinispan') logger.info("{} - Endpoint: {}".format(msg, self.endpoint)) def __create_cache(self, cache_name, cache_type): url = self.endpoint + '/rest/v2/caches/' + cache_name res = self.infinispan_client.head(url, auth=self.basicAuth) if res.status_code == 404: logger.debug('going to create new Infinispan cache {}'.format(cache_name)) url = self.endpoint+'/rest/v2/caches/'+cache_name+'?template='+cache_type res = self.infinispan_client.post(url) logger.debug('New Infinispan cache {} created with ' 'status {}'.format(cache_name, res.status_code)) def __key_url(self, bucket_name, key): data_key = '{}_{}'.format(bucket_name, key) urlSafeEncodedBytes = base64.urlsafe_b64encode(data_key.encode("utf-8")) urlSafeEncodedStr = str(urlSafeEncodedBytes, "utf-8") url = self.endpoint + '/rest/v2/caches/' + self.cache_name + '/' + urlSafeEncodedStr return url def __is_server_version_supported(self): url = self.endpoint + '/rest/v2/cache-managers/default' res = self.infinispan_client.get(url, auth=self.basicAuth) json_resp = json.loads(res.content.decode('utf-8')) server_version = json_resp['version'].split('.') if (int(server_version[0]) < 10 or (int(server_version[0]) == 10 and int(server_version[1]) < 1)): raise Exception('Infinispan versions 10.1 and up supported') def get_client(self): return self.infinispan_client def put_object(self, bucket_name, key, data): url = self.__key_url(bucket_name, key) resp = self.infinispan_client.put(url, data=data, auth=self.basicAuth, headers=self.headers) logger.debug(resp) def get_object(self, bucket_name, key, stream=False, extra_get_args={}): url = self.__key_url(bucket_name, key) res = self.infinispan_client.get(url, headers=self.headers, auth=self.basicAuth) data = res.content return data def head_object(self, bucket_name, key): url = self.__key_url(bucket_name, key) res = self.infinispan_client.head(url, headers=self.headers, auth=self.basicAuth) return res.status_code def delete_object(self, bucket_name, key): url = self.__key_url(bucket_name, key) return self.infinispan_client.delete(url, headers=self.headers, auth=self.basicAuth) def delete_objects(self, bucket_name, key_list): result = [] for key in key_list: self.delete_object(bucket_name, key) return result
Apache License 2.0
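The record above centers on the key-to-URL scheme in __key_url. The sketch below mirrors that logic outside the class; the endpoint, cache, bucket, and key names are hypothetical:

import base64

# Mirror of InfinispanBackend.__key_url from the context above, without the
# client object: "bucket_key" is urlsafe-base64-encoded into the REST path.
def key_url(endpoint, cache_name, bucket_name, key):
    data_key = '{}_{}'.format(bucket_name, key)
    encoded = str(base64.urlsafe_b64encode(data_key.encode("utf-8")), "utf-8")
    return endpoint + '/rest/v2/caches/' + cache_name + '/' + encoded

print(key_url("http://localhost:11222", "default", "mybucket", "job/result.json"))
# http://localhost:11222/rest/v2/caches/default/bXlidWNrZXRfam9iL3Jlc3VsdC5qc29u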
dhn/osee
Kernel_Exploitation/ioctl.py
IOCTL.__get_method
python
def __get_method(self, ioctl_code):
        method_names = [
            "METHOD_BUFFERED",
            "METHOD_IN_DIRECT",
            "METHOD_OUT_DIRECT",
            "METHOD_NEITHER",
        ]
        method = ioctl_code & 3
        return method_names[method], method
Returns the correct method type name for a 32 bit IOCTL code
https://github.com/dhn/osee/blob/9875d16edc09ac569681e325ef7dbfa0ba69cfbc/Kernel_Exploitation/ioctl.py#L124-L134
import os import re import sys import angr import mmap import pyvex import claripy import logging import argparse import collections class IOCTL: def __init__(self, driver): self._ASCII_BYTE = b" !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~\t" self._UNICODE_RE_4 = re.compile(b"((?:[%s]\x00){%d,})" % (self._ASCII_BYTE, 4)) self._REPEATS = ["A", "\x00", "\xfe", "\xff"] self._String = collections.namedtuple("String", ["s", "offset"]) self._driver = driver self._p = angr.Project(self._driver, auto_load_libs=False) def __get_device(self, ioctl_code): device_name_unknown = "<UNKNOWN>" device_names = [ device_name_unknown, "FILE_DEVICE_BEEP", "FILE_DEVICE_CD_ROM", "FILE_DEVICE_CD_ROM_FILE_SYSTEM", "FILE_DEVICE_CONTROLLER", "FILE_DEVICE_DATALINK", "FILE_DEVICE_DFS", "FILE_DEVICE_DISK", "FILE_DEVICE_DISK_FILE_SYSTEM", "FILE_DEVICE_FILE_SYSTEM", "FILE_DEVICE_INPORT_PORT", "FILE_DEVICE_KEYBOARD", "FILE_DEVICE_MAILSLOT", "FILE_DEVICE_MIDI_IN", "FILE_DEVICE_MIDI_OUT", "FILE_DEVICE_MOUSE", "FILE_DEVICE_MULTI_UNC_PROVIDER", "FILE_DEVICE_NAMED_PIPE", "FILE_DEVICE_NETWORK", "FILE_DEVICE_NETWORK_BROWSER", "FILE_DEVICE_NETWORK_FILE_SYSTEM", "FILE_DEVICE_NULL", "FILE_DEVICE_PARALLEL_PORT", "FILE_DEVICE_PHYSICAL_NETCARD", "FILE_DEVICE_PRINTER", "FILE_DEVICE_SCANNER", "FILE_DEVICE_SERIAL_MOUSE_PORT", "FILE_DEVICE_SERIAL_PORT", "FILE_DEVICE_SCREEN", "FILE_DEVICE_SOUND", "FILE_DEVICE_STREAMS", "FILE_DEVICE_TAPE", "FILE_DEVICE_TAPE_FILE_SYSTEM", "FILE_DEVICE_TRANSPORT", "FILE_DEVICE_UNKNOWN", "FILE_DEVICE_VIDEO", "FILE_DEVICE_VIRTUAL_DISK", "FILE_DEVICE_WAVE_IN", "FILE_DEVICE_WAVE_OUT", "FILE_DEVICE_8042_PORT", "FILE_DEVICE_NETWORK_REDIRECTOR", "FILE_DEVICE_BATTERY", "FILE_DEVICE_BUS_EXTENDER", "FILE_DEVICE_MODEM", "FILE_DEVICE_VDM", "FILE_DEVICE_MASS_STORAGE", "FILE_DEVICE_SMB", "FILE_DEVICE_KS", "FILE_DEVICE_CHANGER", "FILE_DEVICE_SMARTCARD", "FILE_DEVICE_ACPI", "FILE_DEVICE_DVD", "FILE_DEVICE_FULLSCREEN_VIDEO", "FILE_DEVICE_DFS_FILE_SYSTEM", "FILE_DEVICE_DFS_VOLUME", "FILE_DEVICE_SERENUM", "FILE_DEVICE_TERMSRV", "FILE_DEVICE_KSEC", "FILE_DEVICE_FIPS", "FILE_DEVICE_INFINIBAND", device_name_unknown, device_name_unknown, "FILE_DEVICE_VMBUS", "FILE_DEVICE_CRYPT_PROVIDER", "FILE_DEVICE_WPD", "FILE_DEVICE_BLUETOOTH", "FILE_DEVICE_MT_COMPOSITE", "FILE_DEVICE_MT_TRANSPORT", "FILE_DEVICE_BIOMETRIC", "FILE_DEVICE_PMI", ] device_names2 = [ {"name": "MOUNTMGRCONTROLTYPE", "code": 0x0000006d}, ] device = (ioctl_code >> 16) & 0xffff if device >= len(device_names): device_name = device_name_unknown for dev in device_names2: if device == dev["code"]: device_name = dev["name"] break else: device_name = device_names[device] return device_name, device
BSD 3-Clause New or Revised License
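__get_method above reads only the low two bits of a 32-bit IOCTL code; the other fields decode the same way. A standalone sketch with a hypothetical code value:

# Windows IOCTL bit layout the class relies on:
# device (bits 16-31), access (14-15), function (2-13), method (0-1).
ioctl_code = 0x222003

device = (ioctl_code >> 16) & 0xFFFF   # as in __get_device
access = (ioctl_code >> 14) & 3
function = (ioctl_code >> 2) & 0xFFF
method = ioctl_code & 3                # as in __get_method

print(hex(device), access, hex(function), method)
# 0x22 (FILE_DEVICE_UNKNOWN) 0 0x800 3 (METHOD_NEITHER)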
wolkabout/wolkconnect-python
wolk/wolkabout_protocol_message_deserializer.py
WolkAboutProtocolMessageDeserializer.is_firmware_abort
python
def is_firmware_abort(self, message: Message) -> bool:
        firmware_update_abort = message.topic.startswith(
            self.FIRMWARE_UPDATE_ABORT
        )
        self.logger.debug(
            f"{message.topic} is firmware abort: {firmware_update_abort}"
        )
        return firmware_update_abort
Check if message is firmware update abort command.

:param message: The message received
:type message: Message

:returns: firmware_update_abort
:rtype: bool
https://github.com/wolkabout/wolkconnect-python/blob/11412e3f88911170f587b5e857d07ab41c8f52b5/wolk/wolkabout_protocol_message_deserializer.py#L164-L179
import json from distutils.util import strtobool from typing import List from typing import Tuple from wolk import logger_factory from wolk.interface.message_deserializer import MessageDeserializer from wolk.model.actuator_command import ActuatorCommand from wolk.model.configuration_command import ConfigurationCommand from wolk.model.device import Device from wolk.model.file_transfer_package import FileTransferPackage from wolk.model.message import Message class WolkAboutProtocolMessageDeserializer(MessageDeserializer): DEVICE_PATH_DELIMITER = "d/" REFERENCE_PATH_PREFIX = "r/" CHANNEL_DELIMITER = "/" KEEP_ALIVE_RESPONSE = "pong/" ACTUATOR_SET = "p2d/actuator_set/" CONFIGURATION_SET = "p2d/configuration_set/" FILE_BINARY_RESPONSE = "p2d/file_binary_response/" FILE_DELETE = "p2d/file_delete/" FILE_PURGE = "p2d/file_purge/" FILE_LIST_CONFIRM = "p2d/file_list_confirm/" FILE_LIST_REQUEST = "p2d/file_list_request/" FILE_UPLOAD_ABORT = "p2d/file_upload_abort/" FILE_UPLOAD_INITIATE = "p2d/file_upload_initiate/" FILE_URL_DOWNLOAD_ABORT = "p2d/file_url_download_abort/" FILE_URL_DOWNLOAD_INITIATE = "p2d/file_url_download_initiate/" FIRMWARE_UPDATE_ABORT = "p2d/firmware_update_abort/" FIRMWARE_UPDATE_INSTALL = "p2d/firmware_update_install/" def __init__(self, device: Device) -> None: self.logger = logger_factory.logger_factory.get_logger( str(self.__class__.__name__) ) self.logger.debug(f"{device}") self.inbound_topics = [ self.KEEP_ALIVE_RESPONSE + device.key, self.CONFIGURATION_SET + self.DEVICE_PATH_DELIMITER + device.key, self.FILE_BINARY_RESPONSE + self.DEVICE_PATH_DELIMITER + device.key, self.FILE_DELETE + self.DEVICE_PATH_DELIMITER + device.key, self.FILE_PURGE + self.DEVICE_PATH_DELIMITER + device.key, self.FILE_LIST_CONFIRM + self.DEVICE_PATH_DELIMITER + device.key, self.FILE_LIST_REQUEST + self.DEVICE_PATH_DELIMITER + device.key, self.FILE_UPLOAD_ABORT + self.DEVICE_PATH_DELIMITER + device.key, self.FILE_UPLOAD_INITIATE + self.DEVICE_PATH_DELIMITER + device.key, self.FILE_URL_DOWNLOAD_ABORT + self.DEVICE_PATH_DELIMITER + device.key, self.FILE_URL_DOWNLOAD_INITIATE + self.DEVICE_PATH_DELIMITER + device.key, self.FIRMWARE_UPDATE_ABORT + self.DEVICE_PATH_DELIMITER + device.key, self.FIRMWARE_UPDATE_INSTALL + self.DEVICE_PATH_DELIMITER + device.key, ] for reference in device.actuator_references: self.inbound_topics.append( self.ACTUATOR_SET + self.DEVICE_PATH_DELIMITER + device.key + self.CHANNEL_DELIMITER + self.REFERENCE_PATH_PREFIX + reference ) self.logger.debug(f"inbound topics: {self.inbound_topics}") def get_inbound_topics(self) -> List[str]: return self.inbound_topics def is_keep_alive_response(self, message: Message) -> bool: keep_alive_response = message.topic.startswith( self.KEEP_ALIVE_RESPONSE ) self.logger.debug( f"{message.topic} is keep alive response: {keep_alive_response}" ) return keep_alive_response def is_actuation_command(self, message: Message) -> bool: actuation_command = message.topic.startswith(self.ACTUATOR_SET) self.logger.debug( f"{message.topic} is actuation command: {actuation_command}" ) return actuation_command def is_firmware_install(self, message: Message) -> bool: firmware_update_install = message.topic.startswith( self.FIRMWARE_UPDATE_INSTALL ) self.logger.debug( f"{message.topic} is firmware install: {firmware_update_install}" ) return firmware_update_install
Apache License 2.0
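is_firmware_abort above is a plain topic-prefix test. A minimal sketch with a hypothetical stand-in for the Message model and a hypothetical device key:

from collections import namedtuple

Message = namedtuple("Message", ["topic", "payload"])

FIRMWARE_UPDATE_ABORT = "p2d/firmware_update_abort/"
msg = Message("p2d/firmware_update_abort/d/my_device_key", None)

print(msg.topic.startswith(FIRMWARE_UPDATE_ABORT))  # True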
shivansh-007/piston-cli
piston/commands/input.py
user_input
python
def user_input(ctx: click.Context, theme: str, language: str) -> Union[tuple, str]:
    console = ctx.obj["console"]
    args = helpers.get_args(console)
    stdin = helpers.get_stdin(console)
    console.print("[green]Enter your code, (press esc + enter to run)[/green]")
    style = helpers.set_style(console, theme)
    console.print()
    code = prompt(
        "",
        lexer=PygmentsLexer(lexers_dict[language]),
        include_default_pygments_style=False,
        style=style,
        multiline=True,
    )
    payload = PistonQuery(language=language, args=args, stdin=stdin, code=code)
    data = services.query_piston(ctx, console, payload)
    if len(data["output"]) == 0:
        return "Your code ran without output.", language
    return data["output"].split("\n"), language
Make a multiline prompt for code input and send the code to the api. The compiled output from the api is returned.
https://github.com/shivansh-007/piston-cli/blob/c7e2e418174a98501443ccaa53d582c3e52502b9/piston/commands/input.py#L12-L40
from typing import Union

import click
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.shortcuts import prompt

from piston.utils import helpers, services
from piston.utils.constants import PistonQuery
from piston.utils.lexers import lexers_dict
MIT License
zfit/zfit
zfit/core/coordinates.py
Coordinates.with_axes
python
def with_axes(self, axes: Optional[ztyping.AxesTypeInput],
                  allow_superset: bool = True,
                  allow_subset: bool = True) -> "Coordinates":
        axes = convert_to_axes(axes)
        if axes is None:
            if self.obs is None:
                raise CoordinatesUnderdefinedError("Cannot remove axes (using None) for a Space without obs")
            new_coords = type(self)(obs=self.obs, axes=axes)
        else:
            axes = _convert_axes_to_int(axes)
            if not self.axes and not len(axes) == len(self.obs):
                raise AxesIncompatibleError(f"Trying to set axes {axes} to object with obs {self.obs}")
            if self.axes is None:
                new_coords = type(self)(obs=self.obs, axes=axes)
            else:
                if not set(axes).intersection(self.axes):
                    raise AxesIncompatibleError(f"The requested axes {axes} are not compatible with the current axes "
                                                f"{self.axes}")
                if not frozenset(axes) == frozenset(self.axes):
                    if not allow_superset and set(axes) - set(self.axes):
                        raise AxesIncompatibleError(
                            f"Axes {axes} are a superset of {self.axes}, not allowed according to flag.")
                    if not allow_subset and set(self.axes) - set(axes):
                        raise AxesIncompatibleError(
                            f"Axes {axes} are a subset of {self.axes}, not allowed according to flag.")
                new_indices = self.get_reorder_indices(axes=axes)
                new_obs = self._reorder_obs(indices=new_indices)
                new_axes = self._reorder_axes(indices=new_indices)
                new_coords = type(self)(obs=new_obs, axes=new_axes)
        return new_coords
Create a new instance that has `axes`; sorted by or set or dropped.

The behavior is as follows:

* axes are already set:
  * input axes are None: the axes will be dropped. If no observables are
    set, an error will be raised, as no coordinates will be assigned to
    this instance anymore.
  * input axes are not None: the instance will be sorted by the incoming
    axes. If obs or other objects have an associated order (e.g. data,
    limits,...), they will be reordered as well. If a strict subset is
    given (and allow_subset is True), only a subset will be returned.
    This can be used to retrieve a subspace of limits, data etc. If a
    strict superset is given (and allow_superset is True), the axes will
    be sorted accordingly as if the axes not contained in the instance's
    axes were not present in the input axes.
* axes are not set:
  * if the input axes are None, the same object is returned.
  * if the input axes are not None, they will be set as-is and now
    correspond to the already existing obs in the object.

Args:
    axes: Axes to sort/associate this instance with
    allow_superset: if False and a strict superset of the own axes is
        given, an error is raised.
    allow_subset: if False and a strict subset of the own axes is given,
        an error is raised.

Returns:
    A copy of the object with the new ordering/axes

Raises:
    CoordinatesUnderdefinedError: if obs is None and the instance does
        not have axes
    AxesIncompatibleError: if `axes` is a superset and allow_superset is
        False or a subset and allow_subset is False
https://github.com/zfit/zfit/blob/d293c8d02afcfa681273eed7825b519bdc60b320/zfit/core/coordinates.py#L142-L208
from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from zfit import z from ..util import ztyping from ..util.container import convert_to_container from ..util.exception import (AxesIncompatibleError, CoordinatesIncompatibleError, CoordinatesUnderdefinedError, IntentionAmbiguousError, ObsIncompatibleError, OverdefinedError, WorkInProgressError) from .interfaces import (ZfitData, ZfitDimensional, ZfitOrderableDimensional, ZfitSpace) class Coordinates(ZfitOrderableDimensional): def __init__(self, obs=None, axes=None): obs, axes, n_obs = self._check_convert_obs_axes(obs, axes) self._obs = obs self._axes = axes self._n_obs = n_obs @staticmethod def _check_convert_obs_axes(obs, axes): if isinstance(obs, ZfitOrderableDimensional): if axes is not None: raise OverdefinedError(f"Cannot use {obs}, a" " ZfitOrderableDimensional as obs with axes not None" " (currently, please open an issue if desired)") coord = obs return coord.obs, coord.axes, coord.n_obs obs = convert_to_obs_str(obs, container=tuple) axes = convert_to_axes(axes, container=tuple) if obs is None: if axes is None: raise CoordinatesUnderdefinedError("Neither obs nor axes specified") else: if any(not isinstance(ax, int) for ax in axes): raise ValueError(f"Axes have to be int. Currently: {axes}") n_obs = len(axes) else: if any(not isinstance(ob, str) for ob in obs): raise ValueError(f"Observables have to be strings. Currently: {obs}") n_obs = len(obs) if axes is not None: if not len(obs) == len(axes): raise CoordinatesIncompatibleError("obs and axes do not have the same length.") if not (obs or axes): raise CoordinatesUnderdefinedError(f"Neither obs {obs} nor axes {axes} are defined.") return obs, axes, n_obs @property def obs(self) -> ztyping.ObsTypeReturn: return self._obs @property def axes(self) -> ztyping.AxesTypeReturn: return self._axes @property def n_obs(self) -> int: return self._n_obs def with_obs(self, obs: Optional[ztyping.ObsTypeInput], allow_superset: bool = True, allow_subset: bool = True) -> "Coordinates": obs = convert_to_obs_str(obs) if obs is None: if self.axes is None: raise AxesIncompatibleError("cannot remove obs (using None) for a Space without axes") new_coords = type(self)(obs=obs, axes=self.axes) else: obs = _convert_obs_to_str(obs) if self.obs is None: new_coords = type(self)(obs=obs, axes=self.axes) else: if not set(obs).intersection(self.obs): raise ObsIncompatibleError(f"The requested obs {obs} are not compatible with the current obs " f"{self.obs}") if not frozenset(obs) == frozenset(self.obs): if not allow_superset and frozenset(obs) - frozenset(self.obs): raise ObsIncompatibleError( f"Obs {obs} are a superset of {self.obs}, not allowed according to flag.") if not allow_subset and set(self.obs) - set(obs): raise ObsIncompatibleError( f"Obs {obs} are a subset of {self.obs}, not allowed according to flag.") new_indices = self.get_reorder_indices(obs=obs) new_obs = self._reorder_obs(indices=new_indices) new_axes = self._reorder_axes(indices=new_indices) new_coords = type(self)(obs=new_obs, axes=new_axes) return new_coords
BSD 3-Clause New or Revised License
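with_axes above ultimately reorders obs and axes through get_reorder_indices, which is not shown in the record; assuming it returns, for each requested axis, that axis's position in the current tuple, the reorder reduces to plain Python (the coordinate values are hypothetical):

obs = ("x", "y", "z")
axes = (0, 1, 2)
requested = (2, 0, 1)

# For each requested axis, find its position in the current axes tuple,
# then pull obs and axes into the requested order together.
indices = [axes.index(ax) for ax in requested]
new_obs = tuple(obs[i] for i in indices)    # ('z', 'x', 'y')
new_axes = tuple(axes[i] for i in indices)  # (2, 0, 1)
print(new_obs, new_axes)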
awslabs/aws-ops-automator
source/code/boto_retry/__init__.py
WaitStrategy.__init__
python
def __init__(self, waits, random_factor=0):
        self.waits = waits
        self.random_factor = random_factor
        self._index = 0
Initializes constant wait strategy

:param waits: list of wait times
https://github.com/awslabs/aws-ops-automator/blob/362abd0717b48ecca7f20d8985ae7d76f045daf3/source/code/boto_retry/__init__.py#L197-L204
import os
import random
import types
from datetime import datetime
from time import time

import botocore.config

import services
from .aws_service_retry import AwsApiServiceRetry
from .dynamodb_service_retry import DynamoDbServiceRetry
from .ec2_service_retry import Ec2ServiceRetry
from .logs_service_retry import CloudWatchLogsServiceRetry

DEFAULT_SUFFIX = "_with_retries"
DEFAULT_WAIT_SECONDS = 10
DEFAULT_MAX_WAIT = 60
DEFAULT_RANDOM_FACTOR = 0.25
MAX_WAIT = 24 * 3600

EXPECTED_EXCEPTIONS = "_expected_boto3_exceptions_"

STATS_FORMAT = "{}: , calls: {}, failed: {}, retries: {}, timed-out {}"
LOG_FORMAT = "{:0>4d}-{:0>2d}-{:0>2d} - {:0>2d}:{:0>2d}:{:0>2d}.{:0>3s} {}, retry: {}"

ENV_BOTO_RETRY_STATS = "BOTO_RETRY_STATS"
ENV_BOTO_STATS_OUTPUT = "BOTO_RETRY_OUTPUT"
ENV_USER_AGENT = "USER_AGENT"

stats_enabled = False
boto_retry_stats = str(os.getenv(ENV_BOTO_RETRY_STATS, "false")).lower() == "true" or stats_enabled
boto_stats_output = str(os.getenv(ENV_BOTO_STATS_OUTPUT, "false")).lower() == "true"

statistics = {}


def make_method_with_retries(boto_client_or_resource, name, service_retry_strategy=None, method_suffix=DEFAULT_SUFFIX):
    retry_strategy = service_retry_strategy if service_retry_strategy is not None else AwsApiServiceRetry()
    method_name = name + method_suffix

    def wrapped_api_method(client_or_resource, **args):
        return retry_strategy.call(client_or_resource, name, args)

    setattr(boto_client_or_resource, method_name, types.MethodType(wrapped_api_method, boto_client_or_resource))
    return wrapped_api_method


def get_default_wait_strategy(service):
    if service == "logs":
        return MultiplyWaitStrategy(start=2, factor=2, max_wait=15, random_factor=DEFAULT_RANDOM_FACTOR)
    return MultiplyWaitStrategy(start=DEFAULT_WAIT_SECONDS,
                                max_wait=DEFAULT_MAX_WAIT,
                                random_factor=DEFAULT_RANDOM_FACTOR)


def get_default_retry_strategy(service, wait_strategy=None, context=None, logger=None):
    if wait_strategy is None:
        wait_strategy = get_default_wait_strategy(service)
    service_retry_strategy_class = _get_service_retry_strategy_class(service)
    strategy = service_retry_strategy_class(wait_strategy=wait_strategy, context=context, logger=logger)
    return strategy


def _get_service_retry_strategy_class(service):
    if service == "ec2":
        retry_class = Ec2ServiceRetry
    elif service == "dynamodb":
        retry_class = DynamoDbServiceRetry
    elif service == "logs":
        retry_class = CloudWatchLogsServiceRetry
    else:
        retry_class = AwsApiServiceRetry
    return retry_class


def get_client_with_retries(service_name, methods, context=None, region=None, session=None, wait_strategy=None,
                            method_suffix=DEFAULT_SUFFIX, logger=None):
    args = {
        "service_name": service_name,
    }
    if region is not None:
        args["region_name"] = region
    user_agent = os.getenv(ENV_USER_AGENT, None)
    if user_agent is not None:
        session_config = botocore.config.Config(user_agent=user_agent)
        args["config"] = session_config
    if session is not None:
        aws_session = session
    else:
        aws_session = services.get_session()
    result = aws_session.client(**args)
    service_retry_strategy = get_default_retry_strategy(context=context,
                                                        service=service_name,
                                                        wait_strategy=wait_strategy,
                                                        logger=logger)
    for method in methods:
        make_method_with_retries(boto_client_or_resource=result,
                                 name=method,
                                 service_retry_strategy=service_retry_strategy,
                                 method_suffix=method_suffix)
    return result


def add_retry_methods_to_resource(resource, methods, context=None, method_suffix=DEFAULT_SUFFIX):
    service_name = type(resource).__name__.split(".")[0]
    service_retry_strategy_class = _get_service_retry_strategy_class(service_name)
    retry_wait_strategy = get_default_wait_strategy(service_name)
    for method in methods:
        make_method_with_retries(boto_client_or_resource=resource,
                                 name=method,
                                 method_suffix=method_suffix,
                                 service_retry_strategy=service_retry_strategy_class(
                                     wait_strategy=retry_wait_strategy,
                                     context=context)
                                 )
    return resource


def _apply_randomness(value, random_factor):
    if random_factor < 0 or random_factor > 1:
        raise ValueError("Random factor must be in range 0 to 1")
    return value + (random.uniform(random_factor * -1, random_factor) * value) if random_factor != 0 else value


class WaitStrategy(object):
Apache License 2.0
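A sketch of how the helpers above are wired together inside the Ops Automator codebase (the region and method list are hypothetical; note the default method_suffix "_with_retries"):

import boto_retry

ec2 = boto_retry.get_client_with_retries(
    "ec2", ["describe_instances"], region="us-east-1")

# make_method_with_retries attached describe_instances_with_retries, which
# wraps the boto3 call in the Ec2ServiceRetry strategy.
resp = ec2.describe_instances_with_retries()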
weasyl/weasyl
weasyl/commishinfo.py
_charmap_to_currency_code
python
def _charmap_to_currency_code(charmap):
    for c in charmap:
        if c in CURRENCY_CHARMAP:
            return CURRENCY_CHARMAP[c].code
    return "USD"
Convert Weasyl's internal single-character representation of currencies
to standard ISO4217 codes for use in comparing against exchange rate APIs

:param charmap: String containing ideally one or zero characters used as
    currency indicators by Weasyl
:return: A 3-letter ISO4217 currency code. Returns "USD" if no match found.
https://github.com/weasyl/weasyl/blob/80c86942c6f20a815086e2895fdad51d3aa77eed/weasyl/commishinfo.py#L98-L109
import re from collections import namedtuple from decimal import Decimal from urllib.parse import quote as urlquote from sentry_sdk import capture_message from libweasyl.cache import region from weasyl import config from weasyl import define as d from weasyl import macro as m from weasyl.error import PostgresError, WeasylError _MAX_PRICE = 99999999 CURRENCY_PRECISION = 2 Currency = namedtuple('Currency', ('code', 'name', 'symbol')) CURRENCY_CHARMAP = { "": Currency(code="USD", name="United States Dollar", symbol="$"), "e": Currency(code="EUR", name="Euro", symbol="€"), "p": Currency(code="GBP", name="British Pound Sterling", symbol="£"), "y": Currency(code="JPY", name="Japanese Yen", symbol="J¥"), "c": Currency(code="CAD", name="Canadian Dollar", symbol="C$"), "m": Currency(code="MXN", name="Mexican Peso", symbol="M$"), "u": Currency(code="AUD", name="Australian Dollar", symbol="A$"), "z": Currency(code="NZD", name="New Zealand Dollar", symbol="NZ$"), "n": Currency(code="CNY", name="Chinese Yuan", symbol="C¥"), "f": Currency(code="CHF", name="Swiss Franc", symbol="Fr"), } PRESET_COMMISSION_CLASSES = [ ("Visual", ["Sketch", "Badge", "Icon", "Reference", "Fullbody", "Headshot", "Chibi"]), ("Literary", ["Story"]), ("Multimedia", ["Music"]), ] def parse_currency(target): if not target: return 0 digits = re.sub("[^0-9.]", "", target) if not digits: return 0 return int(Decimal(digits) * (10 ** CURRENCY_PRECISION)) @region.cache_on_arguments(expiration_time=60 * 60 * 24, should_cache_fn=bool) def _fetch_rates_no_cache_failure(): if not config.config_read_bool('convert_currency'): return None try: response = d.http_get("https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml") except WeasylError: return None else: capture_message("Fetched exchange rates") rates = {'EUR': 1.0} for match in re.finditer(r"currency='([A-Z]{3})' rate='([0-9.]+)'", response.text): code, rate = match.groups() try: rate = float(rate) except ValueError: pass else: if 0.0 < rate < float('inf'): rates[code] = rate return rates @region.cache_on_arguments(expiration_time=60 * 60) def _fetch_rates(): return _fetch_rates_no_cache_failure()
Apache License 2.0
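A short sketch of the two conversions recorded above, using hypothetical inputs:

from weasyl.commishinfo import parse_currency, _charmap_to_currency_code

print(parse_currency("$15.50"))        # 1550 (fixed-point, 2 decimal places)
print(_charmap_to_currency_code("e"))  # 'EUR'
print(_charmap_to_currency_code("q"))  # 'USD' (no match falls back)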
jhuapl-boss/boss
django/bosscore/error.py
BossUserNotFoundError.__init__
python
def __init__(self, object):
        super(BossUserNotFoundError, self).__init__("{} does not exist. Ensure that the user has logged in"
                                                    .format(object), ErrorCodes.USER_NOT_FOUND)
Custom HTTP Error class for object not found errors

Args:
    object (str): Name of resource/object that user is trying to access/manipulate
https://github.com/jhuapl-boss/boss/blob/c2e26d272bd7b8d54abdc2948193163537e31291/django/bosscore/error.py#L285-L292
from django.http import JsonResponse
from bossutils.logger import bossLogger
from enum import IntEnum
import sys
import json


class ErrorCodes(IntEnum):
    INVALID_URL = 1000
    INVALID_CUTOUT_ARGS = 1001
    TYPE_ERROR = 1002
    INVALID_POST_ARGUMENT = 1003
    UNABLE_TO_VALIDATE = 1004
    INVALID_ARGUMENT = 1005
    BAD_REQUEST = 1006
    REQUEST_TOO_LARGE = 2000
    DATATYPE_DOES_NOT_MATCH = 2001
    DATA_DIMENSION_MISMATCH = 2002
    DATATYPE_NOT_SUPPORTED = 2003
    INVALID_REQUEST = 2004
    UNSUPPORTED_4D = 2005
    INVALID_STATE = 2006
    MISSING_ROLE = 3000
    MISSING_PERMISSION = 3001
    UNRECOGNIZED_PERMISSION = 3002
    INGEST_NOT_CREATOR = 3003
    ACCESS_DENIED_UNKNOWN = 3004
    RESOURCE_NOT_FOUND = 4000
    GROUP_NOT_FOUND = 4001
    USER_NOT_FOUND = 4002
    INTEGRITY_ERROR = 4003
    OBJECT_NOT_FOUND = 4004
    RESOURCE_MARKED_FOR_DELETION = 4005
    IO_ERROR = 5000
    UNSUPPORTED_TRANSPORT_FORMAT = 5001
    SERIALIZATION_ERROR = 5002
    DESERIALIZATION_ERROR = 5003
    GROUP_EXISTS = 6001
    RESOURCE_EXISTS = 6002
    KEYCLOAK_EXCEPTION = 7001
    INVALID_ROLE = 7002
    FUTURE = 9000
    BOSS_SYSTEM_ERROR = 9001
    UNHANDLED_EXCEPTION = 9002
    UNSUPPORTED_VERSION = 9003


RESP_CODES = {
    ErrorCodes.INVALID_URL: 400,
    ErrorCodes.INVALID_CUTOUT_ARGS: 400,
    ErrorCodes.TYPE_ERROR: 400,
    ErrorCodes.INVALID_POST_ARGUMENT: 400,
    ErrorCodes.INVALID_ARGUMENT: 400,
    ErrorCodes.BAD_REQUEST: 400,
    ErrorCodes.UNABLE_TO_VALIDATE: 400,
    ErrorCodes.REQUEST_TOO_LARGE: 413,
    ErrorCodes.DATATYPE_DOES_NOT_MATCH: 400,
    ErrorCodes.DATA_DIMENSION_MISMATCH: 400,
    ErrorCodes.DATATYPE_NOT_SUPPORTED: 400,
    ErrorCodes.UNSUPPORTED_4D: 400,
    ErrorCodes.INVALID_STATE: 409,
    ErrorCodes.INVALID_REQUEST: 404,
    ErrorCodes.MISSING_ROLE: 403,
    ErrorCodes.MISSING_PERMISSION: 403,
    ErrorCodes.UNRECOGNIZED_PERMISSION: 404,
    ErrorCodes.INGEST_NOT_CREATOR: 403,
    ErrorCodes.RESOURCE_NOT_FOUND: 404,
    ErrorCodes.GROUP_NOT_FOUND: 404,
    ErrorCodes.USER_NOT_FOUND: 404,
    ErrorCodes.INTEGRITY_ERROR: 400,
    ErrorCodes.OBJECT_NOT_FOUND: 404,
    ErrorCodes.RESOURCE_MARKED_FOR_DELETION: 404,
    ErrorCodes.IO_ERROR: 404,
    ErrorCodes.UNSUPPORTED_TRANSPORT_FORMAT: 404,
    ErrorCodes.SERIALIZATION_ERROR: 404,
    ErrorCodes.DESERIALIZATION_ERROR: 404,
    ErrorCodes.GROUP_EXISTS: 404,
    ErrorCodes.RESOURCE_EXISTS: 404,
    ErrorCodes.KEYCLOAK_EXCEPTION: 500,
    ErrorCodes.INVALID_ROLE: 403,
    ErrorCodes.FUTURE: 404,
    ErrorCodes.BOSS_SYSTEM_ERROR: 400,
    ErrorCodes.UNHANDLED_EXCEPTION: 500,
    ErrorCodes.UNSUPPORTED_VERSION: 400
}


class BossError(Exception):
    def __init__(self, *args):
        if args[1] in RESP_CODES:
            self.status_code = RESP_CODES[args[1]]
        else:
            self.status_code = 400
        self.message = args[0]
        self.error_code = args[1]

    def to_http(self):
        return BossHTTPError(self.message, self.error_code)


class BossParserError(object):
    def __init__(self, *args):
        if args[1] in RESP_CODES:
            self.status_code = RESP_CODES[args[1]]
        else:
            self.status_code = 400
        self.message = args[0]
        self.error_code = args[1]

    def to_http(self):
        return BossHTTPError(self.message, self.error_code)


class BossHTTPError(JsonResponse):
    def __init__(self, message, code):
        self.status_code = RESP_CODES[code]
        blog = bossLogger()
        blog.info("BossHTTPError - Status: {0} - Code: {1} - Message: {2}".format(self.status_code, code, message))
        data = {'status': self.status_code, 'code': code, 'message': message}
        super(BossHTTPError, self).__init__(data)


class BossKeycloakError(JsonResponse):
    def __init__(self, message, code=ErrorCodes.KEYCLOAK_EXCEPTION):
        ex = sys.exc_info()[1]
        self.status_code = ex.status if ex else RESP_CODES[code]
        data = {
            'status': self.status_code,
            'code': ErrorCodes.KEYCLOAK_EXCEPTION,
            'message': message
        }
        if ex:
            if isinstance(ex.data, str):
                val = json.loads(ex.data)
            else:
                val = ex.data
            data.update(val)
        msg = "BossKeycloakError"
        for k in data:
            msg += " - {}: {}".format(k.capitalize(), data[k])
        log = bossLogger()
        log.info(msg)
        super(BossKeycloakError, self).__init__(data)


class BossResourceNotFoundError(BossHTTPError):
    def __init__(self, object):
        super(BossResourceNotFoundError, self).__init__("{} does not exist.".format(object),
                                                        ErrorCodes.RESOURCE_NOT_FOUND)


class BossUserNotFoundError(BossHTTPError):
Apache License 2.0
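How the pieces above fit together, assuming a configured Django project (the username is hypothetical): the error class resolves its HTTP status through RESP_CODES and renders as a JsonResponse.

from bosscore.error import BossUserNotFoundError, ErrorCodes, RESP_CODES

print(RESP_CODES[ErrorCodes.USER_NOT_FOUND])  # 404
resp = BossUserNotFoundError("jdoe")          # JsonResponse with status 404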
sepandhaghighi/pycm
pycm/pycm_compare.py
Compare.print_report
python
def print_report(self):
        report = compare_report_print(
            self.sorted,
            self.scores,
            self.best_name)
        print(report)
Print Compare report.

:return: None
https://github.com/sepandhaghighi/pycm/blob/efca98f6dc92fd1eb5ac05a48aec01680cd344bc/pycm/pycm_compare.py#L73-L81
from __future__ import division
from .pycm_error import pycmCompareError
from .pycm_output import *
from .pycm_util import *
from .pycm_param import *
from .pycm_obj import ConfusionMatrix
import os
import numpy
from warnings import warn


class Compare():
    def __init__(self, cm_dict, by_class=False, weight=None, digit=5):
        self.scores = None
        self.sorted = None
        self.classes = None
        __compare_assign_handler__(self, cm_dict, weight, digit)
        __compare_class_handler__(self, cm_dict)
        __compare_overall_handler__(self, cm_dict)
        __compare_rounder__(self, cm_dict)
        scores_list = list(self.scores.values())
        (max_overall_name, max_class_name) = __compare_sort_handler__(self)
        if scores_list.count(self.scores[max_class_name]) == 1:
            if by_class:
                self.best = cm_dict[max_class_name]
                self.best_name = max_class_name
            else:
                if max_overall_name == max_class_name:
                    self.best = cm_dict[max_class_name]
                    self.best_name = max_overall_name
                else:
                    warn(COMPARE_RESULT_WARNING, RuntimeWarning)
        else:
            warn(COMPARE_RESULT_WARNING, RuntimeWarning)
MIT License
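Typical use of Compare around print_report, with hypothetical toy vectors (ConfusionMatrix and Compare are pycm's public entry points):

from pycm import ConfusionMatrix, Compare

cm1 = ConfusionMatrix(actual_vector=[0, 1, 1, 0], predict_vector=[0, 1, 0, 0])
cm2 = ConfusionMatrix(actual_vector=[0, 1, 1, 0], predict_vector=[0, 1, 1, 1])
cp = Compare({"model1": cm1, "model2": cm2})
cp.print_report()   # prints the ranked score table and the best model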
hubblestack/hubble-salt
hubblestack_nova/win_auditpol.py
audit
python
def audit(data_list, tags, labels, debug=False, **kwargs):
    __data__ = {}
    __auditdata__ = _auditpol_import()
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __data__ = apply_labels(__data__, labels)
    __tags__ = _get_tags(__data__)
    if debug:
        log.debug('auditpol audit __data__:')
        log.debug(__data__)
        log.debug('auditpol audit __tags__:')
        log.debug(__tags__)
    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                name = tag_data['name']
                audit_type = tag_data['type']
                match_output = tag_data['match_output'].lower()
                if 'blacklist' in audit_type:
                    if name not in __auditdata__:
                        ret['Success'].append(tag_data)
                    else:
                        tag_data['failure_reason'] = ("Value of blacklisted attribute '{0}' is "
                                                      "configured on your system. It should not "
                                                      "be configured".format(name))
                        ret['Failure'].append(tag_data)
                if 'whitelist' in audit_type:
                    if name in __auditdata__:
                        audit_value = __auditdata__[name].lower()
                        tag_data['found_value'] = audit_value
                        secret = _translate_value_type(audit_value, tag_data['value_type'], match_output)
                        if secret:
                            ret['Success'].append(tag_data)
                        else:
                            tag_data['failure_reason'] = ("Value of attribute '{0}' is currently"
                                                          " set as '{1}'. Expected value is '{2}({3})'"
                                                          .format(name, audit_value, match_output,
                                                                  tag_data['value_type']))
                            ret['Failure'].append(tag_data)
                    else:
                        log.debug('When trying to audit the advanced auditpol section,'
                                  ' the yaml contained incorrect data for the key')
    return ret
Runs auditpol on the local machine and audits the return data with the CIS yaml processed by __virtual__
https://github.com/hubblestack/hubble-salt/blob/fe9da66ac84be972be6966f57fae061736e78e8c/hubblestack_nova/win_auditpol.py#L50-L108
from __future__ import absolute_import

import copy
import csv
import fnmatch
import logging

import salt.utils
import salt.utils.platform

log = logging.getLogger(__name__)

__virtualname__ = 'win_auditpol'


def __virtual__():
    if not salt.utils.platform.is_windows():
        return False, 'This audit module only runs on windows'
    return True


def apply_labels(__data__, labels):
    labelled_data = {}
    if labels:
        labelled_data[__virtualname__] = {}
        for topkey in ('blacklist', 'whitelist'):
            if topkey in __data__.get(__virtualname__, {}):
                labelled_test_cases = []
                for test_case in __data__[__virtualname__].get(topkey, []):
                    if isinstance(test_case, dict) and test_case:
                        test_case_body = test_case.get(next(iter(test_case)))
                        if set(labels).issubset(set(test_case_body.get('labels', []))):
                            labelled_test_cases.append(test_case)
                labelled_data[__virtualname__][topkey] = labelled_test_cases
    else:
        labelled_data = __data__
    return labelled_data
Apache License 2.0
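The whitelist branch of audit() reduces to a dictionary lookup plus a value comparison. A sketch with hypothetical audit data, using a plain equality check as a stand-in for _translate_value_type:

__auditdata__ = {"Audit Logon": "success and failure"}

tag_data = {"name": "Audit Logon", "type": "whitelist",
            "match_output": "Success and Failure", "value_type": "equal"}

found = __auditdata__.get(tag_data["name"], "").lower()
passed = found == tag_data["match_output"].lower()  # stand-in for _translate_value_type
print(passed)  # True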
openstack/manila
manila/api/v2/share_replicas.py
ShareReplicationController.force_delete
python
def force_delete(self, req, id, body):
        return self._force_delete(req, id, body)
Force deletion on the database, attempt on the backend.
https://github.com/openstack/manila/blob/34d209484366cd921e052d37c5f9daef5e97af20/manila/api/v2/share_replicas.py#L277-L279
import six
from six.moves import http_client
import webob
from webob import exc

from manila.api import common
from manila.api.openstack import wsgi
from manila.api.views import share_replicas as replication_view
from manila.common import constants
from manila import db
from manila import exception
from manila.i18n import _
from manila import share

MIN_SUPPORTED_API_VERSION = '2.11'
PRE_GRADUATION_VERSION = '2.55'
GRADUATION_VERSION = '2.56'


class ShareReplicationController(wsgi.Controller, wsgi.AdminActionsMixin):
    resource_name = 'share_replica'
    _view_builder_class = replication_view.ReplicationViewBuilder

    def __init__(self):
        super(ShareReplicationController, self).__init__()
        self.share_api = share.API()

    def _update(self, *args, **kwargs):
        db.share_replica_update(*args, **kwargs)

    def _get(self, *args, **kwargs):
        return db.share_replica_get(*args, **kwargs)

    def _delete(self, context, resource, force=True):
        try:
            self.share_api.delete_share_replica(context, resource, force=True)
        except exception.ReplicationException as e:
            raise exc.HTTPBadRequest(explanation=six.text_type(e))

    @wsgi.Controller.api_version(
        MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True)
    def index(self, req):
        return self._get_replicas(req)

    @wsgi.Controller.api_version(GRADUATION_VERSION)
    def index(self, req):
        return self._get_replicas(req)

    @wsgi.Controller.api_version(
        MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True)
    def detail(self, req):
        return self._get_replicas(req, is_detail=True)

    @wsgi.Controller.api_version(GRADUATION_VERSION)
    def detail(self, req):
        return self._get_replicas(req, is_detail=True)

    @wsgi.Controller.authorize('get_all')
    def _get_replicas(self, req, is_detail=False):
        context = req.environ['manila.context']
        share_id = req.params.get('share_id')
        if share_id:
            try:
                replicas = db.share_replicas_get_all_by_share(
                    context, share_id)
            except exception.NotFound:
                msg = _("Share with share ID %s not found.") % share_id
                raise exc.HTTPNotFound(explanation=msg)
        else:
            replicas = db.share_replicas_get_all(context)
        limited_list = common.limited(replicas, req)
        if is_detail:
            replicas = self._view_builder.detail_list(req, limited_list)
        else:
            replicas = self._view_builder.summary_list(req, limited_list)
        return replicas

    @wsgi.Controller.api_version(
        MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True)
    def show(self, req, id):
        return self._show(req, id)

    @wsgi.Controller.api_version(GRADUATION_VERSION)
    def show(self, req, id):
        return self._show(req, id)

    @wsgi.Controller.authorize('show')
    def _show(self, req, id):
        context = req.environ['manila.context']
        try:
            replica = db.share_replica_get(context, id)
        except exception.ShareReplicaNotFound:
            msg = _("Replica %s not found.") % id
            raise exc.HTTPNotFound(explanation=msg)
        return self._view_builder.detail(req, replica)

    @wsgi.Controller.api_version(
        MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True)
    @wsgi.response(202)
    def create(self, req, body):
        return self._create(req, body)

    @wsgi.Controller.api_version(GRADUATION_VERSION)
    @wsgi.response(202)
    def create(self, req, body):
        return self._create(req, body)

    @wsgi.Controller.authorize('create')
    def _create(self, req, body):
        context = req.environ['manila.context']
        if not self.is_valid_body(body, 'share_replica'):
            msg = _("Body does not contain 'share_replica' information.")
            raise exc.HTTPUnprocessableEntity(explanation=msg)
        share_id = body.get('share_replica').get('share_id')
        availability_zone = body.get('share_replica').get('availability_zone')
        if not share_id:
            msg = _("Must provide Share ID to add replica.")
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            share_ref = db.share_get(context, share_id)
        except exception.NotFound:
            msg = _("No share exists with ID %s.")
            raise exc.HTTPNotFound(explanation=msg % share_id)
        share_network_id = share_ref.get('share_network_id', None)
        if share_network_id:
            share_network = db.share_network_get(context, share_network_id)
            common.check_share_network_is_active(share_network)
        try:
            new_replica = self.share_api.create_share_replica(
                context, share_ref, availability_zone=availability_zone,
                share_network_id=share_network_id)
        except exception.AvailabilityZoneNotFound as e:
            raise exc.HTTPBadRequest(explanation=six.text_type(e))
        except exception.ReplicationException as e:
            raise exc.HTTPBadRequest(explanation=six.text_type(e))
        except exception.ShareBusyException as e:
            raise exc.HTTPBadRequest(explanation=six.text_type(e))
        return self._view_builder.detail(req, new_replica)

    @wsgi.Controller.api_version(
        MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True)
    def delete(self, req, id):
        return self._delete_share_replica(req, id)

    @wsgi.Controller.api_version(GRADUATION_VERSION)
    def delete(self, req, id):
        return self._delete_share_replica(req, id)

    @wsgi.Controller.authorize('delete')
    def _delete_share_replica(self, req, id):
        context = req.environ['manila.context']
        try:
            replica = db.share_replica_get(context, id)
        except exception.ShareReplicaNotFound:
            msg = _("No replica exists with ID %s.")
            raise exc.HTTPNotFound(explanation=msg % id)
        try:
            self.share_api.delete_share_replica(context, replica)
        except exception.ReplicationException as e:
            raise exc.HTTPBadRequest(explanation=six.text_type(e))
        return webob.Response(status_int=http_client.ACCEPTED)

    @wsgi.Controller.api_version(
        MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True)
    @wsgi.response(202)
    @wsgi.action('promote')
    def promote(self, req, id, body):
        return self._promote(req, id, body)

    @wsgi.Controller.api_version(GRADUATION_VERSION)
    @wsgi.response(202)
    @wsgi.action('promote')
    def promote(self, req, id, body):
        return self._promote(req, id, body)

    @wsgi.Controller.authorize('promote')
    def _promote(self, req, id, body):
        context = req.environ['manila.context']
        try:
            replica = db.share_replica_get(context, id)
        except exception.ShareReplicaNotFound:
            msg = _("No replica exists with ID %s.")
            raise exc.HTTPNotFound(explanation=msg % id)
        share_network_id = replica.get('share_network_id')
        if share_network_id:
            share_network = db.share_network_get(context, share_network_id)
            common.check_share_network_is_active(share_network)
        replica_state = replica.get('replica_state')
        if replica_state == constants.REPLICA_STATE_ACTIVE:
            return webob.Response(status_int=http_client.OK)
        try:
            replica = self.share_api.promote_share_replica(context, replica)
        except exception.ReplicationException as e:
            raise exc.HTTPBadRequest(explanation=six.text_type(e))
        except exception.AdminRequired as e:
            raise exc.HTTPForbidden(explanation=six.text_type(e))
        return self._view_builder.detail(req, replica)

    @wsgi.Controller.api_version(
        MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True)
    @wsgi.action('reset_status')
    def reset_status(self, req, id, body):
        return self._reset_status(req, id, body)

    @wsgi.Controller.api_version(GRADUATION_VERSION)
    @wsgi.action('reset_status')
    def reset_status(self, req, id, body):
        return self._reset_status(req, id, body)

    @wsgi.Controller.api_version(
        MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True)
    @wsgi.action('force_delete')
    def force_delete(self, req, id, body):
        return self._force_delete(req, id, body)

    @wsgi.Controller.api_version(GRADUATION_VERSION)
    @wsgi.action('force_delete')
Apache License 2.0
storjold/dataserv
dataserv/Farmer.py
Farmer.register
python
def register(self, payout_addr=None):
        self.payout_addr = payout_addr if payout_addr else self.btc_addr
        self.validate(registering=True)
        db.session.add(self)
        db.session.commit()
Add the farmer to the database.
https://github.com/storjold/dataserv/blob/65b89f1be7478fd38e7f3d2b608f465dc4b17f93/dataserv/Farmer.py#L81-L86
import json import hashlib import storjcore from datetime import datetime from sqlalchemy import DateTime from dataserv.run import db, app from btctxstore import BtcTxStore from dataserv.config import logging logger = logging.getLogger(__name__) is_btc_address = BtcTxStore().validate_address def sha256(content): content = content.encode('utf-8') return hashlib.sha256(content).hexdigest() class Farmer(db.Model): id = db.Column(db.Integer, primary_key=True) btc_addr = db.Column(db.String(35), unique=True) payout_addr = db.Column(db.String(35)) last_seen = db.Column(DateTime, index=True, default=datetime.utcnow) height = db.Column(db.Integer, default=0) def __init__(self, btc_addr, last_seen=None): if not is_btc_address(btc_addr): msg = "Invalid BTC Address: {0}".format(btc_addr) logger.warning(msg) raise ValueError(msg) self.btc_addr = btc_addr self.last_seen = last_seen def __repr__(self): return '<Farmer BTC Address: %r>' % self.btc_addr @staticmethod def get_server_address(): return app.config["ADDRESS"] @staticmethod def get_server_authentication_timeout(): return app.config["AUTHENTICATION_TIMEOUT"] def authenticate(self, headers): if app.config["SKIP_AUTHENTICATION"]: return True if not headers.get("Authorization"): raise storjcore.auth.AuthError("Authorization header required!") if not headers.get("Date"): raise storjcore.auth.AuthError("Date header required!") btctxstore = BtcTxStore() timeout = self.get_server_authentication_timeout() recipient_address = self.get_server_address() sender_address = self.btc_addr return storjcore.auth.verify_headers(btctxstore, headers, timeout, sender_address, recipient_address) def validate(self, registering=False): if not is_btc_address(self.payout_addr): msg = "Invalid BTC Address: {0}".format(self.payout_addr) logger.warning(msg) raise ValueError(msg) exists = self.exists() if exists and registering: msg = "Address already registered: {0}".format(self.payout_addr) logger.warning(msg) raise LookupError(msg)
MIT License
therve/twotp
twotp/parser.py
Parser.parse_float
python
def parse_float(self, data):
        floatData = data[:31]
        try:
            nullIndex = floatData.index(chr(0))
            floatStr = floatData[0:nullIndex]
        except ValueError:
            floatStr = floatData
        floatValue = float(floatStr)
        return Float(floatValue), data[31:]
Parse a float number.
https://github.com/therve/twotp/blob/67d0c9475c5c211e8f9d6280f8c3e04fff944a73/twotp/parser.py#L233-L244
import struct import zlib from twotp.term import Integer, List, Tuple, Float, Atom, Reference from twotp.term import Port, Pid, Binary, Fun, NewFun, Export, BitBinary from twotp.term import ConstantHolder, Dict, Set class UnhandledCode(KeyError): class RemainingDataError(ValueError): class Parser(ConstantHolder): def __init__(self): ConstantHolder.__init__(self) self.mapping = {} for name, val in ConstantHolder.__dict__.iteritems(): if name.startswith('MAGIC_'): name = name.split('MAGIC_')[1].lower() self.mapping[val] = getattr(self, 'parse_%s' % name) def parseChar(self, bytes): return ord(bytes) def parseShort(self, bytes): return struct.unpack("!H", bytes)[0] def parseInt(self, bytes): return struct.unpack("!I", bytes)[0] def parse_version(self, data): raise RuntimeError("Should not ne here!") def parse_compressed(self, data): length = self.parseInt(data[:4]) uncompressedData = zlib.decompress(data[4:], 15, length) if len(uncompressedData) != length: raise ValueError("Too big uncompressed data") return self.binaryToTerm(uncompressedData) def parse_string(self, data): strLen = self.parseShort(data[:2]) strText = data[2:2 + strLen] lst = map(ord, strText) return List(lst), data[2 + strLen:] def parse_nil(self, data): return List([]), data def parse_list(self, data): arity = self.parseInt(data[:4]) elements, data = self._parse_seq(arity, data[4:]) if len(data) == 0 or ord(data[0]) != self.MAGIC_NIL: term, data = self.binaryToTerm(data) elements.append(term) return List(elements), data else: return List(elements), data[1:] def _parse_seq(self, arity, data): res = [] for i in xrange(arity): term, data = self.binaryToTerm(data) res.append(term) return res, data def _identify_dict(self, elements): if elements: if isinstance(elements[0], Atom) and elements[0].text == "dict": if len(elements) == 9: d = [] for i in elements[8][0]: if i: d.append((i[0][0], i[0][1])) return d return None def _identify_set(self, elements): if elements: if isinstance(elements[0], Atom) and elements[0].text == "set": if len(elements) == 9: d = [] for i in elements[8][0]: if i: d.append(i[0]) return d return None def parse_small_tuple(self, data): arity = self.parseChar(data[0]) elements, data = self._parse_seq(arity, data[1:]) d = self._identify_dict(elements) if d is not None: return Dict(d), data s = self._identify_set(elements) if s is not None: return Set(s), data return Tuple(elements), data def parse_large_tuple(self, data): arity = self.parseInt(data[:4]) elements, data = self._parse_seq(arity, data[4:]) d = self._identify_dict(elements) if d is not None: return Dict(d), data s = self._identify_set(elements) if s is not None: return Set(s), data return Tuple(elements), data def parse_large_big(self, data): n = self.parseInt(data[:4]) sign = self.parseChar(data[4]) bignum = 0 for i in xrange(n - 1, -1, -1): d = self.parseChar(data[5 + i]) bignum = bignum * 256 + d if sign: bignum *= -1 return Integer(bignum), data[5 + n:] def parse_small_big(self, data): n = self.parseChar(data[0]) sign = self.parseChar(data[1]) bignum = 0 for i in xrange(n - 1, -1, -1): d = self.parseChar(data[2 + i]) bignum = bignum * 256 + d if sign: bignum *= -1 return Integer(bignum), data[2 + n:]
MIT License
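parse_float above consumes a 31-byte, NUL-padded ASCII float (Erlang's external float format). The parser is Python 2 and works on str; this Python 3 sketch of the same logic uses bytes, and the payload is hypothetical:

# Build a 31-byte NUL-padded float string followed by trailing data.
payload = b"1.50000000000000000000e+00".ljust(31, b"\x00") + b"rest"

floatData = payload[:31]
floatStr = floatData.split(b"\x00", 1)[0]  # same effect as index(chr(0))
print(float(floatStr), payload[31:])       # 1.5 b'rest'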
mozilla/firefox-flicks
vendor-local/lib/python/oauthlib/common.py
add_params_to_uri
python
def add_params_to_uri(uri, params, fragment=False):
    sch, net, path, par, query, fra = urlparse.urlparse(uri)
    if fragment:
        # Note: this vendored version builds the new fragment from the query
        # component; later oauthlib releases changed this to use fra here.
        fra = add_params_to_qs(query, params)
    else:
        query = add_params_to_qs(query, params)
    return urlparse.urlunparse((sch, net, path, par, query, fra))
Add a list of two-tuples to the uri query components.
https://github.com/mozilla/firefox-flicks/blob/ad19ed59aac682744badae6d19a149327037f293/vendor-local/lib/python/oauthlib/common.py#L212-L219
from __future__ import absolute_import, unicode_literals import random import re import sys import time try: from urllib import quote as _quote from urllib import unquote as _unquote from urllib import urlencode as _urlencode except ImportError: from urllib.parse import quote as _quote from urllib.parse import unquote as _unquote from urllib.parse import urlencode as _urlencode try: import urlparse except ImportError: import urllib.parse as urlparse UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789') always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') PY3 = sys.version_info[0] == 3 if PY3: unicode_type = str bytes_type = bytes else: unicode_type = unicode bytes_type = str def quote(s, safe=b'/'): s = _quote(s.encode('utf-8'), safe) if isinstance(s, bytes_type): s = s.decode('utf-8') return s def unquote(s): s = _unquote(s) if isinstance(s, bytes_type): s = s.decode('utf-8') return s def urlencode(params): utf8_params = encode_params_utf8(params) urlencoded = _urlencode(utf8_params) if isinstance(urlencoded, unicode_type): return urlencoded else: return urlencoded.decode("utf-8") def encode_params_utf8(params): encoded = [] for k, v in params: encoded.append(( k.encode('utf-8') if isinstance(k, unicode_type) else k, v.encode('utf-8') if isinstance(v, unicode_type) else v)) return encoded def decode_params_utf8(params): decoded = [] for k, v in params: decoded.append(( k.decode('utf-8') if isinstance(k, bytes_type) else k, v.decode('utf-8') if isinstance(v, bytes_type) else v)) return decoded urlencoded = set(always_safe) | set('=&;%+~') def urldecode(query): if query and not set(query) <= urlencoded: query = quote(query) invalid_hex = '%[^0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]' if len(re.findall(invalid_hex, query)): raise ValueError('Invalid hex encoding in query string.') query = query.decode('utf-8') if isinstance(query, bytes_type) else query params = urlparse.parse_qsl(query, keep_blank_values=True) return decode_params_utf8(params) def extract_params(raw): if isinstance(raw, bytes_type) or isinstance(raw, unicode_type): try: params = urldecode(raw) except ValueError: params = None elif hasattr(raw, '__iter__'): try: dict(raw) except ValueError: params = None except TypeError: params = None else: params = list(raw.items() if isinstance(raw, dict) else raw) params = decode_params_utf8(params) else: params = None return params def generate_nonce(): return unicode_type(unicode_type(random.getrandbits(64)) + generate_timestamp()) def generate_timestamp(): return unicode_type(int(time.time())) def generate_token(length=30, chars=UNICODE_ASCII_CHARACTER_SET): rand = random.SystemRandom() return ''.join(rand.choice(chars) for x in range(length)) def add_params_to_qs(query, params): if isinstance(query, dict): queryparams = query.items() else: queryparams = urlparse.parse_qsl(query, keep_blank_values=True) queryparams.extend(params) return urlencode(queryparams)
BSD 3-Clause New or Revised License
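A sketch of the default (query-string) path of add_params_to_uri, with a hypothetical callback URI:

from oauthlib.common import add_params_to_uri

uri = add_params_to_uri("https://example.com/cb?state=xyz",
                        [("code", "42"), ("scope", "read")])
print(uri)  # https://example.com/cb?state=xyz&code=42&scope=read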
freifeld/cpabdiffeo
cpab/cpa2d/inference/transformation/Register.py
Register.set_dense
python
def set_dense(self, domain_start=-10, domain_end=10):
        self.src_dense = self.tw.pts_src_dense
        self.transformed_dense = self.tw.transformed_dense
Remarks:
1) The range of the domain has already been determined in self.tw
2) For now, the src is always the "uniform cdf"
https://github.com/freifeld/cpabdiffeo/blob/22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6/cpab/cpa2d/inference/transformation/Register.py#L94-L102
import numpy as np import cv2 import pylab from pylab import plt from of.gpu import CpuGpuArray from of.utils import * from cpab.cpa2d.TransformWrapper import TransformWrapper from cpab.cpaNd.inference.Metropolis import Metropolis from cpab.cpaNd.inference.Proposal import Proposal from cpab.cpaNd.model import LogLikelihood as LL from cpab.cpaNd.model import LogPrior as LP from cpab.cpa2d.model.transformations.register import ScaleDependentLogLikelihoodGaussian as SDLL_gaussian from cpab.cpaNd.model import ScaleDependentLogPrior as SDLP class Register(object): def __init__(self,nRows=100, nCols=100, base = [2,2], nLevels=4, tess='tri', zero_v_across_bdry=[False]*2, scale_spatial=1.0 * 10, scale_value=2.0, sigma_signal=None, wlp=1e-4, ll_type=['gaussian','gaussian_on_distancetransform'][0], only_local=False, valid_outside=True): ll_type = ll_type.lower() if ll_type == 'gaussian': self.SDLL=SDLL_gaussian else: raise ValueError(ll_type) self.base = base self.nLevels=nLevels if sigma_signal is None: raise ValueError("sigma_signal cannot be None") self.sigma_signal = sigma_signal self.wlp = wlp self.tw = TransformWrapper(nRows=nRows,nCols=nCols, nLevels=nLevels, base=base, tess=tess, scale_spatial=scale_spatial, scale_value=scale_value, zero_v_across_bdry=zero_v_across_bdry, only_local=only_local, valid_outside=valid_outside )
MIT License
wright-group/wrighttools
WrightTools/kit/_array.py
orthogonal
python
def orthogonal(*args) -> bool:
    # copy the argument tuple to a list so shapes can be assigned back
    # in place when an array is passed instead of a shape
    args = list(args)
    for i, arg in enumerate(args):
        if hasattr(arg, "shape"):
            args[i] = arg.shape
    for s in zip(*args):
        if np.product(s) != max(s):
            return False
    return True
Determine if a set of arrays are orthogonal.

Parameters
----------
args : array-likes or array shapes

Returns
-------
bool
    Array orthogonality condition.
https://github.com/wright-group/wrighttools/blob/7531965dec9a8f52557fbd3c60e12dcd3b6e000b/WrightTools/kit/_array.py#L182-L200
import numpy as np from .. import exceptions as wt_exceptions __all__ = [ "closest_pair", "diff", "fft", "joint_shape", "orthogonal", "remove_nans_1D", "share_nans", "smooth_1D", "svd", "unique", "valid_index", "mask_reduce", "enforce_mask_shape", ] def closest_pair(arr, give="indicies"): idxs = [idx for idx in np.ndindex(arr.shape)] outs = [] min_dist = arr.max() - arr.min() for idxa in idxs: for idxb in idxs: if idxa == idxb: continue dist = abs(arr[idxa] - arr[idxb]) if dist == min_dist: if not [idxb, idxa] in outs: outs.append([idxa, idxb]) elif dist < min_dist: min_dist = dist outs = [[idxa, idxb]] if give == "indicies": return outs elif give == "distance": return min_dist else: raise KeyError("give not recognized in closest_pair") def diff(xi, yi, order=1) -> np.ndarray: yi = np.array(yi).copy() flip = False if xi[-1] < xi[0]: xi = np.flipud(xi.copy()) yi = np.flipud(yi) flip = True midpoints = (xi[1:] + xi[:-1]) / 2 for _ in range(order): d = np.diff(yi) d /= np.diff(xi) yi = np.interp(xi, midpoints, d) if flip: yi = np.flipud(yi) return yi def fft(xi, yi, axis=0) -> tuple: if xi.ndim != 1: raise wt_exceptions.DimensionalityError(1, xi.ndim) spacing = np.diff(xi) if not np.allclose(spacing, spacing.mean()): raise RuntimeError("WrightTools.kit.fft: argument xi must be evenly spaced") yi = np.fft.fft(yi, axis=axis) d = (xi.max() - xi.min()) / (xi.size - 1) xi = np.fft.fftfreq(xi.size, d=d) xi = np.fft.fftshift(xi) yi = np.fft.fftshift(yi, axes=axis) return xi, yi def joint_shape(*args) -> tuple: if len(args) == 0: return () shape = [] shapes = [a.shape for a in args] ndim = args[0].ndim for i in range(ndim): shape.append(max([s[i] for s in shapes])) return tuple(shape)
MIT License
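orthogonal above also accepts shapes directly: a set of shapes is orthogonal when, along every dimension, at most one shape is larger than 1, which is what the product-equals-max test checks. For example:

from WrightTools.kit import orthogonal

print(orthogonal((2, 1), (1, 3)))  # True: each dimension has one "owner"
print(orthogonal((2, 3), (3, 2)))  # False: both shapes span both dimensions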
data-apis/array-api-tests
array_api_tests/array_helpers.py
true
python
def true(shape):
    return full(shape, True, dtype=bool)
Returns a full True array with dtype=bool.
https://github.com/data-apis/array-api-tests/blob/18c425e0b161431798991cb4eac442ecaa089157/array_api_tests/array_helpers.py#L92-L96
import itertools

from ._array_module import (isnan, all, any, equal, not_equal, logical_and,
                            logical_or, isfinite, greater, less, less_equal,
                            zeros, ones, full, bool, int8, int16, int32,
                            int64, uint8, uint16, uint32, uint64, float32,
                            float64, nan, inf, pi, remainder, divide, isinf,
                            negative, asarray)
from ._array_module import logical_not, subtract, floor, ceil, where
from . import dtype_helpers as dh


__all__ = ['all', 'any', 'logical_and', 'logical_or', 'logical_not', 'less',
           'less_equal', 'greater', 'subtract', 'negative', 'floor', 'ceil',
           'where', 'isfinite', 'equal', 'not_equal', 'zero', 'one', 'NaN',
           'infinity', 'π', 'isnegzero', 'non_zero', 'isposzero',
           'exactly_equal', 'assert_exactly_equal', 'notequal',
           'assert_finite', 'assert_non_zero', 'ispositive',
           'assert_positive', 'isnegative', 'assert_negative', 'isintegral',
           'assert_integral', 'isodd', 'iseven', "assert_iseven",
           'assert_isinf', 'positive_mathematical_sign',
           'assert_positive_mathematical_sign', 'negative_mathematical_sign',
           'assert_negative_mathematical_sign', 'same_sign',
           'assert_same_sign', 'ndindex', 'float64', 'asarray', 'full',
           'true', 'false', 'isnan']


def zero(shape, dtype):
    return zeros(shape, dtype=dtype)


def one(shape, dtype):
    return ones(shape, dtype=dtype)


def NaN(shape, dtype):
    if dtype not in [float32, float64]:
        raise RuntimeError(f"Unexpected dtype {dtype} in NaN().")
    return full(shape, nan, dtype=dtype)


def infinity(shape, dtype):
    if dtype not in [float32, float64]:
        raise RuntimeError(f"Unexpected dtype {dtype} in infinity().")
    return full(shape, inf, dtype=dtype)


def π(shape, dtype):
    if dtype not in [float32, float64]:
        raise RuntimeError(f"Unexpected dtype {dtype} in π().")
    return full(shape, pi, dtype=dtype)
MIT License
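A one-line usage sketch; which array namespace backs `full` depends on how `_array_module` is configured for the test run:

mask = true((2, 3))   # boolean array of shape (2, 3), all elements True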
viktorc/pararealml
pararealml/core/constrained_problem.py
ConstrainedProblem.mesh
python
def mesh(self) -> Optional[Mesh]:
    return self._mesh
The mesh over which the differential equation is to be solved.
https://github.com/viktorc/pararealml/blob/6d7faf06d33832d8a30a4fd718d9191c01987956/pararealml/core/constrained_problem.py#L105-L109
from typing import Tuple, Optional, Sequence, List, Union

import numpy as np

from pararealml.core.boundary_condition import BoundaryCondition, VectorizedBoundaryConditionFunction
from pararealml.core.constraint import Constraint
from pararealml.core.differential_equation import DifferentialEquation
from pararealml.core.mesh import Mesh

BoundaryConditionPair = Tuple[BoundaryCondition, BoundaryCondition]


class ConstrainedProblem:

    def __init__(
            self,
            diff_eq: DifferentialEquation,
            mesh: Optional[Mesh] = None,
            boundary_conditions: Optional[Sequence[BoundaryConditionPair]] = None):
        self._diff_eq = diff_eq
        self._mesh: Optional[Mesh]
        self._boundary_conditions: Optional[Tuple[BoundaryConditionPair, ...]]

        if diff_eq.x_dimension:
            if mesh is None:
                raise ValueError('mesh cannot be None for PDEs')
            if mesh.dimensions != diff_eq.x_dimension:
                raise ValueError(
                    f'mesh dimensions ({mesh.dimensions}) must match '
                    'differential equation spatial dimensions '
                    f'({diff_eq.x_dimension})')
            if boundary_conditions is None:
                raise ValueError('boundary conditions cannot be None for PDEs')
            if len(boundary_conditions) != diff_eq.x_dimension:
                raise ValueError(
                    'number of boundary condition pairs '
                    f'({len(boundary_conditions)}) must match differential '
                    f'equation spatial dimensions ({diff_eq.x_dimension})')

            self._mesh = mesh
            self._boundary_conditions = tuple(boundary_conditions)
            self._y_vertices_shape = mesh.vertices_shape + (diff_eq.y_dimension,)
            self._y_cells_shape = mesh.cells_shape + (diff_eq.y_dimension,)

            self._are_all_bcs_static = np.all([
                bc_lower.is_static and bc_upper.is_static
                for (bc_lower, bc_upper) in boundary_conditions
            ])
            self._are_there_bcs_on_y = np.any([
                bc_lower.has_y_condition or bc_upper.has_y_condition
                for (bc_lower, bc_upper) in boundary_conditions
            ])

            self._boundary_vertex_constraints = None
            self._boundary_cell_constraints = None
            self._boundary_vertex_constraints = self.create_boundary_constraints(True)
            self._boundary_vertex_constraints[0].setflags(write=False)
            self._boundary_vertex_constraints[1].setflags(write=False)
            self._boundary_cell_constraints = self.create_boundary_constraints(False)
            self._boundary_cell_constraints[0].setflags(write=False)
            self._boundary_cell_constraints[1].setflags(write=False)

            self._y_vertex_constraints = self.create_y_vertex_constraints(
                self._boundary_vertex_constraints[0])
            self._y_vertex_constraints.setflags(write=False)
        else:
            self._mesh = None
            self._boundary_conditions = None
            self._y_vertices_shape = self._y_cells_shape = diff_eq.y_dimension,
            self._are_all_bcs_static = False
            self._are_there_bcs_on_y = False
            self._boundary_vertex_constraints = None
            self._boundary_cell_constraints = None
            self._y_vertex_constraints = None

    @property
    def differential_equation(self) -> DifferentialEquation:
        return self._diff_eq

    @property
MIT License
psf/requests-html
requests_html.py
BaseParser.text
python
def text(self) -> _Text:
    return self.pq.text()
The text content of the :class:`Element <Element>` or :class:`HTML <HTML>`.
https://github.com/psf/requests-html/blob/026c4e5217cfc8347614148aab331d81402f596b/requests_html.py#L168-L172
import sys
import asyncio
from urllib.parse import urlparse, urlunparse, urljoin
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import TimeoutError
from functools import partial
from typing import Set, Union, List, MutableMapping, Optional

import pyppeteer
import requests
import http.cookiejar
from pyquery import PyQuery

from fake_useragent import UserAgent
from lxml.html.clean import Cleaner
import lxml
from lxml import etree
from lxml.html import HtmlElement
from lxml.html import tostring as lxml_html_tostring
from lxml.html.soupparser import fromstring as soup_parse
from parse import search as parse_search
from parse import findall, Result
from w3lib.encoding import html_to_unicode

DEFAULT_ENCODING = 'utf-8'
DEFAULT_URL = 'https://example.org/'
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8'
DEFAULT_NEXT_SYMBOL = ['next', 'more', 'older']

cleaner = Cleaner()
cleaner.javascript = True
cleaner.style = True

useragent = None

_Find = Union[List['Element'], 'Element']
_XPath = Union[List[str], List['Element'], str, 'Element']
_Result = Union[List['Result'], 'Result']
_HTML = Union[str, bytes]
_BaseHTML = str
_UserAgent = str
_DefaultEncoding = str
_URL = str
_RawHTML = bytes
_Encoding = str
_LXML = HtmlElement
_Text = str
_Search = Result
_Containing = Union[str, List[str]]
_Links = Set[str]
_Attrs = MutableMapping
_Next = Union['HTML', List[str]]
_NextSymbol = List[str]

try:
    assert sys.version_info.major == 3
    assert sys.version_info.minor > 5
except AssertionError:
    raise RuntimeError('Requests-HTML requires Python 3.6+!')


class MaxRetries(Exception):

    def __init__(self, message):
        self.message = message


class BaseParser:

    def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
        self.element = element
        self.url = url
        self.skip_anchors = True
        self.default_encoding = default_encoding
        self._encoding = None
        self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
        self._lxml = None
        self._pq = None

    @property
    def raw_html(self) -> _RawHTML:
        if self._html:
            return self._html
        else:
            return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)

    @property
    def html(self) -> _BaseHTML:
        if self._html:
            return self.raw_html.decode(self.encoding, errors='replace')
        else:
            return etree.tostring(self.element, encoding='unicode').strip()

    @html.setter
    def html(self, html: str) -> None:
        self._html = html.encode(self.encoding)

    @raw_html.setter
    def raw_html(self, html: bytes) -> None:
        self._html = html

    @property
    def encoding(self) -> _Encoding:
        if self._encoding:
            return self._encoding
        if self._html:
            self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
            try:
                self.raw_html.decode(self.encoding, errors='replace')
            except UnicodeDecodeError:
                self._encoding = self.default_encoding
        return self._encoding if self._encoding else self.default_encoding

    @encoding.setter
    def encoding(self, enc: str) -> None:
        self._encoding = enc

    @property
    def pq(self) -> PyQuery:
        if self._pq is None:
            self._pq = PyQuery(self.lxml)
        return self._pq

    @property
    def lxml(self) -> HtmlElement:
        if self._lxml is None:
            try:
                self._lxml = soup_parse(self.html, features='html.parser')
            except ValueError:
                self._lxml = lxml.html.fromstring(self.raw_html)
        return self._lxml

    @property
MIT License
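A short usage sketch against this library's public API (the URL and selector are illustrative); `.text` is the property shown above, delegating to PyQuery:

from requests_html import HTMLSession

session = HTMLSession()
r = session.get('https://python.org/')
about = r.html.find('#about', first=True)
print(about.text)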
mindprince/pinboardtoevernote
lib/thrift/server/TProcessPoolServer.py
TProcessPoolServer.serveClient
python
def serveClient(self, client):
    itrans = self.inputTransportFactory.getTransport(client)
    otrans = self.outputTransportFactory.getTransport(client)
    iprot = self.inputProtocolFactory.getProtocol(itrans)
    oprot = self.outputProtocolFactory.getProtocol(otrans)
    try:
        while True:
            self.processor.process(iprot, oprot)
    except TTransportException, tx:
        pass
    except Exception, x:
        logging.exception(x)

    itrans.close()
    otrans.close()
Process input/output from a client for as long as possible
https://github.com/mindprince/pinboardtoevernote/blob/4dd2b462f4100853c66ab433c0e94533863f2352/lib/thrift/server/TProcessPoolServer.py#L67-L83
import logging
from multiprocessing import Process, Value, Condition, reduction

from TServer import TServer
from thrift.transport.TTransport import TTransportException


class TProcessPoolServer(TServer):

    def __init__(self, * args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None

    def setPostForkCallback(self, callback):
        if not callable(callback):
            raise TypeError("This is not a callback!")
        self.postForkCallback = callback

    def setNumWorkers(self, num):
        self.numWorkers = num

    def workerProcess(self):
        if self.postForkCallback:
            self.postForkCallback()

        while self.isRunning.value == True:
            try:
                client = self.serverTransport.accept()
                self.serveClient(client)
            except (KeyboardInterrupt, SystemExit):
                return 0
            except Exception, x:
                logging.exception(x)
Apache License 2.0
nthuoj/nthuoj_web
status/status_info.py
get_visible_submission
python
def get_visible_submission(user):
    user = validate_user(user)
    submissions = Submission.objects.all()
    if user.has_admin_auth():
        return submissions

    submissions = submissions.exclude(
        user__in=User.objects.filter(user_level=User.ADMIN)
    )
    invisible_problem = Problem.objects.filter(
        visible=False
    ).exclude(
        owner=user
    )
    contests = Contest.objects.filter(
        is_homework=False,
        end_time__gte=datetime.now()
    )
    for contest in contests:
        if not has_contest_ownership(user, contest):
            submissions = submissions.exclude(
                Q(user__in=contest.coowner.all()) | Q(user=contest.owner),
                problem__in=contest.problem.all(),
                submit_time__gte=contest.creation_time
            )
            submissions = submissions.exclude(
                user=get_contestant(contest).exclude(username=user.username),
                problem__in=contest.problem.all(),
                submit_time__gte=get_freeze_time_datetime(contest)
            )
        else:
            invisible_problem = invisible_problem.exclude(
                id__in=contest.problem.filter(visible=False).values_list('id', flat=True)
            )
    submissions = submissions.exclude(
        problem__in=invisible_problem
    )
    return submissions
Get all submissions that can be viewed by the given user.
https://github.com/nthuoj/nthuoj_web/blob/cbe9207cd3d5618568dd0c50fefb07dcece639d7/status/status_info.py#L25-L77
from datetime import datetime

from django.db.models import Q

from contest.contest_info import get_running_contests, get_freeze_time_datetime, get_contestant
from contest.models import Contest
from problem.models import Problem, Submission, SubmissionDetail
from users.models import User
from utils.user_info import validate_user, has_contest_ownership


def regroup_submission(submissions):
    submission_groups = []
    for submission in submissions:
        submission_groups.append({
            'grouper': submission,
            'list': SubmissionDetail.objects.filter(
                sid=submission.id
            ).order_by('tid')
        })
    return submission_groups
MIT License
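A minimal sketch of how a status view in the same Django project might consume this helper (the view name and ordering are illustrative, not from the source):

def status_list(request):
    submissions = get_visible_submission(request.user).order_by('-submit_time')
    groups = regroup_submission(submissions)
    # render groups with the status template...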
zhaochaocs/dualenc
plan/example.py
dfs
python
def dfs(G):
    def dfs_util(cur_node, visited):
        neighbors = list(G.neighbors(cur_node))
        random.shuffle(neighbors)
        for n in neighbors:
            if n not in visited:
                visited[n] = 1
                dfs_util(n, visited)

    graph_size = len(list(G.nodes))
    assert graph_size
    visited = collections.OrderedDict()
    cur_node = random.choice(list(G.nodes))
    visited[cur_node] = 1
    dfs_util(cur_node, visited)
    return list(visited.keys())
dfs for an undirected graph
:param G:
:return:
https://github.com/zhaochaocs/dualenc/blob/4175a7ed3f2c3232152ecce5ffd6ee4c727e64b9/plan/example.py#L272-L292
import collections
import copy
import random

import dgl
import networkx as nx
import numpy as np
import torch

edge_to_id = {e: i for i, e in enumerate(['sp', 'po', 'op', 'ps', 'll', 'ne'])}


class Example:
    def __init__(self, lex_id, predicates, nodes, nodes1, nodes2, labels, node_feature_ids, lex=None):
        self.entry_id = lex_id
        self.predicates = predicates
        self.nodes = nodes
        self.node_feature_ids = node_feature_ids
        self.nodes1 = nodes1
        self.nodes2 = nodes2
        self.labels = labels

        self.node_size = len(nodes)
        self.triple_size = labels.count('A1')
        self.predicate_size = len(predicates)
        assert self.predicate_size == self.triple_size
        if self.predicate_size == 0:
            print(lex)

        self.nx_graph = self.create_nx_graph().to_undirected()

    def get_id(self, lex=True):
        if lex:
            return self.entry_id
        else:
            return self.entry_id.rsplit("_", 1)[0]

    def reset_predicates(self, plan):
        self.predicates = [self.predicates[p] for p in plan]

    def set_split(self, split_):
        self.split_ = split_

    def get_predicates(self, mode='local', idlize=True):
        if mode == 'global':
            if not idlize:
                return self.get_predicates(idlize=False)
            else:
                return [self.node_feature_ids[local_idx] for local_idx in self.get_predicates()]
        elif mode == 'local':
            if idlize:
                return self.predicates
            else:
                return [self.nodes[p] for p in self.predicates]
        else:
            raise TypeError('Invalid mode!')

    def get_label(self, encoding='one-hot'):
        local_predicates = self.get_predicates(mode='local', idlize=True)
        if encoding == 'p-id':
            return local_predicates
        elif encoding == 'global-id':
            return self.get_predicates(mode='global', idlize=True)
        elif encoding == 'one-hot':
            label = torch.zeros(self.node_size, self.predicate_size)
            for t in range(0, self.predicate_size):
                label[local_predicates[t], t] = 1
            label = torch.index_select(label, dim=0, index=torch.tensor(sorted(local_predicates)).long())
            return label.permute(1, 0).tolist()
        else:
            raise TypeError('Invalid encoding mode!')

    def build_rdf_graph(self, accessed_predicates=None):
        g_list = []
        triple_size_list = []
        local_predicate_list = self.get_predicates(mode="local", idlize=True)
        self.dgl_graph = self.create_graph()

        if accessed_predicates is None:
            for t in range(0, self.predicate_size):
                g = copy.deepcopy(self.dgl_graph)
                for i in range(t):
                    local_predicate = local_predicate_list[i]
                    g.nodes[local_predicate].data['access'] += 1
                    if i == t-1:
                        g.nodes[local_predicate].data['pre_access'] += 1
                g_list.append(g)
                triple_size_list.append(self.predicate_size)
            return g_list, triple_size_list
        else:
            g = copy.deepcopy(self.dgl_graph)
            assert g.ndata['spo'][:,1].sum().item() == self.predicate_size
            for p in accessed_predicates:
                g.nodes[p].data['access'] = torch.tensor([[1]])
            if len(accessed_predicates):
                g.nodes[accessed_predicates[-1]].data['pre_access'] = torch.tensor([[1]])
            g_list.append(g)
            triple_size_list.append(self.predicate_size)
            return g_list, triple_size_list

    def create_nx_graph(self):
        edge_list = []
        g = nx.DiGraph()
        for node_1, node_2, edge in zip(self.nodes1, self.nodes2, self.labels):
            if edge == "A0":
                edge_list.append((node_1, node_2))
            elif edge == "A1":
                edge_list.append((node_2, node_1))
        g.add_edges_from(edge_list)
        return g

    def create_graph(self):
        g = dgl.DGLGraph()
        node_size = self.node_size
        g.add_nodes(node_size)
        g.ndata['global_id'] = torch.tensor(self.node_feature_ids)
        g.ndata['spo'] = torch.zeros(self.node_size, 3).float()

        edge_list = []
        edge_type_list = []
        node_mention = [[self.node_feature_ids[i]] for i in range(node_size)]
        for node_1, node_2, edge in zip(self.nodes1, self.nodes2, self.labels):
            if edge == "A0":
                g.nodes[node_1].data['spo'] += torch.tensor([1.0, 0, 0])
                g.nodes[node_2].data['spo'] += torch.tensor([0, 0.5, 0])
                edge_list.append((node_1, node_2))
                edge_list.append((node_2, node_1))
                edge_list.append((node_1, node_1))
                edge_list.append((node_2, node_2))
                edge_type_list += [edge_to_id[e] for e in ['sp', 'ps', 'll', 'll']]
            elif edge == "A1":
                g.nodes[node_1].data['spo'] += torch.tensor([0, 0, 1.0])
                g.nodes[node_2].data['spo'] += torch.tensor([0, 0.5, 0])
                edge_list.append((node_1, node_2))
                edge_list.append((node_2, node_1))
                edge_list.append((node_1, node_1))
                edge_list.append((node_2, node_2))
                edge_type_list += [edge_to_id[e] for e in ['op', 'po', 'll', 'll']]
            elif edge == 'NE':
                edge_list.append((node_1, node_2))
                edge_type_list += [edge_to_id["ne"]]
                node_mention[node_2].append(self.node_feature_ids[node_1])
            else:
                raise ValueError("Do not support the edge type {}".format(edge))

        new_edge_list, new_edge_type_list = [], []
        added_edge = set()
        for edge, edge_type in zip(edge_list, edge_type_list):
            edge_id = "{}-{}-{}".format(edge[0], edge[1], edge_type)
            if edge_id not in added_edge:
                added_edge.add(edge_id)
                new_edge_list.append(edge)
                new_edge_type_list.append(edge_type)
        src, dst = tuple(zip(*new_edge_list))
        g.add_edges(src, dst)
        g.ndata['global_id'] = torch.tensor(self.node_feature_ids)
        g.ndata['access'] = torch.zeros(self.node_size, 1).long()
        g.ndata['pre_access'] = torch.zeros(self.node_size, 1).long()
        g.ndata['spo'] = torch.gt(g.ndata['spo'], 0).float()
        g.edata['type'] = torch.tensor(new_edge_type_list).reshape(-1, 1)

        max_mention_len = 15
        node_mention_tensor = torch.zeros((self.node_size, max_mention_len), dtype=torch.long)
        for idx, mention in enumerate(node_mention):
            if len(mention) > max_mention_len:
                mention = mention[:max_mention_len]
            node_mention_tensor[idx, :len(mention)] = torch.tensor(mention, dtype=torch.long)
        g.ndata['node_mention'] = node_mention_tensor

        dst_in_deg = {}
        for dst_node in set(dst):
            if dst_node not in dst_in_deg:
                dst_in_deg[dst_node] = {}
            for edge_type in range(len(edge_to_id)):
                if edge_type not in dst_in_deg[dst_node]:
                    dst_in_deg[dst_node][edge_type] = 0
        for dst_node, edge_type in zip(dst, new_edge_type_list):
            dst_in_deg[dst_node][edge_type] += 1
        e_norm = [1.0 / dst_in_deg[dst_node][e_type] for dst_node, e_type in zip(dst, new_edge_type_list)]
        g.edata['norm'] = torch.tensor(e_norm).reshape(-1, 1)
        return g

    def get_random_plan(self, walk_func=None):
        if walk_func is None or walk_func == "None":
            plan = copy.deepcopy(self.get_predicates(mode="local", idlize=True))
            np.random.shuffle(plan)
            return plan
        else:
            assert walk_func in ['random_walk', "dfs", "bfs"]
            if not nx.is_connected(self.nx_graph):
                graphs = [self.nx_graph.subgraph(c) for c in nx.connected_components(self.nx_graph)]
            else:
                graphs = [self.nx_graph]
            if walk_func == "random_walk":
                random_list = flat_list([random_walk(g) for g in graphs])
            elif walk_func == "dfs":
                random_list = flat_list([dfs(g) for g in graphs])
            else:
                random_list = flat_list([bfs(g) for g in graphs])
            local_random_plan = [n for n in random_list if n in self.predicates]
            return local_random_plan


def random_walk(G):
    graph_size = len(list(G.nodes))
    assert graph_size
    visited = collections.OrderedDict()
    cur_node = random.choice(list(G.nodes))
    visited[cur_node] = 1
    while not len(visited) == graph_size:
        next_node = random.choice(list(G.neighbors(cur_node)))
        if next_node not in visited:
            visited[next_node] = 1
        cur_node = next_node
    return list(visited.keys())
MIT License
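A usage sketch for `dfs` on a small connected graph; because neighbor lists are shuffled, the ordering differs between runs:

import networkx as nx

G = nx.Graph([(0, 1), (1, 2), (2, 3), (1, 3)])
order = dfs(G)                      # e.g. [1, 3, 2, 0] -- a randomized DFS order
assert set(order) == set(G.nodes)   # every node is visited exactly once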
doist/todoist-python
todoist/managers/uploads.py
UploadsManager.delete
python
def delete(self, file_url):
    params = {"token": self.token, "file_url": file_url}
    return self.api._get("uploads/delete", params=params)
Deletes upload.

param file_url: (str) uploaded file URL
https://github.com/doist/todoist-python/blob/d564db81932771541c3a718698f8018756c6091d/todoist/managers/uploads.py#L29-L36
from .generic import Manager


class UploadsManager(Manager):

    def add(self, filename, **kwargs):
        data = {"token": self.token}
        data.update(kwargs)
        files = {"file": open(filename, "rb")}
        return self.api._post("uploads/add", data=data, files=files)

    def get(self, **kwargs):
        params = {"token": self.token}
        params.update(kwargs)
        return self.api._get("uploads/get", params=params)
MIT License
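A hypothetical end-to-end sketch via the sync API client that owns this manager (the token and file path are placeholders):

import todoist

api = todoist.TodoistAPI('0123456789abcdef')
upload = api.uploads.add('/path/to/photo.jpg')   # returns upload metadata
api.uploads.delete(upload['file_url'])           # the method shown above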
schlitzered/pyredis
pyredis/commands.py
Key.persist
python
def persist(self, *args):
    if self._cluster:
        return self.execute(b'PERSIST', *args, shard_key=args[0])
    return self.execute(b'PERSIST', *args)
Execute PERSIST Command, consult Redis documentation for details.

:return: result, exception
https://github.com/schlitzered/pyredis/blob/4f5049aae7ae2702f22ccfb661ca3f13869a3c31/pyredis/commands.py#L254-L261
__author__ = 'schlitzer'

__all__ = [
    'Connection',
    'Hash',
    'HyperLogLog',
    'Key',
    'List',
    'Publish',
    'Scripting',
    'Set',
    'SSet',
    'String',
    'Subscribe',
    'Transaction'
]


class BaseCommand(object):
    def __init__(self):
        self._cluster = False

    def execute(self, *args, **kwargs):
        raise NotImplemented


class Connection(BaseCommand):
    def __init__(self):
        super().__init__()

    def echo(self, *args, shard_key=None, sock=None):
        if self._cluster:
            return self.execute(b'ECHO', *args, shard_key=shard_key, sock=sock)
        return self.execute(b'ECHO', *args)

    def ping(self, shard_key=None, sock=None):
        if self._cluster:
            return self.execute(b'PING', shard_key=shard_key, sock=sock)
        return self.execute(b'PING')


class Geo(BaseCommand):
    def __init__(self):
        super().__init__()

    def geoadd(self, *args):
        if self._cluster:
            return self.execute(b'GEOADD', *args, shard_key=args[0])
        return self.execute(b'GEOADD', *args)

    def geodist(self, *args):
        if self._cluster:
            return self.execute(b'GEODIST', *args, shard_key=args[0])
        return self.execute(b'GEODIST', *args)

    def geohash(self, *args):
        if self._cluster:
            return self.execute(b'GEOHASH', *args, shard_key=args[0])
        return self.execute(b'GEOHASH', *args)

    def georadius(self, *args):
        if self._cluster:
            return self.execute(b'GEORADIUS', *args, shard_key=args[0])
        return self.execute(b'GEORADIUS', *args)

    def geopos(self, *args):
        if self._cluster:
            return self.execute(b'GEOPOS', *args, shard_key=args[0])
        return self.execute(b'GEOPOS', *args)

    def georadiusbymember(self, *args):
        if self._cluster:
            return self.execute(b'GEORADIUSBYMEMBER', *args, shard_key=args[0])
        return self.execute(b'GEORADIUSBYMEMBER', *args)


class Key(BaseCommand):
    def __init__(self):
        super().__init__()

    def delete(self, *args):
        if self._cluster:
            return self.execute(b'DEL', *args, shard_key=args[0])
        return self.execute(b'DEL', *args)

    def dump(self, *args):
        if self._cluster:
            return self.execute(b'DUMP', *args, shard_key=args[0])
        return self.execute(b'DUMP', *args)

    def exists(self, *args):
        if self._cluster:
            return self.execute(b'EXISTS', *args, shard_key=args[0])
        return self.execute(b'EXISTS', *args)

    def expire(self, *args):
        if self._cluster:
            return self.execute(b'EXPIRE', *args, shard_key=args[0])
        return self.execute(b'EXPIRE', *args)

    def expireat(self, *args):
        if self._cluster:
            return self.execute(b'EXPIREAT')
        return self.execute(b'EXPIREAT', *args)

    def keys(self, *args, shard_key=None, sock=None):
        if self._cluster:
            return self.execute(b'KEYS', *args, shard_key=shard_key, sock=sock)
        return self.execute(b'KEYS', *args)

    def migrate(self, *args):
        if self._cluster:
            raise NotImplemented
        return self.execute(b'MIGRATE', *args)

    def move(self, *args):
        if self._cluster:
            return self.execute(b'MOVE', *args, shard_key=args[0])
        return self.execute(b'MOVE', *args)

    def object(self, *args, shard_key=None, sock=None):
        if self._cluster:
            return self.execute(b'DEL', *args, shard_key=shard_key, sock=sock)
        return self.execute(b'OBJECT', *args)
MIT License
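A usage sketch assuming a Redis server on localhost; `persist` removes a pending expiry from a key:

import pyredis

client = pyredis.Client(host='localhost')
client.set('mykey', 'value')
client.expire('mykey', 60)     # key would expire in 60 s
client.persist('mykey')        # returns 1 and drops the TTL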
bvbohnen/x4_customizer
Plugins/Utilities/Catalog.py
Cat_Unpack
python
def Cat_Unpack(
        source_cat_path,
        dest_dir_path,
        include_pattern=None,
        exclude_pattern=None,
        allow_md5_errors=False
):
    try:
        source_cat_path = Path(source_cat_path).resolve()
        assert source_cat_path.exists()
    except Exception:
        raise AssertionError('Error in the source path ({})'.format(source_cat_path))
    try:
        dest_dir_path = Path(dest_dir_path).resolve()
    except Exception:
        raise AssertionError('Error in the dest path ({})'.format(dest_dir_path))

    if isinstance(include_pattern, str):
        include_pattern = [include_pattern]
    if isinstance(exclude_pattern, str):
        exclude_pattern = [exclude_pattern]

    if source_cat_path.is_dir():
        extension_summary = None
        content_xml_path = source_cat_path / 'content.xml'
        if content_xml_path.exists():
            extension_summary = File_Manager.Extension_Finder.Extension_Summary(content_xml_path)
        source_reader = File_Manager.Source_Reader.Location_Source_Reader(
            location=source_cat_path,
            extension_summary=extension_summary)
        Print(('{} catalog files found using standard naming convention.'
               ).format(len(source_reader.catalog_file_dict)))
    else:
        source_reader = File_Manager.Source_Reader.Location_Source_Reader(
            location=None)
        source_reader.Add_Catalog(source_cat_path)

    num_writes = 0
    num_pattern_skips = 0
    num_hash_skips = 0
    num_md5_skips = 0
    for virtual_path, cat_entry in source_reader.Get_Cat_Entries().items():
        if not _Pattern_Match(virtual_path, include_pattern, exclude_pattern):
            num_pattern_skips += 1
            continue
        dest_path = dest_dir_path / cat_entry.cat_path
        if dest_path.exists():
            existing_binary = dest_path.read_bytes()
            dest_hash = File_Manager.Cat_Reader.Get_Hash_String(existing_binary)
            if (dest_hash == cat_entry.hash_str
                    or (not existing_binary
                        and cat_entry.hash_str == '00000000000000000000000000000000')):
                num_hash_skips += 1
                continue
        dest_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            cat_path, file_binary = source_reader.Read_Catalog_File(
                virtual_path, allow_md5_error=allow_md5_errors)
        except Cat_Hash_Exception:
            num_md5_skips += 1
            continue
        with open(dest_path, 'wb') as file:
            file.write(file_binary)
        num_writes += 1
        Print('Extracted {}'.format(virtual_path))

    Print('Files written : {}'.format(num_writes))
    Print('Files skipped (pattern mismatch) : {}'.format(num_pattern_skips))
    Print('Files skipped (hash match) : {}'.format(num_hash_skips))
    Print('Files skipped (md5 hash failure) : {}'.format(num_md5_skips))
    return
Unpack a single catalog file, or a group if a folder given.
When a file is in multiple catalogs, the latest one in the list
will be used. If a file is already present at the destination,
it is compared to the catalog version and skipped if the same.

* source_cat_path
  - Path to the catalog file, or to a folder.
  - When a folder given, catalogs are read in X4 priority order
    according to its expected names.
* dest_dir_path
  - Path to the folder where unpacked files are written.
* include_pattern
  - String or list of strings, optional, wildcard patterns for file
    names to include in the unpacked output.
  - Eg. "*.xml" to unpack only xml files
  - Case is ignored.
* exclude_pattern
  - String or list of strings, optional, wildcard patterns for file
    names to include in the unpacked output.
  - Eg. "['*.lua']" to skip lua files.
* allow_md5_errors
  - Bool, if True then files with md5 errors will be unpacked, otherwise
    they are skipped.
  - Such errors may arise from poorly constructed catalog files.
https://github.com/bvbohnen/x4_customizer/blob/6f865008690916a66a44c97331d9a2692baedb35/Plugins/Utilities/Catalog.py#L12-L168
from pathlib import Path
from fnmatch import fnmatch
from Framework import Utility_Wrapper, File_Manager, Cat_Hash_Exception, Print


@Utility_Wrapper(uses_paths_from_settings = False)
MIT License
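A hypothetical invocation; the game-install and output paths are placeholders, and the package layout follows the function_url above:

from Plugins import Cat_Unpack

Cat_Unpack(
    source_cat_path=r'C:\Steam\steamapps\common\X4 Foundations',
    dest_dir_path=r'C:\x4_extracted',
    include_pattern=['*.xml', '*.xsd'],
)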
open2c/cooltools
cooltools/lib/peaks.py
find_peak_prominence_iterative
python
def find_peak_prominence_iterative(
    arr,
    min_prom=None,
    max_prom=None,
    steps_prom=1000,
    log_space_proms=True,
    min_n_peak_pairs=5,
):
    if ((min_prom is None) and (max_prom is not None)) or (
        (min_prom is not None) and (max_prom is None)
    ):
        raise Exception(
            "Please, provide either both min_prom and max_prom or "
            "none to infer these values from the data."
        )

    if (min_prom is None) and (max_prom is None):
        arr_sorted = np.sort(arr)
        arr_sorted = arr_sorted[np.isfinite(arr_sorted)]
        max_prom = arr_sorted[-1] - arr_sorted[0]
        diffs = np.diff(arr_sorted)
        min_prom = diffs[diffs != 0].min()

    if log_space_proms:
        proms = 10 ** np.linspace(np.log10(min_prom), np.log10(max_prom), steps_prom)
    else:
        proms = np.linspace(min_prom, max_prom, steps_prom)

    minproms = np.nan * np.ones_like(arr)
    maxproms = np.nan * np.ones_like(arr)

    for p in proms:
        minidxs, maxidxs = peakdet(arr, p)
        if (len(minidxs) >= min_n_peak_pairs) and (len(minidxs) >= min_n_peak_pairs):
            valid_mins = minidxs[np.isfinite(arr[minidxs])]
            minproms[valid_mins] = p
            valid_maxs = maxidxs[np.isfinite(arr[maxidxs])]
            maxproms[valid_maxs] = p

    return minproms, maxproms
Finds the minima/maxima of an array using the peakdet algorithm at
different values of the threshold prominence. For each location,
returns the maximal threshold prominence at which it is called as a
minimum/maximum.

Note that this function is inferior in every aspect to
find_peak_prominence. We keep it for testing purposes and will remove
in the future.

Parameters
----------
arr : array_like
min_prom, max_prom : float
    The minimal and the maximal values of prominence to probe.
    If None, these values are inferred as the minimal and the maximal
    non-zero difference between any two elements of `v`.
steps_prom : int
    The number of threshold prominence values to probe in the range
    between `min_prom` and `max_prom`.
log_space_proms : bool
    If True, probe logarithmically spaced values of the threshold
    prominence in the range between `min_prom` and `max_prom`.
min_n_peak_pairs : int
    If the number of detected minima/maxima at a certain threshold
    prominence is < `min_n_peak_pairs`, the detected peaks are ignored.

Returns
-------
minproms, maxproms : numpy.array
    The prominence of detected minima and maxima.
https://github.com/open2c/cooltools/blob/a8ace1e8b38a04e6863ed56526fc6febfacfd2fc/cooltools/lib/peaks.py#L200-L270
import warnings

import numpy as np


def find_peak_prominence(arr, max_dist=None):
    arr = np.asarray(arr)
    n = len(arr)
    max_dist = len(arr) if max_dist is None else int(max_dist)

    arr_nonans = arr[~np.isnan(arr)]
    idxs_nonans2idx = np.arange(arr.size)[~np.isnan(arr)]

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        is_min_left = np.r_[False, arr_nonans[:-1] > arr_nonans[1:]]
        is_min_right = np.r_[arr_nonans[:-1] < arr_nonans[1:], False]
        is_loc_min = is_min_left & is_min_right
        loc_min_poss = np.where(is_loc_min)[0]
        loc_min_poss = idxs_nonans2idx[loc_min_poss]

        is_max_left = np.r_[False, arr_nonans[:-1] < arr_nonans[1:]]
        is_max_right = np.r_[arr_nonans[:-1] > arr_nonans[1:], False]
        is_loc_max = is_max_left & is_max_right
        loc_max_poss = np.where(is_loc_max)[0]
        loc_max_poss = idxs_nonans2idx[loc_max_poss]

    left_maxs = -1 * np.ones(len(loc_max_poss), dtype=np.int)
    right_maxs = -1 * np.ones(len(loc_max_poss), dtype=np.int)

    for i, pos in enumerate(loc_max_poss):
        for j in range(pos - 1, -1, -1):
            if (arr[j] > arr[pos]) or (pos - j > max_dist):
                left_maxs[i] = j
                break

        for j in range(pos + 1, n):
            if (arr[j] > arr[pos]) or (j - pos > max_dist):
                right_maxs[i] = j
                break

    left_max_proms = np.array(
        [
            (
                arr[pos] - np.nanmin(arr[left_maxs[i] : pos])
                if (left_maxs[i] >= 0)
                else np.nan
            )
            for i, pos in enumerate(loc_max_poss)
        ]
    )
    right_max_proms = np.array(
        [
            (
                arr[pos] - np.nanmin(arr[pos : right_maxs[i]])
                if (right_maxs[i] >= 0)
                else np.nan
            )
            for i, pos in enumerate(loc_max_poss)
        ]
    )
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        max_proms = np.nanmin(np.vstack([left_max_proms, right_max_proms]), axis=0)

    global_max_mask = (left_maxs == -1) & (right_maxs == -1)
    if (global_max_mask).sum() > 0:
        global_max_idx = np.where(global_max_mask)[0][0]
        global_max_pos = loc_max_poss[global_max_idx]
        neighbor_loc_mins = (loc_min_poss >= global_max_pos - max_dist) & (
            loc_min_poss < global_max_pos + max_dist
        )
        if np.any(neighbor_loc_mins):
            max_proms[global_max_idx] = arr[global_max_pos] - np.nanmin(
                arr[loc_min_poss[neighbor_loc_mins]]
            )
        else:
            max_proms[global_max_idx] = arr[global_max_pos] - np.nanmin(
                arr[max(global_max_pos - max_dist, 0) : global_max_pos + max_dist]
            )

    return loc_max_poss, max_proms


def peakdet(arr, min_prominence):
    maxidxs = []
    minidxs = []

    x = np.arange(len(arr))
    arr = np.asarray(arr)

    if not np.isscalar(min_prominence):
        raise Exception("Input argument delta must be a scalar")

    if min_prominence <= 0:
        raise Exception("Input argument delta must be positive")

    mn, mx = np.inf, -np.inf
    mnpos, mxpos = np.nan, np.nan

    lookformax = True

    for i in np.arange(len(arr)):
        this = arr[i]
        if this > mx:
            mx = this
            mxpos = x[i]
        if this < mn:
            mn = this
            mnpos = x[i]

        if lookformax:
            if this < mx - min_prominence:
                maxidxs.append(mxpos)
                mn = this
                mnpos = x[i]
                lookformax = False
        else:
            if this > mn + min_prominence:
                minidxs.append(mnpos)
                mx = this
                mxpos = x[i]
                lookformax = True

    return np.array(minidxs), np.array(maxidxs)
MIT License
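A usage sketch on synthetic data; with both prominence bounds left as None they are inferred from the signal itself:

import numpy as np

x = np.sin(np.linspace(0, 6 * np.pi, 200)) + np.random.normal(0, 0.1, 200)
minproms, maxproms = find_peak_prominence_iterative(x, steps_prom=100)
peak_idx = np.where(np.isfinite(maxproms))[0]   # locations called as maxima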
nestauk/old_nesta_daps
nesta/packages/worldbank/collect_worldbank.py
unpack_quantity
python
def unpack_quantity(row, concept, value):
    for quantity in row['variable']:
        if quantity['concept'] == concept:
            return quantity[value]
    raise NameError(f"No item found in {row['variable']} with "
                    f"concept = {concept}")
Unpack row like {"variable": [{"concept":<concept>, <value>:_i_want_this_}]}

Args:
    row (dict): Row of Worldbank API data.
    concept (str): The name of the dataset containing the variable.
    value (str): The name of the variable to unpack.
Returns:
    A value.
https://github.com/nestauk/old_nesta_daps/blob/4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3/nesta/packages/worldbank/collect_worldbank.py#L205-L219
import requests
from retrying import retry
import json
from collections import defaultdict
import re
import math
import pandas as pd

WORLDBANK_ENDPOINT = "http://api.worldbank.org/v2/{}"
DEAD_RESPONSE = (None, None)


def worldbank_request(suffix, page, per_page=10000, data_key_path=None):
    response = _worldbank_request(suffix=suffix, page=page, per_page=per_page)
    metadata, data = data_from_response(response=response, data_key_path=data_key_path)
    return metadata, data


@retry(stop_max_attempt_number=3, wait_fixed=2000)
def _worldbank_request(suffix, page, per_page):
    r = requests.get(WORLDBANK_ENDPOINT.format(suffix),
                     params=dict(per_page=per_page, format="json", page=page))
    if r.status_code == 400:
        return DEAD_RESPONSE
    r.raise_for_status()
    response = DEAD_RESPONSE
    try:
        response = r.json()
    except json.JSONDecodeError:
        pass
    finally:
        return response


def data_from_response(response, data_key_path=None):
    if data_key_path is None or response == DEAD_RESPONSE:
        metadata, datarows = response
    else:
        metadata = response
        datarows = response.copy()
        for key in data_key_path:
            datarows = datarows[key]
            if key != data_key_path[-1] and type(datarows) is list:
                datarows = datarows[0]
    return metadata, datarows


def calculate_number_of_api_pages(suffix, per_page=10000, data_key_path=None):
    metadata, _ = worldbank_request(suffix=suffix, page=1,
                                    per_page=1,
                                    data_key_path=data_key_path)
    if metadata is None:
        return 0
    total = int(metadata["total"])
    n_pages = math.floor(total / per_page) + int(total % per_page > 0)
    return n_pages


def worldbank_data_interval(suffix, first_page, last_page,
                            per_page=10000, data_key_path=None):
    for page in range(first_page, last_page+1):
        _, datarows = worldbank_request(suffix=suffix, page=page,
                                        per_page=per_page,
                                        data_key_path=data_key_path)
        if datarows is None:
            continue
        for row in datarows:
            yield row


def worldbank_data(suffix, per_page=10000, data_key_path=None):
    n_pages = calculate_number_of_api_pages(suffix=suffix,
                                            per_page=per_page,
                                            data_key_path=data_key_path)
    return worldbank_data_interval(suffix, first_page=1,
                                   last_page=n_pages,
                                   per_page=per_page,
                                   data_key_path=data_key_path)


def get_worldbank_resource(resource):
    collection = []
    for row in worldbank_data(resource):
        data = {}
        for k, v in row.items():
            if type(v) is dict:
                v = v["value"]
            data[k] = v
        collection.append(data)
    return collection


def get_variables_by_code(codes):
    key_path = ["source", "concept", "variable"]
    variables = defaultdict(list)
    sources = get_worldbank_resource("source")
    for source in sources:
        suffix = f"sources/{source['id']}/series/data"
        data = worldbank_data(suffix, data_key_path=key_path)
        filtered_data = filter(lambda row: (row['id'] in codes), data)
        for row in filtered_data:
            variables[row['id']].append(source['id'])
    return variables
MIT License
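A usage sketch on a made-up row shaped like the Worldbank API output:

row = {"variable": [{"concept": "Country", "id": "BR", "value": "Brazil"}]}
unpack_quantity(row, "Country", "value")   # -> "Brazil"
unpack_quantity(row, "Country", "id")      # -> "BR"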
stevengrove/dynamichead
cvpods/modeling/backbone/shufflenet.py
build_shufflenetv2_backbone
python
def build_shufflenetv2_backbone(cfg, input_shape):
    channel_mapper = {
        "0.5x": [24, 48, 96, 192, 1024],
        "1.0x": [24, 116, 232, 464, 1024],
        "1.5x": [24, 176, 352, 704, 1024],
        "2.0x": [24, 244, 488, 976, 2048],
    }

    model_size = cfg.MODEL.SHUFFLENET.MODEL_SIZE
    output_feautres = cfg.MODEL.SHUFFLENET.OUT_FEATURES
    num_classes = cfg.MODEL.SHUFFLENET.NUM_CLASSES
    norm = cfg.MODEL.SHUFFLENET.NORM
    init_model = cfg.MODEL.SHUFFLENET.INIT_MODEL

    assert model_size in channel_mapper, "Model size {} not supported.".format(model_size)
    channels = channel_mapper[model_size]

    model = ShuffleNetV2(
        input_shape.channels,
        channels,
        num_classes=num_classes,
        dropout=model_size == "2.0",
        out_features=output_feautres,
        norm=norm,
        init_model=init_model,
    )
    model.freeze(cfg.MODEL.BACKBONE.FREEZE_AT)
    return model
Create a ShuffleNetV2 instance from config.

Returns:
    ShuffleNetV2: a :class:`ShuffleNetV2` instance.
https://github.com/stevengrove/dynamichead/blob/5097e2bd21e36fbdcab90f0b23b67162157e46bb/cvpods/modeling/backbone/shufflenet.py#L295-L327
import numpy as np
import torch
import torch.nn as nn

from cvpods.layers import Conv2d, FrozenBatchNorm2d, ShapeSpec, get_norm
from cvpods.modeling.backbone import Backbone


class ShuffleV2Block(nn.Module):
    def __init__(
        self, input_channels, output_channels, mid_channels, kernel_size,
        stride, bias=False, norm="BN"
    ):
        super(ShuffleV2Block, self).__init__()
        assert stride in [1, 2]
        self.stride = stride

        padding = kernel_size // 2
        delta_channels = output_channels - input_channels

        branch_main = [
            Conv2d(
                input_channels, mid_channels,
                kernel_size=1, bias=bias,
                norm=get_norm(norm, mid_channels),
                activation=nn.ReLU(inplace=True),
            ),
            Conv2d(
                mid_channels, mid_channels,
                kernel_size, stride, padding,
                groups=mid_channels, bias=bias,
                norm=get_norm(norm, mid_channels),
            ),
            Conv2d(
                mid_channels, delta_channels,
                kernel_size=1, bias=bias,
                norm=get_norm(norm, delta_channels),
                activation=nn.ReLU(inplace=True),
            )
        ]
        self.branch_main = nn.Sequential(*branch_main)

        self.branch_proj = None
        if stride == 2:
            branch_proj = [
                Conv2d(
                    input_channels, input_channels,
                    kernel_size, stride, padding,
                    groups=input_channels, bias=bias,
                    norm=get_norm(norm, input_channels)
                ),
                Conv2d(
                    input_channels, input_channels,
                    kernel_size=1, bias=bias,
                    norm=get_norm(norm, input_channels),
                    activation=nn.ReLU(inplace=True)
                )
            ]
            self.branch_proj = nn.Sequential(*branch_proj)

    def forward(self, x):
        if self.branch_proj is None:
            x_proj, x = self.channel_shuffle(x)
        else:
            x_proj = self.branch_proj(x)
        x = self.branch_main(x)
        return torch.cat([x_proj, x], dim=1)

    def channel_shuffle(self, x):
        N, C, H, W = x.shape
        assert C % 2 == 0, "number of channels must be divided by 2, got {}".format(C)
        x = x.view(N, C // 2, 2, H, W).permute(2, 0, 1, 3, 4).contiguous()
        return x[0], x[1]

    def freeze(self):
        for p in self.parameters():
            p.requires_grad = False
        FrozenBatchNorm2d.convert_frozen_batchnorm(self)
        return self


class ShuffleNetV2(Backbone):
    def __init__(
        self, in_channels, channels, num_classes=None, dropout=False,
        out_features=None, norm="BN", init_model=False
    ):
        super(ShuffleNetV2, self).__init__()
        self.stage_out_channels = channels
        self.num_classes = num_classes

        input_channels = self.stage_out_channels[0]
        self.stem = nn.Sequential(*[
            Conv2d(
                in_channels, input_channels,
                kernel_size=3, stride=2, padding=1, bias=False,
                norm=get_norm(norm, input_channels),
                activation=nn.ReLU(inplace=True),
            ),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        ])
        current_stride = 4

        self._out_feature_strides = {"stem": current_stride}
        self._out_feature_channels = {"stem": input_channels}

        self.stage_num_blocks = [4, 8, 4]
        self.stages_and_names = []
        for i in range(len(self.stage_num_blocks)):
            num_blocks = self.stage_num_blocks[i]
            output_channels = self.stage_out_channels[i + 1]
            name = "shuffle" + str(i + 3)
            block_list = make_stage(num_blocks, input_channels, output_channels, norm)
            current_stride = current_stride * np.prod([block.stride for block in block_list])
            stages = nn.Sequential(*block_list)

            self._out_feature_strides[name] = current_stride
            self._out_feature_channels[name] = output_channels

            self.add_module(name, stages)
            self.stages_and_names.append((stages, name))
            input_channels = output_channels

        name = "shuffle" + str(len(self.stage_num_blocks) + 2) + "-last"
        last_output_channels = self.stage_out_channels[-1]
        last_conv = Conv2d(
            output_channels, last_output_channels,
            kernel_size=1, bias=False,
            norm=get_norm(norm, last_output_channels),
            activation=nn.ReLU(inplace=True)
        )
        self._out_feature_strides[name] = current_stride
        self._out_feature_channels[name] = last_output_channels
        self.add_module(name, last_conv)
        self.stages_and_names.append((last_conv, name))

        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.dropout = dropout
            if dropout:
                self.dropout = nn.Dropout(0.2)
            self.classifier = nn.Linear(self.stage_out_channels[-1], num_classes, bias=False)
            name = "linear"

        self._out_features = [name] if out_features is None else out_features

        if init_model:
            self._initialize_weights()

    def forward(self, x):
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        for stages, name in self.stages_and_names:
            x = stages(x)
            if name in self._out_features:
                outputs[name] = x
        if self.num_classes is not None:
            x = self.avgpool(x)
            if self.dropout:
                x = self.dropout(x)
            x = x.reshape(-1, self.stage_out_channels[-1])
            x = self.classifier(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs

    def output_shape(self):
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name],
                stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }

    def freeze(self, freeze_at):
        if freeze_at >= 1:
            for p in self.stem.parameters():
                p.requires_grad = False
            FrozenBatchNorm2d.convert_frozen_batchnorm(self.stem)
        for i in range(freeze_at - 1):
            FrozenBatchNorm2d.convert_frozen_batchnorm(self.stages_and_names[i][0])

    def _initialize_weights(self):
        for name, m in self.named_modules():
            if isinstance(m, nn.Conv2d):
                if 'first' in name:
                    nn.init.normal_(m.weight, 0, 0.01)
                else:
                    nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0001)
                nn.init.constant_(m.running_mean, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0001)
                nn.init.constant_(m.running_mean, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)


def make_stage(num_blocks, input_channels, output_channels, norm):
    blocks = []
    blocks.append(ShuffleV2Block(
        input_channels, output_channels,
        mid_channels=output_channels // 2,
        kernel_size=3, stride=2, norm=norm)
    )
    input_channels = output_channels
    for i in range(num_blocks - 1):
        blocks.append(ShuffleV2Block(
            input_channels // 2, output_channels,
            mid_channels=output_channels // 2,
            kernel_size=3, stride=1, norm=norm)
        )
    return blocks
Apache License 2.0
sumanyu/stacked-denoising-autoencoders
dA.py
dA.get_cost_updates
python
def get_cost_updates(self, corruption_level, learning_rate):
    tilde_x = self.get_corrupted_input(self.x, corruption_level)
    y = self.get_hidden_values(tilde_x)
    z = self.get_reconstructed_input(y)
    L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
    cost = T.mean(L)

    gparams = T.grad(cost, self.params)
    updates = [
        (param, param - learning_rate * gparam)
        for param, gparam in zip(self.params, gparams)
    ]

    return (cost, updates)
This function computes the cost and the updates for one training step of the dA
https://github.com/sumanyu/stacked-denoising-autoencoders/blob/678012cb3acadbbc2f7e9635db8fbced46471bf0/dA.py#L220-L247
import numpy

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams


class dA(object):

    def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        input=None,
        n_visible=784,
        n_hidden=500,
        W=None,
        bhid=None,
        bvis=None
    ):
        self.n_visible = n_visible
        self.n_hidden = n_hidden

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        if not W:
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W = theano.shared(value=initial_W, name='W', borrow=True)

        if not bvis:
            bvis = theano.shared(
                value=numpy.zeros(
                    n_visible,
                    dtype=theano.config.floatX
                ),
                borrow=True
            )

        if not bhid:
            bhid = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )

        self.W = W
        self.b = bhid
        self.b_prime = bvis
        self.W_prime = self.W.T
        self.theano_rng = theano_rng

        if input is None:
            self.x = T.dmatrix(name='input')
        else:
            self.x = input

        self.params = [self.W, self.b, self.b_prime]

    def get_corrupted_input(self, input, corruption_level):
        return self.theano_rng.binomial(size=input.shape, n=1,
                                        p=1 - corruption_level,
                                        dtype=theano.config.floatX) * input

    def get_hidden_values(self, input):
        return T.nnet.sigmoid(T.dot(input, self.W) + self.b)

    def get_reconstructed_input(self, hidden):
        return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)
Apache License 2.0
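A minimal training-step sketch in the style of the Theano tutorials this class derives from; shapes and hyperparameters are illustrative:

import numpy
import theano
import theano.tensor as T

x = T.matrix('x')
da = dA(numpy_rng=numpy.random.RandomState(123), input=x,
        n_visible=784, n_hidden=500)
cost, updates = da.get_cost_updates(corruption_level=0.3, learning_rate=0.1)
train_da = theano.function([x], cost, updates=updates)  # call with a data batch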
luckydonald/pytgbot
code_generation/output/pytgbot/api_types/sendable/passport.py
PassportElementErrorDataField.__init__
python
def __init__(self, type, field_name, data_hash, message):
    super(PassportElementErrorDataField, self).__init__()
    self.source = 'data'

    assert_type_or_raise(type, unicode_type, parameter_name="type")
    self.type = type

    assert_type_or_raise(field_name, unicode_type, parameter_name="field_name")
    self.field_name = field_name

    assert_type_or_raise(data_hash, unicode_type, parameter_name="data_hash")
    self.data_hash = data_hash

    assert_type_or_raise(message, unicode_type, parameter_name="message")
    self.message = message
Represents an issue in one of the data fields that was provided by the user. The error is considered resolved when the field's value changes.

https://core.telegram.org/bots/api#passportelementerrordatafield


Parameters:

:param type: The section of the user's Telegram Passport which has the error, one of "personal_details", "passport", "driver_license", "identity_card", "internal_passport", "address"
:type  type: str|unicode

:param field_name: Name of the data field which has the error
:type  field_name: str|unicode

:param data_hash: Base64-encoded data hash
:type  data_hash: str|unicode

:param message: Error message
:type  message: str|unicode

Optional keyword parameters:
https://github.com/luckydonald/pytgbot/blob/e29a0b5f8f8331bd347c8e2b8e75af19b12d1bc5/code_generation/output/pytgbot/api_types/sendable/passport.py#L59-L92
from luckydonaldUtils.encoding import unicode_type, to_unicode as u
from luckydonaldUtils.exceptions import assert_type_or_raise
from . import Sendable

__author__ = 'luckydonald'
__all__ = [
    'PassportElementError',
    'PassportElementErrorDataField',
    'PassportElementErrorFrontSide',
    'PassportElementErrorReverseSide',
    'PassportElementErrorSelfie',
    'PassportElementErrorFile',
    'PassportElementErrorFiles',
    'PassportElementErrorTranslationFile',
    'PassportElementErrorTranslationFiles',
    'PassportElementErrorUnspecified',
]


class PassportElementError(Sendable):
    pass


class PassportElementErrorDataField(PassportElementError):
MIT License
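A construction sketch; the hash is a placeholder, not a real base64 digest:

error = PassportElementErrorDataField(
    type='personal_details',
    field_name='first_name',
    data_hash='placeholder_base64_hash',
    message='First name does not match the document',
)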
eelcohoogendoorn/numpy_arraysetops_ep
numpy_indexed/funcs.py
Table.unique
python
def unique(self, values):
    _, count = self.count()
    if not np.array_equiv(count, 1):
        raise ValueError("Not every entry in the table is assigned a unique value")
    return self.sum(values)
Place each entry in a table, while asserting that each entry occurs once
https://github.com/eelcohoogendoorn/numpy_arraysetops_ep/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L175-L180
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *

import numpy as np

from numpy_indexed.grouping import GroupBy, group_by
from numpy_indexed.index import LexIndex, as_index
from numpy_indexed import semantics

__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"


def count(keys, axis=semantics.axis_default):
    index = as_index(keys, axis, base=True)
    return index.unique, index.count


def count_table(*keys):
    indices = [as_index(k, axis=0) for k in keys]
    uniques = [i.unique for i in indices]
    inverses = [i.inverse for i in indices]
    shape = [i.groups for i in indices]
    table = np.zeros(shape, np.int)
    np.add.at(table, inverses, 1)
    return tuple(uniques), table


def binning(keys, start, end, count, axes=None):
    if isinstance(keys, tuple):
        n_keys = len(keys)
    else:
        n_keys = 1
    bins = np.linspace(start, end, count+1, endpoint=True)
    idx = np.searchsorted(bins, keys)
    if axes is None:
        axes = [-1]


class Table(object):

    def __init__(self, *keys):
        self.keys = tuple(keys)
        self.indices = [as_index(k, axis=0) for k in keys]
        self.uniques = [i.unique for i in self.indices]
        self.shape = [i.groups for i in self.indices]

    def get_inverses(self, keys):
        return tuple([as_index(k, axis=0).inverse for k in keys])

    def allocate(self, dtype, fill=0):
        arr = np.empty(self.shape, dtype=dtype)
        arr.fill(fill)
        return arr

    def count(self):
        table = self.allocate(np.int)
        np.add.at(table, self.get_inverses(self.keys), 1)
        return tuple(self.uniques), table

    def sum(self, values):
        table = self.allocate(values.dtype)
        keys, values = group_by(self.keys).sum(values)
        table[self.get_inverses(keys)] = values
        return tuple(self.uniques), table

    def mean(self, values):
        table = self.allocate(np.float, np.nan)
        keys, values = group_by(self.keys).mean(values)
        table[self.get_inverses(keys)] = values
        return tuple(self.uniques), table

    def first(self, values):
        table = self.allocate(np.float, np.nan)
        keys, values = group_by(self.keys).first(values)
        table[self.get_inverses(keys)] = values
        return tuple(self.uniques), table

    def last(self, values):
        table = self.allocate(np.float, np.nan)
        keys, values = group_by(self.keys).last(values)
        table[self.get_inverses(keys)] = values
        return tuple(self.uniques), table

    def min(self, values, default=None):
        if default is None:
            try:
                info = np.iinfo(values.dtype)
                default = info.max
            except:
                default = +np.inf
        table = self.allocate(values.dtype, default)
        keys, values = group_by(self.keys).min(values)
        table[self.get_inverses(keys)] = values
        return tuple(self.uniques), table

    def max(self, values, default=None):
        if default is None:
            try:
                info = np.iinfo(values.dtype)
                default = info.min
            except:
                default = -np.inf
        table = self.allocate(values.dtype, default)
        keys, values = group_by(self.keys).max(values)
        table[self.get_inverses(keys)] = values
        return tuple(self.uniques), table
MIT License
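A usage sketch: every (row, col) key pair must occur exactly once, otherwise the count check raises:

import numpy as np

rows = np.array([0, 0, 1, 1])
cols = np.array([0, 1, 0, 1])
vals = np.array([10, 20, 30, 40])
(urows, ucols), table = Table(rows, cols).unique(vals)
# table == [[10, 20], [30, 40]]; duplicate pairs would raise ValueError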
google/clusterfuzz
src/clusterfuzz/_internal/system/minijail.py
MinijailChroot.add_binding
python
def add_binding(self, binding):
  if binding in self._bindings:
    return

  self._makedirs(binding.dest_path)
  self._bindings.append(binding)
Adds a directory to be bound to the chroot.

Args:
  binding: A ChrootBinding.
https://github.com/google/clusterfuzz/blob/e9e105d66f009356c4f3fe9ae7873ffff126b234/src/clusterfuzz/_internal/system/minijail.py#L179-L189
from collections import namedtuple
import os
import shutil
import signal
import subprocess
import tempfile

from clusterfuzz._internal.metrics import logs
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.system import new_process
from clusterfuzz._internal.system import shell


def _get_minijail_path():
  return os.path.join(environment.get_platform_resources_directory(),
                      'minijail0')


def _get_minijail_user_namespace_args():
  arguments = ['-U']
  uid_map = [
      '0 {0} 1'.format(os.getuid()),
  ]
  arguments.extend(['-m', ','.join(uid_map)])
  return arguments


def _create_chroot_dir(base_dir):
  return tempfile.mkdtemp(dir=base_dir)


def _create_tmp_mount(base_dir):
  return tempfile.mkdtemp(dir=base_dir)


ChrootBinding = namedtuple('ChrootBinding',
                           ['src_path', 'dest_path', 'writeable'])


class MinijailChroot(object):

  DEFAULT_BINDINGS = [
      '/lib',
      '/lib32',
      '/lib64',
      '/usr/lib',
      '/usr/lib32',
  ]

  def __init__(self, base_dir=None, bindings=None, use_existing_base=False):
    if not use_existing_base:
      self._chroot_dir = _create_chroot_dir(base_dir=base_dir)
    else:
      self._chroot_dir = base_dir

    os.mkdir(os.path.join(self._chroot_dir, 'tmp'))
    os.mkdir(os.path.join(self._chroot_dir, 'proc'))

    self._tmp_mount = _create_tmp_mount(base_dir=base_dir)

    self._bindings = [
        ChrootBinding(self._tmp_mount, '/tmp', True),
    ]

    self._create_devices()

    for directory in self.DEFAULT_BINDINGS:
      if not os.path.exists(directory):
        continue
      self.add_binding(ChrootBinding(directory, directory, False))

    if not bindings:
      return

    for binding in bindings:
      self.add_binding(binding)

  def _mknod(self, path, file_type, major, minor):
    try:
      with open(os.devnull) as devnull:
        subprocess.check_output(
            [
                'sudo', '-S', 'mknod', '-m', '666', path, file_type,
                str(major), str(minor)
            ],
            stdin=devnull,
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
      logs.log_error('Failed to call mknod.', output=e.output)

  def _create_devices(self):
    dev_dir = os.path.join(self._chroot_dir, 'dev')
    os.mkdir(dev_dir)
    self._mknod(os.path.join(dev_dir, 'null'), 'c', 1, 3)
    self._mknod(os.path.join(dev_dir, 'random'), 'c', 1, 8)
    self._mknod(os.path.join(dev_dir, 'urandom'), 'c', 1, 9)
    os.mkdir(os.path.join(dev_dir, 'shm'))

  def _makedirs(self, directory):
    if directory[0] == '/':
      directory = directory[1:]
    shell.create_directory(
        os.path.join(self._chroot_dir, directory), create_intermediates=True)

  @property
  def bindings(self):
    return self._bindings

  @property
  def directory(self):
    return self._chroot_dir

  @property
  def tmp_directory(self):
    return self._tmp_mount
Apache License 2.0
discord-py-ui/discord-ui
discord_ui/receive.py
Message.disable_action_row
python
async def disable_action_row(self, row, disable = True):
    comps = []
    if isinstance(row, range):
        for i, _ in enumerate(self.action_rows):
            if i >= len(self.action_rows) - 1 or i < 0:
                raise OutOfValidRange("row[" + str(i) + "]", 0, len(self.action_rows) - 1)
            for comp in self.action_rows[i]:
                if i in row:
                    comp.disabled = disable
                comps.append(comp)
    else:
        for i, _ in enumerate(self.action_rows):
            if i >= len(self.action_rows) - 1 or i < 0:
                raise OutOfValidRange("row", 0, len(self.action_rows) - 1)
            for comp in self.action_rows[i]:
                if i == row:
                    comp.disabled = disable
                comps.append(comp)
    await self.edit(components=comps)
Disables an action row of components in the message

Parameters
----------
row: :class:`int` | :class:`range`
    Which rows to disable, first row is ``0``; If row parameter is of type :class:`int`, the nth row will be disabled, if type is :class:`range`, the range is going to be iterated and all rows in the range will be disabled
disable: :class:`bool`, optional
    Whether to disable (``True``) or enable (``False``) the components; default True

Raises
------
:raises: :class:`discord_ui.errors.OutOfValidRange` : The specified range was out of the possible range of the component rows
:raises: :class:`discord_ui.errors.OutOfValidRange` : The specified row was out of the possible range of the component rows
https://github.com/discord-py-ui/discord-ui/blob/b888081628b0679183284626cac5ce898cebd13a/discord_ui/receive.py#L501-L535
from .slash.http import ModifiedSlashState
from .errors import InvalidEvent, OutOfValidRange, WrongType
from .slash.errors import AlreadyDeferred, EphemeralDeletion
from .tools import MISSING, setup_logger, _none, get, _default
from .slash.types import ContextCommand, SlashCommand, SlashOption, SlashPermission, SlashSubcommand
from .http import BetterRoute, jsonifyMessage, send_files
from .components import ActionRow, Button, LinkButton, SelectMenu, SelectOption, UseableComponent, make_component

import discord
from discord.ext.commands import Bot
from discord.errors import HTTPException
from discord.state import ConnectionState

from typing import Any, List, Union, Dict
try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal

logging = setup_logger("discord-ui")


class InteractionType:
    PING = Ping = 1
    APPLICATION_COMMAND = Command = 2
    MESSAGE_COMPONENT = Component = 3
    APPLICATION_COMMAND_AUTOCOMPLETE = Autocomplete = 4


class Interaction():
    def __init__(self, state, data, user=None, message=None) -> None:
        self._state: ModifiedSlashState = state

        self.deferred: bool = False
        self.responded: bool = False
        self._deferred_hidden: bool = False
        self._original_payload: dict = data

        self.author: Union[discord.Member, discord.User] = user
        self.application_id: int = data["application_id"]
        self.token: str = data["token"]
        self.id: int = data["id"]
        self.type: int = data["type"]
        self.version: int = data["version"]
        self.data: dict = data["data"]
        self.channel_id: int = int(data.get("channel_id")) if data.get("channel_id") is not None else None
        self.guild_id: int = int(data["guild_id"]) if data.get("guild_id") is not None else None
        self.message: Message = message

    @property
    def guild(self) -> discord.Guild:
        return self._state._get_guild(self.guild_id)

    @property
    def channel(self) -> Union[discord.TextChannel, discord.DMChannel]:
        return self._state.get_channel(self.channel_id) or self._state.get_channel(self.author.id)

    async def defer(self, hidden=False):
        if self.deferred:
            logging.error(AlreadyDeferred())
            return

        payload = None
        if hidden is True:
            payload = {"flags": 64}
            self._deferred_hidden = True

        await self._state.slash_http.respond_to(self.id, self.token, 5, payload)
        self.deferred = True

    async def respond(self, content=MISSING, *, tts=False, embed=MISSING, embeds=MISSING,
                      file=MISSING, files=MISSING, nonce=MISSING, allowed_mentions=MISSING,
                      mention_author=MISSING, components=MISSING, delete_after=MISSING,
                      listener=MISSING, hidden=False,
                      ninja_mode=False) -> Union['Message', 'EphemeralMessage']:
        if ninja_mode is True or all(y in [MISSING, False] for x, y in locals().items() if x not in ["self"]):
            try:
                await self._state.slash_http.respond_to(self.id, self.token, 6)
                return
            except HTTPException as x:
                if "value must be one of (4, 5)" in str(x).lower():
                    logging.warning(str(x) + "\n" + "The 'ninja_mode' parameter is not supported for slash commands!")
                    ninja_mode = False
                else:
                    raise x

        if self.responded is True:
            return await self.send(content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, mention_author=mention_author, components=components, listener=listener, hidden=hidden)

        if components is MISSING and listener is not MISSING:
            components = listener.to_components()

        payload = jsonifyMessage(content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, mention_author=mention_author, components=components)

        if self._deferred_hidden is hidden:
            if self._deferred_hidden is False and hidden is True:
                logging.warning("Your response should be hidden, but the interaction was deferred public. This results in a public response.")
            if self._deferred_hidden is True and hidden is False:
                logging.warning("Your response should be public, but the interaction was deferred hidden. This results in a hidden response.")
        hide_message = self._deferred_hidden or not self.deferred and hidden is True

        r = None
        if delete_after is not MISSING and hide_message is True:
            raise EphemeralDeletion()

        if hide_message:
            payload["flags"] = 64

        if self.deferred:
            route = BetterRoute("PATCH", f'/webhooks/{self.application_id}/{self.token}/messages/@original')
            if file is not MISSING or files is not MISSING:
                await send_files(route=route, files=[file] if files is MISSING else files, payload=payload, http=self._state.http)
            else:
                await self._state.http.request(route, json=payload)
        else:
            await self._state.slash_http.respond_to(self.id, self.token, 4, payload, files=[file] if file is not MISSING else _default(None, files))
        self.responded = True

        r = await self._state.http.request(BetterRoute("GET", f"/webhooks/{self.application_id}/{self.token}/messages/@original"))
        if hide_message is True:
            msg = EphemeralMessage(state=self._state, channel=self.channel, data=r, application_id=self.application_id, token=self.token)
        else:
            msg = await getMessage(self._state, data=r, response=False)

        if listener is not MISSING:
            listener._start(msg)

        if not _none(delete_after):
            await msg.delete(delete_after)

        return msg

    async def send(self, content=None, *, tts=False, embed=MISSING, embeds=MISSING,
                   file=MISSING, files=MISSING, nonce=MISSING, allowed_mentions=MISSING,
                   mention_author=MISSING, components=MISSING, delete_after=MISSING,
                   listener=MISSING, hidden=False) -> Union['Message', 'EphemeralMessage']:
        if self.responded is False:
            return await self.respond(content=content, tts=tts, embed=embed, embeds=embeds, file=file, files=files, nonce=nonce, allowed_mentions=allowed_mentions, mention_author=mention_author, components=components, delete_after=delete_after, listener=listener, hidden=hidden)

        if components is MISSING and listener is not MISSING:
            components = listener.to_components()

        payload = jsonifyMessage(content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, mention_author=mention_author, components=components)

        if hidden:
            payload["flags"] = 64

        route = BetterRoute("POST", f'/webhooks/{self.application_id}/{self.token}')
        if file is not MISSING or files is not MISSING:
            r = await send_files(route=route, files=[file] if files is MISSING else files, payload=payload, http=self._state.http)
        else:
            r = await self._state.http.request(route, json=payload)

        if hidden is True:
            msg = EphemeralMessage(state=self._state, channel=self._state.get_channel(int(r["channel_id"])), data=r, application_id=self.application_id, token=self.token)
        else:
            msg = await getMessage(self._state, r, response=False)

        if delete_after is not MISSING:
            await msg.delete(delete_after)

        if listener is not MISSING:
            listener._start(msg)

        return msg

    def _handle_auto_defer(self, auto_defer):
        self.deferred = auto_defer[0]
        self._deferred_hidden = auto_defer[1]


class ChoiceGeneratorContext(Interaction):
    def __init__(self, command, state, data, options, user=None) -> None:
        Interaction.__init__(self, state, data, user=user)
        self.focused_option: dict = options[get(options, check=lambda x: options[x].get("focused", False))]
        self.value_query: Union[str, int] = self.focused_option["value"]
        self.selected_options: Dict[str, Any] = {options[x]["name"]: options[x]["value"]
for x in options} self.command: Union[SlashedCommand, SlashedCommand, SlashedContext] = command async def defer(self, *args, **kwargs): raise NotImplementedError() async def respond(self, *args, **kwargs): raise NotImplementedError() async def send(self, *args, **kwargs): raise NotImplementedError() class ComponentContext(Interaction, UseableComponent): def __init__(self, state, data, user, message) -> None: Interaction.__init__(self, state, data, user=user, message=message) UseableComponent.__init__(self, data["data"]["component_type"]) class SelectedMenu(Interaction, SelectMenu): def __init__(self, data, user, s, msg, client) -> None: Interaction.__init__(self, client._connection, data, user, msg) default = [i for i, o in enumerate(s.options) if o.default is True] SelectMenu.__init__(self, s.custom_id, s.options, s.min_values, s.max_values, s.placeholder, default[0] if len(default) == 1 else None, s.disabled) self.bot: Bot = client self.selected_options: List[SelectOption] = [] self.selected_values: List[str] = [] for val in data["data"]["values"]: for x in self.options: if x.value == val: self.selected_options.append(x) self.selected_values.append(x.value) self.author: discord.Member = user class PressedButton(Interaction, Button): def __init__(self, data, user, b, message, client) -> None: Interaction.__init__(self, client._connection, data, user, message) Button.__init__(self, b.custom_id, b.label, b.color, b.emoji, b.new_line, b.disabled) self._json = b.to_dict() self.bot: Bot = client self.author: discord.Member = user class SlashedCommand(Interaction, SlashCommand): def __init__(self, client, command: SlashCommand, data, user, args = None) -> None: Interaction.__init__(self, client._connection, data, user) SlashCommand.__init__(self, command.callback, command.name, command.description, command.options, guild_ids=command.guild_ids, guild_permissions=command.guild_permissions) for x in self.__slots__: setattr(self, x, getattr(command, x)) self.bot: Bot = client self._json = command.to_dict() self.author: discord.Member = user self.args: Dict[str, Union[str, int, bool, discord.Member, discord.TextChannel, discord.Role, float]] = args self.permissions: SlashPermission = command.guild_permissions.get(self.guild_id) if command.guild_permissions is not None else None class SlashedSubCommand(SlashedCommand, SlashSubcommand): def __init__(self, client, command, data, user, args = None) -> None: SlashedCommand.__init__(self, client, command, data, user, args) SlashSubcommand.__init__(self, command.callback, command.base_names, command.name, command.description, command.options, command.guild_ids, command.default_permission, command.guild_permissions) class SlashedContext(Interaction, ContextCommand): def __init__(self, client, command: ContextCommand, data, user, param) -> None: Interaction.__init__(self, client._connection, data, user) ContextCommand.__init__(self, command.command_type, command.callback, command.name, guild_ids=command.guild_ids, guild_permissions=command.guild_permissions) for x in self.__slots__: setattr(self, x, getattr(command, x)) self._json = command.to_dict() self.bot: Bot = client self.param: Union[Message, discord.Member, discord.User] = param self.permissions: SlashPermission = command.guild_permissions.get(self.guild_id) if command.guild_permissions is not None else None async def getMessage(state: ConnectionState, data, response=True): msg_base = data.get("message", data) channel = state.get_channel(int(data["channel_id"])) or 
state.get_channel(int(msg_base["author"]["id"])) if response: if msg_base["flags"] == 64: return EphemeralResponseMessage(state=state, channel=channel, data=data.get("message", data)) return Message(state=state, channel=channel, data=msg_base) if msg_base["flags"] == 64: return EphemeralMessage(state=state, channel=channel, data=msg_base) return Message(state=state, channel=channel, data=msg_base) class Message(discord.Message): def __init__(self, *, state, channel, data): self.__slots__ = discord.Message.__slots__ + ("components", "supressed") self._payload = data self._state: ConnectionState = None discord.Message.__init__(self, state=state, channel=channel, data=data) self.components: List[Union[Button, LinkButton, SelectMenu]] = [] self.suppressed = False self._update_components(data) @property def buttons(self) -> List[Union[Button, LinkButton]]: if hasattr(self, "components") and self.components is not None: return [x for x in self.components if isinstance(x, (Button, LinkButton))] return [] @property def select_menus(self) -> List[SelectMenu]: if hasattr(self, "components") and self.components is not None: return [x for x in self.components if isinstance(x, SelectMenu)] return [] @property def action_row(self) -> ActionRow: return ActionRow(self.components) def _update_components(self, data): if data.get("components") is None: self.components = [] return self.components = [] if len(data["components"]) == 0: pass elif len(data["components"]) > 1: for componentWrapper in data["components"]: for index, com in enumerate(componentWrapper["components"]): self.components.append(make_component(com, index==0)) elif len(data["components"][0]["components"]) > 1: for index, com in enumerate(data["components"][0]["components"]): self.components.append(make_component(com, index==0)) else: component = data["components"][0]["components"][0] self.components.append(make_component(component)) def _update(self, data): super()._update(data) self._update_components(data) async def edit(self, content=MISSING, *, embed=MISSING, embeds=MISSING, attachments=MISSING, suppress=MISSING, delete_after=MISSING, allowed_mentions=MISSING, components=MISSING): payload = jsonifyMessage(content, embed=embed, embeds=embeds, allowed_mentions=allowed_mentions, attachments=attachments, suppress=suppress, flags=self.flags.value, components=components) data = await self._state.http.edit_message(self.channel.id, self.id, **payload) self._update(data) if delete_after is not MISSING: await self.delete(delay=delete_after)
MIT License
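A minimal usage sketch of the Interaction.defer/respond flow shown in the receive.py context above. The callback name and wiring are illustrative assumptions, not part of the library excerpt; `ctx` stands for the Interaction-like object discord-ui passes to slash-command callbacks.

# Hypothetical slash-command callback (registration happens elsewhere).
async def greet(ctx):
    # Acknowledge first so the interaction deadline is not missed; deferring
    # hidden means the eventual response stays hidden too.
    await ctx.defer(hidden=True)
    msg = await ctx.respond("hello!", hidden=True)
    # After the first response, further messages go through the webhook route.
    await ctx.send("follow-up message")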
halit/isip
isip/scapy/contrib/gsm_um.py
partialRelease
python
def partialRelease():
    a = TpPd(pd=0x6)
    b = MessageType(mesType=0xa)
    c = ChannelDescription()
    packet = a / b / c
    return packet
PARTIAL RELEASE Section 9.1.26
https://github.com/halit/isip/blob/fad1f10b02f9e075451588cc6a18a46cc5fbd66b/isip/scapy/contrib/gsm_um.py#L914-L920
import logging from types import IntType from types import NoneType from types import StringType import socket logging.getLogger("scapy").setLevel(1) from scapy.all import * def sendum(x, typeSock=0): try: if type(x) is not str: x = str(x) if typeSock is 0: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) host = '127.0.0.1' port = 28670 s.connect((host, port)) elif typeSock is 1: s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.connect("/tmp/osmoL") elif typeSock is 2: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) host = '127.0.0.1' port = 43797 s.connect((host, port)) s.send(x) s.close() except: print "[Error]: There was a problem when trying to transmit data.\ Please make sure you started the socket server." class ErrorLength(Exception): def __str__(self): error = "ERROR: Please make sure you build entire, 8 bit fields." return repr(error) def adapt(min_length, max_length, fields, fields2, location=2): location = min_length - location i = len(fields) - 1 rm = mysum = 0 while i >= 0: if fields[i] is None: rm += 1 try: mysum += fields2[i].size except AttributeError: mysum += 8 else: break i -= 1 if mysum % 8 is 0: length = mysum / 8 dyn_length = (max_length - min_length - length) if dyn_length < 0: dyn_length = 0 if length is max_length: length -= min_length return [length, dyn_length + location] else: raise ErrorLength() def examples(example=None): if example == None: print """This command presents some example to introduce scapy gsm-um to new users. The following parameters can be used: examples("imsiDetach") examples("call") examples("dissect")""" elif example == "imsiDetach": print """ >>> a=imsiDetachIndication() ... a.typeOfId=1; a.odd=1; a.idDigit1=0xF; ... a.idDigit2_1=2; a.idDigit2=7; a.idDigit3_1=0; ... a.idDigit3=7; a.idDigit4_1=7; a.idDigit4=2; ... a.idDigit5_1=0; a.idDigit5=0; a.idDigit6_1=0; ... a.idDigit6=1; a.idDigit7_1=2; a.idDigit7=7; ... a.idDigit8_1=7; a.idDigit8=5; a.idDigit9_1=1; a.idDigit9=4; >>> hexdump(a) 0000 05 01 00 08 F0 27 07 72 00 01 27 75 14 .....'.r..'u. 
>>> sendum(a) """ elif example == "call": print """ If you use an USRP and the testcall function this sets up a phonecall: >>> sendum(setupMobileOriginated()) >>> sendum(connectAcknowledge()) """ class TpPd(Packet): name = "Skip Indicator And Transaction Identifier and Protocol \ Discriminator" fields_desc = [ BitField("ti", 0x0, 4), BitField("pd", 0x3, 4) ] class MessageType(Packet): name = "Message Type" fields_desc = [ XByteField("mesType", 0x3C) ] def additionalAssignment(MobileAllocation_presence=0, StartingTime_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x3B) c = ChannelDescription() packet = a / b / c if MobileAllocation_presence is 1: d = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0) packet = packet / d if StartingTime_presence is 1: e = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / e return packet def assignmentCommand(FrequencyList_presence=0, CellChannelDescription_presence=0, CellChannelDescription_presence1=0, MultislotAllocation_presence=0, ChannelMode_presence=0, ChannelMode_presence1=0, ChannelMode_presence2=0, ChannelMode_presence3=0, ChannelMode_presence4=0, ChannelMode_presence5=0, ChannelMode_presence6=0, ChannelMode_presence7=0, ChannelDescription=0, ChannelMode2_presence=0, MobileAllocation_presence=0, StartingTime_presence=0, FrequencyList_presence1=0, ChannelDescription2_presence=0, ChannelDescription_presence=0, FrequencyChannelSequence_presence=0, MobileAllocation_presence1=0, CipherModeSetting_presence=0, VgcsTargetModeIdentication_presence=0, MultiRateConfiguration_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x2e) c = ChannelDescription2() d = PowerCommand() packet = a / b / c / d if FrequencyList_presence is 1: e = FrequencyListHdr(ieiFL=0x05, eightBitFL=0x0) packet = packet / e if CellChannelDescription_presence is 1: f = CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0) packet = packet / f if MultislotAllocation_presence is 1: g = MultislotAllocationHdr(ieiMSA=0x10, eightBitMSA=0x0) packet = packet / g if ChannelMode_presence is 1: h = ChannelModeHdr(ieiCM=0x63, eightBitCM=0x0) packet = packet / h if ChannelMode_presence1 is 1: i = ChannelModeHdr(ieiCM=0x11, eightBitCM=0x0) packet = packet / i if ChannelMode_presence2 is 1: j = ChannelModeHdr(ieiCM=0x13, eightBitCM=0x0) packet = packet / j if ChannelMode_presence3 is 1: k = ChannelModeHdr(ieiCM=0x14, eightBitCM=0x0) packet = packet / k if ChannelMode_presence4 is 1: l = ChannelModeHdr(ieiCM=0x15, eightBitCM=0x0) packet = packet / l if ChannelMode_presence5 is 1: m = ChannelModeHdr(ieiCM=0x16, eightBitCM=0x0) packet = packet / m if ChannelMode_presence6 is 1: n = ChannelModeHdr(ieiCM=0x17, eightBitCM=0x0) packet = packet / n if ChannelMode_presence7 is 1: o = ChannelModeHdr(ieiCM=0x18, eightBitCM=0x0) packet = packet / o if ChannelDescription_presence is 1: p = ChannelDescriptionHdr(ieiCD=0x64, eightBitCD=0x0) packet = packet / p if ChannelMode2_presence is 1: q = ChannelMode2Hdr(ieiCM2=0x66, eightBitCM2=0x0) packet = packet / q if MobileAllocation_presence is 1: r = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0) packet = packet / r if StartingTime_presence is 1: s = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / s if FrequencyList_presence1 is 1: t = FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0) packet = packet / t if ChannelDescription2_presence is 1: u = ChannelDescription2Hdr(ieiCD2=0x1C, eightBitCD2=0x0) packet = packet / u if ChannelDescription_presence is 1: v = ChannelDescriptionHdr(ieiCD=0x1D, eightBitCD=0x0) packet = packet / v if 
FrequencyChannelSequence_presence is 1: w = FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0) packet = packet / w if MobileAllocation_presence1 is 1: x = MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0) packet = packet / x if CipherModeSetting_presence is 1: y = CipherModeSettingHdr(ieiCMS=0x9, eightBitCMS=0x0) packet = packet / y if VgcsTargetModeIdentication_presence is 1: z = VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0) packet = packet / z if MultiRateConfiguration_presence is 1: aa = MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0) packet = packet / aa return packet def assignmentComplete(): a = TpPd(pd=0x6) b = MessageType(mesType=0x29) c = RrCause() packet = a / b / c return packet def assignmentFailure(): a = TpPd(pd=0x6) b = MessageType(mesType=0x2F) c = RrCause() packet = a / b / c return packet def channelModeModify(VgcsTargetModeIdentication_presence=0, MultiRateConfiguration_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x8) c = ChannelDescription2() d = ChannelMode() packet = a / b / c / d if VgcsTargetModeIdentication is 1: e = VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0) packet = packet / e if MultiRateConfiguration is 1: f = MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0) packet = packet / f return packet def channelModeModifyAcknowledge(): a = TpPd(pd=0x6) b = MessageType(mesType=0x17) c = ChannelDescription2() d = ChannelMode() packet = a / b / c / d return packet def channelRelease(BaRange_presence=0, GroupChannelDescription_presence=0, GroupCipherKeyNumber_presence=0, GprsResumption_presence=0, BaListPref_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0xD) c = RrCause() packet = a / b / c if BaRange_presence is 1: d = BaRangeHdr(ieiBR=0x73, eightBitBR=0x0) packet = packet / d if GroupChannelDescription_presence is 1: e = GroupChannelDescriptionHdr(ieiGCD=0x74, eightBitGCD=0x0) packet = packet / e if GroupCipherKeyNumber_presence is 1: f = GroupCipherKeyNumber(ieiGCKN=0x8) packet = packet / f if GprsResumption_presence is 1: g = GprsResumptionHdr(ieiGR=0xC, eightBitGR=0x0) packet = packet / g if BaListPref_presence is 1: h = BaListPrefHdr(ieiBLP=0x75, eightBitBLP=0x0) packet = packet / h return packet class ChannelRequest(Packet): name = "Channel Request" fields_desc = [ ByteField("estCause", 0x0) ] def channelRequest(): return ChannelRequest() def cipheringModeCommand(): a = TpPd(pd=0x6) b = MessageType(mesType=0x35) c = RrCause() d = CipherModeSettingAndcipherResponse() packet = a / b / c / d return packet def cipheringModeComplete(MobileId_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x32) packet = a / b if MobileId_presence is 1: c = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0) packet = packet / c return packet def classmarkChange(MobileStationClassmark3_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x16) c = MobileStationClassmark2() packet = a / b / c if MobileStationClassmark3_presence is 1: e = MobileStationClassmark3(ieiMSC3=0x20) packet = packet / e return packet def classmarkEnquiry(): a = TpPd(pd=0x6) b = MessageType(mesType=0x13) packet = a / b return packet def configurationChangeCommand(ChannelMode_presence=0, ChannelMode_presence1=0, ChannelMode_presence2=0, ChannelMode_presence3=0, ChannelMode_presence4=0, ChannelMode_presence5=0, ChannelMode_presence6=0, ChannelMode_presence7=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x30) c = MultislotAllocation() packet = a / b / c if ChannelMode_presence is 1: d = ChannelModeHdr(ieiCM=0x63, eightBitCM=0x0) packet = packet / d if 
ChannelMode_presence1 is 1: e = ChannelModeHdr(ieiCM=0x11, eightBitCM=0x0) packet = packet / e if ChannelMode_presence2 is 1: f = ChannelModeHdr(ieiCM=0x13, eightBitCM=0x0) packet = packet / f if ChannelMode_presence3 is 1: g = ChannelModeHdr(ieiCM=0x14, eightBitCM=0x0) packet = packet / g if ChannelMode_presence4 is 1: h = ChannelModeHdr(ieiCM=0x15, eightBitCM=0x0) packet = packet / h if ChannelMode_presence5 is 1: i = ChannelModeHdr(ieiCM=0x16, eightBitCM=0x0) packet = packet / i if ChannelMode_presence6 is 1: j = ChannelModeHdr(ieiCM=0x17, eightBitCM=0x0) packet = packet / j if ChannelMode_presence7 is 1: k = ChannelModeHdr(ieiCM=0x18, eightBitCM=0x0) packet = packet / k return packet def configurationChangeAcknowledge(): a = TpPd(pd=0x6) b = MessageType(mesType=0x31) c = MobileId() packet = a / b / c return packet def configurationChangeReject(): a = TpPd(pd=0x6) b = MessageType(mesType=0x33) c = RrCause() packet = a / b / c return packet def frequencyRedefinition(CellChannelDescription_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x14) c = ChannelDescription() d = MobileAllocation() e = StartingTime() packet = a / b / c / d / e if CellChannelDescription_presence is 1: f = CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0) packet = packet / f return packet def pdchAssignmentCommand(ChannelDescription_presence=0, CellChannelDescription_presence=0, MobileAllocation_presence=0, StartingTime_presence=0, FrequencyList_presence=0, ChannelDescription_presence1=0, FrequencyChannelSequence_presence=0, MobileAllocation_presence1=0, PacketChannelDescription_presence=0, DedicatedModeOrTBF_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x23) c = ChannelDescription() packet = a / b / c if ChannelDescription_presence is 1: d = ChannelDescriptionHdr(ieiCD=0x62, eightBitCD=0x0) packet = packet / d if CellChannelDescription_presence is 1: e = CellChannelDescriptionHdr(ieiCCD=0x05, eightBitCCD=0x0) packet = packet / e if MobileAllocation_presence is 1: f = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0) packet = packet / f if StartingTime_presence is 1: g = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / g if FrequencyList_presence is 1: h = FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0) packet = packet / h if ChannelDescription_presence1 is 1: i = ChannelDescriptionHdr(ieiCD=0x1C, eightBitCD=0x0) packet = packet / i if FrequencyChannelSequence_presence is 1: j = FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0) packet = packet / j if MobileAllocation_presence1 is 1: k = MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0) packet = packet / k if PacketChannelDescription_presence is 1: l = PacketChannelDescription(ieiPCD=0x22) packet = packet / l if DedicatedModeOrTBF_presence is 1: m = DedicatedModeOrTBFHdr(ieiDMOT=0x23, eightBitDMOT=0x0) packet = packet / m return packet def gprsSuspensionRequest(): a = TpPd(pd=0x6) b = MessageType() c = Tlli() d = RoutingAreaIdentification() e = SuspensionCause() packet = a / b / c / d / e return packet class HandoverAccess(Packet): name = "Handover Access" fields_desc = [ ByteField("handover", None), ] def handoverCommand(SynchronizationIndication_presence=0, FrequencyShortList_presence=0, FrequencyList_presence=0, CellChannelDescription_presence=0, MultislotAllocation_presence=0, ChannelMode_presence=0, ChannelMode_presence1=0, ChannelMode_presence2=0, ChannelMode_presence3=0, ChannelMode_presence4=0, ChannelMode_presence5=0, ChannelMode_presence6=0, ChannelMode_presence7=0, ChannelDescription_presence1=0, ChannelMode2_presence=0, 
FrequencyChannelSequence_presence=0, MobileAllocation_presence=0, StartingTime_presence=0, TimeDifference_presence=0, TimingAdvance_presence=0, FrequencyShortList_presence1=0, FrequencyList_presence1=0, ChannelDescription2_presence=0, ChannelDescription_presence2=0, FrequencyChannelSequence_presence1=0, MobileAllocation_presence1=0, CipherModeSetting_presence=0, VgcsTargetModeIdentication_presence=0, MultiRateConfiguration_presence=0): name = "Handover Command" a = TpPd(pd=0x6) b = MessageType(mesType=0x2b) c = CellDescription() d = ChannelDescription2() e = HandoverReference() f = PowerCommandAndAccessType() packet = a / b / c / d / e / f if SynchronizationIndication_presence is 1: g = SynchronizationIndicationHdr(ieiSI=0xD, eightBitSI=0x0) packet = packet / g if FrequencyShortList_presence is 1: h = FrequencyShortListHdr(ieiFSL=0x02) packet = packet / h if FrequencyList_presence is 1: i = FrequencyListHdr(ieiFL=0x05, eightBitFL=0x0) packet = packet / i if CellChannelDescription_presence is 1: j = CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0) packet = packet / j if MultislotAllocation_presence is 1: k = MultislotAllocationHdr(ieiMSA=0x10, eightBitMSA=0x0) packet = packet / k if ChannelMode_presence is 1: l = ChannelModeHdr(ieiCM=0x63, eightBitCM=0x0) packet = packet / l if ChannelMode_presence1 is 1: m = ChannelModeHdr(ieiCM=0x11, eightBitCM=0x0) packet = packet / m if ChannelMode_presence2 is 1: n = ChannelModeHdr(ieiCM=0x13, eightBitCM=0x0) packet = packet / n if ChannelMode_presence3 is 1: o = ChannelModeHdr(ieiCM=0x14, eightBitCM=0x0) packet = packet / o if ChannelMode_presence4 is 1: p = ChannelModeHdr(ieiCM=0x15, eightBitCM=0x0) packet = packet / p if ChannelMode_presence5 is 1: q = ChannelModeHdr(ieiCM=0x16, eightBitCM=0x0) packet = packet / q if ChannelMode_presence6 is 1: r = ChannelModeHdr(ieiCM=0x17, eightBitCM=0x0) packet = packet / r if ChannelMode_presence7 is 1: s = ChannelModeHdr(ieiCM=0x18, eightBitCM=0x0) packet = packet / s if ChannelDescription_presence1 is 1: s1 = ChannelDescriptionHdr(ieiCD=0x64, eightBitCD=0x0) packet = packet / s1 if ChannelMode2_presence is 1: t = ChannelMode2Hdr(ieiCM2=0x66, eightBitCM2=0x0) packet = packet / t if FrequencyChannelSequence_presence is 1: u = FrequencyChannelSequenceHdr(ieiFCS=0x69, eightBitFCS=0x0) packet = packet / u if MobileAllocation_presence is 1: v = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0) packet = packet / v if StartingTime_presence is 1: w = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / w if TimeDifference_presence is 1: x = TimeDifferenceHdr(ieiTD=0x7B, eightBitTD=0x0) packet = packet / x if TimingAdvance_presence is 1: y = TimingAdvanceHdr(ieiTA=0x7D, eightBitTA=0x0) packet = packet / y if FrequencyShortList_presence1 is 1: z = FrequencyShortListHdr(ieiFSL=0x12) packet = packet / z if FrequencyList_presence1 is 1: aa = FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0) packet = packet / aa if ChannelDescription2_presence is 1: ab = ChannelDescription2Hdr(ieiCD2=0x1C, eightBitCD2=0x0) packet = packet / ab if ChannelDescription_presence2 is 1: ac = ChannelDescriptionHdr(ieiCD=0x1D, eightBitCD=0x0) packet = packet / ac if FrequencyChannelSequence_presence1 is 1: ad = FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0) packet = packet / ad if MobileAllocation_presence1 is 1: ae = MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0) packet = packet / ae if CipherModeSetting_presence is 1: af = CipherModeSettingHdr(ieiCMS=0x9, eightBitCMS=0x0) packet = packet / af if 
VgcsTargetModeIdentication_presence is 1: ag = VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0) packet = packet / ag if MultiRateConfiguration_presence is 1: ah = MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0) packet = packet / ah return packet def handoverComplete(MobileTimeDifference_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x2c) c = RrCause() packet = a / b / c if MobileTimeDifference_presence is 1: d = MobileTimeDifferenceHdr(ieiMTD=0x77, eightBitMTD=0x0) packet = packet / d return packet def handoverFailure(): a = TpPd(pd=0x6) b = MessageType(mesType=0x28) c = RrCause() packet = a / b / c return packet def immediateAssignment(ChannelDescription_presence=0, PacketChannelDescription_presence=0, StartingTime_presence=0): a = L2PseudoLength() b = TpPd(pd=0x6) c = MessageType(mesType=0x3F) d = PageModeAndDedicatedModeOrTBF() packet = a / b / c / d if ChannelDescription_presence is 1: f = ChannelDescription() packet = packet / f if PacketChannelDescription_presence is 1: g = PacketChannelDescription() packet = packet / g h = RequestReference() i = TimingAdvance() j = MobileAllocation() packet = packet / h / i / j if StartingTime_presence is 1: k = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / k l = IaRestOctets() packet = packet / l return packet def immediateAssignmentExtended(StartingTime_presence=0): a = L2PseudoLength() b = TpPd(pd=0x6) c = MessageType(mesType=0x39) d = PageModeAndSpareHalfOctets() f = ChannelDescription() g = RequestReference() h = TimingAdvance() i = MobileAllocation() packet = a / b / c / d / f / g / h / i if StartingTime_presence is 1: j = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / j k = IaxRestOctets() packet = packet / k return packet def immediateAssignmentReject(): a = L2PseudoLength(l2pLength=0x13) b = TpPd(pd=0x6) c = MessageType(mesType=0x3a) d = PageModeAndSpareHalfOctets() f = RequestReference() g = WaitIndication() h = RequestReference() i = WaitIndication() j = RequestReference() k = WaitIndication() l = RequestReference() m = WaitIndication() n = IraRestOctets() packet = a / b / c / d / f / g / h / i / j / k / l / m / n return packet def measurementReport(): a = TpPd(pd=0x6) b = MessageType(mesType=0x15) c = MeasurementResults() packet = a / b / c return packet class NotificationFacch(): name = "Notification/facch" fields_desc = [ BitField("rr", 0x0, 1), BitField("msgTyoe", 0x0, 5), BitField("layer2Header", 0x0, 2), BitField("frChanDes", 0x0, 24) ] def notificationNch(): a = L2PseudoLength(l2pLength=0x01) b = TpPd(pd=0x6) c = MessageType(mesType=0x20) d = NtNRestOctets() packet = a / b / c / d return packet def notificationResponse(): a = TpPd(pd=0x6) b = MessageType(mesType=0x26) c = MobileStationClassmark2() d = MobileId() e = DescriptiveGroupOrBroadcastCallReference() packet = a / b / c / d / e return packet def rrCellChangeOrder(): a = TpPd(pd=0x6) b = MessageType(mesType=0x8) c = CellDescription() d = NcModeAndSpareHalfOctets() packet = a / b / c / d return packet def pagingRequestType1(MobileId_presence=0): a = L2PseudoLength() b = TpPd(pd=0x6) c = MessageType(mesType=0x21) d = PageModeAndChannelNeeded() f = MobileId() packet = a / b / c / d / f if MobileId_presence is 1: g = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0) packet = packet / g h = P1RestOctets() packet = packet / h return packet def pagingRequestType2(MobileId_presence=0): a = L2PseudoLength() b = TpPd(pd=0x6) c = MessageType(mesType=0x22) d = PageModeAndChannelNeeded() f = MobileId() g = MobileId() packet = a / b / c / d 
/ f / g if MobileId_presence is 1: h = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0) packet = packet / h i = P2RestOctets() packet = packet / i return packet def pagingRequestType3(): a = L2PseudoLength(l2pLength=0x13) b = TpPd(pd=0x6) c = MessageType(mesType=0x24) d = PageModeAndChannelNeeded() e = TmsiPTmsi() f = TmsiPTmsi() g = TmsiPTmsi() h = TmsiPTmsi() i = P3RestOctets() packet = a / b / c / d / e / f / g / h / i return packet def pagingResponse(): a = TpPd(pd=0x6) b = MessageType(mesType=0x27) c = CiphKeySeqNrAndSpareHalfOctets() d = MobileStationClassmark2() e = MobileId() packet = a / b / c / d / e return packet
MIT License
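A short usage sketch for partialRelease(), relying only on helpers defined in this module. Running a local socket server for sendum() to talk to, as described in its error message, is assumed.

# Build a PARTIAL RELEASE message (section 9.1.26) and send it via sendum().
pkt = partialRelease()
pkt.show()   # scapy's field-by-field dump of the stacked layers
sendum(pkt)  # assumes a socket server is listening, per sendum()'s docs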
slimmer-ai/mpire
mpire/comms.py
WorkerComms.set_worker_alive
python
def set_worker_alive(self, worker_id: int) -> None:
    self._workers_dead[worker_id].clear()
Indicate that a worker is alive

:param worker_id: Worker ID
https://github.com/slimmer-ai/mpire/blob/ddcfbf113fa53c9b9de92d203d684e684f100344/mpire/comms.py#L492-L498
import logging import multiprocessing as mp import queue import threading from datetime import datetime from typing import Any, Generator, List, Optional, Tuple, Union from mpire.context import DEFAULT_START_METHOD, MP_CONTEXTS from mpire.params import WorkerMapParams from mpire.signal import DelayedKeyboardInterrupt logger = logging.getLogger(__name__) POISON_PILL = '\0' NON_LETHAL_POISON_PILL = '\1' NEW_MAP_PARAMS_PILL = '\2' class WorkerComms: def __init__(self, ctx: mp.context.BaseContext, n_jobs: int, use_dill: bool, using_threading: bool) -> None: self.ctx = ctx self.ctx_for_threading = MP_CONTEXTS['mp_dill' if use_dill else 'mp'][DEFAULT_START_METHOD] self.n_jobs = n_jobs self.using_threading = using_threading self._keep_order = self.ctx.Event() self._task_queues = None self._task_idx = None self._last_completed_task_worker_id = None self._results_queue = None self._exit_results_queues = [] self._all_exit_results_obtained = None self._worker_done_array = None self._workers_dead = None self._exception_queue = None self.exception_lock = self.ctx.Lock() self._exception_thrown = self.ctx_for_threading.Event() if using_threading else self.ctx.Event() self._kill_signal_received = self.ctx_for_threading.Event() if using_threading else self.ctx.Event() self._task_completed_queue = None self._progress_bar_complete = None def init_comms(self, has_worker_exit: bool, has_progress_bar: bool) -> None: self._task_queues = [self.ctx.JoinableQueue() for _ in range(self.n_jobs)] self.reset_last_completed_task_info() self._results_queue = self.ctx.JoinableQueue() if has_worker_exit: self._exit_results_queues = [self.ctx.JoinableQueue() for _ in range(self.n_jobs)] self._all_exit_results_obtained = self.ctx.Event() else: self._exit_results_queues = [] self._all_exit_results_obtained = None self._worker_done_array = self.ctx.Array('b', self.n_jobs, lock=False) self._workers_dead = [self.ctx.Event() for _ in range(self.n_jobs)] [worker_dead.set() for worker_dead in self._workers_dead] self._exception_queue = (self.ctx_for_threading.JoinableQueue() if self.using_threading else self.ctx.JoinableQueue()) self._exception_thrown.clear() self._kill_signal_received.clear() if has_progress_bar: self._task_completed_queue = (self.ctx_for_threading.JoinableQueue() if self.using_threading else self.ctx.JoinableQueue()) self._progress_bar_complete = self.ctx_for_threading.Event() if self.using_threading else self.ctx.Event() else: self._task_completed_queue = None self._progress_bar_complete = None def reset_last_completed_task_info(self) -> None: self._task_idx = 0 self._last_completed_task_worker_id = None def has_progress_bar(self) -> bool: return self._task_completed_queue is not None def task_completed_progress_bar(self, progress_bar_last_updated: Optional[datetime] = None, progress_bar_n_tasks_completed: Optional[int] = None, force_update: bool = False) -> Tuple[datetime, int]: if not force_update: progress_bar_n_tasks_completed += 1 now = datetime.now() if force_update or (now - progress_bar_last_updated).total_seconds() > 0.1: self._task_completed_queue.put(progress_bar_n_tasks_completed) progress_bar_last_updated = now progress_bar_n_tasks_completed = 0 return progress_bar_last_updated, progress_bar_n_tasks_completed def add_progress_bar_poison_pill(self) -> None: self._task_completed_queue.put(POISON_PILL) def get_tasks_completed_progress_bar(self) -> Tuple[Union[int, str], bool]: while not self.exception_thrown() and not self.kill_signal_received(): try: return self._task_completed_queue.get(block=True, 
timeout=0.01), True except queue.Empty: pass return POISON_PILL, False def task_done_progress_bar(self) -> None: self._task_completed_queue.task_done() def set_progress_bar_complete(self) -> None: self._progress_bar_complete.set() def wait_until_progress_bar_is_complete(self) -> None: while not self.exception_thrown(): if self._progress_bar_complete.wait(timeout=0.01): return def set_keep_order(self) -> None: return self._keep_order.set() def clear_keep_order(self) -> None: return self._keep_order.clear() def keep_order(self) -> bool: return self._keep_order.is_set() def add_task(self, task: Any, worker_id: Optional[int] = None) -> None: if worker_id is None: if self._last_completed_task_worker_id is not None: worker_id = self._last_completed_task_worker_id self._last_completed_task_worker_id = None else: worker_id = self._task_idx % self.n_jobs self._task_idx += 1 with DelayedKeyboardInterrupt(): self._task_queues[worker_id].put(task, block=True) def get_task(self, worker_id: int) -> Any: while not self.exception_thrown(): try: return self._task_queues[worker_id].get(block=True, timeout=0.01) except queue.Empty: pass return None def task_done(self, worker_id: int) -> None: self._task_queues[worker_id].task_done() def add_results(self, worker_id: int, results: Any) -> None: self._results_queue.put((worker_id, results)) def get_results(self, block: bool = True, timeout: Optional[float] = None, in_thread: bool = False) -> Any: queue_empty_error = None with DelayedKeyboardInterrupt(in_thread=in_thread): try: self._last_completed_task_worker_id, results = self._results_queue.get(block=block, timeout=timeout) self._results_queue.task_done() except queue.Empty as e: queue_empty_error = e if queue_empty_error is not None: raise queue_empty_error return results def add_exit_results(self, worker_id: int, results: Any) -> None: self._exit_results_queues[worker_id].put(results) def get_exit_results(self, worker_id: int, block: bool = True, in_thread: bool = False) -> Any: while not self.exception_thrown() or not block: queue_empty_error = None with DelayedKeyboardInterrupt(in_thread=in_thread): try: results = self._exit_results_queues[worker_id].get(block=block, timeout=0.01) self._exit_results_queues[worker_id].task_done() return results except queue.Empty as e: if not block: queue_empty_error = e if queue_empty_error is not None: raise queue_empty_error def get_exit_results_all_workers(self) -> List[Any]: exit_results = [] for worker_id in range(self.n_jobs): results = self.get_exit_results(worker_id) if self.exception_thrown(): return exit_results exit_results.append(results) return exit_results def set_all_exit_results_obtained(self) -> None: self._all_exit_results_obtained.set() def wait_until_all_exit_results_obtained(self) -> None: while not self.exception_thrown(): if self._all_exit_results_obtained.wait(timeout=0.01): return def add_new_map_params(self, map_params: WorkerMapParams) -> None: for worker_id in range(self.n_jobs): self.add_task(NEW_MAP_PARAMS_PILL, worker_id) self.add_task(map_params, worker_id) def add_exception(self, err_type: type, traceback_str: str) -> None: self._exception_queue.put((err_type, traceback_str)) def add_exception_poison_pill(self) -> None: with DelayedKeyboardInterrupt(): self._exception_queue.put((POISON_PILL, POISON_PILL)) def get_exception(self) -> Tuple[type, str]: with DelayedKeyboardInterrupt(): return self._exception_queue.get(block=True) def task_done_exception(self) -> None: self._exception_queue.task_done() def set_exception_thrown(self) -> None: 
self._exception_thrown.set() def exception_thrown(self) -> bool: return self._exception_thrown.is_set() def wait_for_exception_thrown(self, timeout: Optional[float]) -> bool: return self._exception_thrown.wait(timeout=timeout) def set_kill_signal_received(self) -> None: self._kill_signal_received.set() def kill_signal_received(self) -> bool: return self._kill_signal_received.is_set() def insert_poison_pill(self) -> None: for worker_id in range(self.n_jobs): self.add_task(POISON_PILL, worker_id) def insert_non_lethal_poison_pill(self) -> None: for worker_id in range(self.n_jobs): self.add_task(NON_LETHAL_POISON_PILL, worker_id) def signal_worker_restart(self, worker_id: int) -> None: self._worker_done_array[worker_id] = True def get_worker_restarts(self) -> Generator[int, None, None]: return (worker_id for worker_id, restart_worker in enumerate(self._worker_done_array) if restart_worker) def reset_worker_restart(self, worker_id) -> None: self._worker_done_array[worker_id] = False
MIT License
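A sketch of the liveness handshake set_worker_alive() participates in. The worker loop below is a simplification invented for illustration; only set_worker_alive, get_task, and exception_thrown come from the WorkerComms context above.

def worker_main(comms, worker_id):
    # Mark this slot as alive: clears the per-worker "dead" Event so the
    # parent's liveness checks see the worker as running.
    comms.set_worker_alive(worker_id)
    try:
        while not comms.exception_thrown():
            task = comms.get_task(worker_id)
            if task is None:
                break
    finally:
        pass  # the real worker signals death again on exit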
0b01001001/spectree
tests/test_utils.py
demo_func
python
def demo_func():
    """summary

    description"""
summary

description
https://github.com/0b01001001/spectree/blob/952a606579d050e52b2a149adebf7dbaded450e9/tests/test_utils.py#L27-L31
import pytest from spectree.response import Response from spectree.spec import SpecTree from spectree.utils import ( get_model_path_key, has_model, parse_code, parse_comments, parse_name, parse_params, parse_request, parse_resp, ) from .common import DemoModel, DemoQuery api = SpecTree() def undecorated_func(): @api.validate(json=DemoModel, resp=Response(HTTP_200=DemoModel))
Apache License 2.0
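demo_func exists so the tests can exercise docstring parsing; a minimal sketch of that use, assuming parse_comments() splits the first docstring line (summary) from the remainder (description).

# parse_comments is already imported at the top of this test module.
summary, description = parse_comments(demo_func)
assert summary == "summary"
assert description == "description"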
toastdriven/restless
restless/dj.py
DjangoResource.urls
python
def urls(cls, name_prefix=None):
    return [
        url(r'^$', cls.as_list(), name=cls.build_url_name('list', name_prefix)),
        url(r'^(?P<pk>[\w-]+)/$', cls.as_detail(), name=cls.build_url_name('detail', name_prefix)),
    ]
A convenience method for hooking up the URLs.

This automatically adds a list & a detail endpoint to your URLconf.

:param name_prefix: (Optional) A prefix for the URL's name (for resolving).
    The default is ``None``, which will autocreate a prefix based on the
    class name. Ex: ``BlogPostResource`` -> ``api_blogpost_list``
:type name_prefix: string

:returns: A list of ``url`` objects for ``include(...)``
https://github.com/toastdriven/restless/blob/49b579da4ee1e410c4323ecc629bf63c0b74dccb/restless/dj.py#L116-L133
import six from django.conf import settings from django.conf.urls import url from django.core.exceptions import ObjectDoesNotExist from django.core.paginator import Paginator from django.http import HttpResponse, Http404 from django.views.decorators.csrf import csrf_exempt from .constants import OK, NO_CONTENT from .exceptions import NotFound, BadRequest from .resources import Resource class DjangoResource(Resource): def serialize_list(self, data): if data is None: return super(DjangoResource, self).serialize_list(data) if getattr(self, 'paginate', False): page_size = getattr(self, 'page_size', getattr(settings, 'RESTLESS_PAGE_SIZE', 10)) paginator = Paginator(data, page_size) page_number = self.request.GET.get('p', 1) if page_number not in paginator.page_range: raise BadRequest('Invalid page number') self.page = paginator.page(page_number) data = self.page.object_list return super(DjangoResource, self).serialize_list(data) def wrap_list_response(self, data): response_dict = super(DjangoResource, self).wrap_list_response(data) if hasattr(self, 'page'): next_page = self.page.has_next() and self.page.next_page_number() or None previous_page = self.page.has_previous() and self.page.previous_page_number() or None response_dict['pagination'] = { 'num_pages': self.page.paginator.num_pages, 'count': self.page.paginator.count, 'page': self.page.number, 'start_index': self.page.start_index(), 'end_index': self.page.end_index(), 'next_page': next_page, 'previous_page': previous_page, 'per_page': self.page.paginator.per_page, } return response_dict @classmethod def as_list(self, *args, **kwargs): return csrf_exempt(super(DjangoResource, self).as_list(*args, **kwargs)) @classmethod def as_detail(self, *args, **kwargs): return csrf_exempt(super(DjangoResource, self).as_detail(*args, **kwargs)) def is_debug(self): return settings.DEBUG def build_response(self, data, status=OK): if status == NO_CONTENT: content_type = 'text/plain' else: content_type = 'application/json' resp = HttpResponse(data, content_type=content_type, status=status) return resp def build_error(self, err): if isinstance(err, (ObjectDoesNotExist, Http404)): err = NotFound(msg=six.text_type(err)) return super(DjangoResource, self).build_error(err) @classmethod def build_url_name(cls, name, name_prefix=None): if name_prefix is None: name_prefix = 'api_{}'.format( cls.__name__.replace('Resource', '').lower() ) name_prefix = name_prefix.rstrip('_') return '_'.join([name_prefix, name]) @classmethod
BSD 3-Clause New or Revised License
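The hookup the docstring describes, sketched for a hypothetical BlogPostResource; the resource class and module path are assumptions for illustration.

# urls.py of a Django project (illustrative)
from django.conf.urls import include, url
from posts.api import BlogPostResource  # hypothetical DjangoResource subclass

urlpatterns = [
    # exposes api_blogpost_list at api/posts/ and api_blogpost_detail at api/posts/<pk>/
    url(r'^api/posts/', include(BlogPostResource.urls())),
]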
pypa/pip
src/pip/_vendor/distro.py
linux_distribution
python
def linux_distribution(full_distribution_name=True):
    warnings.warn(
        "distro.linux_distribution() is deprecated. It should only be used as a "
        "compatibility shim with Python's platform.linux_distribution(). Please use "
        "distro.id(), distro.version() and distro.name() instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _distro.linux_distribution(full_distribution_name)
.. deprecated:: 1.6.0
    :func:`distro.linux_distribution()` is deprecated. It should only be
    used as a compatibility shim with Python's
    :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`,
    :func:`distro.version` and :func:`distro.name` instead.

Return information about the current OS distribution as a tuple
``(id_name, version, codename)`` with items as follows:

* ``id_name``: If *full_distribution_name* is false, the result of
  :func:`distro.id`. Otherwise, the result of :func:`distro.name`.

* ``version``: The result of :func:`distro.version`.

* ``codename``: The result of :func:`distro.codename`.

The interface of this function is compatible with the original
:py:func:`platform.linux_distribution` function, supporting a subset of
its parameters.

The data it returns may not exactly be the same, because it uses more data
sources than the original function, and that may lead to different data if
the OS distribution is not consistent across multiple data sources it
provides (there are indeed such distributions ...).

Another reason for differences is the fact that the :func:`distro.id`
method normalizes the distro ID string to a reliable machine-readable
value for a number of popular OS distributions.
https://github.com/pypa/pip/blob/0442875a68f19b0118b0b88c747bdaf6b24853ba/src/pip/_vendor/distro.py#L136-L176
import argparse
import json
import logging
import os
import re
import shlex
import subprocess
import sys
import warnings

__version__ = "1.6.0"

if False:
    from typing import (
        Any,
        Callable,
        Dict,
        Iterable,
        Optional,
        Sequence,
        TextIO,
        Tuple,
        Type,
        TypedDict,
        Union,
    )

    VersionDict = TypedDict(
        "VersionDict", {"major": str, "minor": str, "build_number": str}
    )
    InfoDict = TypedDict(
        "InfoDict",
        {
            "id": str,
            "version": str,
            "version_parts": VersionDict,
            "like": str,
            "codename": str,
        },
    )

_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc")
_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib")
_OS_RELEASE_BASENAME = "os-release"

NORMALIZED_OS_ID = {
    "ol": "oracle",
}

NORMALIZED_LSB_ID = {
    "enterpriseenterpriseas": "oracle",
    "enterpriseenterpriseserver": "oracle",
    "redhatenterpriseworkstation": "rhel",
    "redhatenterpriseserver": "rhel",
    "redhatenterprisecomputenode": "rhel",
}

NORMALIZED_DISTRO_ID = {
    "redhat": "rhel",
}

_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
    r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)"
)

_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")

_DISTRO_RELEASE_IGNORE_BASENAMES = (
    "debian_version",
    "lsb-release",
    "oem-release",
    _OS_RELEASE_BASENAME,
    "system-release",
    "plesk-release",
    "iredmail-release",
)
MIT License
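A quick sketch of the deprecated call next to the replacements the docstring recommends; the printed values depend on the host system.

import distro

# Deprecated compatibility shim: emits a DeprecationWarning.
id_name, version, codename = distro.linux_distribution(full_distribution_name=False)

# Preferred, per the deprecation notice above:
print(distro.id(), distro.version(), distro.name())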
covid-19-impact-lab/sid
src/sid/msm.py
get_msm_func
python
def get_msm_func(
    simulate,
    calc_moments,
    empirical_moments,
    replace_nans,
    weighting_matrix=None,
    additional_outputs=None,
):
    if weighting_matrix is None:
        weighting_matrix = get_diag_weighting_matrix(empirical_moments)
    if not _is_diagonal(weighting_matrix):
        raise ValueError("weighting_matrix must be diagonal.")

    empirical_moments = _harmonize_input(empirical_moments)
    calc_moments = _harmonize_input(calc_moments)

    # A single replacement function is broadcast to every set of moments.
    if callable(replace_nans):
        replace_nans = {k: replace_nans for k in empirical_moments}
    replace_nans = _harmonize_input(replace_nans)

    if 1 < len(replace_nans) < len(empirical_moments):
        raise ValueError(
            "Replacement functions can only be matched 1:1 or 1:n with sets of "
            "empirical moments."
        )
    elif len(replace_nans) > len(empirical_moments):
        raise ValueError(
            "There are more replacement functions than sets of empirical moments."
        )

    if len(calc_moments) != len(empirical_moments):
        raise ValueError(
            "Number of functions to calculate simulated moments must be equal to "
            "the number of sets of empirical moments."
        )

    if additional_outputs is not None:
        if not _is_dict_of_callables(additional_outputs):
            raise ValueError("additional_outputs must be a dict of callables.")
    else:
        additional_outputs = {}

    invalid_keys = {
        "value",
        "root_contributions",
        "empirical_moments",
        "simulated_moments",
    }
    invalid_present = invalid_keys.intersection(additional_outputs)
    if invalid_present:
        raise ValueError(f"Invalid keys in additional_outputs: {invalid_present}")

    msm_func = functools.partial(
        _msm,
        simulate=simulate,
        calc_moments=calc_moments,
        empirical_moments=empirical_moments,
        replace_nans=replace_nans,
        weighting_matrix=weighting_matrix,
        additional_outputs=additional_outputs,
    )

    return msm_func
Get the msm function.

Args:
    simulate (callable): Function which accepts parameters and returns
        simulated data.
    calc_moments (callable or dict): Function(s) used to calculate
        simulated moments. If it is a dictionary, it must have the same
        keys as empirical_moments
    empirical_moments (pandas.DataFrame or pandas.Series or dict): One
        pandas object or a dictionary of pandas objects with empirical
        moments.
    replace_nans (callable or list): Functions(s) specifying how to handle
        NaNs in simulated_moments. Must match structure of
        empirical_moments. Exception: If only one replacement function is
        specified, it will be used on all sets of simulated moments.
    weighting_matrix (numpy.ndarray): Square matrix of dimension (NxN)
        with N denoting the number of empirical_moments. Used to weight
        squared moment errors.
    additional_outputs (dict or None): Dictionary of functions. Each
        function is evaluated on the output of the simulate function and
        the result is saved in the output dictionary of the msm function.

Returns:
    msm_func (callable): MSM function where all arguments except the
        parameter vector are set.
https://github.com/covid-19-impact-lab/sid/blob/ac01bd49ff51cd5b1aebcc0c058c6ca0baaeb973/src/sid/msm.py#L24-L122
import functools

import numpy as np
import pandas as pd
MIT License
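A toy sketch of wiring up get_msm_func; the simulate and moment functions below are stand-ins invented for illustration, and the shape of the returned criterion dict is only hinted at by the invalid_keys check above.

import numpy as np
import pandas as pd

def simulate(params):
    # stand-in: pretend the simulated data equals the parameters
    return pd.Series(params, name="outcome")

def calc_moments(data):
    return pd.Series({"mean": data.mean()})

empirical_moments = pd.Series({"mean": 0.5})
replace_nans = lambda s: s.fillna(0)  # broadcast to all moment sets

msm_func = get_msm_func(simulate, calc_moments, empirical_moments, replace_nans)
result = msm_func(np.array([0.4, 0.6]))  # criterion dict, e.g. with "value"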
cortex-lab/phy
phy/plot/gloo/array.py
VertexArray._delete
python
def _delete(self):
    if self._handle > -1:
        self._buffer._delete()
        gl.glDeleteVertexArrays(1, np.array([self._handle]))
Delete vertex array from GPU
https://github.com/cortex-lab/phy/blob/9a330b9437a3d0b40a37a201d147224e6e7fb462/phy/plot/gloo/array.py#L66-L71
import logging

import numpy as np

from . import gl
from .gpudata import GPUData
from .globject import GLObject
from .buffer import VertexBuffer

log = logging.getLogger(__name__)


class VertexArray(GPUData, GLObject):
    def __init__(self, usage=gl.GL_DYNAMIC_DRAW):
        GLObject.__init__(self)
        self._target = gl.GL_ARRAY_BUFFER
        self._buffer = self.view(VertexBuffer)
        self._buffer.__init__(usage)

    @property
    def need_update(self):
        return self._buffer.need_update

    def _update(self):
        self._buffer._update()

    def _create(self):
        self._handle = gl.glGenVertexArrays(1)
        log.debug("GPU: Creating vertex array (id=%d)" % self._id)
        self._deactivate()
        self._buffer._create()
BSD 3-Clause New or Revised License
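A sketch of the teardown _delete() performs, assuming an active GL context and that GLObject exposes the usual public delete() wrapper around _delete (an assumption; only _delete itself appears in this excerpt).

va = VertexArray()  # allocated lazily; _create() runs on first activation
# ... upload vertex data through the underlying VertexBuffer view ...
va.delete()         # ends up in _delete(): frees the buffer first, then
                    # calls glDeleteVertexArrays on the VAO handle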
theno/fabsetup
fabsetup/fabfile/setup/powerline.py
install_special_glyphs
python
def install_special_glyphs():
    from_dir = '~/repos/powerline/font'
    run('mkdir -p ~/.local/share/fonts')
    run(flo('cp {from_dir}/PowerlineSymbols.otf ~/.local/share/fonts'))
    to_dir = '~/.config/fontconfig/conf.d/'
    run(flo('mkdir -p {to_dir}'))
    run(flo('cp {from_dir}/10-powerline-symbols.conf {to_dir}'))
More infos:
  https://powerline.readthedocs.io/en/latest/installation/linux.html#fonts-installation
  https://wiki.archlinux.org/index.php/Font_configuration
  $XDG_CONFIG_HOME:
    http://superuser.com/a/365918
https://github.com/theno/fabsetup/blob/a2e03b3e438fc2b0fc39d564c4c174270d027f09/fabsetup/fabfile/setup/powerline.py#L40-L54
import os.path

from fabric.api import env

from fabsetup.fabutils import checkup_git_repo_legacy, needs_packages
from fabsetup.fabutils import needs_repo_fabsetup_custom, suggest_localhost
from fabsetup.fabutils import install_file_legacy, run, subtask, subsubtask, task
from fabsetup.utils import flo, update_or_append_line, comment_out_line
from fabsetup.utils import uncomment_or_update_or_append_line, query_yes_no


@task
@needs_repo_fabsetup_custom
@suggest_localhost
@needs_packages('python-pip')
def powerline():
    bindings_dir, scripts_dir = install_upgrade_powerline()
    set_up_powerline_fonts()
    set_up_powerline_daemon(scripts_dir)
    powerline_for_vim(bindings_dir)
    powerline_for_bash_or_powerline_shell(bindings_dir)
    powerline_for_tmux(bindings_dir)
    powerline_for_i3(bindings_dir)
    print('\nYou may have to reboot for make changes take effect')


@subsubtask
MIT License
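What install_special_glyphs() effects on the target host, restated as a standalone sketch; the paths come from the function above, while using plain subprocess in place of fabric's run() is an illustrative substitution.

import subprocess

# Same steps as install_special_glyphs(), executed locally for illustration.
cmds = [
    'mkdir -p ~/.local/share/fonts',
    'cp ~/repos/powerline/font/PowerlineSymbols.otf ~/.local/share/fonts',
    'mkdir -p ~/.config/fontconfig/conf.d/',
    'cp ~/repos/powerline/font/10-powerline-symbols.conf ~/.config/fontconfig/conf.d/',
]
for cmd in cmds:
    subprocess.run(cmd, shell=True, check=True)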
daveoncode/django-easy-currencies
django_easy_currencies/management/commands/currencies.py
Command.update_currency_rates
python
def update_currency_rates(self):
    self.stdout.write('Updating currency rates...')
    currency_types = self.get_currency_list()
    info = self.get_rates_info(self.get_service_url(), currency_types)
    try:
        usd_currency, _ = Currency.objects.update_or_create(code='USD', defaults={'code': 'USD'})
        currencies = self.create_or_update_currency_objects(currency_types)
        usd_rates = self.create_or_update_usd_currency_rates(info, usd_currency)
        self.create_or_update_inverted_usd_currency_rates(currencies, usd_rates)
        self.create_or_update_inverted_currency_rates_permutations(currencies, currency_types, usd_rates)
        self.stdout.write('Currency rates have been updated, run command with "--list" to see current status.')
    except Exception as e:
        self.stderr.write('An error occurred while updating currency rates: {0}'.format(e))
Updates currencies/rates by following these steps:

1. Calls the remote service and retrieve the json response converted into a python dictionary
2. Retrieve base USD Currency (creates it if does not exist)
3. Retrieve extra USD currencies supported by configuration (creates them if not defined)
4. Creates/updates USD rates
5. Creates/updates all other supported currency rates recursively
https://github.com/daveoncode/django-easy-currencies/blob/1da1d8e3f411c0c2aa10aee29069988ac40f7bae/django_easy_currencies/management/commands/currencies.py#L145-L166
from __future__ import unicode_literals

from optparse import make_option
from urllib2 import URLError
import urllib2
import json
from itertools import product
from decimal import Decimal

from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.conf import settings

from django_easy_currencies.models.Currency import Currency
from django_easy_currencies.models.CurrencyRate import CurrencyRate


class Command(BaseCommand):
    help = 'Updates or list rates of supported currencies.'
    base_service_url = 'http://openexchangerates.org/api/latest.json?app_id={0}'

    option_list = BaseCommand.option_list + (
        make_option('--update', action='store_true', dest='update', default=False, help='Update currency rates'),
        make_option('--list', action='store_true', dest='list', default=False, help='List current currency rates'),
    )

    @staticmethod
    def is_valid_config():
        c = getattr(settings, 'EASY_CURRENCIES', None)
        return isinstance(c, dict) and isinstance(c.get('currencies'), (list, tuple)) and bool(c.get('app_id'))

    def get_rates_info(self, url, currencies):
        try:
            self.stdout.write('Calling service: {0}'.format(url))
            response = urllib2.urlopen(url)
            if not response:
                raise Exception('Invalid response')
            info = json.loads(response.read(), parse_float=Decimal, parse_int=Decimal)
            info['rates'] = [(k, v) for k, v in info['rates'].items() if k in currencies]
            return info
        except URLError as url_error:
            self.stderr.write('Unable to connect to service {0}: {1}'.format(url, url_error))
            raise url_error
        except Exception as exception:
            self.stderr.write('Unable to retrieve ratings info: {0}'.format(exception))
            raise exception

    def create_or_update_currency_objects(self, currency_types):
        self.stdout.write('Updating currency objects...')
        currencies = {}
        for c in currency_types:
            self.stdout.write('Updating currency: {0}'.format(c))
            currency, created = Currency.objects.update_or_create(code=c, defaults={'code': c})
            currencies[c] = currency
        return currencies

    def create_or_update_usd_currency_rates(self, info, usd_currency):
        rates = {}
        for rate_code, rate_value in info['rates']:
            self.stdout.write('Updating rates for currency: {0}'.format(rate_code))
            rate_obj, _ = CurrencyRate.objects.update_or_create(original_currency=usd_currency,
                                                                target_currency=rate_code,
                                                                defaults={'rate': rate_value})
            rates[rate_code] = rate_obj.rate
        return rates

    def create_or_update_inverted_usd_currency_rates(self, currencies, usd_rates):
        self.stdout.write('Updating reversed rates for USD currency...')
        for code, currency_obj in currencies.items():
            self.stdout.write('Updating rate {0}/USD'.format(code))
            rate_value = Decimal('1') if code == 'USD' else usd_rates[code]
            CurrencyRate.objects.update_or_create(original_currency=currency_obj,
                                                  target_currency='USD',
                                                  defaults={'rate': rate_value})

    def create_or_update_inverted_currency_rates_permutations(self, currencies, currency_types, usd_rates):
        self.stdout.write('Updating reversed rates permutations...')
        for p in [x for x in product(currency_types, repeat=2)]:
            from_currency, to_currency = p
            self.stdout.write('Updating rate {0}/{1}'.format(from_currency, to_currency))
            if from_currency == to_currency:
                rate_value = Decimal('1')
            else:
                rate_value = usd_rates[to_currency] / usd_rates[from_currency]
            CurrencyRate.objects.update_or_create(original_currency=currencies[from_currency],
                                                  target_currency=to_currency,
                                                  defaults={'rate': rate_value})

    @staticmethod
    def get_currency_list():
        return [c[0] for c in settings.EASY_CURRENCIES['currencies']]

    def get_service_url(self):
        return self.base_service_url.format(settings.EASY_CURRENCIES['app_id'])
MIT License
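Triggering the command above from code; call_command is Django's standard programmatic entry point, and the option names follow the option_list in the context.

from django.core.management import call_command

# Equivalent to: python manage.py currencies --update
call_command('currencies', update=True)
# Equivalent to: python manage.py currencies --list
call_command('currencies', list=True)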
angr/claripy
claripy/vsa/discrete_strided_interval_set.py
DiscreteStridedIntervalSet.normalize
python
def normalize(self):
    if self.should_collapse():
        return self.collapse()
    elif self.number_of_values == 1:
        return list(self._si_set)[0]
    else:
        for si in self._si_set:
            self._update_bits(si)
        return self
Return the collapsed object if ``should_collapse()`` is True, otherwise
return self.

:return: A DiscreteStridedIntervalSet object.
https://github.com/angr/claripy/blob/a18a0e9bba80470cbd139d7d876f504da634076a/claripy/vsa/discrete_strided_interval_set.py#L153-L164
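A minimal usage sketch (the claripy VSA backend normally constructs these objects internally; the constructor arguments here are illustrative):

# Hypothetical illustration of the normalize() contract: a singleton
# set unwraps to its only StridedInterval, and a set whose cardinality
# exceeds the collapse threshold folds back into one interval.
from claripy.vsa.strided_interval import StridedInterval
from claripy.vsa.discrete_strided_interval_set import DiscreteStridedIntervalSet

si = StridedInterval(bits=32, stride=1, lower_bound=0, upper_bound=10)
dsis = DiscreteStridedIntervalSet(bits=32, si_set={si})
print(dsis.normalize())  # returns si itself, since number_of_values == 1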
import functools
import numbers
import itertools

from .strided_interval import StridedInterval

DEFAULT_MAX_CARDINALITY_WITHOUT_COLLAPSING = 256


def apply_on_each_si(f):
    @functools.wraps(f)
    def operator(self, o=None):
        if o is None:
            new_si_set = set()
            for a in self._si_set:
                new_si_set.add(getattr(a, f.__name__)())
            ret = DiscreteStridedIntervalSet(bits=self.bits, si_set=new_si_set)
            return ret.normalize()
        if isinstance(o, DiscreteStridedIntervalSet):
            new_si_set = set()
            for a in self._si_set:
                for b in o._si_set:
                    new_si_set.add(getattr(a, f.__name__)(b))
            ret = DiscreteStridedIntervalSet(bits=self.bits, si_set=new_si_set)
            return ret.normalize()
        elif isinstance(o, (StridedInterval, numbers.Number, BVV)):
            new_si_set = set()
            for si in self._si_set:
                new_si_set.add(getattr(si, f.__name__)(o))
            ret = DiscreteStridedIntervalSet(bits=self.bits, si_set=new_si_set)
            return ret.normalize()
        else:
            raise ClaripyVSAOperationError('Unsupported operand type %s' % (type(o)))
    return operator


def convert_operand_to_si(f):
    @functools.wraps(f)
    def converter(self, o):
        if isinstance(o, BVV):
            o = o.value
        if isinstance(o, numbers.Number):
            o = StridedInterval(bits=self.bits, stride=0, lower_bound=o, upper_bound=o)
        return f(self, o)
    return converter


def collapse_operand(f):
    @functools.wraps(f)
    def collapser(self, o):
        if isinstance(o, DiscreteStridedIntervalSet):
            return f(self, o.collapse())
        else:
            return f(self, o)
    return collapser


dsis_id_ctr = itertools.count()


class DiscreteStridedIntervalSet(StridedInterval):
    def __init__(self, name=None, bits=0, si_set=None, max_cardinality=None):
        if name is None:
            name = "DSIS_%d" % next(dsis_id_ctr)
        if si_set is not None and len(si_set):
            self._si_set = si_set
        else:
            self._si_set = set()
        self._max_cardinality = DEFAULT_MAX_CARDINALITY_WITHOUT_COLLAPSING if max_cardinality is None else max_cardinality
        StridedInterval.__init__(self, name=name, bits=bits)
        for si in self._si_set:
            self._update_bounds(si)
            self._update_bits(si)

    def __repr__(self):
        representatives = ", ".join([i.__repr__() for i in list(self._si_set)[:5]])
        if self.number_of_values > 5:
            representatives += ", ..."
        return "%s<%d>(%d){%s}" % (self._name, self._bits, self.number_of_values, representatives)

    @property
    def cardinality(self):
        cardinality = 0
        for si in self._si_set:
            cardinality += si.cardinality
        return cardinality

    @property
    def number_of_values(self):
        return len(self._si_set)

    @property
    def stride(self):
        return self.collapse().stride

    def should_collapse(self):
        return self.cardinality > self._max_cardinality

    def collapse(self):
        if self.cardinality:
            r = None
            for si in self._si_set:
                r = r._union(si) if r is not None else si
            return r
        else:
            return StridedInterval.empty(self._bits)
BSD 2-Clause Simplified License
elfi-dev/elfi
tests/conftest.py
client
python
def client(request):
    client_module = request.param
    client_name = client_module.__name__.split('.')[-1]
    use_client = request.config.getoption('--client')

    if use_client != 'all' and use_client != client_name:
        pytest.skip("Skipping client {}".format(client_name))

    try:
        client = client_module.Client()
    except BaseException:
        pytest.skip("Client {} not available".format(client_name))

    yield client
Provides a fixture for all the different supported clients
https://github.com/elfi-dev/elfi/blob/07ac0ed5e81d5d5fb42de63db3cf9ccc9135b88c/tests/conftest.py#L36-L54
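A minimal invocation sketch; the `--client` value names one of the client modules listed in `params` below, and the tests/ path is an assumption about the repository layout:

import pytest

# Equivalent to running `pytest --client multiprocessing tests/` from a shell;
# clients whose backends are unavailable are skipped, not failed.
pytest.main(["--client", "multiprocessing", "tests/"])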
import logging
import os
import time

import numpy as np
import pytest
import scipy.stats as ss

import elfi
import elfi.clients.dask as dask
import elfi.clients.ipyparallel as eipp
import elfi.clients.multiprocessing as mp
import elfi.clients.native as native
import elfi.examples.gauss
import elfi.examples.ma2
from elfi.methods.bo.gpy_regression import GPyRegression
from elfi.methods.bo.acquisition import ExpIntVar, MaxVar, RandMaxVar
from elfi.model.extensions import ModelPrior

elfi.clients.native.set_as_default()


def pytest_addoption(parser):
    parser.addoption(
        "--client", action="store", default="all",
        help="perform the tests for the specified client (default all)")


@pytest.fixture(scope="session", params=[eipp, dask, mp, native])
BSD 3-Clause New or Revised License
gimelstudio/gimel-studio
src/GimelStudio/interface/dark_menu_renderer.py
DarkMenuRenderer.DrawMenuBar
python
def DrawMenuBar(self, menubar, dc):
    fnt = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
    textColour = wx.Colour("#fff")
    highlightTextColour = wx.Colour("#fff")
    dc.SetFont(fnt)
    dc.SetTextForeground(textColour)
    clientRect = menubar.GetClientRect()
    self.DrawMenuBarBackground(dc, clientRect)
    padding, dummy = dc.GetTextExtent("W")
    posx = 0
    posy = menubar._margin
    menuBarRect = menubar.GetClientRect()
    for item in menubar._items:
        item.SetRect(wx.Rect())
    for item in menubar._items:
        title = item.GetTitle()
        fixedText = title
        location, labelOnly = flatmenu.GetAccelIndex(fixedText)
        textWidth, textHeight = dc.GetTextExtent(fixedText)
        rect = wx.Rect(posx + padding / 2, posy, textWidth, textHeight)
        if posx + rect.width + flatmenu.DROP_DOWN_ARROW_WIDTH >= menuBarRect.width:
            break
        button_rect = wx.Rect(*rect)
        button_rect.height = menubar._menuBarHeight
        button_rect.width = rect.width + padding
        button_rect.x = posx
        button_rect.y = 0
        copy = wx.Rect(*button_rect)
        item.SetRect(copy)
        selected = False
        if item.GetState() == ControlFocus:
            self.DrawMenuBarButton(dc, button_rect, ControlFocus)
            dc.SetTextForeground(highlightTextColour)
            selected = True
        else:
            dc.SetTextForeground(textColour)
        ww, hh = dc.GetTextExtent(labelOnly)
        textOffset = (rect.width - ww) / 2
        if not menubar._isLCD and item.GetTextBitmap().IsOk() and not selected:
            dc.DrawBitmap(item.GetTextBitmap(), rect.x, rect.y, True)
        elif not menubar._isLCD and item.GetSelectedTextBitmap().IsOk() and selected:
            dc.DrawBitmap(item.GetSelectedTextBitmap(), rect.x, rect.y, True)
        else:
            if not menubar._isLCD:
                bmp = wx.Bitmap(rect.width, rect.height)
                memDc = wx.MemoryDC()
                memDc.SelectObject(bmp)
                if selected:
                    memDc.SetTextForeground(highlightTextColour)
                else:
                    memDc.SetTextForeground(textColour)
                memDc.SetPen(wx.Pen(wx.Colour(255, 0, 0)))
                memDc.SetBrush(wx.Brush(wx.Colour(255, 0, 0)))
                memDc.DrawRectangle(0, 0, rect.width, rect.height)
                memDc.SetFont(fnt)
            if location == wx.NOT_FOUND or location >= len(fixedText):
                if not menubar._isLCD:
                    memDc.DrawText(title, textOffset, 0)
                dc.DrawText(title, rect.x + textOffset, rect.y)
            else:
                before = labelOnly[0:location]
                underlineLetter = labelOnly[location]
                after = labelOnly[location + 1:]
                if not menubar._isLCD:
                    memDc.DrawText(before, textOffset, 0)
                dc.DrawText(before, rect.x + textOffset, rect.y)
                if "__WXGTK__" not in wx.Platform:
                    w1, h = dc.GetTextExtent(before)
                    fnt.SetUnderlined(True)
                    dc.SetFont(fnt)
                    dc.DrawText(underlineLetter, rect.x + w1 + textOffset, rect.y)
                    if not menubar._isLCD:
                        memDc.SetFont(fnt)
                        memDc.DrawText(underlineLetter, textOffset + w1, 0)
                else:
                    w1, h = dc.GetTextExtent(before)
                    dc.DrawText(underlineLetter, rect.x + w1 + textOffset, rect.y)
                    if not menubar._isLCD:
                        memDc.DrawText(underlineLetter, textOffset + w1, 0)
                    uderlineLetterW, uderlineLetterH = dc.GetTextExtent(underlineLetter)
                    dc.DrawLine(rect.x + w1 + textOffset, rect.y + uderlineLetterH - 2,
                                rect.x + w1 + textOffset + uderlineLetterW, rect.y + uderlineLetterH - 2)
                w2, h = dc.GetTextExtent(underlineLetter)
                fnt.SetUnderlined(False)
                dc.SetFont(fnt)
                dc.DrawText(after, rect.x + w1 + w2 + textOffset, rect.y)
                if not menubar._isLCD:
                    memDc.SetFont(fnt)
                    memDc.DrawText(after, w1 + w2 + textOffset, 0)
            if not menubar._isLCD:
                memDc.SelectObject(wx.NullBitmap)
                bmp.SetMask(wx.Mask(bmp, wx.Colour(255, 0, 0)))
                if selected:
                    item.SetSelectedTextBitmap(bmp)
                else:
                    item.SetTextBitmap(bmp)
        posx += rect.width + padding
    moreMenubtnBgBmpRect = wx.Rect(*menubar.GetMoreMenuButtonRect())
    if not menubar._moreMenuBgBmp:
        menubar._moreMenuBgBmp = wx.Bitmap(moreMenubtnBgBmpRect.width,
                                           moreMenubtnBgBmpRect.height)
    if menubar._showToolbar and len(menubar._tbButtons) > 0:
        rectX = 0
        rectWidth = clientRect.width - moreMenubtnBgBmpRect.width
        if len(menubar._items) == 0:
            rectHeight = clientRect.height
            rectY = 0
        else:
            rectHeight = clientRect.height - menubar._menuBarHeight
            rectY = menubar._menuBarHeight
        rr = wx.Rect(rectX, rectY, rectWidth, rectHeight)
        self.DrawToolBarBg(dc, rr)
        menubar.DrawToolbar(dc, rr)
    if menubar._showCustomize or menubar.GetInvisibleMenuItemCount() > 0 or menubar.GetInvisibleToolbarItemCount() > 0:
        memDc = wx.MemoryDC()
        memDc.SelectObject(menubar._moreMenuBgBmp)
        try:
            memDc.Blit(0, 0, menubar._moreMenuBgBmp.GetWidth(), menubar._moreMenuBgBmp.GetHeight(),
                       dc, moreMenubtnBgBmpRect.x, moreMenubtnBgBmpRect.y)
        except:
            pass
        memDc.SelectObject(wx.NullBitmap)
        menubar.DrawMoreButton(dc, menubar._dropDownButtonState)
        menubar._dropDownButtonArea = moreMenubtnBgBmpRect
Draws everything for :class:`FlatMenuBar`. :param `menubar`: an instance of :class:`FlatMenuBar`. :param `dc`: an instance of :class:`wx.DC`.
https://github.com/gimelstudio/gimel-studio/blob/e2750576e72edee6f2f4c268045b81459df82d89/src/GimelStudio/interface/dark_menu_renderer.py#L244-L439
import wx
import wx.lib.agw.flatmenu as flatmenu
from wx.lib.agw.artmanager import ArtManager, RendererBase, DCSaver
from wx.lib.agw.fmresources import ControlFocus, ControlPressed


def switchRGBtoBGR(colour):
    return wx.Colour(colour.Blue(), colour.Green(), colour.Red())


class DarkMenuRenderer(flatmenu.FMRenderer):
    def __init__(self):
        flatmenu.FMRenderer.__init__(self)
        self.highlightCheckAndRadio = True
        self.menuFaceColour = wx.Colour("#333")
        self.menuBarFaceColour = wx.Colour("#333")
        self.menuBarFocusFaceColour = wx.Colour("#5874C5")
        self.menuBarFocusBorderColour = wx.Colour("#5874C5")
        self.menuBarPressedFaceColour = wx.Colour("#5874C5")
        self.menuBarPressedBorderColour = wx.Colour("#5874C5")
        self.menuFocusFaceColour = wx.Colour("#5874C5")
        self.menuFocusBorderColour = wx.Colour("#5874C5")
        self.menuPressedFaceColour = wx.Colour("#5874C5")
        self.menuPressedBorderColour = wx.Colour("#5874C5")
        self.buttonFaceColour = wx.Colour("#5874C5")
        self.buttonBorderColour = wx.Colour("#5874C5")
        self.buttonFocusFaceColour = wx.Colour("#5874C5")
        self.buttonFocusBorderColour = wx.Colour("#5874C5")
        self.buttonPressedFaceColour = wx.Colour("#5874C5")
        self.buttonPressedBorderColour = wx.Colour("#5874C5")

    def DrawMenuItem(self, item, dc, xCoord, yCoord, imageMarginX, markerMarginX,
                     textX, rightMarginX, selected=False, backgroundImage=None):
        borderXSize = item._parentMenu.GetBorderXWidth()
        itemHeight = item._parentMenu.GetItemHeight()
        menuWidth = item._parentMenu.GetMenuWidth()
        itemRect = wx.Rect(xCoord, yCoord, menuWidth, itemHeight)
        rect = wx.Rect(xCoord + 2, yCoord, menuWidth - 4, itemHeight)
        backColour = self.menuFaceColour
        penColour = backColour
        backBrush = wx.Brush(backColour)
        leftMarginWidth = item._parentMenu.GetLeftMarginWidth()
        if backgroundImage is None:
            pen = wx.Pen(penColour)
            dc.SetPen(pen)
            dc.SetBrush(backBrush)
            dc.DrawRectangle(rect)
        if self.drawLeftMargin:
            self.DrawLeftMargin(item, dc, itemRect)
        if item.IsSeparator():
            sepWidth = xCoord + menuWidth - textX - 1
            self.DrawSeparator(dc, xCoord, yCoord, textX, sepWidth)
            return
        item._rect = itemRect
        bmp = item.GetSuitableBitmap(selected)
        if selected:
            self.DrawMenuButton(dc, rect.Deflate(1, 0), ControlFocus)
        if bmp.IsOk():
            imgHeight = bmp.GetHeight()
            imgWidth = bmp.GetWidth()
            if imageMarginX == 0:
                xx = rect.x + (leftMarginWidth - imgWidth) / 2
            else:
                xx = rect.x + ((leftMarginWidth - rect.height) - imgWidth) / 2 + rect.height
            yy = rect.y + (rect.height - imgHeight) / 2
            dc.DrawBitmap(bmp, xx, yy, True)
        if item.GetKind() == wx.ITEM_CHECK:
            if item.IsChecked():
                xx = rect.x + 1
                yy = rect.y + 1
                rr = wx.Rect(xx, yy, rect.height - 2, rect.height - 2)
                if not selected and self.highlightCheckAndRadio:
                    self.DrawButton(dc, rr, ControlFocus)
                dc.DrawBitmap(item._checkMarkBmp, rr.x + (rr.width - 16) / 2,
                              rr.y + (rr.height - 16) / 2, True)
        if item.GetKind() == wx.ITEM_RADIO:
            if item.IsChecked():
                xx = rect.x + 1
                yy = rect.y + 1
                rr = wx.Rect(xx, yy, rect.height - 2, rect.height - 2)
                if not selected and self.highlightCheckAndRadio:
                    self.DrawButton(dc, rr, ControlFocus)
                dc.DrawBitmap(item._radioMarkBmp, rr.x + (rr.width - 16) / 2,
                              rr.y + (rr.height - 16) / 2, True)
        text = item.GetLabel()
        if text:
            font = item.GetFont()
            if font is None:
                font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
            enabledTxtColour = wx.Colour("#fff")
            disabledTxtColour = self.itemTextColourDisabled
            textColour = (item.IsEnabled() and [enabledTxtColour] or [disabledTxtColour])[0]
            if item.IsEnabled() and item.GetTextColour():
                textColour = item.GetTextColour()
            dc.SetFont(font)
            w, h = dc.GetTextExtent(text)
            dc.SetTextForeground(textColour)
            if item._mnemonicIdx != wx.NOT_FOUND:
                text1 = text[0:item._mnemonicIdx]
                text2 = text[item._mnemonicIdx]
                text3 = text[item._mnemonicIdx + 1:]
                w1, dummy = dc.GetTextExtent(text1)
                w2, dummy = dc.GetTextExtent(text2)
                w3, dummy = dc.GetTextExtent(text3)
                posx = xCoord + textX + borderXSize
                posy = (itemHeight - h) / 2 + yCoord
                dc.DrawText(text1, posx, posy)
                if "__WXGTK__" not in wx.Platform:
                    font.SetUnderlined(True)
                    dc.SetFont(font)
                posx += w1
                dc.DrawText(text2, posx, posy)
                font.SetUnderlined(False)
                dc.SetFont(font)
                posx += w2
                dc.DrawText(text3, posx, posy)
            else:
                w, h = dc.GetTextExtent(text)
                dc.DrawText(text, xCoord + textX + borderXSize, (itemHeight - h) / 2 + yCoord)
        if item.GetAccelString():
            accelWidth, accelHeight = dc.GetTextExtent(item.GetAccelString())
            dc.DrawText(item.GetAccelString(), xCoord + rightMarginX - accelWidth,
                        (itemHeight - accelHeight) / 2 + yCoord)
        if item.GetSubMenu():
            rightArrowBmp = wx.Bitmap(menu_right_arrow_xpm)
            rightArrowBmp.SetMask(wx.Mask(rightArrowBmp, wx.WHITE))
            xx = xCoord + rightMarginX + borderXSize
            rr = wx.Rect(xx, rect.y + 1, rect.height - 2, rect.height - 2)
            dc.DrawBitmap(rightArrowBmp, rr.x + 4, rr.y + (rr.height - 16) / 2, True)
Apache License 2.0
nuagenetworks/vspk-python
vspk/v6/nuunderlaytest.py
NUUnderlayTest.associated_data_path_id
python
def associated_data_path_id(self, value):
    self._associated_data_path_id = value
Set associated_data_path_id value.

Notes:
    The associated data path ID.

    This attribute is named `associatedDataPathID` in VSD API.
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v6/nuunderlaytest.py#L267-L277
from bambou import NURESTObject


class NUUnderlayTest(NURESTObject):
    __rest_name__ = "underlaytest"
    __resource_name__ = "underlaytests"

    CONST_UNDERLAY_TEST_TYPE_PRE_BOOTSTRAP = "PRE_BOOTSTRAP"
    CONST_UNDERLAY_TEST_TYPE_ON_DEMAND = "ON_DEMAND"
    CONST_TEST_RESULT_NOT_APPLICABLE = "NOT_APPLICABLE"
    CONST_TEST_RESULT_FAIL = "FAIL"
    CONST_TEST_RESULT_PASS = "PASS"
    CONST_TEST_RESULT_DEGRADED = "DEGRADED"
    CONST_UNDERLAY_TEST_TYPE_BIRTH_CERTIFICATE = "BIRTH_CERTIFICATE"

    def __init__(self, **kwargs):
        super(NUUnderlayTest, self).__init__()

        self._name = None
        self._test_result = None
        self._underlay_test_server = None
        self._underlay_test_type = None
        self._create_only = None
        self._associated_data_path_id = None
        self._associated_ns_gateway_id = None
        self._associated_ns_gateway_name = None
        self._associated_system_id = None
        self._associated_test_suite_run_id = None
        self._associated_uplink_connection_id = None
        self._associated_uplink_interface = None
        self._start_date_time = None
        self._stop_date_time = None
        self._run_bandwidth_test = None
        self._run_connectivity_test = None
        self._run_mtu_discovery_test = None
        self._duration = None

        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="test_result", remote_name="testResult", attribute_type=str, is_required=False, is_unique=False, choices=[u'DEGRADED', u'FAIL', u'NOT_APPLICABLE', u'PASS'])
        self.expose_attribute(local_name="underlay_test_server", remote_name="underlayTestServer", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="underlay_test_type", remote_name="underlayTestType", attribute_type=str, is_required=False, is_unique=False, choices=[u'BIRTH_CERTIFICATE', u'ON_DEMAND', u'PRE_BOOTSTRAP'])
        self.expose_attribute(local_name="create_only", remote_name="createOnly", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_data_path_id", remote_name="associatedDataPathID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_ns_gateway_id", remote_name="associatedNSGatewayID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_ns_gateway_name", remote_name="associatedNSGatewayName", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_system_id", remote_name="associatedSystemID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_test_suite_run_id", remote_name="associatedTestSuiteRunID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_uplink_connection_id", remote_name="associatedUplinkConnectionID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_uplink_interface", remote_name="associatedUplinkInterface", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="start_date_time", remote_name="startDateTime", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="stop_date_time", remote_name="stopDateTime", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="run_bandwidth_test", remote_name="runBandwidthTest", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="run_connectivity_test", remote_name="runConnectivityTest", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="run_mtu_discovery_test", remote_name="runMTUDiscoveryTest", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="duration", remote_name="duration", attribute_type=int, is_required=False, is_unique=False)

        self._compute_args(**kwargs)

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def test_result(self):
        return self._test_result

    @test_result.setter
    def test_result(self, value):
        self._test_result = value

    @property
    def underlay_test_server(self):
        return self._underlay_test_server

    @underlay_test_server.setter
    def underlay_test_server(self, value):
        self._underlay_test_server = value

    @property
    def underlay_test_type(self):
        return self._underlay_test_type

    @underlay_test_type.setter
    def underlay_test_type(self, value):
        self._underlay_test_type = value

    @property
    def create_only(self):
        return self._create_only

    @create_only.setter
    def create_only(self, value):
        self._create_only = value

    @property
    def associated_data_path_id(self):
        return self._associated_data_path_id

    @associated_data_path_id.setter
BSD 3-Clause New or Revised License
olitheolix/aiokubernetes
aiokubernetes/models/apps_v1beta1_deployment_list.py
AppsV1beta1DeploymentList.kind
python
def kind(self):
    return self._kind
Gets the kind of this AppsV1beta1DeploymentList.  # noqa: E501

Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds  # noqa: E501

:return: The kind of this AppsV1beta1DeploymentList.  # noqa: E501
:rtype: str
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/apps_v1beta1_deployment_list.py#L115-L123
import pprint
import re

from aiokubernetes.models.apps_v1beta1_deployment import AppsV1beta1Deployment
from aiokubernetes.models.v1_list_meta import V1ListMeta


class AppsV1beta1DeploymentList(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    swagger_types = {
        'api_version': 'str',
        'items': 'list[AppsV1beta1Deployment]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None):
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        self._api_version = api_version

    @property
    def items(self):
        return self._items

    @items.setter
    def items(self, items):
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")
        self._items = items

    @property
Apache License 2.0
lord63/pyhipku
pyhipku/encode.py
get_key
python
def get_key(is_ipv6):
    if is_ipv6:
        key = [adjectives, nouns, adjectives, nouns, verbs, adjectives,
               adjectives, adjectives, adjectives, adjectives, nouns,
               adjectives, nouns, verbs, adjectives, nouns]
    else:
        key = [animal_adjectives, animal_colors, animal_nouns, animal_verbs,
               nature_adjectives, nature_nouns, plant_nouns, plant_verbs]
    return key
Return an array of dictionaries representing the correct word order for the haiku
https://github.com/lord63/pyhipku/blob/699c7c4e855ee70f1e73c605809177c5c4dee651/pyhipku/encode.py#L105-L115
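A quick shape check, assuming the package is importable; IPv6 addresses factor into 16 word slots and IPv4 into 8:

from pyhipku.encode import get_key

assert len(get_key(True)) == 16   # one word list per IPv6 factor
assert len(get_key(False)) == 8   # one word list per IPv4 factor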
from __future__ import absolute_import, division

import socket

from .dictionary import (adjectives, nouns, verbs, animal_adjectives,
                         animal_colors, animal_nouns, animal_verbs,
                         nature_adjectives, nature_nouns, plant_nouns,
                         plant_verbs)


def encode(ip):
    is_ipv6 = ip_is_ipv6(ip)
    decimal_octect_array = split_ip(ip, is_ipv6)
    factord_octet_array = factor_octets(decimal_octect_array, is_ipv6)
    encoded_word_array = encode_words(factord_octet_array, is_ipv6)
    haiku_text = write_haiku(encoded_word_array, is_ipv6)
    return haiku_text


def ip_is_ipv6(ip):
    if ip.find(':') != -1:
        return True
    elif ip.find('.') != -1:
        return False
    else:
        raise ValueError("Formatting error in IP address input. "
                         "Contains neither ':' or '.'")


def split_ip(ip, is_ipv6):
    if is_ipv6:
        separator = ':'
    else:
        separator = '.'
    ip = ''.join(ip.split())
    try:
        if is_ipv6:
            socket.inet_pton(socket.AF_INET6, ip)
        else:
            socket.inet_pton(socket.AF_INET, ip)
    except (OSError, socket.error):
        raise ValueError("Illegal IP address.")
    octet_array = ip.split(separator)
    if len(octet_array) < 8 and is_ipv6:
        octet_missing_num = 8 - len(octet_array)
        octet_array = pad_octets(octet_array, octet_missing_num)
    decimal_octect_array = []
    if is_ipv6:
        for i in range(len(octet_array)):
            decimal_octect_array.append(int(octet_array[i], 16))
    else:
        decimal_octect_array = [int(num) for num in octet_array]
    return decimal_octect_array


def pad_octets(octet_array, octet_missing_num):
    padded_octect = '0'
    length = len(octet_array)
    if octet_array[0] == '':
        octet_array[0] = padded_octect
    if octet_array[length - 1] == '':
        octet_array[length - 1] = padded_octect
    for i in range(length):
        if octet_array[i] == '':
            octet_array[i] = padded_octect
            for j in range(octet_missing_num):
                octet_array.insert(i, padded_octect)
    return octet_array


def factor_octets(octet_array, is_ipv6):
    if is_ipv6:
        divisor = 256
    else:
        divisor = 16
    factord_octet_array = []
    for i in range(len(octet_array)):
        factord_octet_array.extend([octet_array[i] // divisor,
                                    octet_array[i] % divisor])
    return factord_octet_array


def encode_words(factor_array, is_ipv6):
    key = get_key(is_ipv6)
    encoded_word_array = []
    for i in range(len(factor_array)):
        encoded_word_array.append(key[i][factor_array[i]])
    return encoded_word_array
MIT License
lunixbochs/actualvim
lib/neovim/api/nvim.py
Nvim.from_session
python
def from_session(cls, session):
    session.error_wrapper = lambda e: NvimError(e[1])
    channel_id, metadata = session.request(b'vim_get_api_info')

    if IS_PYTHON3:
        metadata = walk(decode_if_bytes, metadata)

    types = {
        metadata['types']['Buffer']['id']: Buffer,
        metadata['types']['Window']['id']: Window,
        metadata['types']['Tabpage']['id']: Tabpage,
    }

    return cls(session, channel_id, metadata, types)
Create a new Nvim instance for a Session instance.

This method must be called to create the first Nvim instance, since it
queries Nvim metadata for type information and sets a SessionHook for
creating specialized objects from Nvim remote handles.
https://github.com/lunixbochs/actualvim/blob/1f555ce719e49d6584f0e35e9f0db2f216b98fa5/lib/neovim/api/nvim.py#L43-L63
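A sketch of the intended call pattern; `session` is a placeholder for an already-attached msgpack-RPC session, which ActualVim constructs itself:

from ActualVim.lib.neovim.api.nvim import Nvim

nvim = Nvim.from_session(session)  # session: an attached RPC session object
# nvim now resolves remote handles to typed Buffer/Window/Tabpage wrappers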
import functools
import os
import sys

from traceback import format_stack

from ActualVim.lib import msgpack

from .buffer import Buffer
from .common import (Remote, RemoteApi, RemoteMap, RemoteSequence,
                     decode_if_bytes, walk)
from .tabpage import Tabpage
from .window import Window
from ..compat import IS_PYTHON3
from ..util import Version, format_exc_skip

__all__ = ('Nvim')

os_chdir = os.chdir


class Nvim(object):
    @classmethod
MIT License
readthedocs/readthedocs.org
readthedocs/organizations/models.py
TeamMember.send_add_notification
python
def send_add_notification(self, request):
    if self.invite is None and self.member is not None:
        send_team_add_email(team_member=self, request=request)
    elif self.member is None and self.invite is not None:
        send_team_invite_email(invite=self.invite, request=request)
Notify member or invite of being added to a team.
https://github.com/readthedocs/readthedocs.org/blob/2cff8376f0ef8f25ae6d8763bdbec86f47e33ab9/readthedocs/organizations/models.py#L357-L362
from autoslug import AutoSlugField
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.utils.crypto import salted_hmac
from django.utils.translation import ugettext_lazy as _

from readthedocs.core.history import ExtraHistoricalRecords
from readthedocs.core.permissions import AdminPermission
from readthedocs.core.utils import slugify

from . import constants
from .managers import TeamManager, TeamMemberManager
from .querysets import OrganizationQuerySet
from .utils import send_team_add_email, send_team_invite_email


class Organization(models.Model):
    pub_date = models.DateTimeField(_('Publication date'), auto_now_add=True)
    modified_date = models.DateTimeField(_('Modified date'), auto_now=True)
    projects = models.ManyToManyField(
        'projects.Project',
        verbose_name=_('Projects'),
        related_name='organizations',
    )
    owners = models.ManyToManyField(
        User,
        verbose_name=_('Owners'),
        related_name='owner_organizations',
        through='OrganizationOwner',
    )
    name = models.CharField(_('Name'), max_length=100)
    slug = models.SlugField(
        _('Slug'),
        max_length=255,
        unique=True,
        null=False,
        blank=False,
    )
    email = models.EmailField(
        _('E-mail'),
        help_text='How can we get in touch with you?',
        max_length=255,
        blank=True,
        null=True,
    )
    description = models.TextField(
        _('Description'),
        help_text='Tell us a little about yourself.',
        blank=True,
        null=True,
    )
    url = models.URLField(
        _('Home Page'),
        help_text='The main website for your Organization',
        max_length=255,
        blank=True,
        null=True,
    )
    disabled = models.BooleanField(
        _('Disabled'),
        help_text='Docs and builds are disabled for this organization',
        default=False,
    )
    artifacts_cleaned = models.BooleanField(
        _('Artifacts Cleaned'),
        help_text='Artifacts are cleaned out from storage',
        default=False,
    )
    max_concurrent_builds = models.IntegerField(
        _('Maximum concurrent builds allowed for this organization'),
        null=True,
        blank=True,
    )
    stripe_id = models.CharField(
        _('Stripe customer ID'),
        max_length=100,
        blank=True,
        null=True,
    )

    objects = OrganizationQuerySet.as_manager()
    history = ExtraHistoricalRecords()

    class Meta:
        base_manager_name = 'objects'
        ordering = ['name']
        get_latest_by = ['-pub_date']

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('organization_detail', args=(self.slug,))

    @property
    def users(self):
        return AdminPermission.members(self)

    @property
    def members(self):
        return AdminPermission.members(self)

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = slugify(self.name)
        super().save(*args, **kwargs)

    def add_member(self, user, team):
        if not team.members.filter(pk=user.pk).exists():
            TeamMember.objects.create(team=team, member=user)


class OrganizationOwner(models.Model):
    owner = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
    )
    organization = models.ForeignKey(
        Organization,
        on_delete=models.CASCADE,
    )

    def __str__(self):
        return _('{org} owner {owner}').format(
            org=self.organization.name,
            owner=self.owner.username,
        )


class Team(models.Model):
    pub_date = models.DateTimeField(_('Publication date'), auto_now_add=True)
    modified_date = models.DateTimeField(_('Modified date'), auto_now=True)
    organization = models.ForeignKey(
        Organization,
        related_name='teams',
        on_delete=models.CASCADE,
    )
    projects = models.ManyToManyField(
        'projects.Project',
        verbose_name=_('Projects'),
        related_name='teams',
        blank=True,
    )
    members = models.ManyToManyField(
        User,
        verbose_name=_('Users'),
        related_name='teams',
        blank=True,
        through='TeamMember',
    )
    name = models.CharField(_('Name'), max_length=100)
    slug = AutoSlugField(
        populate_from='name',
        always_update=True,
        unique_with=['organization'],
    )
    access = models.CharField(
        _('Access'),
        max_length=100,
        choices=constants.ACCESS_LEVELS,
        default='readonly',
    )
    auto_join_email_users = models.BooleanField(
        default=False,
        help_text="Auto join users with an organization's email address to this team.",
    )

    objects = TeamManager()
    history = ExtraHistoricalRecords()

    class Meta:
        base_manager_name = 'objects'
        unique_together = (
            ('slug', 'organization'),
            ('name', 'organization'),
        )

    def get_absolute_url(self):
        return reverse(
            'organization_team_detail',
            args=(self.organization.slug, self.slug),
        )

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = slugify(self.name)
        super().save(*args, **kwargs)


class TeamInvite(models.Model):
    pub_date = models.DateTimeField(_('Publication date'), auto_now_add=True)
    modified_date = models.DateTimeField(_('Modified date'), auto_now=True)
    organization = models.ForeignKey(
        Organization,
        related_name='invites',
        on_delete=models.CASCADE,
    )
    team = models.ForeignKey(
        Team,
        verbose_name=_('Team'),
        related_name='invites',
        on_delete=models.CASCADE,
    )
    email = models.EmailField(_('E-mail'))
    hash = models.CharField(_('Hash'), max_length=250)
    count = models.IntegerField(_('Count'), default=0)
    total = models.IntegerField(_('Total'), default=10)

    class Meta:
        unique_together = ('team', 'email')

    def __str__(self):
        return '{email} to {team}'.format(
            email=self.email,
            team=self.team,
        )

    def save(self, *args, **kwargs):
        hash_ = salted_hmac(
            '.'.join([self.__module__, self.__class__.__name__]),
            ''.join([str(self.team), str(self.email)]),
        )
        self.hash = hash_.hexdigest()[::2]
        super().save(*args, **kwargs)


class TeamMember(models.Model):

    class Meta:
        unique_together = (
            ('team', 'member', 'invite'),
            ('team', 'member'),
            ('team', 'invite'),
        )

    team = models.ForeignKey(
        Team,
        on_delete=models.CASCADE,
    )
    member = models.ForeignKey(
        User,
        blank=True,
        null=True,
        default=None,
        on_delete=models.CASCADE,
    )
    invite = models.ForeignKey(
        TeamInvite,
        blank=True,
        null=True,
        default=None,
        on_delete=models.SET_NULL,
    )

    objects = TeamMemberManager()

    def __str__(self):
        state = ''
        if self.is_invite:
            state = ' (pending)'
        return '{username} to {team}{state}'.format(
            username=self.username,
            team=self.team,
            state=state,
        )

    @property
    def username(self):
        if self.is_member:
            return self.member.username
        if self.invite is not None:
            return self.invite.email
        return 'Unknown'

    @property
    def full_name(self):
        if self.is_member:
            return self.member.get_full_name()
        return ''

    @property
    def email(self):
        if self.is_member:
            return self.member.email
        return self.invite.email

    @property
    def is_member(self):
        return self.member is not None

    @property
    def is_invite(self):
        return self.member is None and self.invite is not None
MIT License
abelfunctions/abelfunctions
abelfunctions/riemann_constant_vector.py
sum_partitions
python
def sum_partitions(n):
    cartesian = product(range(n+1), repeat=n)
    for p in cartesian:
        if sum(p) == n:
            yield p
    return
r"""A generator of all length n tuples :math:`(m_1,...,m_n)` such that .. math:: m_1 + \cdots + m_n = n, where each :math:`m_i \geq 0`. Used by :func:`half_lattice_vector` to generate a collection of effective degree g-1 divisors. Parameters ---------- n : int Returns ------- p : generator
https://github.com/abelfunctions/abelfunctions/blob/67757a0b3744191c179ca4757e0db4a312bfd86a/abelfunctions/riemann_constant_vector.py#L205-L229
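For example (note the surrounding module imports sage.all, so importing it assumes a Sage environment):

from abelfunctions.riemann_constant_vector import sum_partitions

print(list(sum_partitions(2)))
# [(0, 2), (1, 1), (2, 0)]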
from abelfunctions.abelmap import AbelMap, Jacobian
from abelfunctions.divisor import Place
from abelfunctions.riemann_theta import RiemannTheta

import numpy
from numpy import dot
from itertools import product
from sage.all import cached_function


def initialize_half_lattice_vectors(X):
    g = X.genus()
    Omega = X.riemann_matrix()
    half = list(product((0, 0.5), repeat=g))
    half_lattice_vectors = numpy.array(
        [h1 + dot(Omega, h2) for h1 in half for h2 in half],
        dtype=numpy.complex
    )
    return half_lattice_vectors


def half_lattice_filter(half_lattice_vectors, J, C, D, epsilon=1e-8):
    Z = AbelMap(D) - 0.5*AbelMap(C)
    shifted_half_lattice_vectors = [J(elt) for elt in half_lattice_vectors + Z]
    theta_values = RiemannTheta.oscillatory_part(
        shifted_half_lattice_vectors, J.Omega, epsilon=epsilon
    )
    theta_values = abs(theta_values)
    half_lattice_vectors = half_lattice_vectors[theta_values < epsilon]
    return half_lattice_vectors


def find_regular_places(X, n):
    XPF = X.PF.XPF
    places = []
    a = 0
    while len(places) < n:
        b = X.closest_discriminant_point(a, exact=False)
        R = XPF.radius(b)
        if abs(a-b) > R:
            places.extend(X(a))
        if a > 0:
            a = -a
        else:
            a += 1
    places = places[:n]
    return places
MIT License
python-control/python-control
control/matlab/timeresp.py
lsim
python
def lsim(sys, U=0., T=None, X0=0.):
    from ..timeresp import forced_response
    out = forced_response(sys, T, U, X0, return_x=True, transpose=True)
    return out[1], out[0], out[2]
Simulate the output of a linear system.

As a convenience for parameters `U`, `X0`: Numbers (scalars) are converted to
constant arrays with the correct shape. The correct shape is inferred from
arguments `sys` and `T`.

Parameters
----------
sys: LTI (StateSpace, or TransferFunction)
    LTI system to simulate
U: array-like or number, optional
    Input array giving input at each time `T` (default = 0).

    If `U` is ``None`` or ``0``, a special algorithm is used. This special
    algorithm is faster than the general algorithm, which is used otherwise.
T: array-like, optional for discrete LTI `sys`
    Time steps at which the input is defined; values must be evenly spaced.
X0: array-like or number, optional
    Initial condition (default = 0).

Returns
-------
yout: array
    Response of the system.
T: array
    Time values of the output.
xout: array
    Time evolution of the state vector.

See Also
--------
step, initial, impulse

Examples
--------
>>> yout, T, xout = lsim(sys, U, T, X0)
https://github.com/python-control/python-control/blob/5ab0a1c41a5ec906f825c30cbfbc6352a17a3a5d/control/matlab/timeresp.py#L252-L298
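A short end-to-end sketch using the documented signature; the first-order transfer function is an arbitrary example:

import numpy as np
from control.matlab import tf, lsim

sys = tf([1.], [1., 1.])          # G(s) = 1 / (s + 1)
T = np.linspace(0., 5., 100)
U = np.ones_like(T)               # unit-step input
yout, T, xout = lsim(sys, U, T)   # note the MATLAB-style return order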
__all__ = ['step', 'stepinfo', 'impulse', 'initial', 'lsim']


def step(sys, T=None, X0=0., input=0, output=None, return_x=False):
    from ..timeresp import step_response
    out = step_response(sys, T, X0, input, output,
                        transpose=True, return_x=return_x)
    return (out[1], out[0], out[2]) if return_x else (out[1], out[0])


def stepinfo(sysdata, T=None, yfinal=None, SettlingTimeThreshold=0.02,
             RiseTimeLimits=(0.1, 0.9)):
    from ..timeresp import step_info
    S = step_info(sysdata, T=T, T_num=None, yfinal=yfinal,
                  SettlingTimeThreshold=SettlingTimeThreshold,
                  RiseTimeLimits=RiseTimeLimits)
    return S


def impulse(sys, T=None, X0=0., input=0, output=None, return_x=False):
    from ..timeresp import impulse_response
    out = impulse_response(sys, T, X0, input, output,
                           transpose=True, return_x=return_x)
    return (out[1], out[0], out[2]) if return_x else (out[1], out[0])


def initial(sys, T=None, X0=0., input=None, output=None, return_x=False):
    from ..timeresp import initial_response
    T, yout, xout = initial_response(sys, T, X0, output=output,
                                     transpose=True, return_x=True)
    return (yout, T, xout) if return_x else (yout, T)
BSD 3-Clause New or Revised License
europython/djep
pyconde/schedule/migrations/0015_fix_tags.py
Migration.forwards
python
def forwards(self, orm):
    return
    ContentType = orm['contenttypes.ContentType']
    TaggedItem = orm['taggit.TaggedItem']
    Session = orm['schedule.Session']
    ct_sess = ContentType.objects.get(app_label='schedule', model='session')
    ct_prop = ContentType.objects.get(app_label='proposals', model='proposal')
    TaggedItem.objects.filter(content_type=ct_sess).delete()
    for sess in Session.objects.all():
        tags = []
        for tag in TaggedItem.objects.filter(content_type=ct_prop,
                                             object_id=sess.proposal_id):
            tags.append(TaggedItem(content_type=ct_sess, object_id=sess.pk,
                                   tag_id=tag.tag_id))
        TaggedItem.objects.bulk_create(tags)
Write your forwards methods here.
https://github.com/europython/djep/blob/afcccbdda483e5f6962ac97f0dc4c4c5ea67fd21/pyconde/schedule/migrations/0015_fix_tags.py#L13-L39
import datetime

from south.db import db
from south.v2 import DataMigration
from django.db import models


class Migration(DataMigration):

    depends_on = (
        ("schedule", "0015_auto__add_field_session_accept_recording__chg_field_session_duration__"),
    )
BSD 3-Clause New or Revised License
quantmind/pulsar
examples/chat/manage.py
WebChat.setup
python
def setup(self, environ):
    request = wsgi_request(environ)
    cfg = request.cache.cfg
    loop = request.cache._loop
    self.store = create_store(cfg.data_store, loop=loop)
    pubsub = self.store.pubsub(protocol=Protocol())
    channel = '%s_webchat' % self.name
    ensure_future(pubsub.subscribe(channel), loop=loop)
    return WsgiHandler([Router('/', get=self.home_page),
                        WebSocket('/message', Chat(pubsub, channel)),
                        Router('/rpc', post=Rpc(pubsub, channel),
                               response_content_types=JSON_CONTENT_TYPES)],
                       [AsyncResponseMiddleware,
                        GZipMiddleware(min_length=20)])
Called once only to set up the WSGI application handler. Check the :ref:`lazy wsgi handler <wsgi-lazy-handler>` section for further information.
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/examples/chat/manage.py#L134-L152
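A minimal launch sketch following the usual pulsar pattern; the exact keyword arguments accepted by WSGIServer here are an assumption:

# Hypothetical entry point: WSGIServer drives the LazyWsgi handler above.
def server(name='webchat', **kwargs):
    return WSGIServer(WebChat(name), name=name, **kwargs)

if __name__ == '__main__':
    server().start()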
import os
import time

from pulsar.api import create_future, ensure_future
from pulsar.apps.wsgi import (Router, WsgiHandler, LazyWsgi, WSGIServer,
                              GZipMiddleware, wsgi_request)
from pulsar.apps.ws import WS, WebSocket
from pulsar.apps.rpc import PulsarServerCommands
from pulsar.apps.data import create_store, PubSubClient
from pulsar.utils.httpurl import JSON_CONTENT_TYPES
from pulsar.apps.ds import pulsards_url
from pulsar.utils.system import json
from pulsar.utils.string import to_string

CHAT_DIR = os.path.dirname(__file__)


class ChatClient(PubSubClient):
    __slots__ = ('connection', 'channel')

    def __init__(self, connection, channel):
        self.connection = connection
        self.channel = channel

    def __call__(self, channel, message):
        self.connection.write(message)


class Protocol:

    def encode(self, message):
        if not isinstance(message, dict):
            message = {'message': message}
        message['time'] = time.time()
        return json.dumps(message)

    def decode(self, message):
        return to_string(message)


class Chat(WS):

    def __init__(self, pubsub, channel):
        self.pubsub = pubsub
        self.channel = channel

    def on_open(self, websocket):
        self.pubsub.add_client(ChatClient(websocket, self.channel))

    def on_message(self, websocket, msg):
        if msg:
            lines = []
            for li in msg.split('\n'):
                li = li.strip()
                if li:
                    lines.append(li)
            msg = ' '.join(lines)
            if msg:
                return self.pubsub.publish(self.channel, msg)


class Rpc(PulsarServerCommands):

    def __init__(self, pubsub, channel, **kwargs):
        self.pubsub = pubsub
        self.channel = channel
        super().__init__(**kwargs)

    async def rpc_message(self, request, message):
        await self.pubsub.publish(self.channel, message)
        return 'OK'


class WebChat(LazyWsgi):

    def __init__(self, server_name):
        self.name = server_name
BSD 3-Clause New or Revised License
sisl/adaptivestresstestingtoolbox
src/ast_toolbox/simulators/example_av_simulator/example_av_simulator.py
ExampleAVSimulator.get_reward_info
python
def get_reward_info(self):
    sim_state = self.simulator.get_ground_truth()

    return {"peds": sim_state['peds'],
            "car": sim_state['car'],
            "is_goal": self.is_goal(),
            "is_terminal": self.is_terminal()}
Returns any info needed by the reward function to calculate the current reward.
https://github.com/sisl/adaptivestresstestingtoolbox/blob/184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed/src/ast_toolbox/simulators/example_av_simulator/example_av_simulator.py#L119-L129
import numpy as np

from ast_toolbox.simulators import ASTSimulator
from ast_toolbox.simulators.example_av_simulator import ToyAVSimulator


class ExampleAVSimulator(ASTSimulator):
    def __init__(self, num_peds=1, simulator_args=None, **kwargs):
        self.c_num_peds = num_peds
        if simulator_args is None:
            simulator_args = {}
        self._action = np.array([0] * (6 * self.c_num_peds))
        self.simulator = ToyAVSimulator(num_peds=num_peds, **simulator_args)
        super().__init__(**kwargs)

    def get_first_action(self):
        return np.array([0] * (6 * self.c_num_peds))

    def simulate(self, actions, s_0):
        return self.simulator.run_simulation(actions=actions, s_0=s_0,
                                             simulation_horizon=self.c_max_path_length)

    def closed_loop_step(self, action):
        self.observation = np.ndarray.flatten(self.simulator.step_simulation(action))
        return self.observation_return()

    def reset(self, s_0):
        super(ExampleAVSimulator, self).reset(s_0=s_0)
        self.observation = np.ndarray.flatten(self.simulator.reset(s_0))
        return self.observation_return()
MIT License
xiaohangzhan/mix-and-match
caffe/python/caffe/test/test_net.py
simple_net_file
python
def simple_net_file(num_output):
    f = tempfile.NamedTemporaryFile(delete=False)
    f.write("""name: 'testnet' force_backward: true
    layer { type: 'DummyData' name: 'data' top: 'data' top: 'label'
      dummy_data_param { num: 5 channels: 2 height: 3 width: 4
        num: 5 channels: 1 height: 1 width: 1
        data_filler { type: 'gaussian' std: 1 }
        data_filler { type: 'constant' } } }
    layer { type: 'Convolution' name: 'conv' bottom: 'data' top: 'conv'
      convolution_param { num_output: 11 kernel_size: 2 pad: 3
        weight_filler { type: 'gaussian' std: 1 }
        bias_filler { type: 'constant' value: 2 } }
      param { decay_mult: 1 } param { decay_mult: 0 } }
    layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip'
      inner_product_param { num_output: """ + str(num_output) + """
        weight_filler { type: 'gaussian' std: 2.5 }
        bias_filler { type: 'constant' value: -3 } } }
    layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip' bottom: 'label'
      top: 'loss' }""")
    f.close()
    return f.name
Make a simple net prototxt, based on test_net.cpp, returning the name of the (temporary) file.
https://github.com/xiaohangzhan/mix-and-match/blob/8d4c9df80ef281b4112bf27d8901700dcedc798f/caffe/python/caffe/test/test_net.py#L9-L33
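Typical use in the tests: write the prototxt, then instantiate a Net from it (TEST phase chosen arbitrarily here):

import os
import caffe

net_file = simple_net_file(num_output=10)
net = caffe.Net(net_file, caffe.TEST)
os.remove(net_file)  # the helper leaves the temporary file behind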
import unittest
import tempfile
import os

import numpy as np

import caffe
MIT License
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_session_affinity_config.py
V1SessionAffinityConfig.__init__
python
def __init__(self, client_ip=None, local_vars_configuration=None):
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration

    self._client_ip = None
    self.discriminator = None

    if client_ip is not None:
        self.client_ip = client_ip
V1SessionAffinityConfig - a model defined in OpenAPI
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_session_affinity_config.py#L43-L53
import pprint
import re

import six

from kubernetes_asyncio.client.configuration import Configuration


class V1SessionAffinityConfig(object):
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    openapi_types = {
        'client_ip': 'V1ClientIPConfig'
    }

    attribute_map = {
        'client_ip': 'clientIP'
    }
Apache License 2.0
zendesk/basecrm-python
basecrm/services.py
LeadsService.create
python
def create(self, *args, **kwargs):
    if not args and not kwargs:
        raise Exception('attributes for Lead are missing')

    attributes = args[0] if args else kwargs
    attributes = dict((k, v) for k, v in attributes.iteritems()
                      if k in self.OPTS_KEYS_TO_PERSIST)
    _, _, lead = self.http_client.post("/leads", body=attributes)
    return lead
Create a lead

Creates a new lead. A lead may represent a single individual or an organization.

:calls: ``post /leads``
:param tuple *args: (optional) Single object representing Lead resource.
:param dict **kwargs: (optional) Lead attributes.
:return: Dictionary that supports attribute-style access and represents the newly created Lead resource.
:rtype: dict
https://github.com/zendesk/basecrm-python/blob/d6c26aca8850ba3fa0b9dff48d816b721d181ef7/basecrm/services.py#L668-L689
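A minimal usage sketch, assuming a configured basecrm client whose `leads` attribute is a LeadsService; the access token is a placeholder:

import basecrm

client = basecrm.Client(access_token='<YOUR_PERSONAL_ACCESS_TOKEN>')
lead = client.leads.create(first_name='Ada', last_name='Lovelace',
                           organization_name='Analytical Engines Ltd.')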
from decimal import *

from coercion import Coercion


class AccountsService(object):
    def __init__(self, http_client):
        self.__http_client = http_client

    @property
    def http_client(self):
        return self.__http_client

    def self(self):
        _, _, resource = self.http_client.get("/accounts/self")
        return resource


class AssociatedContactsService(object):
    """
    Allowed attributes for AssociatedContact to send to Base CRM backend servers.
    """
    OPTS_KEYS_TO_PERSIST = ['contact_id', 'role']

    def __init__(self, http_client):
        self.__http_client = http_client

    @property
    def http_client(self):
        return self.__http_client

    def list(self, deal_id, **params):
        _, _, associated_contacts = self.http_client.get(
            "/deals/{deal_id}/associated_contacts".format(deal_id=deal_id),
            params=params)
        return associated_contacts

    def create(self, deal_id, *args, **kwargs):
        if not args and not kwargs:
            raise Exception('attributes for AssociatedContact are missing')

        attributes = args[0] if args else kwargs
        attributes = dict((k, v) for k, v in attributes.iteritems()
                          if k in self.OPTS_KEYS_TO_PERSIST)
        _, _, associated_contact = self.http_client.post(
            "/deals/{deal_id}/associated_contacts".format(deal_id=deal_id),
            body=attributes)
        return associated_contact

    def destroy(self, deal_id, contact_id):
        status_code, _, _ = self.http_client.delete(
            "/deals/{deal_id}/associated_contacts/{contact_id}".format(
                deal_id=deal_id, contact_id=contact_id))
        return status_code == 204


class ContactsService(object):
    """
    Allowed attributes for Contact to send to Base CRM backend servers.
    """
    OPTS_KEYS_TO_PERSIST = ['address', 'contact_id', 'custom_fields',
                            'customer_status', 'description', 'email',
                            'facebook', 'fax', 'first_name', 'industry',
                            'is_organization', 'last_name', 'linkedin',
                            'mobile', 'name', 'owner_id', 'phone',
                            'prospect_status', 'skype', 'tags', 'title',
                            'twitter', 'website']

    def __init__(self, http_client):
        self.__http_client = http_client

    @property
    def http_client(self):
        return self.__http_client

    def list(self, **params):
        _, _, contacts = self.http_client.get("/contacts", params=params)
        return contacts

    def create(self, *args, **kwargs):
        if not args and not kwargs:
            raise Exception('attributes for Contact are missing')

        attributes = args[0] if args else kwargs
        attributes = dict((k, v) for k, v in attributes.iteritems()
                          if k in self.OPTS_KEYS_TO_PERSIST)
        _, _, contact = self.http_client.post("/contacts", body=attributes)
        return contact

    def retrieve(self, id):
        _, _, contact = self.http_client.get("/contacts/{id}".format(id=id))
        return contact

    def update(self, id, *args, **kwargs):
        if not args and not kwargs:
            raise Exception('attributes for Contact are missing')

        attributes = args[0] if args else kwargs
        attributes = dict((k, v) for k, v in attributes.iteritems()
                          if k in self.OPTS_KEYS_TO_PERSIST)
        _, _, contact = self.http_client.put("/contacts/{id}".format(id=id), body=attributes)
        return contact

    def destroy(self, id):
        status_code, _, _ = self.http_client.delete("/contacts/{id}".format(id=id))
        return status_code == 204


class DealsService(object):
    """
    Allowed attributes for Deal to send to Base CRM backend servers.
    """
    OPTS_KEYS_TO_PERSIST = ['contact_id', 'currency', 'custom_fields', 'hot',
                            'loss_reason_id', 'name', 'owner_id', 'source_id',
                            'stage_id', 'last_stage_change_at', 'tags',
                            'value', 'estimated_close_date',
                            'customized_win_likelihood', 'added_on']

    def __init__(self, http_client):
        self.__http_client = http_client

    @property
    def http_client(self):
        return self.__http_client

    def list(self, **params):
        _, _, deals = self.http_client.get("/deals", params=params)
        for deal in deals:
            deal['value'] = Coercion.to_decimal(deal['value'])
        return deals

    def create(self, *args, **kwargs):
        if not args and not kwargs:
            raise Exception('attributes for Deal are missing')

        attributes = args[0] if args else kwargs
        attributes = dict((k, v) for k, v in attributes.iteritems()
                          if k in self.OPTS_KEYS_TO_PERSIST)
        if "value" in attributes:
            attributes["value"] = Coercion.to_string(attributes["value"])
        _, _, deal = self.http_client.post("/deals", body=attributes)
        deal["value"] = Coercion.to_decimal(deal["value"])
        return deal

    def retrieve(self, id):
        _, _, deal = self.http_client.get("/deals/{id}".format(id=id))
        deal["value"] = Coercion.to_decimal(deal["value"])
        return deal

    def update(self, id, *args, **kwargs):
        if not args and not kwargs:
            raise Exception('attributes for Deal are missing')

        attributes = args[0] if args else kwargs
        attributes = dict((k, v) for k, v in attributes.iteritems()
                          if k in self.OPTS_KEYS_TO_PERSIST)
        if "value" in attributes:
            attributes["value"] = Coercion.to_string(attributes["value"])
        _, _, deal = self.http_client.put("/deals/{id}".format(id=id), body=attributes)
        deal["value"] = Coercion.to_decimal(deal["value"])
        return deal

    def destroy(self, id):
        status_code, _, _ = self.http_client.delete("/deals/{id}".format(id=id))
        return status_code == 204


class DealSourcesService(object):
    """
    Allowed attributes for DealSource to send to Base CRM backend servers.
    """
    OPTS_KEYS_TO_PERSIST = ['name', 'resource_type']

    def __init__(self, http_client):
        self.__http_client = http_client

    @property
    def http_client(self):
        return self.__http_client

    def list(self, **params):
        _, _, deal_sources = self.http_client.get("/deal_sources", params=params)
        return deal_sources

    def create(self, *args, **kwargs):
        if not args and not kwargs:
            raise Exception('attributes for DealSource are missing')

        attributes = args[0] if args else kwargs
        attributes = dict((k, v) for k, v in attributes.iteritems()
                          if k in self.OPTS_KEYS_TO_PERSIST)
        _, _, deal_source = self.http_client.post("/deal_sources", body=attributes)
        return deal_source

    def retrieve(self, id):
        _, _, deal_source = self.http_client.get("/deal_sources/{id}".format(id=id))
        return deal_source

    def update(self, id, *args, **kwargs):
        if not args and not kwargs:
            raise Exception('attributes for DealSource are missing')

        attributes = args[0] if args else kwargs
        attributes = dict((k, v) for k, v in attributes.iteritems()
                          if k in self.OPTS_KEYS_TO_PERSIST)
        _, _, deal_source = self.http_client.put("/deal_sources/{id}".format(id=id), body=attributes)
        return deal_source

    def destroy(self, id):
        status_code, _, _ = self.http_client.delete("/deal_sources/{id}".format(id=id))
        return status_code == 204


class DealUnqualifiedReasonsService(object):
    """
    Allowed attributes for DealUnqualifiedReason to send to Base CRM backend servers.
    """
    OPTS_KEYS_TO_PERSIST = ['name']

    def __init__(self, http_client):
        self.__http_client = http_client

    @property
    def http_client(self):
        return self.__http_client

    def list(self, **params):
        _, _, deal_unqualified_reasons = self.http_client.get("/deal_unqualified_reasons", params=params)
        return deal_unqualified_reasons

    def create(self, *args, **kwargs):
        if not args and not kwargs:
            raise Exception('attributes for DealUnqualifiedReason are missing')

        attributes = args[0] if args else kwargs
        attributes = dict((k, v) for k, v in attributes.iteritems()
                          if k in self.OPTS_KEYS_TO_PERSIST)
        _, _, deal_unqualified_reason = self.http_client.post("/deal_unqualified_reasons", body=attributes)
        return deal_unqualified_reason

    def retrieve(self, id):
        _, _, deal_unqualified_reason = self.http_client.get("/deal_unqualified_reasons/{id}".format(id=id))
        return deal_unqualified_reason

    def update(self, id, *args, **kwargs):
        if not args and not kwargs:
            raise Exception('attributes for DealUnqualifiedReason are missing')

        attributes = args[0] if args else kwargs
        attributes = dict((k, v) for k, v in attributes.iteritems()
                          if k in self.OPTS_KEYS_TO_PERSIST)
        _, _, deal_unqualified_reason = self.http_client.put("/deal_unqualified_reasons/{id}".format(id=id), body=attributes)
        return deal_unqualified_reason

    def destroy(self, id):
        status_code, _, _ = self.http_client.delete("/deal_unqualified_reasons/{id}".format(id=id))
        return status_code == 204


class LeadsService(object):
    """
    Allowed attributes for Lead to send to Base CRM backend servers.
    """
    OPTS_KEYS_TO_PERSIST = ['address', 'custom_fields', 'description',
                            'email', 'facebook', 'fax', 'first_name',
                            'industry', 'last_name', 'linkedin', 'mobile',
                            'organization_name', 'owner_id', 'phone',
                            'skype', 'source_id', 'status', 'tags', 'title',
                            'twitter', 'website']

    def __init__(self, http_client):
        self.__http_client = http_client

    @property
    def http_client(self):
        return self.__http_client

    def list(self, **params):
        _, _, leads = self.http_client.get("/leads", params=params)
        return leads
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1alpha1_priority_level_configuration_status.py
V1alpha1PriorityLevelConfigurationStatus.__ne__
python
def __ne__(self, other):
    if not isinstance(other, V1alpha1PriorityLevelConfigurationStatus):
        return True

    return self.to_dict() != other.to_dict()
Returns true if both objects are not equal
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1alpha1_priority_level_configuration_status.py#L117-L122
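Because the comparison delegates to to_dict(), two instances with equal fields compare equal; a sketch, assuming the model is re-exported at package level as in other generated clients:

from kubernetes_asyncio.client import V1alpha1PriorityLevelConfigurationStatus

a = V1alpha1PriorityLevelConfigurationStatus()
b = V1alpha1PriorityLevelConfigurationStatus()
assert not (a != b)         # __ne__ mirrors __eq__
assert a != "not-a-status"  # a non-matching type is always unequal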
import pprint
import re

import six

from kubernetes_asyncio.client.configuration import Configuration


class V1alpha1PriorityLevelConfigurationStatus(object):
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    openapi_types = {
        'conditions': 'list[V1alpha1PriorityLevelConfigurationCondition]'
    }

    attribute_map = {
        'conditions': 'conditions'
    }

    def __init__(self, conditions=None, local_vars_configuration=None):
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._conditions = None
        self.discriminator = None

        if conditions is not None:
            self.conditions = conditions

    @property
    def conditions(self):
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        self._conditions = conditions

    def to_dict(self):
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

    def __eq__(self, other):
        if not isinstance(other, V1alpha1PriorityLevelConfigurationStatus):
            return False

        return self.to_dict() == other.to_dict()
Apache License 2.0
spyder-ide/spyder-unittest
spyder_unittest/widgets/datatree.py
TestDataView.rowsInserted
python
def rowsInserted(self, parent, firstRow, lastRow):
    QTreeView.rowsInserted(self, parent, firstRow, lastRow)
    self.resizeColumns()
    self.spanFirstColumn(firstRow, lastRow)
Called when rows are inserted.
https://github.com/spyder-ide/spyder-unittest/blob/fc29baa9edd8614a341bbcfde93aa6fea5c4afb5/spyder_unittest/widgets/datatree.py#L89-L93
from collections import Counter
from operator import attrgetter

from qtpy import PYQT4
from qtpy.QtCore import QAbstractItemModel, QModelIndex, Qt, Signal
from qtpy.QtGui import QBrush, QColor, QFont
from qtpy.QtWidgets import QMenu, QTreeView
from spyder.config.base import get_translation
from spyder.utils.qthelpers import create_action

from spyder_unittest.backend.abbreviator import Abbreviator
from spyder_unittest.backend.runnerbase import Category

try:
    _ = get_translation("unittest", dirname="spyder_unittest")
except KeyError:
    import gettext
    _ = gettext.gettext

COLORS = {
    Category.OK: QBrush(QColor("#C1FFBA")),
    Category.FAIL: QBrush(QColor("#FF5050")),
    Category.SKIP: QBrush(QColor("#C5C5C5")),
    Category.PENDING: QBrush(QColor("#C5C5C5"))
}

COLORS_DARK = {
    Category.OK: QBrush(QColor("#008000")),
    Category.FAIL: QBrush(QColor("#C6001E")),
    Category.SKIP: QBrush(QColor("#505050")),
    Category.PENDING: QBrush(QColor("#505050"))
}

STATUS_COLUMN = 0
NAME_COLUMN = 1
MESSAGE_COLUMN = 2
TIME_COLUMN = 3

HEADERS = [_('Status'), _('Name'), _('Message'), _('Time (ms)')]

TOPLEVEL_ID = 2 ** 32 - 1


class TestDataView(QTreeView):
    sig_edit_goto = Signal(str, int)

    def __init__(self, parent=None):
        QTreeView.__init__(self, parent)
        self.header().setDefaultAlignment(Qt.AlignCenter)
        self.setItemsExpandable(True)
        self.setSortingEnabled(True)
        self.header().setSortIndicatorShown(False)
        self.header().sortIndicatorChanged.connect(self.sortByColumn)
        self.header().sortIndicatorChanged.connect(
            lambda col, order: self.header().setSortIndicatorShown(True))
        self.setExpandsOnDoubleClick(False)
        self.doubleClicked.connect(self.go_to_test_definition)

    def reset(self):
        QTreeView.reset(self)
        self.resizeColumns()
        self.spanFirstColumn(0, self.model().rowCount() - 1)
MIT License
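Illustrative note (not part of the record): `rowsInserted` is the standard `QTreeView` hook that Qt invokes after the model inserts rows, so the override above chains to the base implementation and then re-fits the layout. A hedged sketch of the same pattern with a hypothetical subclass, assuming a model has been set on the view:

from qtpy.QtWidgets import QTreeView

class AutoFitTreeView(QTreeView):
    # hypothetical subclass mirroring the override pattern above
    def rowsInserted(self, parent, firstRow, lastRow):
        super().rowsInserted(parent, firstRow, lastRow)  # keep Qt's behavior
        for col in range(self.model().columnCount()):
            self.resizeColumnToContents(col)  # stand-in for resizeColumns()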
virtuesecurity/aws-extender
BappModules/boto/__init__.py
connect_sts
python
def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    from boto.sts import STSConnection
    return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID

:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key

:rtype: :class:`boto.sts.STSConnection`
:return: A connection to Amazon's STS
https://github.com/virtuesecurity/aws-extender/blob/3029dd26bd7bdf7f4148e1e92adf9f8c547cafbe/BappModules/boto/__init__.py#L566-L578
from boto.pyami.config import Config, BotoConfigLocations from boto.storage_uri import BucketStorageUri, FileStorageUri import boto.plugin import datetime import os import platform import re import sys import logging import logging.config from boto.compat import urlparse from boto.exception import InvalidUriError __version__ = '2.48.0' Version = __version__ datetime.datetime.strptime('', '') UserAgent = 'Boto/%s Python/%s %s/%s' % ( __version__, platform.python_version(), platform.system(), platform.release() ) config = Config() BUCKET_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,253}[a-zA-Z0-9]$') TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}') GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)' r'#(?P<generation>[0-9]+)$') VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$') ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json') def init_logging(): for file in BotoConfigLocations: try: logging.config.fileConfig(os.path.expanduser(file)) except: pass class NullHandler(logging.Handler): def emit(self, record): pass log = logging.getLogger('boto') perflog = logging.getLogger('boto.perf') log.addHandler(NullHandler()) perflog.addHandler(NullHandler()) init_logging() def set_file_logger(name, filepath, level=logging.INFO, format_string=None): global log if not format_string: format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s" logger = logging.getLogger(name) logger.setLevel(level) fh = logging.FileHandler(filepath) fh.setLevel(level) formatter = logging.Formatter(format_string) fh.setFormatter(formatter) logger.addHandler(fh) log = logger def set_stream_logger(name, level=logging.DEBUG, format_string=None): global log if not format_string: format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s" logger = logging.getLogger(name) logger.setLevel(level) fh = logging.StreamHandler() fh.setLevel(level) formatter = logging.Formatter(format_string) fh.setFormatter(formatter) logger.addHandler(fh) log = logger def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.sqs.connection import SQSConnection return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.s3.connection import S3Connection return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs): from boto.gs.connection import GSConnection return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs) def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.connection import EC2Connection return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.elb import ELBConnection return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.autoscale import AutoScaleConnection return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.cloudwatch import CloudWatchConnection return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.sdb.connection import SDBConnection 
return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.fps.connection import FPSConnection return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.mturk.connection import MTurkConnection return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.cloudfront import CloudFrontConnection return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.vpc import VPCConnection return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.rds import RDSConnection return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.rds2.layer1 import RDSConnection return RDSConnection( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, **kwargs ) def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.emr import EmrConnection return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.sns import SNSConnection return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.iam import IAMConnection return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.route53 import Route53Connection return Route53Connection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.cloudformation import CloudFormationConnection return CloudFormationConnection(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None, port=8773, path='/services/Eucalyptus', is_secure=False, **kwargs): from boto.ec2 import EC2Connection from boto.ec2.regioninfo import RegionInfo if not aws_access_key_id: aws_access_key_id = config.get('Credentials', 'euca_access_key_id', None) if not aws_secret_access_key: aws_secret_access_key = config.get('Credentials', 'euca_secret_access_key', None) if not host: host = config.get('Boto', 'eucalyptus_host', None) reg = RegionInfo(name='eucalyptus', endpoint=host) return EC2Connection(aws_access_key_id, aws_secret_access_key, region=reg, port=port, path=path, is_secure=is_secure, **kwargs) def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.glacier.layer2 import Layer2 return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs) def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ec2.regioninfo import RegionInfo purl = urlparse(url) kwargs['port'] = purl.port kwargs['host'] = purl.hostname kwargs['path'] = purl.path if not 'is_secure' in kwargs: kwargs['is_secure'] = (purl.scheme == "https") kwargs['region'] = RegionInfo(name=purl.hostname, 
endpoint=purl.hostname) kwargs['aws_access_key_id'] = aws_access_key_id kwargs['aws_secret_access_key'] = aws_secret_access_key return(connect_ec2(**kwargs)) def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None, port=8773, path='/services/Walrus', is_secure=False, **kwargs): from boto.s3.connection import S3Connection from boto.s3.connection import OrdinaryCallingFormat if not aws_access_key_id: aws_access_key_id = config.get('Credentials', 'euca_access_key_id', None) if not aws_secret_access_key: aws_secret_access_key = config.get('Credentials', 'euca_secret_access_key', None) if not host: host = config.get('Boto', 'walrus_host', None) return S3Connection(aws_access_key_id, aws_secret_access_key, host=host, port=port, path=path, calling_format=OrdinaryCallingFormat(), is_secure=is_secure, **kwargs) def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): from boto.ses import SESConnection return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
MIT License
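Illustrative usage sketch (not part of the record); the key strings are placeholders, and boto can also discover credentials from its config files or environment variables if none are passed:

import boto

sts = boto.connect_sts('AKIA...EXAMPLE', 'secret-key-example')
creds = sts.get_session_token(duration=3600)   # temporary session credentials
print(creds.access_key, creds.expiration)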
media-smart/volkscv
volkscv/metrics/classification/libs/utils/_label.py
LabelEncoder.fit
python
def fit(self, y):
    y = column_or_1d(y, warn=True)
    self.classes_ = _encode(y)
    return self
Fit label encoder

Parameters
----------
y : array-like of shape (n_samples,)
    Target values.

Returns
-------
self : returns an instance of self.
https://github.com/media-smart/volkscv/blob/62b991e3810f6dfc5ae073ec7fd196529efc1543/volkscv/metrics/classification/libs/utils/_label.py#L358-L372
import numpy as np import scipy.sparse as sp from ._base import check_array from .multiclass import type_of_target, unique_labels from .validation import column_or_1d, _num_samples, check_is_fitted from .base import BaseEstimator, TransformerMixin def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False): if not isinstance(y, list): y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None) else: if _num_samples(y) == 0: raise ValueError('y has 0 samples: %r' % y) if neg_label >= pos_label: raise ValueError("neg_label={0} must be strictly less than " "pos_label={1}.".format(neg_label, pos_label)) if (sparse_output and (pos_label == 0 or neg_label != 0)): raise ValueError("Sparse binarization is only supported with non " "zero pos_label and zero neg_label, got " "pos_label={0} and neg_label={1}" "".format(pos_label, neg_label)) pos_switch = pos_label == 0 if pos_switch: pos_label = -neg_label y_type = type_of_target(y) if 'multioutput' in y_type: raise ValueError("Multioutput target data is not supported with label " "binarization") if y_type == 'unknown': raise ValueError("The type of target data is not known") n_samples = y.shape[0] if sp.issparse(y) else len(y) n_classes = len(classes) classes = np.asarray(classes) if y_type == "binary": if n_classes == 1: if sparse_output: return sp.csr_matrix((n_samples, 1), dtype=int) else: Y = np.zeros((len(y), 1), dtype=np.int) Y += neg_label return Y elif len(classes) >= 3: y_type = "multiclass" sorted_class = np.sort(classes) if y_type == "multilabel-indicator": y_n_classes = y.shape[1] if hasattr(y, 'shape') else len(y[0]) if classes.size != y_n_classes: raise ValueError("classes {0} mismatch with the labels {1}" " found in the data" .format(classes, unique_labels(y))) if y_type in ("binary", "multiclass"): y = column_or_1d(y) y_in_classes = np.in1d(y, classes) y_seen = y[y_in_classes] indices = np.searchsorted(sorted_class, y_seen) indptr = np.hstack((0, np.cumsum(y_in_classes))) data = np.empty_like(indices) data.fill(pos_label) Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes)) elif y_type == "multilabel-indicator": Y = sp.csr_matrix(y) if pos_label != 1: data = np.empty_like(Y.data) data.fill(pos_label) Y.data = data else: raise ValueError("%s target data is not supported with label " "binarization" % y_type) if not sparse_output: Y = Y.toarray() Y = Y.astype(int, copy=False) if neg_label != 0: Y[Y == 0] = neg_label if pos_switch: Y[Y == pos_label] = 0 else: Y.data = Y.data.astype(int, copy=False) if np.any(classes != sorted_class): indices = np.searchsorted(sorted_class, classes) Y = Y[:, indices] if y_type == "binary": if sparse_output: Y = Y.getcol(-1) else: Y = Y[:, -1].reshape((-1, 1)) return Y def _encode_python(values, uniques=None, encode=False): if uniques is None: uniques = sorted(set(values)) uniques = np.array(uniques, dtype=values.dtype) if encode: table = {val: i for i, val in enumerate(uniques)} try: encoded = np.array([table[v] for v in values]) except KeyError as e: raise ValueError("y contains previously unseen labels: %s" % str(e)) return uniques, encoded else: return uniques def _encode_numpy(values, uniques=None, encode=False, check_unknown=True): if uniques is None: if encode: uniques, encoded = np.unique(values, return_inverse=True) return uniques, encoded else: return np.unique(values) if encode: if check_unknown: diff = _encode_check_unknown(values, uniques) if diff: raise ValueError("y contains previously unseen labels: %s" % str(diff)) encoded = 
np.searchsorted(uniques, values) return uniques, encoded else: return uniques def _encode(values, uniques=None, encode=False, check_unknown=True): if values.dtype == object: try: res = _encode_python(values, uniques, encode) except TypeError: raise TypeError("argument must be a string or number") return res else: return _encode_numpy(values, uniques, encode, check_unknown=check_unknown) def _encode_check_unknown(values, uniques, return_mask=False): if values.dtype == object: uniques_set = set(uniques) diff = list(set(values) - uniques_set) if return_mask: if diff: valid_mask = np.array([val in uniques_set for val in values]) else: valid_mask = np.ones(len(values), dtype=bool) return diff, valid_mask else: return diff else: unique_values = np.unique(values) diff = list(np.setdiff1d(unique_values, uniques, assume_unique=True)) if return_mask: if diff: valid_mask = np.in1d(values, uniques) else: valid_mask = np.ones(len(values), dtype=bool) return diff, valid_mask else: return diff class LabelEncoder(TransformerMixin, BaseEstimator):
Apache License 2.0
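Illustrative usage sketch (not part of the record): `fit` simply records the sorted unique labels on `classes_`, mirroring scikit-learn's `LabelEncoder`, from which this module appears to be vendored. Import path inferred from the record's function_path:

import numpy as np
from volkscv.metrics.classification.libs.utils._label import LabelEncoder

le = LabelEncoder()
le.fit(np.array(['cat', 'dog', 'cat', 'bird']))
print(le.classes_)   # -> ['bird' 'cat' 'dog'], the sorted unique labels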
indigo-dc/udocker
udocker/container/localrepo.py
LocalRepository._verify_layer_file
python
def _verify_layer_file(self, structure, layer_id):
    (layer_algorithm, layer_hash) = self._split_layer_id(layer_id)
    layer_f = structure["repolayers"][layer_id]["layer_f"]
    if not (os.path.exists(layer_f) and os.path.islink(layer_f)):
        Msg().err("Error: layer data file symbolic link not found",
                  layer_id)
        return False
    if not os.path.exists(self.cur_tagdir + '/' + os.readlink(layer_f)):
        Msg().err("Error: layer data file not found")
        return False
    if "gzip" in OSInfo('/').get_filetype(layer_f):
        if not FileUtil(layer_f).verify_tar():
            Msg().err("Error: layer tar verify failed:", layer_f)
            return False
    if layer_algorithm:
        layer_f_chksum = ChkSUM().hash(layer_f, layer_algorithm)
        if layer_f_chksum and layer_f_chksum != layer_hash:
            Msg().err("Error: layer file chksum failed:", layer_f)
            return False
    return True
Verify layer file in repository
https://github.com/indigo-dc/udocker/blob/87fb41cb5bcdb211d70f2b7f067c8e33d8959a1f/udocker/container/localrepo.py#L708-L730
import os import re import sys import stat import json from udocker import is_genstr from udocker.config import Config from udocker.msg import Msg from udocker.utils.fileutil import FileUtil from udocker.utils.chksum import ChkSUM from udocker.utils.uprocess import Uprocess from udocker.helper.osinfo import OSInfo class LocalRepository(object): def __init__(self, topdir=None): self.topdir = topdir if topdir else Config.conf['topdir'] self.bindir = Config.conf['bindir'] self.libdir = Config.conf['libdir'] self.docdir = Config.conf['docdir'] self.reposdir = Config.conf['reposdir'] self.layersdir = Config.conf['layersdir'] self.containersdir = Config.conf['containersdir'] self.homedir = Config.conf['homedir'] if not self.bindir: self.bindir = self.topdir + "/bin" if not self.libdir: self.libdir = self.topdir + "/lib" if not self.docdir: self.docdir = self.topdir + "/doc" if not self.reposdir: self.reposdir = self.topdir + "/repos" if not self.layersdir: self.layersdir = self.topdir + "/layers" if not self.containersdir: self.containersdir = self.topdir + "/containers" self.cur_repodir = "" self.cur_tagdir = "" self.cur_containerdir = "" FileUtil(self.reposdir).register_prefix() FileUtil(self.layersdir).register_prefix() FileUtil(self.containersdir).register_prefix() def setup(self, topdir=None): self.__init__(topdir) def create_repo(self): try: if not os.path.exists(self.topdir): os.makedirs(self.topdir) if not os.path.exists(self.reposdir): os.makedirs(self.reposdir) if not os.path.exists(self.layersdir): os.makedirs(self.layersdir) if not os.path.exists(self.containersdir): os.makedirs(self.containersdir) if not os.path.exists(self.bindir): os.makedirs(self.bindir) if not os.path.exists(self.libdir): os.makedirs(self.libdir) if not os.path.exists(self.docdir): os.makedirs(self.docdir) if not (Config.conf['keystore'].startswith("/") or os.path.exists(self.homedir)): os.makedirs(self.homedir) except(IOError, OSError): return False return True def is_repo(self): dirs_exist = [os.path.exists(self.reposdir), os.path.exists(self.layersdir), os.path.exists(self.containersdir), os.path.exists(self.bindir), os.path.exists(self.libdir)] return all(dirs_exist) def is_container_id(self, obj): if not is_genstr(obj): return False match = re.match( "^[a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+-[a-z0-9]+$", obj) if match: return True return False def protect_container(self, container_id): return self._protect(self.cd_container(container_id)) def unprotect_container(self, container_id): return self._unprotect(self.cd_container(container_id)) def isprotected_container(self, container_id): return self._isprotected(self.cd_container(container_id)) def _protect(self, directory): try: open(directory + "/PROTECT", 'w').close() return True except (IOError, OSError): return False def _unprotect(self, directory): return FileUtil(directory + "/PROTECT").remove() def _isprotected(self, directory): return os.path.exists(directory + "/PROTECT") def iswriteable_container(self, container_id): container_root = self.cd_container(container_id) + "/ROOT" if not os.path.exists(container_root): return 2 if not os.path.isdir(container_root): return 3 if os.access(container_root, os.W_OK): return 1 return 0 def get_size(self, container_id): container_root = self.cd_container(container_id) + "/ROOT" try: size, dummy = Uprocess().get_output(["du", "-s", "-m", "-x", container_root]).split() return int(size) except (ValueError, NameError, AttributeError): return -1 def get_containers_list(self, dir_only=True): containers_list = [] if not 
os.path.isdir(self.containersdir): return [] for fname in os.listdir(self.containersdir): container_dir = self.containersdir + '/' + fname if os.path.isdir(container_dir): try: filep = open(container_dir + "/imagerepo.name", 'r') except (IOError, OSError): reponame = "" else: reponame = filep.read() filep.close() if dir_only: containers_list.append(container_dir) elif not os.path.islink(container_dir): names = self.get_container_name(fname) if not names: names = "" containers_list.append((fname, reponame, str(names))) return containers_list def del_container(self, container_id, force=False): container_dir = self.cd_container(container_id) if not container_dir: return False if container_dir in self.get_containers_list(True): for name in self.get_container_name(container_id): self.del_container_name(name) if force: FileUtil(container_dir).rchmod(stat.S_IWUSR | stat.S_IRUSR, stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR) if FileUtil(container_dir).remove(recursive=True): self.cur_containerdir = "" return True return False def cd_container(self, container_id): container_dir = self.containersdir + '/' + str(container_id) if os.path.exists(container_dir): if container_dir in self.get_containers_list(True): return container_dir return "" def _symlink(self, existing_file, link_file): if os.path.exists(link_file): return False rel_path_to_existing = os.path.relpath( existing_file, os.path.dirname(link_file)) try: os.symlink(rel_path_to_existing, link_file) except (IOError, OSError): return False return True def _name_is_valid(self, name): invalid_chars = ("/", ".", " ", "[", "]") if name and any(x in name for x in invalid_chars): return False return not len(name) > 2048 def set_container_name(self, container_id, name): if self._name_is_valid(name): container_dir = self.cd_container(container_id) if container_dir: linkname = os.path.realpath(self.containersdir + '/' + name) if os.path.exists(linkname): return False real_container_dir = os.path.realpath(container_dir) return self._symlink(real_container_dir, linkname) return False def del_container_name(self, name): if self._name_is_valid(name): linkname = self.containersdir + '/' + name if os.path.islink(linkname): return FileUtil(linkname).remove() return False def get_container_id(self, container_name): if container_name: pathname = self.containersdir + "/" + container_name if os.path.islink(pathname): return os.path.basename(os.readlink(pathname)) if os.path.isdir(pathname): return container_name return "" def get_container_name(self, container_id): if not os.path.isdir(self.containersdir): return [] link_list = [] for fname in os.listdir(self.containersdir): container = self.containersdir + "/" + fname if os.path.islink(container): real_container = os.readlink(container) if os.path.basename(real_container) == container_id: link_list.append(fname) return link_list def setup_container(self, imagerepo, tag, container_id): container_dir = self.containersdir + "/" + str(container_id) if os.path.exists(container_dir): return "" try: os.makedirs(container_dir + "/ROOT") out_imagerepo = open(container_dir + "/imagerepo.name", 'w') except (IOError, OSError): return None else: out_imagerepo.write(imagerepo + ":" + tag) out_imagerepo.close() self.cur_containerdir = container_dir return container_dir def _is_tag(self, tag_dir): try: if os.path.isfile(tag_dir + "/TAG"): return True except (IOError, OSError): pass return False def protect_imagerepo(self, imagerepo, tag): return self._protect(self.reposdir + "/" + imagerepo + "/" + tag) def 
unprotect_imagerepo(self, imagerepo, tag): return self._unprotect(self.reposdir + "/" + imagerepo + "/" + tag) def isprotected_imagerepo(self, imagerepo, tag): return self._isprotected(self.reposdir + "/" + imagerepo + "/" + tag) def cd_imagerepo(self, imagerepo, tag): if imagerepo and tag: tag_dir = self.reposdir + "/" + imagerepo + "/" + tag if os.path.exists(tag_dir): if self._is_tag(tag_dir): self.cur_repodir = self.reposdir + "/" + imagerepo self.cur_tagdir = self.cur_repodir + "/" + tag return self.cur_tagdir return "" def _find(self, filename, in_dir): found_list = [] if FileUtil(in_dir).isdir(): for fullname in os.listdir(in_dir): f_path = in_dir + '/' + fullname if os.path.islink(f_path): if filename in fullname: found_list.append(f_path) elif os.path.isdir(f_path): found_list.extend(self._find(filename, f_path)) return found_list def _inrepository(self, filename): return self._find(filename, self.reposdir) def _remove_layers(self, tag_dir, force): for fname in os.listdir(tag_dir): f_path = tag_dir + '/' + fname if os.path.islink(f_path): linkname = os.readlink(f_path) layer_file = tag_dir + '/' + linkname if not FileUtil(f_path).remove() and not force: return False if not self._inrepository(os.path.basename(linkname)): if not FileUtil(layer_file).remove() and not force: return False return True def del_imagerepo(self, imagerepo, tag, force=False): tag_dir = self.cd_imagerepo(imagerepo, tag) if (tag_dir and self._remove_layers(tag_dir, force) and FileUtil(tag_dir).remove(recursive=True)): self.cur_repodir = "" self.cur_tagdir = "" while imagerepo: FileUtil(self.reposdir + '/' + imagerepo).rmdir() imagerepo = "/".join(imagerepo.split("/")[:-1]) return True return False def _get_tags(self, tag_dir): tag_list = [] if FileUtil(tag_dir).isdir(): for fname in os.listdir(tag_dir): f_path = tag_dir + '/' + fname if self._is_tag(f_path): tag_list.append( (tag_dir.replace(self.reposdir + '/', ""), fname)) elif os.path.isdir(f_path): tag_list.extend(self._get_tags(f_path)) return tag_list def get_imagerepos(self): return self._get_tags(self.reposdir) def get_layers(self, imagerepo, tag): layers_list = [] tag_dir = self.cd_imagerepo(imagerepo, tag) if tag_dir: for fname in os.listdir(tag_dir): filename = tag_dir + "/" + fname if os.path.islink(filename): size = FileUtil(filename).size() layers_list.append((filename, size)) return layers_list def add_image_layer(self, filename, linkname=None): if not self.cur_tagdir: return False if not os.path.exists(filename): return False if not os.path.exists(self.cur_tagdir): return False if linkname: linkname = self.cur_tagdir + '/' + os.path.basename(linkname) else: linkname = self.cur_tagdir + '/' + os.path.basename(filename) if os.path.islink(linkname): FileUtil(linkname).remove() self._symlink(filename, linkname) return True def setup_imagerepo(self, imagerepo): if not imagerepo: return None directory = self.reposdir + "/" + imagerepo try: if not os.path.exists(directory): os.makedirs(directory) self.cur_repodir = directory return True self.cur_repodir = directory return False except (IOError, OSError): return None def setup_tag(self, tag): directory = self.cur_repodir + "/" + tag try: if not os.path.exists(directory): os.makedirs(directory) self.cur_tagdir = directory out_tag = open(directory + "/TAG", 'w') except (IOError, OSError): return False else: out_tag.write(self.cur_repodir + ":" + tag) out_tag.close() return True def set_version(self, version): if not (self.cur_repodir and self.cur_tagdir): return False if not 
os.path.exists(self.cur_repodir): return False if not os.path.exists(self.cur_tagdir): return False directory = self.cur_tagdir if (os.path.exists(directory + "/v1") and version != "v1" or os.path.exists(directory + "/v2") and version != "v2"): if len(os.listdir(directory)) == 1: try: FileUtil(directory + "/v1").remove() FileUtil(directory + "/v2").remove() except (IOError, OSError): pass if os.listdir(directory): return False try: open(directory + "/" + version, 'a').close() except (IOError, OSError): return False return True def _get_image_attributes_v1(self, directory): files = [] layer_list = self.load_json("ancestry") if layer_list: for layer_id in reversed(layer_list): layer_file = directory + '/' + layer_id + ".layer" if not os.path.exists(layer_file): return (None, None) files.append(layer_file) json_file_list = [directory + "/container.json", directory + '/' + layer_list[0] + ".json", ] for json_file in json_file_list: if os.path.exists(json_file): container_json = self.load_json(json_file) return (container_json, files) return (None, None) def _get_image_attributes_v2_s1(self, directory, manifest): files = [] for layer in reversed(manifest["fsLayers"]): layer_file = directory + '/' + layer["blobSum"] if not os.path.exists(layer_file): return (None, None) files.append(layer_file) try: json_string = manifest["history"][0]["v1Compatibility"].strip() container_json = json.loads(json_string) except (IOError, OSError, AttributeError, ValueError, TypeError, IndexError, KeyError): return (None, files) return (container_json, files) def _get_image_attributes_v2_s2(self, directory, manifest): files = [] for layer in manifest["layers"]: layer_file = directory + '/' + layer["digest"] if not os.path.exists(layer_file): return (None, None) files.append(layer_file) try: json_file = directory + '/' + manifest["config"]["digest"] container_json = json.loads(FileUtil(json_file).getdata('r')) except (IOError, OSError, AttributeError, ValueError, TypeError, IndexError, KeyError): return (None, files) return (container_json, files) def get_image_attributes(self): directory = self.cur_tagdir if os.path.exists(directory + "/v1"): return self._get_image_attributes_v1(directory) if os.path.exists(directory + "/v2"): manifest = self.load_json("manifest") if manifest and "fsLayers" in manifest: return self._get_image_attributes_v2_s1(directory, manifest) if manifest and "layers" in manifest: return self._get_image_attributes_v2_s2(directory, manifest) return (None, None) def save_json(self, filename, data): if filename.startswith("/"): out_filename = filename else: if not (self.cur_repodir and self.cur_tagdir): return False if not os.path.exists(self.cur_repodir): return False if not os.path.exists(self.cur_tagdir): return False out_filename = self.cur_tagdir + "/" + filename outfile = None try: outfile = open(out_filename, 'w') json.dump(data, outfile) except (IOError, OSError, AttributeError, ValueError, TypeError): if outfile: outfile.close() return False outfile.close() return True def load_json(self, filename): if filename.startswith('/'): in_filename = filename else: if not (self.cur_repodir and self.cur_tagdir): return False if not os.path.exists(self.cur_repodir): return False if not os.path.exists(self.cur_tagdir): return False in_filename = self.cur_tagdir + '/' + filename json_obj = None infile = None try: infile = open(in_filename, 'r') json_obj = json.load(infile) except (IOError, OSError, AttributeError, ValueError, TypeError): pass if infile: infile.close() return json_obj def 
_load_structure(self, imagetagdir): structure = {} structure["repolayers"] = dict() if FileUtil(imagetagdir).isdir(): for fname in os.listdir(imagetagdir): f_path = imagetagdir + '/' + fname if fname == "ancestry": structure["ancestry"] = self.load_json(f_path) if fname == "manifest": structure["manifest"] = self.load_json(f_path) if len(fname) >= 64: layer_id = fname.replace(".json", "").replace(".layer", "") if layer_id not in structure["repolayers"]: structure["repolayers"][layer_id] = dict() if fname.endswith("json"): structure["repolayers"][layer_id]["json"] = self.load_json(f_path) structure["repolayers"][layer_id]["json_f"] = f_path structure["has_json_f"] = True elif fname.endswith("layer"): structure["repolayers"][layer_id]["layer_f"] = f_path elif ':' in fname: structure["repolayers"][layer_id]["layer_f"] = f_path else: Msg().out("Warning: unkwnon file in layer:", f_path, l=Msg.WAR) elif fname in ("TAG", "v1", "v2", "PROTECT", "container.json"): pass Msg().out("Warning: unkwnon file in image:", f_path, l=Msg.WAR) return structure def _find_top_layer_id(self, structure, my_layer_id=""): if "repolayers" not in structure: return "" if not my_layer_id: if sys.version_info[0] >= 3: my_layer_id = list(structure["repolayers"].keys())[0] else: my_layer_id = structure["repolayers"].keys()[0] found = "" for layer_id in structure["repolayers"]: if "json" not in structure["repolayers"][layer_id]: continue if "parent" not in structure["repolayers"][layer_id]["json"]: continue if (my_layer_id == structure["repolayers"][layer_id]["json"]["parent"]): found = self._find_top_layer_id(structure, layer_id) break if not found: return my_layer_id return found def _sorted_layers(self, structure, top_layer_id): sorted_layers = [] next_layer = top_layer_id while next_layer: sorted_layers.append(next_layer) if "json" not in structure["repolayers"][next_layer]: break if "parent" not in structure["repolayers"][next_layer]["json"]: break next_layer = structure["repolayers"][next_layer]["json"]["parent"] if not next_layer: break return sorted_layers def _split_layer_id(self, layer_id): if ':' in layer_id: return layer_id.split(":", 1) return ("", layer_id)
Apache License 2.0
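Illustrative sketch (not part of the record); this pokes private `LocalRepository` methods purely to show where `_verify_layer_file` sits in the verification flow, and the image name is hypothetical:

from udocker.container.localrepo import LocalRepository

repo = LocalRepository()                      # defaults to Config.conf['topdir']
tag_dir = repo.cd_imagerepo('library/ubuntu', 'latest')
structure = repo._load_structure(tag_dir)     # defined in the context above
for layer_id in structure['repolayers']:
    ok = repo._verify_layer_file(structure, layer_id)
    print(layer_id, 'OK' if ok else 'FAILED')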
siviltaram/persona-dialogue-generation
parlai/agents/legacy_agents/seq2seq/modules_v1.py
Seq2seq._decode_forced
python
def _decode_forced(self, ys, encoder_states):
    bsz = ys.size(0)
    seqlen = ys.size(1)

    hidden = encoder_states[1]
    attn_params = (encoder_states[0], encoder_states[2])

    # decoder input is START followed by every gold token except the last
    y_in = ys.narrow(1, 0, seqlen - 1)
    xs = torch.cat([self._starts(bsz), y_in], 1)

    scores = []
    if self.attn_type == 'none':
        # no attention: decode the whole sequence in a single call
        output, hidden = self.decoder(xs, hidden, attn_params)
        score = self.output(output)
        scores.append(score)
    else:
        # with attention, decode one step at a time
        for i in range(seqlen):
            xi = xs.select(1, i).unsqueeze(1)
            output, hidden = self.decoder(xi, hidden, attn_params)
            score = self.output(output)
            scores.append(score)

    scores = torch.cat(scores, 1)
    return scores
Decode with teacher forcing.
https://github.com/siviltaram/persona-dialogue-generation/blob/3cc800ffe3c5a8d16ed26522cda839acfab8d417/parlai/agents/legacy_agents/seq2seq/modules_v1.py#L112-L141
import math import torch import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence import torch.nn.functional as F from parlai.core.utils import NEAR_INF def opt_to_kwargs(opt): kwargs = {} for k in ['numlayers', 'dropout', 'bidirectional', 'rnn_class', 'lookuptable', 'decoder', 'numsoftmax', 'attention', 'attention_length', 'attention_time', 'input_dropout']: if k in opt: kwargs[k] = opt[k] return kwargs def pad(tensor, length, dim=0, pad=0): if tensor.size(dim) < length: return torch.cat( [tensor, tensor.new(*tensor.size()[:dim], length - tensor.size(dim), *tensor.size()[dim + 1:]).fill_(pad)], dim=dim) else: return tensor class Seq2seq(nn.Module): RNN_OPTS = {'rnn': nn.RNN, 'gru': nn.GRU, 'lstm': nn.LSTM} def __init__( self, num_features, embeddingsize, hiddensize, numlayers=2, dropout=0, bidirectional=False, rnn_class='lstm', lookuptable='unique', decoder='same', numsoftmax=1, attention='none', attention_length=48, attention_time='post', padding_idx=0, start_idx=1, unknown_idx=3, input_dropout=0, longest_label=1, ): super().__init__() self.attn_type = attention self.NULL_IDX = padding_idx self.register_buffer('START', torch.LongTensor([start_idx])) self.longest_label = longest_label rnn_class = Seq2seq.RNN_OPTS[rnn_class] self.decoder = RNNDecoder( num_features, embeddingsize, hiddensize, padding_idx=padding_idx, rnn_class=rnn_class, numlayers=numlayers, dropout=dropout, attn_type=attention, attn_length=attention_length, attn_time=attention_time, bidir_input=bidirectional) shared_lt = (self.decoder.lt if lookuptable in ('enc_dec', 'all') else None) shared_rnn = self.decoder.rnn if decoder == 'shared' else None self.encoder = RNNEncoder( num_features, embeddingsize, hiddensize, padding_idx=padding_idx, rnn_class=rnn_class, numlayers=numlayers, dropout=dropout, bidirectional=bidirectional, shared_lt=shared_lt, shared_rnn=shared_rnn, unknown_idx=unknown_idx, input_dropout=input_dropout) shared_weight = (self.decoder.lt.weight if lookuptable in ('dec_out', 'all') else None) self.output = OutputLayer( num_features, embeddingsize, hiddensize, dropout=dropout, numsoftmax=numsoftmax, shared_weight=shared_weight, padding_idx=padding_idx) def _encode(self, xs, prev_enc=None): if prev_enc is not None: return prev_enc else: return self.encoder(xs) def _starts(self, bsz): return self.START.detach().expand(bsz, 1)
MIT License
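Illustrative sketch (not part of the record): `_decode_forced` implements teacher forcing, feeding the gold previous token at every decoder step instead of the model's own prediction. A shape-level sketch, assuming the module is importable and that `_encode` returns the `(output, hidden, attention mask)` tuple that `_decode_forced` unpacks:

import torch
from parlai.agents.legacy_agents.seq2seq.modules_v1 import Seq2seq

model = Seq2seq(num_features=100, embeddingsize=16, hiddensize=32)
xs = torch.randint(2, 100, (4, 7))    # input token ids
ys = torch.randint(2, 100, (4, 10))   # gold target ids for teacher forcing

scores = model._decode_forced(ys, model._encode(xs))
print(scores.shape)   # expected (4, 10, 100): batch x target_len x vocab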
mme/vergeml
vergeml/command.py
Command.parse
python
def parse(self, argv):
    res = {}
    at_names, rest = parse_trained_models(argv)

    sub_res = self._parse_subcommand(argv, rest)
    if sub_res is not None:
        return sub_res

    at_opt = self._parse_at_option(at_names, res)

    assert self.name == rest.pop(0)

    if self.free_form:
        return (res.get(at_opt.name) if at_opt else None, rest)

    args, extra = self._parse_opts(rest)
    self._parse_arguments(extra, res)
    self._parse_validate(args, res)

    return res
Parse command line options.
https://github.com/mme/vergeml/blob/3dc30ba4e0f3d038743b6d468860cbcf3681acc6/vergeml/command.py#L375-L404
import inspect import getopt from copy import deepcopy from vergeml.plugins import PLUGINS from vergeml.utils import did_you_mean, VergeMLError, parse_trained_models from vergeml.option import Option from vergeml.config import parse_command _CMD_META_KEY = '__vergeml_command__' class _CommandCallProxy: def __init__(self, cmd, obj): self.__cmd__ = cmd self.__wrapped_obj__ = obj @staticmethod def _wrap_call(cmd, fun, args, env): fn_args = deepcopy(args) config_name = cmd.name if env.current_command: sub_option = next(filter(lambda c: c.subcommand, env.current_command[0].options), None) if sub_option and args.get(sub_option.name) == cmd.name: config_name = env.current_command[0].name + '.' + cmd.name if not cmd.free_form: config = parse_command(cmd, env.get(config_name)) for k, arg in config.items(): fn_args.setdefault(k, arg) for opt in cmd.options: if opt.name not in fn_args and (opt.default is not None or not opt.is_required()): fn_args[opt.name] = opt.default for opt in cmd.options: if opt.is_required() and opt.name not in fn_args: raise VergeMLError(f'Missing argument --{opt.name}.', help_topic=cmd.name) env.current_command = (cmd, fn_args) env.set_defaults(cmd.name, fn_args) return fun(fn_args, env) @staticmethod def class_wrapper(klass, name): def _wrapper(*args, **kwargs): return _CommandCallProxy(name, klass(*args, **kwargs)) return _wrapper def __call__(self, args, env): return _CommandCallProxy._wrap_call( self.__cmd__, self.__wrapped_obj__, args, env) def __getattr__(self, name): if name in ('__wrapped_obj__', '__cmd__'): raise AttributeError() return getattr(self.__wrapped_obj__, name) def __setattr__(self, name, value): if name in ('__wrapped_obj__', '__cmd__'): self.__dict__[name] = value else: setattr(self.__wrapped_obj__, name, value) @staticmethod def function_wrapper(fun, name): def _wrapper(*fn_args): *self_, args, env = fn_args _fun = fun.__get__(self_[0]) if self_ else fun return _CommandCallProxy._wrap_call(name, _fun, args, env) return _wrapper def command(name=None, descr=None, long_descr=None, examples=None, free_form=False, type='command'): def decorator(obj): assert getattr(obj, _CMD_META_KEY, None) is None _name = name or getattr(obj, '__name__', None) options = list(reversed(Option.discover(obj))) cmd = Command(_name, descr=descr, long_descr=long_descr, examples=examples, options=options, free_form=free_form, type=type) if inspect.isclass(obj): setattr(obj, _CMD_META_KEY, cmd) _wrapper = _CommandCallProxy.class_wrapper(obj, cmd) else: _wrapper = _CommandCallProxy.function_wrapper(obj, cmd) setattr(_wrapper, _CMD_META_KEY, cmd) return _wrapper return decorator def train(name=None, descr=None, long_descr=None, examples=None, free_form=False): return command(name=name, descr=descr, long_descr=long_descr, examples=examples, free_form=free_form, type='train') def predict(name=None, descr=None, long_descr=None, examples=None, free_form=False): return command(name=name, descr=descr, long_descr=long_descr, examples=examples, free_form=free_form, type='predict') class Command: def __init__(self, name, descr=None, long_descr=None, examples=None, free_form=False, type='command', options=None, plugins=PLUGINS): self.name = name self.descr = (descr or long_descr or "") self.long_descr = (long_descr or descr or "") self.examples = examples self.options = options or [] self.plugins = plugins self.type = type self.free_form = free_form at_option = list(filter(lambda o: o.is_at_option(), options)) assert len(at_option) <= 1, "Can only have one @option." 
if at_option: at_option = at_option[0] assert at_option.has_type(None, list, '@', 'Optional[@]', 'List[@]') arg_param = list(filter(lambda o: o.is_argument_option(), options)) assert len(arg_param) <= 1, "Can only have one argument parameter." @staticmethod def discover(obj, plugins=PLUGINS): res = None if hasattr(obj, _CMD_META_KEY): res = getattr(obj, _CMD_META_KEY) res.plugins = plugins for option in res.options: option.plugins = plugins return res @staticmethod def find_functions(obj): fns = [m[1] for m in inspect.getmembers(obj) if not m[0].startswith("_") and callable(m[1])] fns = list(sorted(fns, key=lambda f: f.__code__.co_firstlineno)) fns = filter(lambda f: hasattr(f, _CMD_META_KEY), fns) return list(fns) def usage(self, short=False, parent_command=None): opt = self._usage_partition_options() if self.long_descr and not short: result = self.long_descr.strip() + "\n\n" else: result = "" result += "Usage:\n ml" result += self._usage_command(opt, parent_command) result += self._usage_options(opt) if self.examples and not short: result += "\n\nExamples:\n" result += "\n".join(map(lambda l: " " + l, self.examples.splitlines())) return result def _usage_partition_options(self): res = dict( at=None, arg=None, sub=None, mandatory=[], optional=[] ) for option in self.options: if option.is_at_option(): res['at'] = option elif option.is_argument_option(): res['arg'] = option elif bool(option.subcommand): res['sub'] = option elif option.is_optional(): res['optional'].append(option) else: res['mandatory'].append(option) return res def _usage_command(self, opt, parent_command): result = "" if opt['at']: if opt['at'].has_type(list, 'List[@]'): result += f" [{opt['at'].name} ...]" elif opt['at'].is_optional(): result += f" [{opt['at'].name}]" else: result += f" {opt['at'].name}" result += f" {parent_command}:{self.name}" if parent_command else " " + self.name if opt['sub']: result += f":{opt['sub'].name}" if opt['mandatory']: val = " ".join(map(lambda o: f"--{o.name}=<{o.name}>", opt['mandatory'])) result += f" {val}" if opt['optional']: result += " [options]" if opt['arg']: if opt['arg'].has_type(str, list): result += f" [{opt['arg'].name} ...]" elif opt['arg'].is_optional(): result += f" [{opt['arg'].name}]" else: result += f" {opt['arg'].name}" return result def _usage_options(self, opt): result = "" indent = 2 n_spaces = 4 opt_descr = [] opt_descr = self._usage_opt_descr(opt) if opt_descr: max_name = max(map(lambda o: len(o[0]), opt_descr)) result += "\n\nOptions:" for k, val in opt_descr: result += "\n" + str(indent * ' ') space = (max_name + n_spaces) - len(k) if val: result += k + str(space * ' ') + val else: result += k if opt['sub']: plugins = self.plugins.all(opt['sub'].subcommand) max_name = max(map(len, plugins.keys())) if plugins.keys() else 0 name = opt['sub'].name.capitalize() + "s" if plugins.keys(): result += f"\n\n{name}:" for k, val in plugins.items(): result += "\n" + str(indent * ' ') space = (max_name + n_spaces) - len(k) cmd = Command.discover(val) if cmd.descr: result += k + str(space * ' ') + cmd.descr else: result += k return result def _usage_opt_descr(self, opt): opt_descr = [] if opt['at']: if opt['at'].has_type(list, 'List[@]'): opt_descr.append((opt['at'].name, "A list of trained models.")) else: opt_descr.append((opt['at'].name, "The name of a trained model.")) for option in self.options: if option.is_at_option() or option.is_argument_option() or bool(option.subcommand): continue opt_name = "--" + option.name if option.short: opt_name = "-" + option.short + ", " + 
opt_name descr = (option.descr or "") if option.default is not None: if isinstance(option.default, bool): default_str = 'true' if option.default else 'false' else: default_str = str(option.default) if descr: descr += " " descr += f"[default: {default_str}]" opt_descr.append((opt_name, descr)) if opt['arg'] and opt['arg'].descr: opt_descr.append((opt['arg'].name, opt['arg'].descr or "")) return opt_descr
MIT License
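Illustrative sketch (not part of the record): `parse` consumes an argv that still contains the command name itself, splits off any `@model` references first, then handles subcommands, options, and positional arguments. A minimal sketch using the `@command` decorator from the same module; the private `_parse_*` helpers it relies on are not shown in the context, so the exact result format is assumed:

from vergeml.command import command, Command

@command(name='hello', descr='Say hello.')
def hello(args, env):
    print('hello!')

cmd = Command.discover(hello)    # retrieves the attached Command metadata
res = cmd.parse(['hello'])       # argv as received from the CLI
print(res)                       # -> {} since no options are defined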
mjocean/pyprocgamehd-skeletongame
procgame/dmd/particle.py
ParticleLayer.reset
python
def reset(self):
    self.start_time = None
    self.stalls = self.num_hold_frames
    self.buffer.clear()
    self.ps.reset()
Resets the animation back to the first frame.
https://github.com/mjocean/pyprocgamehd-skeletongame/blob/782b2cb994f9a76cb34aedb4ae6a3a00bc0b6553/procgame/dmd/particle.py#L328-L333
import sys, random, os from dmd import Layer, Frame import sdl2 from sdl2_displaymanager import sdl2_DisplayManager import time class Particle(object): def __init__(self, x, y, emitter): self.x = x self.y = y self.parent = emitter self.life = random.randint(int(self.parent.max_life*0.80),self.parent.max_life) self.dx = random.randint(-5,5) self.dy = random.randint(-5,5) self._r = 255 self._g = 255 self._b = 255 self._a = 255 self.tx_num = 0 self.color_changed = True self.alpha_changed = True @property def r(self): return self._r @r.setter def r(self, value): self._r = value self.color_changed = True @property def g(self): return self._g @g.setter def g(self, value): self._g = value self.color_changed = True @property def b(self): return self._b @b.setter def b(self, value): self._b = value self.color_changed = True @property def a(self): return self._a @a.setter def a(self, value): self._a = value self.alpha_changed = True def update(self): self.life = self.life - 1 self.update_location() self.update_appearance() def update_location(self): self.x = int(self.x + self.dx) self.y = int(self.y + self.dy) def update_appearance(self): if(self.life < .8 * self.parent.max_life): self.b = 0 self.g = int(self.life/float(self.parent.max_life) * 220) + 35 self.a = ((self.life/float(self.parent.max_life)) * 255) class SnowParticle(Particle): def __init__(self, x, y, emitter): self.x = x + random.randint(-450,450) super(SnowParticle, self).__init__(self.x,y,emitter) self.r = 225 self.g = 225 self.b = 255 def update_location(self): self.dx = random.randint(-20,20) self.dy = random.randint(0,20) super(SnowParticle, self).update_location() def update_appearance(self): pass class FireParticle(Particle): def __init__(self, x, y, emitter): super(FireParticle, self).__init__(x,y,emitter) self.dx = random.randint(-5,5) self.dy = random.randint(-4,4) def update_location(self): self.dx = random.randint(-3,3) self.dy = random.randint(-5,1) super(FireParticle, self).update_location() class FireworkParticle(Particle): def __init__(self, x, y, emitter): super(FireworkParticle, self).__init__(x,y,emitter) self.dy = random.randint(-5,3) self.dx = random.randint(-10,10) self.a = 192 def update_location(self): if(self.life < .75 * self.parent.max_life): self.dy = 3 self.dx = 0 super(FireworkParticle, self).update_location() def update_appearance(self): if(self.life < .8 * self.parent.max_life): self.g = 0 self.b = int(self.life/float(self.parent.max_life) * 220) + 35 self.r = self.b class ParticleEmitter(object): def __init__(self, x, y, max_life=60, max_particles=200, particles_per_update=5, total_creations=None, particle_class=Particle, random_next=False, dx=0, dy=0): self.x = x self.y = y self.orig_x = x self.orig_y = y self.dx = dx self.dy = dy self.particle_class = particle_class self.random_next = random_next self.particles = list() self.particles_per_update = particles_per_update self.max_particles = max_particles self.max_life = max_life self.total_creations = total_creations self.creations_remaining = total_creations self.stopped = False for i in range(0,particles_per_update): p = self.particle_class(x,y, emitter=self) p.update() self.particles.append(p) if(self.total_creations is not None): self.creations_remaining = self.creations_remaining - particles_per_update else: self.creations_remaining = self.max_particles cwd = os.path.dirname(__file__) sprImg8 = sdl2_DisplayManager.inst().load_surface(os.path.join(cwd,"exp8.png")) sprImg16 = sdl2_DisplayManager.inst().load_surface(os.path.join(cwd,"exp16.png")) 
self.txImg8 = sdl2_DisplayManager.inst().texture_from_surface(sprImg8) self.txImg16 = sdl2_DisplayManager.inst().texture_from_surface(sprImg16) (self.p8_w,self.p8_h) = self.txImg8.size (self.p16_w,self.p16_h) = self.txImg16.size sdl2.SDL_SetTextureBlendMode(self.txImg8.texture, sdl2.SDL_BLENDMODE_BLEND) sdl2.SDL_SetTextureBlendMode(self.txImg16.texture, sdl2.SDL_BLENDMODE_BLEND) del sprImg8 del sprImg16 def reset(self, new_x=None, new_y=None): self.stopped = False if(new_x is not None): self.x = new_x else: self.x = self.orig_x if(new_y is not None): self.y = new_y else: self.y = self.orig_y for x in xrange(len(self.particles)-1,0,-1): p = self.particles[x] self.particles.remove(p) del p self.creations_remaining = self.total_creations def update(self): if(self.total_creations is None) and (not self.stopped): self.creations_remaining = self.max_particles for p in self.particles: p.update() for x in xrange(len(self.particles)-1,-1,-1): p = self.particles[x] if(p.life <= 0): self.particles.remove(p) del p if(self.stopped): return if(self.creations_remaining <= 0): if(self.random_next): if(len(self.particles)==0): self.reset(new_x = random.randint(0,200), new_y = random.randint(0,200)) return for r in range(0,min(self.particles_per_update, self.max_particles-len(self.particles), self.creations_remaining)): p = self.particle_class(self.x, self.y, emitter=self) p.update() self.particles.append(p) self.creations_remaining = self.creations_remaining - 1 self.x = self.x + self.dx self.y = self.y + self.dy def stop(self, immediate_stop = False): self.creations_remaining = 0 self.stopped = True if(immediate_stop): for x in xrange(len(self.particles)-1,-1,-1): p = self.particles[x] self.particles.remove(p) del p def draw(self, destination_texture = None): for x in xrange(0,len(self.particles)): p = self.particles[x] tx = None if(p.life > self.max_life * 0.55): tx = self.txImg16 (self.p_w, self.p_h) = (self.p16_w,self.p16_h) else: tx = self.txImg8 (self.p_w, self.p_h) = (self.p8_w,self.p8_h) if(p.color_changed): sdl2.SDL_SetTextureColorMod(tx.texture, p.r,p.g,p.b) p.color_changed = False if(p.alpha_changed): sdl2.SDL_SetTextureAlphaMod(tx.texture, int(p.a)) p.alpha_changed = False if(destination_texture is None): sdl2_DisplayManager.inst().screen_blit(tx, x=p.x, y=p.y, expand_to_fill=False) else: sdl2_DisplayManager.inst().blit(source_tx = tx, dest_tx=destination_texture, dest=(p.x,p.y,self.p_w, self.p_h)) class ParticleSystem(object): def __init__(self, emitters=None, destination_texture=None): self.emitters = emitters self.dest_tx = destination_texture def update(self): for e in self.emitters: e.update() def draw(self): for e in self.emitters: e.draw(self.dest_tx) def reset(self): for e in self.emitters: e.reset() class ParticleLayer(Layer): def __init__(self, width, height, emitters, duration=None, num_hold_frames=1): super(ParticleLayer, self).__init__() self.buffer = Frame(width, height) self.start_time = None self.duration = duration self.width = width self.height = height self.num_hold_frames = num_hold_frames self.stalls = self.num_hold_frames self.ps = ParticleSystem(emitters=emitters, destination_texture=self.buffer.pySurface) def next_frame(self): if(self.start_time is None): self.start_time = time.time() elif(self.duration is not None and (self.start_time + self.duration > time.time())): return None self.stalls = self.stalls - 1 if(self.stalls <= 0): self.stalls = self.num_hold_frames else: return self.buffer self.buffer.clear() self.ps.update() self.ps.draw() return self.buffer
MIT License
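Illustrative sketch (not part of the record); constructing an emitter requires an initialized SDL2 display (it loads particle textures through sdl2_DisplayManager), so this only runs inside the framework, and the display size is hypothetical:

from procgame.dmd.particle import ParticleLayer, ParticleEmitter, FireParticle

emitter = ParticleEmitter(x=225, y=112, max_life=60,
                          particle_class=FireParticle)
layer = ParticleLayer(450, 225, emitters=[emitter])

layer.next_frame()   # advance and draw the particle system once
layer.reset()        # back to the first frame: clears buffer, restarts emitters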
medipixel/rl_algorithms
rl_algorithms/common/abstract/distributed_logger.py
DistributedLogger.recv_log_info
python
def recv_log_info(self):
    received = False
    try:
        log_info_id = self.pull_socket.recv(zmq.DONTWAIT)
        received = True
    except zmq.Again:
        pass

    if received:
        self.log_info_queue.append(log_info_id)
Receive info from learner.
https://github.com/medipixel/rl_algorithms/blob/96bceb9d65c6d66fca59c4115cd7947f87b87ebc/rl_algorithms/common/abstract/distributed_logger.py#L134-L144
from abc import ABC, abstractmethod from collections import deque import os import shutil from typing import Dict, List import gym import numpy as np import plotly.graph_objects as go import pyarrow as pa import torch import wandb import zmq from rl_algorithms.common.env.atari_wrappers import atari_env_generator import rl_algorithms.common.env.utils as env_utils from rl_algorithms.common.helper_functions import numpy2floattensor, smoothen_graph from rl_algorithms.common.networks.brain import Brain from rl_algorithms.utils.config import ConfigDict class DistributedLogger(ABC): def __init__( self, log_cfg: ConfigDict, comm_cfg: ConfigDict, backbone: ConfigDict, head: ConfigDict, env_name: str, is_atari: bool, state_size: int, output_size: int, max_update_step: int, episode_num: int, max_episode_steps: int, interim_test_num: int, is_log: bool, is_render: bool, ): self.log_cfg = log_cfg self.comm_cfg = comm_cfg self.device = torch.device("cpu") head.configs.state_size = state_size head.configs.output_size = output_size self.brain = Brain(backbone, head).to(self.device) self.env_name = env_name self.is_atari = is_atari self.max_update_step = max_update_step self.episode_num = episode_num self.max_episode_steps = max_episode_steps self.interim_test_num = interim_test_num self.is_log = is_log self.is_render = is_render self.update_step = 0 self.log_info_queue = deque(maxlen=100) self._init_env() def _init_env(self): if self.is_atari: self.env = atari_env_generator(self.env_name, self.max_episode_steps) else: self.env = gym.make(self.env_name) self.env, self.max_episode_steps = env_utils.set_env( self.env, self.max_episode_steps ) @abstractmethod def load_params(self, path: str): if not os.path.exists(path): raise Exception( f"[ERROR] the input path does not exist. Wrong path: {path}" ) def init_communication(self): ctx = zmq.Context() self.pull_socket = ctx.socket(zmq.PULL) self.pull_socket.bind(f"tcp://127.0.0.1:{self.comm_cfg.learner_logger_port}") @abstractmethod def select_action(self, state: np.ndarray): pass @abstractmethod def write_log(self, log_value: dict): pass @staticmethod def _preprocess_state(state: np.ndarray, device: torch.device) -> torch.Tensor: state = numpy2floattensor(state, device) return state def set_wandb(self): wandb.init( project=self.env_name, name=f"{self.log_cfg.agent}/{self.log_cfg.curr_time}", ) additional_log = dict( episode_num=self.episode_num, max_episode_steps=self.max_episode_steps, ) wandb.config.update(additional_log) shutil.copy(self.log_cfg.cfg_path, os.path.join(wandb.run.dir, "config.py"))
MIT License
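Illustrative sketch (not part of the record): the method uses the standard non-blocking ZeroMQ receive pattern, where `zmq.DONTWAIT` makes `recv` raise `zmq.Again` instead of blocking when nothing is queued. The same pattern standalone (the port number is arbitrary):

import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.PULL)
sock.bind('tcp://127.0.0.1:6555')

try:
    msg = sock.recv(zmq.DONTWAIT)   # returns immediately if a message is waiting
    print('received', len(msg), 'bytes')
except zmq.Again:
    pass                            # queue empty; try again later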
pyhdi/pyverilog
pyverilog/vparser/parser.py
VerilogParser.p_expression_power
python
def p_expression_power(self, p):
    p[0] = Power(p[1], p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
expression : expression POWER expression
https://github.com/pyhdi/pyverilog/blob/2a42539bebd1b4587ee577d491ff002d0cc7295d/pyverilog/vparser/parser.py#L999-L1002
from __future__ import absolute_import from __future__ import print_function import sys import os import pathlib from ply.yacc import yacc from pyverilog.vparser.preprocessor import VerilogPreprocessor from pyverilog.vparser.lexer import VerilogLexer from pyverilog.vparser.ast import * class VerilogParser(object): precedence = ( ('left', 'LOR'), ('left', 'LAND'), ('left', 'OR'), ('left', 'XOR', 'XNOR'), ('left', 'AND'), ('left', 'EQ', 'NE', 'EQL', 'NEL'), ('left', 'LT', 'GT', 'LE', 'GE'), ('left', 'LSHIFT', 'RSHIFT', 'LSHIFTA', 'RSHIFTA'), ('left', 'PLUS', 'MINUS'), ('left', 'TIMES', 'DIVIDE', 'MOD'), ('left', 'POWER'), ('right', 'UMINUS', 'UPLUS', 'ULNOT', 'UNOT', 'UAND', 'UNAND', 'UOR', 'UNOR', 'UXOR', 'UXNOR'), ) def __init__(self, outputdir=".", debug=True): self.lexer = VerilogLexer(error_func=self._lexer_error_func) self.lexer.build() self.tokens = self.lexer.tokens pathlib.Path(outputdir).mkdir(parents=True, exist_ok=True) self.parser = yacc( module=self, method="LALR", outputdir=outputdir, debug=debug ) def _lexer_error_func(self, msg, line, column): coord = self._coord(line, column) raise ParseError('%s: %s' % (coord, msg)) def get_directives(self): return self.lexer.get_directives() def get_default_nettype(self): return self.lexer.get_default_nettype() def parse(self, text, debug=0): return self.parser.parse(text, lexer=self.lexer, debug=debug) def p_source_text(self, p): p[0] = Source(name='', description=p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_description(self, p): p[0] = Description(definitions=p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_definitions(self, p): p[0] = p[1] + (p[2],) p.set_lineno(0, p.lineno(1)) def p_definitions_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def p_definition(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_definition_pragma(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_pragma_assign(self, p): p[0] = Pragma(PragmaEntry(p[3], p[5], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_pragma(self, p): p[0] = Pragma(PragmaEntry(p[3], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_moduledef(self, p): p[0] = ModuleDef(name=p[2], paramlist=p[3], portlist=p[4], items=p[5], default_nettype=self.get_default_nettype(), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) p[0].end_lineno = p.lineno(6) def p_modulename(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_modulename_or(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_paramlist(self, p): p[0] = Paramlist(params=p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_paramlist_empty(self, p): p[0] = Paramlist(params=()) def p_params(self, p): p[0] = p[1] + (p[2],) p.set_lineno(0, p.lineno(1)) def p_params_begin(self, p): p[0] = p[1] + (p[2],) p.set_lineno(0, p.lineno(1)) def p_params_begin_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def p_params_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def p_param(self, p): paramlist = [Parameter(rname, rvalue, lineno=p.lineno(2)) for rname, rvalue in p[2]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_signed(self, p): paramlist = [Parameter(rname, rvalue, signed=True, lineno=p.lineno(2)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_width(self, p): paramlist = [Parameter(rname, rvalue, p[2], lineno=p.lineno(3)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), 
lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_signed_width(self, p): paramlist = [Parameter(rname, rvalue, p[3], signed=True, lineno=p.lineno(3)) for rname, rvalue in p[4]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_integer(self, p): paramlist = [Parameter(rname, rvalue, lineno=p.lineno(3)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_end(self, p): paramlist = [Parameter(rname, rvalue, lineno=p.lineno(2)) for rname, rvalue in p[2]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_end_signed(self, p): paramlist = [Parameter(rname, rvalue, signed=True, lineno=p.lineno(2)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_end_width(self, p): paramlist = [Parameter(rname, rvalue, p[2], lineno=p.lineno(3)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_end_signed_width(self, p): paramlist = [Parameter(rname, rvalue, p[3], signed=True, lineno=p.lineno(3)) for rname, rvalue in p[4]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_end_integer(self, p): paramlist = [Parameter(rname, rvalue, lineno=p.lineno(3)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_portlist(self, p): p[0] = Portlist(ports=p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_portlist_io(self, p): p[0] = Portlist(ports=p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_portlist_paren_empty(self, p): p[0] = Portlist(ports=(), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_portlist_empty(self, p): p[0] = Portlist(ports=(), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_ports(self, p): port = Port(name=p[3], width=None, dimensions=None, type=None, lineno=p.lineno(1)) p[0] = p[1] + (port,) p.set_lineno(0, p.lineno(1)) def p_ports_one(self, p): port = Port(name=p[1], width=None, dimensions=None, type=None, lineno=p.lineno(1)) p[0] = (port,) p.set_lineno(0, p.lineno(1)) def p_portname(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtypes(self, p): p[0] = p[1] + (p[2],) p.set_lineno(0, p.lineno(1)) def p_sigtypes_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def p_sigtype_input(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtype_output(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtype_inout(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtype_tri(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtype_reg(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtype_logic(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtype_wire(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtype_signed(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtype_supply0(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_sigtype_supply1(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_ioports(self, p): if isinstance(p[3], str): t = None for r in reversed(p[1]): if isinstance(r.first, Input): t = Ioport(Input(name=p[3], width=r.first.width, lineno=p.lineno(3)), lineno=p.lineno(3)) break if isinstance(r.first, Output) and r.second is None: t = Ioport(Output(name=p[3], width=r.first.width, lineno=p.lineno(3)), lineno=p.lineno(3)) break if isinstance(r.first, Output) and 
isinstance(r.second, Reg): t = Ioport(Output(name=p[3], width=r.first.width, lineno=p.lineno(3)), Reg(name=p[3], width=r.first.width, lineno=p.lineno(3)), lineno=p.lineno(3)) break if isinstance(r.first, Inout): t = Ioport(Inout(name=p[3], width=r.first.width, lineno=p.lineno(3)), lineno=p.lineno(3)) break p[0] = p[1] + (t,) else: p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1)) def p_ioports_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def create_ioport(self, sigtypes, name, width=None, dimensions=None, lineno=0): self.typecheck_ioport(sigtypes) first = None second = None signed = False if 'signed' in sigtypes: signed = True if 'input' in sigtypes: first = Input(name=name, width=width, signed=signed, dimensions=dimensions, lineno=lineno) if 'output' in sigtypes: first = Output(name=name, width=width, signed=signed, dimensions=dimensions, lineno=lineno) if 'inout' in sigtypes: first = Inout(name=name, width=width, signed=signed, dimensions=dimensions, lineno=lineno) if 'wire' in sigtypes: second = Wire(name=name, width=width, signed=signed, dimensions=dimensions, lineno=lineno) if 'reg' in sigtypes: second = Reg(name=name, width=width, signed=signed, dimensions=dimensions, lineno=lineno) if 'tri' in sigtypes: second = Tri(name=name, width=width, signed=signed, dimensions=dimensions, lineno=lineno) return Ioport(first, second, lineno=lineno) def typecheck_ioport(self, sigtypes): if 'input' not in sigtypes and 'output' not in sigtypes and 'inout' not in sigtypes: raise ParseError("Syntax Error") if 'input' in sigtypes and 'output' in sigtypes: raise ParseError("Syntax Error") if 'inout' in sigtypes and 'output' in sigtypes: raise ParseError("Syntax Error") if 'inout' in sigtypes and 'input' in sigtypes: raise ParseError("Syntax Error") if 'input' in sigtypes and 'reg' in sigtypes: raise ParseError("Syntax Error") if 'inout' in sigtypes and 'reg' in sigtypes: raise ParseError("Syntax Error") if 'input' in sigtypes and 'tri' in sigtypes: raise ParseError("Syntax Error") if 'output' in sigtypes and 'tri' in sigtypes: raise ParseError("Syntax Error") def p_ioport(self, p): p[0] = self.create_ioport(p[1], p[2], lineno=p.lineno(2)) p.set_lineno(0, p.lineno(1)) def p_ioport_width(self, p): p[0] = self.create_ioport(p[1], p[3], width=p[2], lineno=p.lineno(3)) p.set_lineno(0, p.lineno(1)) def p_ioport_dimensions(self, p): p[0] = self.create_ioport(p[1], p[3], width=p[2], dimensions=p[4], lineno=p.lineno(3)) p.set_lineno(0, p.lineno(1)) def p_ioport_head(self, p): p[0] = self.create_ioport(p[1], p[2], lineno=p.lineno(2)) p.set_lineno(0, p.lineno(1)) def p_ioport_head_width(self, p): p[0] = self.create_ioport(p[1], p[3], width=p[2], lineno=p.lineno(3)) p.set_lineno(0, p.lineno(1)) def p_ioport_head_dimensions(self, p): p[0] = self.create_ioport(p[1], p[3], width=p[2], dimensions=p[4], lineno=p.lineno(3)) p.set_lineno(0, p.lineno(1)) def p_ioport_portname(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_width(self, p): p[0] = Width(p[2], p[4], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_length(self, p): p[0] = Length(p[2], p[4], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_dimensions(self, p): dims = p[1].lengths + [p[2]] p[0] = Dimensions(dims, lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_dimensions_one(self, p): dims = [p[1]] p[0] = Dimensions(dims, lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_items(self, p): p[0] = p[1] + (p[2],) p.set_lineno(0, p.lineno(1)) def p_items_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def 
p_items_empty(self, p): p[0] = () def p_item(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_standard_item(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def create_decl(self, sigtypes, name, width=None, dimensions=None, lineno=0): self.typecheck_decl(sigtypes, dimensions) decls = [] signed = False if 'signed' in sigtypes: signed = True if 'input' in sigtypes: decls.append(Input(name=name, width=width, signed=signed, lineno=lineno, dimensions=dimensions)) if 'output' in sigtypes: decls.append(Output(name=name, width=width, signed=signed, lineno=lineno, dimensions=dimensions)) if 'inout' in sigtypes: decls.append(Inout(name=name, width=width, signed=signed, lineno=lineno, dimensions=dimensions)) if 'wire' in sigtypes: decls.append(Wire(name=name, width=width, signed=signed, lineno=lineno, dimensions=dimensions)) if 'reg' in sigtypes: decls.append(Reg(name=name, width=width, signed=signed, lineno=lineno, dimensions=dimensions)) if 'tri' in sigtypes: decls.append(Tri(name=name, width=width, signed=signed, lineno=lineno, dimensions=dimensions)) if 'supply0' in sigtypes: decls.append(Supply(name=name, value=IntConst('0', lineno=lineno), width=width, signed=signed, lineno=lineno)) if 'supply1' in sigtypes: decls.append(Supply(name=name, value=IntConst('1', lineno=lineno), width=width, signed=signed, lineno=lineno)) return decls def typecheck_decl(self, sigtypes, dimensions=None): if ('supply0' in sigtypes or 'supply1' in sigtypes) and dimensions is not None: raise ParseError("SyntaxError") if len(sigtypes) == 1 and 'signed' in sigtypes: raise ParseError("Syntax Error") if 'input' in sigtypes and 'output' in sigtypes: raise ParseError("Syntax Error") if 'inout' in sigtypes and 'output' in sigtypes: raise ParseError("Syntax Error") if 'inout' in sigtypes and 'input' in sigtypes: raise ParseError("Syntax Error") if 'input' in sigtypes and 'reg' in sigtypes: raise ParseError("Syntax Error") if 'inout' in sigtypes and 'reg' in sigtypes: raise ParseError("Syntax Error") if 'input' in sigtypes and 'tri' in sigtypes: raise ParseError("Syntax Error") if 'output' in sigtypes and 'tri' in sigtypes: raise ParseError("Syntax Error") def p_decl(self, p): decllist = [] for rname, rdimensions in p[2]: decllist.extend(self.create_decl(p[1], rname, dimensions=rdimensions, lineno=p.lineno(2))) p[0] = Decl(tuple(decllist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_decl_width(self, p): decllist = [] for rname, rdimensions in p[3]: decllist.extend(self.create_decl(p[1], rname, width=p[2], dimensions=rdimensions, lineno=p.lineno(3))) p[0] = Decl(tuple(decllist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_declnamelist(self, p): p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1)) def p_declnamelist_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def p_declname(self, p): p[0] = (p[1], None) p.set_lineno(0, p.lineno(1)) def p_declarray(self, p): p[0] = (p[1], p[2]) p.set_lineno(0, p.lineno(1)) def create_declassign(self, sigtypes, name, assign, width=None, lineno=0): self.typecheck_declassign(sigtypes) decls = [] signed = False if 'signed' in sigtypes: signed = True if 'input' in sigtypes: decls.append(Input(name=name, width=width, signed=signed, lineno=lineno)) if 'output' in sigtypes: decls.append(Output(name=name, width=width, signed=signed, lineno=lineno)) if 'inout' in sigtypes: decls.append(Inout(name=name, width=width, signed=signed, lineno=lineno)) if 'wire' in sigtypes: decls.append(Wire(name=name, width=width, signed=signed, lineno=lineno)) if 'reg' in sigtypes: 
decls.append(Reg(name=name, width=width, signed=signed, lineno=lineno)) decls.append(assign) return decls def typecheck_declassign(self, sigtypes): if len(sigtypes) == 1 and 'signed' in sigtypes: raise ParseError("Syntax Error") if 'reg' not in sigtypes and 'wire' not in sigtypes: raise ParseError("Syntax Error") if 'input' in sigtypes and 'output' in sigtypes: raise ParseError("Syntax Error") if 'inout' in sigtypes and 'output' in sigtypes: raise ParseError("Syntax Error") if 'inout' in sigtypes and 'input' in sigtypes: raise ParseError("Syntax Error") if 'input' in sigtypes and 'reg' in sigtypes: raise ParseError("Syntax Error") if 'inout' in sigtypes and 'reg' in sigtypes: raise ParseError("Syntax Error") if 'supply0' in sigtypes and len(sigtypes) != 1: raise ParseError("Syntax Error") if 'supply1' in sigtypes and len(sigtypes) != 1: raise ParseError("Syntax Error") def p_declassign(self, p): decllist = self.create_declassign( p[1], p[2][0], p[2][1], lineno=p.lineno(2)) p[0] = Decl(decllist, lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_declassign_width(self, p): decllist = self.create_declassign( p[1], p[3][0], p[3][1], width=p[2], lineno=p.lineno(3)) p[0] = Decl(tuple(decllist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_declassign_element(self, p): assign = Assign(Lvalue(Identifier(p[1], lineno=p.lineno(1)), lineno=p.lineno(1)), p[3], lineno=p.lineno(1)) p[0] = (p[1], assign) p.set_lineno(0, p.lineno(1)) def p_declassign_element_delay(self, p): assign = Assign(Lvalue(Identifier(p[2], lineno=p.lineno(1)), lineno=p.lineno(2)), p[5], p[1], p[4], lineno=p.lineno(2)) p[0] = (p[1], assign) p.set_lineno(0, p.lineno(2)) def p_integerdecl(self, p): intlist = [Integer(rname, Width(msb=IntConst('31', lineno=p.lineno(2)), lsb=IntConst('0', lineno=p.lineno(2)), lineno=p.lineno(2)), signed=True, value=rvalue, lineno=p.lineno(2)) for rname, rvalue in p[2]] p[0] = Decl(tuple(intlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_integerdecl_signed(self, p): intlist = [Integer(rname, Width(msb=IntConst('31', lineno=p.lineno(3)), lsb=IntConst('0', lineno=p.lineno(3)), lineno=p.lineno(3)), signed=True, value=rvalue, lineno=p.lineno(3)) for rname, rvalue in p[2]] p[0] = Decl(tuple(intlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_integernamelist(self, p): p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1)) def p_integernamelist_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def p_integername_init(self, p): p[0] = (p[1], p[3]) p.set_lineno(0, p.lineno(1)) def p_integername(self, p): p[0] = (p[1], None) p.set_lineno(0, p.lineno(1)) def p_realdecl(self, p): reallist = [Real(p[1], Width(msb=IntConst('31', lineno=p.lineno(2)), lsb=IntConst('0', lineno=p.lineno(2)), lineno=p.lineno(2)), lineno=p.lineno(2)) for r in p[2]] p[0] = Decl(tuple(reallist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_realnamelist(self, p): p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1)) def p_realnamelist_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def p_realname(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_parameterdecl(self, p): paramlist = [Parameter(rname, rvalue, lineno=p.lineno(2)) for rname, rvalue in p[2]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_parameterdecl_signed(self, p): paramlist = [Parameter(rname, rvalue, signed=True, lineno=p.lineno(2)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_parameterdecl_width(self, p): 
paramlist = [Parameter(rname, rvalue, p[2], lineno=p.lineno(3)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_parameterdecl_signed_width(self, p): paramlist = [Parameter(rname, rvalue, p[3], signed=True, lineno=p.lineno(3)) for rname, rvalue in p[4]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_parameterdecl_integer(self, p): paramlist = [Parameter(rname, rvalue, lineno=p.lineno(3)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_localparamdecl(self, p): paramlist = [Localparam(rname, rvalue, lineno=p.lineno(2)) for rname, rvalue in p[2]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_localparamdecl_signed(self, p): paramlist = [Localparam(rname, rvalue, signed=True, lineno=p.lineno(2)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_localparamdecl_width(self, p): paramlist = [Localparam(rname, rvalue, p[2], lineno=p.lineno(3)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_localparamdecl_signed_width(self, p): paramlist = [Localparam(rname, rvalue, p[3], signed=True, lineno=p.lineno(3)) for rname, rvalue in p[4]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_localparamdecl_integer(self, p): paramlist = [Localparam(rname, rvalue, lineno=p.lineno(3)) for rname, rvalue in p[3]] p[0] = Decl(tuple(paramlist), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_param_substitution_list(self, p): p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1)) def p_param_substitution_list_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def p_param_substitution(self, p): p[0] = (p[1], p[3]) p.set_lineno(0, p.lineno(1)) def p_assignment(self, p): p[0] = Assign(p[2], p[4], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_assignment_delay(self, p): p[0] = Assign(p[3], p[6], p[2], p[5], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lpartselect_lpointer(self, p): p[0] = Partselect(p[1], p[3], p[5], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lpartselect_lpointer_plus(self, p): p[0] = Partselect(p[1], p[3], Plus(p[3], p[5]), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lpartselect_lpointer_minus(self, p): p[0] = Partselect(p[1], p[3], Minus(p[3], p[5]), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lpartselect(self, p): p[0] = Partselect(p[1], p[3], p[5], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lpartselect_plus(self, p): p[0] = Partselect(p[1], p[3], Plus(p[3], p[5]), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lpartselect_minus(self, p): p[0] = Partselect(p[1], p[3], Minus(p[3], p[5]), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lpointer(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_lconcat(self, p): p[0] = LConcat(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lconcatlist(self, p): p[0] = p[1] + (p[3],) p.set_lineno(0, p.lineno(1)) def p_lconcatlist_one(self, p): p[0] = (p[1],) p.set_lineno(0, p.lineno(1)) def p_lconcat_one_identifier(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_lconcat_one_lpartselect(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_lconcat_one_lpointer(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def p_lconcat_one_lconcat(self, p): p[0] = p[1] p.set_lineno(0, p.lineno(1)) def 
p_lvalue_partselect(self, p): p[0] = Lvalue(p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lvalue_pointer(self, p): p[0] = Lvalue(p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lvalue_concat(self, p): p[0] = Lvalue(p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_lvalue_one(self, p): p[0] = Lvalue(p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_rvalue(self, p): p[0] = Rvalue(p[1], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_expression_uminus(self, p): p[0] = Uminus(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_expression_uplus(self, p): p[0] = p[2] p.set_lineno(0, p.lineno(1)) def p_expression_ulnot(self, p): p[0] = Ulnot(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_expression_unot(self, p): p[0] = Unot(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_expression_uand(self, p): p[0] = Uand(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_expression_unand(self, p): p[0] = Unand(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_expression_unor(self, p): p[0] = Unor(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_expression_uor(self, p): p[0] = Uor(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_expression_uxor(self, p): p[0] = Uxor(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1)) def p_expression_uxnor(self, p): p[0] = Uxnor(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
Apache License 2.0
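A minimal usage sketch for the VerilogParser shown in this record's context. The RTL string, the outputdir value, and the use of ast.show() are illustrative assumptions, not taken from the record; an installed pyverilog package with its lexer, preprocessor, and AST modules is assumed.

from pyverilog.vparser.parser import VerilogParser

# Toy RTL input; any syntactically valid Verilog module would do.
rtl = """
module blinky(input clk, output reg led);
  always @(posedge clk) led <= ~led;
endmodule
"""

parser = VerilogParser(outputdir="parser_tables", debug=False)  # LALR tables are written here
ast = parser.parse(rtl)   # returns the Source node built by p_source_text
ast.show()                # pyverilog AST nodes provide show() for pretty-printing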
criteo/criteo-python-marketing-sdk
criteo_marketing/api/advertisers_api.py
AdvertisersApi.get_category_with_http_info
python
def get_category_with_http_info(self, advertiser_id, category_hash_code, authorization, **kwargs):
    local_var_params = locals()

    all_params = ['advertiser_id', 'category_hash_code', 'authorization']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_category" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    if ('advertiser_id' not in local_var_params or
            local_var_params['advertiser_id'] is None):
        raise ApiValueError("Missing the required parameter `advertiser_id` when calling `get_category`")
    if ('category_hash_code' not in local_var_params or
            local_var_params['category_hash_code'] is None):
        raise ApiValueError("Missing the required parameter `category_hash_code` when calling `get_category`")
    if ('authorization' not in local_var_params or
            local_var_params['authorization'] is None):
        raise ApiValueError("Missing the required parameter `authorization` when calling `get_category`")

    collection_formats = {}

    path_params = {}
    if 'advertiser_id' in local_var_params:
        path_params['advertiserId'] = local_var_params['advertiser_id']
    if 'category_hash_code' in local_var_params:
        path_params['categoryHashCode'] = local_var_params['category_hash_code']

    query_params = []

    header_params = {}
    if 'authorization' in local_var_params:
        header_params['Authorization'] = local_var_params['authorization']

    form_params = []
    local_var_files = {}

    body_params = None
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'text/json', 'application/xml', 'text/xml', 'text/html'])

    auth_settings = ['Authorization']

    return self.api_client.call_api(
        '/v1/advertisers/{advertiserId}/categories/{categoryHashCode}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[CategoryMessage]',
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
Gets a specific advertiser's category  # noqa: E501

Get a specific category linked to the requested advertiser.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_category_with_http_info(advertiser_id, category_hash_code, authorization, async_req=True)
>>> result = thread.get()

:param async_req bool: execute request asynchronously
:param int advertiser_id: Mandatory. The id of the advertiser to return. (required)
:param int category_hash_code: Mandatory. The id of the category to return. (required)
:param str authorization: JWT Bearer Token (required)
:param _return_http_data_only: response data without head status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned
    without reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided,
    it will be total request timeout. It can also be a pair (tuple) of
    (connection, read) timeouts.
:return: tuple(list[CategoryMessage], status_code(int), headers(HTTPHeaderDict))
    If the method is called asynchronously, returns the request thread.
https://github.com/criteo/criteo-python-marketing-sdk/blob/1093f86cf035cb6ce657b47f0f5e768c1fc2271c/criteo_marketing/api/advertisers_api.py#L302-L397
from __future__ import absolute_import

import re

import six

from criteo_marketing.api_client import ApiClient
from criteo_marketing.exceptions import (
    ApiTypeError,
    ApiValueError
)


class AdvertisersApi(object):

    def __init__(self, api_client=None):
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_campaigns(self, advertiser_id, authorization, **kwargs):
        kwargs['_return_http_data_only'] = True
        return self.get_campaigns_with_http_info(advertiser_id, authorization, **kwargs)

    def get_campaigns_with_http_info(self, advertiser_id, authorization, **kwargs):
        local_var_params = locals()

        all_params = ['advertiser_id', 'authorization']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_campaigns" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        if ('advertiser_id' not in local_var_params or
                local_var_params['advertiser_id'] is None):
            raise ApiValueError("Missing the required parameter `advertiser_id` when calling `get_campaigns`")
        if ('authorization' not in local_var_params or
                local_var_params['authorization'] is None):
            raise ApiValueError("Missing the required parameter `authorization` when calling `get_campaigns`")

        collection_formats = {}

        path_params = {}
        if 'advertiser_id' in local_var_params:
            path_params['advertiserId'] = local_var_params['advertiser_id']

        query_params = []

        header_params = {}
        if 'authorization' in local_var_params:
            header_params['Authorization'] = local_var_params['authorization']

        form_params = []
        local_var_files = {}

        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml', 'text/html'])

        auth_settings = ['Authorization']

        return self.api_client.call_api(
            '/v1/advertisers/{advertiserId}/campaigns', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[CampaignMessage]',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_categories(self, advertiser_id, authorization, **kwargs):
        kwargs['_return_http_data_only'] = True
        return self.get_categories_with_http_info(advertiser_id, authorization, **kwargs)

    def get_categories_with_http_info(self, advertiser_id, authorization, **kwargs):
        local_var_params = locals()

        all_params = ['advertiser_id', 'authorization', 'enabled_only']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_categories" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        if ('advertiser_id' not in local_var_params or
                local_var_params['advertiser_id'] is None):
            raise ApiValueError("Missing the required parameter `advertiser_id` when calling `get_categories`")
        if ('authorization' not in local_var_params or
                local_var_params['authorization'] is None):
            raise ApiValueError("Missing the required parameter `authorization` when calling `get_categories`")

        collection_formats = {}

        path_params = {}
        if 'advertiser_id' in local_var_params:
            path_params['advertiserId'] = local_var_params['advertiser_id']

        query_params = []
        if 'enabled_only' in local_var_params:
            query_params.append(('enabledOnly', local_var_params['enabled_only']))

        header_params = {}
        if 'authorization' in local_var_params:
            header_params['Authorization'] = local_var_params['authorization']

        form_params = []
        local_var_files = {}

        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'text/json', 'application/xml', 'text/xml', 'text/html'])

        auth_settings = ['Authorization']

        return self.api_client.call_api(
            '/v1/advertisers/{advertiserId}/categories', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[CategoryMessage]',
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_category(self, advertiser_id, category_hash_code, authorization, **kwargs):
        kwargs['_return_http_data_only'] = True
        return self.get_category_with_http_info(advertiser_id, category_hash_code, authorization, **kwargs)
Apache License 2.0
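A short, hedged sketch of calling the endpoint documented above. The advertiser id, category hash code, and token value are placeholders; only the import paths, the method signature, and the return shape come from the record.

from criteo_marketing.api_client import ApiClient
from criteo_marketing.api.advertisers_api import AdvertisersApi

api = AdvertisersApi(ApiClient())   # default ApiClient configuration
token = "Bearer <JWT>"              # placeholder JWT bearer token

# Returns tuple(list[CategoryMessage], status_code, headers) per the docstring.
data, status, headers = api.get_category_with_http_info(1234, 5678, token)
print(status, data)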
facebookresearch/shadow_gnn
shaDow/layers.py
GAT._adj_norm
python
def _adj_norm(self, adj, is_normed, device, dropedge=0):
    if type(adj) == sp.csr_matrix:
        assert not is_normed
        adj_norm = coo_scipy2torch(adj.tocoo()).to(device)
        if dropedge > 0:
            masked_indices = torch.floor(
                torch.rand(int(adj_norm._values().size()[0] * dropedge))
                * adj_norm._values().size()[0]).long()
            adj_norm._values()[masked_indices] = 0
    else:
        assert type(adj) == torch.Tensor and is_normed
        adj_norm = adj
    return adj_norm
Will perform edge dropout only when is_normed == False
https://github.com/facebookresearch/shadow_gnn/blob/a7d22eac62af4999828a2c939fabaeff27920d08/shaDow/layers.py#L503-L517
import torch from torch import nn import scipy.sparse as sp import torch.nn.functional as F from shaDow.utils import adj_norm_sym, adj_norm_rw, coo_scipy2torch, get_deg_torch_sparse from torch_scatter import scatter from torch_geometric.nn import global_sort_pool import numpy as np import torch.nn.functional as F from collections import namedtuple Dims_X = namedtuple('Dims_X', ['num_nodes', 'num_feats']) Dims_adj = namedtuple('Dims_adj', ['num_nodes', 'num_edges']) F_ACT = {"relu" : (nn.ReLU, {}), "I" : (nn.LeakyReLU, {"negative_slope": (lambda kwargs: 1)}), "elu" : (nn.ELU, {}), "tanh" : (nn.Tanh, {}), "leakyrelu": (nn.LeakyReLU, {"negative_slope": (lambda kwargs: 0.2)}), "prelu" : (nn.PReLU, {}), "prelu+" : (nn.PReLU, {"num_parameters": (lambda kwargs: kwargs['dim_out'])})} def get_torch_act(act, args): _torch_args = {k: v(args) for k, v in F_ACT[act][1].items()} return F_ACT[act][0](**_torch_args) class EnsembleDummy(nn.Module): def __init__(self, dim_in=0, dim_out=0, **kwargs): super().__init__() def forward(self, Xi): assert len(Xi) == 1, "ONLY USE DUMMY ENSEMBLER WITH ONE BRANCH!" return Xi[0] def complexity(self, dims): assert len(dims) == 1, "ONLY USE DUMMY ENSEMBLER WITH ONE BRANCH!" return dims[0], 0 class ResPool(nn.Module): def __init__(self, dim_in : int, dim_out : int, num_layers : int, type_res : str, type_pool : str, dropout : float, act : str, args_pool : dict=None): super().__init__() self.dim_out = dim_out self.type_pool = type_pool self.type_res = type_res if type_pool == 'center': if type_res == 'none': self.dim_in = self.dim_out = 0 elif type_res in ['cat', 'concat']: self.dim_in = num_layers * dim_in else: self.dim_in = dim_in else: if type_res in ['cat', 'concat']: self.dim_in = 2 * dim_in * num_layers else: self.dim_in = 2 * dim_in if type_pool == 'sort': assert 'k' in args_pool, "Sort pooling needs the budget k as input!" 
self.k = args_pool['k'] _f_lin_pool = nn.Linear(self.k * int(self.dim_in / 2), int(self.dim_in / 2)) _f_dropout_pool = nn.Dropout(p=dropout) _act_pool = get_torch_act(act, locals()) self.nn_pool = nn.Sequential(_f_dropout_pool, _f_lin_pool, _act_pool) if self.dim_in > 0 and self.dim_out > 0: _act = get_torch_act(act, locals()) _f_lin = nn.Linear(self.dim_in, self.dim_out, bias=True) _f_dropout = nn.Dropout(p=dropout) self.nn = nn.Sequential(_f_dropout, _f_lin, _act) self.offset = nn.Parameter(torch.zeros(self.dim_out)) self.scale = nn.Parameter(torch.ones(self.dim_out)) def f_norm(self, _feat): mean = _feat.mean(dim=1).view(_feat.shape[0], 1) var = _feat.var(dim=1, unbiased=False).view(_feat.shape[0], 1) + 1e-9 feat_out = (_feat - mean) * self.scale * torch.rsqrt(var) + self.offset return feat_out def f_residue(self, feat_l): if self.type_res in ['cat', 'concat']: feat_ret = torch.cat(feat_l, dim=1) elif self.type_res == 'sum': feat_ret = torch.stack(feat_l, dim=0).sum(dim=0) elif self.type_res == 'max': feat_ret = torch.max(torch.stack(feat_l, dim=0), dim=0).values else: raise NotImplementedError return feat_ret def f_res_complexity(self, dim_x_l): if self.type_res in ['cat', 'concat']: return sum([d.num_feats for d in dim_x_l]), 0 elif self.type_res == 'sum': return dim_x_l[-1].num_feats, (len(dim_x_l) - 1) * (dim_x_l[-1].num_nodes * dim_x_l[-1].num_feats) elif self.type_res == 'max': return dim_x_l[-1].num_feats, (len(dim_x_l) - 1) * (dim_x_l[-1].num_nodes * dim_x_l[-1].num_feats) else: raise NotImplementedError def forward(self, feats_in_l, idx_targets, sizes_subg): if self.type_pool == 'center': if self.type_res == 'none': return feats_in_l[-1][idx_targets] else: feats_root_l = [f[idx_targets] for f in feats_in_l] feat_in = self.f_residue(feats_root_l) elif self.type_pool in ['max', 'mean', 'sum']: offsets = torch.cumsum(sizes_subg, dim=0) offsets = torch.roll(offsets, 1) offsets[0] = 0 idx = torch.arange(feats_in_l[-1].shape[0]).to(feats_in_l[-1].device) if self.type_res == 'none': feat_pool = F.embedding_bag(idx, feats_in_l[-1], offsets, mode=self.type_pool) feat_root = feats_in_l[-1][idx_targets] else: feat_pool_l = [] for feat in feats_in_l: feat_pool = F.embedding_bag(idx, feat, offsets, mode=self.type_pool) feat_pool_l.append(feat_pool) feat_pool = self.f_residue(feat_pool_l) feat_root = self.f_residue([f[idx_targets] for f in feats_in_l]) feat_in = torch.cat([feat_root, feat_pool], dim=1) elif self.type_pool == 'sort': if self.type_res == 'none': feat_pool_in = feats_in_l[-1] feat_root = feats_in_l[-1][idx_targets] else: feat_pool_in = self.f_residue(feats_in_l) feat_root = self.f_residue([f[idx_targets] for f in feats_in_l]) arange = torch.arange(sizes_subg.size(0)).to(sizes_subg.device) idx_batch = torch.repeat_interleave(arange, sizes_subg) feat_pool_k = global_sort_pool(feat_pool_in, idx_batch, self.k) feat_pool = self.nn_pool(feat_pool_k) feat_in = torch.cat([feat_root, feat_pool], dim=1) else: raise NotImplementedError return self.f_norm(self.nn(feat_in)) def complexity(self, dims_x_l, sizes_subg): num_nodes = len(sizes_subg) num_neigh = dims_x_l[-1].num_nodes assert num_neigh == sizes_subg.sum() if self.type_pool == 'center': if self.type_res == 'none': return Dims_X(num_nodes, dims_x_l[-1].num_feats), 0 else: dims_root_l = [Dims_X(num_nodes, d.num_feats) for d in dims_x_l] dim_f, ops = self.f_res_complexity(dims_root_l) return Dims_X(num_nodes, dim_f), ops elif self.type_pool in ['max', 'mean', 'sum']: ops = dims_x_l[-1].num_nodes * dims_x_l[-1].num_feats mult = 1 if 
self.type_res == 'none' else len(dims_x_l) ops *= mult dims_root_l = [Dims_X(num_nodes, d.num_feats) for d in dims_x_l] _dim_f, ops_res = self.f_res_complexity(dims_root_l) ops += 2 * ops_res elif self.type_pool == 'sort': if self.type_res == 'none': ops = 0 else: _dim, ops = self.f_res_complexity(dims_x_l) for n in self.nn_pool: if type(n) == nn.Linear: ops += np.pool(list(n.weight.shape)) * num_nodes ops += np.prod(list(self.nn_pool.weight.shape)) * self.k * num_nodes for n in self.nn: if type(n) == nn.Linear: ops += np.prod(list(n.weight.shape)) * num_nodes dim_f = n.weight.shape[0] return Dims_X(num_nodes, dim_f), ops class EnsembleAggregator(nn.Module): def __init__(self, dim_in, dim_out, num_ensemble, dropout=0.0, act="leakyrelu", type_dropout="none"): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dropout = dropout self.act = nn.ModuleList(get_torch_act(act, locals()) for _ in range(num_ensemble)) self.f_lin = nn.Linear(dim_in, dim_out, bias=True) self.f_dropout = nn.Dropout(p=self.dropout) self.q = nn.Parameter(torch.ones(dim_out)) assert type_dropout in ["none", "feat", "coef"] self.type_dropout = type_dropout def forward(self, Xi): omega_ensemble = [] for i, X in enumerate(Xi): if self.type_dropout == "none": X_ = X elif self.type_dropout == "coef": X_ = self.f_dropout(X) else: Xi[i] = self.f_dropout(X) X_ = Xi[i] omega_ensemble.append(self.act[i](self.f_lin(X_)).mm(self.q.view(-1, 1))) omega_ensemble = torch.cat(omega_ensemble, 1) omega_norm = F.softmax(omega_ensemble, dim=1) Y = 0 for i, X in enumerate(Xi): Y += omega_norm[:, i].view(-1, 1) * X return Y def complexity(self, dims_x_l): ops = 0 for dx in dims_x_l: assert dx.num_feats == self.f_lin.weight.shape[1] ops += dx.num_nodes * dx.num_feats * self.f_lin.weight.shape[0] ops += dx.num_nodes * dx.num_feats ops += dx.num_nodes * dx.num_feats return Dims_X(dx.num_nodes, self.f_lin.weight.shape[0]), ops class MLP(nn.Module): def __init__(self, dim_in, dim_out, dropout=0.0, act="relu", **kwargs): super().__init__() self.act = get_torch_act(act, locals()) self.dropout = dropout self.dim_in, self.dim_out = dim_in, dim_out self.f_lin = nn.Linear(dim_in, dim_out) self.offset = nn.Parameter(torch.zeros(dim_out)) self.scale = nn.Parameter(torch.ones(dim_out)) self.f_dropout = nn.Dropout(p=self.dropout) def _f_norm(self, _feat): mean = _feat.mean(dim=1).view(_feat.shape[0], 1) var = _feat.var(dim=1, unbiased=False).view(_feat.shape[0], 1) + 1e-9 feat_out = (_feat - mean) * self.scale * torch.rsqrt(var) + self.offset return feat_out def forward(self, feat_in): feat_in = self.f_dropout(feat_in) feat_out = self.act(self.f_lin(feat_in)) feat_out = self._f_norm(feat_out) return feat_out def complexity(self, dims_x): assert dims_x.num_feats == self.f_lin.weight.shape[1] ops = dims_x.num_nodes * np.product(self.f_lin.weight.shape) return Dims_X(dims_x.num_nodes, self.f_lin.weight.shape[0]), ops class MLPSGC(MLP): def __init__(self, dim_in, dim_out, dropout=0.0, act="relu", **kwargs): super().__init__(dim_in, dim_out, dropout=dropout, act=act) def forward(self, inputs): assert type(inputs) in [list, tuple] and len(inputs) == 4 feat_in = inputs[0] feat_out = super().forward(feat_in) return feat_out, None, None, None class GCN(nn.Module): def __init__(self, dim_in, dim_out, dropout=0.0, act="relu", **kwargs): super().__init__() if "aggr" in kwargs: assert kwargs["aggr"] == "gcn" self.dim_in, self.dim_out = dim_in, dim_out self.dropout = dropout self.act = get_torch_act(act, locals()) self.f_lin = nn.Linear(dim_in, dim_out, 
bias=True) self.f_dropout = nn.Dropout(p=self.dropout) self.offset = nn.Parameter(torch.zeros(dim_out)) self.scale = nn.Parameter(torch.ones(dim_out)) def f_norm(self, feat_in): mean = feat_in.mean(dim=1).view(feat_in.shape[0], 1) var = feat_in.var(dim=1, unbiased=False).view(feat_in.shape[0], 1) + 1e-10 feat_norm = (feat_in - mean) * self.scale * torch.rsqrt(var) + self.offset return feat_norm def forward(self, inputs): feat_in, adj, is_normed, dropedge = inputs feat_in = self.f_dropout(feat_in) if not is_normed and adj is not None: assert type(adj) == sp.csr_matrix adj_norm = adj_norm_sym(adj, dropedge=dropedge) adj_norm = coo_scipy2torch(adj_norm.tocoo()).to(feat_in.device) else: assert adj is None or type(adj) == torch.Tensor adj_norm = adj feat_aggr = torch.sparse.mm(adj_norm, feat_in) feat_trans = self.f_lin(feat_aggr) feat_out = self.f_norm(self.act(feat_trans)) return feat_out, adj_norm, True, 0. def complexity(self, dims_x, dims_adj): ops = dims_adj.num_edges * dims_x.num_feats + dims_x.num_nodes * np.product(self.f_lin.weight.shape) return (Dims_X(dims_x.num_nodes, self.f_lin.weight.shape[0]), Dims_adj(dims_adj.num_nodes, dims_adj.num_edges)), ops class GraphSAGE(nn.Module): def __init__( self, dim_in, dim_out, dropout=0.0, act="relu", **kwargs ): super().__init__() self.act = get_torch_act(act, locals()) self.dropout = dropout self.f_lin = [] self.offset, self.scale = [], [] self.f_lin_self = nn.Linear(dim_in, dim_out) self.f_lin_neigh = nn.Linear(dim_in, dim_out) self.offset = nn.Parameter(torch.zeros(dim_out * 2)) self.scale = nn.Parameter(torch.ones(dim_out * 2)) self.f_dropout = nn.Dropout(p=self.dropout) self.dim_out = dim_out def _spmm(self, adj_norm, _feat): return torch.sparse.mm(adj_norm, _feat) def _f_norm(self, _feat, _id): mean = _feat.mean(dim=1).view(_feat.shape[0], 1) var = _feat.var(dim=1, unbiased=False).view(_feat.shape[0], 1) + 1e-9 _scale = self.scale[_id * self.dim_out : (_id + 1) * self.dim_out] _offset = self.offset[_id * self.dim_out : (_id + 1) * self.dim_out] feat_out = (_feat - mean) * _scale * torch.rsqrt(var) + _offset return feat_out def forward(self, inputs): feat_in, adj, is_normed, dropedge = inputs if not is_normed and adj is not None: assert type(adj) == sp.csr_matrix adj = coo_scipy2torch(adj.tocoo()).to(feat_in.device) adj_norm = adj_norm_rw(adj, dropedge=dropedge) else: assert adj is None or type(adj) == torch.Tensor or type(adj) == tuple adj_norm = adj feat_in = self.f_dropout(feat_in) feat_self = feat_in feat_neigh = self._spmm(adj_norm, feat_in) feat_self_trans = self._f_norm(self.act(self.f_lin_self(feat_self)), 0) feat_neigh_trans = self._f_norm(self.act(self.f_lin_neigh(feat_neigh)), 1) feat_out = feat_self_trans + feat_neigh_trans return feat_out, adj_norm, True, 0. 
def complexity(self, dims_x, dims_adj): assert dims_x.num_nodes == dims_adj.num_nodes ops = dims_x.num_nodes * np.product(self.f_lin_self.weight.shape) + dims_adj.num_edges * dims_x.num_feats + dims_x.num_nodes * np.product(self.f_lin_neigh.weight.shape) return (Dims_X(dims_x.num_nodes, self.f_lin_self.weight.shape[0]), Dims_adj(dims_adj.num_nodes, dims_adj.num_edges)), ops class GIN(nn.Module): def __init__(self, dim_in, dim_out, dropout=0.0, act="relu", eps=0, **kwargs): super().__init__() self.dropout = dropout self.act = get_torch_act(act, locals()) self.mlp = nn.Sequential( nn.Linear(dim_in, dim_out, bias=True), nn.ReLU(), nn.Linear(dim_out, dim_out, bias=True)) self.f_dropout = nn.Dropout(p=self.dropout) self.eps = torch.nn.Parameter(torch.Tensor([eps])) self.offset = nn.Parameter(torch.zeros(dim_out)) self.scale = nn.Parameter(torch.ones(dim_out)) def f_norm(self, feat_in): mean = feat_in.mean(dim=1).view(feat_in.shape[0], 1) var = feat_in.var(dim=1, unbiased=False).view(feat_in.shape[0], 1) + 1e-10 feat_norm = (feat_in - mean) * self.scale * torch.rsqrt(var) + self.offset return feat_norm def forward(self, inputs): feat_in, adj, is_normed, dropedge = inputs assert not is_normed feat_in = self.f_dropout(feat_in) if type(adj) == sp.csr_matrix: adj = coo_scipy2torch(adj.tocoo()).to(feat_in.device) deg_orig = get_deg_torch_sparse(adj) masked_indices = torch.floor(torch.rand(int(adj._values().size()[0] * dropedge)) * adj._values().size()[0]).long() adj._values()[masked_indices] = 0 deg_dropped = torch.clamp(get_deg_torch_sparse(adj), min=1) rescale = torch.repeat_interleave(deg_orig / deg_dropped, deg_orig.long()) adj._values()[:] = adj._values() * rescale feat_aggr = torch.sparse.mm(adj, feat_in) feat_aggr += (1 + self.eps) * feat_in feat_out = self.mlp(feat_aggr) feat_out = self.f_norm(self.act(feat_out)) return feat_out, adj, False, 0. 
def complexity(self, dims_x, dims_adj): assert dims_x.num_nodes == dims_adj.num_nodes ops = dims_adj.num_edges * dims_x.num_feats ops += dims_x.num_nodes * dims_x.num_feats for m in self.mlp: breakpoint() return class GAT(nn.Module): def __init__( self, dim_in, dim_out, dropout=0.0, act="relu", mulhead=1, **kwargs ): super().__init__() self.mulhead = mulhead self.act = get_torch_act(act, locals()) self.att_act = nn.LeakyReLU(negative_slope=0.2) self.dropout = dropout assert dim_out % self.mulhead == 0, "invalid output dimension: need to be divisible by mulhead" self.dim_slice = int(dim_out / self.mulhead) self.f_lin = nn.ModuleList(nn.Linear(dim_in, dim_out, bias=True) for i in range(2)) self.offset = nn.Parameter(torch.zeros(2, self.mulhead, self.dim_slice)) self.scale = nn.Parameter(torch.ones(2, self.mulhead, self.dim_slice)) self.attention = nn.Parameter(torch.ones(2, self.mulhead, self.dim_slice)) nn.init.xavier_uniform_(self.attention) self.f_dropout = nn.Dropout(p=self.dropout) def _spmm(self, adj_norm, _feat): return torch.sparse.mm(adj_norm, _feat) def _aggregate_attention(self, adj, feat_neigh, feat_self, attention_self, attention_neigh): attention_self = self.att_act(attention_self.mm(feat_self.t())).squeeze() attention_neigh = self.att_act(attention_neigh.mm(feat_neigh.t())).squeeze() val_adj = (attention_self[adj._indices()[0]] + attention_neigh[adj._indices()[1]]) max_per_row = scatter(val_adj, adj._indices()[0], reduce="max") deg = scatter(torch.ones(val_adj.size()).to(feat_neigh.device), adj._indices()[0], reduce="sum") val_adj_norm = val_adj - torch.repeat_interleave(max_per_row, deg.long()) val_adj_exp = torch.exp(val_adj_norm) * adj._values() att_adj = torch.sparse.FloatTensor(adj._indices(), val_adj_exp, torch.Size(adj.shape)) denom = torch.clamp(scatter(val_adj_exp, adj._indices()[0], reduce="sum"), min=1e-10) ret = self._spmm(att_adj, feat_neigh) ret *= 1 / denom.view(-1, 1) return ret
MIT License
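A toy exercise of the _adj_norm helper above. The 3-node adjacency matrix and the GAT constructor arguments are assumptions for illustration, and the module's torch_scatter / torch_geometric dependencies must be installed.

import numpy as np
import scipy.sparse as sp
from shaDow.layers import GAT

adj = sp.csr_matrix(np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 0, 0]], dtype=np.float32))

layer = GAT(dim_in=8, dim_out=8, mulhead=1)
# Un-normed scipy input: converted to a torch sparse tensor, with roughly
# half of the edge values zeroed out by the random dropedge mask.
adj_drop = layer._adj_norm(adj, is_normed=False, device='cpu', dropedge=0.5)
# Already-normed torch input: returned as-is, no dropout (per the docstring).
adj_same = layer._adj_norm(adj_drop, is_normed=True, device='cpu')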
sofia-netsurv/python-netsurv
env/lib/python3.5/site-packages/_pytest/reports.py
BaseReport.longreprtext
python
def longreprtext(self):
    tw = py.io.TerminalWriter(stringio=True)
    tw.hasmarkup = False
    self.toterminal(tw)
    exc = tw.stringio.getvalue()
    return exc.strip()
Read-only property that returns the full string representation
of ``longrepr``.

.. versionadded:: 3.0
https://github.com/sofia-netsurv/python-netsurv/blob/429fb07a2b06cc505fdd9350148266a6b4e23e64/env/lib/python3.5/site-packages/_pytest/reports.py#L59-L70
from pprint import pprint

import py

from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprEntry
from _pytest._code.code import ReprEntryNative
from _pytest._code.code import ReprExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import ReprFuncArgs
from _pytest._code.code import ReprLocals
from _pytest._code.code import ReprTraceback
from _pytest._code.code import TerminalRepr
from _pytest.outcomes import skip
from _pytest.pathlib import Path


def getslaveinfoline(node):
    try:
        return node._slaveinfocache
    except AttributeError:
        d = node.slaveinfo
        ver = "%s.%s.%s" % d["version_info"][:3]
        node._slaveinfocache = s = "[{}] {} -- Python {} {}".format(
            d["id"], d["sysplatform"], ver, d["executable"]
        )
        return s


class BaseReport:
    when = None
    location = None

    def __init__(self, **kw):
        self.__dict__.update(kw)

    def toterminal(self, out):
        if hasattr(self, "node"):
            out.line(getslaveinfoline(self.node))

        longrepr = self.longrepr
        if longrepr is None:
            return

        if hasattr(longrepr, "toterminal"):
            longrepr.toterminal(out)
        else:
            try:
                out.line(longrepr)
            except UnicodeEncodeError:
                out.line("<unprintable longrepr>")

    def get_sections(self, prefix):
        for name, content in self.sections:
            if name.startswith(prefix):
                yield prefix, content

    @property
MIT License
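A small conftest.py sketch using the property above; the log file name is illustrative. pytest_runtest_logreport is a standard pytest hook, and longreprtext is available on the report objects it receives (pytest >= 3.0).

# conftest.py
def pytest_runtest_logreport(report):
    # Append the plain-text traceback of every failing phase to a log file.
    if report.failed:
        with open("failures.log", "a") as fh:
            fh.write(report.longreprtext + "\n\n")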
ceteri/exelixi
src/util.py
get_telemetry
python
def get_telemetry ():
    telemetry = OrderedDict()

    telemetry["ip_addr"] = socket.gethostbyname(socket.gethostname())

    telemetry["mem_free"] = psutil.virtual_memory().free

    telemetry["cpu_num"] = psutil.NUM_CPUS

    x = psutil.cpu_times()
    telemetry["cpu_times"] = OrderedDict([
        ("user", x.user),
        ("system", x.system),
        ("idle", x.idle)
    ])

    x = psutil.disk_usage("/tmp")
    telemetry["disk_usage"] = OrderedDict([
        ("free", x.free),
        ("percent", x.percent)
    ])

    x = psutil.disk_io_counters()
    telemetry["disk_io"] = OrderedDict([
        ("read_count", x.read_count),
        ("write_count", x.write_count),
        ("read_bytes", x.read_bytes),
        ("write_bytes", x.write_bytes),
        ("read_time", x.read_time),
        ("write_time", x.write_time)
    ])

    x = psutil.network_io_counters()
    telemetry["network_io"] = OrderedDict([
        ("bytes_sent", x.bytes_sent),
        ("bytes_recv", x.bytes_recv),
        ("packets_sent", x.packets_sent),
        ("packets_recv", x.packets_recv),
        ("errin", x.errin),
        ("errout", x.errout),
        ("dropin", x.dropin),
        ("dropout", x.dropout)
    ])

    return telemetry
get system resource telemetry on a Mesos slave via psutil
https://github.com/ceteri/exelixi/blob/81bb97d3e99fe055e3816a5692b4dc29cdce6c94/src/util.py#L68-L90
from collections import OrderedDict
from httplib import BadStatusLine
from importlib import import_module
from json import dumps, loads
from os.path import abspath
from random import random
from urllib2 import urlopen, Request, URLError
import logging
import psutil
import socket


def instantiate_class (class_path):
    module_name, class_name = class_path.split(".")
    return getattr(import_module(module_name), class_name)()


def post_distrib_rest (prefix, shard_id, shard_uri, path, base_msg):
    msg = base_msg.copy()
    msg["prefix"] = prefix
    msg["shard_id"] = shard_id

    uri = "http://" + shard_uri + "/" + path

    req = Request(uri)
    req.add_header('Content-Type', 'application/json')

    logging.debug("send %s %s", shard_uri, path)
    logging.debug(dumps(msg))

    try:
        f = urlopen(req, dumps(msg))
        return f.readlines()
    except URLError as e:
        logging.critical("could not reach REST endpoint %s error: %s", uri, str(e.reason), exc_info=True)
        raise
    except BadStatusLine as e:
        logging.critical("REST endpoint died %s error: %s", uri, str(e.line), exc_info=True)
Apache License 2.0
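A minimal sketch of dumping the telemetry as JSON, for example from a REST handler. The import path assumes src/util.py is importable as util; note the surrounding module targets Python 2 (httplib, urllib2).

from json import dumps

from util import get_telemetry  # assumes src/ is on the Python path

# OrderedDict serializes directly; key order is preserved in the output.
print(dumps(get_telemetry(), indent=2))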
nuitka/nuitka
nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/GettextCommon.py
_POTargetFactory.__init__
python
def __init__(self, env, nodefault=True, alias=None, precious=True, noclean=True):
    self.env = env
    self.alias = alias
    self.precious = precious
    self.noclean = noclean
    self.nodefault = nodefault
Object constructor.

**Arguments**

    - *env* (`SCons.Environment.Environment`)
    - *nodefault* (`boolean`) - if `True`, produced nodes will be ignored
      from default target `'.'`
    - *alias* (`string`) - if provided, produced nodes will be automatically
      added to this alias, and alias will be set as `AlwaysBuild`
    - *precious* (`boolean`) - if `True`, the produced nodes will be set as
      `Precious`.
    - *noclean* (`boolean`) - if `True`, the produced nodes will be excluded
      from `Clean`.
https://github.com/nuitka/nuitka/blob/4c5161620ea8f0f1c93a1d6be79e7e6eda7161d4/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/GettextCommon.py#L60-L80
__revision__ = "src/engine/SCons/Tool/GettextCommon.py 2014/07/05 09:42:21 garyo"

import SCons.Warnings
import re


class XgettextToolWarning(SCons.Warnings.Warning):
    pass


class XgettextNotFound(XgettextToolWarning):
    pass


class MsginitToolWarning(SCons.Warnings.Warning):
    pass


class MsginitNotFound(MsginitToolWarning):
    pass


class MsgmergeToolWarning(SCons.Warnings.Warning):
    pass


class MsgmergeNotFound(MsgmergeToolWarning):
    pass


class MsgfmtToolWarning(SCons.Warnings.Warning):
    pass


class MsgfmtNotFound(MsgfmtToolWarning):
    pass


SCons.Warnings.enableWarningClass(XgettextToolWarning)
SCons.Warnings.enableWarningClass(XgettextNotFound)
SCons.Warnings.enableWarningClass(MsginitToolWarning)
SCons.Warnings.enableWarningClass(MsginitNotFound)
SCons.Warnings.enableWarningClass(MsgmergeToolWarning)
SCons.Warnings.enableWarningClass(MsgmergeNotFound)
SCons.Warnings.enableWarningClass(MsgfmtToolWarning)
SCons.Warnings.enableWarningClass(MsgfmtNotFound)


class _POTargetFactory(object):
Apache License 2.0
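A hedged sketch of constructing the factory inside an SConstruct, assuming a standalone SCons 2.x installation rather than Nuitka's inline copy; the alias name is illustrative. Nodes produced through such a factory would be marked Precious, excluded from Clean, dropped from the default target, and attached to the alias.

from SCons.Environment import Environment
from SCons.Tool.GettextCommon import _POTargetFactory

env = Environment(tools=[])
factory = _POTargetFactory(env, nodefault=True, alias='po-update',
                           precious=True, noclean=True)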
cgatoxford/cgatpipelines
obsolete/pipeline_transcriptome.py
buildRepeatTrack
python
def buildRepeatTrack(infile, outfile):
    nrepeats = 0
    for gff in GTF.iterator(gzip.open(infile, "r")):
        nrepeats += 1
    sample = set(
        random.sample(range(nrepeats), PARAMS["ancestral_repeats_samplesize"]))
    outf = gzip.open(outfile, "w")
    gtf = GTF.Entry()
    for x, gff in enumerate(GTF.iterator(gzip.open(infile, "r"))):
        if x not in sample:
            continue
        gtf.fromGTF(gff, "%08i" % x, "%08i" % x)
        outf.write("%s\n" % str(gtf))
    outf.close()

    E.debug("created sample of %i repeats out of %i in %s" %
            (len(sample), nrepeats, outfile))
build a repeat track as negative control.
https://github.com/cgatoxford/cgatpipelines/blob/a34d460b5fc64984f6da0acb18aee43c5e02d5fc/obsolete/pipeline_transcriptome.py#L231-L250
import sys
import shutil
import itertools
import random
import re
import glob
import os
import gzip

from ruffus import *
import sqlite3

import CGAT.Experiment as E
import CGATPipelines.Pipeline as P
import CGAT.IOTools as IOTools
import CGAT.Database as Database
import CGAT.GTF as GTF
import CGATPipelines.PipelineTracks as PipelineTracks

P.getParameters(
    ["%s/pipeline.ini" % os.path.splitext(__file__)[0],
     "../pipeline.ini",
     "pipeline.ini"])

PARAMS = P.PARAMS

USECLUSTER = True

PARAMS_ANNOTATIONS = P.peekParameters(
    PARAMS["annotations_dir"],
    "pipeline_annotations.py")

PARAMS_ANCESTRAL_REPEATS = P.peekParameters(
    PARAMS["ancestral_repeats_dir"],
    "pipeline_ancestral_repeats.py")

TRACKS = PipelineTracks.Tracks(PipelineTracks.Sample).loadFromDirectory(
    glob.glob("*.gtf.gz"),
    "(\S+).gtf.gz",
    exclude=("repeats.gtf.gz", "introns.gtf.gz", "merged.gtf.gz"))

TRACKS_CONTROL = PipelineTracks.Tracks(PipelineTracks.Sample).loadFromDirectory(
    ("repeats.gtf.gz", "introns.gtf.gz"),
    "(\S+).gtf.gz")

TRACKS_META = PipelineTracks.Tracks(PipelineTracks.Sample).loadFromDirectory(
    ("merged.gtf.gz",),
    "(\S+).gtf.gz")

TRACKS_GENESETS = PipelineTracks.Tracks(PipelineTracks.Sample).loadFromDirectory(
    ("genes.gtf.gz",),
    "(\S+).gtf.gz")

TRACKS_WITH_CONTROLS = TRACKS + TRACKS_CONTROL
TRACKS_OVERLAP = TRACKS_META + TRACKS_GENESETS

if os.path.exists("pipeline_conf.py"):
    L.info("reading additional configuration from pipeline_conf.py")
    exec(compile(open("pipeline_conf.py").read(), "pipeline_conf.py", 'exec'))


def getSourceTrack(track, all_tracks):
    if len(all_tracks) == 0:
        return None

    all_tracks = [re.sub("\..*$", "", x) for x in all_tracks]
    track = re.sub("\..*$", "", track)

    if len(all_tracks) == 1:
        if len(os.path.commonprefix((track, all_tracks[0]))) > 0:
            return all_tracks[0]
        else:
            return None

    prefixes = [t for t in all_tracks if len(
        os.path.commonprefix((track, t))) > 3]
    prefixes.sort(key=lambda x: len(x))
    return prefixes[0]


def getRelatedTracks(track, all_tracks):
    source = getSourceTrack(track, all_tracks)

    if not source:
        source = track

    related = set([x for x in all_tracks if x.startswith(source)])
    if track not in related:
        related.add(track)

    for x in related:
        if x in EXPERIMENTAL_TRACKS:
            related.add(PARAMS["merged"])
            break

    return list(related)


@files(((os.path.join(PARAMS["ancestral_repeats_dir"],
                      PARAMS_ANCESTRAL_REPEATS["interface_rates_query_gff"]),
         "repeats.gtf.gz"),))
MIT License
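The same two-pass sampling idea as buildRepeatTrack, restated as a standalone, pipeline-free sketch: count the records, draw a fixed-size random index set, then re-read and keep only the sampled records. File names and the sample size are examples.

import gzip
import random

def sample_gzip_lines(infile, outfile, samplesize=1000):
    # Pass 1: count records so random.sample can draw without replacement.
    with gzip.open(infile, "rt") as inf:
        total = sum(1 for _ in inf)
    keep = set(random.sample(range(total), min(samplesize, total)))
    # Pass 2: re-read and emit only the sampled records.
    with gzip.open(infile, "rt") as inf, gzip.open(outfile, "wt") as outf:
        for idx, line in enumerate(inf):
            if idx in keep:
                outf.write(line)

sample_gzip_lines("repeats.gff.gz", "repeats_sample.gff.gz")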
cyang-cityu/metacorrection
tools/train_meta.py
main
python
def main():
    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
    if not os.path.exists(args.log_dir + '/result'):
        os.makedirs(args.log_dir + '/result')

    best_mIoU = 0
    mIoU = 0

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    w, h = map(int, args.input_size.split(','))
    input_size = (w, h)

    w, h = map(int, args.input_size_target.split(','))
    input_size_target = (w, h)

    cudnn.enabled = True

    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)

    metaloader = data.DataLoader(
        GTA5DataSet(args.data_dir, args.data_list,
                    max_iters=args.num_steps * args.iter_size * args.batch_size,
                    crop_size=input_size_target,
                    scale=False, mirror=args.random_mirror, mean=IMG_MEAN),
        batch_size=args.update_f * args.batch_size,
        shuffle=True, num_workers=args.num_workers, pin_memory=True)

    targetloader = data.DataLoader(
        cityscapesPseudo(args.data_dir_target, args.data_list_target,
                         max_iters=args.num_steps * args.iter_size * args.batch_size,
                         crop_size=input_size_target,
                         scale=False, mirror=args.random_mirror, mean=IMG_MEAN),
        batch_size=args.batch_size,
        shuffle=True, num_workers=args.num_workers, pin_memory=True)

    targetloader_iter = enumerate(targetloader)

    main_model = build_model(args)
    saved_state_dict = torch.load(args.restore_from)
    pretrained_dict = {k: v for k, v in saved_state_dict.items() if k in main_model.state_dict()}
    main_model.load_state_dict(pretrained_dict)

    optimizer = optim.SGD(main_model.optim_parameters(args),
                          lr=args.learning_rate, momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=(h, w), mode='bilinear', align_corners=True)
    interp_target = nn.Upsample(size=(h, w), mode='bilinear', align_corners=True)

    if args.tensorboard:
        if not os.path.exists(args.log_dir):
            os.makedirs(args.log_dir)
        writer = SummaryWriter(args.log_dir)

    for i_iter in range(args.num_steps):
        if args.is_meta:
            main_model.train()
            l_f_meta = 0
            l_g_meta = 0
            l_f = 0

            optimizer.zero_grad()
            adjust_learning_rate(optimizer, i_iter)

            meta_net = Res_Deeplab(num_classes=args.num_classes)
            meta_net.load_state_dict(main_model.state_dict())
            meta_net.cuda()

            _, batch = targetloader_iter.__next__()
            image, label, _, _ = batch
            image = to_var(image, requires_grad=False)
            label = to_var(label, requires_grad=False)

            T1 = to_var(torch.eye(19, 19))
            T2 = to_var(torch.eye(19, 19))

            y_f_hat1, y_f_hat2 = meta_net(image)
            y_f_hat1 = torch.softmax(interp_target(y_f_hat1), dim=1).permute(0, 2, 3, 1).contiguous().view(-1, args.num_classes)
            y_f_hat2 = torch.softmax(interp_target(y_f_hat2), dim=1).permute(0, 2, 3, 1).contiguous().view(-1, args.num_classes)
            pre1 = torch.mm(y_f_hat1, T1).view(args.batch_size, h, w, args.num_classes).permute(0, 3, 1, 2)
            pre2 = torch.mm(y_f_hat2, T2).view(args.batch_size, h, w, args.num_classes).permute(0, 3, 1, 2)
            l_f_meta = loss_calc(pre2, label) + 0.1 * loss_calc(pre1, label)

            meta_net.zero_grad()
            grads = torch.autograd.grad(l_f_meta, (meta_net.params()), create_graph=True)
            meta_net.update_params(1e-3, source_params=grads)

            x_val, y_val, _, _ = next(iter(metaloader))
            x_val = to_var(x_val, requires_grad=False)
            y_val = to_var(y_val, requires_grad=False)
            meta_source = obtain_meta(x_val)
            y_val[meta_source] = 255

            y_g_hat1, y_g_hat2 = meta_net(x_val)
            y_g_hat1 = torch.softmax(interp(y_g_hat1), dim=1)
            y_g_hat2 = torch.softmax(interp(y_g_hat2), dim=1)
            l_g_meta = loss_calc(y_g_hat2, y_val) + 0.1 * loss_calc(y_g_hat1, y_val)

            grad_eps1 = torch.autograd.grad(l_g_meta, T1, only_inputs=True, retain_graph=True)[0]
            grad_eps2 = torch.autograd.grad(l_g_meta, T2, only_inputs=True)[0]

            grad_eps1 = grad_eps1 / torch.max(grad_eps1)
            T1 = torch.clamp(T1 - 0.11 * grad_eps1, min=0)
            norm_c = torch.sum(T1, 1)
            for j in range(args.num_classes):
                if norm_c[j] != 0:
                    T1[j, :] /= norm_c[j]

            grad_eps2 = grad_eps2 / torch.max(grad_eps2)
            T2 = torch.clamp(T2 - 0.11 * grad_eps2, min=0)
            norm_c = torch.sum(T2, 1)
            for j in range(args.num_classes):
                if norm_c[j] != 0:
                    T2[j, :] /= norm_c[j]

            y_f_hat1, y_f_hat2 = main_model(image)
            y_f_hat1 = torch.softmax(interp_target(y_f_hat1), dim=1).permute(0, 2, 3, 1).contiguous().view(-1, args.num_classes)
            y_f_hat2 = torch.softmax(interp_target(y_f_hat2), dim=1).permute(0, 2, 3, 1).contiguous().view(-1, args.num_classes)
            pre1 = torch.mm(y_f_hat1, T1).view(args.batch_size, h, w, args.num_classes).permute(0, 3, 1, 2)
            pre2 = torch.mm(y_f_hat2, T2).view(args.batch_size, h, w, args.num_classes).permute(0, 3, 1, 2)
            l_f = loss_calc(pre2, label) + 0.1 * loss_calc(pre1, label)

            optimizer.zero_grad()
            l_f.backward()
            optimizer.step()

            if args.tensorboard:
                scalar_info = {
                    'loss_g_meta': l_g_meta.item(),
                    'loss_f_meta': l_f_meta.item(),
                    'loss_f': l_f.item(),
                }
                if i_iter % 10 == 0:
                    for key, val in scalar_info.items():
                        writer.add_scalar(key, val, i_iter)

            print('exp = {}'.format(args.log_dir))
            print(
                'iter = {0:8d}/{1:8d}, loss_g_meta = {2:.3f} loss_f_meta = {3:.3f} loss_f = {4:.3f}'.format(
                    i_iter, args.num_steps, l_g_meta.item(), l_f_meta.item(), l_f.item()))

        if i_iter >= args.num_steps_stop - 1:
            print('save model ...')
            torch.save(main_model.state_dict(), osp.join(args.log_dir, 'GTA5_' + str(args.num_steps_stop) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter > 0:
            now = datetime.datetime.now()
            print(now.strftime("%Y-%m-%d %H:%M:%S"),
                  ' Begin evaluation on iter {0:8d}/{1:8d} '.format(i_iter, args.num_steps))
            mIoU = evaluate(main_model, pred_dir=args.log_dir + '/result')
            writer.add_scalar('mIoU', mIoU, i_iter)
            print('Finish Evaluation: ' + time.asctime(time.localtime(time.time())))
            if mIoU > best_mIoU:
                best_mIoU = mIoU
                torch.save(main_model.state_dict(), osp.join(args.log_dir, 'MetaCorrection_best.pth'))

    if args.tensorboard:
        writer.close()
Create the model and start the training.
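A minimal, self-contained sketch of the transition-matrix correction step used in main() above: after the meta-gradient step, T is clamped to be non-negative and each row is re-normalized so it remains a valid label-transition distribution. The 19-class setting and the 0.11 step size mirror the script; the gradient tensor here is synthetic.

import torch

num_classes = 19
T = torch.eye(num_classes)                         # identity init, as in main()
grad_eps = torch.randn(num_classes, num_classes)   # stand-in for the autograd.grad result

grad_eps = grad_eps / torch.max(grad_eps)          # scale by the largest entry
T = torch.clamp(T - 0.11 * grad_eps, min=0)        # gradient step, keep entries >= 0
norm_c = torch.sum(T, 1)                           # row sums
for j in range(num_classes):
    if norm_c[j] != 0:
        T[j, :] /= norm_c[j]                       # each row sums to 1 again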
https://github.com/cyang-cityu/metacorrection/blob/5a0e91b6bdfaaceb61e87690a891bf03d45e3211/tools/train_meta.py#L196-L363
import argparse
import torch
import torch.nn as nn
from torch.utils import data, model_zoo
import numpy as np
import pickle
from torch.autograd import Variable
import torch.optim as optim
import scipy.misc
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import sys
import os
import os.path as osp
import random
from tensorboardX import SummaryWriter

import _init_paths
from evaluate_cityscapes import evaluate
from nets.deeplab_multi import DeeplabMulti
from nets.meta_deeplab_multi import Res_Deeplab
from nets.discriminator import FCDiscriminator
from utils.loss import CrossEntropy2d
from datasets.gta5_dataset import GTA5DataSet
from datasets.cityscapes_dataset import cityscapesPseudo
import datetime
import time

IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)

MODEL = 'LTIR'
BATCH_SIZE = 1
ITER_SIZE = 1
NUM_WORKERS = 4
DATA_DIRECTORY = '/home/cyang53/CED/Data/UDA_Natural/GTA5'
DATA_LIST_PATH = '/home/cyang53/CED/Ours/MetaCorrection-CVPR/datasets/gta5_list/train.lst'
IGNORE_LABEL = 255
INPUT_SIZE = '1024, 512'
DATA_DIRECTORY_TARGET = '/home/cyang53/CED/Data/UDA_Natural/Cityscapes'
DATA_LIST_PATH_TARGET = '/home/cyang53/CED/Ours/MetaCorrection-CVPR/datasets/cityscapes_list/pseudo_ltir_new.lst'
INPUT_SIZE_TARGET = '1024, 512'
LEARNING_RATE = 2.5e-4
MOMENTUM = 0.9
NUM_CLASSES = 19
NUM_STEPS = 250000
NUM_STEPS_STOP = 150000
POWER = 0.9
RANDOM_SEED = 1234
RESTORE_FROM = '/home/cyang53/CED/Ours/MetaCorrection-CVPR/snapshots/Pseudo_LTIR_best.pth'
SAVE_PRED_EVERY = 1000
WEIGHT_DECAY = 0.0005
LOG_DIR = '/home/cyang53/CED/Ours/MetaCorrection-CVPR/log/ltir_meta_debug'
LAMBDA_SEG = 0.1
GPU = '1'
TARGET = 'cityscapes'
SET = 'train'
T_WEIGHT = 0.11
IS_META = True
UPDATA_F = 1


def get_arguments():
    parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
    parser.add_argument("--model", type=str, default=MODEL,
                        help="available options : DeepLab")
    parser.add_argument("--target", type=str, default=TARGET,
                        help="available options : cityscapes")
    parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--iter-size", type=int, default=ITER_SIZE,
                        help="Accumulate gradients for ITER_SIZE iterations.")
    parser.add_argument("--num-workers", type=int, default=NUM_WORKERS,
                        help="number of workers for multithread dataloading.")
    parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
                        help="Path to the directory containing the source dataset.")
    parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
                        help="Path to the file listing the images in the source dataset.")
    parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                        help="The index of the label to ignore during the training.")
    parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
                        help="Comma-separated string with height and width of source images.")
    parser.add_argument("--data-dir-target", type=str, default=DATA_DIRECTORY_TARGET,
                        help="Path to the directory containing the target dataset.")
    parser.add_argument("--data-list-target", type=str, default=DATA_LIST_PATH_TARGET,
                        help="Path to the file listing the images in the target dataset.")
    parser.add_argument("--input-size-target", type=str, default=INPUT_SIZE_TARGET,
                        help="Comma-separated string with height and width of target images.")
    parser.add_argument("--is-training", action="store_true",
                        help="Whether to updates the running means and variances during the training.")
    parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--lambda-seg", type=float, default=LAMBDA_SEG,
                        help="lambda_seg.")
    parser.add_argument("--momentum", type=float, default=MOMENTUM,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--is-meta", type=bool, default=IS_META,
                        help="Whether to update T")
    parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--num-steps", type=int, default=NUM_STEPS,
                        help="Number of training steps.")
    parser.add_argument("--num-steps-stop", type=int, default=NUM_STEPS_STOP,
                        help="Number of training steps for early stopping.")
    parser.add_argument("--power", type=float, default=POWER,
                        help="Decay parameter to compute the learning rate.")
    parser.add_argument("--random-mirror", action="store_true",
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random-scale", action="store_true",
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
                        help="Random seed to have reproducible results.")
    parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                        help="Where restore model parameters from.")
    parser.add_argument("--t-weight", type=float, default=T_WEIGHT,
                        help="grad weight to correct T.")
    parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY,
                        help="Save summaries and checkpoint every often.")
    parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
                        help="Regularisation parameter for L2-loss.")
    parser.add_argument("--tensorboard", action='store_true', default=True,
                        help="choose whether to use tensorboard.")
    parser.add_argument("--log-dir", type=str, default=LOG_DIR,
                        help="Path to the directory of log.")
    parser.add_argument("--gpu", type=str, default=GPU,
                        help="gpu id to run.")
    parser.add_argument("--set", type=str, default=SET,
                        help="choose adaptation set.")
    parser.add_argument("--update-f", type=int, default=UPDATA_F,
                        help="update frequency for T.")
    parser.add_argument("--uncertainty", type=bool, default=True,
                        help="choose adaptation set.")
    return parser.parse_args()


args = get_arguments()


def lr_poly(base_lr, iter, max_iter, power):
    return base_lr * ((1 - float(iter) / max_iter) ** (power))


def adjust_learning_rate(optimizer, i_iter):
    lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    if len(optimizer.param_groups) > 1:
        optimizer.param_groups[1]['lr'] = lr * 10


def adjust_learning_rate_D(optimizer, i_iter):
    lr = lr_poly(args.learning_rate_D, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    if len(optimizer.param_groups) > 1:
        optimizer.param_groups[1]['lr'] = lr * 10


def build_model(args):
    net = Res_Deeplab(num_classes=args.num_classes)
    if torch.cuda.is_available():
        net.cuda()
        torch.backends.cudnn.benchmark = True
    return net


def to_var(x, requires_grad=True):
    x = x.cuda()
    return Variable(x, requires_grad=requires_grad)


def loss_calc(pred, label):
    label = Variable(label.long()).cuda()
    criterion = CrossEntropy2d(is_softmax=False).cuda()
    return criterion(pred, label)


def obtain_meta(source_img):
    seg_model = DeeplabMulti(num_classes=19).cuda()
    seg_model.load_state_dict(torch.load('/home/cyang53/CED/Baseline/AdaptSegNet-CVPR2018/snapshots/GTA5_best.pth'))
    dis_model = FCDiscriminator(num_classes=19).cuda()
    dis_model.load_state_dict(torch.load('/home/cyang53/CED/Ours/AdaptSegNet-CVPR2018/snapshots/GTA5_best_D2.pth'))
    seg_model.eval()
    dis_model.eval()
    output1, output2 = seg_model(source_img)
    meta_map = dis_model(F.softmax(output2, dim=1)).cpu().data[0]
    source_like = torch.where(meta_map < 0.5)
    return source_like
MIT License
brython-dev/brython
www/src/Lib/tarfile.py
TarInfo.linkpath
python
def linkpath(self):
    return self.linkname
In pax headers, "linkname" is called "linkpath".
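A hedged usage sketch against the standard-library TarInfo API that this copy mirrors; the member names below are illustrative.

import tarfile

info = tarfile.TarInfo(name="docs/readme")
info.type = tarfile.SYMTYPE
info.linkname = "README.md"
assert info.linkpath == info.linkname   # the property simply forwards to linkname
info.linkpath = "CHANGELOG.md"          # the matching setter writes linkname back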
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/tarfile.py#L784-L786
version = "0.9.0" __author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" __credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." from builtins import open as bltn_open import sys import os import io import shutil import stat import time import struct import copy import re try: import pwd except ImportError: pwd = None try: import grp except ImportError: grp = None symlink_exception = (AttributeError, NotImplementedError) try: symlink_exception += (OSError,) except NameError: pass __all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError", "CompressionError", "StreamError", "ExtractError", "HeaderError", "ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT", "DEFAULT_FORMAT", "open"] NUL = b"\0" BLOCKSIZE = 512 RECORDSIZE = BLOCKSIZE * 20 GNU_MAGIC = b"ustar \0" POSIX_MAGIC = b"ustar\x0000" LENGTH_NAME = 100 LENGTH_LINK = 100 LENGTH_PREFIX = 155 REGTYPE = b"0" AREGTYPE = b"\0" LNKTYPE = b"1" SYMTYPE = b"2" CHRTYPE = b"3" BLKTYPE = b"4" DIRTYPE = b"5" FIFOTYPE = b"6" CONTTYPE = b"7" GNUTYPE_LONGNAME = b"L" GNUTYPE_LONGLINK = b"K" GNUTYPE_SPARSE = b"S" XHDTYPE = b"x" XGLTYPE = b"g" SOLARIS_XHDTYPE = b"X" USTAR_FORMAT = 0 GNU_FORMAT = 1 PAX_FORMAT = 2 DEFAULT_FORMAT = PAX_FORMAT SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) REGULAR_TYPES = (REGTYPE, AREGTYPE, CONTTYPE, GNUTYPE_SPARSE) GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) PAX_FIELDS = ("path", "linkpath", "size", "mtime", "uid", "gid", "uname", "gname") PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"} PAX_NUMBER_FIELDS = { "atime": float, "ctime": float, "mtime": float, "uid": int, "gid": int, "size": int } if os.name == "nt": ENCODING = "utf-8" else: ENCODING = sys.getfilesystemencoding() def stn(s, length, encoding, errors): s = s.encode(encoding, errors) return s[:length] + (length - len(s)) * NUL def nts(s, encoding, errors): p = s.find(b"\0") if p != -1: s = s[:p] return s.decode(encoding, errors) def nti(s): if s[0] in (0o200, 0o377): n = 0 for i in range(len(s) - 1): n <<= 8 n += s[i + 1] if s[0] == 0o377: n = -(256 ** (len(s) - 1) - n) else: try: s = nts(s, "ascii", "strict") n = int(s.strip() or "0", 8) except ValueError: raise InvalidHeaderError("invalid header") return n def itn(n, digits=8, format=DEFAULT_FORMAT): original_n = n n = int(n) if 0 <= n < 8 ** (digits - 1): s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1): if n >= 0: s = bytearray([0o200]) else: s = bytearray([0o377]) n = 256 ** digits + n for i in range(digits - 1): s.insert(1, n & 0o377) n >>= 8 else: raise ValueError("overflow in number field") return s def calc_chksums(buf): unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf)) signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf)) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None): bufsize = bufsize or 16 * 1024 if length == 0: return if length is None: shutil.copyfileobj(src, dst, bufsize) return blocks, remainder = divmod(length, bufsize) for b in range(blocks): buf = src.read(bufsize) if len(buf) < bufsize: raise exception("unexpected end of data") dst.write(buf) if remainder != 0: buf = src.read(remainder) if len(buf) < remainder: raise exception("unexpected end of data") dst.write(buf) return def _safe_print(s): encoding = getattr(sys.stdout, 'encoding', None) if encoding is not 
None: s = s.encode(encoding, 'backslashreplace').decode(encoding) print(s, end=' ') class TarError(Exception): pass class ExtractError(TarError): pass class ReadError(TarError): pass class CompressionError(TarError): pass class StreamError(TarError): pass class HeaderError(TarError): pass class EmptyHeaderError(HeaderError): pass class TruncatedHeaderError(HeaderError): pass class EOFHeaderError(HeaderError): pass class InvalidHeaderError(HeaderError): pass class SubsequentHeaderError(HeaderError): pass class _LowLevelFile: def __init__(self, name, mode): mode = { "r": os.O_RDONLY, "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, }[mode] if hasattr(os, "O_BINARY"): mode |= os.O_BINARY self.fd = os.open(name, mode, 0o666) def close(self): os.close(self.fd) def read(self, size): return os.read(self.fd, size) def write(self, s): os.write(self.fd, s) class _Stream: def __init__(self, name, mode, comptype, fileobj, bufsize): self._extfileobj = True if fileobj is None: fileobj = _LowLevelFile(name, mode) self._extfileobj = False if comptype == '*': fileobj = _StreamProxy(fileobj) comptype = fileobj.getcomptype() self.name = name or "" self.mode = mode self.comptype = comptype self.fileobj = fileobj self.bufsize = bufsize self.buf = b"" self.pos = 0 self.closed = False try: if comptype == "gz": try: import zlib except ImportError: raise CompressionError("zlib module is not available") from None self.zlib = zlib self.crc = zlib.crc32(b"") if mode == "r": self._init_read_gz() self.exception = zlib.error else: self._init_write_gz() elif comptype == "bz2": try: import bz2 except ImportError: raise CompressionError("bz2 module is not available") from None if mode == "r": self.dbuf = b"" self.cmp = bz2.BZ2Decompressor() self.exception = OSError else: self.cmp = bz2.BZ2Compressor() elif comptype == "xz": try: import lzma except ImportError: raise CompressionError("lzma module is not available") from None if mode == "r": self.dbuf = b"" self.cmp = lzma.LZMADecompressor() self.exception = lzma.LZMAError else: self.cmp = lzma.LZMACompressor() elif comptype != "tar": raise CompressionError("unknown compression type %r" % comptype) except: if not self._extfileobj: self.fileobj.close() self.closed = True raise def __del__(self): if hasattr(self, "closed") and not self.closed: self.close() def _init_write_gz(self): self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0) timestamp = struct.pack("<L", int(time.time())) self.__write(b"\037\213\010\010" + timestamp + b"\002\377") if self.name.endswith(".gz"): self.name = self.name[:-3] self.name = os.path.basename(self.name) self.__write(self.name.encode("iso-8859-1", "replace") + NUL) def write(self, s): if self.comptype == "gz": self.crc = self.zlib.crc32(s, self.crc) self.pos += len(s) if self.comptype != "tar": s = self.cmp.compress(s) self.__write(s) def __write(self, s): self.buf += s while len(self.buf) > self.bufsize: self.fileobj.write(self.buf[:self.bufsize]) self.buf = self.buf[self.bufsize:] def close(self): if self.closed: return self.closed = True try: if self.mode == "w" and self.comptype != "tar": self.buf += self.cmp.flush() if self.mode == "w" and self.buf: self.fileobj.write(self.buf) self.buf = b"" if self.comptype == "gz": self.fileobj.write(struct.pack("<L", self.crc)) self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF)) finally: if not self._extfileobj: self.fileobj.close() def _init_read_gz(self): self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) self.dbuf = b"" if self.__read(2) 
!= b"\037\213": raise ReadError("not a gzip file") if self.__read(1) != b"\010": raise CompressionError("unsupported compression method") flag = ord(self.__read(1)) self.__read(6) if flag & 4: xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) self.read(xlen) if flag & 8: while True: s = self.__read(1) if not s or s == NUL: break if flag & 16: while True: s = self.__read(1) if not s or s == NUL: break if flag & 2: self.__read(2) def tell(self): return self.pos def seek(self, pos=0): if pos - self.pos >= 0: blocks, remainder = divmod(pos - self.pos, self.bufsize) for i in range(blocks): self.read(self.bufsize) self.read(remainder) else: raise StreamError("seeking backwards is not allowed") return self.pos def read(self, size): assert size is not None buf = self._read(size) self.pos += len(buf) return buf def _read(self, size): if self.comptype == "tar": return self.__read(size) c = len(self.dbuf) t = [self.dbuf] while c < size: if self.buf: buf = self.buf self.buf = b"" else: buf = self.fileobj.read(self.bufsize) if not buf: break try: buf = self.cmp.decompress(buf) except self.exception as e: raise ReadError("invalid compressed data") from e t.append(buf) c += len(buf) t = b"".join(t) self.dbuf = t[size:] return t[:size] def __read(self, size): c = len(self.buf) t = [self.buf] while c < size: buf = self.fileobj.read(self.bufsize) if not buf: break t.append(buf) c += len(buf) t = b"".join(t) self.buf = t[size:] return t[:size] class _StreamProxy(object): def __init__(self, fileobj): self.fileobj = fileobj self.buf = self.fileobj.read(BLOCKSIZE) def read(self, size): self.read = self.fileobj.read return self.buf def getcomptype(self): if self.buf.startswith(b"\x1f\x8b\x08"): return "gz" elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY": return "bz2" elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")): return "xz" else: return "tar" def close(self): self.fileobj.close() class _FileInFile(object): def __init__(self, fileobj, offset, size, blockinfo=None): self.fileobj = fileobj self.offset = offset self.size = size self.position = 0 self.name = getattr(fileobj, "name", None) self.closed = False if blockinfo is None: blockinfo = [(0, size)] self.map_index = 0 self.map = [] lastpos = 0 realpos = self.offset for offset, size in blockinfo: if offset > lastpos: self.map.append((False, lastpos, offset, None)) self.map.append((True, offset, offset + size, realpos)) realpos += size lastpos = offset + size if lastpos < self.size: self.map.append((False, lastpos, self.size, None)) def flush(self): pass def readable(self): return True def writable(self): return False def seekable(self): return self.fileobj.seekable() def tell(self): return self.position def seek(self, position, whence=io.SEEK_SET): if whence == io.SEEK_SET: self.position = min(max(position, 0), self.size) elif whence == io.SEEK_CUR: if position < 0: self.position = max(self.position + position, 0) else: self.position = min(self.position + position, self.size) elif whence == io.SEEK_END: self.position = max(min(self.size + position, self.size), 0) else: raise ValueError("Invalid argument") return self.position def read(self, size=None): if size is None: size = self.size - self.position else: size = min(size, self.size - self.position) buf = b"" while size > 0: while True: data, start, stop, offset = self.map[self.map_index] if start <= self.position < stop: break else: self.map_index += 1 if self.map_index == len(self.map): self.map_index = 0 length = min(size, stop - self.position) if data: 
self.fileobj.seek(offset + (self.position - start)) b = self.fileobj.read(length) if len(b) != length: raise ReadError("unexpected end of data") buf += b else: buf += NUL * length size -= length self.position += length return buf def readinto(self, b): buf = self.read(len(b)) b[:len(buf)] = buf return len(buf) def close(self): self.closed = True class ExFileObject(io.BufferedReader): def __init__(self, tarfile, tarinfo): fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, tarinfo.size, tarinfo.sparse) super().__init__(fileobj) class TarInfo(object): __slots__ = dict( name = 'Name of the archive member.', mode = 'Permission bits.', uid = 'User ID of the user who originally stored this member.', gid = 'Group ID of the user who originally stored this member.', size = 'Size in bytes.', mtime = 'Time of last modification.', chksum = 'Header checksum.', type = ('File type. type is usually one of these constants: ' 'REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, ' 'CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'), linkname = ('Name of the target file name, which is only present ' 'in TarInfo objects of type LNKTYPE and SYMTYPE.'), uname = 'User name.', gname = 'Group name.', devmajor = 'Device major number.', devminor = 'Device minor number.', offset = 'The tar header starts here.', offset_data = "The file's data starts here.", pax_headers = ('A dictionary containing key-value pairs of an ' 'associated pax extended header.'), sparse = 'Sparse member information.', tarfile = None, _sparse_structs = None, _link_target = None, ) def __init__(self, name=""): self.name = name self.mode = 0o644 self.uid = 0 self.gid = 0 self.size = 0 self.mtime = 0 self.chksum = 0 self.type = REGTYPE self.linkname = "" self.uname = "" self.gname = "" self.devmajor = 0 self.devminor = 0 self.offset = 0 self.offset_data = 0 self.sparse = None self.pax_headers = {} @property def path(self): return self.name @path.setter def path(self, name): self.name = name @property
BSD 3-Clause New or Revised License
danielfrg/jupyterhub-kubernetes_spawner
kubernetes_spawner/swagger_client/models/v1_namespace.py
V1Namespace.api_version
python
def api_version(self):
    return self._api_version
Gets the api_version of this V1Namespace.
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values. More info:
http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources

:return: The api_version of this V1Namespace.
:rtype: str
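A minimal usage sketch for this generated model, assuming the matching api_version setter that swagger-codegen emits alongside the getter (the full class continues past the excerpt below); the value is illustrative.

from kubernetes_spawner.swagger_client.models.v1_namespace import V1Namespace

ns = V1Namespace()
ns.api_version = "v1"    # stored on ns._api_version by the generated setter
print(ns.api_version)    # -> "v1" via the getter shown above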
https://github.com/danielfrg/jupyterhub-kubernetes_spawner/blob/15a2b63ef719f8c3ff83221333f7de69c1495512/kubernetes_spawner/swagger_client/models/v1_namespace.py#L84-L92
from pprint import pformat
from six import iteritems


class V1Namespace(object):
    def __init__(self):
        self.swagger_types = {
            'kind': 'str',
            'api_version': 'str',
            'metadata': 'V1ObjectMeta',
            'spec': 'V1NamespaceSpec',
            'status': 'V1NamespaceStatus'
        }

        self.attribute_map = {
            'kind': 'kind',
            'api_version': 'apiVersion',
            'metadata': 'metadata',
            'spec': 'spec',
            'status': 'status'
        }

        self._kind = None
        self._api_version = None
        self._metadata = None
        self._spec = None
        self._status = None

    @property
    def kind(self):
        return self._kind

    @kind.setter
    def kind(self, kind):
        self._kind = kind

    @property
Apache License 2.0
dit/dit
dit/rate_distortion/plotting.py
BasePlotter.__init__
python
def __init__(self, *curves):
    self.curves = curves
Initialize the plotter.

Parameters
----------
curves : *{RDCurve, IBCurve}
    The curves to plot.
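A hedged construction sketch: BasePlotter is abstract (ABCMeta), so it is normally instantiated through a concrete subclass such as RDPlotter. The distribution is standard dit usage; the RDCurve call assumes its default keyword arguments suffice.

import dit
from dit.rate_distortion.curves import RDCurve
from dit.rate_distortion.plotting import RDPlotter

d = dit.Distribution(['0', '1'], [1 / 2, 1 / 2])
curve = RDCurve(d)          # assumed: constructor defaults are acceptable here
plotter = RDPlotter(curve)  # *curves is captured on plotter.curves
assert plotter.curves == (curve,)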
https://github.com/dit/dit/blob/2853cb13110c5a5b2fa7ad792e238e2177013da2/dit/rate_distortion/plotting.py#L64-L73
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from operator import attrgetter

import matplotlib.pyplot as plt
import numpy as np

from .curves import IBCurve, RDCurve

__all__ = (
    'IBPlotter',
    'RDPlotter',
)

Axis = namedtuple('Axis', ['data', 'limit', 'label'])


def _rescale_axes(ax, xmin=None, xmax=None, ymin=None, ymax=None):
    x_min, x_max = ax.get_xlim()
    y_min, y_max = ax.get_ylim()

    if xmin is not None:
        x_min = xmin
    if xmax is not None and not np.isnan(xmax):
        x_max = 1.05 * xmax
    if ymin is not None:
        y_min = ymin
    if ymax is not None and not np.isnan(ymax):
        y_max = 1.05 * ymax

    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)


class BasePlotter(metaclass=ABCMeta):
    _beta_axis = Axis(attrgetter('betas'), lambda _: None, r"$\beta$")
    _rank_axis = Axis(attrgetter('ranks'), attrgetter('_max_rank'), r"rank")
    _alphabet_axis = Axis(attrgetter('alphabets'), attrgetter('_max_rank'), r"$|\mathcal{A}|$")
    _distortion_axis = Axis(attrgetter('distortions'), attrgetter('_max_distortion'),
                            r"$\langle d(x, \hat{x}) \rangle$")
BSD 3-Clause New or Revised License
researchmm/lighttrack
lib/models/backbone/models/builder.py
ChildNetBuilder.__call__
python
def __call__(self, in_chs, model_block_args):
    if self.verbose:
        logging.info('Building model trunk with %d stages...' % len(model_block_args))
    self.in_chs = in_chs
    total_block_count = sum([len(x) for x in model_block_args])
    total_block_idx = 0
    current_stride = 2
    current_dilation = 1
    feature_idx = 0
    stages = []

    for stage_idx, stage_block_args in enumerate(model_block_args):
        last_stack = stage_idx == (len(model_block_args) - 1)
        if self.verbose:
            logging.info('Stack: {}'.format(stage_idx))
        assert isinstance(stage_block_args, list)

        blocks = []
        for block_idx, block_args in enumerate(stage_block_args):
            last_block = block_idx == (len(stage_block_args) - 1)
            extract_features = ''
            if self.verbose:
                logging.info(' Block: {}'.format(block_idx))

            assert block_args['stride'] in (1, 2)
            if block_idx >= 1:
                block_args['stride'] = 1

            do_extract = False
            if self.feature_location == 'pre_pwl':
                if last_block:
                    next_stage_idx = stage_idx + 1
                    if next_stage_idx >= len(model_block_args):
                        do_extract = True
                    else:
                        do_extract = model_block_args[next_stage_idx][0]['stride'] > 1
            elif self.feature_location == 'post_exp':
                if block_args['stride'] > 1 or (last_stack and last_block):
                    do_extract = True
            if do_extract:
                extract_features = self.feature_location

            next_dilation = current_dilation
            if block_args['stride'] > 1:
                next_output_stride = current_stride * block_args['stride']
                if next_output_stride > self.output_stride:
                    next_dilation = current_dilation * block_args['stride']
                    block_args['stride'] = 1
                    if self.verbose:
                        logging.info(' Converting stride to dilation to maintain output_stride=={}'.format(
                            self.output_stride))
                else:
                    current_stride = next_output_stride
            block_args['dilation'] = current_dilation
            if next_dilation != current_dilation:
                current_dilation = next_dilation

            block = self._make_block(block_args, total_block_idx, total_block_count)
            blocks.append(block)

            if extract_features:
                feature_module = block.feature_module(extract_features)
                if feature_module:
                    feature_module = 'blocks.{}.{}.'.format(stage_idx, block_idx) + feature_module
                feature_channels = block.feature_channels(extract_features)
                self.features[feature_idx] = dict(
                    name=feature_module,
                    num_chs=feature_channels
                )
                feature_idx += 1

            total_block_idx += 1

        stages.append(nn.Sequential(*blocks))
    return stages
Build the blocks

Args:
    in_chs: Number of input-channels passed to first block
    model_block_args: A list of lists, outer list defines stages, inner
        list contains strings defining block configuration(s)

Return:
    List of block stacks (each stack wrapped in nn.Sequential)
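A hedged usage sketch in this module's namespace: decode a small architecture definition with decode_arch_def (defined in this file) and build the trunk. The block strings follow the encoding parsed by _decode_block_str; channel counts and the act_layer choice are illustrative, and an act_layer must be supplied since _make_block asserts one exists.

import torch.nn as nn

arch_def = [
    ['ds_r1_k3_s1_c16'],       # stage 0: one depthwise-separable block
    ['ir_r2_k3_s2_e6_c24'],    # stage 1: two inverted-residual blocks
]
block_args = decode_arch_def(arch_def)
builder = ChildNetBuilder(act_layer=nn.ReLU)
stages = builder(in_chs=16, model_block_args=block_args)
trunk = nn.Sequential(*stages)   # each stage is already an nn.Sequential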
https://github.com/researchmm/lighttrack/blob/e94368aa80e924f8720887aa8f4fc23db074d3e7/lib/models/backbone/models/builder.py#L248-L334
import logging
import re
from collections.__init__ import OrderedDict
from copy import deepcopy

import torch.nn as nn

from .utils import _parse_ksize
from .units import *


def _decode_block_str(block_str):
    assert isinstance(block_str, str)
    ops = block_str.split('_')
    block_type = ops[0]
    ops = ops[1:]
    options = {}
    noskip = False
    for op in ops:
        if op == 'noskip':
            noskip = True
        elif op.startswith('n'):
            key = op[0]
            v = op[1:]
            if v == 're':
                value = nn.ReLU
            elif v == 'r6':
                value = nn.ReLU6
            elif v == 'sw':
                value = Swish
            else:
                continue
            options[key] = value
        else:
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value

    act_layer = options['n'] if 'n' in options else None
    exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
    pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
    fake_in_chs = int(options['fc']) if 'fc' in options else 0

    num_repeat = int(options['r'])
    if block_type == 'ir':
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            exp_kernel_size=exp_kernel_size,
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            exp_ratio=float(options['e']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_layer=act_layer,
            noskip=noskip,
        )
        if 'cc' in options:
            block_args['num_experts'] = int(options['cc'])
    elif block_type == 'ds' or block_type == 'dsa':
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_layer=act_layer,
            pw_act=block_type == 'dsa',
            noskip=block_type == 'dsa' or noskip,
        )
    elif block_type == 'cn':
        block_args = dict(
            block_type=block_type,
            kernel_size=int(options['k']),
            out_chs=int(options['c']),
            stride=int(options['s']),
            act_layer=act_layer,
        )
    else:
        assert False, 'Unknown block type (%s)' % block_type

    return block_args, num_repeat


def modify_block_args(block_args, kernel_size, exp_ratio):
    block_type = block_args['block_type']
    if block_type == 'cn':
        block_args['kernel_size'] = kernel_size
    elif block_type == 'er':
        block_args['exp_kernel_size'] = kernel_size
    else:
        block_args['dw_kernel_size'] = kernel_size
    if block_type == 'ir' or block_type == 'er':
        block_args['exp_ratio'] = exp_ratio
    return block_args


def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'):
    num_repeat = sum(repeats)
    if depth_trunc == 'round':
        num_repeat_scaled = max(1, round(num_repeat * depth_multiplier))
    else:
        num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier))

    repeats_scaled = []
    for r in repeats[::-1]:
        rs = max(1, round((r / num_repeat * num_repeat_scaled)))
        repeats_scaled.append(rs)
        num_repeat -= r
        num_repeat_scaled -= rs
    repeats_scaled = repeats_scaled[::-1]

    sa_scaled = []
    for ba, rep in zip(stack_args, repeats_scaled):
        sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
    return sa_scaled


def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1):
    arch_args = []
    for stack_idx, block_strings in enumerate(arch_def):
        assert isinstance(block_strings, list)
        stack_args = []
        repeats = []
        for block_str in block_strings:
            assert isinstance(block_str, str)
            ba, rep = _decode_block_str(block_str)
            if ba.get('num_experts', 0) > 0 and experts_multiplier > 1:
                ba['num_experts'] *= experts_multiplier
            stack_args.append(ba)
            repeats.append(rep)
        arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc))
    return arch_args


class ChildNetBuilder:
    def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None,
                 output_stride=32, pad_type='', act_layer=None, se_kwargs=None,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.,
                 feature_location='', verbose=False):
        self.channel_multiplier = channel_multiplier
        self.channel_divisor = channel_divisor
        self.channel_min = channel_min
        self.output_stride = output_stride
        self.pad_type = pad_type
        self.act_layer = act_layer
        self.se_kwargs = se_kwargs
        self.norm_layer = norm_layer
        self.norm_kwargs = norm_kwargs
        self.drop_path_rate = drop_path_rate
        self.feature_location = feature_location
        assert feature_location in ('pre_pwl', 'post_exp', '')
        self.verbose = verbose

        self.in_chs = None
        self.features = OrderedDict()

    def _round_channels(self, chs):
        return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min)

    def _make_block(self, ba, block_idx, block_count):
        drop_path_rate = self.drop_path_rate * block_idx / block_count
        bt = ba.pop('block_type')
        ba['in_chs'] = self.in_chs
        ba['out_chs'] = self._round_channels(ba['out_chs'])
        if 'fake_in_chs' in ba and ba['fake_in_chs']:
            ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs'])
        ba['norm_layer'] = self.norm_layer
        ba['norm_kwargs'] = self.norm_kwargs
        ba['pad_type'] = self.pad_type
        ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
        assert ba['act_layer'] is not None
        if bt == 'ir':
            ba['drop_path_rate'] = drop_path_rate
            ba['se_kwargs'] = self.se_kwargs
            if self.verbose:
                logging.info(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)))
            block = InvertedResidual(**ba)
        elif bt == 'ds' or bt == 'dsa':
            ba['drop_path_rate'] = drop_path_rate
            ba['se_kwargs'] = self.se_kwargs
            if self.verbose:
                logging.info(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)))
            block = DepthwiseSeparableConv(**ba)
        elif bt == 'cn':
            if self.verbose:
                logging.info(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)))
            block = ConvBnAct(**ba)
        else:
            assert False, 'Unknown block type (%s) while building model.' % bt
        self.in_chs = ba['out_chs']
        return block
MIT License
hyperledger/aries-cloudagent-python
aries_cloudagent/core/conductor.py
Conductor.setup
python
async def setup(self):
    context = await self.context_builder.build_context()

    await get_genesis_transactions(context.settings)

    self.root_profile, self.setup_public_did = await wallet_config(context)
    context = self.root_profile.context

    if not await ledger_config(
        self.root_profile, self.setup_public_did and self.setup_public_did.did
    ):
        LOGGER.warning("No ledger configured")

    self.inbound_transport_manager = InboundTransportManager(
        self.root_profile, self.inbound_message_router, self.handle_not_returned
    )
    await self.inbound_transport_manager.setup()
    context.injector.bind_instance(
        InboundTransportManager, self.inbound_transport_manager
    )

    self.outbound_transport_manager = OutboundTransportManager(
        self.root_profile, self.handle_not_delivered
    )
    await self.outbound_transport_manager.setup()

    self.dispatcher = Dispatcher(self.root_profile)
    await self.dispatcher.setup()

    wire_format = context.inject_or(BaseWireFormat)
    if wire_format and hasattr(wire_format, "task_queue"):
        wire_format.task_queue = self.dispatcher.task_queue

    if context.settings.get("multitenant.enabled"):
        context.injector.bind_provider(
            BaseMultitenantManager, MultitenantManagerProvider(self.root_profile)
        )

    context.injector.bind_instance(
        DocumentLoader, DocumentLoader(self.root_profile)
    )

    self.outbound_queue = get_outbound_queue(context.settings)

    if context.settings.get("admin.enabled"):
        try:
            admin_host = context.settings.get("admin.host", "0.0.0.0")
            admin_port = context.settings.get("admin.port", "80")
            self.admin_server = AdminServer(
                admin_host,
                admin_port,
                context,
                self.root_profile,
                self.outbound_message_router,
                self.webhook_router,
                self.stop,
                self.dispatcher.task_queue,
                self.get_stats,
            )
            context.injector.bind_instance(BaseAdminServer, self.admin_server)
        except Exception:
            LOGGER.exception("Unable to register admin server")
            raise

    collector = context.inject_or(Collector)
    if collector:
        collector.wrap(
            self,
            (
                "outbound_message_router",
            ),
        )
        collector.wrap(
            ConnectionManager,
            (
                "fetch_did_document",
                "find_inbound_connection",
            ),
        )
Initialize the global request context.
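A hedged sketch of driving setup() from an async entry point. DefaultContextBuilder is assumed to be the concrete ContextBuilder used elsewhere in ACA-Py, and the settings dict is illustrative.

import asyncio

from aries_cloudagent.config.default_context import DefaultContextBuilder
from aries_cloudagent.core.conductor import Conductor

async def boot():
    builder = DefaultContextBuilder(settings={"admin.enabled": False})
    conductor = Conductor(builder)
    await conductor.setup()   # builds context, wallet, transports, dispatcher

asyncio.run(boot())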
https://github.com/hyperledger/aries-cloudagent-python/blob/fec69f1a2301e4745fc9d40cea190050e3f595fa/aries_cloudagent/core/conductor.py#L89-L184
import hashlib
import json
import logging

from ..admin.base_server import BaseAdminServer
from ..admin.server import AdminResponder, AdminServer
from ..config.default_context import ContextBuilder
from ..config.injection_context import InjectionContext
from ..config.ledger import get_genesis_transactions, ledger_config
from ..config.logging import LoggingConfigurator
from ..config.wallet import wallet_config
from ..core.profile import Profile
from ..ledger.error import LedgerConfigError, LedgerTransactionError
from ..messaging.responder import BaseResponder
from ..multitenant.base import BaseMultitenantManager
from ..multitenant.manager_provider import MultitenantManagerProvider
from ..protocols.connections.v1_0.manager import (
    ConnectionManager,
    ConnectionManagerError,
)
from ..protocols.connections.v1_0.messages.connection_invitation import (
    ConnectionInvitation,
)
from ..protocols.coordinate_mediation.v1_0.manager import MediationManager
from ..protocols.coordinate_mediation.mediation_invite_store import MediationInviteStore
from ..protocols.out_of_band.v1_0.manager import OutOfBandManager
from ..protocols.out_of_band.v1_0.messages.invitation import HSProto, InvitationMessage
from ..storage.base import BaseStorage
from ..transport.inbound.manager import InboundTransportManager
from ..transport.inbound.message import InboundMessage
from ..transport.outbound.base import OutboundDeliveryError
from ..transport.outbound.manager import OutboundTransportManager, QueuedOutboundMessage
from ..transport.outbound.message import OutboundMessage
from ..transport.outbound.queue.base import BaseOutboundQueue
from ..transport.outbound.queue.loader import get_outbound_queue
from ..transport.outbound.status import OutboundSendStatus
from ..transport.wire_format import BaseWireFormat
from ..utils.stats import Collector
from ..utils.task_queue import CompletedTask, TaskQueue
from ..vc.ld_proofs.document_loader import DocumentLoader
from ..wallet.did_info import DIDInfo

from .dispatcher import Dispatcher

LOGGER = logging.getLogger(__name__)


class Conductor:
    def __init__(self, context_builder: ContextBuilder) -> None:
        self.admin_server = None
        self.context_builder = context_builder
        self.dispatcher: Dispatcher = None
        self.inbound_transport_manager: InboundTransportManager = None
        self.outbound_transport_manager: OutboundTransportManager = None
        self.root_profile: Profile = None
        self.setup_public_did: DIDInfo = None
        self.outbound_queue: BaseOutboundQueue = None

    @property
    def context(self) -> InjectionContext:
        return self.root_profile.context
Apache License 2.0
mhaack/home-assistant-config
config/custom_components/aarlo/pyaarlo/doorbell.py
ArloDoorBell.update_silent_mode
python
def update_silent_mode(self):
    self._arlo.be.notify(
        base=self.base_station,
        body={
            "action": "get",
            "resource": self.resource_id,
            "publishResponse": False,
        },
    )
Requests the latest silent mode settings. Queues a job that requests the info from Arlo.
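A hedged usage sketch: doorbell objects are normally obtained from a running PyArlo session rather than constructed directly. The import path assumes the vendored package layout above; credentials are placeholders.

from custom_components.aarlo.pyaarlo import PyArlo   # assumed import path

arlo = PyArlo(username="user@example.com", password="secret")
for doorbell in arlo.doorbells:
    doorbell.update_silent_mode()   # queues a "get" for the silent-mode state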
https://github.com/mhaack/home-assistant-config/blob/0958a01d8ed1d004be3e8c92ab3a3a260e850d20/config/custom_components/aarlo/pyaarlo/doorbell.py#L112-L124
from .constant import (
    BATTERY_KEY,
    BUTTON_PRESSED_KEY,
    CHIMES_KEY,
    CONNECTION_KEY,
    MODEL_WIRED_VIDEO_DOORBELL,
    MODEL_WIREFREE_VIDEO_DOORBELL,
    MOTION_DETECTED_KEY,
    SIGNAL_STR_KEY,
    SILENT_MODE_ACTIVE_KEY,
    SILENT_MODE_CALL_KEY,
    SILENT_MODE_KEY,
    SIREN_STATE_KEY,
)
from .device import ArloChildDevice


class ArloDoorBell(ArloChildDevice):
    def __init__(self, name, arlo, attrs):
        super().__init__(name, arlo, attrs)
        self._motion_time_job = None
        self._ding_time_job = None
        self._has_motion_detect = False
        self._chimes = {}

    def _motion_stopped(self):
        self._save_and_do_callbacks(MOTION_DETECTED_KEY, False)
        with self._lock:
            self._motion_time_job = None

    def _button_unpressed(self):
        self._save_and_do_callbacks(BUTTON_PRESSED_KEY, False)
        with self._lock:
            self._ding_time_job = None

    def _event_handler(self, resource, event):
        self._arlo.debug(self.name + " DOORBELL got one " + resource)

        if resource == self.resource_id:
            props = event.get("properties", {})

            if MOTION_DETECTED_KEY in props:
                self._arlo.debug(self.name + " has motion detection support")
                self._has_motion_detect = True

            if len(props) == 1 and not self._has_motion_detect:
                if props.get(CONNECTION_KEY, "") == "available":
                    self._save_and_do_callbacks(MOTION_DETECTED_KEY, True)
                    with self._lock:
                        self._arlo.bg.cancel(self._motion_time_job)
                        self._motion_time_job = self._arlo.bg.run_in(
                            self._motion_stopped, self._arlo.cfg.db_motion_time
                        )

            if BUTTON_PRESSED_KEY in props:
                self._save_and_do_callbacks(BUTTON_PRESSED_KEY, True)
                with self._lock:
                    self._arlo.bg.cancel(self._ding_time_job)
                    self._ding_time_job = self._arlo.bg.run_in(
                        self._button_unpressed, self._arlo.cfg.db_ding_time
                    )

            if CHIMES_KEY in props:
                self._chimes = props[CHIMES_KEY]

            silent_mode = props.get(SILENT_MODE_KEY, {})
            if silent_mode:
                self._save_and_do_callbacks(SILENT_MODE_KEY, silent_mode)

        super()._event_handler(resource, event)

    @property
    def resource_type(self):
        return "doorbells"

    @property
    def is_video_doorbell(self):
        return self.model_id.startswith(
            MODEL_WIRED_VIDEO_DOORBELL
        ) or self.model_id.startswith(MODEL_WIREFREE_VIDEO_DOORBELL)

    def has_capability(self, cap):
        if cap in (BUTTON_PRESSED_KEY, SILENT_MODE_KEY):
            return True
        if cap in (MOTION_DETECTED_KEY, BATTERY_KEY, SIGNAL_STR_KEY):
            if not self.is_video_doorbell:
                return True
        if cap in (CONNECTION_KEY,):
            if self.is_video_doorbell and self.parent_id == self.device_id:
                return False
        if cap in (SIREN_STATE_KEY,):
            if self.is_video_doorbell:
                return True
        return super().has_capability(cap)
MIT License
rwl/muntjac
muntjac/ui/slider.py
Slider.setValue
python
def setValue(self, value, repaintIsNotNeeded=False):
    v = value
    if self._resolution > 0:
        # truncate to the configured resolution; without the int() cast the
        # multiply/divide pair in the original listing was a no-op
        newValue = int(v * 10 ** self._resolution)
        newValue = newValue / float(10 ** self._resolution)
        if (self._min > newValue) or (self._max < newValue):
            raise ValueOutOfBoundsException(value)
    else:
        newValue = v
        if (self._min > newValue) or (self._max < newValue):
            raise ValueOutOfBoundsException(value)
    super(Slider, self).setValue(float(newValue), repaintIsNotNeeded)
Set the value of this Slider.

@param value:
           New value of Slider. Must be within the Slider's range
           (min - max), otherwise throws an exception.
@param repaintIsNotNeeded:
           If true, client-side is not requested to repaint itself.
@raise ValueOutOfBoundsException:
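A hedged usage sketch; ValueOutOfBoundsException is assumed to be importable from the same module, as the function body suggests.

from muntjac.ui.slider import Slider, ValueOutOfBoundsException

s = Slider('Volume', 0, 10)   # caption, min, max
s.setValue(5.0)               # within range: accepted
try:
    s.setValue(42.0)          # outside 0..10
except ValueOutOfBoundsException:
    print('rejected')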
https://github.com/rwl/muntjac/blob/8db97712edd81b4d25deaaa48587d2a08010f2c8/muntjac/ui/slider.py#L192-L215
from muntjac.ui.abstract_field import AbstractField


class Slider(AbstractField):

    CLIENT_WIDGET = None

    ORIENTATION_HORIZONTAL = 0
    ORIENTATION_VERTICAL = 1

    STYLE_SCROLLBAR = 'scrollbar'

    def __init__(self, *args):
        self._min = 0
        self._max = 100
        self._resolution = 0
        self._orientation = self.ORIENTATION_HORIZONTAL
        self._size = -1
        self._handleSize = -1
        self._arrows = False

        nargs = len(args)
        if nargs == 0:
            super(Slider, self).__init__()
            super(Slider, self).setValue(float(self._min))
        elif nargs == 1:
            caption, = args
            Slider.__init__(self)
            self.setCaption(caption)
        elif nargs == 2:
            minn, maxx = args
            Slider.__init__(self)
            self.setMin(minn)
            self.setMax(maxx)
            self.setResolution(0)
        elif nargs == 3:
            if isinstance(args[0], float):
                minn, maxx, resolution = args
                Slider.__init__(self)
                self.setMin(minn)
                self.setMax(maxx)
                self.setResolution(resolution)
            else:
                caption, minn, maxx = args
                Slider.__init__(self, minn, maxx)
                self.setCaption(caption)
        else:
            raise ValueError, 'too many arguments'

    def getMax(self):
        return self._max

    def setMax(self, maximum):
        self._max = maximum
        try:
            if float(str(self.getValue())) > maximum:
                super(Slider, self).setValue(float(maximum))
        except ValueError:
            super(Slider, self).setValue(float(maximum))
        self.requestRepaint()

    def getMin(self):
        return self._min

    def setMin(self, minimum):
        self._min = minimum
        try:
            if float(str(self.getValue())) < minimum:
                super(Slider, self).setValue(float(minimum))
        except ValueError:
            super(Slider, self).setValue(float(minimum))
        self.requestRepaint()

    def getOrientation(self):
        return self._orientation

    def setOrientation(self, orientation):
        self._orientation = orientation
        self.requestRepaint()

    def getResolution(self):
        return self._resolution

    def setResolution(self, resolution):
        if resolution < 0:
            return
        self._resolution = resolution
        self.requestRepaint()
Apache License 2.0