Dataset columns:
  repository_name: string (lengths 7 to 107)
  function_path: string (lengths 4 to 190)
  function_identifier: string (lengths 1 to 236)
  language: string (1 distinct value)
  function: string (lengths 9 to 647k)
  docstring: string (lengths 5 to 488k)
  function_url: string (lengths 71 to 285)
  context: string (lengths 0 to 2.51M)
  license: string (5 distinct values)
wy2136/xlearn
xlearn/decomposition.py
PCA.transform
python
def transform(self, da):
    da = da.copy()
    if not isinstance(da, xr.DataArray):
        X = da
        return super().transform(X)
    if self.weight is not None:
        da *= self.weight
    X = da.data
    n_samples = X.shape[0]
    grid_shape = X.shape[1:]
    n_grids = np.prod(grid_shape)
    X = X.reshape((n_samples, n_grids))
    valid_grids = ~np.isnan(X[0, :])
    X = X[:, valid_grids]
    pcs = super().transform(X)
    sample_dim = da.dims[0]
    pcs_dims = (sample_dim, 'mode')
    pcs_coords = {sample_dim: da[sample_dim], 'mode': np.arange(self.n_components_)}
    pcs_da = xr.DataArray(pcs, dims=pcs_dims, coords=pcs_coords)
    return pcs_da
xarray version of sklearn.decomposition.PCA.transform
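A minimal usage sketch, assuming the class is imported from xlearn.decomposition; the toy data, dimension names and n_components below are illustrative, not part of the source:

import numpy as np
import xarray as xr
from xlearn.decomposition import PCA  # assumed import path

# 100 time samples on a 10 x 20 lat/lon grid (made-up coordinates)
da = xr.DataArray(
    np.random.rand(100, 10, 20),
    dims=('time', 'lat', 'lon'),
    coords={'time': np.arange(100),
            'lat': np.linspace(-45, 45, 10),
            'lon': np.linspace(0, 342, 20)},
)
pca = PCA(n_components=3)
pca.fit(da)               # EOFs are stored in pca.components_da
pcs = pca.transform(da)   # DataArray of principal components, dims ('time', 'mode')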
https://github.com/wy2136/xlearn/blob/51d0713a38ecc5eecd4e72c71aa1d7fba5944259/xlearn/decomposition.py#L86-L120
import xarray as xr import numpy as np from sklearn.decomposition import PCA as PCA_sklearn class PCA(PCA_sklearn): def __init__(self, *args, **kw): weight = kw.pop('weight', None) n_components = kw.pop('n_components', 1) super().__init__(*args, n_components=n_components, **kw) self.weight = weight def __str__(self): attrs = ['copy', 'iterated_power', 'n_components', 'random_state', 'svd_solver', 'tol', 'whiten'] values = [] for attr in attrs: value = getattr(self, attr) if isinstance(value, str): value = '"{}"'.format(value) values.append(value) s = ', '.join(['{}={}'.format(attr, value) for attr,value in zip(attrs, values)]) return '[PCA for xarray]: {}.'.format(s) def __repr__(self): return self.__str__() def fit(self, da): da = da.copy() if not isinstance(da, xr.DataArray): X = da return super().fit(X) if self.weight is not None: da *= self.weight X = da.data n_samples = X.shape[0] grid_shape = X.shape[1:] n_grids = np.prod(grid_shape) X = X.reshape((n_samples, n_grids)) valid_grids = ~np.isnan(X[0, :]) X = X[:, valid_grids] super().fit(X) eofs = np.empty((self.n_components_, n_grids)) * np.nan eofs[:, valid_grids] = self.components_ eofs = eofs.reshape((self.n_components_,) + grid_shape) mean_ = np.empty(n_grids) * np.nan mean_[valid_grids] = self.mean_ mean_ = mean_.reshape(grid_shape) grid_dims = da.dims[1:] eofs_dims = ('mode',) + grid_dims grid_coords = {dim: da[dim] for dim in grid_dims} eofs_coords = grid_coords.copy() eofs_coords[eofs_dims[0]] = np.arange(self.n_components_) self.components_da = xr.DataArray(eofs, dims=eofs_dims, coords=eofs_coords) self.mean_da = xr.DataArray(mean_, dims=grid_dims, coords=grid_coords) return self
BSD 3-Clause New or Revised License
gurkult/gurkbot
bot/exts/fun/xkcd.py
setup
python
def setup(bot: Bot) -> None:
    bot.add_cog(XKCD(bot))
Loading the XKCD cog.
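For context, discord.py calls this setup function when the extension is loaded; a hedged sketch of how the bot might load it (the dotted path is inferred from the file layout, and the call is synchronous or awaited depending on the discord.py version in use):

bot.load_extension("bot.exts.fun.xkcd")  # discord.py then invokes setup(bot), registering the XKCD cog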
https://github.com/gurkult/gurkbot/blob/887c2ded7523a43abaa4865fa7978a64517ecbdc/bot/exts/fun/xkcd.py#L107-L109
import logging import re from datetime import datetime from random import randint from typing import Dict, Optional, Union from bot.bot import Bot from bot.constants import Colours from discord import Embed from discord.ext import tasks from discord.ext.commands import Cog, Context, command log = logging.getLogger(__name__) COMIC_FORMAT = re.compile(r"latest|[0-9]+") BASE_URL = "https://xkcd.com" class XKCD(Cog): def __init__(self, bot: Bot) -> None: self.bot = bot self.latest_comic_info: Dict[str, Union[str, int]] = {} self.get_latest_comic_info.start() def cog_unload(self) -> None: self.get_latest_comic_info.cancel() @tasks.loop(minutes=30) async def get_latest_comic_info(self) -> None: async with self.bot.http_session.get(f"{BASE_URL}/info.0.json") as resp: if resp.status == 200: self.latest_comic_info = await resp.json() else: log.debug( f"Failed to get latest XKCD comic information. Status code {resp.status}" ) @command(name="xkcd") async def fetch_xkcd_comics(self, ctx: Context, comic: Optional[str]) -> None: embed = Embed(title=f"XKCD comic '{comic}'") embed.colour = Colours.soft_red if comic and (comic := re.match(COMIC_FORMAT, comic)) is None: embed.description = ( "Comic parameter should either be an integer or 'latest'." ) await ctx.send(embed=embed) return comic = ( randint(1, self.latest_comic_info["num"]) if comic is None else comic.group(0) ) if comic == "latest": info = self.latest_comic_info else: async with self.bot.http_session.get( f"{BASE_URL}/{comic}/info.0.json" ) as resp: if resp.status == 200: info = await resp.json() else: embed.title = f"XKCD comic #{comic}" embed.description = ( f"{resp.status}: Could not retrieve xkcd comic #{comic}." ) log.debug( f"Retrieving xkcd comic #{comic} failed with status code {resp.status}." ) await ctx.send(embed=embed) return embed.title = f"{info['safe_title']} (#{info['num']})" embed.description = info["alt"] embed.url = f"{BASE_URL}/{info['num']}" if info["img"][-3:] in ("jpg", "png", "gif"): date = datetime( year=int(info["year"]), month=int(info["month"]), day=int(info["day"]) ) embed.timestamp = date embed.set_image(url=info["img"]) embed.set_footer(text=f"#{info['num']} • {info['safe_title']}") embed.colour = Colours.green else: embed.description = ( "The selected comic is interactive, and cannot be displayed within an embed.\n" f"Comic can be viewed [here](https://xkcd.com/{info['num']})." ) await ctx.send(embed=embed)
MIT License
sk2/ank_legacy_v2
AutoNetkit/algorithms/ip.py
allocate_subnets
python
def allocate_subnets(network, address_block=IPNetwork("10.0.0.0/8")):
    LOG.debug("Allocating subnets")
    ip_as_allocs = {}
    asgraphs = dict((my_as.asn, my_as) for my_as in ank.get_as_graphs(network))
    subnet_list = address_block.subnet(16)
    ebgp_edges = ank.ebgp_edges(network)
    visited_ebgp_edges = set()
    for src, dst in sorted(ebgp_edges):
        if (dst, src) in visited_ebgp_edges:
            continue
        src_as = asgraphs[src.asn]
        src_as.add_edge(src, dst)
        ank.dns_advertise_link(src, dst)
        visited_ebgp_edges.add((src, dst))
    for my_as in sorted(asgraphs.values(), key=lambda x: x.asn):
        asn = my_as.asn
        as_subnet = subnet_list.next()
        as_internal_nodes = [n for n in sorted(my_as.nodes()) if network.asn(n) == asn]
        host_count = my_as.number_of_nodes()
        ip_as_allocs[my_as.asn] = as_subnet
        ptp_count = my_as.number_of_edges()
        req_sn_count = max(host_count, 4 * ptp_count)
        if req_sn_count == 0:
            continue
        req_pref_len = int(32 - math.ceil(math.log(req_sn_count, 2)))
        sn_iter = as_subnet.subnet(req_pref_len)
        if ptp_count > 0:
            ptp_subnet = sn_iter.next()
        loopback_subnet = sn_iter.next()
        if ptp_count > 0:
            link_subnet = ptp_subnet.subnet(30)
            for src, dst in sorted(my_as.edges()):
                subnet = link_subnet.next()
                if network.asn(dst) != asn:
                    network.graph[dst][src]['remote_as_sn_block'] = True
                network.graph[src][dst]['sn'] = subnet
                network.graph[dst][src]['sn'] = subnet
                network.graph[src][dst]['ip'] = subnet[1]
                network.graph[dst][src]['ip'] = subnet[2]
        loopback_ips = loopback_subnet.subnet(32)
        for rtr in sorted(as_internal_nodes):
            lo_ip = loopback_ips.next()
            network.graph.node[rtr]['lo_ip'] = lo_ip
    network.ip_as_allocs = ip_as_allocs
Allocates subnets and IP addresses to links in the network. Args: address_block (IPNetwork): The address block to use. Returns: ip_as_allocs Example usage: >>> network = ank.example_multi_as() >>> allocate_subnets(network) >>> print ank.debug_nodes(network.graph, "lo_ip") {'1a.AS1': IPNetwork('10.0.0.32/32'), '1b.AS1': IPNetwork('10.0.0.33/32'), '1c.AS1': IPNetwork('10.0.0.34/32'), '2a.AS2': IPNetwork('10.1.0.64/32'), '2b.AS2': IPNetwork('10.1.0.65/32'), '2c.AS2': IPNetwork('10.1.0.66/32'), '2d.AS2': IPNetwork('10.1.0.67/32'), '3a.AS3': IPNetwork('10.2.0.0/32')} >>> print ank.debug_edges(network.graph, "ip") {('1a.AS1', '1b.AS1'): IPAddress('10.0.0.10'), ('1a.AS1', '1c.AS1'): IPAddress('10.0.0.22'), ('1b.AS1', '1a.AS1'): IPAddress('10.0.0.9'), ('1b.AS1', '1c.AS1'): IPAddress('10.0.0.26'), ('1b.AS1', '3a.AS3'): IPAddress('10.0.0.17'), ('1c.AS1', '1a.AS1'): IPAddress('10.0.0.21'), ('1c.AS1', '1b.AS1'): IPAddress('10.0.0.25'), ('1c.AS1', '2a.AS2'): IPAddress('10.0.0.29'), ('2a.AS2', '1c.AS1'): IPAddress('10.0.0.30'), ('2a.AS2', '2b.AS2'): IPAddress('10.1.0.10'), ('2a.AS2', '2d.AS2'): IPAddress('10.1.0.26'), ('2b.AS2', '2a.AS2'): IPAddress('10.1.0.9'), ('2b.AS2', '2c.AS2'): IPAddress('10.1.0.18'), ('2c.AS2', '2b.AS2'): IPAddress('10.1.0.17'), ('2c.AS2', '2d.AS2'): IPAddress('10.1.0.30'), ('2d.AS2', '2a.AS2'): IPAddress('10.1.0.25'), ('2d.AS2', '2c.AS2'): IPAddress('10.1.0.29'), ('2d.AS2', '3a.AS3'): IPAddress('10.1.0.33'), ('3a.AS3', '1b.AS1'): IPAddress('10.0.0.18'), ('3a.AS3', '2d.AS2'): IPAddress('10.1.0.34')} >>> print ank.debug_edges(network.graph, "sn") {('1a.AS1', '1b.AS1'): IPNetwork('10.0.0.8/30'), ('1a.AS1', '1c.AS1'): IPNetwork('10.0.0.20/30'), ('1b.AS1', '1a.AS1'): IPNetwork('10.0.0.8/30'), ('1b.AS1', '1c.AS1'): IPNetwork('10.0.0.24/30'), ('1b.AS1', '3a.AS3'): IPNetwork('10.0.0.16/30'), ('1c.AS1', '1a.AS1'): IPNetwork('10.0.0.20/30'), ('1c.AS1', '1b.AS1'): IPNetwork('10.0.0.24/30'), ('1c.AS1', '2a.AS2'): IPNetwork('10.0.0.28/30'), ('2a.AS2', '1c.AS1'): IPNetwork('10.0.0.28/30'), ('2a.AS2', '2b.AS2'): IPNetwork('10.1.0.8/30'), ('2a.AS2', '2d.AS2'): IPNetwork('10.1.0.24/30'), ('2b.AS2', '2a.AS2'): IPNetwork('10.1.0.8/30'), ('2b.AS2', '2c.AS2'): IPNetwork('10.1.0.16/30'), ('2c.AS2', '2b.AS2'): IPNetwork('10.1.0.16/30'), ('2c.AS2', '2d.AS2'): IPNetwork('10.1.0.28/30'), ('2d.AS2', '2a.AS2'): IPNetwork('10.1.0.24/30'), ('2d.AS2', '2c.AS2'): IPNetwork('10.1.0.28/30'), ('2d.AS2', '3a.AS3'): IPNetwork('10.1.0.32/30'), ('3a.AS3', '1b.AS1'): IPNetwork('10.0.0.16/30'), ('3a.AS3', '2d.AS2'): IPNetwork('10.1.0.32/30')}
https://github.com/sk2/ank_legacy_v2/blob/83a28aa54a4ea74962ee9a8c44f856a006a2e675/AutoNetkit/algorithms/ip.py#L33-L192
__author__ = "\n".join(['Simon Knight']) __all__ = ['get_ip_as_allocs', 'allocate_subnets', 'alloc_interfaces', 'alloc_tap_hosts', 'get_tap_host', 'int_id', 'ip_addr', 'ip_to_net_ent_title_ios', 'create_ip_overlay', 'ip_to_net_ent_title'] from netaddr import IPNetwork, IPAddress import AutoNetkit as ank import math import networkx as nx import logging LOG = logging.getLogger("ANK") import pprint def create_ip_overlay(network): print "creating ip overlay" overlay = nx.Graph(network.graph) for edge in overlay.edges(data=True): print edge def get_ip_as_allocs(network): return network.ip_as_allocs
BSD 3-Clause New or Revised License
hasgeek/coaster
coaster/sqlalchemy/registry.py
Registry.__setattr__
python
def __setattr__(self, name, value):
    if name.startswith('_'):
        raise ValueError("Registry member names cannot be underscore-prefixed")
    if hasattr(self, name):
        raise ValueError(f"{name} is already registered")
    if not callable(value):
        raise ValueError("Registry members must be callable")
    self._members.add(name)
    object.__setattr__(self, name, value)
Incorporate a new registry member.
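A small illustration of the checks enforced above (the registry instance and registered callables are hypothetical):

registry = Registry()
registry.render = lambda obj: str(obj)   # accepted: public name, callable
# registry._hidden = repr                # would raise ValueError: underscore-prefixed name
# registry.render = repr                 # would raise ValueError: already registered
# registry.title = "hello"               # would raise ValueError: not callable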
https://github.com/hasgeek/coaster/blob/3ffbc9d33c981284593445299aaee0c3cc0cdb0b/coaster/sqlalchemy/registry.py#L85-L94
from functools import partial from threading import Lock from typing import Optional, Set from sqlalchemy.ext.declarative import declared_attr __all__ = ['Registry', 'InstanceRegistry', 'RegistryMixin'] _marker = object() class Registry: _param: Optional[str] _name: Optional[str] _lock: Lock _default_property: bool _default_cached_property: bool _members: Set[str] _properties: Set[str] _cached_properties: Set[str] def __init__( self, param: Optional[str] = None, property: bool = False, cached_property: bool = False, ): if property and cached_property: raise TypeError("Only one of property and cached_property can be True") object.__setattr__(self, '_param', str(param) if param else None) object.__setattr__(self, '_name', None) object.__setattr__(self, '_lock', Lock()) object.__setattr__(self, '_default_property', property) object.__setattr__(self, '_default_cached_property', cached_property) object.__setattr__(self, '_members', set()) object.__setattr__(self, '_properties', set()) object.__setattr__(self, '_cached_properties', set()) def __set_name__(self, owner, name): if self._name is None: object.__setattr__(self, '_name', name) elif name != self._name: raise TypeError( f"A registry cannot be used under multiple names {self._name} and" f" {name}" )
BSD 2-Clause Simplified License
amzn/autotrail
src/autotrail/workflow/default_workflow/io.py
InteractiveClientWrapper.rerun
python
def rerun(self, dry_run=True, **tags):
    self._call_client_method('rerun', self._make_affected_steps_printer('rerun', dry_run),
                             dry_run=dry_run, **tags)
Send message to resume steps.

Mark the specified steps so that they are resumed. A step can only be resumed if it is either paused, marked to pause, paused due to failure or interrupted.

:param dry_run: Boolean. API call doesn't have any effect when True.
:param tags: Any key=value pair provided in the arguments is treated as a tag, except for dry_run=True. Each step by default gets a tag viz., name=<action_function_name>. If tags are provided, then only the steps matching the tags will be re-run. If no tags are provided, all possible steps will be marked to be re-run.
:return: None. Prints the 'name' and 'n' tags of steps that were re-run.
https://github.com/amzn/autotrail/blob/182fdfee87b684828f74408dc4214cb6ec9f2a3c/src/autotrail/workflow/default_workflow/io.py#L369-L381
import sys from operator import itemgetter from autotrail.workflow.default_workflow.api import StatusField def print_step_statuses(step_statuses, to): all_fields = {StatusField.NAME, StatusField.TAGS, StatusField.STATE, StatusField.ACTIONS, StatusField.IO, StatusField.RETURN_VALUE, StatusField.EXCEPTION} single_value_fields = [StatusField.STATE, StatusField.RETURN_VALUE, StatusField.EXCEPTION] multi_value_fields = [StatusField.IO, StatusField.OUTPUT, StatusField.ACTIONS] max_field_length = len(max(all_fields, key=len)) header = '- {{field:<{}}}: {{value}}'.format(max_field_length) body = ' {{field:<{}}}: {{value}}'.format(max_field_length) print('Status of steps:', file=to) for step_id, step_status in sorted(step_statuses.items()): print(header.format(field=StatusField.NAME, value=step_status[StatusField.NAME]), file=to) print(body.format(field='n', value=step_id), file=to) for field in step_status: if field in single_value_fields: print(body.format(field=field, value=step_status[field]), file=to) elif field in multi_value_fields: if step_status[field]: for i, message in enumerate(step_status[field]): field = field if i == 0 else '' print(body.format(field=field, value=str(message)), file=to) else: print(body.format(field=field, value=step_status[field]), file=to) elif field == StatusField.TAGS: for key, value in step_status[StatusField.TAGS].items(): print(body.format(field=field, value='{} = {}'.format(key, value)), file=to) field = '' print('', file=to) def print_error(method_name, error_message, to): print('- API Call: {}'.format(method_name), file=to) print(' Error: {}'.format(error_message), file=to) def print_no_result(method_name, to): print('- API Call: {}'.format(method_name), file=to) print(' No result received for the API call.', file=to) def print_affected_steps(method_name, affected_step_ids, dry_run, step_id_to_name_mapping, to): print('- API Call: {}'.format(method_name), file=to) print(' Dry Run: {}'.format(dry_run), file=to) print(' List of step names that are affected:', file=to) for step_id in sorted(affected_step_ids): print(' # {} - {}'.format(step_id, step_id_to_name_mapping[step_id]), file=to) def print_step_list(steps_tags, to): print('List of steps and their tags:', file=to) for step_tags in sorted(steps_tags, key=itemgetter('n')): print('- Name: {}'.format(step_tags['name']), file=to) print(' n: {}'.format(step_tags['n']), file=to) for key, value in step_tags.items(): if key in ['name', 'n']: continue print(' {key}: {value}'.format(key=key, value=value), file=to) class InteractiveClientWrapper: def __init__(self, method_api_client, stdout=sys.stdout, stderr=sys.stderr, status_printer=print_step_statuses, affected_steps_printer=print_affected_steps, step_list_printer=print_step_list, error_printer=print_error, no_result_printer=print_no_result): self.client = method_api_client self.stdout = stdout self.stderr = stderr try: tags_of_all_steps = self.client.list() except Exception as e: raise ValueError('Unable to query the trail due to error: {}'.format(e)) if not tags_of_all_steps: raise ValueError('Unable to query the trail. 
No response to "status" API call.') self.step_id_to_name_mapping = {step_tags['n']: step_tags['name'] for step_tags in tags_of_all_steps} self.error_printer = error_printer self.no_result_printer = no_result_printer self.status_printer = status_printer self.affected_steps_printer = affected_steps_printer self.step_list_printer = step_list_printer def _make_affected_steps_printer(self, method, dry_run): return lambda result: self.affected_steps_printer(method, result, dry_run, self.step_id_to_name_mapping, self.stdout) def _call_client_method(self, method, printer, *args, **kwargs): try: result = getattr(self.client, method)(*args, **kwargs) except Exception as error: self.error_printer(method, str(error), self.stderr) return if result is not None: printer(result) else: self.no_result_printer(method, self.stderr) def start(self, dry_run=True): self._call_client_method('start', self._make_affected_steps_printer('start', dry_run), dry_run=dry_run) def shutdown(self, dry_run=True): self._call_client_method('shutdown', self._make_affected_steps_printer('shutdown', dry_run), dry_run=dry_run) def send_message_to_steps(self, message, dry_run=True, **tags): self._call_client_method('send_message_to_steps', self._make_affected_steps_printer('send_message_to_steps', dry_run), message, dry_run=dry_run, **tags) def list(self, **tags): printer = lambda result: self.step_list_printer(result, self.stdout) self._call_client_method('list', printer, **tags) def status(self, fields=None, states=None, **tags): printer = lambda result: self.status_printer(result, self.stdout) self._call_client_method('status', printer, fields=fields, states=states, **tags) def steps_waiting_for_user_input(self, **tags): printer = lambda result: self.status_printer(result, self.stdout) self._call_client_method('steps_waiting_for_user_input', printer, **tags) def pause(self, dry_run=True, **tags): self._call_client_method('pause', self._make_affected_steps_printer('pause', dry_run), dry_run=dry_run, **tags) def interrupt(self, dry_run=True, **tags): self._call_client_method('interrupt', self._make_affected_steps_printer('interrupt', dry_run), dry_run=dry_run, **tags) def resume(self, dry_run=True, **tags): self._call_client_method('resume', self._make_affected_steps_printer('resume', dry_run), dry_run=dry_run, **tags)
Apache License 2.0
pokemongof/pokemongo-bot-desktop
build/pywin/Lib/distutils/msvccompiler.py
MSVCCompiler.get_msvc_paths
python
def get_msvc_paths(self, path, platform='x86'):
    if not _can_read_reg:
        return []

    path = path + " dirs"
    if self.__version >= 7:
        key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
               % (self.__root, self.__version))
    else:
        key = (r"%s\6.0\Build System\Components\Platforms"
               r"\Win32 (%s)\Directories" % (self.__root, platform))

    for base in HKEYS:
        d = read_values(base, key)
        if d:
            if self.__version >= 7:
                return string.split(self.__macros.sub(d[path]), ";")
            else:
                return string.split(d[path], ";")
    if self.__version == 6:
        for base in HKEYS:
            if read_values(base, r"%s\6.0" % self.__root) is not None:
                self.warn("It seems you have Visual Studio 6 installed, "
                          "but the expected registry settings are not present.\n"
                          "You must at least run the Visual Studio GUI once "
                          "so that these entries are created.")
                break
    return []
Get a list of devstudio directories (include, lib or path). Return a list of strings. The list will be empty if unable to access the registry or appropriate registry keys not found.
https://github.com/pokemongof/pokemongo-bot-desktop/blob/4bfa94f0183406c6a86f93645eff7abd3ad4ced8/build/pywin/Lib/distutils/msvccompiler.py#L602-L637
__revision__ = "$Id$" import sys import os import string from distutils.errors import (DistutilsExecError, DistutilsPlatformError, CompileError, LibError, LinkError) from distutils.ccompiler import CCompiler, gen_lib_options from distutils import log _can_read_reg = 0 try: import _winreg _can_read_reg = 1 hkey_mod = _winreg RegOpenKeyEx = _winreg.OpenKeyEx RegEnumKey = _winreg.EnumKey RegEnumValue = _winreg.EnumValue RegError = _winreg.error except ImportError: try: import win32api import win32con _can_read_reg = 1 hkey_mod = win32con RegOpenKeyEx = win32api.RegOpenKeyEx RegEnumKey = win32api.RegEnumKey RegEnumValue = win32api.RegEnumValue RegError = win32api.error except ImportError: log.info("Warning: Can't read registry to find the " "necessary compiler setting\n" "Make sure that Python modules _winreg, " "win32api or win32con are installed.") pass if _can_read_reg: HKEYS = (hkey_mod.HKEY_USERS, hkey_mod.HKEY_CURRENT_USER, hkey_mod.HKEY_LOCAL_MACHINE, hkey_mod.HKEY_CLASSES_ROOT) def read_keys(base, key): try: handle = RegOpenKeyEx(base, key) except RegError: return None L = [] i = 0 while 1: try: k = RegEnumKey(handle, i) except RegError: break L.append(k) i = i + 1 return L def read_values(base, key): try: handle = RegOpenKeyEx(base, key) except RegError: return None d = {} i = 0 while 1: try: name, value, type = RegEnumValue(handle, i) except RegError: break name = name.lower() d[convert_mbcs(name)] = convert_mbcs(value) i = i + 1 return d def convert_mbcs(s): enc = getattr(s, "encode", None) if enc is not None: try: s = enc("mbcs") except UnicodeError: pass return s class MacroExpander: def __init__(self, version): self.macros = {} self.load_macros(version) def set_macro(self, macro, path, key): for base in HKEYS: d = read_values(base, path) if d: self.macros["$(%s)" % macro] = d[key] break def load_macros(self, version): vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir") self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir") net = r"Software\Microsoft\.NETFramework" self.set_macro("FrameworkDir", net, "installroot") try: if version > 7.0: self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1") else: self.set_macro("FrameworkSDKDir", net, "sdkinstallroot") except KeyError: raise DistutilsPlatformError, ("""Python was built with Visual Studio 2003; extensions must be built with a compiler than can generate compatible binaries. Visual Studio 2003 was not found on this system. If you have Cygwin installed, you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""") p = r"Software\Microsoft\NET Framework Setup\Product" for base in HKEYS: try: h = RegOpenKeyEx(base, p) except RegError: continue key = RegEnumKey(h, 0) d = read_values(base, r"%s\%s" % (p, key)) self.macros["$(FrameworkVersion)"] = d["version"] def sub(self, s): for k, v in self.macros.items(): s = string.replace(s, k, v) return s def get_build_version(): prefix = "MSC v." 
i = string.find(sys.version, prefix) if i == -1: return 6 i = i + len(prefix) s, rest = sys.version[i:].split(" ", 1) majorVersion = int(s[:-2]) - 6 minorVersion = int(s[2:3]) / 10.0 if majorVersion == 6: minorVersion = 0 if majorVersion >= 6: return majorVersion + minorVersion return None def get_build_architecture(): prefix = " bit (" i = string.find(sys.version, prefix) if i == -1: return "Intel" j = string.find(sys.version, ")", i) return sys.version[i+len(prefix):j] def normalize_and_reduce_paths(paths): reduced_paths = [] for p in paths: np = os.path.normpath(p) if np not in reduced_paths: reduced_paths.append(np) return reduced_paths class MSVCCompiler (CCompiler) : compiler_type = 'msvc' executables = {} _c_extensions = ['.c'] _cpp_extensions = ['.cc', '.cpp', '.cxx'] _rc_extensions = ['.rc'] _mc_extensions = ['.mc'] src_extensions = (_c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions) res_extension = '.res' obj_extension = '.obj' static_lib_extension = '.lib' shared_lib_extension = '.dll' static_lib_format = shared_lib_format = '%s%s' exe_extension = '.exe' def __init__ (self, verbose=0, dry_run=0, force=0): CCompiler.__init__ (self, verbose, dry_run, force) self.__version = get_build_version() self.__arch = get_build_architecture() if self.__arch == "Intel": if self.__version >= 7: self.__root = r"Software\Microsoft\VisualStudio" self.__macros = MacroExpander(self.__version) else: self.__root = r"Software\Microsoft\Devstudio" self.__product = "Visual Studio version %s" % self.__version else: self.__product = "Microsoft SDK compiler %s" % (self.__version + 6) self.initialized = False def initialize(self): self.__paths = [] if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"): self.cc = "cl.exe" self.linker = "link.exe" self.lib = "lib.exe" self.rc = "rc.exe" self.mc = "mc.exe" else: self.__paths = self.get_msvc_paths("path") if len (self.__paths) == 0: raise DistutilsPlatformError, ("Python was built with %s, " "and extensions need to be built with the same " "version of the compiler, but it isn't installed." 
% self.__product) self.cc = self.find_exe("cl.exe") self.linker = self.find_exe("link.exe") self.lib = self.find_exe("lib.exe") self.rc = self.find_exe("rc.exe") self.mc = self.find_exe("mc.exe") self.set_path_env_var('lib') self.set_path_env_var('include') try: for p in string.split(os.environ['path'], ';'): self.__paths.append(p) except KeyError: pass self.__paths = normalize_and_reduce_paths(self.__paths) os.environ['path'] = string.join(self.__paths, ';') self.preprocess_options = None if self.__arch == "Intel": self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' , '/DNDEBUG'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX', '/Z7', '/D_DEBUG'] else: self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' , '/DNDEBUG'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-', '/Z7', '/D_DEBUG'] self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] if self.__version >= 7: self.ldflags_shared_debug = [ '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' ] else: self.ldflags_shared_debug = [ '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG' ] self.ldflags_static = [ '/nologo'] self.initialized = True def object_filenames (self, source_filenames, strip_dir=0, output_dir=''): if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: (base, ext) = os.path.splitext (src_name) base = os.path.splitdrive(base)[1] base = base[os.path.isabs(base):] if ext not in self.src_extensions: raise CompileError ("Don't know how to compile %s" % src_name) if strip_dir: base = os.path.basename (base) if ext in self._rc_extensions: obj_names.append (os.path.join (output_dir, base + self.res_extension)) elif ext in self._mc_extensions: obj_names.append (os.path.join (output_dir, base + self.res_extension)) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): if not self.initialized: self.initialize() macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) compile_opts = extra_preargs or [] compile_opts.append ('/c') if debug: compile_opts.extend(self.compile_options_debug) else: compile_opts.extend(self.compile_options) for obj in objects: try: src, ext = build[obj] except KeyError: continue if debug: src = os.path.abspath(src) if ext in self._c_extensions: input_opt = "/Tc" + src elif ext in self._cpp_extensions: input_opt = "/Tp" + src elif ext in self._rc_extensions: input_opt = src output_opt = "/fo" + obj try: self.spawn ([self.rc] + pp_opts + [output_opt] + [input_opt]) except DistutilsExecError, msg: raise CompileError, msg continue elif ext in self._mc_extensions: h_dir = os.path.dirname (src) rc_dir = os.path.dirname (obj) try: self.spawn ([self.mc] + ['-h', h_dir, '-r', rc_dir] + [src]) base, _ = os.path.splitext (os.path.basename (src)) rc_file = os.path.join (rc_dir, base + '.rc') self.spawn ([self.rc] + ["/fo" + obj] + [rc_file]) except DistutilsExecError, msg: raise CompileError, msg continue else: raise CompileError ( "Don't know how to compile %s to %s" % (src, obj)) output_opt = "/Fo" + obj try: self.spawn ([self.cc] + compile_opts + pp_opts + [input_opt, output_opt] + extra_postargs) except DistutilsExecError, msg: raise CompileError, msg return objects def create_static_lib (self, objects, output_libname, output_dir=None, debug=0, 
target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args (objects, output_dir) output_filename = self.library_filename (output_libname, output_dir=output_dir) if self._need_link (objects, output_filename): lib_args = objects + ['/OUT:' + output_filename] if debug: pass try: self.spawn ([self.lib] + lib_args) except DistutilsExecError, msg: raise LibError, msg else: log.debug("skipping %s (up-to-date)", output_filename) def link (self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): if not self.initialized: self.initialize() (objects, output_dir) = self._fix_object_args (objects, output_dir) (libraries, library_dirs, runtime_library_dirs) = self._fix_lib_args (libraries, library_dirs, runtime_library_dirs) if runtime_library_dirs: self.warn ("I don't know what to do with 'runtime_library_dirs': " + str (runtime_library_dirs)) lib_opts = gen_lib_options (self, library_dirs, runtime_library_dirs, libraries) if output_dir is not None: output_filename = os.path.join (output_dir, output_filename) if self._need_link (objects, output_filename): if target_desc == CCompiler.EXECUTABLE: if debug: ldflags = self.ldflags_shared_debug[1:] else: ldflags = self.ldflags_shared[1:] else: if debug: ldflags = self.ldflags_shared_debug else: ldflags = self.ldflags_shared export_opts = [] for sym in (export_symbols or []): export_opts.append("/EXPORT:" + sym) ld_args = (ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename]) if export_symbols is not None: (dll_name, dll_ext) = os.path.splitext( os.path.basename(output_filename)) implib_file = os.path.join( os.path.dirname(objects[0]), self.library_filename(dll_name)) ld_args.append ('/IMPLIB:' + implib_file) if extra_preargs: ld_args[:0] = extra_preargs if extra_postargs: ld_args.extend(extra_postargs) self.mkpath (os.path.dirname (output_filename)) try: self.spawn ([self.linker] + ld_args) except DistutilsExecError, msg: raise LinkError, msg else: log.debug("skipping %s (up-to-date)", output_filename) def library_dir_option (self, dir): return "/LIBPATH:" + dir def runtime_library_dir_option (self, dir): raise DistutilsPlatformError, "don't know how to set runtime library search path for MSVC++" def library_option (self, lib): return self.library_filename (lib) def find_library_file (self, dirs, lib, debug=0): if debug: try_names = [lib + "_d", lib] else: try_names = [lib] for dir in dirs: for name in try_names: libfile = os.path.join(dir, self.library_filename (name)) if os.path.exists(libfile): return libfile else: return None def find_exe(self, exe): for p in self.__paths: fn = os.path.join(os.path.abspath(p), exe) if os.path.isfile(fn): return fn for p in string.split(os.environ['Path'],';'): fn = os.path.join(os.path.abspath(p),exe) if os.path.isfile(fn): return fn return exe
MIT License
tensorflow/agents
tf_agents/metrics/tf_metric.py
TFStepMetric.init_variables
python
def init_variables(self):
    if not tf.executing_eagerly():
        return tf.compat.v1.group([v.initializer for v in self.variables])
Initializes this Metric's variables.

Should be called after variables are created in the first execution of `__call__()`. If using graph execution, the return value should be `run()` in a session before running the op returned by `__call__()`. (See example above.)

Returns:
  If using graph execution, this returns an op to perform the initialization. Under eager execution, the variables are reset to their initial values as a side effect and this function returns None.
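A graph-mode sketch following the docstring; the concrete metric subclass and the trajectory tensor are placeholders, not defined in this module:

# `metric` is an instance of some concrete TFStepMetric subclass, `traj` a batched trajectory.
update_op = metric(traj)             # graph mode: builds the update op
init_op = metric.init_variables()    # op that initializes the metric's variables
with tf.compat.v1.Session() as sess:
    sess.run(init_op)
    sess.run(update_op)
    print(sess.run(metric.result()))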
https://github.com/tensorflow/agents/blob/ad18e95cfd95e4e76b771aeafa653f70c5080a29/tf_agents/metrics/tf_metric.py#L62-L76
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tf_agents.utils import common class TFStepMetric(tf.Module): def __init__(self, name, prefix='Metrics'): super(TFStepMetric, self).__init__(name) common.check_tf1_allowed() self._prefix = prefix def call(self, *args, **kwargs): raise NotImplementedError('Metrics must define a call() member function') def reset(self): raise NotImplementedError('Metrics must define a reset() member function') def result(self): raise NotImplementedError('Metrics must define a result() member function')
Apache License 2.0
markmichon1/bitglitter-python
bitglitter/utilities/palette.py
get_palette_id_from_hash
python
def get_palette_id_from_hash(name, description, time_created, color_set):
    color_set_string = str(color_set)
    color_set_string.replace('[', '(')
    color_set_string.replace(']', ')')
    hasher = hashlib.sha256(str(name + description + str(time_created) + color_set_string).encode())
    return hasher.hexdigest()
Taking in the various parameters, this creates a unique ID for the custom palettes.
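A minimal call sketch (all argument values are made up):

palette_id = get_palette_id_from_hash(
    name='my palette',
    description='an example four-color palette',
    time_created=1618000000,
    color_set=[(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255)],
)
# palette_id is the 64-character hexadecimal SHA-256 digest of the combined fields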
https://github.com/markmichon1/bitglitter-python/blob/c13176084ae71af959d8e551886055cdc1827391/bitglitter/utilities/palette.py#L137-L146
import hashlib import itertools import logging import math from pathlib import Path from random import choice, randint from bitstring import BitArray, ConstBitStream from PIL import Image, ImageDraw from bitglitter.write.render.renderutilities import render_coords_generator class BitsToColor: def __init__(self, color_set_tupled, bit_length, palette_name): logging.debug(f'Generating encoder for {palette_name}...') self.color_set_tupled = color_set_tupled self.bit_length = bit_length self.return_value = self.generate_dictionary() @staticmethod def twenty_four_bit_values(bit_value): red_channel = bit_value.read('uint : 8') green_channel = bit_value.read('uint : 8') blue_channel = bit_value.read('uint : 8') return red_channel, green_channel, blue_channel def generate_dictionary(self): color_dict = {} if self.bit_length != 24: for value in range(len(self.color_set_tupled)): temp_bin_holder = str(BitArray(uint=value, length=self.bit_length)) temp_bin_holder = ConstBitStream(temp_bin_holder) color_dict[temp_bin_holder] = self.color_set_tupled[value] return color_dict else: return self.twenty_four_bit_values def get_color(self, value): if self.bit_length != 24: return self.return_value[value] else: return self.return_value(value) class ColorsToBits: def __init__(self, color_set_tupled, bit_length, palette_name): logging.debug(f'Generating decoder for {palette_name}...') self.color_set_tupled = color_set_tupled self.bit_length = bit_length self.return_value = self.generate_dictionary() @staticmethod def twenty_four_bit_values(color): outgoing_data = BitArray() for color_channel in color: outgoing_data.append(BitArray(uint=color_channel, length=8)) return outgoing_data def generate_dictionary(self): value_dict = {} if self.bit_length != 24: for value in range(len(self.color_set_tupled)): temp_bin_holder = str(BitArray(uint=value, length=self.bit_length)) temp_bin_holder = ConstBitStream(temp_bin_holder) value_dict[self.color_set_tupled[value]] = temp_bin_holder return value_dict else: return self.twenty_four_bit_values def get_value(self, color): if self.bit_length != 24: return self.return_value[color] else: return self.return_value(color) def convert_hex_to_rgb(color_set): returned_list = [] for color in color_set: if isinstance(color, str): stripped = color.replace('#', '') returned_list.append(tuple(int(stripped[i:i + 2], 16) for i in (0, 2, 4))) else: returned_list.append(color) return returned_list def get_color_distance(color_set): min_distance = None for pair in itertools.combinations(color_set, 2): first_color, second_color = pair red_distance = (second_color[0] - first_color[0]) ** 2 green_distance = (second_color[1] - first_color[1]) ** 2 blue_distance = (second_color[2] - first_color[2]) ** 2 sum_of_distances = math.sqrt(red_distance + green_distance + blue_distance) if min_distance is not None: if sum_of_distances < min_distance: min_distance = sum_of_distances else: min_distance = sum_of_distances return round(min_distance, 3)
MIT License
cozysynthesizer/cozy
cozy/wf.py
exp_wf
python
def exp_wf(e : Exp, context : Context, pool = RUNTIME_POOL, assumptions : Exp = ETRUE, solver = None):
    if solver is None:
        solver = ModelCachingSolver(vars=[], funcs={})
    for x, ctx, p in all_subexpressions_with_context_information(e, context, pool):
        is_wf = exp_wf_nonrecursive(solver, x, ctx, p, assumptions=ctx.adapt(assumptions, context))
        if not is_wf:
            if isinstance(is_wf, No):
                return ExpIsNotWf(e, x, is_wf.msg)
            return is_wf
    return True
Check the well-formedness of `e`.

Returns True or an instance of ExpIsNotWf that indicates why `e` is not well-formed.

Parameters:
    e - an expression to check
    context - a context describing e's variables
    pool - what pool e lives in
    assumptions - facts that are true whenever e begins executing
        (NOTE: this does NOT need to include the path conditions from the context, but it is fine if it does.)
    solver - a ModelCachingSolver to use for solving formulas

This function requires that:
 - all free variables in `e` are used in the correct pool
 - EStateVar only occurs in runtime expressions
https://github.com/cozysynthesizer/cozy/blob/d7b2c0ee575057dea4ebec201d579f0ecd785b1b/cozy/wf.py#L71-L98
import itertools from cozy.common import No, typechecked, OrderedSet, unique from cozy.syntax import Exp, EVar, EAll, ETRUE from cozy.target_syntax import EStateVar from cozy.syntax_tools import pprint, strip_EStateVar, freshen_binders, alpha_equivalent, replace from cozy.solver import ModelCachingSolver from cozy.pools import RUNTIME_POOL, STATE_POOL from cozy.structures import extension_handler from cozy.contexts import Context, all_subexpressions_with_context_information from cozy.logging import task class ExpIsNotWf(No): def __init__(self, toplevel_expression, offending_subexpression, reason): super().__init__("at {}: {}".format(pprint(offending_subexpression), reason)) self.toplevel_expression = toplevel_expression self.offending_subexpression = offending_subexpression self.reason = reason def __repr__(self): return "ExpIsNotWf({!r}, {!r}, {!r})".format( self.toplevel_expression, self.offending_subexpression, self.reason) def exp_wf_nonrecursive(solver, e : Exp, context : Context, pool = RUNTIME_POOL, assumptions : Exp = ETRUE): if hasattr(e, "_wf"): return True state_vars = OrderedSet(v for v, p in context.vars() if p == STATE_POOL) args = OrderedSet(v for v, p in context.vars() if p == RUNTIME_POOL) h = extension_handler(type(e)) if h is not None: assumptions = EAll([assumptions, context.path_condition()]) msg = h.check_wf(e, state_vars=state_vars, args=args, pool=pool, assumptions=assumptions, is_valid=solver.valid) if msg is not None: return No(msg) e._wf = True return True at_runtime = pool == RUNTIME_POOL if isinstance(e, EStateVar) and not at_runtime: return No("EStateVar in state pool position") if isinstance(e, EVar): if at_runtime and e in state_vars: return No("state var at runtime") elif not at_runtime and e in args: return No("arg in state exp") e._wf = True return True @typechecked
Apache License 2.0
seldonio/alibi
alibi/explainers/backends/tensorflow/cfrl_base.py
save_model
python
def save_model(path: Union[str, os.PathLike], model: keras.layers.Layer) -> None:
    model.save(path, save_format="tf")
Saves a model and its optimizer.

Parameters
----------
path
    Path to the saving location.
model
    Model to be saved.
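A usage sketch with a throwaway Keras model; the path and architecture are arbitrary:

import tensorflow.keras as keras

model = keras.Sequential([keras.layers.Dense(8, input_shape=(4,))])
save_model("/tmp/actor_model", model)  # writes a TensorFlow SavedModel directory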
https://github.com/seldonio/alibi/blob/ef757b9579f85ef2e3dfc7088211969616ee3fdb/alibi/explainers/backends/tensorflow/cfrl_base.py#L621-L632
import os import random import numpy as np import tensorflow as tf import tensorflow.keras as keras from typing import Any, List, Dict, Callable, Union, Optional, TYPE_CHECKING from alibi.explainers.backends.cfrl_base import CounterfactualRLDataset from alibi.models.tensorflow.actor_critic import Actor, Critic if TYPE_CHECKING: from alibi.explainers.cfrl_base import NormalActionNoise class TfCounterfactualRLDataset(CounterfactualRLDataset, keras.utils.Sequence): def __init__(self, X: np.ndarray, preprocessor: Callable, predictor: Callable, conditional_func: Callable, batch_size: int, shuffle: bool = True) -> None: super().__init__() self.X = X self.preprocessor = preprocessor self.predictor = predictor self.conditional_func = conditional_func self.batch_size = batch_size self.shuffle = shuffle self.Y_m = self.predict_batches(X=self.X, predictor=self.predictor, batch_size=self.batch_size) self.num_classes: Optional[int] = None self.max_m: Optional[float] = None self.min_m: Optional[float] = None if self.Y_m.shape[1] > 1: self.num_classes = self.Y_m.shape[1] else: self.min_m = np.min(self.Y_m) self.max_n = np.max(self.Y_m) self.X = self.preprocessor(self.X) self.on_epoch_end() def on_epoch_end(self) -> None: self.indexes = np.arange(self.X.shape[0]) if self.shuffle: np.random.shuffle(self.indexes) def __len__(self) -> int: return self.X.shape[0] // self.batch_size def __getitem__(self, idx) -> Dict[str, np.ndarray]: if self.num_classes is not None: tgts = np.random.randint(low=0, high=self.num_classes, size=self.batch_size) Y_t = np.zeros((self.batch_size, self.num_classes)) Y_t[np.arange(self.batch_size), tgts] = 1 else: Y_t = np.random.uniform(low=self.min_m, high=self.max_m, size=(self.batch_size, 1)) indexes = self.indexes[idx * self.batch_size:(idx + 1) * self.batch_size] C = self.conditional_func(self.X[indexes]) return { "X": self.X[indexes], "Y_m": self.Y_m[indexes], "Y_t": Y_t, "C": C } def get_optimizer(model: Optional[keras.layers.Layer] = None, lr: float = 1e-3) -> keras.optimizers.Optimizer: return keras.optimizers.Adam(learning_rate=lr) def get_actor(hidden_dim: int, output_dim: int) -> keras.layers.Layer: return Actor(hidden_dim=hidden_dim, output_dim=output_dim) def get_critic(hidden_dim: int) -> keras.layers.Layer: return Critic(hidden_dim=hidden_dim) def sparsity_loss(X_hat_cf: tf.Tensor, X: tf.Tensor) -> Dict[str, tf.Tensor]: return {"sparsity_loss": tf.reduce_mean(tf.abs(X_hat_cf - X))} def consistency_loss(Z_cf_pred: tf.Tensor, Z_cf_tgt: tf.Tensor): return {"consistency_loss": 0} def data_generator(X: np.ndarray, encoder_preprocessor: Callable, predictor: Callable, conditional_func: Callable, batch_size: int, shuffle: bool = True, **kwargs): return TfCounterfactualRLDataset(X=X, preprocessor=encoder_preprocessor, predictor=predictor, conditional_func=conditional_func, batch_size=batch_size, shuffle=shuffle) def encode(X: Union[tf.Tensor, np.ndarray], encoder: keras.Model, **kwargs) -> tf.Tensor: return encoder(X, training=False) def decode(Z: Union[tf.Tensor, np.ndarray], decoder: keras.Model, **kwargs): return decoder(Z, training=False) def generate_cf(Z: Union[np.ndarray, tf.Tensor], Y_m: Union[np.ndarray, tf.Tensor], Y_t: Union[np.ndarray, tf.Tensor], C: Optional[Union[np.ndarray, tf.Tensor]], actor: keras.Model, **kwargs) -> tf.Tensor: Y_m = tf.cast(Y_m, dtype=tf.float32) Y_t = tf.cast(Y_t, dtype=tf.float32) C = tf.cast(C, dtype=tf.float32) if (C is not None) else C state = [Z, Y_m, Y_t] + ([C] if (C is not None) else []) state = tf.concat(state, axis=1) Z_cf = 
actor(state, training=False) return Z_cf def add_noise(Z_cf: Union[tf.Tensor, np.ndarray], noise: 'NormalActionNoise', act_low: float, act_high: float, step: int, exploration_steps: int, **kwargs) -> tf.Tensor: eps = noise(Z_cf.shape) if step > exploration_steps: Z_cf_tilde = Z_cf + eps Z_cf_tilde = tf.clip_by_value(Z_cf_tilde, clip_value_min=act_low, clip_value_max=act_high) else: Z_cf_tilde = tf.random.uniform(Z_cf.shape, minval=act_low, maxval=act_high) return Z_cf_tilde def initialize_optimizer(optimizer: keras.optimizers.Optimizer, model: keras.Model) -> None: zero_grads = [tf.zeros_like(w) for w in model.trainable_weights] optimizer.apply_gradients(zip(zero_grads, model.trainable_weights)) def initialize_optimizers(optimizer_actor, optimizer_critic, actor, critic, **kwargs) -> None: initialize_optimizer(optimizer=optimizer_actor, model=actor) initialize_optimizer(optimizer=optimizer_critic, model=critic) def initialize_actor_critic(actor, critic, Z, Z_cf_tilde, Y_m, Y_t, C, **kwargs): Z = tf.zeros((1, *Z.shape[1:]), dtype=tf.float32) Z_cf_tilde = tf.zeros((1, *Z_cf_tilde.shape[1:]), dtype=tf.float32) Y_m = tf.zeros((1, *Y_m.shape[1:]), dtype=tf.float32) Y_t = tf.zeros((1, *Y_t.shape[1:]), dtype=tf.float32) actor_input = [Z, Y_m, Y_t] if C is not None: C = tf.zeros((1, *C.shape[1:]), dtype=tf.float32) actor_input += [C] actor_input = tf.concat(actor_input, axis=1) critic_input = [actor_input, Z_cf_tilde] critic_input = tf.concat(critic_input, axis=1) actor(actor_input) critic(critic_input) @tf.function() def update_actor_critic(encoder: keras.Model, decoder: keras.Model, critic: keras.Model, actor: keras.Model, optimizer_critic: keras.optimizers.Optimizer, optimizer_actor: keras.optimizers.Optimizer, sparsity_loss: Callable, consistency_loss: Callable, coeff_sparsity: float, coeff_consistency: float, X: np.ndarray, X_cf: np.ndarray, Z: np.ndarray, Z_cf_tilde: np.ndarray, Y_m: np.ndarray, Y_t: np.ndarray, C: Optional[np.ndarray], R_tilde: np.ndarray, **kwargs) -> Dict[str, Any]: losses: Dict[str, float] = dict() Y_m = tf.cast(Y_m, dtype=tf.float32) Y_t = tf.cast(Y_t, dtype=tf.float32) C = tf.cast(C, dtype=tf.float32) if (C is not None) else None state = [Z, Y_m, Y_t] + ([C] if C is not None else []) state = tf.concat(state, axis=1) with tf.GradientTape() as tape_critic: input_critic = tf.concat([state, Z_cf_tilde], axis=1) output_critic = tf.squeeze(critic(input_critic, training=True), axis=1) loss_critic = tf.reduce_mean(tf.square(output_critic - R_tilde)) losses.update({"critic_loss": loss_critic}) grads_critic = tape_critic.gradient(loss_critic, critic.trainable_weights) optimizer_critic.apply_gradients(zip(grads_critic, critic.trainable_weights)) with tf.GradientTape() as tape_actor: Z_cf = actor(state, training=True) input_critic = tf.concat([state, Z_cf], axis=1) output_critic = critic(input_critic, training=False) loss_actor = -tf.reduce_mean(output_critic) losses.update({"actor_loss": loss_actor}) X_hat_cf = decoder(Z_cf, training=False) loss_sparsity = sparsity_loss(X_hat_cf, X) losses.update(loss_sparsity) for key in loss_sparsity.keys(): loss_actor += coeff_sparsity * loss_sparsity[key] Z_cf_tgt = encoder(X_cf, training=False) loss_consistency = consistency_loss(Z_cf_pred=Z_cf, Z_cf_tgt=Z_cf_tgt) losses.update(loss_consistency) for key in loss_consistency.keys(): loss_actor += coeff_consistency * loss_consistency[key] grads_actor = tape_actor.gradient(loss_actor, actor.trainable_weights) optimizer_actor.apply_gradients(zip(grads_actor, actor.trainable_weights)) return losses 
def to_numpy(X: Optional[Union[List, np.ndarray, tf.Tensor]]) -> Optional[Union[List, np.ndarray]]: if X is not None: if isinstance(X, np.ndarray): return X if isinstance(X, tf.Tensor): return X.numpy() if isinstance(X, list): return [to_numpy(e) for e in X] return np.array(X) return None def to_tensor(X: Union[np.ndarray, tf.Tensor], **kwargs) -> Optional[tf.Tensor]: if X is not None: if isinstance(X, tf.Tensor): return X return tf.constant(X) return None
Apache License 2.0
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_3/api/smi_s_api.py
SMISApi.api23_smi_s_get_with_http_info
python
def api23_smi_s_get_with_http_info(
    self,
    authorization=None,
    x_request_id=None,
    filter=None,
    limit=None,
    offset=None,
    sort=None,
    total_item_count=None,
    async_req=False,
    _return_http_data_only=False,
    _preload_content=True,
    _request_timeout=None,
):
    if sort is not None:
        if not isinstance(sort, list):
            sort = [sort]
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}

    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]

    if 'limit' in params and params['limit'] < 1:
        raise ValueError("Invalid value for parameter `limit` when calling `api23_smi_s_get`, must be a value greater than or equal to `1`")
    if 'offset' in params and params['offset'] < 0:
        raise ValueError("Invalid value for parameter `offset` when calling `api23_smi_s_get`, must be a value greater than or equal to `0`")
    collection_formats = {}
    path_params = {}

    query_params = []
    if 'filter' in params:
        query_params.append(('filter', params['filter']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'offset' in params:
        query_params.append(('offset', params['offset']))
    if 'sort' in params:
        query_params.append(('sort', params['sort']))
        collection_formats['sort'] = 'csv'
    if 'total_item_count' in params:
        query_params.append(('total_item_count', params['total_item_count']))

    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']

    form_params = []
    local_var_files = {}

    body_params = None
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    auth_settings = []

    return self.api_client.call_api(
        '/api/2.3/smi-s', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SmisGetResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
List SMI-S settings

Displays the SMI-S settings, including the name of the array and whether SLP and WBEM-HTTPS are enabled.

This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.api23_smi_s_get_with_http_info(async_req=True)
>>> result = thread.get()

:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts.
:return: SmisGetResponse
         If the method is called asynchronously, returns the request thread.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_3/api/smi_s_api.py#L29-L135
from __future__ import absolute_import import re import six from typing import List, Optional from .. import models class SMISApi(object): def __init__(self, api_client): self.api_client = api_client
BSD 2-Clause Simplified License
tansey/hrt
examples/timing/timings.py
fit_lasso
python
def fit_lasso(X, y):
    from sklearn.linear_model import LassoCV
    lasso = LassoCV(cv=5)
    lasso.fit(X, y)
    return lasso
Fit a lasso model
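A quick sketch on synthetic data:

import numpy as np

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 10))
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)
lasso = fit_lasso(X, y)
print(lasso.coef_)  # the first coefficient should dominate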
https://github.com/tansey/hrt/blob/f6d271a34590d073a08f0fc40f40e898f38cdf97/examples/timing/timings.py#L79-L84
import numpy as np import time from scipy.stats import norm from collections import defaultdict def bh(p, fdr): p_orders = np.argsort(p) discoveries = [] m = float(len(p_orders)) for k, s in enumerate(p_orders): if p[s] <= (k+1) / m * fdr: discoveries.append(s) else: break return np.array(discoveries) class DataGeneratingModel: def __init__(self, N, P, signals, sigma=1, nfactors=5): self.N = N self.P = P self.signals = signals self.sigma = sigma self.nfactors = nfactors self.covariates() self.response() def covariates(self): self.Z = np.random.gamma(1, 1, size=(self.N, self.nfactors)) self.W = np.random.normal(0, 1/np.sqrt(self.nfactors), size=(self.P, self.nfactors)) self.X = np.random.normal(self.Z.dot(self.W.T), self.sigma) def response(self): self.y = np.tanh(self.X[:,0]) * self.signals[0] + 5*np.tanh(self.X[:,1] * self.signals[1] + self.X[:,2] * self.signals[2]) + np.random.normal(0, self.sigma, size=self.X.shape[0]) def conditional_samples(self, rows, idx, nsamples=1): return np.squeeze(np.random.normal(self.Z[rows].dot(self.W[idx]), self.sigma, size=(len(rows), nsamples))) def conditional_grid(self, rows, idx, ngrid, tol=1e-8): mu = self.Z[rows].dot(self.W[idx]) grid_start = norm.ppf(tol, mu, scale=self.sigma) grid_end = norm.ppf(1-tol, mu, scale=self.sigma) grid = np.array([np.linspace(start, end, ngrid-1) for start, end in zip(grid_start, grid_end)]) grid = np.concatenate([grid, self.X[rows, idx:idx+1]], axis=1) grid_probs = norm.pdf(grid, mu[:,None], scale=self.sigma) grid_probs = grid_probs / grid_probs.sum(axis=1, keepdims=True) return grid, grid_probs def permutation_probs(self, rows, idx): unique_vals = np.unique(self.X[rows,idx]) logprobs = norm.logpdf(unique_vals[None], self.Z[rows].dot(self.W[idx])[:,None], scale=self.sigma) logprobs -= logprobs.max(axis=1, keepdims=True) numerator = np.exp(logprobs) return numerator / numerator.sum(axis=1, keepdims=True)
MIT License
azure/azure-kusto-python
azure-kusto-ingest/azure/kusto/ingest/_ingestion_blob_info.py
_convert_dict_to_json
python
def _convert_dict_to_json(array):
    return json.dumps(array, skipkeys=False, allow_nan=False, indent=None,
                      separators=(",", ":"), sort_keys=True, default=lambda o: o.__dict__)
Converts array to a json string
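The helper emits compact, key-sorted JSON; for example:

>>> _convert_dict_to_json({"b": 2, "a": 1})
'{"a":1,"b":2}'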
https://github.com/azure/azure-kusto-python/blob/b25b95c5621dbbe9218dbbc82060dc52e86a79c4/azure-kusto-ingest/azure/kusto/ingest/_ingestion_blob_info.py#L65-L67
import json import uuid from datetime import datetime class _IngestionBlobInfo: def __init__(self, blob_descriptor: "BlobDescriptor", ingestion_properties: "IngestionProperties", auth_context=None): self.properties = dict() self.properties["BlobPath"] = blob_descriptor.path self.properties["RawDataSize"] = blob_descriptor.size self.properties["DatabaseName"] = ingestion_properties.database self.properties["TableName"] = ingestion_properties.table self.properties["RetainBlobOnSuccess"] = True self.properties["FlushImmediately"] = ingestion_properties.flush_immediately self.properties["IgnoreSizeLimit"] = False self.properties["ReportLevel"] = ingestion_properties.report_level.value self.properties["ReportMethod"] = ingestion_properties.report_method.value self.properties["SourceMessageCreationTime"] = datetime.utcnow().isoformat() self.properties["Id"] = ( str(blob_descriptor.source_id) if hasattr(blob_descriptor, "source_id") and blob_descriptor.source_id is not None else str(uuid.uuid4()) ) additional_properties = ingestion_properties.additional_properties or {} additional_properties["authorizationContext"] = auth_context tags = [] if ingestion_properties.additional_tags: tags.extend(ingestion_properties.additional_tags) if ingestion_properties.drop_by_tags: tags.extend(["drop-by:" + drop for drop in ingestion_properties.drop_by_tags]) if ingestion_properties.ingest_by_tags: tags.extend(["ingest-by:" + ingest for ingest in ingestion_properties.ingest_by_tags]) if tags: additional_properties["tags"] = _convert_list_to_json(tags) if ingestion_properties.ingest_if_not_exists: additional_properties["ingestIfNotExists"] = _convert_list_to_json(ingestion_properties.ingest_if_not_exists) if ingestion_properties.ingestion_mapping: json_string = _convert_dict_to_json(ingestion_properties.ingestion_mapping) additional_properties["ingestionMapping"] = json_string if ingestion_properties.ingestion_mapping_reference: additional_properties["ingestionMappingReference"] = ingestion_properties.ingestion_mapping_reference if ingestion_properties.ingestion_mapping_type: additional_properties["ingestionMappingType"] = ingestion_properties.ingestion_mapping_type.value if ingestion_properties.validation_policy: additional_properties["ValidationPolicy"] = _convert_dict_to_json(ingestion_properties.validation_policy) if ingestion_properties.format: additional_properties["format"] = ingestion_properties.format.value if additional_properties: self.properties["AdditionalProperties"] = additional_properties def to_json(self): return _convert_list_to_json(self.properties) def _convert_list_to_json(array): return json.dumps(array, skipkeys=False, allow_nan=False, indent=None, separators=(",", ":"))
MIT License
maojui/cytro
cytro/ecm.py
sub_sure_factors
python
def sub_sure_factors(f, u, curve_params):
    if len(curve_params) == 1:
        for factor in sub_sub_sure_factors(f, u, curve_params[0]):
            yield factor
        return

    c1 = curve_params[:len(curve_params) >> 1]
    c2 = curve_params[len(curve_params) >> 1:]

    if mainloop(f, u, c1) == 1:
        for factor in sub_sure_factors(f, u, c2):
            yield factor
        return

    if mainloop(f, u, c2) == 1:
        for factor in sub_sure_factors(f, u, c1):
            yield factor
        return

    for factor in sub_sure_factors(f, u, c1):
        if isprime(factor):
            yield factor
        else:
            for factor_of_factor in sub_sure_factors(factor, u, c2):
                yield factor_of_factor

    return
Factors n as far as possible using the fact that f came from a mainloop call. Yields factors of n.
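A hedged usage sketch: the generator is consumed lazily and the yielded factors multiply back toward the input. The values of `f`, `u` and `curve_params` below are placeholders, and the call relies on `mainloop`, `sub_sub_sure_factors` and `isprime` from the surrounding module:

# Hypothetical inputs; in the real module these come from an earlier mainloop() run.
f = 1234169                      # composite left over by mainloop (placeholder)
u = 10000                        # smoothness bound used for that run (placeholder)
curve_params = [7, 11, 13, 17]   # curve parameters that produced f (placeholder)

recovered = 1
for factor in sub_sure_factors(f, u, curve_params):
    print("found factor:", factor)
    recovered *= factor
# In the typical case `recovered` now equals f again.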
https://github.com/maojui/cytro/blob/fdac91f2fbbbfd917f9340410ca8e63785fa01d2/cytro/ecm.py#L640-L669
import math, sys, random from .modular import invmod from .formula import * from operator import mul from functools import reduce class Pyecm_Const : INV_C = 13.0 LOG_2 = math.log(2) LOG_4 = math.log(4) LOG_3_MINUS_LOG_LOG_2 = math.log(3) - math.log(LOG_2) LOG_4_OVER_9 = LOG_4 / 9 _3_OVER_LOG_2 = 3 / LOG_2 _5_LOG_10 = 5 * math.log(10) _7_OVER_LOG_2 = 7 / LOG_2 BIG = 2.0**512 BILLION = 10**9 MULT = math.log(3) / LOG_2 ONE = 1 SMALL = 2.0**(-30) SMALLEST_COUNTEREXAMPLE_FASTPRIME = 2047 VERSION = '2.0.2' _12_LOG_2_OVER_49 = 12 * math.log(2) / 49 RECORD = 1162795072109807846655696105569042240239 def nextPrime(n): PRIMES = (5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 167) n += 1 if n <= 167: if n <= 23: if n <= 3: return 3 - (n <= 2) n += (n & 1) ^ 1 return n + (((4 - (n % 3)) >> 1) & 2) n += (n & 1) ^ 1 inc = n % 3 n += ((4 - inc) >> 1) & 2 inc = 6 - ((inc + ((2 - inc) & 2)) << 1) while 0 in (n % 5, n % 7, n % 11): n += inc inc = 6 - inc return n n += (n & 1) ^ 1 inc = n % 3 n += ((4 - inc) >> 1) & 2 inc = 6 - ((inc + ((2 - inc) & 2)) << 1) should_break = False while 1: for prime in PRIMES: if not n % prime: should_break = True break if should_break: should_break = False n += inc inc = 6 - inc continue p = 1 for i in range(int(math.log(n) / math.log(2)), 0, -1): p <<= (n >> i) & 1 p = (p * p) % n if p == 1: return n n += inc inc = 6 - inc def invert(a, b): if a == 0 or b == 0: return 0 truth = False if a < 0: truth = True a = -a b_orig = b alpha = 1 beta = 0 while not a & 1: if alpha & 1: alpha += b_orig alpha >>= 1 a >>= 1 if b > a: a, b = b, a alpha, beta = beta, alpha while b != 0 and a != b: a -= b alpha -= beta while not a & 1: if alpha & 1: alpha += b_orig alpha >>= 1 a >>= 1 if b > a: a,b = b,a alpha, beta = beta, alpha if a == b: a -= b alpha -= beta a, b = b, a alpha, beta = beta, alpha if a != 1: return 0 if truth: alpha = b_orig - alpha return alpha class ts: def __init__(self, degree, acc, p): self.acc = acc self.coefficients = p[:degree + 1] while len(self.coefficients) <= degree: self.coefficients.append(0) def add(self, a, b): b_ = b.coefficients[:] a_ = a.coefficients[:] self.coefficients = [] while len(b_) > len(a_): a_.append(0) while len(b_) < len(a_): b_.append(0) for i in range(len(a_)): self.coefficients.append(a_[i] + b_[i]) self.acc = a.acc def ev(self, x): answer = 0 for i in range(len(self.coefficients) - 1, -1, -1): answer *= x answer += self.coefficients[i] return answer def evh(self): answer = 0 for i in range(len(self.coefficients) - 1, -1, -1): answer >>= 1 answer += self.coefficients[i] return answer def evmh(self): answer = 0 for i in range(len(self.coefficients) - 1, -1, -1): answer = - answer >> 1 answer += self.coefficients[i] return answer def int(self): self.coefficients = [0] + self.coefficients for i in range(1, len(self.coefficients)): self.coefficients[i] //= i def lindiv(self, a): for i in range(len(self.coefficients) - 1): self.coefficients[i] <<= 1 self.coefficients[i] //= a self.coefficients[i + 1] -= self.coefficients[i] self.coefficients[-1] <<= 1 self.coefficients[-1] //= a def neg(self): for i in range(len(self.coefficients)): self.coefficients[i] = - self.coefficients[i] def set(self, a): self.coefficients = a.coefficients[:] self.acc = a.acc def simp(self): for i in range(len(self.coefficients)): shift = max(0, int(math.log(abs(self.coefficients[i]) + 1) / Pyecm_Const.LOG_2) - 1000) self.coefficients[i] = float(self.coefficients[i] >> shift) 
shift = self.acc - shift for _ in range(shift >> 9): self.coefficients[i] /= Pyecm_Const.BIG self.coefficients[i] /= 2.0**(shift & 511) if abs(self.coefficients[i] / self.coefficients[0]) <= Pyecm_Const.SMALL: self.coefficients = self.coefficients[:i] break def add(p1, p2, n): inv = list(range(len(p1))) for i in range(len(p1)): inv[i] = p1[i][0] - p2[i][0] inv = parallel_invert(inv, n) if not isinstance(inv, list): return inv for i in range(len(p1)): m = ((p1[i][1] - p2[i][1]) * inv[i]) % n p2[i][0] = (m * m - p1[i][0] - p2[i][0]) % n p2[i][1] = (m * (p1[i][0] - p2[i][0]) - p1[i][1]) % n return p2 def add_sub_x_only(p1, p2, n): sums = list(range(len(p1))) difs = list(range(len(p1))) for i in range(len(p1)): sums[i] = p2[i][0] - p1[i][0] sums = parallel_invert(sums, n) if not isinstance(sums, list): return (sums, None) for i in range(len(p1)): ms = ((p2[i][1] - p1[i][1]) * sums[i]) % n md = ((p2[i][1] + p1[i][1]) * sums[i]) % n sums[i] = (ms * ms - p1[i][0] - p2[i][0]) % n difs[i] = (md * md - p1[i][0] - p2[i][0]) % n sums = tuple(sums) difs = tuple(difs) return (sums, difs) def atdn(a, d, n): x = 1 pos = int(math.log(d) / Pyecm_Const.LOG_2) while pos >= 0: x = (x * x) % n if (d >> pos) & 1: x *= a pos -= 1 return x % n def copy(p): answer = [] for i in p: answer.append(i[:]) return answer def could_be_prime(n): if n < 2: return False if n == 2: return True if not n & 1: return False product = Pyecm_Const.ONE log_n = int(math.log(n)) + 1 bound = int(math.log(n) / (Pyecm_Const.LOG_2 * math.log(math.log(n))**2)) + 1 if bound * log_n >= n: bound = 1 log_n = int(sqrt(n)) prime_bound = 0 prime = 3 for _ in range(bound): p = [] prime_bound += log_n while prime <= prime_bound: p.append(prime) prime = nextPrime(prime) if p != []: p = prod(p) product = (product * p) % n return gcd(n, product) == 1 def double(p, n): inv = list(range(len(p))) for i in range(len(p)): inv[i] = p[i][1] << 1 inv = parallel_invert(inv, n) if not isinstance(inv, list): return inv for i in range(len(p)): x = p[i][0] m = (x * x) % n m = ((m + m + m + p[i][2]) * inv[i]) % n p[i][0] = (m * m - x - x) % n p[i][1] = (m * (x - p[i][0]) - p[i][1]) % n return p def fastprime(n): if not could_be_prime(n): return False if n == 2: return True j = 1 d = n >> 1 while not d & 1: d >>= 1 j += 1 p = 1 pos = int(math.log(d) / Pyecm_Const.LOG_2) while pos >= 0: p = (p * p) % n p <<= (d >> pos) & 1 pos -= 1 if p in (n - 1, n + 1): return True for _ in range(j): p = (p * p) % n if p == 1: return False elif p == n - 1: return True return False def greatest_n(phi_max): phi_product = 1 product = 1 prime = 1 while phi_product <= phi_max: prime = nextPrime(prime) phi_product *= prime - 1 product *= prime n_max = int((phi_max * product) // phi_product) phi_values = list(range(n_max)) prime = 2 while prime <= n_max: for i in range(0, n_max, prime): phi_values[i] -= int(phi_values[i] / prime) prime = nextPrime(prime) for i in range(n_max - 1, 0, -1): if phi_values[i] <= phi_max: return i def inv_const(n): return int(Pyecm_Const.INV_C * math.log(n)**0.42) def naf(d): g = 0 while d: g <<= 2 g ^= ((d & 2) & (d << 1)) ^ (d & 1) d += (d & 2) >> 1 d >>= 1 return g def parallel_invert(l, n): l_ = l[:] for i in range(len(l)-1): l[i+1] = (l[i] * l[i+1]) % n try: inv = invert(l[-1], n) except ZeroDivisionError: inv = 0 if inv == 0: return gcd(l[-1], n) for i in range(len(l)-1, 0, -1): l[i] = (inv * l[i-1]) % n inv = (inv * l_[i]) % n l[0] = inv return l def prod(p): jump = 1 while jump < len(p): for i in range(0, len(p) - jump, jump << 1): p[i] *= p[i + jump] 
p[i + jump] = None jump <<= 1 return p[0] def rho_ev(x, ts): return ts[int(x)].ev(x - int(x) - 0.5) def rho_ts(n): f = ts(10, 10, []) answer = [ts(10, 10, [1])] for _ in range(n): answer.append(ts(10, 10, [1])) deg = 5 acc = 50 + n * int(1 + math.log(1 + n) + math.log(math.log(3 + n))) r = 1 rho_series = ts(1, 10, [0]) while r != rho_series.coefficients[0]: deg = int((deg + (deg << 2)) // 3) r = rho_series.coefficients[0] rho_series = ts(deg, acc, [(1) << acc]) center = 0.5 for i in range(1, n+1): f.set(rho_series) center += 1 f.lindiv(int(2*center)) f.int() f.neg() d = ts(deg, acc, [rho_series.evh() - f.evmh()]) f.add(f, d) rho_series.set(f) f.simp() answer[i].set(f) rho_series.simp() return answer def sub_sub_sure_factors(f, u, curve_parameter): while not (f & 1): yield 2 f >>= 1 while not (f % 3): yield 3 f = int(f//3) if isprime(f): yield f return log_u = math.log(u) u2 = int(Pyecm_Const._7_OVER_LOG_2 * u * log_u / math.log(log_u)) primes = [] still_a_chance = True log_mo = math.log(f + 1 + sqrt(f << 2)) g = gcd(curve_parameter, f) if g not in (1, f): for factor in sub_sub_sure_factors(g, u, curve_parameter): yield factor for factor in sub_sub_sure_factors(int(f//g), u, curve_parameter): yield factor return g2 = gcd(curve_parameter**2 - 5, f) if g2 not in (1, f): for factor in sub_sub_sure_factors(g2, u, curve_parameter): yield factor for factor in sub_sub_sure_factors(int(f//g2), u, curve_parameter): yield factor return if f in (g, g2): yield f while still_a_chance: p1 = get_points([curve_parameter], f) for prime in primes: p1 = multiply(p1, prime, f) if not isinstance(p1, list): if p1 != f: for factor in sub_sub_sure_factors(p1, u, curve_parameter): yield factor for factor in sub_sub_sure_factors(f//p1, u, curve_parameter): yield factor return else: still_a_chance = False break if not still_a_chance: break prime = 1 still_a_chance = False while prime < u2: prime = nextPrime(prime) should_break = False for _ in range(int(log_mo / math.log(prime))): p1 = multiply(p1, prime, f) if not isinstance(p1, list): if p1 != f: for factor in sub_sub_sure_factors(p1, u, curve_parameter): yield factor for factor in sub_sub_sure_factors(int(f//p1), u, curve_parameter): yield factor return else: still_a_chance = True primes.append(prime) should_break = True break if should_break: break for i in range(2, int(math.log(f) / Pyecm_Const.LOG_2) + 2): r = nroot(f, i) if r[1]: for factor in sub_sub_sure_factors(r[0], u, curve_parameter): for _ in range(i): yield factor return a = 1 + sqrt(f) bsq = a * a - f iter = 0 while bsq != sqrt(bsq)**2 and iter < 3: a += 1 iter += 1 bsq += a + a - 1 if bsq == sqrt(bsq)**2: b = sqrt(bsq) for factor in sub_sub_sure_factors(a - b, u, curve_parameter): yield factor for factor in sub_sub_sure_factors(a + b, u, curve_parameter): yield factor return yield f return
MIT License
tensorflow/model-analysis
tensorflow_model_analysis/api/model_eval_lib.py
_update_eval_config_with_defaults
python
def _update_eval_config_with_defaults(
    eval_config: config_pb2.EvalConfig,
    eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels]
) -> config_pb2.EvalConfig:
  eval_shared_models = model_util.verify_and_update_eval_shared_models(
      eval_shared_model)
  has_baseline = eval_shared_models and len(eval_shared_models) == 2
  return config_util.update_eval_config_with_defaults(
      eval_config=eval_config,
      has_baseline=has_baseline,
      rubber_stamp=model_util.has_rubber_stamp(eval_shared_models))
Returns updated eval config with default values.
https://github.com/tensorflow/model-analysis/blob/6814617c50e073f8d039b96b03b19fef39fa0008/tensorflow_model_analysis/api/model_eval_lib.py#L163-L174
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Text, Union from absl import logging import apache_beam as beam import pandas as pd import pyarrow as pa import tensorflow as tf from tensorflow_model_analysis import constants from tensorflow_model_analysis import types from tensorflow_model_analysis.eval_saved_model import constants as eval_constants from tensorflow_model_analysis.evaluators import evaluator from tensorflow_model_analysis.evaluators import legacy_metrics_and_plots_evaluator from tensorflow_model_analysis.evaluators import metrics_plots_and_validations_evaluator from tensorflow_model_analysis.extractors import example_weights_extractor from tensorflow_model_analysis.extractors import extractor from tensorflow_model_analysis.extractors import features_extractor from tensorflow_model_analysis.extractors import labels_extractor from tensorflow_model_analysis.extractors import legacy_predict_extractor from tensorflow_model_analysis.extractors import predictions_extractor from tensorflow_model_analysis.extractors import slice_key_extractor from tensorflow_model_analysis.extractors import sql_slice_key_extractor from tensorflow_model_analysis.extractors import tfjs_predict_extractor from tensorflow_model_analysis.extractors import tflite_predict_extractor from tensorflow_model_analysis.extractors import transformed_features_extractor from tensorflow_model_analysis.extractors import unbatch_extractor from tensorflow_model_analysis.post_export_metrics import post_export_metrics from tensorflow_model_analysis.proto import config_pb2 from tensorflow_model_analysis.proto import metrics_for_slice_pb2 from tensorflow_model_analysis.proto import validation_result_pb2 from tensorflow_model_analysis.slicer import slicer_lib as slicer from tensorflow_model_analysis.utils import config_util from tensorflow_model_analysis.utils import model_util from tensorflow_model_analysis.validators import validator from tensorflow_model_analysis.view import util as view_util from tensorflow_model_analysis.view import view_types from tensorflow_model_analysis.writers import eval_config_writer from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer from tensorflow_model_analysis.writers import writer from tfx_bsl.arrow import table_util from tfx_bsl.tfxio import raw_tf_record from tfx_bsl.tfxio import tensor_adapter from tfx_bsl.tfxio import tf_example_record from tensorflow_metadata.proto.v0 import schema_pb2 def _assert_tensorflow_version(): major, minor, _ = tf.version.VERSION.split('.') if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15): raise RuntimeError( 'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please ' 'install the latest 1.x or 2.x version from ' 'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION) if int(major) == 2: logging.warning( 'Tensorflow version (%s) found. 
Note that TFMA support for TF 2.0 ' 'is currently in beta', tf.version.VERSION) def _is_legacy_eval( config_version: Optional[int], eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels], eval_config: Optional[config_pb2.EvalConfig]): return ((config_version is not None and config_version == 1) or (eval_shared_model and not isinstance(eval_shared_model, dict) and not isinstance(eval_shared_model, list) and (not eval_shared_model.model_loader.tags or eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and not eval_config)) def _default_eval_config(eval_shared_models: List[types.EvalSharedModel], slice_spec: Optional[List[slicer.SingleSliceSpec]], write_config: Optional[bool], compute_confidence_intervals: Optional[bool], min_slice_size: int): model_specs = [] for shared_model in eval_shared_models: example_weight_key = shared_model.example_weight_key example_weight_keys = {} if example_weight_key and isinstance(example_weight_key, dict): example_weight_keys = example_weight_key example_weight_key = '' model_specs.append( config_pb2.ModelSpec( name=shared_model.model_name, example_weight_key=example_weight_key, example_weight_keys=example_weight_keys)) slicing_specs = None if slice_spec: slicing_specs = [s.to_proto() for s in slice_spec] options = config_pb2.Options() options.compute_confidence_intervals.value = compute_confidence_intervals options.min_slice_size.value = min_slice_size if not write_config: options.disabled_outputs.values.append(eval_config_writer.EVAL_CONFIG_FILE) return config_pb2.EvalConfig( model_specs=model_specs, slicing_specs=slicing_specs, options=options) def _model_types( eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] ) -> Optional[Set[Text]]: eval_shared_models = model_util.verify_and_update_eval_shared_models( eval_shared_model) if not eval_shared_models: return None else: return set([m.model_type for m in eval_shared_models])
Apache License 2.0
jeffkinnison/shadho
shadho/hardware.py
ComputeClass.clear
python
def clear(self):
    self.searchspaces = []
Remove all models from this compute class.
https://github.com/jeffkinnison/shadho/blob/f4d4c9fbf019203b1f984dbe13e0a8e2062f1fa0/shadho/hardware.py#L101-L103
import uuid import sys from pyrameter.optimizer import FMin class ComputeClass(object): def __init__(self, name, resource, value, max_queued_tasks): self.id = str(uuid.uuid4()) self.name = name self.resource = resource self.value = value self.max_queued_tasks = max_queued_tasks self.current_tasks = 0 self.searchspaces = [] def __hash__(self): return hash((self.id, self.name, self.resource, self.value)) def add_searchspace(self, searchspace): if isinstance(searchspace, list): self.searchspaces.extend(searchspace) else: self.searchspaces.append(searchspace) def remove_searchspace(self, ssid): idx = None for i, ss in enumerate(self.searchspaces): if ss.id == ssid: idx = i break if idx is not None: self.searchspaces.pop(idx)
MIT License
berkeley-reclab/reclab
reclab/recommenders/libfm.py
LibFM.model_parameters
python
def model_parameters(self):
    self._model.train(self._train_data)
    return self._model.parameters()
Train a libfm model and get the resulting model's parameters.

The degree-2 factorization machine model predicts a rating by

    r(x) = b_0 + w^T x + Ind(j = i) Ind(k = u) V_j^T V_k

where:
- b_0 is the global bias,
- w is the vector of linear weights,
- V is the pairwise-interaction matrix with dimension k * (m+n), and V_j is the j-th row of V,
- x is the concatenation of the two one-hot encodings e_i and e_u, so w^T x corresponds to the user and item biases.

Returns
-------
global_bias : float
    Global bias term in the model.
weights : np.ndarray
    Linear terms in the model (related to user/item biases).
pairwise_interactions : np.ndarray
    Interaction term in the model (related to user/item factors).
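A small numpy sketch of the prediction rule described above; it is an illustration, not a library call. `global_bias`, `weights` and `pairwise_interactions` stand for the three values returned by `model_parameters()`, `user_index` / `item_index` are the positions of the two one-hot entries, and the sketch assumes `pairwise_interactions` is laid out with one row per feature (shape (m+n, k)); if the library returns the transpose, swap the indexing.

import numpy as np

def predict_rating(global_bias, weights, pairwise_interactions,
                   user_index, item_index, max_num_users):
    # w^T x: only the two "switched on" one-hot entries contribute.
    item_column = max_num_users + item_index
    linear = weights[user_index] + weights[item_column]
    # Interaction term: dot product of the user's and the item's factor vectors.
    interaction = pairwise_interactions[user_index].dot(pairwise_interactions[item_column])
    return global_bias + linear + interaction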
https://github.com/berkeley-reclab/reclab/blob/09d5b1639e9b7f6cbd230f181130b681e31cf4f0/reclab/recommenders/libfm.py#L202-L226
import numpy as np import scipy.sparse import wpyfm from . import recommender class LibFM(recommender.PredictRecommender): def __init__(self, num_user_features, num_item_features, num_rating_features, max_num_users, max_num_items, method='sgd', use_global_bias=True, use_one_way=True, num_two_way_factors=8, learning_rate=0.1, reg=0.0, bias_reg=None, one_way_reg=None, two_way_reg=None, init_stdev=0.1, num_iter=100, seed=0, **kwargs): super().__init__(**kwargs) if bias_reg is None: bias_reg = reg if one_way_reg is None: one_way_reg = reg if two_way_reg is None: two_way_reg = reg self._max_num_users = max_num_users self._max_num_items = max_num_items self._train_data = None self._num_features = (self._max_num_users + num_user_features + self._max_num_items + num_item_features + num_rating_features) self._model = wpyfm.PyFM(method=method, dim=(use_global_bias, use_one_way, num_two_way_factors), lr=learning_rate, reg=(bias_reg, one_way_reg, two_way_reg), init_stdev=init_stdev, num_iter=num_iter, seed=seed) self._hyperparameters.update(locals()) self._has_xt = method in ('mcmc', 'als') del self._hyperparameters['self'] del self._hyperparameters['__class__'] rating_inputs = scipy.sparse.csr_matrix((0, self._num_features)) rating_outputs = np.empty((0,)) self._train_data = wpyfm.Data(rating_inputs, rating_outputs, has_xt=self._has_xt) @property def name(self): return 'libfm' def reset(self, users=None, items=None, ratings=None): rating_inputs = scipy.sparse.csr_matrix((0, self._num_features)) rating_outputs = np.empty((0,)) self._train_data = wpyfm.Data(rating_inputs, rating_outputs, has_xt=self._has_xt) super().reset(users, items, ratings) def update(self, users=None, items=None, ratings=None): super().update(users, items, ratings) if ratings is not None: data = [] row_col = [[], []] new_rating_outputs = [] for row, ((user_id_outer, item_id_outer), (rating, rating_context)) in enumerate(ratings.items()): user_id = self._outer_to_inner_uid[user_id_outer] item_id = self._outer_to_inner_iid[item_id_outer] user_features = self._users[user_id] item_features = self._items[item_id] row_col[0].append(row) row_col[1].append(user_id) data.append(1) for i, feature in enumerate(user_features): row_col[0].append(row) row_col[1].append(self._max_num_users + i) data.append(feature) row_col[0].append(row) row_col[1].append(self._max_num_users + len(user_features) + item_id) data.append(1) for i, feature in enumerate(item_features): row_col[0].append(row) row_col[1].append(self._max_num_users + len(user_features) + self._max_num_items + i) data.append(feature) for i, feature in enumerate(rating_context): row_col[0].append(row) row_col[1].append(self._max_num_users + len(user_features) + self._max_num_items + len(item_features) + i) data.append(feature) new_rating_outputs.append(rating) new_rating_inputs = scipy.sparse.csr_matrix((data, row_col), shape=(len(ratings), self._num_features)) new_rating_outputs = np.array(new_rating_outputs) self._train_data.add_rows(new_rating_inputs, new_rating_outputs) def _predict(self, user_item): test_inputs = [] data = [] row_col = [[], []] for row, (user_id, item_id, rating_context) in enumerate(user_item): user_features = self._users[user_id] item_features = self._items[item_id] row_col[0].append(row) row_col[1].append(user_id) data.append(1) for i, feature in enumerate(user_features): row_col[0].append(row) row_col[1].append(self._max_num_users + i) data.append(feature) row_col[0].append(row) row_col[1].append(self._max_num_users + len(user_features) + item_id) data.append(1) 
for i, feature in enumerate(item_features): row_col[0].append(row) row_col[1].append(self._max_num_users + len(user_features) + self._max_num_items + i) data.append(feature) for i, feature in enumerate(rating_context): row_col[0].append(row) row_col[1].append(self._max_num_users + len(user_features) + self._max_num_items + len(item_features) + i) data.append(feature) test_inputs = scipy.sparse.csr_matrix((data, row_col), shape=(len(user_item), self._num_features)) test_data = wpyfm.Data(test_inputs, np.zeros(test_inputs.shape[0]), has_xt=self._has_xt) if self._has_xt: self._model.train(self._train_data, test=test_data) else: self._model.train(self._train_data) predictions = self._model.predict(test_data) return predictions
MIT License
megvii-basedetection/dynamicrouting
dl_lib/utils/file_io.py
PathHandler._ls
python
def _ls(self, path: str) -> List[str]:
    raise NotImplementedError()
List the contents of the directory at the provided URI.

Args:
    path (str): A URI supported by this PathHandler

Returns:
    List[str]: list of contents in given path
https://github.com/megvii-basedetection/dynamicrouting/blob/2ad0a95139b1bf21878dd222854f98974ac4930a/dl_lib/utils/file_io.py#L166-L176
import errno import logging import os import shutil from collections import OrderedDict from typing import IO, Any, Dict, List, MutableMapping, Optional from urllib.parse import urlparse import portalocker from dl_lib.utils.download import download __all__ = ["PathManager", "get_cache_dir", "file_lock"] def get_cache_dir(cache_dir: Optional[str] = None) -> str: if cache_dir is None: cache_dir = os.path.expanduser( os.getenv("DL_LIB_CACHE", "~/.torch/dl_lib_cache")) return cache_dir def file_lock(path: str): dirname = os.path.dirname(path) try: os.makedirs(dirname, exist_ok=True) except OSError: pass return portalocker.Lock(path + ".lock", timeout=1800) class PathHandler: def _get_supported_prefixes(self) -> List[str]: raise NotImplementedError() def _get_local_path(self, path: str) -> str: raise NotImplementedError() def _open(self, path: str, mode: str = "r") -> IO[Any]: raise NotImplementedError() def _copy(self, src_path: str, dst_path: str, overwrite: bool = False) -> bool: raise NotImplementedError() def _exists(self, path: str) -> bool: raise NotImplementedError() def _isfile(self, path: str) -> bool: raise NotImplementedError() def _isdir(self, path: str) -> bool: raise NotImplementedError()
Apache License 2.0
alpacahq/pylivetrader
pylivetrader/finance/asset_restrictions.py
vectorized_is_element
python
def vectorized_is_element(array, choices):
    return vectorize(choices.__contains__, otypes=[bool])(array)
Check if each element of ``array`` is in ``choices``.

Parameters
----------
array : np.ndarray
choices : object
    Object implementing __contains__.

Returns
-------
was_element : np.ndarray[bool]
    Array indicating whether each element of ``array`` was in ``choices``.
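A quick illustrative call; the asset symbols and the restricted set below are made up:

import numpy as np
from numpy import vectorize

def vectorized_is_element(array, choices):
    return vectorize(choices.__contains__, otypes=[bool])(array)

assets = np.array(["AAPL", "MSFT", "TSLA"])   # hypothetical symbols
restricted = {"TSLA"}                          # hypothetical restricted set
print(vectorized_is_element(assets, restricted))  # [False False  True]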
https://github.com/alpacahq/pylivetrader/blob/2d9bf97103814409ba8b56a4291f2655c59514ee/pylivetrader/finance/asset_restrictions.py#L236-L251
import abc from numpy import vectorize from functools import partial, reduce import operator import pandas as pd from six import with_metaclass, iteritems from collections import namedtuple from toolz import groupby from enum import IntEnum from pylivetrader.assets import Asset Restriction = namedtuple( 'Restriction', ['asset', 'effective_date', 'state'] ) class RESTRICTION_STATES(IntEnum): ALLOWED = 0 FROZEN = 1 class Restrictions(with_metaclass(abc.ABCMeta)): @abc.abstractmethod def is_restricted(self, assets, dt): raise NotImplementedError('is_restricted') def __or__(self, other_restriction): if isinstance(other_restriction, _UnionRestrictions): return other_restriction | self return _UnionRestrictions([self, other_restriction]) class _UnionRestrictions(Restrictions): def __new__(cls, sub_restrictions): sub_restrictions = [ r for r in sub_restrictions if not isinstance(r, NoRestrictions) ] if len(sub_restrictions) == 0: return NoRestrictions() elif len(sub_restrictions) == 1: return sub_restrictions[0] new_instance = super(_UnionRestrictions, cls).__new__(cls) new_instance.sub_restrictions = sub_restrictions return new_instance def __or__(self, other_restriction): if isinstance(other_restriction, _UnionRestrictions): new_sub_restrictions = self.sub_restrictions + other_restriction.sub_restrictions else: new_sub_restrictions = self.sub_restrictions + [other_restriction] return _UnionRestrictions(new_sub_restrictions) def is_restricted(self, assets, dt): if isinstance(assets, Asset): return any( r.is_restricted(assets, dt) for r in self.sub_restrictions ) return reduce( operator.or_, (r.is_restricted(assets, dt) for r in self.sub_restrictions) ) class NoRestrictions(Restrictions): def is_restricted(self, assets, dt): if isinstance(assets, Asset): return False return pd.Series(index=pd.Index(assets), data=False) class StaticRestrictions(Restrictions): def __init__(self, restricted_list): self._restricted_set = frozenset(restricted_list) def is_restricted(self, assets, dt): if isinstance(assets, Asset): return assets in self._restricted_set return pd.Series( index=pd.Index(assets), data=vectorized_is_element(assets, self._restricted_set) ) class HistoricalRestrictions(Restrictions): def __init__(self, restrictions): self._restrictions_by_asset = { asset: sorted( restrictions_for_asset, key=lambda x: x.effective_date ) for asset, restrictions_for_asset in iteritems(groupby(lambda x: x.asset, restrictions)) } def is_restricted(self, assets, dt): if isinstance(assets, Asset): return self._is_restricted_for_asset(assets, dt) is_restricted = partial(self._is_restricted_for_asset, dt=dt) return pd.Series( index=pd.Index(assets), data=vectorize(is_restricted, otypes=[bool])(assets) ) def _is_restricted_for_asset(self, asset, dt): state = RESTRICTION_STATES.ALLOWED for r in self._restrictions_by_asset.get(asset, ()): if r.effective_date > dt: break state = r.state return state == RESTRICTION_STATES.FROZEN class SecurityListRestrictions(Restrictions): def __init__(self, security_list_by_dt): self.current_securities = security_list_by_dt.current_securities def is_restricted(self, assets, dt): securities_in_list = self.current_securities(dt) if isinstance(assets, Asset): return assets in securities_in_list return pd.Series( index=pd.Index(assets), data=vectorized_is_element(assets, securities_in_list) )
Apache License 2.0
bcicen/docker-replay
docker_replay/parser.py
ConfigParser.get
python
def get(self, key, default=None):
    key_parts = key.split('.')
    config = self.config
    while key_parts:
        try:
            config = config[key_parts.pop(0)]
        except KeyError:
            log.warn('returning default for missing key: %s' % key)
            return default
    log.debug('get key: %s (%s)' % (key, config))
    return config
Retrieve a top-level or nested key, e.g:

>>> get('Id')
>>> get('HostConfig.Binds')
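One additional hedged example of the default-value behaviour; `parser` is assumed to be a ConfigParser built from `docker inspect` output, and the first key is assumed to be absent:

# parser is a ConfigParser over a container's inspect config (assumption).
binds = parser.get('HostConfig.Binds', default=[])   # [] when the key is missing
image = parser.get('Config.Image')                    # None when missing and no default is given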
https://github.com/bcicen/docker-replay/blob/e7a4b2142bc6ba130f9b39b867cbb7784da02f88/docker_replay/parser.py#L34-L49
import logging from docker_replay.args import config_args from docker_replay.opts import config_opts from docker_replay.models import DockerOpt, DockerArg log = logging.getLogger('docker-replay') class ConfigParser(object): args = [] opts = [] def __init__(self, config): self.config = config for op in config_opts: o_val = self.get(op.key, op.otype.default) self.opts += list(op.build(o_val)) for ap in config_args: o_val = self.get(ap.key) self.args += list(ap.build(o_val)) olen, alen = len(self.opts), len(self.args) self.opts = [ o for o in self.opts if not o.is_null() ] self.args = [ o for o in self.args if not o.is_null() ] log.info('parsed %d options (%d configured)' % (olen, len(self.opts))) log.info('parsed %d args (%d configured)' % (alen, len(self.args)))
MIT License
evrenesat/ganihomes
easy_thumbnails/management/__init__.py
delete_all_thumbnails
python
def delete_all_thumbnails(path, recursive=True):
    total = 0
    for thumbs in all_thumbnails(path, recursive=recursive).values():
        total += _delete_using_thumbs_list(thumbs)
    return total
Delete all files within a path which match the thumbnails pattern. By default, matching files from all sub-directories are also removed. To only remove from the path directory, set recursive=False.
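A hedged usage sketch; the media path is a placeholder:

# Assumed path; point this at the directory that holds the generated thumbnails.
removed = delete_all_thumbnails('/var/www/media/photos')
print('%d thumbnail files removed' % removed)

# Only clean the top-level directory, leaving sub-directories untouched.
removed_top = delete_all_thumbnails('/var/www/media/photos', recursive=False)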
https://github.com/evrenesat/ganihomes/blob/eece2d8d957989b176cc5a36d723f676862f8d17/easy_thumbnails/management/__init__.py#L117-L127
from django.conf import settings from easy_thumbnails.utils import get_setting import os import re re_thumbnail_file = re.compile(r'(?P<source_filename>.+)_(?P<x>\d+)x(?P<y>\d+)' r'(?:_(?P<options>\w+))?_q(?P<quality>\d+)' r'(?:.[^.]+)?$') def all_thumbnails(path, recursive=True, prefix=None, subdir=None): if prefix is None: prefix = get_setting('PREFIX') if subdir is None: subdir = get_setting('SUBDIR') thumbnail_files = {} if not path.endswith('/'): path = '%s/' % path len_path = len(path) if recursive: all = os.walk(path) else: files = [] for file in os.listdir(path): if os.path.isfile(os.path.join(path, file)): files.append(file) all = [(path, [], files)] for dir_, subdirs, files in all: rel_dir = dir_[len_path:] for file in files: thumb = re_thumbnail_file.match(file) if not thumb: continue d = thumb.groupdict() source_filename = d.pop('source_filename') if prefix: source_path, source_filename = os.path.split(source_filename) if not source_filename.startswith(prefix): continue source_filename = os.path.join(source_path, source_filename[len(prefix):]) d['options'] = d['options'] and d['options'].split('_') or [] if subdir and rel_dir.endswith(subdir): rel_dir = rel_dir[:-len(subdir)] m = re.match(r'(.*)_(.*)', source_filename) if m: source_filename = '%s.%s' % m.groups() filename = os.path.join(rel_dir, source_filename) thumbnail_file = thumbnail_files.setdefault(filename, []) d['filename'] = os.path.join(dir_, file) thumbnail_file.append(d) return thumbnail_files def thumbnails_for_file(relative_source_path, root=None, basedir=None, subdir=None, prefix=None): if root is None: root = settings.MEDIA_ROOT if prefix is None: prefix = get_setting('PREFIX') if subdir is None: subdir = get_setting('SUBDIR') if basedir is None: basedir = get_setting('BASEDIR') source_dir, filename = os.path.split(relative_source_path) thumbs_path = os.path.join(root, basedir, source_dir, subdir) if not os.path.isdir(thumbs_path): return [] files = all_thumbnails(thumbs_path, recursive=False, prefix=prefix, subdir='') return files.get(filename, []) def delete_thumbnails(relative_source_path, root=None, basedir=None, subdir=None, prefix=None): thumbs = thumbnails_for_file(relative_source_path, root, basedir, subdir, prefix) return _delete_using_thumbs_list(thumbs) def _delete_using_thumbs_list(thumbs): deleted = 0 for thumb_dict in thumbs: filename = thumb_dict['filename'] try: os.remove(filename) except: pass else: deleted += 1 return deleted
BSD 2-Clause Simplified License
yongzhuo/pytorch-nlu
pytorch_nlu/pytorch_textclassification/tcGraph.py
TCGraph.__init__
python
def __init__(self, graph_config):
    self.graph_config = graph_config
    pretrained_config, pretrained_tokenizer, pretrained_model = PRETRAINED_MODEL_CLASSES[graph_config.model_type]
    self.pretrained_config = pretrained_config.from_pretrained(graph_config.pretrained_model_name_or_path,
                                                               output_hidden_states=graph_config.output_hidden_states)
    self.tokenizer = pretrained_tokenizer.from_pretrained(graph_config.pretrained_model_name_or_path)
    super(TCGraph, self).__init__(self.pretrained_config)
    self.model = pretrained_model.from_pretrained(graph_config.pretrained_model_name_or_path, config=self.pretrained_config)
    self.tokenizer.model_max_length = self.model.config.max_position_embeddings
    if self.graph_config.output_hidden_states:
        self.dense = FCLayer(int(self.pretrained_config.hidden_size * len(self.graph_config.output_hidden_states) * 3),
                             self.graph_config.num_labels, is_dropout=self.graph_config.is_dropout,
                             is_active=self.graph_config.is_active, active_type=self.graph_config.active_type)
    else:
        self.dense = FCLayer(self.pretrained_config.hidden_size, self.graph_config.num_labels,
                             is_dropout=self.graph_config.is_dropout, is_active=self.graph_config.is_active,
                             active_type=self.graph_config.active_type)
    self.global_maxpooling = torch.nn.AdaptiveMaxPool1d(1)
    self.global_avgpooling = torch.nn.AdaptiveAvgPool1d(1)
    self.loss_type = self.graph_config.loss_type if self.graph_config.loss_type else "BCE"
    self.loss_ce = torch.nn.CrossEntropyLoss(ignore_index=0)
    self.loss_mlsm = torch.nn.MultiLabelSoftMarginLoss()
    self.loss_bcelog = torch.nn.BCEWithLogitsLoss()
    self.loss_bce = torch.nn.BCELoss()
    self.loss_mse = torch.nn.MSELoss()
    self.loss_pmlsm = PriorMultiLabelSoftMarginLoss(prior=self.graph_config.prior, num_labels=self.graph_config.num_labels)
    self.loss_circle = MultiLabelCircleLoss()
    self.loss_lsce = LabelSmoothingCrossEntropy()
    self.loss_focal = FocalLoss()
    self.loss_dice = DiceLoss()
    self.softmax = torch.nn.Softmax(dim=-1)
    self.sigmoid = torch.nn.Sigmoid()
    self.dropout = torch.nn.Dropout
Pytorch graph for text classification, built on a pre-trained model.

config:
    config: json, params of graph, e.g. {"num_labels": 17, "model_type": "BERT"}

Returns:
    output: Tuple, Tensor of logits and loss

Url: https://github.com/yongzhuo
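A minimal sketch of the kind of `graph_config` object the constructor expects, based only on the attributes it reads; every concrete value here is an assumption, not the project's documented config:

from types import SimpleNamespace

# Each attribute below is dereferenced somewhere in __init__ or the loss setup.
graph_config = SimpleNamespace(
    model_type="BERT",                                   # assumed key into PRETRAINED_MODEL_CLASSES
    pretrained_model_name_or_path="bert-base-chinese",   # assumed checkpoint name
    output_hidden_states=None,                           # or e.g. (-1, -2, -3) to pool several layers
    num_labels=17,
    is_dropout=True,
    is_active=True,
    active_type="RELU",                                  # assumed activation name for FCLayer
    loss_type="BCE",
    prior=None,                                          # placeholder; a real config supplies label priors
)
model = TCGraph(graph_config)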
https://github.com/yongzhuo/pytorch-nlu/blob/acb5cdb450efaac0c64b38d58a66aca9f942254b/pytorch_nlu/pytorch_textclassification/tcGraph.py#L17-L60
from tcLayer import PriorMultiLabelSoftMarginLoss, MultiLabelCircleLoss, LabelSmoothingCrossEntropy from tcLayer import FCLayer, FocalLoss, DiceLoss from tcConfig import PRETRAINED_MODEL_CLASSES from transformers import BertPreTrainedModel import torch class TCGraph(BertPreTrainedModel):
Apache License 2.0
netket/netket
netket/hilbert/spin.py
Spin.__init__
python
def __init__(
    self,
    s: float,
    N: int = 1,
    total_sz: Optional[float] = None,
    graph: Optional[AbstractGraph] = None,
):
    N = graph_to_N_depwarn(N=N, graph=graph)

    local_size = round(2 * s + 1)
    local_states = np.empty(local_size)

    assert int(2 * s + 1) == local_size

    for i in range(local_size):
        local_states[i] = -round(2 * s) + 2 * i
    local_states = local_states.tolist()

    _check_total_sz(total_sz, s, N)
    if total_sz is not None:

        def constraints(x):
            return _sum_constraint(x, total_sz)

    else:
        constraints = None

    self._total_sz = total_sz if total_sz is None else total_sz
    self._s = s

    super().__init__(local_states, N, constraints)
r"""Hilbert space obtained as tensor product of local spin states. Args: s: Spin at each site. Must be integer or half-integer. N: Number of sites (default=1) total_sz: If given, constrains the total spin of system to a particular value. graph: (deprecated) a graph from which to extract the number of sites. Examples: Simple spin hilbert space. >>> import netket as nk >>> hi = nk.hilbert.Spin(s=1/2, N=4) >>> print(hi.size) 4
https://github.com/netket/netket/blob/74248a39e86bb501eaf6822e76107c4926321f80/netket/hilbert/spin.py#L62-L109
from fractions import Fraction from typing import Optional, List, Union import numpy as np from netket.graph import AbstractGraph from numba import jit from .homogeneous import HomogeneousHilbert from ._deprecations import graph_to_N_depwarn def _check_total_sz(total_sz, S, size): if total_sz is None: return local_size = 2 * S + 1 m = round(2 * total_sz) if np.abs(m) > size * (2 * S): raise ValueError( "Cannot fix the total magnetization: 2|M| cannot " "exceed Nspins." ) if local_size % 2 == 0: if (size + m) % 2 != 0: raise ValueError( "Cannot fix the total magnetization: Nspins + 2*totalSz must be even." ) else: if m % 2 != 0: raise ValueError( "Cannot fix the total magnetization to a half-integer number" ) @jit(nopython=True) def _sum_constraint(x, total_sz): return np.sum(x, axis=1) == round(2 * total_sz) class Spin(HomogeneousHilbert):
Apache License 2.0
hypothesis/h
h/presenters/document_html.py
DocumentHTMLPresenter.link
python
def link(self):
    return _format_document_link(
        self.href, self.title, self.link_text, self.hostname_or_filename
    )
Return a link to this document.

Returns HTML strings like:

    <a href="{href}" title="{title}">{link_text}</a> {hostname}

    <em>Local file:</em> {title}<br>{hostname}

where:

- {href} is the uri of the document, if it has an http(s):// uri
- {title} is the title of the document. If the document has no title then its uri will be used instead. If it's a local file:// uri then only the filename part is used, not the full path.
- {link_text} is the same as {title}, but truncated with &hellip; if it's too long
- {hostname} is the hostname of the document's uri without the scheme (http(s)://) and www parts, e.g. 'example.com'. If it's a local file:// uri then the filename is used as the hostname. If the hostname is too long it is truncated with &hellip;.

The {hostname} part will be missing if it wouldn't be any different from the {link_text} part.

The href="{href}" will be missing if there's no http(s) uri to link to for this annotation's document.

User-supplied values are escaped so the string is safe for raw rendering (the returned string is actually a Markup object and won't be escaped by Jinja2 when rendering).
https://github.com/hypothesis/h/blob/1bf1fe34fd471f26a216e682d15ce986dd400fdb/h/presenters/document_html.py#L77-L113
from urllib.parse import unquote, urlparse import jinja2 class DocumentHTMLPresenter: def __init__(self, document): self.document = document @property def filename(self): if self.uri.lower().startswith("file:///"): return jinja2.escape(self.uri.split("/")[-1]) return "" @property def href(self): if self.document.web_uri: return jinja2.escape(self.document.web_uri) return "" @property def hostname_or_filename(self): if self.filename: return jinja2.escape(unquote(self.filename)) hostname = urlparse(self.uri).hostname hostname = hostname or "" return jinja2.escape(hostname) @property
BSD 2-Clause Simplified License
aws/sagemaker-huggingface-inference-toolkit
src/sagemaker_huggingface_inference_toolkit/transformers_utils.py
_load_model_from_hub
python
def _load_model_from_hub(
    model_id: str, model_dir: Path, revision: Optional[str] = None, use_auth_token: Optional[str] = None
):
    logger.warn(
        "This is an experimental beta features, which allows downloading model from the Hugging Face Hub on start up. "
        "It loads the model defined in the env var `HF_MODEL_ID`"
    )
    _api = HfApi()
    model_info = _api.model_info(repo_id=model_id, revision=revision, token=use_auth_token)

    os.makedirs(model_dir, exist_ok=True)

    framework = _get_framework()
    storage_folder = _build_storage_path(model_id, model_dir, revision)
    os.makedirs(storage_folder, exist_ok=True)

    download_file_list = [
        file.rfilename
        for file in model_info.siblings
        if file.rfilename in FILE_LIST_NAMES + [FRAMEWORK_MAPPING[framework]]
    ]

    for file in download_file_list:
        url = hf_hub_url(model_id, filename=file, revision=revision)
        path = cached_download(url, cache_dir=storage_folder, force_filename=file, use_auth_token=use_auth_token)
        if os.path.exists(path + ".lock"):
            os.remove(path + ".lock")
    return storage_folder
Downloads a model repository at the specified revision from the Hugging Face Hub. All files are nested inside a folder in order to keep their actual filename relative to that folder, e.g. `org__model.revision`.
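A hedged usage sketch; the model id, target directory and token below are placeholders (in the toolkit these normally come from the `HF_MODEL_ID`, `HF_MODEL_REVISION` and `HF_API_TOKEN` environment variables):

from pathlib import Path

# Assumed inputs for illustration only.
storage_folder = _load_model_from_hub(
    model_id="distilbert-base-uncased-finetuned-sst-2-english",
    model_dir=Path("/opt/ml/model"),
    revision=None,
    use_auth_token=None,
)
print(storage_folder)  # e.g. /opt/ml/model/distilbert-base-uncased-finetuned-sst-2-english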
https://github.com/aws/sagemaker-huggingface-inference-toolkit/blob/f884fc65d64f2e15637ccf16d2a83e37114cb23f/src/sagemaker_huggingface_inference_toolkit/transformers_utils.py#L158-L198
import json import logging import os from typing import Optional from huggingface_hub import HfApi from huggingface_hub.file_download import cached_download, hf_hub_url from transformers import pipeline from transformers.file_utils import is_tf_available, is_torch_available from transformers.pipelines import Pipeline, Conversation if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch from pathlib import Path logger = logging.getLogger(__name__) PYTORCH_WEIGHTS_NAME = "pytorch_model.bin" TF2_WEIGHTS_NAME = "tf_model.h5" FRAMEWORK_MAPPING = {"pytorch": PYTORCH_WEIGHTS_NAME, "tensorflow": TF2_WEIGHTS_NAME} FILE_LIST_NAMES = [ "config.json", "special_tokens_map.json", "tokenizer_config.json", "tokenizer.json", "vocab.json", "vocab.txt", "merges.txt", "dict.txt", "preprocessor_config.json", "added_tokens.json", "README.md", "spiece.model", "sentencepiece.bpe.model", "sentencepiece.bpe.vocab", "sentence.bpe.model", "bpe.codes", "source.spm", "target.spm", "spm.model", "sentence_bert_config.json", "sentence_roberta_config.json", "sentence_distilbert_config.json", "added_tokens.json", "model_args.json", "entity_vocab.json", "pooling_config.json", ] REPO_ID_SEPARATOR = "__" ARCHITECTURES_2_TASK = { "TapasForQuestionAnswering": "table-question-answering", "ForQuestionAnswering": "question-answering", "ForTokenClassification": "token-classification", "ForSequenceClassification": "text-classification", "ForMultipleChoice": "multiple-choice", "ForMaskedLM": "fill-mask", "ForCausalLM": "text-generation", "ForConditionalGeneration": "text2text-generation", "MTModel": "text2text-generation", "EncoderDecoderModel": "text2text-generation", "GPT2LMHeadModel": "text-generation", "T5WithLMHeadModel": "text2text-generation", } HF_API_TOKEN = os.environ.get("HF_API_TOKEN", None) HF_MODEL_REVISION = os.environ.get("HF_MODEL_REVISION", None) def wrap_conversation_pipeline(pipeline): def wrapped_pipeline(inputs, *args, **kwargs): converted_input = Conversation( inputs["text"], past_user_inputs=inputs.get("past_user_inputs", []), generated_responses=inputs.get("generated_responses", []), ) prediction = pipeline(converted_input, *args, **kwargs) return { "generated_text": prediction.generated_responses[-1], "conversation": { "past_user_inputs": prediction.past_user_inputs, "generated_responses": prediction.generated_responses, }, } return wrapped_pipeline def _is_gpu_available(): if is_tf_available(): return True if len(tf.config.list_physical_devices("GPU")) > 0 else False elif is_torch_available(): return torch.cuda.is_available() else: raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." ) def _get_framework(): if is_torch_available(): return "pytorch" elif is_tf_available(): return "tensorflow" else: raise RuntimeError( "At least one of TensorFlow 2.0 or PyTorch should be installed. " "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ " "To install PyTorch, read the instructions at https://pytorch.org/." ) def _build_storage_path(model_id: str, model_dir: Path, revision: Optional[str] = None): if "/" and revision is None: storage_path = os.path.join(model_dir, model_id.replace("/", REPO_ID_SEPARATOR)) elif "/" and revision is not None: storage_path = os.path.join(model_dir, model_id.replace("/", REPO_ID_SEPARATOR) + "." 
+ revision) elif revision is not None: storage_path = os.path.join(model_dir, model_id + "." + revision) else: storage_path = os.path.join(model_dir, model_id) return storage_path
Apache License 2.0
fake-name/readablewebproxy
WebMirror/OfflineFilters/NewNetlocTracker.py
exposed_new_from_autotreiver_db
python
def exposed_new_from_autotreiver_db(db_path: str):
    assert os.path.exists(db_path), "File specified by db_path must exist!"

    import sqlite3
    sdb = sqlite3.connect(db_path)

    res = sdb.execute("SELECT actual_target FROM nu_release_item")
    urls = res.fetchall()

    mapdict = {}
    print("Found %s rows in database." % (len(urls), ))
    for url, in tqdm.tqdm(urls):
        if url:
            itemnl = WebMirror.OutputFilters.util.feedNameLut.patch_blogspot(urllib.parse.urlsplit(url).netloc)
            mapdict.setdefault(itemnl, set())
            mapdict[itemnl].add(url)

    push_urls_into_table(mapdict)
Given an autotreiver-generated sqlite db, fetch all new URLs from that file
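A hedged example call; the sqlite path is a placeholder. The extracted netlocs end up in the NewNetlocTracker table via `push_urls_into_table`:

# Assumed location of an autotreiver-generated database.
exposed_new_from_autotreiver_db("/srv/autotreiver/releases.sqlite3")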
https://github.com/fake-name/readablewebproxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/OfflineFilters/NewNetlocTracker.py#L401-L423
import sys import os.path import time import datetime import traceback import urllib.parse import tqdm import sqlalchemy.exc from sqlalchemy.orm import joinedload from sqlalchemy import desc import WebRequest import common.database as db import common.management.util import common.global_constants import common.util.urlFuncs as urlFuncs import WebMirror.OutputFilters.util.feedNameLut import WebMirror.rules def get_wln_release_urls(): print("loading netlocs from WLN release listings") import settings if '__pypy__' in sys.builtin_module_names: import psycopg2cffi as psycopg2 else: import psycopg2 conn = psycopg2.connect( host = settings.WLN_DB_DATABASE_IP, dbname = settings.WLN_DB_DATABASE_DB_NAME, user = settings.WLN_DB_DATABASE_USER, password = settings.WLN_DB_DATABASE_PASS, ) print("Conn:", conn) cur = conn.cursor() print("Fetching rows from changes table") cur.execute(""" SELECT DISTINCT(srcurl) FROM releaseschanges; """) rows_1 = cur.fetchall() print("Fetching rows from main table") cur.execute(""" SELECT DISTINCT(srcurl) FROM releases; """) rows_2 = cur.fetchall() print("Received %s, %s distinct URLs" % (len(rows_1), len(rows_2))) nlfilter = {} for url, in tqdm.tqdm(rows_1 + rows_2): if url: if isinstance(url, bytes): url = url.decode("utf-8") itemnl = WebMirror.OutputFilters.util.feedNameLut.patch_blogspot(urllib.parse.urlsplit(url).netloc) nlfilter.setdefault(itemnl, set()) nlfilter[itemnl].add(url) print("WLN Releases distinct netlocs: %s" % len(nlfilter)) return nlfilter def get_nu_head_urls(): print("Loading netlocs from nuheader system") with db.session_context() as sess: nu_items = sess.query(db.NuReleaseItem) .filter(db.NuReleaseItem.actual_target != None) .all() mapdict = {} for row in nu_items: itemnl = WebMirror.OutputFilters.util.feedNameLut.patch_blogspot(urllib.parse.urlsplit(row.actual_target).netloc) mapdict.setdefault(itemnl, set()) mapdict[itemnl].add(row.actual_target) print("Nu outbound items: ", len(mapdict)) return mapdict def get_distance_of_zero_urls(): print("Loading short-distance netlocs") with db.session_context() as sess: page_items = sess.query(db.WebPages.url) .filter(db.WebPages.distance <= db.DB_DEFAULT_DIST) .filter(db.WebPages.is_text == True) .yield_per(10000) .all() mapdict = {} for row, in tqdm.tqdm(page_items): itemnl = WebMirror.OutputFilters.util.feedNameLut.patch_blogspot(urllib.parse.urlsplit(row).netloc) mapdict.setdefault(itemnl, set()) mapdict[itemnl].add(row) print("short-distance items: ", len(mapdict)) return mapdict def filter_get_have_urls(): rules = WebMirror.rules.load_rules() urls = [item['starturls'] if item['starturls'] else [] + item['feedurls'] if item['feedurls'] else [] for item in rules] urls = [item for sublist in urls for item in sublist] start_netloc_dict = {} for url in urls: itemnl = WebMirror.OutputFilters.util.feedNameLut.patch_blogspot(urllib.parse.urlsplit(url).netloc) start_netloc_dict.setdefault(itemnl, []) start_netloc_dict[itemnl] = url missing = 0 with db.session_context() as sess: rows = sess.query(db.NewNetlocTracker) .filter(db.NewNetlocTracker.ignore == False) .filter(db.NewNetlocTracker.have == False) .all() for row in tqdm.tqdm(rows): if not row.netloc: print("What:", (row.id, row.example_url, row.netloc)) sess.delete(row) sess.commit() continue assert row.netloc netloc = row.netloc.lower() bad = False if urlFuncs.SQUATTER_NETLOC_RE.match(netloc): bad = True if netloc in common.global_constants.NU_NEW_MASK_NETLOCS: bad = True if netloc.endswith(".photobucket.com"): bad = True if 
netloc.endswith(".postimg.org"): bad = True if "www.novelupdates.com" in netloc: bad = True if netloc.endswith("files.wordpress.com"): bad = True if netloc.endswith("media.tumblr.com"): bad = True if netloc.endswith("bp.blogspot.com"): bad = True cleaned = urlFuncs.cleanUrl(row.example_url) if cleaned != row.example_url: row.example_url = cleaned sess.commit() if WebMirror.OutputFilters.util.feedNameLut.getNiceName(sess, srcurl=None, netloc=netloc): row.ignore = False row.have = True sess.commit() continue if netloc.startswith("www."): if WebMirror.OutputFilters.util.feedNameLut.getNiceName(sess, srcurl=None, netloc=netloc[4:]): bad = True if netloc in start_netloc_dict: row.ignore = False row.have = True sess.commit() continue if bad: row.ignore = True sess.commit() continue missing += 1 total = len(rows) print("Total outbound items: ", total, "missing:", missing) def update_missing_new_with_title(): wg = WebRequest.WebGetRobust() with db.session_context(override_timeout_ms=1000 * 60 * 5) as sess: rows = sess.query(db.NewNetlocTracker) .filter(db.NewNetlocTracker.ignore == False) .filter(db.NewNetlocTracker.have == False) .all() sess.commit() print("Missing items:", len(rows)) for row in tqdm.tqdm(rows): if row.extra is None: row.extra = {} if not 'title' in row.extra: titledict = common.management.util.get_page_title(wg, row.example_url) for key, value in titledict.items(): row.extra[key] = value try: sess.commit() except sqlalchemy.exc.InternalError: print("(session_context) -> Transaction error (sqlalchemy.exc.InternalError).") sess.rollback() except sqlalchemy.exc.OperationalError: print("(session_context) -> Transaction error (sqlalchemy.exc.OperationalError).") sess.rollback() except sqlalchemy.exc.IntegrityError: print("(session_context) -> Transaction error (sqlalchemy.exc.IntegrityError).") sess.rollback() except sqlalchemy.exc.InvalidRequestError: print("(session_context) -> Transaction error (sqlalchemy.exc.InvalidRequestError).") traceback.print_exc() sess.rollback() def get_high_priority_urls(filter_before=None): print("Loading high priority netlocs") with db.session_context() as sess: query = sess.query(db.WebPages.url) .filter(db.WebPages.priority <= db.DB_HIGH_PRIORITY) .filter(db.WebPages.is_text == True) .yield_per(10000) if filter_before: query = query.filter(db.NuReleaseItem.release_date >= filter_before) page_items = query.all() mapdict = {} for row, in tqdm.tqdm(page_items): itemnl = WebMirror.OutputFilters.util.feedNameLut.patch_blogspot(urllib.parse.urlsplit(row).netloc) mapdict.setdefault(itemnl, set()) mapdict[itemnl].add(row) print("High Priority outbound items: ", len(mapdict)) return mapdict def push_urls_into_table(mapdict): with db.session_context() as db_sess: pbar = tqdm.tqdm(mapdict.items()) for netloc, urls in pbar: have_item = db_sess.query(db.NewNetlocTracker) .filter(db.NewNetlocTracker.netloc == netloc) .scalar() if not have_item and netloc: urls = list(urls) urls.sort(key=lambda x:len(x)) pbar.write("New Url: %s -> %s" % (netloc, urls[0])) new = db.NewNetlocTracker( netloc = netloc, example_url = urls[0], ) db_sess.add(new) db_sess.commit() def reset_nu_fails(): with db.session_context() as db_sess: recent_d_2 = datetime.datetime.now() - datetime.timedelta(hours=24*14) bulkq = db_sess.query(db.NuReleaseItem) .outerjoin(db.NuResolvedOutbound) .filter(db.NuReleaseItem.validated == False) .filter(db.NuReleaseItem.release_date >= recent_d_2) .options(joinedload('resolved')) .order_by(desc(db.NuReleaseItem.first_seen)) items = bulkq.all() print("Found %s 
items." % len(items)) for item in items: resolved = len(item.resolved) item.fetch_attempts = resolved db_sess.commit() def exposed_new_from_wln_feeds(): nlfilter = get_wln_release_urls() push_urls_into_table(mapdict) def exposed_new_from_nu_feeds(): mapdict = get_nu_head_urls() push_urls_into_table(mapdict) def exposed_new_from_high_priority(): mapdict = get_high_priority_urls() push_urls_into_table(mapdict) def exposed_new_from_zero_distance_urls(): mapdict = get_distance_of_zero_urls() push_urls_into_table(mapdict) def exposed_new_from_all_feeds(): mapdict = get_nu_head_urls() mapdict_1 = get_wln_release_urls() mapdict_2 = get_high_priority_urls() mapdict_3 = get_distance_of_zero_urls() print("NU Header urls: %s, wln URLs: %s, %s high priority items, %s with a distance of zero." % (len(mapdict), len(mapdict_1), len(mapdict_2), len(mapdict_3))) for key, value in mapdict_1.items(): mapdict.setdefault(key, set()) mapdict[key].update(value) for key, value in mapdict_2.items(): mapdict.setdefault(key, set()) mapdict[key].update(value) for key, value in mapdict_3.items(): mapdict.setdefault(key, set()) mapdict[key].update(value) print("Total items: %s" % (len(mapdict), )) push_urls_into_table(mapdict)
BSD 3-Clause New or Revised License
project-monai/monailabel
monailabel/interfaces/app.py
MONAILabelApp.deepgrow_infer_tasks
python
def deepgrow_infer_tasks(model_dir, pipeline=True):
    deepgrow_2d = load_from_mmar("clara_pt_deepgrow_2d_annotation_1", model_dir)
    deepgrow_3d = load_from_mmar("clara_pt_deepgrow_3d_annotation_1", model_dir)

    infers = {
        "deepgrow_2d": InferDeepgrow2D(None, deepgrow_2d),
        "deepgrow_3d": InferDeepgrow3D(None, deepgrow_3d),
    }
    if pipeline:
        infers["deepgrow_pipeline"] = InferDeepgrowPipeline(
            path=None,
            network=deepgrow_2d,
            model_3d=infers["deepgrow_3d"],
            description="Combines Deepgrow 2D model and 3D deepgrow model",
        )
    return infers
Dictionary of Default Infer Tasks for Deepgrow 2D/3D
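A hedged usage sketch; the model directory is a placeholder, and the Deepgrow MMAR checkpoints are assumed to be available (or fetchable) under it:

# Assumed model directory, typically used inside a MONAILabelApp subclass's init_infers().
infers = deepgrow_infer_tasks(model_dir="/opt/monailabel/models")
print(sorted(infers))  # ['deepgrow_2d', 'deepgrow_3d', 'deepgrow_pipeline']

# Skip the combined 2D/3D pipeline if only the individual models are wanted.
infers_no_pipeline = deepgrow_infer_tasks("/opt/monailabel/models", pipeline=False)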
https://github.com/project-monai/monailabel/blob/f7eaeea08ea1ba7698668f1f93b568091e6d1111/monailabel/interfaces/app.py#L528-L546
import copy import itertools import logging import os import platform import shutil import tempfile import time from datetime import timedelta from distutils.util import strtobool from typing import Callable, Dict, Optional, Sequence import requests import schedule from dicomweb_client import DICOMwebClient from dicomweb_client.session_utils import create_session_from_user_pass from monai.apps import download_and_extract, download_url, load_from_mmar from monai.data import partition_dataset from timeloop import Timeloop from monailabel.config import settings from monailabel.datastore.dicom import DICOMWebDatastore from monailabel.datastore.local import LocalDatastore from monailabel.interfaces.datastore import Datastore, DefaultLabelTag from monailabel.interfaces.exception import MONAILabelError, MONAILabelException from monailabel.interfaces.tasks.batch_infer import BatchInferImageType, BatchInferTask from monailabel.interfaces.tasks.infer import InferTask from monailabel.interfaces.tasks.scoring import ScoringMethod from monailabel.interfaces.tasks.strategy import Strategy from monailabel.interfaces.tasks.train import TrainTask from monailabel.tasks.activelearning.random import Random from monailabel.tasks.infer.deepgrow_2d import InferDeepgrow2D from monailabel.tasks.infer.deepgrow_3d import InferDeepgrow3D from monailabel.tasks.infer.deepgrow_pipeline import InferDeepgrowPipeline from monailabel.utils.async_tasks.task import AsyncTask from monailabel.utils.sessions import Sessions logger = logging.getLogger(__name__) class MONAILabelApp: PRE_TRAINED_PATH: str = "https://github.com/Project-MONAI/MONAILabel/releases/download/data/" def __init__( self, app_dir: str, studies: str, conf: Dict[str, str], name: str = "", description: str = "", version: str = "2.0", labels: Optional[Sequence[str]] = None, ): self.app_dir = app_dir self.studies = studies self.conf = conf if conf else {} self.name = name self.description = description self.version = version self.labels = labels self._datastore: Datastore = self.init_datastore() self._infers = self.init_infers() self._trainers = self.init_trainers() self._strategies = self.init_strategies() self._scoring_methods = self.init_scoring_methods() self._batch_infer = self.init_batch_infer() if strtobool(conf.get("download_tools", "true")): self._download_tools() self._server_mode = strtobool(conf.get("server_mode", "false")) self._auto_update_scoring = strtobool(conf.get("auto_update_scoring", "true")) self._sessions = self._load_sessions(strtobool(conf.get("sessions", "true"))) def init_infers(self) -> Dict[str, InferTask]: return {} def init_trainers(self) -> Dict[str, TrainTask]: return {} def init_strategies(self) -> Dict[str, Strategy]: return {"random": Random()} def init_scoring_methods(self) -> Dict[str, ScoringMethod]: return {} def init_batch_infer(self) -> Callable: return BatchInferTask() def init_datastore(self) -> Datastore: logger.info(f"Init Datastore for: {self.studies}") if self.studies.startswith("http://") or self.studies.startswith("https://"): dw_session = None if settings.MONAI_LABEL_DICOMWEB_USERNAME and settings.MONAI_LABEL_DICOMWEB_PASSWORD: dw_session = create_session_from_user_pass( settings.MONAI_LABEL_DICOMWEB_USERNAME, settings.MONAI_LABEL_DICOMWEB_PASSWORD ) dw_client = DICOMwebClient( url=self.studies, session=dw_session, qido_url_prefix=settings.MONAI_LABEL_QIDO_PREFIX, wado_url_prefix=settings.MONAI_LABEL_WADO_PREFIX, stow_url_prefix=settings.MONAI_LABEL_STOW_PREFIX, ) cache_path = 
settings.MONAI_LABEL_DICOMWEB_CACHE_PATH cache_path = cache_path.strip() if cache_path else "" return DICOMWebDatastore(dw_client, cache_path) if cache_path else DICOMWebDatastore(dw_client) return LocalDatastore( self.studies, extensions=settings.MONAI_LABEL_DATASTORE_FILE_EXT, auto_reload=settings.MONAI_LABEL_DATASTORE_AUTO_RELOAD, ) def info(self): meta = { "name": self.name, "description": self.description, "version": self.version, "labels": self.labels, "models": {k: v.info() for k, v in self._infers.items() if v.is_valid()}, "trainers": {k: v.info() for k, v in self._trainers.items()}, "strategies": {k: v.info() for k, v in self._strategies.items()}, "scoring": {k: v.info() for k, v in self._scoring_methods.items()}, "train_stats": {k: v.stats() for k, v in self._trainers.items()}, "datastore": self._datastore.status(), } if not self.labels: meta["labels"] = list(itertools.chain.from_iterable([v.get("labels", []) for v in meta["models"].values()])) return meta def infer(self, request, datastore=None): model = request.get("model") if not model: raise MONAILabelException( MONAILabelError.INVALID_INPUT, "Model is not provided for Inference Task", ) task = self._infers.get(model) if not task: raise MONAILabelException( MONAILabelError.INVALID_INPUT, f"Inference Task is not Initialized. There is no model '{model}' available", ) request = copy.deepcopy(request) image_id = request["image"] datastore = datastore if datastore else self.datastore() if os.path.exists(image_id): request["save_label"] = False else: request["image"] = datastore.get_image_uri(request["image"]) if os.path.isdir(request["image"]): logger.info("Input is a Directory; Consider it as DICOM") logger.info(os.listdir(request["image"])) request["image"] = [os.path.join(f, request["image"]) for f in os.listdir(request["image"])] logger.info(f"Image => {request['image']}") result_file_name, result_json = task(request) label_id = None if result_file_name and os.path.exists(result_file_name): tag = request.get("label_tag", DefaultLabelTag.ORIGINAL) save_label = request.get("save_label", True) if save_label: label_id = datastore.save_label(image_id, result_file_name, tag, result_json) if os.path.exists(result_file_name): os.unlink(result_file_name) else: label_id = result_file_name return {"label": label_id, "tag": DefaultLabelTag.ORIGINAL, "params": result_json} def batch_infer(self, request, datastore=None): return self._batch_infer(request, datastore if datastore else self.datastore(), self.infer) def scoring(self, request, datastore=None): method = request.get("method") if not method: raise MONAILabelException( MONAILabelError.INVALID_INPUT, "Method is not provided for Scoring Task", ) task = self._scoring_methods.get(method) if not task: raise MONAILabelException( MONAILabelError.INVALID_INPUT, f"Scoring Task is not Initialized. 
There is no such scoring method '{method}' available", ) request = copy.deepcopy(request) return task(copy.deepcopy(request), datastore if datastore else self.datastore()) def datastore(self) -> Datastore: return self._datastore @staticmethod def partition_datalist(datalist, val_split, shuffle=True): if val_split > 0.0: return partition_dataset(datalist, ratios=[(1 - val_split), val_split], shuffle=shuffle) return datalist, [] def train(self, request): model = request.get("model") if not model: raise MONAILabelException( MONAILabelError.INVALID_INPUT, "Model is not provided for Training Task", ) task = self._trainers.get(model) if not task: raise MONAILabelException( MONAILabelError.INVALID_INPUT, f"Train Task is not Initialized. There is no model '{model}' available", ) request = copy.deepcopy(request) result = task(request, self.datastore()) if self._auto_update_scoring: self.async_scoring(None) return result def next_sample(self, request): strategy = request.get("strategy") strategy = strategy if strategy else "random" task = self._strategies.get(strategy) if task is None: raise MONAILabelException( MONAILabelError.APP_INIT_ERROR, f"ActiveLearning Task is not Initialized. There is no such strategy '{strategy}' available", ) image_id = task(request, self.datastore()) if not image_id: return {} image_path = self._datastore.get_image_uri(image_id) if self._auto_update_scoring: self.async_scoring(None) return { "id": image_id, "path": image_path, } def on_init_complete(self): logger.info("App Init - completed") if self._auto_update_scoring: self.async_scoring(None) def cleanup_sessions(instance): instance.cleanup_sessions() cleanup_sessions(self) time_loop = Timeloop() schedule.every(5).minutes.do(cleanup_sessions, self) @time_loop.job(interval=timedelta(seconds=30)) def run_scheduler(): schedule.run_pending() time_loop.start(block=False) def on_save_label(self, image_id, label_id): logger.info(f"New label saved for: {image_id} => {label_id}") def server_mode(self, mode: bool): self._server_mode = mode def async_scoring(self, method, params=None): if not method and not self._scoring_methods: return {} methods = [method] if method else list(self._scoring_methods.keys()) result = {} for m in methods: if self._server_mode: request = {"method": m} request.update(params[m] if params and params.get(m) else {}) res, _ = AsyncTask.run("scoring", request=request, params=params, enqueue=True) result[m] = res else: url = f"/scoring/{m}" p = params[m] if params and params.get(m) else None result[m] = self._local_request(url, p, "Scoring") return result[method] if method else result def async_training(self, model, params=None, enqueue=False): if not model and not self._trainers: return {} models = [model] if model else list(self._trainers.keys()) enqueue = True if model > 1 else enqueue result = {} for m in models: if self._server_mode: request = {"model": m} request.update(params[m] if params and params.get(m) else {}) res, _ = AsyncTask.run("train", request=request, params=params, enqueue=enqueue) result[m] = res else: url = f"/train/{model}?enqueue={enqueue}" p = params[m] if params and params.get(m) else None result[m] = self._local_request(url, p, "Training") return result[model] if model else result def async_batch_infer(self, model, images: BatchInferImageType, params=None): if self._server_mode: request = {"model": model, "images": images} res, _ = AsyncTask.run("batch_infer", request=request, params=params) return res url = f"/batch/infer/{model}?images={images}" return self._local_request(url, 
params, "Batch Infer") def _local_request(self, url, params, action): params = params if params else {} response = requests.post(f"http://127.0.0.1:{settings.MONAI_LABEL_SERVER_PORT}{url}", json=params) if response.status_code != 200: logger.error(f"Failed To Trigger {action}: {response.text}") return response.json() if response.status_code == 200 else None def _download_tools(self): target = os.path.join(self.app_dir, "bin") os.makedirs(target, exist_ok=True) dcmqi_tools = ["segimage2itkimage", "itkimage2segimage", "segimage2itkimage.exe", "itkimage2segimage.exe"] existing = [tool for tool in dcmqi_tools if shutil.which(tool) or os.path.exists(os.path.join(target, tool))] logger.debug(f"Existing Tools: {existing}") if len(existing) in [len(dcmqi_tools), len(dcmqi_tools) // 2]: logger.debug("No need to download dcmqi tools") return target_os = "win64.zip" if any(platform.win32_ver()) else "linux.tar.gz" with tempfile.TemporaryDirectory() as tmp: download_and_extract( url=f"https://github.com/QIICR/dcmqi/releases/download/v1.2.4/dcmqi-1.2.4-{target_os}", output_dir=tmp ) for root, _, files in os.walk(tmp): for f in files: if f in dcmqi_tools: shutil.copy(os.path.join(root, f), target) def _load_sessions(self, load=False): if not load: return None return Sessions(settings.MONAI_LABEL_SESSION_PATH, settings.MONAI_LABEL_SESSION_EXPIRY) def cleanup_sessions(self): if not self._sessions: return count = self._sessions.remove_expired() logger.debug("Total sessions cleaned up: {}".format(count)) def sessions(self): return self._sessions @staticmethod def download(resources): if not resources: return for resource in resources: if not os.path.exists(resource[0]): os.makedirs(os.path.dirname(resource[0]), exist_ok=True) logger.info(f"Downloading resource: {resource[0]} from {resource[1]}") download_url(resource[1], resource[0]) time.sleep(1) @staticmethod
Apache License 2.0
dmlc/dgl
python/dgl/distributed/kvstore.py
KVServer.barrier_count
python
def barrier_count(self, count):
    self._barrier_count = count
Set barrier count
https://github.com/dmlc/dgl/blob/8341244a2dac850bd0c1153c7641c3b8a2bbfc30/python/dgl/distributed/kvstore.py#L704-L706
import os import numpy as np from . import rpc from .graph_partition_book import NodePartitionPolicy, EdgePartitionPolicy from .standalone_kvstore import KVClient as SA_KVClient from .. import backend as F from .. import utils from .._ffi.ndarray import empty_shared_mem KVSTORE_PULL = 901231 class PullResponse(rpc.Response): def __init__(self, server_id, data_tensor): self.server_id = server_id self.data_tensor = data_tensor def __getstate__(self): return self.server_id, self.data_tensor def __setstate__(self, state): self.server_id, self.data_tensor = state class PullRequest(rpc.Request): def __init__(self, name, id_tensor): self.name = name self.id_tensor = id_tensor def __getstate__(self): return self.name, self.id_tensor def __setstate__(self, state): self.name, self.id_tensor = state def process_request(self, server_state): kv_store = server_state.kv_store if self.name not in kv_store.part_policy: raise RuntimeError("KVServer cannot find partition policy with name: %s" % self.name) if self.name not in kv_store.data_store: raise RuntimeError("KVServer Cannot find data tensor with name: %s" % self.name) local_id = kv_store.part_policy[self.name].to_local(self.id_tensor) data = kv_store.pull_handlers[self.name](kv_store.data_store, self.name, local_id) res = PullResponse(kv_store.server_id, data) return res KVSTORE_PUSH = 901232 class PushRequest(rpc.Request): def __init__(self, name, id_tensor, data_tensor): self.name = name self.id_tensor = id_tensor self.data_tensor = data_tensor def __getstate__(self): return self.name, self.id_tensor, self.data_tensor def __setstate__(self, state): self.name, self.id_tensor, self.data_tensor = state def process_request(self, server_state): kv_store = server_state.kv_store if self.name not in kv_store.part_policy: raise RuntimeError("KVServer cannot find partition policy with name: %s" % self.name) if self.name not in kv_store.data_store: raise RuntimeError("KVServer Cannot find data tensor with name: %s" % self.name) local_id = kv_store.part_policy[self.name].to_local(self.id_tensor) kv_store.push_handlers[self.name](kv_store.data_store, self.name, local_id, self.data_tensor) INIT_DATA = 901233 INIT_MSG = 'Init' class InitDataResponse(rpc.Response): def __init__(self, msg): self.msg = msg def __getstate__(self): return self.msg def __setstate__(self, state): self.msg = state class InitDataRequest(rpc.Request): def __init__(self, name, shape, dtype, policy_str, init_func): self.name = name self.shape = shape self.dtype = dtype self.policy_str = policy_str self.init_func = init_func def __getstate__(self): return self.name, self.shape, self.dtype, self.policy_str, self.init_func def __setstate__(self, state): self.name, self.shape, self.dtype, self.policy_str, self.init_func = state def process_request(self, server_state): kv_store = server_state.kv_store dtype = F.data_type_dict[self.dtype] if self.name in kv_store.data_store: assert tuple(F.shape(kv_store.data_store[self.name])) == tuple(self.shape) assert F.reverse_data_type_dict[F.dtype(kv_store.data_store[self.name])] == self.dtype assert kv_store.part_policy[self.name].policy_str == self.policy_str else: if not kv_store.is_backup_server(): data_tensor = self.init_func(self.shape, dtype) kv_store.init_data(name=self.name, policy_str=self.policy_str, data_tensor=data_tensor) else: kv_store.init_data(name=self.name, policy_str=self.policy_str) res = InitDataResponse(INIT_MSG) return res BARRIER = 901234 BARRIER_MSG = 'Barrier' class BarrierResponse(rpc.Response): def __init__(self, msg): self.msg = 
msg def __getstate__(self): return self.msg def __setstate__(self, state): self.msg = state class BarrierRequest(rpc.Request): def __init__(self, role): self.role = role def __getstate__(self): return self.role def __setstate__(self, state): self.role = state def process_request(self, server_state): kv_store = server_state.kv_store role = server_state.roles count = kv_store.barrier_count[self.role] kv_store.barrier_count[self.role] = count + 1 if kv_store.barrier_count[self.role] == len(role[self.role]): kv_store.barrier_count[self.role] = 0 res_list = [] for client_id, _ in role[self.role]: res_list.append((client_id, BarrierResponse(BARRIER_MSG))) return res_list return None REGISTER_PULL = 901235 REGISTER_PULL_MSG = 'Register_Pull' class RegisterPullHandlerResponse(rpc.Response): def __init__(self, msg): self.msg = msg def __getstate__(self): return self.msg def __setstate__(self, state): self.msg = state class RegisterPullHandlerRequest(rpc.Request): def __init__(self, name, pull_func): self.name = name self.pull_func = pull_func def __getstate__(self): return self.name, self.pull_func def __setstate__(self, state): self.name, self.pull_func = state def process_request(self, server_state): kv_store = server_state.kv_store kv_store.pull_handlers[self.name] = self.pull_func res = RegisterPullHandlerResponse(REGISTER_PULL_MSG) return res REGISTER_PUSH = 901236 REGISTER_PUSH_MSG = 'Register_Push' class RegisterPushHandlerResponse(rpc.Response): def __init__(self, msg): self.msg = msg def __getstate__(self): return self.msg def __setstate__(self, state): self.msg = state class RegisterPushHandlerRequest(rpc.Request): def __init__(self, name, push_func): self.name = name self.push_func = push_func def __getstate__(self): return self.name, self.push_func def __setstate__(self, state): self.name, self.push_func = state def process_request(self, server_state): kv_store = server_state.kv_store kv_store.push_handlers[self.name] = self.push_func res = RegisterPushHandlerResponse(REGISTER_PUSH_MSG) return res GET_SHARED = 901237 GET_SHARED_MSG = 'Get_Shared' class GetSharedDataResponse(rpc.Response): def __init__(self, meta): self.meta = meta def __getstate__(self): return self.meta def __setstate__(self, state): self.meta = state class GetSharedDataRequest(rpc.Request): def __init__(self, msg): self.msg = msg def __getstate__(self): return self.msg def __setstate__(self, state): self.msg = state def process_request(self, server_state): assert self.msg == GET_SHARED_MSG meta = {} kv_store = server_state.kv_store for name, data in kv_store.data_store.items(): meta[name] = (F.shape(data), F.reverse_data_type_dict[F.dtype(data)], kv_store.part_policy[name].policy_str) res = GetSharedDataResponse(meta) return res GET_PART_SHAPE = 901238 class GetPartShapeResponse(rpc.Response): def __init__(self, shape): self.shape = shape def __getstate__(self): return self.shape def __setstate__(self, state): if isinstance(state, int): self.shape = (state,) else: self.shape = state class GetPartShapeRequest(rpc.Request): def __init__(self, name): self.name = name def __getstate__(self): return self.name def __setstate__(self, state): self.name = state def process_request(self, server_state): kv_store = server_state.kv_store if self.name not in kv_store.data_store: raise RuntimeError("KVServer Cannot find data tensor with name: %s" % self.name) data_shape = F.shape(kv_store.data_store[self.name]) res = GetPartShapeResponse(data_shape) return res SEND_META_TO_BACKUP = 901239 SEND_META_TO_BACKUP_MSG = 
"Send_Meta_TO_Backup" class SendMetaToBackupResponse(rpc.Response): def __init__(self, msg): self.msg = msg def __getstate__(self): return self.msg def __setstate__(self, state): self.msg = state class SendMetaToBackupRequest(rpc.Request): def __init__(self, name, dtype, shape, policy_str, pull_handler, push_handler): self.name = name self.dtype = dtype self.shape = shape self.policy_str = policy_str self.pull_handler = pull_handler self.push_handler = push_handler def __getstate__(self): return self.name, self.dtype, self.shape, self.policy_str, self.pull_handler, self.push_handler def __setstate__(self, state): self.name, self.dtype, self.shape, self.policy_str, self.pull_handler, self.push_handler = state def process_request(self, server_state): kv_store = server_state.kv_store assert kv_store.is_backup_server() if self.name not in kv_store.data_store: shared_data = empty_shared_mem(self.name+'-kvdata-', False, self.shape, self.dtype) dlpack = shared_data.to_dlpack() kv_store.data_store[self.name] = F.zerocopy_from_dlpack(dlpack) kv_store.part_policy[self.name] = kv_store.find_policy(self.policy_str) kv_store.pull_handlers[self.name] = self.pull_handler kv_store.push_handlers[self.name] = self.push_handler else: assert tuple(F.shape(kv_store.data_store[self.name])) == tuple(self.shape) assert F.reverse_data_type_dict[F.dtype(kv_store.data_store[self.name])] == self.dtype assert kv_store.part_policy[self.name].policy_str == self.policy_str assert kv_store.pull_handlers[self.name] == self.pull_handler assert kv_store.push_handlers[self.name] == self.push_handler res = SendMetaToBackupResponse(SEND_META_TO_BACKUP_MSG) return res DELETE_DATA = 901240 DELETE_MSG = "Delete_Data" class DeleteDataResponse(rpc.Response): def __init__(self, msg): self.msg = msg def __getstate__(self): return self.msg def __setstate__(self, state): self.msg = state class DeleteDataRequest(rpc.Request): def __init__(self, name): self.name = name def __getstate__(self): return self.name def __setstate__(self, state): self.name = state def process_request(self, server_state): kv_store = server_state.kv_store if self.name in kv_store.data_store: del kv_store.data_store[self.name] del kv_store.part_policy[self.name] del kv_store.push_handlers[self.name] del kv_store.pull_handlers[self.name] res = DeleteDataResponse(DELETE_MSG) return res COUNT_LOCAL_NONZERO = 901241 class CountLocalNonzeroResponse(rpc.Response): def __init__(self, num_local_nonzero): self.num_local_nonzero = num_local_nonzero def __getstate__(self): return self.num_local_nonzero def __setstate__(self, state): self.num_local_nonzero = state class CountLocalNonzeroRequest(rpc.Request): def __init__(self, name): self.name = name def __getstate__(self): return self.name def __setstate__(self, state): self.name = state def process_request(self, server_state): kv_store = server_state.kv_store num_local_nonzero = kv_store.count_local_nonzero(self.name) res = CountLocalNonzeroResponse(num_local_nonzero) return res def default_push_handler(target, name, id_tensor, data_tensor): target[name][id_tensor] = data_tensor def default_pull_handler(target, name, id_tensor): return target[name][id_tensor] class KVServer(object): def __init__(self, server_id, ip_config, num_servers, num_clients): assert server_id >= 0, 'server_id (%d) cannot be a negative number.' % server_id assert num_servers > 0, 'num_servers (%d) must be a positive number.' 
% num_servers assert os.path.exists(ip_config), 'Cannot open file: %s' % ip_config assert num_clients >= 0, 'num_clients (%d) cannot be a negative number.' % num_clients rpc.register_service(KVSTORE_PULL, PullRequest, PullResponse) rpc.register_service(KVSTORE_PUSH, PushRequest, None) rpc.register_service(INIT_DATA, InitDataRequest, InitDataResponse) rpc.register_service(BARRIER, BarrierRequest, BarrierResponse) rpc.register_service(REGISTER_PUSH, RegisterPushHandlerRequest, RegisterPushHandlerResponse) rpc.register_service(REGISTER_PULL, RegisterPullHandlerRequest, RegisterPullHandlerResponse) rpc.register_service(GET_SHARED, GetSharedDataRequest, GetSharedDataResponse) rpc.register_service(GET_PART_SHAPE, GetPartShapeRequest, GetPartShapeResponse) rpc.register_service(SEND_META_TO_BACKUP, SendMetaToBackupRequest, SendMetaToBackupResponse) rpc.register_service(DELETE_DATA, DeleteDataRequest, DeleteDataResponse) rpc.register_service(COUNT_LOCAL_NONZERO, CountLocalNonzeroRequest, CountLocalNonzeroResponse) self._data_store = {} self._policy_set = set() self._part_policy = {} self._server_id = server_id self._server_namebook = rpc.read_ip_config(ip_config, num_servers) assert server_id in self._server_namebook, 'Trying to start server {}, but there are {} servers in the config file'.format( server_id, len(self._server_namebook)) self._machine_id = self._server_namebook[server_id][0] self._group_count = self._server_namebook[server_id][3] self._part_id = self._machine_id self._num_clients = num_clients self._barrier_count = {} self._push_handlers = {} self._pull_handlers = {} @property def server_id(self): return self._server_id @property def barrier_count(self): return self._barrier_count @barrier_count.setter
Apache License 2.0
ugnelis/tensorflow-rnn-ctc
utils.py
texts_encoder
python
def texts_encoder(texts, first_index=(ord('a') - 1), space_index=0, space_token='<space>'):
    result = []
    for text in texts:
        item = make_char_array(text, space_token)
        item = np.asarray([space_index if x == space_token else ord(x) - first_index for x in item])
        result.append(item)
    return np.array(result)
Encode texts to numbers.

Args:
    texts: list of texts.
    first_index: int. First index (usually index of 'a').
    space_index: int. Index of 'space'.
    space_token: string. 'space' representation.

Returns:
    array of encoded texts.
https://github.com/ugnelis/tensorflow-rnn-ctc/blob/6998ba91952a9352b6805ddd0209b33a13ed4cc6/utils.py#L190-L212
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import glob import re import logging import unicodedata import codecs import numpy as np import scipy.io.wavfile as wav from python_speech_features import mfcc def read_text_file(path): with codecs.open(path, encoding="utf-8") as file: return file.read() def normalize_text(text, remove_apostrophe=True): result = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode() if remove_apostrophe: result = result.replace("'", "") return re.sub("[^a-zA-Z']+", ' ', result).strip().lower() def read_text_files(dir, extensions=['txt']): if not os.path.isdir(dir): logging.error("Text files directory %s is not found.", dir) return None if not all(isinstance(extension, str) for extension in extensions): logging.error("Variable 'extensions' is not a list of strings.") return None files_paths_list = [] for extension in extensions: file_glob = os.path.join(dir, '*.' + extension) files_paths_list.extend(glob.glob(file_glob)) files = [] for file_path in files_paths_list: file = read_text_file(file_path) file = normalize_text(file) files.append(file) files = np.array(files) return files def read_audio_files(dir, extensions=['wav']): if not os.path.isdir(dir): logging.error("Audio files directory %s is not found.", dir) return None if not all(isinstance(extension, str) for extension in extensions): logging.error("Variable 'extensions' is not a list of strings.") return None files_paths_list = [] for extension in extensions: file_glob = os.path.join(dir, '*.' + extension) files_paths_list.extend(glob.glob(file_glob)) files = [] for file_path in files_paths_list: audio_rate, audio_data = wav.read(file_path) file = mfcc(audio_data, samplerate=audio_rate) files.append(file) files = np.array(files) return files def make_char_array(text, space_token='<space>'): result = np.hstack([space_token if x == ' ' else list(x) for x in text]) return result def sparse_tuples_from_sequences(sequences, dtype=np.int32): indexes = [] values = [] for n, sequence in enumerate(sequences): indexes.extend(zip([n] * len(sequence), range(len(sequence)))) values.extend(sequence) indexes = np.asarray(indexes, dtype=np.int64) values = np.asarray(values, dtype=dtype) shape = np.asarray([len(sequences), np.asarray(indexes).max(0)[1] + 1], dtype=np.int64) return indexes, values, shape def sequence_decoder(sequence, first_index=(ord('a') - 1)): decoded_text = ''.join([chr(x) for x in np.asarray(sequence) + first_index]) decoded_text = decoded_text.replace(chr(ord('z') + 1), '') decoded_text = decoded_text.replace(chr(ord('a') - 1), ' ') return decoded_text
MIT License
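A minimal usage sketch for texts_encoder, not part of the original repository: it assumes the module above is importable as utils.py from the repo root, and the transcript strings are made up for illustration (equal-length transcripts keep the result a rectangular integer array).

import numpy as np
from utils import texts_encoder

# Hypothetical transcripts; spaces become '<space>' tokens inside make_char_array.
transcripts = ["hello world", "ctc decoder"]
encoded = texts_encoder(transcripts)

# With the defaults, 'a' -> 1, 'b' -> 2, ..., 'z' -> 26 and spaces -> 0.
print(encoded.shape)  # (2, 11) since both transcripts have the same length
print(encoded[0])     # [ 8  5 12 12 15  0 23 15 18 12  4]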
renatopp/aerolito
aerolito/pattern.py
Pattern.execute_post
python
def execute_post(self, environ):
    if self._post:
        for action in self._post:
            action.run(environ)
u""" Executes the actions of post tag
https://github.com/renatopp/aerolito/blob/842c38131d546a6f4abc6d839063e0754ee234d9/aerolito/pattern.py#L354-L360
import re import random from aerolito import exceptions from aerolito.utils import remove_accents from aerolito.utils import normalize_input from aerolito.utils import get_meanings def replace(literal, environ): session = environ['session'][environ['user_id']] p = r'\<([\d|\s|\w]*)\>' _vars = re.findall(p, unicode(literal._value), re.I) result = literal._value for var in _vars: temp = var.split() varname = temp[0] params = temp[1:] if varname == 'star': index = int(params[0]) if params else 0 result = result.replace('<%s>'%var, session['stars'][index]) elif varname in environ['globals']: result = result.replace('<%s>'%var, environ['globals'][varname]) elif varname in session['locals']: result = result.replace('<%s>'%var, session['locals'][varname]) else: result = result.replace('<%s>'%var, '') return result class Literal(object): def __init__(self, value): self._value = value def __repr__(self): return '<Literal %s>' % self._value class Action(object): def __init__(self, directive, params): self._directive = directive self._params = params def run(self, environ): params = [] if self._params: params = [replace(x, environ) for x in self._params] return self._directive(params) class Regex(object): def __init__(self, text, ignore=None): if ignore: ignore = '|'.join([re.escape(i) for i in ignore]) self._ignore = re.compile('[%s]'%ignore) self._expression = re.sub(self._ignore, '', text) else: self._ignore = None self._expression = text self._expression = re.escape(self._expression) self._expression = self._expression.replace('\\*', '(.*)') self._expression = self._expression.replace('\\\\(.*)', '\*') self._expression = re.sub('(\\\ )+\(\.\*\)', '(.*)', self._expression) self._expression = re.sub('\(\.\*\)(\\\ )+', '(.*)', self._expression) self._expression = '^%s$'%self._expression self._stars = None def match(self, value): if self._ignore: value = re.sub(self._ignore, '', value) m = re.match(self._expression, value, re.I) if m: self._stars = [x.strip() for x in m.groups()] return True else: self._stars = None return False def __repr__(self): return '<Regex %s>' % self._expression class Pattern(object): def __init__(self, p, environ): self._mean = self.__convert_mean(p, environ) self._ignore = self.__convert_ignore(p, environ) self._after = self.__convert_regex(p, 'after', environ) self._in = self.__convert_regex(p, 'in', environ) self._out = self.__convert_literal(p, 'out', environ) self._when = self.__convert_action(p, 'when', environ) self._post = self.__convert_action(p, 'post', environ) def __convert_mean(self, p, environ=None): meanings = {} synonyms = environ['synonyms'] if p.has_key('mean'): tagValues = p['mean'] if tagValues is None: raise exceptions.InvalidTagValue(u'Invalid value for tag mean') for k in tagValues: key = remove_accents(k) meanings[key] = [normalize_input(v, synonyms) for v in tagValues[k]] return meanings else: return None def __convert_ignore(self, p, environ=None): if p.has_key('ignore'): if isinstance(p['ignore'], (tuple, list)): tag_values = p['ignore'] else: tag_values = list(str(p['ignore'])) else: tag_values = None return tag_values def __convert_regex(self, p, tag, environ=None): synonyms = environ['synonyms'] meanings = environ['meanings'] if p.has_key(tag): tagValues = p[tag] if tagValues is None or tagValues == u'': raise exceptions.InvalidTagValue( u'Invalid value for tag %s.'%tag) if isinstance(tagValues, (tuple, list)): values = tagValues else: values = [tagValues] normalized = [normalize_input(unicode(x), synonyms) for x in values] patterns = [] for x 
in normalized: patterns.extend(get_meanings(x, meanings, self._mean)) return [Regex(x, self._ignore) for x in patterns] else: return None def __convert_literal(self, p, tag, environ=None): meanings = environ['meanings'] if p.has_key(tag): tagValues = p[tag] if tagValues is None or tagValues == u'': raise exceptions.InvalidTagValue( 'Invalid value for tag %s.'%tag) if isinstance(tagValues, (tuple, list)): values = tagValues else: values = [tagValues] patterns = [] for x in values: patterns.extend(get_meanings(x, meanings, self._mean)) return [Literal(unicode(x)) for x in patterns] else: return None def __convert_action(self, p, tag, environ): if p.has_key(tag): tagValues = p[tag] actions = [] if isinstance(tagValues, dict): tagValues = [tagValues] if isinstance(tagValues, (tuple, list)): for d in tagValues: for k, p in d.iteritems(): if isinstance(p, (tuple, list)): params = [Literal(x) for x in p] else: params = [Literal(p)] if k not in environ['directives']: raise exceptions.InvalidTagValue( u'Directive "%s" not found'%str(k)) action = Action(environ['directives'][k], params) actions.append(action) else: raise exceptions.InvalidTagValue( u'Invalid value for tag %s.'%tag) return actions else: return None def match(self, value, environ): self._stars = None session = environ['session'][environ['user_id']] if self._after: for regex in self._after: if session['responses-normalized'] and regex.match(session['responses-normalized'][-1]): session['stars'] = regex._stars break else: return False if self._in: for regex in self._in: if regex.match(value): session['stars'] = regex._stars break else: return False if self._when: for action in self._when: if not action.run(environ): return False return True def choice_output(self, environ): return replace(random.choice(self._out), environ)
MIT License
compas-dev/compas
src/compas/interop/matlab/client.py
MatlabClient.put
python
def put(self, name, value):
    try:
        res = self._app.PutFullMatrix(name, self.workspace, value, None)
    except Exception:
        res = self._app.PutWorkspaceData(name, self.workspace, value)
    if self.verbose:
        print(res)
    self._renew_lease()
Put a variable in the Matlab workspace.

Parameters
----------
name : str
    The name of the variable.
value : ...
    The value of the variable.

Examples
--------
>>> m = MatlabClient(verbose=True, interactive=True)
>>> m.put('A', m.matrix([[1, 0, 1, 3], [2, 3, 4, 7], [-1, -3, -3, -4]]))
>>> m.put()
>>> m.put()
https://github.com/compas-dev/compas/blob/d795a8bfe9f21ffa124d09e37e9c0ed2e3520057/src/compas/interop/matlab/client.py#L245-L269
from __future__ import print_function from __future__ import absolute_import from __future__ import division import System __all__ = ['MatlabClient'] class MatlabClient(object): def __init__(self, verbose=False, interactive=False, workspace='base'): self._type = None self._app = None self._lease = None self.verbose = verbose self.interactive = interactive self.workspace = workspace self.init() def init(self): self._create_instance() self._init_lease() def _create_instance(self): self._type = System.Type.GetTypeFromProgID('Matlab.Application') self._app = System.Activator.CreateInstance(self._type) self._app.Visible = self.interactive def _init_lease(self): self._lease = self._app.InitializeLifetimeService() self._lease.InitialLeaseTime = System.TimeSpan.FromMinutes(5.0) self._lease.RenewOnCallTime = System.TimeSpan.FromMinutes(5.0) def _renew_lease(self): self._lease.Renew(System.TimeSpan.FromMinutes(5.0)) def _get_vector(self, name): _value = self._app.GetVariable(name, self.workspace) try: _value.Rank except AttributeError: value = _value else: value = MatlabClient.list_from_vector(_value) self._renew_lease() return value def _get_matrix_size(self, name): self._app.Execute('[m, n] = size({0});'.format(name)) m = self._app.GetVariable('m', self.workspace) n = self._app.GetVariable('n', self.workspace) return int(m), int(n) @staticmethod def vector_from_list(a, dtype=float): n = len(a) vector = System.Array.CreateInstance(dtype, n) for i in range(n): vector[i] = a[i] return vector @staticmethod def vector_from_array(a): raise NotImplementedError @staticmethod def matrix_from_list(A, dtype=float): m = len(A) n = len(A[0]) if not all([len(row) == n for row in A]): raise Exception('Matrix dimensions inconsistent.') matrix = System.Array.CreateInstance(dtype, m, n) for row in range(m): for col in range(n): matrix[row, col] = A[row][col] return matrix @staticmethod def matrix_from_array(a): raise NotImplementedError @staticmethod def list_from_vector(a): return list(a) @staticmethod def list_from_matrix(A, m, n): nlist = [] for row in range(m): nlist.append([None] * n) for col in range(n): nlist[row][col] = A[row, col] return nlist @staticmethod def double(a): try: len(a[0]) except TypeError: return MatlabClient.vector_from_list(a, dtype=float) else: return MatlabClient.matrix_from_list(a, dtype=float) def eval(self, cmd): res = self._app.Execute(cmd) if self.verbose: print(res) self._renew_lease()
MIT License
olitheolix/aiokubernetes
aiokubernetes/models/v1beta1_custom_resource_validation.py
V1beta1CustomResourceValidation.__eq__
python
def __eq__(self, other):
    if not isinstance(other, V1beta1CustomResourceValidation):
        return False
    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1beta1_custom_resource_validation.py#L106-L111
import pprint import re from aiokubernetes.models.v1beta1_json_schema_props import V1beta1JSONSchemaProps class V1beta1CustomResourceValidation(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'open_apiv3_schema': 'V1beta1JSONSchemaProps' } attribute_map = { 'open_apiv3_schema': 'openAPIV3Schema' } def __init__(self, open_apiv3_schema=None): self._open_apiv3_schema = None self.discriminator = None if open_apiv3_schema is not None: self.open_apiv3_schema = open_apiv3_schema @property def open_apiv3_schema(self): return self._open_apiv3_schema @open_apiv3_schema.setter def open_apiv3_schema(self, open_apiv3_schema): self._open_apiv3_schema = open_apiv3_schema def to_dict(self): result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
Apache License 2.0
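Because __eq__ compares the two instances' __dict__ mappings, validations built from the same schema object compare equal. A small sketch, assuming the generated models shown in the context above can be constructed with no arguments:

from aiokubernetes.models.v1beta1_custom_resource_validation import V1beta1CustomResourceValidation
from aiokubernetes.models.v1beta1_json_schema_props import V1beta1JSONSchemaProps

schema = V1beta1JSONSchemaProps()
a = V1beta1CustomResourceValidation(open_apiv3_schema=schema)
b = V1beta1CustomResourceValidation(open_apiv3_schema=schema)

print(a == b)         # True: both instances hold the same schema in __dict__
print(a == object())  # False: the isinstance check rejects other types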
scikit-multiflow/scikit-multiflow
src/skmultiflow/data/sea_generator.py
SEAGenerator._classification_function_one
python
def _classification_function_one(att1, att2, att3):
    return 0 if (att1 + att2 <= 9) else 1
classification_function_one

Decides the sample class label based on the sum of att1 and att2, and the threshold value of 9.

Parameters
----------
att1: float
    First numeric attribute.
att2: float
    Second numeric attribute.
att3: float
    Third numeric attribute.

Returns
-------
int
    Returns the sample class label, either 0 or 1.
https://github.com/scikit-multiflow/scikit-multiflow/blob/d073a706b5006cba2584761286b7fa17e74e87be/src/skmultiflow/data/sea_generator.py#L302-L325
import numpy as np from skmultiflow.data.base_stream import Stream from skmultiflow.utils import check_random_state class SEAGenerator(Stream): def __init__(self, classification_function=0, random_state=None, balance_classes=False, noise_percentage=0.0): super().__init__() self._classification_functions = [self._classification_function_zero, self._classification_function_one, self._classification_function_two, self._classification_function_three] self.classification_function = classification_function self.random_state = random_state self.balance_classes = balance_classes self.noise_percentage = noise_percentage self.n_num_features = 3 self.n_features = self.n_num_features self.n_classes = 2 self.n_targets = 1 self._random_state = None self.next_class_should_be_zero = False self.name = "SEA Generator" self.target_names = ["target_0"] self.feature_names = ["att_num_" + str(i) for i in range(self.n_features)] self.target_values = [i for i in range(self.n_classes)] self._prepare_for_use() @property def classification_function(self): return self._classification_function_idx @classification_function.setter def classification_function(self, classification_function_idx): if classification_function_idx in range(4): self._classification_function_idx = classification_function_idx else: raise ValueError("classification_function takes only these values: 0, 1, 2, 3, {} was " "passed".format(classification_function_idx)) @property def balance_classes(self): return self._balance_classes @balance_classes.setter def balance_classes(self, balance_classes): if isinstance(balance_classes, bool): self._balance_classes = balance_classes else: raise ValueError( "balance_classes should be boolean, {} was passed".format(balance_classes)) @property def noise_percentage(self): return self._noise_percentage @noise_percentage.setter def noise_percentage(self, noise_percentage): if (0.0 <= noise_percentage) and (noise_percentage <= 1.0): self._noise_percentage = noise_percentage else: raise ValueError( "noise percentage should be in [0.0..1.0], {} was passed".format(noise_percentage)) def _prepare_for_use(self): self._random_state = check_random_state(self.random_state) self.next_class_should_be_zero = False def next_sample(self, batch_size=1): data = np.zeros([batch_size, self.n_features + 1]) for j in range(batch_size): self.sample_idx += 1 att1 = att2 = att3 = 0.0 group = 0 desired_class_found = False while not desired_class_found: att1 = 10 * self._random_state.rand() att2 = 10 * self._random_state.rand() att3 = 10 * self._random_state.rand() group = self._classification_functions[self.classification_function](att1, att2, att3) if not self.balance_classes: desired_class_found = True else: if (self.next_class_should_be_zero and (group == 0)) or ((not self.next_class_should_be_zero) and (group == 1)): desired_class_found = True self.next_class_should_be_zero = not self.next_class_should_be_zero if 0.01 + self._random_state.rand() <= self.noise_percentage: group = 1 if (group == 0) else 0 data[j, 0] = att1 data[j, 1] = att2 data[j, 2] = att3 data[j, 3] = group self.current_sample_x = data[:, :self.n_features] self.current_sample_y = data[:, self.n_features:].flatten().astype(np.int64) return self.current_sample_x, self.current_sample_y def generate_drift(self): new_function = self._random_state.randint(4) while new_function == self.classification_function: new_function = self._random_state.randint(4) self.classification_function = new_function @staticmethod def _classification_function_zero(att1, att2, att3): return 0 
if (att1 + att2 <= 8) else 1 @staticmethod
BSD 3-Clause New or Revised License
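A short worked example of what classification function one computes, followed by stream usage; the attribute sums and the random_state value are arbitrary, chosen only for illustration:

# Function one labels a sample 0 when att1 + att2 <= 9, otherwise 1 (att3 is ignored).
print(0 if (4.0 + 3.0 <= 9) else 1)   # 0: the sum 7.0 stays below the threshold
print(0 if (6.5 + 5.0 <= 9) else 1)   # 1: the sum 11.5 exceeds the threshold

from skmultiflow.data import SEAGenerator

stream = SEAGenerator(classification_function=1, random_state=42, noise_percentage=0.0)
X, y = stream.next_sample(5)
# With zero noise, each label is 1 exactly when the first two attributes sum above 9.
print(y == ((X[:, 0] + X[:, 1]) > 9).astype(int))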
sergioteula/python-amazon-paapi
amazon/paapi5_python_sdk/website_sales_rank.py
WebsiteSalesRank.__eq__
python
def __eq__(self, other):
    if not isinstance(other, WebsiteSalesRank):
        return False
    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/sergioteula/python-amazon-paapi/blob/9cb744bef17f5127231367430191df12126e9c24/amazon/paapi5_python_sdk/website_sales_rank.py#L195-L200
import pprint import re import six class WebsiteSalesRank(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'context_free_name': 'str', 'display_name': 'str', 'id': 'str', 'sales_rank': 'int' } attribute_map = { 'context_free_name': 'ContextFreeName', 'display_name': 'DisplayName', 'id': 'Id', 'sales_rank': 'SalesRank' } def __init__(self, context_free_name=None, display_name=None, id=None, sales_rank=None): self._context_free_name = None self._display_name = None self._id = None self._sales_rank = None self.discriminator = None if context_free_name is not None: self.context_free_name = context_free_name if display_name is not None: self.display_name = display_name if id is not None: self.id = id if sales_rank is not None: self.sales_rank = sales_rank @property def context_free_name(self): return self._context_free_name @context_free_name.setter def context_free_name(self, context_free_name): self._context_free_name = context_free_name @property def display_name(self): return self._display_name @display_name.setter def display_name(self, display_name): self._display_name = display_name @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def sales_rank(self): return self._sales_rank @sales_rank.setter def sales_rank(self, sales_rank): self._sales_rank = sales_rank def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(WebsiteSalesRank, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
MIT License
opensuse/yomi
salt/_states/partitioned.py
_get_first_overlapping_partition
python
def _get_first_overlapping_partition(device, start):
    value, unit = disk.units(start)
    value += OVERLAPPING_ERROR
    partitions = _get_cached_partitions(device, unit)
    partition_number = None
    partition_start = 0
    for number, partition in partitions.items():
        p_start = disk.units(partition["start"])[0]
        p_end = disk.units(partition["end"])[0]
        if p_start <= value <= p_end:
            if partition_number is None or partition_start < p_start:
                partition_number = number
                partition_start = p_start
    return partition_number
Return the first partition that contains the start point.
https://github.com/opensuse/yomi/blob/4557cfd35b7b33b311b00cc2fa23cadee5a31340/salt/_states/partitioned.py#L248-L268
import logging import re import disk from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) __virtualname__ = "partitioned" try: __grains__ __opts__ __salt__ except NameError: __grains__ = {} __opts__ = {} __salt__ = {} class EnumerateException(Exception): pass def __virtual__(): return "partition.mkpart" in __salt__ def _check_label(device, label): label = {"dos": "msdos"}.get(label, label) res = __salt__["cmd.run"](["parted", "--list", "--machine", "--script"]) line = "".join((line for line in res.splitlines() if line.startswith(device))) return ":{}:".format(label) in line def labeled(name, label): ret = { "name": name, "result": False, "changes": {}, "comment": [], } if not label: ret["comment"].append("Label parameter is not optional") return ret if _check_label(name, label): ret["result"] = True ret["comment"].append("Label already set to {}".format(label)) return ret if __opts__["test"]: ret["result"] = None ret["comment"].append("Label will be set to {} in {}".format(label, name)) ret["changes"]["label"] = "Will be set to {}".format(label) return ret __salt__["partition.mklabel"](name, label) if _check_label(name, label): ret["result"] = True msg = "Label set to {} in {}".format(label, name) ret["comment"].append(msg) ret["changes"]["label"] = msg else: ret["comment"].append("Failed to set label to {}".format(label)) return ret def _get_partition_type(device): cmd = "parted -s {0} print".format(device) out = __salt__["cmd.run_stdout"](cmd) types = re.findall(r"\s*(\d+).*(primary|extended|logical).*", out) return dict(types) def _get_cached_info(device): if not hasattr(_get_cached_info, "info"): _get_cached_info.info = {} info = _get_cached_info.info if device not in info: info[device] = __salt__["partition.list"](device)["info"] return info[device] def _invalidate_cached_info(): if hasattr(_get_cached_info, "info"): delattr(_get_cached_info, "info") def _get_cached_partitions(device, unit="s"): if not hasattr(_get_cached_partitions, "partitions"): _get_cached_partitions.partitions = {} _get_cached_partitions.types = _get_partition_type(device) if device not in _get_cached_partitions.partitions: _get_cached_partitions.partitions[device] = {} partitions = _get_cached_partitions.partitions[device] if unit not in partitions: partitions[unit] = __salt__["partition.list"](device, unit=unit) types = _get_cached_partitions.types for number, partition in partitions[unit]["partitions"].items(): partition["type"] = types.get(number, "primary") return partitions[unit]["partitions"] def _invalidate_cached_partitions(): if hasattr(_get_cached_partitions, "partitions"): delattr(_get_cached_partitions, "partitions") delattr(_get_cached_partitions, "types") OVERLAPPING_ERROR = 0.75 def _check_partition(device, number, part_type, start, end): number = str(number) partitions = _get_cached_partitions(device) if number not in partitions: return None if part_type != partitions[number]["type"]: return False for value, name in ((start, "start"), (end, "end")): value, unit = disk.units(value) p_value = _get_cached_partitions(device, unit)[number][name] p_value = disk.units(p_value)[0] min_value = value - OVERLAPPING_ERROR max_value = value + OVERLAPPING_ERROR if not min_value <= p_value <= max_value: return False return True
Apache License 2.0
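_get_first_overlapping_partition depends on Salt's __salt__ execution modules and the custom disk helper, so it cannot run standalone. The following is a simplified, self-contained sketch of the same overlap logic, using a hypothetical partition table with sizes in sectors:

OVERLAPPING_ERROR = 0.75

def first_overlapping_partition(partitions, start):
    """partitions: {number: (start, end)} in the same unit as `start`."""
    value = start + OVERLAPPING_ERROR
    found_number, found_start = None, 0
    for number, (p_start, p_end) in partitions.items():
        if p_start <= value <= p_end:
            # Keep the containing partition that starts latest (the innermost one).
            if found_number is None or found_start < p_start:
                found_number, found_start = number, p_start
    return found_number

# Example: an extended partition ('2') containing a logical partition ('5').
table = {'1': (2048, 1050623), '2': (1050624, 20971519), '5': (1052672, 20971519)}
print(first_overlapping_partition(table, 1052672))  # -> '5'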
astropy/photutils
photutils/aperture/core.py
SkyAperture.to_pixel
python
def to_pixel(self, wcs):
    raise NotImplementedError('Needs to be implemented in a subclass.')
Convert the aperture to a `PixelAperture` object defined in pixel coordinates.

Parameters
----------
wcs : WCS object
    A world coordinate system (WCS) transformation that supports the
    `astropy shared interface for WCS <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
    (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

Returns
-------
aperture : `PixelAperture` object
    A `PixelAperture` object.
https://github.com/astropy/photutils/blob/17192d6ee4517514187fc01c7624fe6eb4b0e233/photutils/aperture/core.py#L705-L723
import abc import copy import numpy as np from astropy.coordinates import SkyCoord import astropy.units as u from .bounding_box import BoundingBox from ._photometry_utils import (_handle_units, _prepare_photometry_data, _validate_inputs) from ..utils._wcs_helpers import _pixel_scale_angle_at_skycoord __all__ = ['Aperture', 'SkyAperture', 'PixelAperture'] class Aperture(metaclass=abc.ABCMeta): _shape_params = () positions = np.array(()) theta = None def __len__(self): if self.isscalar: raise TypeError(f'A scalar {self.__class__.__name__!r} object ' 'has no len()') return self.shape[0] def __getitem__(self, index): if self.isscalar: raise TypeError(f'A scalar {self.__class__.__name__!r} object ' 'cannot be indexed') kwargs = dict() for param in self._shape_params: kwargs[param] = getattr(self, param) return self.__class__(self.positions[index], **kwargs) def __iter__(self): for i in range(len(self)): yield self.__getitem__(i) def _positions_str(self, prefix=None): if isinstance(self, PixelAperture): return np.array2string(self.positions, separator=', ', prefix=prefix) elif isinstance(self, SkyAperture): return repr(self.positions) else: raise TypeError('Aperture must be a subclass of PixelAperture ' 'or SkyAperture') def __repr__(self): prefix = f'{self.__class__.__name__}' cls_info = [self._positions_str(prefix)] if self._shape_params is not None: for param in self._shape_params: cls_info.append(f'{param}={getattr(self, param)}') cls_info = ', '.join(cls_info) return f'<{prefix}({cls_info})>' def __str__(self): prefix = 'positions' cls_info = [ ('Aperture', self.__class__.__name__), (prefix, self._positions_str(prefix + ': '))] if self._shape_params is not None: for param in self._shape_params: cls_info.append((param, getattr(self, param))) fmt = [f'{key}: {val}' for key, val in cls_info] return '\n'.join(fmt) @property def shape(self): if isinstance(self.positions, SkyCoord): return self.positions.shape else: return self.positions.shape[:-1] @property def isscalar(self): return self.shape == () class PixelAperture(Aperture): @property def _default_patch_properties(self): mpl_params = dict() mpl_params['fill'] = False return mpl_params @staticmethod def _translate_mask_mode(mode, subpixels, rectangle=False): if mode not in ('center', 'subpixel', 'exact'): raise ValueError(f'Invalid mask mode: {mode}') if rectangle and mode == 'exact': mode = 'subpixel' subpixels = 32 if mode == 'subpixels': if not isinstance(subpixels, int) or subpixels <= 0: raise ValueError('subpixels must be a strictly positive ' 'integer') if mode == 'center': use_exact = 0 subpixels = 1 elif mode == 'subpixel': use_exact = 0 elif mode == 'exact': use_exact = 1 subpixels = 1 return use_exact, subpixels @property @abc.abstractmethod def _xy_extents(self): raise NotImplementedError('Needs to be implemented in a subclass.') @property def bbox(self): positions = np.atleast_2d(self.positions) x_delta, y_delta = self._xy_extents xmin = positions[:, 0] - x_delta xmax = positions[:, 0] + x_delta ymin = positions[:, 1] - y_delta ymax = positions[:, 1] + y_delta bboxes = [BoundingBox.from_float(x0, x1, y0, y1) for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)] if self.isscalar: return bboxes[0] else: return bboxes @property def _centered_edges(self): edges = [] for position, bbox in zip(np.atleast_2d(self.positions), np.atleast_1d(self.bbox)): xmin = bbox.ixmin - 0.5 - position[0] xmax = bbox.ixmax - 0.5 - position[0] ymin = bbox.iymin - 0.5 - position[1] ymax = bbox.iymax - 0.5 - position[1] edges.append((xmin, xmax, ymin, 
ymax)) return edges @property def area(self): raise NotImplementedError('Needs to be implemented in a subclass.') @abc.abstractmethod def to_mask(self, method='exact', subpixels=5): raise NotImplementedError('Needs to be implemented in a subclass.') def area_overlap(self, data, *, mask=None, method='exact', subpixels=5): apermasks = self.to_mask(method=method, subpixels=subpixels) if self.isscalar: apermasks = (apermasks,) if mask is not None: mask = np.asarray(mask) if mask.shape != data.shape: raise ValueError('mask and data must have the same shape') data = np.ones_like(data) vals = [apermask.get_values(data, mask=mask) for apermask in apermasks] areas = [val.sum() if val.shape != (0,) else np.nan for val in vals] if self.isscalar: return areas[0] else: return areas def _do_photometry(self, data, variance, method='exact', subpixels=5, unit=None): aperture_sums = [] aperture_sum_errs = [] masks = self.to_mask(method=method, subpixels=subpixels) if self.isscalar: masks = (masks,) for apermask in masks: values = apermask.get_values(data) aper_sum = values.sum() if values.shape != (0,) else np.nan aperture_sums.append(aper_sum) if variance is not None: values = apermask.get_values(variance) aper_var = values.sum() if values.shape != (0,) else np.nan aperture_sum_errs.append(np.sqrt(aper_var)) aperture_sums = np.array(aperture_sums) aperture_sum_errs = np.array(aperture_sum_errs) if unit is not None: aperture_sums = aperture_sums * unit aperture_sum_errs = aperture_sum_errs * unit return aperture_sums, aperture_sum_errs def do_photometry(self, data, error=None, mask=None, method='exact', subpixels=5): data, error = _validate_inputs(data, error) data, error, unit = _handle_units(data, error) data, variance = _prepare_photometry_data(data, error, mask) return self._do_photometry(data, variance, method=method, subpixels=subpixels, unit=unit) @staticmethod def _make_annulus_path(patch_inner, patch_outer): import matplotlib.path as mpath path_inner = patch_inner.get_path() transform_inner = patch_inner.get_transform() path_inner = transform_inner.transform_path(path_inner) path_outer = patch_outer.get_path() transform_outer = patch_outer.get_transform() path_outer = transform_outer.transform_path(path_outer) verts_inner = path_inner.vertices[:-1][::-1] verts_inner = np.concatenate((verts_inner, [verts_inner[-1]])) verts = np.vstack((path_outer.vertices, verts_inner)) codes = np.hstack((path_outer.codes, path_inner.codes)) return mpath.Path(verts, codes) def _define_patch_params(self, origin=(0, 0), **kwargs): xy_positions = copy.deepcopy(np.atleast_2d(self.positions)) xy_positions[:, 0] -= origin[0] xy_positions[:, 1] -= origin[1] patch_params = self._default_patch_properties patch_params.update(kwargs) return xy_positions, patch_params @abc.abstractmethod def _to_patch(self, origin=(0, 0), **kwargs): raise NotImplementedError('Needs to be implemented in a subclass.') def plot(self, axes=None, origin=(0, 0), **kwargs): import matplotlib.pyplot as plt if axes is None: axes = plt.gca() patches = self._to_patch(origin=origin, **kwargs) if self.isscalar: patches = (patches,) for patch in patches: axes.add_patch(patch) return patches def _to_sky_params(self, wcs): sky_params = {} xpos, ypos = np.transpose(self.positions) sky_params['positions'] = wcs.pixel_to_world(xpos, ypos) skypos = sky_params['positions'] if not self.isscalar: skypos = skypos[0] _, pixscale, angle = _pixel_scale_angle_at_skycoord(skypos, wcs) shape_params = list(self._shape_params) theta_key = 'theta' if theta_key in shape_params: 
sky_params[theta_key] = (self.theta * u.rad) - angle.to(u.rad) shape_params.remove(theta_key) for shape_param in shape_params: value = getattr(self, shape_param) sky_params[shape_param] = (value * u.pix * pixscale).to(u.arcsec) return sky_params @abc.abstractmethod def to_sky(self, wcs): raise NotImplementedError('Needs to be implemented in a subclass.') class SkyAperture(Aperture): def _to_pixel_params(self, wcs): pixel_params = {} xpos, ypos = wcs.world_to_pixel(self.positions) pixel_params['positions'] = np.transpose((xpos, ypos)) if self.isscalar: skypos = self.positions else: skypos = self.positions[0] _, pixscale, angle = _pixel_scale_angle_at_skycoord(skypos, wcs) shape_params = list(self._shape_params) theta_key = 'theta' if theta_key in shape_params: pixel_params[theta_key] = (self.theta + angle).to(u.radian).value shape_params.remove(theta_key) for shape_param in shape_params: value = getattr(self, shape_param) if value.unit.physical_type == 'angle': pixel_params[shape_param] = ((value / pixscale) .to(u.pixel).value) else: pixel_params[shape_param] = value.value return pixel_params @abc.abstractmethod
BSD 3-Clause New or Revised License
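SkyAperture.to_pixel is abstract here; concrete subclasses such as SkyCircularAperture supply the conversion. A hedged usage sketch follows: the WCS parameters and sky position below are invented purely for illustration.

import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from photutils.aperture import SkyCircularAperture

# Minimal TAN WCS, roughly 1 arcsec per pixel, purely for illustration.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [150.0, 2.0]
wcs.wcs.crpix = [50.0, 50.0]
wcs.wcs.cdelt = [-1 / 3600.0, 1 / 3600.0]

position = SkyCoord(ra=150.0 * u.deg, dec=2.0 * u.deg)
sky_aper = SkyCircularAperture(position, r=3.0 * u.arcsec)
pix_aper = sky_aper.to_pixel(wcs)  # a CircularAperture in pixel coordinates
print(pix_aper)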
qlyoung/lagopus
docker-images/lagopus-fuzzer/analyzer/crash_analysis/crash_comparer.py
CrashComparer.is_similar
python
def is_similar(self):
    if not self.crash_state_1 or not self.crash_state_2:
        return False
    if self.crash_state_1 == self.crash_state_2:
        return True
    if 'FuzzerHash=' in self.crash_state_1:
        return False
    crash_state_lines_1 = self.crash_state_1.splitlines()
    crash_state_lines_2 = self.crash_state_2.splitlines()
    lines_compared = 0
    similarity_ratio_sum = 0.0
    for i in range(len(crash_state_lines_1)):
        if i >= len(crash_state_lines_2):
            break
        similarity_ratio = _similarity_ratio(crash_state_lines_1[i], crash_state_lines_2[i])
        lines_compared += 1
        similarity_ratio_sum += similarity_ratio
    similarity_ratio_average = similarity_ratio_sum / lines_compared
    return similarity_ratio_average > self.compare_threshold
Return a bool for whether the two crash results are similar.
https://github.com/qlyoung/lagopus/blob/c5c232ab00a762b712a0b80e88703d4670a229f8/docker-images/lagopus-fuzzer/analyzer/crash_analysis/crash_comparer.py#L65-L97
from __future__ import division from builtins import object from builtins import range def _levenshtein_distance(string_1, string_2): if string_1 == string_2: return 0 elif not string_1: return len(string_2) elif not string_2: return len(string_1) v0 = list(range(len(string_2) + 1)) v1 = [None] * (len(string_2) + 1) for i in range(len(string_1)): v1[0] = i + 1 for j in range(len(string_2)): cost = 0 if string_1[i] == string_2[j] else 1 v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost) for j in range(len(v0)): v0[j] = v1[j] return v1[len(string_2)] def _similarity_ratio(string_1, string_2): length_sum = len(string_1) + len(string_2) if length_sum == 0: return 1.0 return (length_sum - _levenshtein_distance(string_1, string_2)) / ( 1.0 * length_sum) class CrashComparer(object): COMPARE_THRESHOLD = 0.8 def __init__(self, crash_state_1, crash_state_2, compare_threshold=None): self.crash_state_1 = crash_state_1 self.crash_state_2 = crash_state_2 self.compare_threshold = compare_threshold or self.COMPARE_THRESHOLD
MIT License
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_container.py
V1Container.env_from
python
def env_from(self):
    return self._env_from
Gets the env_from of this V1Container.  # noqa: E501

List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.  # noqa: E501

:return: The env_from of this V1Container.  # noqa: E501
:rtype: list[V1EnvFromSource]
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_container.py#L229-L237
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1Container(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'args': 'list[str]', 'command': 'list[str]', 'env': 'list[V1EnvVar]', 'env_from': 'list[V1EnvFromSource]', 'image': 'str', 'image_pull_policy': 'str', 'lifecycle': 'V1Lifecycle', 'liveness_probe': 'V1Probe', 'name': 'str', 'ports': 'list[V1ContainerPort]', 'readiness_probe': 'V1Probe', 'resources': 'V1ResourceRequirements', 'security_context': 'V1SecurityContext', 'startup_probe': 'V1Probe', 'stdin': 'bool', 'stdin_once': 'bool', 'termination_message_path': 'str', 'termination_message_policy': 'str', 'tty': 'bool', 'volume_devices': 'list[V1VolumeDevice]', 'volume_mounts': 'list[V1VolumeMount]', 'working_dir': 'str' } attribute_map = { 'args': 'args', 'command': 'command', 'env': 'env', 'env_from': 'envFrom', 'image': 'image', 'image_pull_policy': 'imagePullPolicy', 'lifecycle': 'lifecycle', 'liveness_probe': 'livenessProbe', 'name': 'name', 'ports': 'ports', 'readiness_probe': 'readinessProbe', 'resources': 'resources', 'security_context': 'securityContext', 'startup_probe': 'startupProbe', 'stdin': 'stdin', 'stdin_once': 'stdinOnce', 'termination_message_path': 'terminationMessagePath', 'termination_message_policy': 'terminationMessagePolicy', 'tty': 'tty', 'volume_devices': 'volumeDevices', 'volume_mounts': 'volumeMounts', 'working_dir': 'workingDir' } def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, readiness_probe=None, resources=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._args = None self._command = None self._env = None self._env_from = None self._image = None self._image_pull_policy = None self._lifecycle = None self._liveness_probe = None self._name = None self._ports = None self._readiness_probe = None self._resources = None self._security_context = None self._startup_probe = None self._stdin = None self._stdin_once = None self._termination_message_path = None self._termination_message_policy = None self._tty = None self._volume_devices = None self._volume_mounts = None self._working_dir = None self.discriminator = None if args is not None: self.args = args if command is not None: self.command = command if env is not None: self.env = env if env_from is not None: self.env_from = env_from if image is not None: self.image = image if image_pull_policy is not None: self.image_pull_policy = image_pull_policy if lifecycle is not None: self.lifecycle = lifecycle if liveness_probe is not None: self.liveness_probe = liveness_probe self.name = name if ports is not None: self.ports = ports if readiness_probe is not None: self.readiness_probe = readiness_probe if resources is not None: self.resources = resources if security_context is not None: self.security_context = security_context if startup_probe is not None: self.startup_probe = startup_probe if stdin is not None: self.stdin = stdin if stdin_once is not 
None: self.stdin_once = stdin_once if termination_message_path is not None: self.termination_message_path = termination_message_path if termination_message_policy is not None: self.termination_message_policy = termination_message_policy if tty is not None: self.tty = tty if volume_devices is not None: self.volume_devices = volume_devices if volume_mounts is not None: self.volume_mounts = volume_mounts if working_dir is not None: self.working_dir = working_dir @property def args(self): return self._args @args.setter def args(self, args): self._args = args @property def command(self): return self._command @command.setter def command(self, command): self._command = command @property def env(self): return self._env @env.setter def env(self, env): self._env = env @property
Apache License 2.0
opentoallctf/ota-challenge-bot
util/slack_wrapper.py
SlackWrapper.archive_private_channel
python
def archive_private_channel(self, channel_id):
    return self.client.api_call("groups.archive", channel=channel_id)
Archive a private channel
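A brief usage sketch (the API key and channel ID are placeholders), relying only on the `SlackWrapper` constructor shown in this record's context:

wrapper = SlackWrapper("xoxb-placeholder-api-token")
response = wrapper.archive_private_channel("G0123456789")  # hypothetical private channel ID
print(response.get("ok"))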
https://github.com/opentoallctf/ota-challenge-bot/blob/6deea8c059d28ddb86dce277158a39a5ad9517e4/util/slack_wrapper.py#L176-L178
import json import time from slackclient import SlackClient from util.util import load_json class SlackWrapper: def __init__(self, api_key): self.api_key = api_key self.client = SlackClient(self.api_key) self.connected = self.client.rtm_connect(auto_reconnect=True) self.server = None self.username = None self.user_id = None if self.connected: self.server = self.client.server self.username = self.server.username self.user_id = self.server.login_data.get("self").get("id") def read(self): return self.client.rtm_read() def invite_user(self, users, channel, is_private=False): users = [users] if not type(users) == list else users api_call = "conversations.invite" return self.client.api_call(api_call, channel=channel, users=users) def set_purpose(self, channel, purpose, is_private=False): api_call = "conversations.setPurpose" return self.client.api_call(api_call, purpose=purpose, channel=channel) def set_topic(self, channel, topic, is_private=False): api_call = "groups.setTopic" if is_private else "channels.setTopic" return self.client.api_call(api_call, topic=topic, channel=channel) def get_members(self): return self.client.api_call("users.list", presence=True) def get_member(self, user_id): return self.client.api_call("users.info", user=user_id) def create_channel(self, name, is_private=False): api_call = "conversations.create" return self.client.api_call(api_call, name=name, is_private=is_private) def rename_channel(self, channel_id, new_name, is_private=False): api_call = "groups.rename" if is_private else "channels.rename" return self.client.api_call(api_call, channel=channel_id, name=new_name, validate=False) def get_channel_info(self, channel_id, is_private=False): api_call = "conversations.info" return self.client.api_call(api_call, channel=channel_id) def get_channel_members(self, channel_id, next_cursor=None): response = self.client.api_call("conversations.members", channel=channel_id, cursor=next_cursor) members = response['members'] next_cursor = response['response_metadata']['next_cursor'] if not next_cursor: return members else: return members + self.get_channel_members(channel_id, next_cursor) def update_channel_purpose_name(self, channel_id, new_name, is_private=False): channel_info = self.get_channel_info(channel_id, is_private) if channel_info: purpose = load_json(channel_info['channel']['purpose']['value']) purpose['name'] = new_name self.set_purpose(channel_id, json.dumps(purpose), is_private) def post_message(self, channel_id, text, timestamp="", parse="full"): self.client.api_call("chat.postMessage", channel=channel_id, text=text, as_user=True, parse=parse, thread_ts=timestamp) def post_message_with_react(self, channel_id, text, reaction, parse="full"): result = self.client.api_call("chat.postMessage", channel=channel_id, text=text, as_user=True, parse=parse) if result["ok"]: self.client.api_call("reactions.add", channel=channel_id, name=reaction, timestamp=result["ts"]) def get_message(self, channel_id, timestamp): return self.client.api_call("channels.history", channel=channel_id, latest=timestamp, count=1, inclusive=True) def update_message(self, channel_id, msg_timestamp, text, parse="full"): self.client.api_call("chat.update", channel=channel_id, text=text, ts=msg_timestamp, as_user=True, parse=parse) def get_channels(self, types, next_cursor=None): types = [types] if type(types) != list else types response = self.client.api_call("conversations.list", types=types, cursor=next_cursor) channels = response['channels'] next_cursor = 
response['response_metadata']['next_cursor'] if not next_cursor: return channels else: return channels + self.get_channels(types, next_cursor) def get_all_channels(self): return self.get_channels(["public_channel", "private_channel"]) def get_channel_by_name(self, name): channels = self.get_all_channels() for channel in channels: if channel['name'] == name: return channel def get_public_channels(self): return self.get_channels("public_channel") def get_private_channels(self): return self.get_channels("private_channel") def archive_channel(self, channel_id): return self.client.api_call("conversations.archive", channel=channel_id)
MIT License
stbraun/fuzzing
features/resources/testfuzz.py
main
python
def main():
    description = "Simple app to test our fuzzer."
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('in_path', help='The name of a file to read.')
    parser.add_argument('-c', '--crash', help='Crash the app!',
                        action="store_true")
    parser.add_argument('-p', '--probability',
                        help='Crash the app with given probability (0.0-1.0)',
                        type=float, default=0.0)
    args = parser.parse_args()
    if args.crash:
        return 1 / 0
    if random() < args.probability:
        return 2 / 0
    time.sleep(3)
    return 0
Test for fuzzer.
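A sketch of how this target might be driven in-process (the file name and probability are made up for illustration):

import sys

# Hypothetical invocation: read "input.bin" and crash on roughly one run in four.
sys.argv = ["testfuzz.py", "input.bin", "--probability", "0.25"]
exit_code = main()  # raises ZeroDivisionError ~25% of the time, otherwise returns 0 after ~3 s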
https://github.com/stbraun/fuzzing/blob/bba3ad8a2b749e64a6e1f345ccd26a2675584d4e/features/resources/testfuzz.py#L11-L28
import sys
import argparse
from random import random
import time
MIT License
keflavich/agpy
agpy/pyflagger.py
Flagger.lookup
python
def lookup(self, tsname):
    if tsname not in self.tscache:
        t0 = time.time()
        s0 = heapy.heap().size
        print "Loading and caching %s" % tsname
        self.tscache[tsname] = self.tsplot_dict[tsname]()
        print "Loading and caching %s took %0.2g seconds and ate up %0.2g GB" % (tsname, time.time()-t0, (heapy.heap().size-s0)/1024.**3)
    return self.tscache.get(tsname)
Cache and return data...
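A stripped-down sketch of the same compute-once-then-cache pattern (class and key names are illustrative, not part of agpy):

class LazyStore(object):
    def __init__(self):
        self.cache = {}
        self.recipes = {'ones': lambda: [1.0] * 5,
                        'zeros': lambda: [0.0] * 5}

    def lookup(self, name):
        if name not in self.cache:
            self.cache[name] = self.recipes[name]()  # computed only on first request
        return self.cache[name]

store = LazyStore()
store.lookup('ones')  # computes and caches
store.lookup('ones')  # returns the cached list without recomputing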
https://github.com/keflavich/agpy/blob/fb3a42d9909b7cd1ba74247530bcc8742f5aaeb1/agpy/pyflagger.py#L518-L529
import math import warnings warnings.filterwarnings('ignore','masked') warnings.simplefilter('ignore') import pylab from pylab import * for k,v in pylab.__dict__.iteritems(): if hasattr(v,'__module__'): if v.__module__ is None: locals()[k].__module__ = 'pylab' import matplotlib import pyfits import numpy from mad import MAD,nanmedian from matplotlib.patches import Rectangle,FancyArrow,Circle,Ellipse from matplotlib.lines import Line2D from matplotlib.widgets import Cursor, MultiCursor import matplotlib.cm as cm try: import scipy.optimize except ImportError: pass import time import re import os import subprocess import copy import idlsave import gaussfitter import agpy.mpfit as mpfit import agpy from agpy.PCA_tools import * from AG_image_tools.drizzle import drizzle from agpy import smooth from guppy import hpy heapy = hpy() matplotlib.rcParams['image.origin']='lower' matplotlib.rcParams['image.interpolation']='nearest' matplotlib.rcParams['image.aspect']=1 matplotlib.rcParams['axes.color_cycle'] = [list(clr) for clr in matplotlib.cm.brg(linspace(0,1,144))] matplotlib.defaultParams['image.origin']='lower' matplotlib.defaultParams['image.interpolation']='nearest' matplotlib.defaultParams['image.aspect']=1 rc('font',size=24) rc('font',family='serif') if matplotlib.rcParams['text.usetex']: texOn = True else: texOn = False class lazydata(object): def __init__(self, varname, structname='bgps', reshape=None, flag=True): self.varname = varname self.structname = structname self.reshape = reshape self.flag = flag def __getattr__(self,attribute): return getattr(self,attribute) def __get__(self, obj, type=None): t0 = time.time() if obj.__dict__.has_key(self.varname): return obj.__dict__[self.varname] else: print "Computing %s " % self.varname if self.flag: obj.__dict__[self.varname] = obj.__dict__[self.structname][self.varname][0][obj.whscan,:].astype('float') obj.__dict__[self.varname][obj.whempty,:] = NaN obj.__dict__[self.varname].shape = obj.datashape obj.__dict__[self.varname] = nantomask(obj.__dict__[self.varname]) try: obj.__dict__[self.varname].mask[obj.flags > 0] = True except TypeError: obj.__dict__[self.varname].mask = (obj.flags > 0) else: obj.__dict__[self.varname] = obj.__dict__[self.structname][self.varname][0][obj.whscan,:].astype('float') obj.__dict__[self.varname].shape = obj.datashape print "Finished computing %s in %0.3g seconds" % (self.varname,time.time()-t0) return obj.__dict__[self.varname] class Flagger: def __init__(self, filename, debug=False, npca=13, **kwargs): pylab.figure(0) pylab.figure(1,figsize=[16,12]) pylab.figure(2,figsize=[16,12]) self.filename = filename self.debug = debug self.npca = npca if filename[-4:] == 'fits': self._loadfits(filename,**kwargs) elif filename[-3:] == 'sav': self._loadsav(filename,**kwargs) self.autopca = False self.autohist = False self.help = """ Key commands: left click - flag right click - unflag n - next scan p,N - previous scan q - save and quit Q - quit (no save) . 
- point to this point in the map f - plot footprint of array at this time point R - reverse order of flag boxes (to delete things hiding on the bottom) r - redraw d - delete flag box t - flag timepoint s - flag scan w - flag Whole scan (this is the same as s, except some python backends catch / steal 's') S,W - unflag scan b - flag bolometer T - unflag timepoint B - unflag bolometer c - toggle current scan v - display data value P - display the PCA decomposition of the displayed timestream o - make a map of the array at the sampled time z - display the power spectra of the displayed timestream (use 'C' to plot one) Z - display the power spectra of the displayed timestream over all time C,L - plot Column/Line j - plot whole timestream for selected bolo a - create a footprint movie between two selected points M,m - flag highest, lowest point in map e - expsub current plane Map Key Commands: c - toggle current scan . - show point in timestream click - show point in timestream middle click - list all points that contribute to that pixel r - redraw """ def _loadfits(self, filename, ncfilename='', flagfile='', mapnum='', axis=None, **kwargs): fnsearch = re.compile( '([0-9]{6}_o[0-9b][0-9]_raw_ds5.nc)(_indiv[0-9]{1,2}pca)').search(filename) ncsearch = re.compile( '[0-9]{6}_o[0-9b][0-9]_raw_ds5.nc').search(ncfilename) if fnsearch is None: print "Couldn't find the correct prefix in the filename" +" - expected form like 050906_o11_raw_ds5.nc_indiv13pca_timestream00.fits" return mapnumsearch = re.compile('([0-9]{2})(\.fits)').search(filename) if mapnumsearch is not None and mapnum=='': mapnum = mapnumsearch.groups()[0] else: mapnum = '01' if fnsearch.groups()[0] == ncsearch.group(): self.ncfilename = ncfilename self.readncfile() else: print "Warning: the NCDF filename doesn't match the input fits file name." + "You'll probably get errors and your work won't be saved." self.ncfilename = self.pathprefix+fnsearch.groups()[0] if fnsearch.group() is not None: self.fileprefix = fnsearch.group() self.pathprefix = filename[:fnsearch.start()] self.tsfn = self.pathprefix+self.fileprefix+"_timestream00.fits" self.tsfile = pyfits.open(self.tsfn) self.mapfn = self.pathprefix+self.fileprefix+"_map"+mapnum+".fits" self.mapfile = pyfits.open(self.mapfn) self.map = self.mapfile[0].data self.map[numpy.isnan(self.map)] = 0 self.tstomapfn = self.pathprefix+self.fileprefix+"_tstomap.fits" self.tstomapfile = pyfits.open(self.tstomapfn) self.tstomap = self.tstomapfile[0].data self.data = self.tsfile[0].data self.flagfn = self.pathprefix+self.fileprefix+"_flags.fits" self._initialize_vars(**kwargs) def _initialize_vars(self,vmax=None): self.reset() self.counter = 0 self.mouse_up = False self.connected = 0 print "There are %i scans" % (self.data.shape[0]) self.maxscan = self.data.shape[0] self.rectangles=[[] for i in xrange(self.maxscan)] self.lines=[[] for i in xrange(self.maxscan)] self.arrows=[] self.maparrows=[] self.connections=[] self.mapconnections=[] self.md = 0 self.mu = 0 self.key = 0 self._lastkey = None self.scannum = 0 self.fignum = 1 self.open = 1 self.currentscan = 0 self.aspect = float(self.data.shape[2])/float(self.data.shape[1]) self.plotfig = None self.bolofig = None self.mapfig = None self.flagfig = None self.datafig = None self.scanim = None self.PCAflag = False self.powerspec_plotted = False self.powerspectra_whole = None self.gaussfit=None self.showmap(vmax=vmax) self.dcon() def _loadsav(self, savfile, flag=True, **kwargs): memtot = heapy.heap().size / 1024.0**3 print "Beginning IDLsave file read. 
%0.3g GB used" % memtot t0 = time.time() sav = idlsave.read(savfile) memtot = heapy.heap().size / 1024.0**3 t1 = time.time() print "Finished reading IDLsave file in %i seconds using %0.3g GB" % (t1 - t0,memtot) self.bgps = sav.get('bgps') memtot = heapy.heap().size / 1024.0**3 t2 = time.time() print "Set bgps variable in %i seconds using %0.3g GB" % (t2 - t1,memtot) self.mapstr = sav.get('mapstr') self.needed_once_struct = sav.get('needed_once_struct') if self.needed_once_struct is None: neededoncefile = savfile.replace('preiter','neededonce').replace('postiter','neededonce') if os.path.exists(neededoncefile): sav_once = idlsave.read(neededoncefile) self.needed_once_struct = sav_once.get('needed_once_struct') t3 = time.time() print "Completed IDLsave file read in %f seconds." % (t3 - t0) self.ncfilename = savfile self.tsfile = None if self.needed_once_struct is not None: self.ncdf_filename = self.needed_once_struct.filenames[0] self.outfile_prefix = self.mapstr.outmap[0] fnsearch = re.compile( '([0-9]{6}_o[0-9b][0-9]_raw_ds[125].nc)(_indiv[0-9]{1,2}pca)').search(savfile) if fnsearch is not None: self.fileprefix = fnsearch.group() self.pathprefix = savfile[:fnsearch.start()] self.ncscans = self.bgps['scans_info'][0] self.sample_interval = self.bgps['sample_interval'][0] if len(self.ncscans.shape) == 1: self.ncscans.shape = [1,2] self.scanlengths = self.ncscans[:,1]+1-self.ncscans[:,0] self.scanlen = numpy.max(self.scanlengths) self.ncflags = self.bgps['flags'][0] self.timelen = self.ncflags.shape[0] self.nbolos = self.ncflags.shape[1] self.nscans = self.ncscans.shape[0] self.ncbolo_params = self.bgps['bolo_params'][0] self.ncbolo_indices = self.bgps['bolo_indices'][0] self.bolo_indices = self.ncbolo_indices self.ngoodbolos = self.bolo_indices.shape[0] self.whscan = asarray([arange(self.scanlen)+i for i,j in self.ncscans[:,:2]]).ravel() self.scanstarts = arange(self.nscans)*self.scanlen self.whempty = concatenate([arange(i+j,i+self.scanlen) for i,j in zip(self.scanstarts,self.scanlengths) ]).ravel() self.whscan[self.whempty] = 0 self.tsshape = [self.nscans*self.scanlen,self.ngoodbolos] self.datashape = [self.nscans,self.scanlen,self.ngoodbolos] t4 = time.time() memtot = heapy.heap().size / 1024.0**3 print "Beginning array reshaping with %f seconds elapsed, %0.3g GB used." % (t4 - t0, memtot) setattr(self.__class__, 'flags', lazydata('flags',flag=False)) self.flags.shape = self.datashape if self.needed_once_struct is not None: print "Loading 'raw' and 'dc_bolos' from needed_once_struct" setattr(self.__class__, 'raw', lazydata('raw', 'needed_once_struct',flag=flag)) setattr(self.__class__, 'dc_bolos', lazydata('dc_bolos', 'needed_once_struct',flag=flag)) elif self.bgps.dtype.fields.has_key('raw'): print "Loading 'raw' and 'dc_bolos' from bgps" setattr(self.__class__, 'raw', lazydata('raw',flag=flag)) setattr(self.__class__, 'dc_bolos', lazydata('dc_bolos',flag=flag)) self.scale_coeffs = self.bgps['scale_coeffs'][0].astype('float') t5 = time.time() memtot = heapy.heap().size / 1024.0**3 print "Finished array reshaping in %f seconds, %0.3g GB used." % (t5 - t4, memtot) print "Beginning array flagging." 
datums=['astrosignal','atmosphere','ac_bolos','atmo_one','noise','scalearr','weight','mapped_astrosignal'] for d in datums: if hasattr(self.bgps,d): setattr(self.__class__, d, lazydata(d,flag=flag)) self.weight_by_bolo = self.weight.mean(axis=0).mean(axis=0) if hasattr(self.bgps,'mapped_astrosignal'): setattr(self.__class__, 'mapped_astrosignal', lazydata('mapped_astrosignal',flag=flag)) else: setattr(self.__class__, 'mapped_astrosignal', lazydata('astrosignal',flag=flag)) if list(self.ac_bolos.shape) != self.datashape: import pdb; pdb.set_trace() self.data = self.ac_bolos self.ncfile = None self.flagfn = savfile.replace("sav","_flags.fits") self.map = nantomask( self.mapstr['astromap'][0] ) self.default_map = nantomask( self.mapstr['astromap'][0] ) self.model = nantomask( self.mapstr['model'][0] ) self.noisemap = nantomask( self.mapstr['noisemap'][0] ) if not hasattr(self.bgps,'atmo_one'): print "Reading file as a v1.0.2 sav file" self.atmo_one = self.ac_bolos - self.astrosignal self.mapped_timestream = self.ac_bolos - self.atmosphere self.scalearr = numpy.ones(self.datashape[1])[newaxis,:,newaxis]*self.scale_coeffs.swapaxes(0,1)[:,newaxis,:] self.version = 'v1.0' else: self.mapped_timestream = self.atmo_one - self.atmosphere + self.astrosignal self.version = 'v2.0' if self.map.sum() == 0: self.map = nantomask( self.mapstr['rawmap'][0] ) self.header = pyfits.Header(_hdr_string_list_to_cardlist( self.mapstr['hdr'][0] )) t6 = time.time() memtot = heapy.heap().size / 1024.0**3 print "Finished array flagging in %f seconds, %0.3g GB used." % (t6 - t5, memtot) self.tstomap = reshape( self.mapstr['ts'][0][self.whscan,:] , self.datashape ) t7 = time.time() memtot = heapy.heap().size / 1024.0**3 print "Computed tstomap in %f seconds, %0.3g GB used." % (t7 - t6, memtot) self._initialize_vars(**kwargs) self.tsplot_dict = {'astrosignal': lambda: self.astrosignal if self.astrosignal.sum() != 0 else 0, 'dc_bolos': lambda: self.dc_bolos*self.scalearr, 'dc_bolos_noscale': lambda: self.dc_bolos, 'dcbolos': lambda: self.dc_bolos*self.scalearr, 'dcbolos_noscale': lambda: self.dc_bolos, 'acbolos_noscale': lambda: self.ac_bolos, 'ac_bolos_noscale': lambda: self.ac_bolos, 'atmo_one': lambda:self.atmo_one, 'acbolos': lambda:self.ac_bolos*self.scalearr, 'ac_bolos': lambda:self.ac_bolos*self.scalearr, 'atmosphere': lambda:self.atmosphere, 'skysub_noscale': lambda:self.atmo_one - self.atmosphere, 'new_astro': lambda: self.atmo_one - self.atmosphere, 'new_astro_v1': lambda: self.lookup('PCA_astro_v1'), 'residual': lambda:self.atmo_one - self.atmosphere - self.noise, 'skysub': lambda:self.atmo_one - self.atmosphere + self.astrosignal, 'default': lambda:self.atmo_one - self.atmosphere + self.astrosignal, 'last_astrosignal': lambda:self.atmo_one - self.atmosphere - self.noise + self.astrosignal, 'acbMatmo': lambda: self.ac_bolos - self.atmo_one - self.atmosphere, 'acbMatmosphere': lambda: self.ac_bolos - self.atmosphere, 'acbMatmoone': lambda: self.ac_bolos - self.atmo_one, 'scale': lambda: self.scalearr, 'weight': lambda: self.weight, 'raw': lambda: self.raw, 'rawscaled': lambda: self.raw * self.scalearr, 'noise': lambda: self.noise, 'nomodel_astro': lambda: self.noise+self.astrosignal, 'newnoise': lambda: self.lookup('PCA_astro') + self.astrosignal - self.lookup('astrosignal_from_model'), 'mapped_astrosignal': lambda: self.mapped_astrosignal, 'mapped_timestream': lambda: self.mapped_timestream, 'astrosignal_from_map': lambda: self.default_map.flat[self.tstomap], 'astrosignal_from_model': lambda: 
self.model.flat[self.tstomap], 'itermedian': lambda: itermedian(self.ac_bolos * self.scalearr), 'zeromedian': lambda: self.atmo_one, 'atmo_one_itermedian': lambda: itermedian(self.atmo_one), 'expsub': lambda: exponent_sub(self.lookup('atmo_one_itermedian')), 'preexpsub': lambda: exponent_sub(self.lookup('acbolos_noscale')), 'preexpsub_piecewise': lambda: exponent_sub(self.lookup('acbolos_noscale'),piecewise=True), 'atmos_remainder': lambda: self.lookup('atmo_one_itermedian'), 'atmos_remainder_v1': lambda: itermedian(self.atmo_one,scale=1.0,niter=1), 'expmodel': lambda: self.lookup('atmo_one_itermedian') - self.lookup('expsub'), 'first_sky': lambda: self.atmo_one - self.lookup('atmos_remainder'), 'first_sky_v1': lambda: self.atmo_one - self.lookup('atmos_remainder_v1'), 'astrosignal_premap': lambda: self.lookup('PCA_astro')+self.astrosignal, 'PCA_atmo_v1': lambda: reshape(unpca_subtract(numpy.nan_to_num(reshape(self.lookup('atmos_remainder_v1'),self.tsshape)),self.npca),self.datashape), 'PCA_astro_v1': lambda: reshape(pca_subtract(numpy.nan_to_num(reshape(self.lookup('atmos_remainder_v1'),self.tsshape)),self.npca),self.datashape), 'PCA_atmo': lambda: reshape(unpca_subtract(numpy.nan_to_num(reshape(self.lookup('atmos_remainder'),self.tsshape)),self.npca),self.datashape), 'PCA_astro': lambda: reshape(pca_subtract(numpy.nan_to_num(reshape(self.lookup('atmos_remainder'),self.tsshape)),self.npca),self.datashape), 'PCA_astrosignal': lambda: reshape(efuncs(reshape(self.astrosignal,self.tsshape)),self.datashape) / self.nbolos**0.5, 'PCA_acb': lambda: reshape(efuncs(reshape(self.ac_bolos,self.tsshape)),self.datashape) / self.nbolos**0.5, 'PCA_zeromedian': lambda: reshape(efuncs(reshape(self.atmo_one,self.tsshape)),self.datashape) / self.nbolos**0.5, 'PCA_itermedian': lambda: reshape(efuncs(reshape(self.lookup('itermedian'),self.tsshape)),self.datashape) / self.nbolos**0.5, 'PCA_noise': lambda: reshape(efuncs(reshape(self.noise,self.tsshape)),self.datashape) / self.nbolos**0.5, 'PCA_default': lambda: reshape(efuncs(reshape(self.atmo_one - self.atmosphere + self.astrosignal,self.tsshape)),self.datashape) / self.nbolos**0.5, 'PCA_atmos_remainder': lambda: reshape(efuncs(reshape(numpy.nan_to_num(self.lookup('atmos_remainder')),self.tsshape)),self.datashape) / self.nbolos**0.5, } self.tscache = {} self.tsplot = 'default' self.set_tsplot(**kwargs) print "Completed the rest of initialization in an additional %f seconds" % (time.time()-t1)
MIT License
freifeld/cpabdiffeo
cpab/cpa2d/inference/transformation/Register.py
Register.set_data
python
def set_data(self, x, signal_src, signal_dst, isbinary):
    self.isbinary = isbinary
    nPts = len(x)
    if x.ndim != 2 or x.shape[1] != 2:
        raise ValueError(x.shape)
    if signal_src.shape != signal_dst.shape:
        raise ValueError(signal_src.shape, signal_dst.shape)
    if signal_src.ndim != 2:
        raise ValueError(signal_src.shape)
    signal_src = signal_src.astype(np.float64)
    signal_dst = signal_dst.astype(np.float64)
    if nPts != signal_src.size:
        raise ValueError(nPts, signal_src.shape)
    if x.shape[0] != signal_dst.size:
        raise ValueError(nPts, signal_dst.shape)
    if x.dtype != np.float64:
        raise TypeError(x.dtype)
    if signal_src.dtype != np.float64:
        raise TypeError(signal_src.dtype)
    if signal_dst.dtype != np.float64:
        raise TypeError(signal_dst.dtype)
    if signal_src.ndim == 1:
        raise ValueError(signal_src.ndim)
    if signal_dst.ndim == 1:
        raise ValueError(signal_dst.ndim)
    if not isinstance(signal_src, CpuGpuArray):
        signal_src = CpuGpuArray(signal_src)
    if not isinstance(signal_dst, CpuGpuArray):
        signal_dst = CpuGpuArray(signal_dst)
    self.signal = Bunch()
    self.signal.src = signal_src
    self.signal.dst = signal_dst
    self.src = x
    self.transformed = CpuGpuArray.zeros_like(self.src)
    self.signal.transformed = CpuGpuArray.zeros_like(signal_src)
For now, assumes dst was evaluated on evenly-spaced points // Is the comment above still current?
https://github.com/freifeld/cpabdiffeo/blob/22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6/cpab/cpa2d/inference/transformation/Register.py#L104-L164
import numpy as np
import cv2
import pylab
from pylab import plt
from of.gpu import CpuGpuArray
from of.utils import *
from cpab.cpa2d.TransformWrapper import TransformWrapper
from cpab.cpaNd.inference.Metropolis import Metropolis
from cpab.cpaNd.inference.Proposal import Proposal
from cpab.cpaNd.model import LogLikelihood as LL
from cpab.cpaNd.model import LogPrior as LP
from cpab.cpa2d.model.transformations.register import ScaleDependentLogLikelihoodGaussian as SDLL_gaussian
from cpab.cpaNd.model import ScaleDependentLogPrior as SDLP


class Register(object):
    def __init__(self, nRows=100, nCols=100,
                 base=[2, 2],
                 nLevels=4,
                 tess='tri',
                 zero_v_across_bdry=[False]*2,
                 scale_spatial=1.0 * 10,
                 scale_value=2.0,
                 sigma_signal=None,
                 wlp=1e-4,
                 ll_type=['gaussian', 'gaussian_on_distancetransform'][0],
                 only_local=False,
                 valid_outside=True):
        ll_type = ll_type.lower()
        if ll_type == 'gaussian':
            self.SDLL = SDLL_gaussian
        else:
            raise ValueError(ll_type)
        self.base = base
        self.nLevels = nLevels
        if sigma_signal is None:
            raise ValueError("sigma_signal cannot be None")
        self.sigma_signal = sigma_signal
        self.wlp = wlp
        self.tw = TransformWrapper(nRows=nRows, nCols=nCols,
                                   nLevels=nLevels,
                                   base=base,
                                   tess=tess,
                                   scale_spatial=scale_spatial,
                                   scale_value=scale_value,
                                   zero_v_across_bdry=zero_v_across_bdry,
                                   only_local=only_local,
                                   valid_outside=valid_outside)

    def set_dense(self, domain_start=-10, domain_end=10):
        self.src_dense = self.tw.pts_src_dense
        self.transformed_dense = self.tw.transformed_dense
MIT License
dojot/device-manager
DeviceManager/DeviceHandler.py
DeviceHandler.update_device
python
def update_device(cls, params, device_id, token):
    try:
        content_type = params.get('content_type')
        data_request = params.get('data')
        device_data, json_payload = parse_payload(content_type, data_request, device_schema)
        validate_repeated_attrs(json_payload)
        tenant = init_tenant_context(token, db)
        old_orm_device = assert_device_exists(device_id)
        db.session.delete(old_orm_device)
        db.session.flush()
        device_data.pop('templates')
        updated_orm_device = Device(**device_data)
        parse_template_list(json_payload.get('templates', []), updated_orm_device)
        auto_create_template(json_payload, updated_orm_device)
        updated_orm_device.id = device_id
        updated_orm_device.updated = datetime.now()
        updated_orm_device.created = old_orm_device.created
        db.session.add(updated_orm_device)
        db.session.commit()
    except IntegrityError as error:
        handle_consistency_exception(error)
    except ValidationError as error:
        raise HTTPRequestError(400, error.messages)

    full_device = serialize_full_device(updated_orm_device, tenant)

    kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier)
    kafka_handler_instance.update(full_device, meta={"service": tenant})

    result = {
        'message': 'device updated',
        'device': serialize_full_device(updated_orm_device, tenant)
    }
    return result
Updates the information about a particular device.

:param params: Parameters received from request (content_type, data) as created by Flask
:param device_id: The device to be updated.
:param token: The authorization token (JWT).
:return The updated device.
:rtype JSON
:raises HTTPRequestError: If no authorization token was provided (no tenant was informed)
:raises HTTPRequestError: If this device could not be found in database.
https://github.com/dojot/device-manager/blob/31b630fe0969f6666f07db59a489772d7f0639d6/DeviceManager/DeviceHandler.py#L531-L584
import re import logging import json import time from datetime import datetime import secrets from flask import request, jsonify, Blueprint, make_response from sqlalchemy.exc import IntegrityError from sqlalchemy import or_, and_, func, text from DeviceManager.utils import * from DeviceManager.utils import create_id, get_pagination, format_response from DeviceManager.utils import HTTPRequestError from DeviceManager.conf import CONFIG from DeviceManager.BackendHandler import KafkaHandler, KafkaInstanceHandler from DeviceManager.DatabaseHandler import db from DeviceManager.DatabaseModels import assert_device_exists, assert_template_exists from DeviceManager.DatabaseModels import handle_consistency_exception, assert_device_relation_exists from DeviceManager.DatabaseModels import DeviceTemplate, DeviceAttr, Device, DeviceTemplateMap, DeviceAttrsPsk from DeviceManager.DatabaseModels import DeviceOverride from DeviceManager.SerializationModels import device_list_schema, device_schema, ValidationError from DeviceManager.SerializationModels import attr_list_schema from DeviceManager.SerializationModels import parse_payload, load_attrs, validate_repeated_attrs from DeviceManager.TenancyManager import init_tenant_context from DeviceManager.app import app from DeviceManager.Logger import Log device = Blueprint('device', __name__) LOGGER = Log().color_log() def fill_overridden_flag(attrs): for templateId in attrs: for attr in attrs[templateId]: if 'is_static_overridden' not in attr and 'static_value' in attr: attr['is_static_overridden'] = False if 'metadata' in attr: for metadata in attr['metadata']: if 'is_static_overridden' not in metadata and 'static_value' in metadata: metadata['is_static_overridden'] = False def serialize_override_attrs(orm_overrides, attrs): fill_overridden_flag(attrs) for override in orm_overrides: if override.attr.template_id is not None: for attr in attrs[override.attr.template_id]: if attr['id'] == override.aid: attr['static_value'] = override.static_value attr['is_static_overridden'] = True else: for attr in attrs[override.attr.parent.template_id]: if attr['id'] == override.attr.parent_id: for metadata in attr['metadata']: if metadata['id'] == override.aid: metadata['static_value'] = override.static_value metadata['is_static_overridden'] = True def serialize_full_device(orm_device, tenant, sensitive_data=False): data = device_schema.dump(orm_device) data['attrs'] = {} for template in orm_device.templates: data['attrs'][template.id] = attr_list_schema.dump(template.attrs) serialize_override_attrs(orm_device.overrides, data['attrs']) if sensitive_data: for psk_data in orm_device.pre_shared_keys: for template_id in data['attrs']: for attr in data['attrs'][template_id]: if attr['id'] == psk_data.attr_id: dec = decrypt(psk_data.psk) attr['static_value'] = dec.decode('ascii') return data def find_template(template_list, id): LOGGER.debug(f" Finding template from template list") for template in template_list: if template.id == int(id): return template def create_orm_override(attr, orm_device, orm_template): try: target = int(attr['id']) except ValueError: LOGGER.error(f" Unknown attribute {attr['id']} in override list") raise HTTPRequestError(400, 'Unknown attribute {} in override list'.format(attr['id'])) found = False for orm_attr in orm_template.attrs: if target == orm_attr.id: found = True if 'static_value' in attr and attr['static_value'] is not None: orm_override = DeviceOverride( device=orm_device, attr=orm_attr, static_value=attr['static_value'] ) 
db.session.add(orm_override) LOGGER.debug(f" Added overrided form {orm_override}") if 'metadata' in attr: for metadata in attr['metadata']: try: metadata_target = int(metadata['id']) LOGGER.debug(f" Updated metadata {metadata_target}") except ValueError: LOGGER.error(f" metadata attribute {attr['id']} in override list") raise HTTPRequestError(400, 'Unknown metadata attribute {} in override list'.format( metadata['id'])) found = False with db.session.no_autoflush: for orm_attr_child in orm_attr.children: if metadata_target == orm_attr_child.id: found = True if 'static_value' in metadata and metadata['static_value'] is not None: orm_override = DeviceOverride( device=orm_device, attr=orm_attr_child, static_value=metadata['static_value'] ) db.session.add(orm_override) LOGGER.debug(f" Added overrided form {orm_override}") if not found: LOGGER.error(f" Unknown attribute {attr['id']} in override list") raise HTTPRequestError(400, 'Unknown attribute {} in override list'.format(target)) def auto_create_template(json_payload, new_device): if ('attrs' in json_payload) and (new_device.templates is None): device_template = DeviceTemplate( label="device.%s template" % new_device.id) db.session.add(device_template) LOGGER.debug(f" Adding auto-created template {device_template} into database") new_device.templates = [device_template] load_attrs(json_payload['attrs'], device_template, DeviceAttr, db) if ('attrs' in json_payload) and (new_device.templates is not None): for attr in json_payload['attrs']: orm_template = find_template(new_device.templates, attr['template_id']) if orm_template is None: LOGGER.error(f" Unknown template {orm_template} in attr list") raise HTTPRequestError(400, 'Unknown template {} in attr list'.format(orm_template)) create_orm_override(attr, new_device, orm_template) def parse_template_list(template_list, new_device): new_device.templates = [] LOGGER.debug(f" Adding new template list for device {new_device}") for template_id in template_list: new_device.templates.append(assert_template_exists(template_id, db.session)) def find_attribute(orm_device, attr_name, attr_type): for template_id in orm_device['attrs']: for attr in orm_device['attrs'][template_id]: if (attr['label'] == attr_name) and (attr['type'] == attr_type): LOGGER.debug(f" retrieving attribute {attr}") return attr return None class DeviceHandler(object): kafka = KafkaInstanceHandler() def __init__(self): pass @staticmethod def indexed_label(count, c_length, base, index): if count == 1: return base else: return "{}_{:0{width}d}".format(base, index, width=c_length) @staticmethod def generate_device_id(): _attempts = 0 generated_id = '' while _attempts < 10 and len(generated_id) == 0: _attempts += 1 new_id = create_id() if Device.query.filter_by(id=new_id).first() is None: LOGGER.debug(f" Generated a new device id {new_id}") return new_id LOGGER.error(f" Failed to generate unique device_id") raise HTTPRequestError(500, "Failed to generate unique device_id") @staticmethod def list_ids(token): init_tenant_context(token, db) data = [] LOGGER.debug(f" Fetching list with known devices") for id in db.session.query(Device.id).all(): data.append(id[0]) return data @staticmethod def get_devices(token, params, sensitive_data=False): tenant = init_tenant_context(token, db) pagination = {'page': params.get('page_number'), 'per_page': params.get('per_page'), 'error_out': False} SORT_CRITERION = { 'label': Device.label, None: Device.id } sortBy = SORT_CRITERION.get(params.get('sortBy')) attr_filter = [] query = params.get('attr') for 
attr_label_item in query: parsed = re.search('^(.+){1}=(.+){1}$', attr_label_item) attr_label = [] attr_label.append(DeviceAttr.label == parsed.group(1)) attr_label.append(text("coalesce(overrides.static_value, attrs.static_value)=:static_value ").bindparams(static_value=parsed.group(2))) attr_filter.append(and_(*attr_label)) query = params.get('attr_type') for attr_type_item in query: attr_filter.append(DeviceAttr.value_type == attr_type_item) label_filter = [] target_label = params.get('label') if target_label: label_filter.append(Device.label.like("%{}%".format(target_label))) template_filter = [] target_template = params.get('template') if target_template: template_filter.append(DeviceTemplateMap.template_id == target_template) if (attr_filter): LOGGER.debug(f" Filtering devices by {attr_filter}") page = db.session.query(Device) .join(DeviceTemplateMap, isouter=True) page = page.join(DeviceTemplate) .join(DeviceAttr, isouter=True) .join(DeviceOverride, (Device.id == DeviceOverride.did) & (DeviceAttr.id == DeviceOverride.aid), isouter=True) page = page.filter(*label_filter) .filter(*template_filter) .filter(*attr_filter) .order_by(sortBy) .paginate(**pagination) elif label_filter or template_filter: if label_filter: LOGGER.debug(f"Filtering devices by label: {target_label}") if template_filter: LOGGER.debug(f"Filtering devices with template: {target_template}") page = db.session.query(Device) .join(DeviceTemplateMap, isouter=True) if sensitive_data: page = page.join(DeviceTemplate) .join(DeviceAttr, isouter=True) .join(DeviceOverride, (Device.id == DeviceOverride.did) & (DeviceAttr.id == DeviceOverride.aid), isouter=True) page = page.filter(*label_filter) .filter(*template_filter) .order_by(sortBy) .paginate(**pagination) else: LOGGER.debug(f" Querying devices sorted by device id") page = db.session.query(Device).order_by(sortBy).paginate(**pagination) devices = [] if params.get('idsOnly').lower() in ['true', '1', '']: return DeviceHandler.get_only_ids(page) for d in page.items: devices.append(serialize_full_device(d, tenant, sensitive_data)) result = { 'pagination': { 'page': page.page, 'total': page.pages, 'has_next': page.has_next, 'next_page': page.next_num }, 'devices': devices } return result @staticmethod def get_only_ids(page): device_id = [] for device in page.items: data_device = device_schema.dump(device) id_device = data_device.get('id') device_id.append(id_device) return device_id @staticmethod def get_device(token, device_id, sensitive_data=False): tenant = init_tenant_context(token, db) orm_device = assert_device_exists(device_id) return serialize_full_device(orm_device, tenant, sensitive_data) @classmethod def create_device(cls, params, token): tenant = init_tenant_context(token, db) try: count = int(params.get('count')) except ValueError as e: LOGGER.error(e) raise HTTPRequestError(400, "If provided, count must be integer") c_length = len(str(count)) verbose = params.get('verbose') in ['true', '1', 'True'] if verbose and count != 1: raise HTTPRequestError( 400, "Verbose can only be used for single device creation") devices = [] full_device = None orm_devices = [] try: for i in range(0, count): content_type = params.get('content_type') data_request = params.get('data') device_data, json_payload = parse_payload(content_type, data_request, device_schema) validate_repeated_attrs(json_payload) device_data['id'] = DeviceHandler.generate_device_id() device_data['label'] = DeviceHandler.indexed_label(count, c_length, device_data['label'], i) device_data.pop('templates', None) 
orm_device = Device(**device_data) parse_template_list(json_payload.get('templates', []), orm_device) auto_create_template(json_payload, orm_device) db.session.add(orm_device) orm_devices.append(orm_device) db.session.commit() except IntegrityError as error: handle_consistency_exception(error) except ValidationError as error: raise HTTPRequestError(400, error.messages) for orm_device in orm_devices: devices.append( { 'id': orm_device.id, 'label': orm_device.label } ) full_device = serialize_full_device(orm_device, tenant) kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier) kafka_handler_instance.create(full_device, meta={"service": tenant}) if verbose: result = { 'message': 'device created', 'devices': [full_device] } else: result = { 'message': 'devices created', 'devices': devices } return result @classmethod def delete_device(cls, device_id, token): tenant = init_tenant_context(token, db) orm_device = assert_device_exists(device_id) data = serialize_full_device(orm_device, tenant) kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier) kafka_handler_instance.remove(data, meta={"service": tenant}) db.session.delete(orm_device) db.session.commit() results = {'result': 'ok', 'removed_device': data} return results @staticmethod def delete_all_devices(token): tenant = init_tenant_context(token, db) json_devices = [] devices = db.session.query(Device) for device in devices: db.session.delete(device) json_devices.append(serialize_full_device(device, tenant)) db.session.commit() results = { 'result': 'ok', 'removed_devices': json_devices } return results @classmethod
Apache License 2.0
itisfoundation/osparc-simcore
api/tests/utils.py
list_files_in_api_specs
python
def list_files_in_api_specs(wildcard: str) -> List[str]:
    specs_dir = specs_folder()
    return list(str(p) for p in specs_dir.rglob(wildcard))
Helper function to parameterize tests with list of files

e.g.

    pytest -v test_individual_openapi_schemas.py

    test_individual_openapi_schemas.py::test_valid_individual_openapi_schemas_specs[/home/crespo/devp/osparc-simcore/api/specs/common/schemas/node-meta-v0.0.1.json] PASSED
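A small usage sketch (the wildcards are just examples of patterns one might pass):

json_schemas = list_files_in_api_specs("*.json")
yaml_schemas = list_files_in_api_specs("*.y*ml")
print(len(json_schemas), "JSON files found under api/specs")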
https://github.com/itisfoundation/osparc-simcore/blob/a50b61735381231abba2cfcd57f3314785c656b0/api/tests/utils.py#L28-L38
import json
import sys
from pathlib import Path
from typing import List

import yaml

CONVERTED_SUFFIX = "-converted.yaml"

current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent


def find_current_repo_folder():
    cpath = Path(current_dir)
    while not any(cpath.glob(".git")):
        cpath = cpath.parent
        assert cpath != cpath.parent
    assert cpath.glob("services")
    return cpath


current_repo_dir = find_current_repo_folder()


def specs_folder():
    return current_dir.parent / "specs"
MIT License
opencti-platform/connectors
external-import/kaspersky/src/kaspersky/master_ioc/importer.py
MasterIOCImporter.__init__
python
def __init__(
    self,
    helper: OpenCTIConnectorHelper,
    client: KasperskyClient,
    author: Identity,
    tlp_marking: MarkingDefinition,
    create_observables: bool,
    create_indicators: bool,
    update_existing_data: bool,
    master_ioc_fetch_weekday: Optional[int],
    master_ioc_excluded_ioc_indicator_types: Set[str],
    master_ioc_report_type: str,
    master_ioc_report_status: int,
) -> None:
    super().__init__(helper, client, author, tlp_marking, update_existing_data)

    self.create_observables = create_observables
    self.create_indicators = create_indicators

    self.master_ioc_fetch_weekday = master_ioc_fetch_weekday
    self.master_ioc_excluded_ioc_indicator_types = (
        master_ioc_excluded_ioc_indicator_types
    )
    self.master_ioc_report_type = master_ioc_report_type
    self.master_ioc_report_status = master_ioc_report_status

    if not (self.create_observables or self.create_indicators):
        msg = "'create_observables' and 'create_indicators' false at the same time"
        raise ValueError(msg)
Initialize Kaspersky Master IOC importer.
https://github.com/opencti-platform/connectors/blob/78d4ec585e8ffcc51e90149e74c7c57c176f5209/external-import/kaspersky/src/kaspersky/master_ioc/importer.py#L30-L59
import itertools
from datetime import datetime
from typing import Any, List, Mapping, Optional, Set, Tuple

from pycti import OpenCTIConnectorHelper
from stix2 import Bundle, Identity, MarkingDefinition
from stix2.exceptions import STIXError

from kaspersky.client import KasperskyClient
from kaspersky.importer import BaseImporter
from kaspersky.master_ioc.builder import IndicatorGroupBundleBuilder
from kaspersky.models import OpenIOCCSV, OpenIOCCSVIndicator
from kaspersky.utils import (
    convert_openioc_csv_to_openioc_csv_model,
    datetime_to_timestamp,
    datetime_utc_now,
    is_current_weekday_before_datetime,
    timestamp_to_datetime,
)


class MasterIOCImporter(BaseImporter):

    _LATEST_MASTER_IOC_TIMESTAMP = "latest_master_ioc_timestamp"
Apache License 2.0
pasqal-io/pulser
pulser/waveforms.py
Waveform.duration
python
def duration(self) -> int:
    pass
The duration of the pulse (in ns).
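Concrete subclasses implement this abstract property; a brief usage sketch, assuming the `ConstantWaveform` subclass defined in the same pulser.waveforms module:

from pulser.waveforms import ConstantWaveform  # a concrete Waveform subclass

wf = ConstantWaveform(1000, 2.0)  # 1000 ns at a constant value of 2.0
print(wf.duration)                # 1000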
https://github.com/pasqal-io/pulser/blob/0696d4e375c1ab153ba36002f3b24c9423c4ec78/pulser/waveforms.py#L93-L95
from __future__ import annotations from abc import ABC, abstractmethod import functools import inspect import itertools import sys from sys import version_info from types import FunctionType from typing import Any, cast, Optional, Tuple, Union import warnings from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np from numpy.typing import ArrayLike import scipy.interpolate as interpolate from pulser.parametrized import Parametrized, ParamObj from pulser.parametrized.decorators import parametrize from pulser.json.utils import obj_to_dict if version_info[:2] >= (3, 8): from functools import cached_property else: try: from backports.cached_property import cached_property except ImportError: raise ImportError( "Using pulser with Python version 3.7 requires the" " `backports.cached-property` module. Install it by running" " `pip install backports.cached-property`." ) class Waveform(ABC): def __new__(cls, *args, **kwargs): for x in itertools.chain(args, kwargs.values()): if isinstance(x, Parametrized): return ParamObj(cls, *args, **kwargs) else: return object.__new__(cls) def __init__(self, duration: Union[int, Parametrized]): duration = cast(int, duration) try: _duration = int(duration) except (TypeError, ValueError): raise TypeError( "duration needs to be castable to an int but " f"type {type(duration)} was provided." ) if _duration <= 0: raise ValueError( "A waveform must have a positive duration, " + f"not {duration}." ) elif duration - _duration != 0: warnings.warn( f"A waveform duration of {duration} ns is below the" " supported precision of 1 ns. It was rounded down " + f"to {_duration} ns.", stacklevel=3, ) self._duration = _duration @property @abstractmethod
Apache License 2.0
couchbase/couchbase-cli
cbmgr.py
find_subcommands
python
def find_subcommands():
    clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    subclasses = [cls for cls in clsmembers
                  if issubclass(cls[1], (Subcommand, LocalSubcommand))
                  and cls[1] not in [Subcommand, LocalSubcommand]]

    subcommands = []
    for subclass in subclasses:
        name = '-'.join([part.lower() for part in re.findall('[A-Z][a-z]*', subclass[0])])
        subcommands.append((name, subclass[1]))

    return subcommands
Finds all subcommand classes
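The hyphenated command name is derived from the CamelCase class name by the same regex used above; a quick illustration (the class names below are hypothetical):

import re

for class_name in ("ClusterInit", "BucketCreate", "SettingAlert"):
    name = '-'.join(part.lower() for part in re.findall('[A-Z][a-z]*', class_name))
    print(class_name, "->", name)
# ClusterInit -> cluster-init
# BucketCreate -> bucket-create
# SettingAlert -> setting-alert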
https://github.com/couchbase/couchbase-cli/blob/bc764599731c996d8e7f38979809ffbb21f8f807/cbmgr.py#L269-L279
import getpass import inspect import ipaddress import json import os import platform import random import re import string import subprocess import sys import tempfile import time import urllib.parse from argparse import SUPPRESS, Action, ArgumentError, ArgumentParser, HelpFormatter from operator import itemgetter from typing import Any, Dict, List, Optional from cluster_manager import ClusterManager from pbar import TopologyProgressBar from x509_adapter import X509AdapterError try: from cb_version import VERSION except ImportError: VERSION = "0.0.0-0000-community" print(f'WARNING: Could not import cb_version, setting VERSION to {VERSION}') COUCHBASE_DEFAULT_PORT = 8091 BUCKET_PRIORITY_HIGH_INT = 8 BUCKET_PRIORITY_HIGH_STR = "high" BUCKET_PRIORITY_LOW_INT = 3 BUCKET_PRIORITY_LOW_STR = "low" BUCKET_TYPE_COUCHBASE = "membase" BUCKET_TYPE_MEMCACHED = "memcached" CB_BIN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "bin")) CB_ETC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "etc", "couchbase")) CB_LIB_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lib")) if os.name == 'nt': CB_NS_EBIN_PATH = os.path.join(CB_LIB_PATH, "ns_server", "ebin") CB_BABYSITTER_EBIN_PATH = os.path.join(CB_LIB_PATH, "ns_babysitter", "ebin") else: CB_NS_EBIN_PATH = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_server", "ebin") CB_BABYSITTER_EBIN_PATH = os.path.join(CB_LIB_PATH, "ns_server", "erlang", "lib", "ns_babysitter", "ebin") inetrc_file = os.path.join(CB_ETC_PATH, 'hosts.cfg') if os.path.isfile(inetrc_file): inetrc_file = inetrc_file.encode('unicode-escape').decode() CB_INETRC_OPT = ['inetrc', f'"{inetrc_file}"'] else: CB_INETRC_OPT = [] if platform.system() == "Darwin": CB_CFG_PATH = os.path.expanduser("~/Library/Application Support/Couchbase/var/lib/couchbase") else: CB_CFG_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "var", "lib", "couchbase")) CB_MAN_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "share")) if os.name == "nt": CB_MAN_PATH = os.path.join(CB_MAN_PATH, "doc", "couchbase-cli") else: CB_MAN_PATH = os.path.join(CB_MAN_PATH, "man", "man1") def get_doc_page_name(command: str) -> str: return f'{command}.{"1" if os.name != "nt" else "html"}' def remove_prefix(val: str, prefix: str) -> str: return val[len(prefix):] if val.startswith(prefix) else val def force_communicate_tls(rest: ClusterManager) -> bool: settings, err = rest.get_security_settings() _exit_if_errors(err) if 'clusterEncryptionLevel' not in settings or settings['clusterEncryptionLevel'] != 'strict': return False _warning("sub-command requires multi-node communication via TLS enabled ports, '--cacert' or " "'--no-ssl-verify' may need to be supplied") return True def rest_initialiser(cluster_init_check=False, version_check=False, enterprise_check=None): def inner(fn): def decorator(self, opts): _exit_if_errors(validate_credential_flags(opts.cluster, opts.username, opts.password, opts.client_ca, opts.client_ca_password, opts.client_pk, opts.client_pk_password)) try: self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify, opts.cacert, opts.debug, client_ca=opts.client_ca, client_ca_password=opts.client_ca_password, client_pk=opts.client_pk, client_pk_password=opts.client_pk_password) except X509AdapterError as error: _exit_if_errors([f"failed to setup client certificate encryption, {error}"]) if cluster_init_check: 
check_cluster_initialized(self.rest) if version_check: check_versions(self.rest) if enterprise_check is not None: enterprise, errors = self.rest.is_enterprise() _exit_if_errors(errors) if enterprise_check and not enterprise: _exit_if_errors(['Command only available in enterprise edition']) self.enterprise = enterprise return fn(self, opts) return decorator return inner def validate_credential_flags(host, username, password, client_ca, client_ca_password, client_pk, client_pk_password): using_cert_auth = not (client_ca is None and client_ca_password is None and client_pk is None and client_pk_password is None) if using_cert_auth: return validate_certificate_flags( host, username, password, client_ca, client_ca_password, client_pk, client_pk_password) if (username is None and password is None): return ["cluster credentials required, expected --username/--password or --client-cert/--client-key"] if (username is None or password is None): return ["the --username/--password flags must be supplied together"] return None def validate_certificate_flags(host, username, password, client_ca, client_ca_password, client_pk, client_pk_password): if username is not None or password is not None: return ["expected either --username and --password or --client-cert and --client-key but not both"] if not (host.startswith("https://") or host.startswith("couchbases://")): return ["certificate authentication requires a secure connection, use https:// or couchbases://"] if client_ca is None: return ["certificate authentication requires a certificate to be supplied with the --client-cert flag"] if client_ca_password is not None and client_pk_password is not None: return ["--client-cert-password and --client-key-password can't be supplied together"] unencrypted = client_ca_password is None and client_pk_password is None if unencrypted and (client_ca is None or client_pk is None): return ["when no cert/key password is provided, the --client-cert/--client-key flags must be supplied together"] if client_pk_password is not None and client_pk is None: return ["--client-key-password provided without --client-key"] return None def check_cluster_initialized(rest): initialized, errors = rest.is_cluster_initialized() if errors: _exit_if_errors(errors) if not initialized: _exit_if_errors(["Cluster is not initialized, use cluster-init to initialize the cluster"]) def check_versions(rest): result, errors = rest.pools() if errors: return server_version = result['implementationVersion'] if server_version is None or VERSION is None: return major_couch = server_version[: server_version.index('.')] minor_couch = server_version[server_version.index('.') + 1: server_version.index('.', len(major_couch) + 1)] major_cli = VERSION[: VERSION.index('.')] minor_cli = VERSION[VERSION.index('.') + 1: VERSION.index('.', len(major_cli) + 1)] if major_cli != major_couch or minor_cli != minor_couch: _warning(f'couchbase-cli version {VERSION} does not match couchbase server version {server_version}') def index_storage_mode_to_param(value, default="plasma"): if value == "default": return default if value == "memopt": return "memory_optimized" return value def process_services(services, enterprise): sep = "," if services.find(sep) < 0: sep = ";" svc_set = set([w.strip() for w in services.split(sep)]) svc_candidate = ["data", "index", "query", "fts", "eventing", "analytics", "backup"] for svc in svc_set: if svc not in svc_candidate: return None, [f'`{svc}` is not a valid service'] if not enterprise and svc in ["eventing", "analytics", "backup"]: 
return None, [f'{svc} service is only available on Enterprise Edition'] if not enterprise: ce_svc_30 = set(["data"]) ce_svc_40 = set(["data", "index", "query"]) ce_svc_45 = set(["data", "index", "query", "fts"]) if svc_set not in [ce_svc_30, ce_svc_40, ce_svc_45]: return None, [f"Invalid service configuration. Community Edition only supports nodes with the following" f" combinations of services: '{''.join(ce_svc_30)}', '{','.join(ce_svc_40)}' or " f"'{','.join(ce_svc_45)}'"] services = ",".join(svc_set) for old, new in [[";", ","], ["data", "kv"], ["query", "n1ql"], ["analytics", "cbas"]]: services = services.replace(old, new) return services, None
Apache License 2.0
simsso/nips-2018-adversarial-vision-challenge
experiments/mnist/src/main.py
main
python
def main(args=None):
    tf.logging.set_verbosity(tf.logging.INFO)
    img, adv = attack.get_attack_batch(train.MODEL_NAME, 100)
    pert.run_analysis(train.MODEL_NAME, img, adv)
This module package contains functionality for three different things:
* training a CNN on MNIST and storing the weights (+ logging to TensorBoard)
* loading the weights and analyzing the classification of linear combinations between inputs
* loading the weights and computing an adversarial example using FGSM
https://github.com/simsso/nips-2018-adversarial-vision-challenge/blob/b32f97b4e9ea98fed20a345f66e1dcbfa787edd5/experiments/mnist/src/main.py#L8-L20
import attack as attack
import tensorflow as tf
import train
import linear_combination as lc
import layerwise_perturbation as pert
MIT License
eggplants/deepl-cli
deepl/deepl.py
DeepLCLI._chk_stdin
python
def _chk_stdin(self) -> None:
    if (sys.stdin.isatty() and len(sys.argv) == 1) or '-h' in sys.argv:
        self.usage()
        sys.tracebacklimit = 0
        raise DeepLCLIArgCheckingError('show help.')
    elif sys.stdin.isatty():
        raise DeepLCLIArgCheckingError('stdin seems to be empty.')
Check if stdin is entered.
https://github.com/eggplants/deepl-cli/blob/a0b5ad5ab8baf0a4025a40411366b8f3659205a1/deepl/deepl.py#L64-L73
import asyncio import sys from textwrap import dedent from typing import List, Optional, Tuple from urllib.parse import quote from urllib.request import urlopen from pyppeteer.browser import Browser from pyppeteer.errors import TimeoutError from pyppeteer.launcher import launch from pyppeteer.page import Page class DeepLCLIArgCheckingError(Exception): pass class DeepLCLIPageLoadError(Exception): pass class DeepLCLI: def __init__(self, langs: Optional[Tuple[str, str]] = None) -> None: if langs: self.fr_lang, self.to_lang = self._chk_lang(langs) self.max_length = 5000 def usage(self) -> None: print(dedent('''\ $ deepl SYNTAX: $ ... | deepl <from:lang>:<to:lang> $ deepl <from:lang>:<to:lang> << 'EOS' ... EOS $ deepl <from:lang>:<to:lang> <<< "..." $ deepl <from:lang>:<to:lang> < <filepath> USAGE: $ echo Hello | deepl en:ja $ deepl :ru << 'EOS' # :ru is equivalent of auto:ru good morning! good night. EOS $ deepl fr:zh <<< "Mademoiselle" $ deepl de:pl < README_de.md LANGUAGE CODES: <from:lang>: {auto it et nl el sv es sk sl cs da de hu fi fr bg pl pt lv lt ro ru en zh ja} <to:lang>: {it et nl el sv es sk sl cs da de hu fi fr bg pl pt lv lt ro ru en zh ja} ''')) def internet_on(self) -> bool: try: urlopen('https://www.google.com/', timeout=10) return True except IOError: return False
MIT License
geopandas/geopandas
geopandas/geoseries.py
GeoSeries.fillna
python
def fillna(self, value=None, method=None, inplace=False, **kwargs):
    if value is None:
        value = BaseGeometry()
    return super().fillna(value=value, method=method, inplace=inplace, **kwargs)
Fill NA values with a geometry (empty polygon by default).

"method" is currently not implemented for pandas <= 0.12.

Examples
--------
>>> from shapely.geometry import Polygon
>>> s = geopandas.GeoSeries(
...     [
...         Polygon([(0, 0), (1, 1), (0, 1)]),
...         None,
...         Polygon([(0, 0), (-1, 1), (0, -1)]),
...     ]
... )
>>> s
0    POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1                                                 None
2    POLYGON ((0.00000 0.00000, -1.00000 1.00000, 0...
dtype: geometry

>>> s.fillna()
0    POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1             GEOMETRYCOLLECTION EMPTY
2    POLYGON ((0.00000 0.00000, -1.00000 1.00000, 0...
dtype: geometry

>>> s.fillna(Polygon([(0, 1), (2, 1), (1, 2)]))
0    POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1    POLYGON ((0.00000 1.00000, 2.00000 1.00000, 1....
2    POLYGON ((0.00000 0.00000, -1.00000 1.00000, 0...
dtype: geometry

See Also
--------
GeoSeries.isna : detect missing values
https://github.com/geopandas/geopandas/blob/04d377f321972801888381356cb6259766eb63b6/geopandas/geoseries.py#L754-L794
import json import warnings import numpy as np import pandas as pd from pandas import Series, MultiIndex from pandas.core.internals import SingleBlockManager from pyproj import CRS from shapely.geometry.base import BaseGeometry from geopandas.base import GeoPandasBase, _delegate_property from geopandas.plotting import plot_series from geopandas.explore import _explore_geoseries import geopandas from . import _compat as compat from ._decorator import doc from .array import ( GeometryDtype, from_shapely, from_wkb, from_wkt, points_from_xy, to_wkb, to_wkt, ) from .base import is_geometry_type _SERIES_WARNING_MSG = """\ You are passing non-geometry data to the GeoSeries constructor. Currently, it falls back to returning a pandas Series. But in the future, we will start to raise a TypeError instead.""" def _geoseries_constructor_with_fallback(data=None, index=None, crs=None, **kwargs): try: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message=_SERIES_WARNING_MSG, category=FutureWarning, module="geopandas[.*]", ) return GeoSeries(data=data, index=index, crs=crs, **kwargs) except TypeError: return Series(data=data, index=index, **kwargs) class GeoSeries(GeoPandasBase, Series): _metadata = ["name"] def __new__(cls, data=None, index=None, crs=None, **kwargs): if hasattr(data, "crs") and crs: if not data.crs: data = data.copy() else: if not data.crs == crs: warnings.warn( "CRS mismatch between CRS of the passed geometries " "and 'crs'. Use 'GeoDataFrame.set_crs(crs, " "allow_override=True)' to overwrite CRS or " "'GeoSeries.to_crs(crs)' to reproject geometries. " "CRS mismatch will raise an error in the future versions " "of GeoPandas.", FutureWarning, stacklevel=2, ) if isinstance(data, SingleBlockManager): if isinstance(data.blocks[0].dtype, GeometryDtype): if data.blocks[0].ndim == 2: from pandas.core.internals import ExtensionBlock values = data.blocks[0].values block = ExtensionBlock(values, slice(0, len(values), 1), ndim=1) data = SingleBlockManager([block], data.axes[0], fastpath=True) self = super(GeoSeries, cls).__new__(cls) super(GeoSeries, self).__init__(data, index=index, **kwargs) self.crs = getattr(self.values, "crs", crs) return self warnings.warn(_SERIES_WARNING_MSG, FutureWarning, stacklevel=2) return Series(data, index=index, **kwargs) if isinstance(data, BaseGeometry): n = len(index) if index is not None else 1 data = [data] * n name = kwargs.pop("name", None) if not is_geometry_type(data): kwargs.pop("dtype", None) with compat.ignore_shapely2_warnings(): empty_msg = "The default dtype for empty Series" warnings.filterwarnings("ignore", empty_msg, DeprecationWarning) warnings.filterwarnings("ignore", empty_msg, FutureWarning) s = pd.Series(data, index=index, name=name, **kwargs) if s.dtype != object: if (s.empty and s.dtype == "float64") or data is None: s = s.astype(object) else: warnings.warn(_SERIES_WARNING_MSG, FutureWarning, stacklevel=2) return s try: data = from_shapely(s.values, crs) except TypeError: warnings.warn(_SERIES_WARNING_MSG, FutureWarning, stacklevel=2) return s index = s.index name = s.name self = super(GeoSeries, cls).__new__(cls) super(GeoSeries, self).__init__(data, index=index, name=name, **kwargs) if not self.crs: self.crs = crs return self def __init__(self, *args, **kwargs): pass def append(self, *args, **kwargs): return self._wrapped_pandas_method("append", *args, **kwargs) @property def geometry(self): return self @property def x(self): return _delegate_property("x", self) @property def y(self): return _delegate_property("y", 
self) @property def z(self): return _delegate_property("z", self) @classmethod def from_file(cls, filename, **kwargs): from geopandas import GeoDataFrame df = GeoDataFrame.from_file(filename, **kwargs) return GeoSeries(df.geometry, crs=df.crs) @classmethod def from_wkb(cls, data, index=None, crs=None, **kwargs): return cls._from_wkb_or_wkb(from_wkb, data, index=index, crs=crs, **kwargs) @classmethod def from_wkt(cls, data, index=None, crs=None, **kwargs): return cls._from_wkb_or_wkb(from_wkt, data, index=index, crs=crs, **kwargs) @classmethod def from_xy(cls, x, y, z=None, index=None, crs=None, **kwargs): if index is None: if ( isinstance(x, Series) and isinstance(y, Series) and x.index.equals(y.index) and (z is None or (isinstance(z, Series) and x.index.equals(z.index))) ): index = x.index return cls(points_from_xy(x, y, z, crs=crs), index=index, crs=crs, **kwargs) @classmethod def _from_wkb_or_wkb( cls, from_wkb_or_wkt_function, data, index=None, crs=None, **kwargs ): if isinstance(data, Series): if index is not None: data = data.reindex(index) else: index = data.index data = data.values return cls(from_wkb_or_wkt_function(data, crs=crs), index=index, **kwargs) @property def __geo_interface__(self): from geopandas import GeoDataFrame return GeoDataFrame({"geometry": self}).__geo_interface__ def to_file(self, filename, driver=None, index=None, **kwargs): from geopandas import GeoDataFrame data = GeoDataFrame({"geometry": self}, index=self.index) data.crs = self.crs data.to_file(filename, driver, index=index, **kwargs) @property def _constructor(self): return _geoseries_constructor_with_fallback @property def _constructor_expanddim(self): from geopandas import GeoDataFrame return GeoDataFrame def _wrapped_pandas_method(self, mtd, *args, **kwargs): val = getattr(super(), mtd)(*args, **kwargs) if type(val) == Series: val.__class__ = GeoSeries val.crs = self.crs return val def __getitem__(self, key): return self._wrapped_pandas_method("__getitem__", key) @doc(pd.Series) def sort_index(self, *args, **kwargs): return self._wrapped_pandas_method("sort_index", *args, **kwargs) @doc(pd.Series) def take(self, *args, **kwargs): return self._wrapped_pandas_method("take", *args, **kwargs) @doc(pd.Series) def select(self, *args, **kwargs): return self._wrapped_pandas_method("select", *args, **kwargs) @doc(pd.Series) def apply(self, func, convert_dtype=True, args=(), **kwargs): result = super().apply(func, convert_dtype=convert_dtype, args=args, **kwargs) if isinstance(result, GeoSeries): if self.crs is not None: result.set_crs(self.crs, inplace=True) return result def __finalize__(self, other, method=None, **kwargs): for name in self._metadata: object.__setattr__(self, name, getattr(other, name, None)) return self def isna(self): if self.is_empty.any(): warnings.warn( "GeoSeries.isna() previously returned True for both missing (None) " "and empty geometries. Now, it only returns True for missing values. " "Since the calling GeoSeries contains empty geometries, the result " "has changed compared to previous versions of GeoPandas.\n" "Given a GeoSeries 's', you can use 's.is_empty | s.isna()' to get " "back the old behaviour.\n\n" "To further ignore this warning, you can do: \n" "import warnings; warnings.filterwarnings('ignore', 'GeoSeries.isna', " "UserWarning)", UserWarning, stacklevel=2, ) return super().isna() def isnull(self): return self.isna() def notna(self): if self.is_empty.any(): warnings.warn( "GeoSeries.notna() previously returned False for both missing (None) " "and empty geometries. 
Now, it only returns False for missing values. " "Since the calling GeoSeries contains empty geometries, the result " "has changed compared to previous versions of GeoPandas.\n" "Given a GeoSeries 's', you can use '~s.is_empty & s.notna()' to get " "back the old behaviour.\n\n" "To further ignore this warning, you can do: \n" "import warnings; warnings.filterwarnings('ignore', " "'GeoSeries.notna', UserWarning)", UserWarning, stacklevel=2, ) return super().notna() def notnull(self): return self.notna()
BSD 3-Clause New or Revised License
kivy/python-for-android
tests/test_pythonpackage_basic.py
TestGetSystemPythonExecutable.run__get_system_python_executable
python
def run__get_system_python_executable(self, pybin):
    cmd = [
        pybin,
        "-c",
        "import importlib\n"
        "import json\n"
        "import os\n"
        "import sys\n"
        "sys.path = [os.path.dirname(sys.argv[1])] + sys.path\n"
        "m = importlib.import_module(\n"
        " os.path.basename(sys.argv[1]).partition('.')[0]\n"
        ")\n"
        "print(m._get_system_python_executable())",
        os.path.join(os.path.dirname(__file__), "..",
                     "pythonforandroid", "pythonpackage.py"),
    ]
    try:
        return subprocess.check_output(
            cmd, stderr=subprocess.STDOUT
        ).decode("utf-8", "replace").strip()
    except subprocess.CalledProcessError as e:
        raise RuntimeError("call failed, with output: " + str(e.output))
Helper function to run our function. We want to see what _get_system_python_executable() does given a specific python, so we need to make it import it and run it, with that TARGET python, which this function does.
https://github.com/kivy/python-for-android/blob/3a9bcabd91aa498982ab42ef7e59846f90df25d7/tests/test_pythonpackage_basic.py#L228-L256
import os import shutil import sys import subprocess import tempfile import textwrap from unittest import mock from pythonforandroid.pythonpackage import ( _extract_info_from_package, get_dep_names_of_package, get_package_name, _get_system_python_executable, is_filesystem_path, parse_as_folder_reference, transform_dep_for_pip, ) def local_repo_folder(): return os.path.abspath(os.path.join( os.path.dirname(__file__), ".." )) def fake_metadata_extract(dep_name, output_folder, debug=False): with open(os.path.join(output_folder, "METADATA"), "w") as f: f.write(textwrap.dedent("""\ Metadata-Version: 2.1 Name: testpackage Version: 0.1 Requires-Dist: testpkg Requires-Dist: testpkg2 Lorem Ipsum""" )) with open(os.path.join(output_folder, "metadata_source"), "w") as f: f.write(u"wheel") def test__extract_info_from_package(): import pythonforandroid.pythonpackage with mock.patch("pythonforandroid.pythonpackage." "extract_metainfo_files_from_package", fake_metadata_extract): assert _extract_info_from_package( "whatever", extract_type="name" ) == "testpackage" assert set(_extract_info_from_package( "whatever", extract_type="dependencies" )) == {"testpkg", "testpkg2"} def test_get_package_name(): with mock.patch("pythonforandroid.pythonpackage." "extract_metainfo_files_from_package", fake_metadata_extract): assert get_package_name("TeStPackaGe") == "testpackage" temp_d = tempfile.mkdtemp(prefix="p4a-pythonpackage-test-tmp-") try: with open(os.path.join(temp_d, "setup.py"), "w") as f: f.write(textwrap.dedent("""\ from setuptools import setup setup(name="testpackage") """ )) pkg_name = get_package_name(temp_d) assert pkg_name == "testpackage" finally: shutil.rmtree(temp_d) def test_get_dep_names_of_package(): dep_names = get_dep_names_of_package("python-for-android") assert "colorama" in dep_names assert "setuptools" not in dep_names try: dep_names = get_dep_names_of_package( "python-for-android", include_build_requirements=True, verbose=True, ) except NotImplementedError as e: assert "wheel" in str(e) else: assert "setuptools" in dep_names assert "colorama" in get_dep_names_of_package(local_repo_folder()) test_fake_package = tempfile.mkdtemp() try: with open(os.path.join(test_fake_package, "setup.py"), "w") as f: f.write(textwrap.dedent("""\ from setuptools import setup setup(name='fakeproject', description='fake for testing', install_requires=['buildozer==0.39', 'python-for-android>=0.5.1'], ) """)) assert set(get_dep_names_of_package( test_fake_package, recursive=False, keep_version_pins=True, verbose=True )) == {"buildozer==0.39", "python-for-android"} assert set(get_dep_names_of_package( test_fake_package, recursive=False, keep_version_pins=False, verbose=True )) == {"buildozer", "python-for-android"} dep_names = get_dep_names_of_package( test_fake_package, recursive=False, keep_version_pins=False, verbose=True, include_build_requirements=True ) assert len( {"buildozer", "python-for-android", "setuptools"}.intersection( dep_names ) ) == 3 finally: shutil.rmtree(test_fake_package) def test_transform_dep_for_pip(): transformed = ( transform_dep_for_pip( "python-for-android @ https://github.com/kivy/" + "python-for-android/archive/master.zip" ), transform_dep_for_pip( "python-for-android @ https://github.com/kivy/" + "python-for-android/archive/master.zip" + "#egg=python-for-android-master" ), transform_dep_for_pip( "python-for-android @ https://github.com/kivy/" + "python-for-android/archive/master.zip" + "#" ), ) expected = ( "https://github.com/kivy/python-for-android/archive/master.zip" + 
"#egg=python-for-android" ) assert transformed == (expected, expected, expected) assert transform_dep_for_pip("https://a@b/") == "https://a@b/" def test_is_filesystem_path(): assert is_filesystem_path("/some/test") assert not is_filesystem_path("https://blubb") assert not is_filesystem_path("test @ bla") assert is_filesystem_path("/abc/c@d") assert not is_filesystem_path("https://user:pw@host/") assert is_filesystem_path(".") assert is_filesystem_path("") def test_parse_as_folder_reference(): assert parse_as_folder_reference("file:///a%20test") == "/a test" assert parse_as_folder_reference("https://github.com") is None assert parse_as_folder_reference("/a/folder") == "/a/folder" assert parse_as_folder_reference("test @ /abc") == "/abc" assert parse_as_folder_reference("test @ https://bla") is None class TestGetSystemPythonExecutable(): def test_basic(self): pybin = _get_system_python_executable() pyversion = subprocess.check_output([ pybin, "-c", "import sys; print(sys.version)" ], stderr=subprocess.STDOUT).decode("utf-8", "replace") assert pyversion.strip() == sys.version.strip()
MIT License
arslan-chaudhry/dcsp_segmentation
kaffe/tensorflow/network.py
Network.load
python
def load(self, data_path, session, ignore_missing=False):
    data_dict = np.load(data_path).item()
    for op_name in data_dict:
        with tf.variable_scope(op_name, reuse=True):
            for param_name, data in data_dict[op_name].iteritems():
                try:
                    var = tf.get_variable(param_name)
                    session.run(var.assign(data))
                except ValueError:
                    if not ignore_missing:
                        raise
Load network weights.

data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
https://github.com/arslan-chaudhry/dcsp_segmentation/blob/dd510695774fdabd36909501dac82f0f930ef04c/kaffe/tensorflow/network.py#L54-L69
import numpy as np import tensorflow as tf slim = tf.contrib.slim DEFAULT_PADDING = 'SAME' def layer(op): def layer_decorated(self, *args, **kwargs): name = kwargs.setdefault('name', self.get_unique_name(op.__name__)) if len(self.terminals) == 0: raise RuntimeError('No input variables found for layer %s.' % name) elif len(self.terminals) == 1: layer_input = self.terminals[0] else: layer_input = list(self.terminals) layer_output = op(self, layer_input, *args, **kwargs) self.layers[name] = layer_output self.feed(layer_output) return self return layer_decorated class Network(object): def __init__(self, inputs, trainable=True, is_training=False): self.inputs = inputs self.terminals = [] self.layers = dict(inputs) self.trainable = trainable self.use_dropout = tf.placeholder_with_default(tf.constant(1.0), shape=[], name='use_dropout') self.setup(is_training) def setup(self, is_training): raise NotImplementedError('Must be implemented by the subclass.')
MIT License
ibm/compliance-trestle
tests/conftest.py
sample_nist_component_def
python
def sample_nist_component_def() -> ComponentDefinition:
    component_obj = ComponentDefinition.oscal_read(test_utils.NIST_SAMPLE_CD_JSON)
    return component_obj
Return a rich component definition object, from the NIST content repository.
https://github.com/ibm/compliance-trestle/blob/ae95c25dd1e3fec3c86ea34cd9981ddca65bf6d4/tests/conftest.py#L91-L94
import os import pathlib import random import string import sys from typing import Iterator from uuid import uuid4 from _pytest.monkeypatch import MonkeyPatch import pytest from tests import test_utils import trestle.core.generators as gens import trestle.oscal.common as common from trestle.cli import Trestle from trestle.core.err import TrestleError from trestle.oscal import catalog as cat from trestle.oscal.component import ComponentDefinition, DefinedComponent from trestle.oscal.profile import Profile TEST_CONFIG: dict = {} @pytest.fixture(scope='function') def rand_str(): rand_str = ''.join(random.choice(string.ascii_letters) for x in range(16)) return rand_str @pytest.fixture(scope='function') def tmp_file(tmp_path): return pathlib.Path(tmp_path) / f'{uuid4()}' @pytest.fixture(scope='session') def tmp_fixed_file(tmp_path): return pathlib.Path(tmp_path) / 'fixed_file' @pytest.fixture(scope='function') def tmp_yaml_file(tmp_path): return pathlib.Path(tmp_path) / f'{uuid4()}.yaml' @pytest.fixture(scope='function') def tmp_json_file(tmp_path): return pathlib.Path(tmp_path) / f'{uuid4()}.json' @pytest.fixture(scope='function') def tmp_xml_file(tmp_path): return pathlib.Path(tmp_path) / f'{uuid4()}.xml' @pytest.fixture(scope='module') def yaml_testdata_path() -> pathlib.Path: return pathlib.Path(test_utils.YAML_TEST_DATA_PATH) @pytest.fixture(scope='module') def json_testdata_path() -> pathlib.Path: return pathlib.Path(test_utils.JSON_TEST_DATA_PATH) @pytest.fixture(scope='function')
Apache License 2.0
constantinpape/elf
elf/evaluation/cremi_score.py
cremi_score
python
def cremi_score(segmentation, groundtruth, ignore_seg=None, ignore_gt=None):
    ignore_mask = compute_ignore_mask(segmentation, groundtruth, ignore_seg, ignore_gt)
    if ignore_mask is not None:
        segmentation = segmentation[ignore_mask]
        groundtruth = groundtruth[ignore_mask]
    else:
        segmentation = segmentation.ravel()
        groundtruth = groundtruth.ravel()
    a_dict, b_dict, p_ids, p_counts = contigency_table(groundtruth, segmentation)
    n_points = segmentation.size
    vis, vim = compute_vi_scores(a_dict, b_dict, p_ids, p_counts, n_points, use_log2=True)
    ari, _ = compute_rand_scores(a_dict, b_dict, p_counts, n_points)
    cs = np.sqrt(ari * (vis + vim))
    return vis, vim, ari, cs
Computes cremi scores between two segmentations

Arguments:
    segmentation [np.ndarray] - candidate segmentation to evaluate
    groundtruth [np.ndarray] - groundtruth
    ignore_seg [listlike] - ignore ids for segmentation (default: None)
    ignore_gt [listlike] - ignore ids for groundtruth (default: None)
Returns:
    float - vi-split
    float - vi-merge
    float - adapted rand error
    float - cremi score
https://github.com/constantinpape/elf/blob/f423ea0815949533933bd169b58a464bb7f3bbc0/elf/evaluation/cremi_score.py#L7-L46
import numpy as np from .util import compute_ignore_mask, contigency_table from .rand_index import compute_rand_scores from .variation_of_information import compute_vi_scores
MIT License
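A minimal usage sketch for cremi_score, assuming it is importable from the module path listed above (elf/evaluation/cremi_score.py); the toy label arrays are illustrative only:

import numpy as np
from elf.evaluation.cremi_score import cremi_score

# Two small toy label images: a candidate segmentation and its groundtruth.
segmentation = np.array([[1, 1, 2],
                         [1, 2, 2]])
groundtruth = np.array([[1, 1, 1],
                        [2, 2, 2]])

# Returns vi-split, vi-merge, adapted rand error and the combined cremi score.
vis, vim, ari, cs = cremi_score(segmentation, groundtruth)
print(vis, vim, ari, cs)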
univ-of-utah-marriott-library-apple/display_manager
display_manager.py
Command.__handleUnderscan
python
def __handleUnderscan(self):
    for display in self.scope:
        display.setUnderscan(self.underscan)
Sets or shows a display's underscan settings.
https://github.com/univ-of-utah-marriott-library-apple/display_manager/blob/303cd954606a4c8404007640db34d2843fa69c18/display_manager.py#L525-L530
import sys import re import collections from display_manager_lib import * class CommandSyntaxError(Exception): def __init__(self, message, verb=None): self.message = message self.verb = verb Exception.__init__(self, self.message) class CommandValueError(Exception): def __init__(self, message, verb=None): self.message = message self.verb = verb Exception.__init__(self, self.message) class CommandExecutionError(Exception): def __init__(self, message, command=None): self.message = message self.command = command Exception.__init__(self, self.message) class Command(object): def __init__(self, **kwargs): if "verb" in kwargs: if kwargs["verb"] in ["help", "show", "res", "brightness", "rotate", "underscan", "mirror"]: self.verb = kwargs["verb"] else: raise CommandSyntaxError("\"{}\" is not a valid command".format(kwargs["verb"])) else: self.verb = None self.subcommand = kwargs["subcommand"] if "subcommand" in kwargs else None if "scope" in kwargs: if isinstance(kwargs["scope"], list): self.scope = kwargs["scope"] elif isinstance(kwargs["scope"], AbstractDisplay): self.scope = [kwargs["scope"]] else: self.scope = None else: self.scope = None self.width = int(kwargs["width"]) if "width" in kwargs else None self.height = int(kwargs["height"]) if "height" in kwargs else None self.refresh = int(kwargs["refresh"]) if "refresh" in kwargs else None self.hidpi = int(kwargs["hidpi"]) if "hidpi" in kwargs else None self.angle = int(kwargs["angle"]) if "angle" in kwargs else None self.brightness = float(kwargs["brightness"]) if "brightness" in kwargs else None self.underscan = float(kwargs["underscan"]) if "underscan" in kwargs else None self.source = kwargs["source"] if "source" in kwargs else None getIOKit() def __str__(self): stringList = [self.verb] if self.subcommand: stringList.append(self.subcommand) if self.verb == "res": if self.width and self.height: stringList.append(self.width) stringList.append(self.height) elif self.verb == "rotate": stringList.append(self.angle) elif self.verb == "brightness": stringList.append(self.brightness) elif self.verb == "underscan": stringList.append(self.underscan) elif self.verb == "mirror" and self.subcommand == "enable": stringList.append(self.source.tag) if self.verb == "show" or self.verb == "res": if self.hidpi == 1: stringList.append("no-hidpi") elif self.hidpi == 2: stringList.append("only-hidpi") if self.verb == "res": if self.refresh: stringList.append("refresh {}".format(self.refresh)) if self.scope: if len(self.scope) == len(getAllDisplays()): stringList.append("all") else: for display in sorted(self.scope): stringList.append(display.tag) else: if ( self.verb == "res" or self.verb == "rotate" or self.verb == "brightness" or self.verb == "underscan" ): stringList.append("main") elif ( self.verb == "show" or (self.verb == "mirror" and self.subcommand == "disable") ): stringList.append("all") for i in range(len(stringList)): stringList[i] = str(stringList[i]) return " ".join(stringList) def __eq__(self, other): def safeScopeCheckEquals(a, b): if a.scope and b.scope: return set(a.scope) == set(b.scope) else: return a.scope == b.scope if isinstance(other, self.__class__): return all([ isinstance(other, self.__class__), self.verb == other.verb, self.subcommand == other.subcommand, safeScopeCheckEquals(self, other), self.width == other.width, self.height == other.height, self.refresh == other.refresh, self.hidpi == other.hidpi, self.angle == other.angle, self.brightness == other.brightness, self.underscan == other.underscan, self.source == other.source, ]) else: 
return NotImplemented def __ne__(self, other): if isinstance(other, self.__class__): return not self.__eq__(other) else: return NotImplemented def __lt__(self, other): if self.__eq__(other): return False else: return self.__str__().lower() < self.__str__().lower() def __gt__(self, other): if self.__eq__(other): return False else: return self.__str__().lower() > self.__str__().lower() def __hash__(self): return hash(self.__str__()) def run(self): try: if self.verb == "help": self.__handleHelp() elif self.verb == "show": self.__handleShow() elif self.verb == "res": self.__handleRes() elif self.verb == "rotate": self.__handleRotate() elif self.verb == "brightness": self.__handleBrightness() elif self.verb == "underscan": self.__handleUnderscan() elif self.verb == "mirror": self.__handleMirror() except DisplayError as e: raise CommandExecutionError(e.message, command=self) def __handleHelp(self): helpTypes = { "usage": "\n".join([ "usage: display_manager.py <command>", "", "COMMANDS (required)", " help Show help information about a command", " show Show current/available display configurations", " res Manage display resolution", " brightness Manage display brightness", " rotate Manage display rotation", " underscan Manage display underscan", " mirror Manage screen mirroring", ]), "help": "\n".join([ "usage: display_manager.py help <command>", "", "COMMANDS (required)", " help Show help information about a command", " show Show current/available display configurations", " res Manage display resolution and refresh rate", " brightness Manage display brightness", " rotate Manage display rotation", " underscan Manage display underscan", " mirror Manage screen mirroring", ]), "show": "\n".join([ "usage: display_manager.py show [subcommand] [options] [scope...]", "", "SUBCOMMANDS (optional)", " current (default) Show the current display configuration", " default Apple's recommended default configuration", " highest Show the highest available configuration", " available Show all available configurations", "", "OPTIONS (optional; only applies to \"available\")", " no-hidpi Don\'t show HiDPI resolutions", " only-hidpi Only show HiDPI resolutions", "", " (Note: by default, both HiDPI and non-HiDPI resolutions are shown)", "", "SCOPE (optional)", " main Perform this command on the main display", " ext<N> Perform this command on external display number <N>", " all (default) Perform this command on all connected displays", ]), "res": "\n".join([ "usage: display_manager.py res <resolution> [refresh] [options] [scope...]", "", "RESOLUTION (required)", " default Apple's recommended default configuration", " highest Set the display to the highest available configuration", " <width> <height> Width and height (in pixels)", " (Note: width and height must be separated by at least one space)", "", "REFRESH (not used by \"default\" or \"highest\" resolution; optional otherwise)", " <refresh> Refresh rate (in Hz)", " (Note: if refresh rate is not specified, it will default to a rate that is " "available at the desired resolution, if possible)", "", "OPTIONS (optional)", " no-hidpi Don\'t set to HiDPI resolutions", " only-hidpi Only set to HiDPI resolutions", "", " (Note: by default, both HiDPI and non-HiDPI resolutions are shown)", "", "SCOPE (optional)", " main (default) Perform this command on the main display", " ext<N> Perform this command on external display number <N>", " all Perform this command on all connected displays", ]), "rotate": "\n".join([ "usage: display_manager.py rotate <angle> [scope...]", "", 
"ANGLE (required)", " <angle> Desired display rotation; must be a multiple of 90", "", "SCOPE (optional)", " main (default) Perform this command on the main display", " ext<N> Perform this command on external display number <N>", " all Perform this command on all connected displays", ]), "brightness": "\n".join([ "usage: display_manager.py brightness <brightness> [scope...]", "", "BRIGHTNESS (required)", " <brightness> A number between 0 and 1 (inclusive); " "0 is minimum brightness, and 1 is maximum brightness", "", "SCOPE (optional)", " main (default) Perform this command on the main display", " ext<N> Perform this command on external display number <N>", " all Perform this command on all connected displays", ]), "underscan": "\n".join([ "usage: display_manager.py underscan <underscan> [scope...]", "", "UNDERSCAN (required)", " <underscan> A number between 0 and 1 (inclusive); " "0 is minimum underscan, and 1 is maximum underscan", "", "SCOPE (optional)", " main (default) Perform this command on the main display", " ext<N> Perform this command on external display number <N>", " all Perform this command on all connected displays", ]), "mirror": "\n".join([ "usage: display_manager.py mirror enable <source> <target...>", " or: display_manager.py mirror disable [scope...]", "", "SUBCOMMANDS (required)", " enable Set <target> to mirror <source>", " disable Disable mirroring on <scope>", "", "SOURCE/TARGET(S) (not used by \"disable\"; required for \"enable\")", " source The display which will be mirrored by the target(s); " "must be a single element of <SCOPE> (see below); cannot be \"all\"", " target(s) The display(s) which will mirror the source; " "must be an element of <SCOPE> (see below)", "", "SCOPE", " main The main display", " ext<N> External display number <N>", " all (default scope for \"disable\")", " For <enable>: all connected displays besides <source>; only available to <target>", " For <disable>: all connected displays", ])} if self.subcommand in helpTypes: print(helpTypes[self.subcommand]) else: print(helpTypes["usage"]) def __handleShow(self): for i, display in enumerate(self.scope): print("display \"{0}\":".format(display.tag)) if self.subcommand == "current": current = display.currentMode print(current.bigString) if display.rotation is not None: print("rotation: {}".format(display.rotation)) if display.brightness is not None: print("brightness: {:.2f}".format(display.brightness)) if display.underscan is not None: print("underscan: {:.2f}".format(display.underscan)) if display.mirrorSource is not None: print("mirror of: {}".format(display.mirrorSource.tag)) elif self.subcommand == "default": default = display.defaultMode if default: print(default.bigString) elif self.subcommand == "highest": highest = display.highestMode(self.hidpi) if highest: print(highest.bigString) elif self.subcommand == "available": current = None default = None hidpi = [] lodpi = [] for mode in sorted(display.allModes, reverse=True): if mode == display.currentMode: current = mode if mode.isDefault: default = mode if mode.hidpi: hidpi.append(mode) if not mode.hidpi: lodpi.append(mode) if current: print("\n".join([ " current mode:", " {}".format(current.littleString), ])) if default: print("\n".join([ " default mode:", " {}".format(default.littleString), ])) if hidpi: print( " HiDPI modes:" ) for mode in hidpi: print( " {}".format(mode.littleString) ) if lodpi: print( " non-HiDPI modes:" ) for mode in lodpi: print( " {}".format(mode.littleString) ) if i < len(self.scope) - 1: print("") def 
__handleRes(self): for display in self.scope: if self.subcommand == "default": default = display.defaultMode display.setMode(default) elif self.subcommand == "highest": highest = display.highestMode(self.hidpi) display.setMode(highest) else: closest = display.closestMode(self.width, self.height, self.refresh, self.hidpi) display.setMode(closest) def __handleRotate(self): for display in self.scope: display.setRotate(self.angle) def __handleBrightness(self): for display in self.scope: display.setBrightness(self.brightness)
MIT License
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/akamai_net_storage_output.py
AkamaiNetStorageOutput.password
python
def password(self, password):
    if password is not None:
        if not isinstance(password, string_types):
            raise TypeError("Invalid type for `password`, type has to be `string_types`")
    self._password = password
Sets the password of this AkamaiNetStorageOutput.

Your Akamai NetStorage password (required)

:param password: The password of this AkamaiNetStorageOutput.
:type: string_types
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/akamai_net_storage_output.py#L139-L153
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.output import Output import pprint import six class AkamaiNetStorageOutput(Output): @poscheck_model def __init__(self, id_=None, name=None, description=None, created_at=None, modified_at=None, custom_data=None, acl=None, host=None, username=None, password=None): super(AkamaiNetStorageOutput, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data, acl=acl) self._host = None self._username = None self._password = None self.discriminator = None if host is not None: self.host = host if username is not None: self.username = username if password is not None: self.password = password @property def openapi_types(self): types = {} if hasattr(super(AkamaiNetStorageOutput, self), 'openapi_types'): types = getattr(super(AkamaiNetStorageOutput, self), 'openapi_types') types.update({ 'host': 'string_types', 'username': 'string_types', 'password': 'string_types' }) return types @property def attribute_map(self): attributes = {} if hasattr(super(AkamaiNetStorageOutput, self), 'attribute_map'): attributes = getattr(super(AkamaiNetStorageOutput, self), 'attribute_map') attributes.update({ 'host': 'host', 'username': 'username', 'password': 'password' }) return attributes @property def host(self): return self._host @host.setter def host(self, host): if host is not None: if not isinstance(host, string_types): raise TypeError("Invalid type for `host`, type has to be `string_types`") self._host = host @property def username(self): return self._username @username.setter def username(self, username): if username is not None: if not isinstance(username, string_types): raise TypeError("Invalid type for `username`, type has to be `string_types`") self._username = username @property def password(self): return self._password @password.setter
MIT License
chrhenning/hypercl
mnets/mnet_interface.py
MainNetInterface.forward
python
def forward(self, x, weights=None, distilled_params=None, condition=None):
    raise NotImplementedError('TODO implement function')
Compute the output :math:`y` of this network given the input :math:`x`.

Args:
    x: The inputs :math:`x` to the network.
    weights (optional): List of weight tensors, that are used as network parameters. If attribute :attr:`hyper_shapes_learned` is not ``None``, then this argument is non-optional and the shapes of the weight tensors have to be as specified by :attr:`hyper_shapes_learned`. Otherwise, this option might still be set but the weight tensors must follow the shapes specified by attribute :attr:`param_shapes`.
    distilled_params (optional): May only be passed if attribute :attr:`hyper_shapes_distilled` is not ``None``. If not passed but the network relies on those parameters (e.g., batchnorm running statistics), then this method simply chooses the current internal representation of these parameters as returned by :meth:`distillation_targets`.
    condition (optional): Sometimes, the network will have to be conditioned on contextual information, which can be passed via this argument and depends on the actual implementation of this interface. For instance, when using batch normalization in a continual learning scenario, where running statistics have been checkpointed for every task, then this ``condition`` might be the actual task ID, that is passed as the argument ``stats_id`` of the method :meth:`utils.batchnorm_layer.BatchNormLayer.forward`.

Returns:
    The output :math:`y` of the network.
https://github.com/chrhenning/hypercl/blob/4645ef0e5b64abe40674d287b65c23e109ecfca1/mnets/mnet_interface.py#L364-L401
from abc import ABC, abstractmethod import numpy as np from warnings import warn import torch class MainNetInterface(ABC): def __init__(self): super(MainNetInterface, self).__init__() self._weights = None self._param_shapes = None self._hyper_shapes_learned = None self._hyper_shapes_distilled = None self._has_bias = None self._has_fc_out = None self._mask_fc_out = None self._has_linear_out = None self._layer_weight_tensors = None self._layer_bias_vectors = None self._batchnorm_layers = None self._context_mod_layers = None self._num_params = None self._num_internal_params = None self._hyper_shapes = None self._all_shapes = None def _is_properly_setup(self): assert(self._param_shapes is not None or self._all_shapes is not None) if self._param_shapes is None: warn('Private member "_param_shapes" should be specified in each ' + 'sublcass that implements this interface, since private ' + 'member "_all_shapes" is deprecated.', DeprecationWarning) self._param_shapes = self._all_shapes if self._hyper_shapes is not None or self._hyper_shapes_learned is not None: if self._hyper_shapes_learned is None: warn('Private member "_hyper_shapes_learned" should be ' + 'specified in each sublcass that implements this ' + 'interface, since private member "_hyper_shapes" is ' + 'deprecated.', DeprecationWarning) self._hyper_shapes_learned = self._hyper_shapes self._hyper_shapes = self._hyper_shapes_learned assert(self._weights is not None or self._hyper_shapes_learned is not None) if self._hyper_shapes_learned is None and self.hyper_shapes_distilled is None: assert(len(self._weights) == len(self._param_shapes)) assert(isinstance(self._has_bias, bool)) assert(isinstance(self._has_fc_out, bool)) assert(isinstance(self._mask_fc_out, bool)) assert(isinstance(self._has_linear_out, bool)) assert(self._layer_weight_tensors is not None) assert(self._layer_bias_vectors is not None) if self._has_bias: assert(len(self._layer_weight_tensors) == len(self._layer_bias_vectors)) @property def weights(self): return self._weights @property def param_shapes(self): return self._param_shapes @property def hyper_shapes(self): warn('Use atrtibute "hyper_shapes_learned" instead.', DeprecationWarning) return self.hyper_shapes_learned @property def hyper_shapes_learned(self): return self._hyper_shapes_learned @property def hyper_shapes_distilled(self): return self._hyper_shapes_distilled @property def has_bias(self): return self._has_bias @property def has_fc_out(self): return self._has_fc_out @property def mask_fc_out(self): return self._mask_fc_out @property def has_linear_out(self): return self._has_linear_out @property def num_params(self): if self._num_params is None: self._num_params = int(np.sum([np.prod(l) for l in self.param_shapes])) return self._num_params @property def num_internal_params(self): if self._num_internal_params is None: if self.weights is None: self._num_internal_params = 0 else: self._num_internal_params = int(sum(p.numel() for p in self.weights)) return self._num_internal_params @property def layer_weight_tensors(self): return self._layer_weight_tensors @property def layer_bias_vectors(self): return self._layer_bias_vectors @property def batchnorm_layers(self): return self._batchnorm_layers @property def context_mod_layers(self): return self._context_mod_layers @abstractmethod def distillation_targets(self): raise NotImplementedError('TODO implement function') @abstractmethod
Apache License 2.0
akb89/pyfn
pyfn/models/framerelation.py
FrameRelation.sub_frame
python
def sub_frame(self):
    return self._sub_frame
Return the subFrame of the FrameRelation.
https://github.com/akb89/pyfn/blob/848091d55aaa9dbb1eb939cd6c551876f9f4d6f7/pyfn/models/framerelation.py#L21-L23
__all__ = ['FrameRelation'] class FrameRelation(): def __init__(self, _id=None, sub_frame=None, sup_frame=None, frtype=None): self.__id = _id self._sub_frame = sub_frame self._sup_frame = sup_frame self._frtype = frtype @property def _id(self): return self.__id @property
MIT License
tlc-pack/tenset
python/tvm/relay/op/strategy/x86.py
bitserial_dense_strategy_cpu
python
def bitserial_dense_strategy_cpu(attrs, inputs, out_type, target):
    strategy = _op.OpStrategy()
    strategy.add_implementation(
        wrap_compute_bitserial_dense(topi.x86.bitserial_dense),
        wrap_topi_schedule(topi.x86.schedule_bitserial_dense),
        name="bitserial_dense.x86",
    )
    return strategy
bitserial_dense x86 strategy
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/python/tvm/relay/op/strategy/x86.py#L534-L542
import logging import re from tvm import topi from tvm.auto_scheduler import is_auto_scheduler_enabled from tvm.te import SpecializedCondition from tvm.relay.ty import is_dynamic from .generic import * from .. import op as _op logger = logging.getLogger("strategy") _NCHWc_matcher = re.compile("^NCHW[0-9]+c$") _OIHWio_matcher = re.compile("^OIHW[0-9]+i[0-9]+o$") @schedule_injective.register("cpu") def schedule_injective_cpu(attrs, outs, target): with target: return topi.x86.schedule_injective(outs) @schedule_reduce.register("cpu") def schedule_reduce_cpu(attrs, outs, target): with target: return topi.x86.schedule_reduce(outs) @schedule_concatenate.register("cpu") def schedule_concatenate_cpu(attrs, outs, target): with target: return topi.x86.schedule_concatenate(outs) @schedule_pool.register("cpu") def schedule_pool_cpu(attrs, outs, target): with target: return topi.x86.schedule_pool(outs, attrs.layout) @schedule_adaptive_pool.register("cpu") def schedule_adaptive_pool_cpu(attrs, outs, target): with target: return topi.x86.schedule_adaptive_pool(outs) @softmax_strategy.register("cpu") def softmax_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_softmax(topi.nn.softmax), wrap_topi_schedule(topi.x86.schedule_softmax), name="softmax.x86", ) return strategy @schedule_log_softmax.register("cpu") def schedule_log_softmax_cpu(attrs, outs, target): with target: return topi.x86.schedule_softmax(outs) @conv2d_strategy.register("cpu") def conv2d_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() data, kernel = inputs stride_h, stride_w = get_const_tuple(attrs.strides) dilation_h, dilation_w = get_const_tuple(attrs.dilation) groups = attrs.groups layout = attrs.data_layout kernel_layout = attrs.kernel_layout if dilation_h < 1 or dilation_w < 1: raise ValueError("dilation should be positive value") if groups == 1: if layout == "NCHW": assert kernel_layout == "OIHW" if topi.x86.is_int8_hw_support(data.dtype, kernel.dtype): strategy.add_implementation( wrap_compute_conv2d(topi.x86.conv2d_nchw_int8), wrap_topi_schedule(topi.x86.schedule_conv2d_nchw_int8), name="conv2d_nchw_int8.x86", ) else: strategy.add_implementation( wrap_compute_conv2d(topi.x86.conv2d_nchw), wrap_topi_schedule(topi.x86.schedule_conv2d_nchw), name="conv2d_nchw.x86", ) elif _NCHWc_matcher.match(layout): assert _OIHWio_matcher.match(kernel_layout) return conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target) elif layout == "NHWC": assert kernel_layout == "HWIO" if not is_auto_scheduler_enabled(): logger.warning("conv2d NHWC layout is not optimized for x86 with autotvm.") strategy.add_implementation( wrap_compute_conv2d(topi.nn.conv2d_nhwc, need_auto_scheduler_layout=True), wrap_topi_schedule(topi.x86.schedule_conv2d_nhwc), name="conv2d_nhwc.x86", ) judge_winograd_auto_scheduler = False if len(kernel.shape) == 4: kernel_h, kernel_w, _, co = get_const_tuple(kernel.shape) judge_winograd_auto_scheduler = ( "float" in data.dtype and "float" in kernel.dtype and kernel_h == 3 and kernel_w == 3 and stride_h == 1 and stride_w == 1 and dilation_h == 1 and dilation_w == 1 and 64 < co < 512 ) if is_auto_scheduler_enabled() and judge_winograd_auto_scheduler: strategy.add_implementation( wrap_compute_conv2d( topi.nn.conv2d_winograd_nhwc, need_auto_scheduler_layout=True ), naive_schedule, name="conv2d_nhwc.winograd", plevel=15, ) elif layout == "HWCN": assert kernel_layout == "HWIO" if not is_auto_scheduler_enabled(): logger.warning("conv2d HWCN layout is not 
optimized for x86 with autotvm.") strategy.add_implementation( wrap_compute_conv2d(topi.nn.conv2d_hwcn), wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn), name="conv2d_hwcn.generic", ) else: raise RuntimeError("Unsupported conv2d layout {} for x86".format(layout)) elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): if layout == "NCHW": assert kernel_layout == "OIHW" channel_multiplier = get_const_tuple(inputs[1].shape)[1] if channel_multiplier == 1 and dilation_h == 1 and dilation_w == 1: strategy.add_implementation( wrap_compute_conv2d(topi.x86.depthwise_conv2d_nchw), wrap_topi_schedule(topi.x86.schedule_depthwise_conv2d_nchw), name="depthwise_conv2d_nchw.x86", ) else: logger.warning( "For x86 target, depthwise_conv2d with channel " "multiplier greater than 1 is not optimized" ) strategy.add_implementation( wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw), wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw), name="depthwise_conv2d_nchw.generic", ) elif _NCHWc_matcher.match(layout): assert _OIHWio_matcher.match(kernel_layout) return depthwise_conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target) elif layout == "NHWC": assert kernel_layout == "HWOI" if not is_auto_scheduler_enabled(): logger.warning( "depthwise_conv2d NHWC layout is not optimized for x86 with autotvm." ) strategy.add_implementation( wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc), wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc), name="depthwise_conv2d_nhwc.generic", ) else: raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) else: if layout == "NCHW": assert kernel_layout == "OIHW" if not is_auto_scheduler_enabled(): logger.warning("group_conv2d is not optimized for x86 with autotvm.") strategy.add_implementation( wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True), wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw), name="group_conv2d_nchw.generic", ) elif layout == "NHWC": assert kernel_layout == "HWIO" if not is_auto_scheduler_enabled(): logger.warning("group_conv2d is not optimized for x86 with autotvm.") strategy.add_implementation( wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True, need_auto_scheduler_layout=True), wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc), name="group_conv2d_nhwc.generic", ) else: raise RuntimeError("Unsupported group_conv2d layout {}".format(layout)) return strategy @conv2d_NCHWc_strategy.register("cpu") def conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() data, kernel = inputs if topi.x86.is_int8_hw_support(data.dtype, kernel.dtype): strategy.add_implementation( wrap_compute_conv2d(topi.x86.conv2d_NCHWc_int8, True, True), wrap_topi_schedule(topi.x86.schedule_conv2d_NCHWc_int8), name="conv2d_NCHWc_int8.x86", ) else: strategy.add_implementation( wrap_compute_conv2d(topi.x86.conv2d_NCHWc, True, True), wrap_topi_schedule(topi.x86.schedule_conv2d_NCHWc), name="conv2d_NCHWc.x86", ) return strategy @depthwise_conv2d_NCHWc_strategy.register("cpu") def depthwise_conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_conv2d(topi.x86.depthwise_conv2d_NCHWc, True, True), wrap_topi_schedule(topi.x86.schedule_depthwise_conv2d_NCHWc), name="depthwise_conv2d_NCHWc.x86", ) return strategy @conv2d_transpose_strategy.register("cpu") def conv2d_transpose_strategy_cpu(attrs, inputs, out_type, target): layout = attrs.data_layout dilation = 
get_const_tuple(attrs.dilation) groups = attrs.groups assert dilation == (1, 1), "not support dilate now" assert groups == 1, "only support groups == 1 for now" strategy = _op.OpStrategy() if layout == "NCHW": strategy.add_implementation( wrap_compute_conv2d_transpose(topi.x86.conv2d_transpose_nchw), wrap_topi_schedule(topi.x86.schedule_conv2d_transpose_nchw), name="conv2d_transpose_nchw.x86", ) elif layout == "NHWC": strategy.add_implementation( wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nhwc), naive_schedule, name='conv2d_transpose_nchw.x86', ) else: raise ValueError("Invalid layout: " + layout) return strategy @conv3d_transpose_strategy.register("cpu") def conv3d_transpose_strategy_cpu(attrs, inputs, out_type, target): layout = attrs.data_layout dilation = get_const_tuple(attrs.dilation) groups = attrs.groups assert layout == "NCDHW", "only support ncdhw for now" assert dilation == (1, 1, 1), "not support dilate now" assert groups == 1, "only support groups == 1 for now" strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_conv3d_transpose(topi.x86.conv3d_transpose_ncdhw), wrap_topi_schedule(topi.x86.schedule_conv3d_transpose_ncdhw), name="conv3d_transpose_ncdhw.x86", ) return strategy @conv3d_strategy.register("cpu") def conv3d_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() layout = attrs.data_layout if is_auto_scheduler_enabled(): if layout == "NCDHW": strategy.add_implementation( wrap_compute_conv3d(topi.nn.conv3d_ncdhw), naive_schedule, name="conv3d_ncdhw.x86", ) elif layout == "NDHWC": strategy.add_implementation( wrap_compute_conv3d(topi.nn.conv3d_ndhwc, need_auto_scheduler_layout=True), naive_schedule, name="conv3d_ndhwc.x86", ) else: raise ValueError("Not support this layout {} yet".format(layout)) else: if layout == "NCDHW": strategy.add_implementation( wrap_compute_conv3d(topi.x86.conv3d_ncdhw), wrap_topi_schedule(topi.x86.schedule_conv3d_ncdhw), name="conv3d_ncdhw.x86", ) elif layout == "NDHWC": strategy.add_implementation( wrap_compute_conv3d(topi.x86.conv3d_ndhwc), wrap_topi_schedule(topi.x86.schedule_conv3d_ndhwc), name="conv3d_ndhwc.x86", ) else: raise ValueError("Not support this layout {} yet".format(layout)) return strategy @conv1d_strategy.register("cpu") def conv1d_strategy_cpu(attrs, inputs, out_type, target): layout = attrs.data_layout dilation = get_const_tuple(attrs.dilation) if dilation[0] < 1: raise ValueError("dilation should be a positive value") strategy = _op.OpStrategy() if layout == "NCW": strategy.add_implementation( wrap_compute_conv1d(topi.nn.conv1d_ncw), wrap_topi_schedule(topi.x86.schedule_conv1d_ncw), name="conv1d_ncw.x86", ) elif layout == "NWC": strategy.add_implementation( wrap_compute_conv1d(topi.nn.conv1d_nwc), wrap_topi_schedule(topi.x86.schedule_conv1d_nwc), name="conv1d_nwc.x86", ) else: raise ValueError("Unsupported conv1d layout {}".format(layout)) return strategy @dense_strategy.register("cpu") def dense_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() same_type = inputs[0].dtype == inputs[1].dtype == out_type.dtype dtype = inputs[0].dtype u8s8s32 = dtype == "uint8" and inputs[1].dtype == "int8" and out_type.dtype == "int32" strategy.add_implementation( wrap_compute_dense(topi.x86.dense_nopack), wrap_topi_schedule(topi.x86.schedule_dense_nopack), name="dense_nopack.x86", plevel=5, ) strategy.add_implementation( wrap_compute_dense(topi.x86.dense_pack), wrap_topi_schedule(topi.x86.schedule_dense_pack), name="dense_pack.x86", plevel=10, ) if 
is_auto_scheduler_enabled(): strategy.add_implementation( wrap_compute_dense(topi.nn.dense, need_auto_scheduler_layout=True), naive_schedule, name="dense.generic", plevel=11, ) if "cblas" in target.libs: with SpecializedCondition(same_type and dtype in ["float32", "float64"]): strategy.add_implementation( wrap_compute_dense(topi.x86.dense_cblas), wrap_topi_schedule(topi.x86.schedule_dense_cblas), name="dense_cblas.x86", plevel=13, ) if "mkl" in target.libs: with SpecializedCondition(same_type and dtype in ["float32", "float64"] or u8s8s32): strategy.add_implementation( wrap_compute_dense(topi.x86.dense_mkl), wrap_topi_schedule(topi.x86.schedule_dense_mkl), name="dense_mkl.x86", plevel=14, ) if "mkldnn" in target.libs: with SpecializedCondition(same_type and dtype == "float32"): strategy.add_implementation( wrap_compute_dense(topi.x86.dense_mkldnn), wrap_topi_schedule(topi.x86.schedule_dense_mkldnn), name="dense_mkldnn.x86", plevel=15, ) return strategy @dense_pack_strategy.register("cpu") def dense_pack_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_dense(topi.x86.dense_pack), wrap_topi_schedule(topi.x86.schedule_dense_pack), name="dense_pack.x86", ) return strategy @batch_matmul_strategy.register("cpu") def batch_matmul_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() if is_dynamic(out_type) or is_auto_scheduler_enabled(): strategy.add_implementation( wrap_compute_batch_matmul(topi.nn.batch_matmul, need_auto_scheduler_layout=True), wrap_topi_schedule(topi.generic.nn.schedule_batch_matmul), name="batch_matmul.generic", plevel=10, ) else: strategy.add_implementation( wrap_compute_batch_matmul(topi.x86.batch_matmul), wrap_topi_schedule(topi.x86.schedule_batch_matmul), name="batch_matmul.x86", plevel=10, ) if "cblas" in target.libs: strategy.add_implementation( wrap_compute_batch_matmul(topi.x86.batch_matmul_cblas), wrap_topi_schedule(topi.x86.schedule_batch_matmul_cblas), name="batch_matmul_cblas.x86", plevel=15, ) if "mkl" in target.libs: strategy.add_implementation( wrap_compute_batch_matmul(topi.x86.batch_matmul_mkl), wrap_topi_schedule(topi.x86.schedule_batch_matmul_mkl), name="batch_matmul_mkl.x86", plevel=15, ) return strategy @sparse_dense_strategy.register("cpu") def sparse_dense_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() strategy.add_implementation( wrap_compute_sparse_dense(topi.nn.sparse_dense), wrap_topi_schedule(topi.x86.schedule_sparse_dense), name="sparse_dense.x86", plevel=10, ) return strategy @roi_align_strategy.register("cpu") def roi_align_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() layout = attrs.layout if layout == "NCHW": strategy.add_implementation( wrap_compute_roi_align(topi.x86.roi_align_nchw), wrap_topi_schedule(topi.generic.schedule_roi_align), name="roi_align.x86", ) else: assert layout == "NHWC", "layout must be NCHW or NHWC." 
strategy.add_implementation( wrap_compute_roi_align(topi.vision.rcnn.roi_align_nhwc), wrap_topi_schedule(topi.generic.schedule_roi_align), name="roi_align.x86", ) return strategy @bitserial_conv2d_strategy.register("cpu") def bitserial_conv2d_strategy_cpu(attrs, inputs, out_type, target): strategy = _op.OpStrategy() layout = attrs.data_layout if layout == "NCHW": strategy.add_implementation( wrap_compute_bitserial_conv2d(topi.x86.bitserial_conv2d_nchw), wrap_topi_schedule(topi.x86.schedule_bitserial_conv2d_nchw), name="bitserial_conv2d_nchw.x86", ) elif layout == "NHWC": strategy.add_implementation( wrap_compute_bitserial_conv2d(topi.x86.bitserial_conv2d_nhwc), wrap_topi_schedule(topi.x86.schedule_bitserial_conv2d_nhwc), name="bitserial_conv2d_nhwc.x86", ) else: raise ValueError("Data layout {} not supported.".format(layout)) return strategy @bitserial_dense_strategy.register("cpu")
Apache License 2.0
marrow/mongo
marrow/mongo/model.py
Model.__init__
python
def __init__(self, collection, model=None, database='default', cache=None): self.database = database self.collection = collection self.model = model self.cache = cache
Construct a new lazy loader for MongoDB collections. Pass the string name of the collection as the first positional argument, optionally pass a marrow.mongo document class to remember for later, and pass the name of the database connection to access as the keyword argument `database`. To utilize the cache (and save on repeated lookups), pass the name of the instance attribute to assign to as `cache`.
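A minimal construction sketch based on the signature and docstring above; the collection name and cache attribute are hypothetical, and how the loader is later resolved (e.g. via descriptor access) is not shown in this excerpt.

from marrow.mongo.model import Model

# Look up the 'companies' collection lazily on the 'default' connection and,
# because cache= is given, memoize the result on the owning instance.
companies = Model('companies', model=None, database='default', cache='_companies')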
https://github.com/marrow/mongo/blob/1a8e5fe9047b6a5bb2eaeea95f3dc085c737067f/marrow/mongo/model.py#L33-L46
from __future__ import unicode_literals __all__ = ['Model'] class Model(object): __slots__ = ('database', 'collection', 'model', 'cache')
MIT License
stmsolutions/boobsnail
excel4lib/sheet/worksheet.py
Worksheet.column_iterate
python
def column_iterate(self): for c in self.cells.keys(): yield (c, self.cells[c])
Iterate through the worksheet's columns :return: yields (column index, cells in that column) tuples
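A small hedged sketch of the iteration; populating the sheet goes through the library's own cell helpers, which are not part of this excerpt, so only the loop shape is shown.

from excel4lib.sheet.worksheet import Worksheet

ws = Worksheet("Macro1")
# ... add cells via the library's API (not shown above) ...
for x, cells_in_column in ws.column_iterate():
    print(x, cells_in_column)   # column index and the cells stored in that column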
https://github.com/stmsolutions/boobsnail/blob/c0c2067d7271ca76ee721998d28e8c3c81a48397/excel4lib/sheet/worksheet.py#L70-L76
from .cell import * class AlreadyReservedException(Exception): pass class CouldNotMoveCellException(Exception): pass class Worksheet(object): def __init__(self, name = ""): self.name = name self.cells = {} self._curr_x = 1 self._curr_y = 1 self._max_x = 1 self._max_y = 1 self.cells_tag = {} def get_column(self, x): return self.cells.get(x, None) def worksheet_iterate(self): i = 0 j = 0 while i < self._max_x: j = 0 while j < self._max_y: yield (i+1, j+1) j = j + 1 i = i + 1 def cell_iterate(self): i = 0 j = 0 while i < self._max_x: j = 0 while j < self._max_y: cell = self.get_cell(i + 1, j + 1) if cell: yield cell j = j + 1 i = i + 1
MIT License
robertcsordas/modules
layers/batch_ops.py
batch_bias_add
python
def batch_bias_add(*args, **kwargs) -> torch.Tensor: return batch_elementwise(*args, op = lambda a,b: a+b, **kwargs)
Batch add bias to the inputs. For more details, see batch_elementwise
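A short sketch of the grouped-bias behaviour that batch_elementwise implies: a (G, D) bias is applied group-wise to a (B, D) input whose batch size is divisible by G. Shapes are illustrative only.

import torch
from layers.batch_ops import batch_bias_add   # module path as listed above

x = torch.randn(6, 4)       # batch of 6 inputs, feature dim 4
bias = torch.randn(3, 4)    # 3 bias groups; 6 % 3 == 0, so each group covers 2 inputs
out = batch_bias_add(x, bias)
assert out.shape == (6, 4)
# With a plain 1-D bias the call reduces to ordinary broadcasting:
assert torch.allclose(batch_bias_add(x, torch.zeros(4)), x)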
https://github.com/robertcsordas/modules/blob/efdb8790b074862581e035c9ab5bf889440a8023/layers/batch_ops.py#L65-L72
import torch import torch.nn.functional as F from typing import Optional, Callable def batch_matmul(input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: assert input.ndim == 2 if weight.ndim == 3: weight = weight.squeeze(0) if weight.ndim == 2: return torch.mm(input, weight) assert weight.ndim == 3 assert input.shape[0] % weight.shape[0] == 0 res = torch.bmm(input.view(weight.shape[0], -1, input.shape[-1]), weight) return res.view(input.shape[0], -1) def batch_elementwise(input: torch.Tensor, param: torch.Tensor, op: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], input_batch_dim: int = 0, pndim: int = 1) -> torch.Tensor: if param.ndim == pndim+1: param = param.squeeze(0) if param.ndim == pndim: return op(input, param) assert param.ndim == pndim + 1 assert input.shape[input_batch_dim] % param.shape[0] == 0 input_r = input.view(*input.shape[:input_batch_dim], param.shape[0], -1, *input.shape[input_batch_dim+1:]) param_r = param.view(*([1]*input_batch_dim), param.shape[0], *([1]*(input_r.ndim - input_batch_dim - param.ndim)), *param.shape[1:]) return op(input_r, param_r).view_as(input)
BSD 3-Clause New or Revised License
cogent3/cogent3
src/cogent3/parse/blast.py
is_blast_junk
python
def is_blast_junk(line): return _is_junk(line, ("BLAST", "TBLAS"))
Return True for empty lines or lines containing BLAST header info, so callers can skip them
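A quick sketch of the predicate; the example lines are made up but follow the comment format the parser expects.

from cogent3.parse.blast import is_blast_junk

assert is_blast_junk("")                       # blank line
assert is_blast_junk("# BLASTN 2.2.22")        # program header
assert is_blast_junk("# TBLASTX 2.2.22")
assert not is_blast_junk("# Query: seq1")      # real metadata is kept
assert not is_blast_junk("seq1\tsubj1\t98.0")  # hit line is kept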
https://github.com/cogent3/cogent3/blob/3d98bddc0aef2bf7fea21b9a89de76b01f3d2da8/src/cogent3/parse/blast.py#L50-L52
from cogent3.parse.record_finder import ( DelimitedRecordFinder, LabeledRecordFinder, never_ignore, ) __author__ = "Micah Hamady" __copyright__ = "Copyright 2007-2021, The Cogent Project" __credits__ = ["Micah Hamady", "Rob Knight"] __license__ = "BSD-3" __version__ = "2021.10.12a1" __maintainer__ = "Micah Hamady" __email__ = "hamady@colorado.edu" __status__ = "Prototype" strip = str.strip upper = str.upper def iter_finder(line): return line.startswith("# Iteration:") def query_finder(line): return line.startswith("# Query:") def iteration_set_finder(line): return line.startswith("# Iteration: 1") def _is_junk(line, t_strs): if not line or not line.strip(): return True for t_str in t_strs: if line.startswith("# %s" % t_str): return True return False
BSD 3-Clause New or Revised License
google/pasta
pasta/base/token_generator.py
TokenGenerator.block_whitespace
python
def block_whitespace(self, indent_level): start_i = self._i full_whitespace = self.whitespace(comment=True) if not indent_level: return full_whitespace self._i = start_i lines = full_whitespace.splitlines(True) try: last_line_idx = next(i for i, line in reversed(list(enumerate(lines))) if line.startswith(indent_level + '#')) except StopIteration: self._loc = self._tokens[self._i].end return '' lines = lines[:last_line_idx + 1] end_line = self._tokens[self._i].end[0] + 1 + len(lines) list(self.takewhile(lambda tok: tok.start[0] < end_line)) self._loc = self._tokens[self._i].end return ''.join(lines)
Parses whitespace from the current _loc to the end of the block.
https://github.com/google/pasta/blob/a73452d0cfa04f1473abd8e050a8ca3bceab61ad/pasta/base/token_generator.py#L171-L196
from __future__ import absolute_import from __future__ import division from __future__ import print_function import ast import collections import contextlib import itertools import tokenize from six import StringIO from pasta.base import formatting as fmt from pasta.base import fstring_utils TOKENS = tokenize Token = collections.namedtuple('Token', ('type', 'src', 'start', 'end', 'line')) FORMATTING_TOKENS = (TOKENS.INDENT, TOKENS.DEDENT, TOKENS.NL, TOKENS.NEWLINE, TOKENS.COMMENT) class TokenGenerator(object): def __init__(self, source, ignore_error_token=False): self.lines = source.splitlines(True) self._tokens = list(_generate_tokens(source, ignore_error_token)) self._parens = [] self._hints = 0 self._scope_stack = [] self._len = len(self._tokens) self._i = -1 self._loc = self.loc_begin() def chars_consumed(self): return len(self._space_between((1, 0), self._tokens[self._i].end)) def loc_begin(self): if self._i < 0: return (1, 0) return self._tokens[self._i].start def loc_end(self): if self._i < 0: return (1, 0) return self._tokens[self._i].end def peek(self): if self._i + 1 >= self._len: return None return self._tokens[self._i + 1] def peek_non_whitespace(self): return self.peek_conditional(lambda t: t.type not in FORMATTING_TOKENS) def peek_conditional(self, condition): return next((t for t in self._tokens[self._i + 1:] if condition(t)), None) def next(self, advance=True): self._i += 1 if self._i >= self._len: return None if advance: self._loc = self._tokens[self._i].end return self._tokens[self._i] def rewind(self, amount=1): self._i -= amount def whitespace(self, max_lines=None, comment=False): next_token = self.peek() if not comment and next_token and next_token.type == TOKENS.COMMENT: return '' def predicate(token): return (token.type in (TOKENS.INDENT, TOKENS.DEDENT) or token.type == TOKENS.COMMENT and (comment or self._hints) or token.type == TOKENS.ERRORTOKEN and token.src == ' ' or max_lines is None and token.type in (TOKENS.NL, TOKENS.NEWLINE)) whitespace = list(self.takewhile(predicate, advance=False)) next_token = self.peek() result = '' for tok in itertools.chain(whitespace, ((next_token,) if next_token else ())): result += self._space_between(self._loc, tok.start) if tok != next_token: result += tok.src self._loc = tok.end else: self._loc = tok.start if ((max_lines is None or max_lines > 0) and next_token and next_token.type in (TOKENS.NL, TOKENS.NEWLINE)): result += self.next().src return result
Apache License 2.0
paddlepaddle/paddlesleeve
AdvBox/examples/objectdetector/ppdet/modeling/necks/yolo_fpn.py
PPYOLOTinyDetBlock.__init__
python
def __init__(self, ch_in, ch_out, name, drop_block=False, block_size=3, keep_prob=0.9, data_format='NCHW'): super(PPYOLOTinyDetBlock, self).__init__() self.drop_block_ = drop_block self.conv_module = nn.Sequential() cfgs = [ ['.0', ch_in, ch_out, 1, 1, 0, 1], ['.1', ch_out, ch_out, 5, 1, 2, ch_out], ['.2', ch_out, ch_out, 1, 1, 0, 1], ['.route', ch_out, ch_out, 5, 1, 2, ch_out], ] for cfg in cfgs: conv_name, conv_ch_in, conv_ch_out, filter_size, stride, padding, groups = cfg self.conv_module.add_sublayer( name + conv_name, ConvBNLayer( ch_in=conv_ch_in, ch_out=conv_ch_out, filter_size=filter_size, stride=stride, padding=padding, groups=groups, name=name + conv_name)) self.tip = ConvBNLayer( ch_in=ch_out, ch_out=ch_out, filter_size=1, stride=1, padding=0, groups=1, name=name + conv_name) if self.drop_block_: self.drop_block = DropBlock( block_size=block_size, keep_prob=keep_prob, data_format=data_format, name=name + '.dropblock')
PPYOLO Tiny DetBlock layer Args: ch_in (list): input channel number ch_out (list): output channel number name (str): block name drop_block: whether to use DropBlock block_size: drop block size keep_prob: probability to keep block in DropBlock data_format (str): data format, NCHW or NHWC
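A constructor-level sketch using the documented parameters; the channel counts and block name are made up, and the forward call assumes this block follows the (route, tip) convention of the other DetBlocks in this file.

import paddle
from ppdet.modeling.necks.yolo_fpn import PPYOLOTinyDetBlock

block = PPYOLOTinyDetBlock(ch_in=96, ch_out=96, name='yolo_block.0')
feat = paddle.rand([1, 96, 20, 20])   # NCHW feature map
route, tip = block(feat)              # assumed to mirror YoloDetBlock.forward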
https://github.com/paddlepaddle/paddlesleeve/blob/18cc4b83ae311365b8d132ea4619d60abf3945bf/AdvBox/examples/objectdetector/ppdet/modeling/necks/yolo_fpn.py#L246-L305
import paddle import paddle.nn as nn import paddle.nn.functional as F from ppdet.core.workspace import register, serializable from ppdet.modeling.layers import DropBlock from ..backbones.darknet import ConvBNLayer from ..shape_spec import ShapeSpec __all__ = ['YOLOv3FPN', 'PPYOLOFPN', 'PPYOLOTinyFPN', 'PPYOLOPAN'] def add_coord(x, data_format): b = paddle.shape(x)[0] if data_format == 'NCHW': h, w = x.shape[2], x.shape[3] else: h, w = x.shape[1], x.shape[2] gx = paddle.arange(w, dtype=x.dtype) / ((w - 1.) * 2.0) - 1. gy = paddle.arange(h, dtype=x.dtype) / ((h - 1.) * 2.0) - 1. if data_format == 'NCHW': gx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w]) gy = gy.reshape([1, 1, h, 1]).expand([b, 1, h, w]) else: gx = gx.reshape([1, 1, w, 1]).expand([b, h, w, 1]) gy = gy.reshape([1, h, 1, 1]).expand([b, h, w, 1]) gx.stop_gradient = True gy.stop_gradient = True return gx, gy class YoloDetBlock(nn.Layer): def __init__(self, ch_in, channel, norm_type, freeze_norm=False, name='', data_format='NCHW'): super(YoloDetBlock, self).__init__() self.ch_in = ch_in self.channel = channel assert channel % 2 == 0, "channel {} cannot be divided by 2".format(channel) conv_def = [ ['conv0', ch_in, channel, 1, '.0.0'], ['conv1', channel, channel * 2, 3, '.0.1'], ['conv2', channel * 2, channel, 1, '.1.0'], ['conv3', channel, channel * 2, 3, '.1.1'], ['route', channel * 2, channel, 1, '.2'], ] self.conv_module = nn.Sequential() for idx, (conv_name, ch_in, ch_out, filter_size, post_name) in enumerate(conv_def): self.conv_module.add_sublayer( conv_name, ConvBNLayer( ch_in=ch_in, ch_out=ch_out, filter_size=filter_size, padding=(filter_size - 1) // 2, norm_type=norm_type, freeze_norm=freeze_norm, data_format=data_format, name=name + post_name)) self.tip = ConvBNLayer( ch_in=channel, ch_out=channel * 2, filter_size=3, padding=1, norm_type=norm_type, freeze_norm=freeze_norm, data_format=data_format, name=name + '.tip') def forward(self, inputs): route = self.conv_module(inputs) tip = self.tip(route) return route, tip class SPP(nn.Layer): def __init__(self, ch_in, ch_out, k, pool_size, norm_type, freeze_norm=False, name='', act='leaky', data_format='NCHW'): super(SPP, self).__init__() self.pool = [] self.data_format = data_format for size in pool_size: pool = self.add_sublayer( '{}.pool1'.format(name), nn.MaxPool2D( kernel_size=size, stride=1, padding=size // 2, data_format=data_format, ceil_mode=False)) self.pool.append(pool) self.conv = ConvBNLayer( ch_in, ch_out, k, padding=k // 2, norm_type=norm_type, freeze_norm=freeze_norm, name=name, act=act, data_format=data_format) def forward(self, x): outs = [x] for pool in self.pool: outs.append(pool(x)) if self.data_format == "NCHW": y = paddle.concat(outs, axis=1) else: y = paddle.concat(outs, axis=-1) y = self.conv(y) return y class CoordConv(nn.Layer): def __init__(self, ch_in, ch_out, filter_size, padding, norm_type, freeze_norm=False, name='', data_format='NCHW'): super(CoordConv, self).__init__() self.conv = ConvBNLayer( ch_in + 2, ch_out, filter_size=filter_size, padding=padding, norm_type=norm_type, freeze_norm=freeze_norm, data_format=data_format, name=name) self.data_format = data_format def forward(self, x): gx, gy = add_coord(x, self.data_format) if self.data_format == 'NCHW': y = paddle.concat([x, gx, gy], axis=1) else: y = paddle.concat([x, gx, gy], axis=-1) y = self.conv(y) return y class PPYOLODetBlock(nn.Layer): def __init__(self, cfg, name, data_format='NCHW'): super(PPYOLODetBlock, self).__init__() self.conv_module = nn.Sequential() for idx, (conv_name, 
layer, args, kwargs) in enumerate(cfg[:-1]): kwargs.update( name='{}.{}'.format(name, conv_name), data_format=data_format) self.conv_module.add_sublayer(conv_name, layer(*args, **kwargs)) conv_name, layer, args, kwargs = cfg[-1] kwargs.update( name='{}.{}'.format(name, conv_name), data_format=data_format) self.tip = layer(*args, **kwargs) def forward(self, inputs): route = self.conv_module(inputs) tip = self.tip(route) return route, tip class PPYOLOTinyDetBlock(nn.Layer):
Apache License 2.0
wangbinyq/pillow_example
tutorial.py
show
python
def show(newim, oldim = im): cm = Image.new('RGB', (512*2, 512)) left = (0, 0, 512, 512) right = (512, 0, 512*2, 512) cm.paste(oldim, left) cm.paste(newim, right) cm.show()
Show the original and the new image side by side in a single combined image.
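A usage sketch assuming lena.ppm (512x512) is present as loaded above; each call places the original on the left and a processed copy on the right.

show(im.filter(ImageFilter.CONTOUR))
show(ImageEnhance.Contrast(im).enhance(1.5))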
https://github.com/wangbinyq/pillow_example/blob/64fdbeca9385a2a4361e54abceb5948c41809e0c/tutorial.py#L11-L22
from PIL import Image, ImageFilter, ImageEnhance import os fn = 'lena.ppm' im = Image.open(fn) print im.format, im.size, im.mode
BSD 2-Clause Simplified License
altosaar/proximity_vi
inferences/proximity_variational_inference.py
ProximityVariationalInference.build_distance
python
def build_distance(self): cfg = self.config distance = {} proximity = self.proximity_statistic moving_average = proximity.moving_average for name, stat in proximity.statistic.items(): difference = stat - moving_average[name] if cfg['c/distance'] == 'square_difference': dist = tf.square(difference) elif cfg['c/distance'] == 'inverse_huber': dist = tf.where(tf.abs(difference) <= 1.0, tf.abs(difference), 0.5 * tf.square(difference) + 0.5) if 'latent' in proximity.named_shape[name]: dist = tf.reduce_sum(dist, proximity.named_shape[name]['latent']) proximity.named_shape[name].remove('latent') if 'param_0' in proximity.named_shape[name]: dist = tf.reduce_sum(dist, proximity.named_shape[name]['param_0']) proximity.named_shape[name].remove('param_0') if 'param_1' in proximity.named_shape[name]: dist = tf.reduce_sum(dist, proximity.named_shape[name]['param_1']) proximity.named_shape[name].remove('param_1') distance[name] = dist name = '_'.join(['c/distance', cfg['c/proximity_statistic'], name]) tf.summary.scalar(name, tf.reduce_mean(dist)) self.distance = distance res = 0. for dist in distance.values(): res += dist self.distance_sum = res
Distance between statistic f(lambda) and its moving average.
https://github.com/altosaar/proximity_vi/blob/a3640379d06ecf860adac36fe70751858d5122e1/inferences/proximity_variational_inference.py#L49-L78
import tensorflow as tf import numpy as np import collections import time import util from .variational_inference import VariationalInference from inferences import proximity_statistics fw = tf.contrib.framework layers = tf.contrib.layers dist = tf.contrib.distributions class ProximityVariationalInference(VariationalInference): def __init__(self, session, config, model, variational, data): super(ProximityVariationalInference, self).__init__( session, config, model, variational, data) cfg = self.config self.build_proximity_statistic() self.build_distance() if not cfg['optim/deterministic_annealing']: self.build_magnitude() def build_proximity_statistic(self): cfg = self.config if cfg['c/proximity_statistic'] == 'entropy': s = proximity_statistics.Entropy(cfg, self.variational) elif cfg['c/proximity_statistic'] == 'kl': s = proximity_statistics.KL(cfg, self.variational, model=self.model) elif cfg['c/proximity_statistic'] == 'mean_variance': s = proximity_statistics.MeanVariance(cfg, self.variational) elif cfg['c/proximity_statistic'] == 'active_units': s = proximity_statistics.ActiveUnits(cfg, self.variational) elif cfg['c/proximity_statistic'] == 'activations_layer_0_fc0': s = proximity_statistics.Activations(cfg, self.variational) elif cfg['c/proximity_statistic'] == 'log_likelihood': s = proximity_statistics.LogLikelihood( cfg, self.variational, model=self.model, q_z_sample=self.q_z_sample, data=self.data) elif cfg['c/proximity_statistic'] == 'orthogonal': s = proximity_statistics.Orthogonal(cfg, self.variational) else: raise ValueError('Proximity statistic %s not implemented!' % cfg['c/proximity_statistic']) self.proximity_statistic = s
MIT License
zedthree/fort_depend.py
fortdepend/fort_depend.py
FortranProject.get_source
python
def get_source(self, extensions=None): if extensions is None: extensions = [".f90", ".F90"] elif not isinstance(extensions, list): extensions = [extensions] tmp = os.listdir(".") files = [] for ext in extensions: files.extend([x for x in tmp if x.endswith(ext)]) return files
Return a list of filenames in the current directory ending with any of the given extensions Args: extensions: List of file extensions (defaults to [".f90", ".F90"])
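A hedged sketch: with a hypothetical working directory holding solver.f90, io.F90 and notes.md, only the Fortran sources are picked up when no explicit file list is given.

from fortdepend.fort_depend import FortranProject

project = FortranProject()        # no `files` argument, so get_source() scans '.'
print(sorted(project.files))      # expected: ['io.F90', 'solver.f90']; notes.md is skipped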
https://github.com/zedthree/fort_depend.py/blob/ea2caf0010765f00de142d168b05665499ffe1ca/fortdepend/fort_depend.py#L83-L100
from __future__ import print_function import os import sys from colorama import Fore from .smartopen import smart_open from .units import FortranFile, FortranModule from .graph import Graph try: input = raw_input except NameError: pass DEPFILE_HEADER = "# This file is generated automatically. DO NOT EDIT!" DEFAULT_IGNORED_MODULES = ["iso_c_binding", "iso_fortran_env"] class FortranProject: def __init__(self, name=None, exclude_files=None, files=None, ignore_modules=None, macros=None, cpp_includes=None, use_preprocessor=True, verbose=False): if name is None: self.name = os.path.basename(os.getcwd()) else: self.name = name if files is None: files = self.get_source() elif not isinstance(files, list): files = [files] if exclude_files is not None: if not isinstance(exclude_files, list): exclude_files = [exclude_files] files = set(files) - set(exclude_files) self.files = {filename: FortranFile(filename=filename, macros=macros, readfile=True, cpp_includes=cpp_includes, use_preprocessor=use_preprocessor) for filename in files} self.modules = self.get_modules() self.programs = {k: v for k, v in self.modules.items() if v.unit_type == "program"} self.remove_ignored_modules(ignore_modules) self.depends_by_module = self.get_depends_by_module(verbose) self.depends_by_file = self.get_depends_by_file(verbose)
MIT License
lukelbd/proplot
proplot/axes/base.py
Axes._get_background_props
python
def _get_background_props(patch_kw=None, context=True, **kwargs): if patch_kw: warnings._warn_proplot( "Keyword 'patch_kw' was deprecated in v0.8. Please pass " 'patch properties as keyword arguments instead.' ) kwargs.update(patch_kw) props = _pop_props(kwargs, 'patch') if 'color' in props: props.setdefault('edgecolor', props.pop('color')) for key in ('alpha', 'facecolor', 'linewidth', 'edgecolor'): value = rc.find('axes.' + key, context=context) if value is not None: props.setdefault(key, value) kw_face = _pop_kwargs(props, 'alpha', 'facecolor') kw_edge = _pop_kwargs(props, 'edgecolor', 'linewidth', 'linestyle') kw_edge['capstyle'] = 'projecting' if 'color' in props: kw_edge.setdefault('edgecolor', props.pop('color')) if kwargs: raise TypeError(f'Unexpected keyword argument(s): {kwargs!r}') return kw_face, kw_edge
Return boundary properties. Backgrounds are used in all axes projections.
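This is an internal static helper, so calling it directly is only an illustration of the face/edge split it performs; the expected dictionaries below assume no rc context overrides are active.

import proplot as pplt

kw_face, kw_edge = pplt.axes.Axes._get_background_props(facecolor='white', linewidth=1.2)
# kw_face -> roughly {'facecolor': 'white'}
# kw_edge -> roughly {'linewidth': 1.2, 'capstyle': 'projecting'}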
https://github.com/lukelbd/proplot/blob/26e7d5810fc8187e73f2028b5e8be4d0bcd82b91/proplot/axes/base.py#L859-L891
import copy import inspect import re from numbers import Integral import matplotlib.axes as maxes import matplotlib.cm as mcm import matplotlib.colors as mcolors import matplotlib.container as mcontainer import matplotlib.contour as mcontour import matplotlib.gridspec as mgridspec import matplotlib.legend as mlegend import matplotlib.patches as mpatches import matplotlib.projections as mprojections import matplotlib.text as mtext import matplotlib.ticker as mticker import matplotlib.transforms as mtransforms import numpy as np from matplotlib import cbook from .. import colors as pcolors from .. import constructor from ..config import rc from ..internals import ic from ..internals import ( _kwargs_to_args, _not_none, _pop_kwargs, _pop_params, _pop_props, _pop_rc, _translate_loc, dependencies, docstring, guides, rcsetup, texts, warnings, ) from ..utils import _fontsize_to_pt, edges, units try: from cartopy.crs import CRS, PlateCarree except Exception: CRS = PlateCarree = object __all__ = ['Axes'] ABC_STRING = 'abcdefghijklmnopqrstuvwxyz' ALIGN_OPTS = { None: { 'center': 'center', 'left': 'center left', 'right': 'center right', 'top': 'upper center', 'bottom': 'lower center', }, 'left': { 'top': 'upper right', 'center': 'center right', 'bottom': 'lower right', }, 'right': { 'top': 'upper left', 'center': 'center left', 'bottom': 'lower left', }, 'top': { 'left': 'lower left', 'center': 'lower center', 'right': 'lower right' }, 'bottom': { 'left': 'upper left', 'center': 'upper center', 'right': 'upper right' }, } _proj_docstring = """ proj, projection : \ str, `cartopy.crs.Projection`, or `~mpl_toolkits.basemap.Basemap`, optional The map projection specification(s). If ``'cart'`` or ``'cartesian'`` (the default), a `~proplot.axes.CartesianAxes` is created. If ``'polar'``, a `~proplot.axes.PolarAxes` is created. Otherwise, the argument is interpreted by `~proplot.constructor.Proj`, and the result is used to make a `~proplot.axes.GeoAxes` (in this case the argument can be a `cartopy.crs.Projection` instance, a `~mpl_toolkits.basemap.Basemap` instance, or a projection name listed in :ref:`this table <proj_table>`). """ _proj_kw_docstring = """ proj_kw, projection_kw : dict-like, optional Keyword arguments passed to `~mpl_toolkits.basemap.Basemap` or cartopy `~cartopy.crs.Projection` classes on instantiation. """ _basemap_docstring = """ basemap : bool or dict-like, optional Whether to use `~mpl_toolkits.basemap.Basemap` or `~cartopy.crs.Projection` for map projections. Default is :rc:`basemap`. """ docstring._snippet_manager['axes.proj'] = _proj_docstring docstring._snippet_manager['axes.proj_kw'] = _proj_kw_docstring docstring._snippet_manager['axes.basemap'] = _basemap_docstring _space_docstring = """ queue : bool, optional If ``True`` and `loc` is the same as an existing {name}, the input arguments are added to a queue and this function returns ``None``. This is used to "update" the same {name} with successive ``ax.{name}(...)`` calls. If ``False`` (the default) and `loc` is the same as an existing *inset* {name}, the old {name} is removed. If ``False`` and `loc` is an *outer* {name}, the {name}s are stacked. space : unit-spec, optional For outer {name}s only. The fixed space between the {name} and the subplot edge. %(units.em)s When the tight layout algorithm is active for the figure, this is adjusted automatically using `pad`. Otherwise, a suitable default is selected. pad : unit-spec, optional For outer {name}s, this is the tight layout padding between the {name} and the subplot. 
Default is :rc:`subplots.panelpad`. For inset {name}s, this is the fixed space between the axes edge and the {name}. Default is :rc:`{default}`. %(units.em)s align : {{'center', 'top', 't', 'bottom', 'b', 'left', 'l', 'right', 'r'}}, optional For outer {name}s only. How to align the {name} against the subplot edge. Default is ``'center'``. The values ``'top'`` and ``'bottom'`` are valid for left and right {name}s and ``'left'`` and ``'right'`` are valid for top and bottom {name}s. The default is always ``'center'``. """ docstring._snippet_manager['axes.legend_space'] = _space_docstring.format( name='legend', default='legend.borderaxespad' ) docstring._snippet_manager['axes.colorbar_space'] = _space_docstring.format( name='colorbar', default='colorbar.insetpad' ) _transform_docstring = """ transform : {'data', 'axes', 'figure'} or `~matplotlib.transforms.Transform`, optional The transform used to interpret the bounds. Can be a `~matplotlib.transforms.Transform` instance or a string representing the `~matplotlib.axes.Axes.transData`, `~matplotlib.axes.Axes.transAxes`, or `~matplotlib.figure.Figure.transFigure` transforms. Default is ``'axes'``, i.e. `bounds` is in axes-relative coordinates. """ docstring._snippet_manager['axes.transform'] = _transform_docstring _inset_docstring = """ Add an inset axes. This is similar to `matplotlib.axes.Axes.inset_axes`. Parameters ---------- bounds : 4-tuple of float The (left, bottom, width, height) coordinates for the axes. %(axes.transform)s Default is to use the same projection as the current axes. %(axes.proj)s %(axes.proj_kw)s %(axes.basemap)s zorder : float, optional The `zorder <https://matplotlib.org/stable/gallery/misc/zorder_demo.html>`__ of the axes. Should be greater than the zorder of elements in the parent axes. Default is ``4``. zoom : bool, optional Whether to draw lines indicating the inset zoom using `~Axes.indicate_inset_zoom`. The line positions will automatically adjust when the parent or inset axes limits change. Default is ``True`` only if both axes are `~proplot.axes.CartesianAxes`. zoom_kw : dict, optional Passed to `~Axes.indicate_inset_zoom`. Other parameters ---------------- **kwargs Passed to `proplot.axes.Axes`. Returns ------- proplot.axes.Axes The inset axes. See also -------- Axes.indicate_inset_zoom matplotlib.axes.Axes.inset_axes matplotlib.axes.Axes.indicate_inset matplotlib.axes.Axes.indicate_inset_zoom """ _indicate_inset_docstring = """ Add indicators denoting the zoom range of the inset axes. This will replace previously drawn zoom indicators. Parameters ---------- %(artist.patch)s zorder : float, optional The `zorder <https://matplotlib.org/stable/gallery/misc/zorder_demo.html>`__ of the indicators. Should be greater than the zorder of elements in the parent axes. Default is ``3.5``. Other parameters ---------------- **kwargs Passed to `~matplotlib.patches.Patch`. Note ---- This command must be called from the inset axes rather than the parent axes. It is called automatically when ``zoom=True`` is passed to `~Axes.inset_axes` and whenever the axes are drawn (so the line positions always track the axis limits even if they are later changed). 
See also -------- matplotlib.axes.Axes.indicate_inset matplotlib.axes.Axes.indicate_inset_zoom """ docstring._snippet_manager['axes.inset'] = _inset_docstring docstring._snippet_manager['axes.indicate_inset'] = _indicate_inset_docstring _panel_loc_docstring = """ ========== ===================== Location Valid keys ========== ===================== left ``'left'``, ``'l'`` right ``'right'``, ``'r'`` bottom ``'bottom'``, ``'b'`` top ``'top'``, ``'t'`` ========== ===================== """ _panel_docstring = """ Add a panel axes. Parameters ---------- side : str, optional The panel location. Valid location keys are as follows. %(axes.panel_loc)s width : unit-spec, optional The panel width. Default is :rc:`subplots.panelwidth`. %(units.in)s space : unit-spec, optional The fixed space between the panel and the subplot edge. %(units.em)s When the tight layout algorithm is active for the figure, this is adjusted automatically using `pad`. Otherwise, a suitable default is selected. pad : unit-spec, optional The tight layout padding between the panel and the subplot. %(units.em)s share : bool, optional Whether to enable axis sharing between the *x* and *y* axes of the main subplot and the panel long axes for each panel in the stack. Sharing between the panel short axis and other panel short axes is determined by figure-wide `sharex` and `sharey` settings. Other parameters ---------------- **kwargs Passed to `proplot.axes.CartesianAxes`. Returns ------- proplot.axes.CartesianAxes The panel axes. """ docstring._snippet_manager['axes.panel_loc'] = _panel_loc_docstring docstring._snippet_manager['axes.panel'] = _panel_docstring _axes_format_docstring = """ title : str, optional The axes title. abc : bool or str, optional The "a-b-c" subplot label style. Must contain the character ``a`` or ``A``, for example ``'a.'``, or ``'A'``. If ``True`` then the default style of ``'a'`` is used. The ``a`` or ``A`` is replaced with the alphabetic character matching the `~Axes.number`. If `~Axes.number` is greater than 26, the characters loop around to a, ..., z, aa, ..., zz, aaa, ..., zzz, etc. abcloc, titleloc : str, optional Strings indicating the location for the a-b-c label and main title. The following locations are valid (defaults are :rc:`abc.loc` and :rc:`title.loc`): .. _title_table: ======================== ============================ Location Valid keys ======================== ============================ center above axes ``'center'``, ``'c'`` left above axes ``'left'``, ``'l'`` right above axes ``'right'``, ``'r'`` lower center inside axes ``'lower center'``, ``'lc'`` upper center inside axes ``'upper center'``, ``'uc'`` upper right inside axes ``'upper right'``, ``'ur'`` upper left inside axes ``'upper left'``, ``'ul'`` lower left inside axes ``'lower left'``, ``'ll'`` lower right inside axes ``'lower right'``, ``'lr'`` ======================== ============================ abcborder, titleborder : bool, optional Whether to draw a white border around titles and a-b-c labels positioned inside the axes. This can help them stand out on top of artists plotted inside the axes. Defaults are :rc:`abc.border` and :rc:`title.border`. abcbbox, titlebbox : bool, optional Whether to draw a white bbox around titles and a-b-c labels positioned inside the axes. This can help them stand out on top of artists plotted inside the axes. Defaults are :rc:`abc.bbox` and :rc:`title.bbox`. abc_kw, title_kw : dict-like, optional Additional settings used to update the a-b-c label and title with ``text.update()``. 
titlepad : float, optional The padding for the inner and outer titles and a-b-c labels in arbitrary units (default is points). Default is :rc:`title.pad`. titleabove : bool, optional Whether to try to put outer titles and a-b-c labels above panels, colorbars, or legends that are above the axes. Default is :rc:`title.above`. abctitlepad : float, optional The horizontal padding between the a-b-c label and title when they are in the same location. Default is :rc:`abc.titlepad`. ltitle, ctitle, rtitle, ultitle, uctitle, urtitle, lltitle, lctitle, lrtitle \ : str, optional Shorthands for the below keywords. lefttitle, centertitle, righttitle, upperlefttitle, uppercentertitle, upperrighttitle, \ lowerlefttitle, lowercentertitle, lowerrighttitle : str, optoinal Additional titles in specific positions. This works as an alternative to the ``ax.format(title='Title', titleloc=loc)`` workflow and permits adding more than one title-like label for a single axes. a, alpha, fc, facecolor, ec, edgecolor, lw, linewidth, ls, linestyle : optional Additional settings applied to the background patch, and their shorthands. Defaults are :rcraw:`axes.alpha`, :rcraw:`axes.facecolor`, :rcraw:`axes.edgecolor`, :rcraw:`axes.linewidth`, and ``'-'``, respectively. """ _figure_format_docstring = """ rowlabels, collabels, llabels, tlabels, rlabels, blabels Aliases for `leftlabels` and `toplabels`, and for `leftlabels`, `toplabels`, `rightlabels`, and `bottomlabels`, respectively. leftlabels, toplabels, rightlabels, bottomlabels : sequence of str, optional Labels for the subplots lying along the left, top, right, and bottom edges of the figure. The length of each list must match the number of subplots along the corresponding edge. leftlabelpad, toplabelpad, rightlabelpad, bottomlabelpad : float, optional The padding between the labels and the axes content in arbitrary units (default is points). Defaults are :rcraw:`leftlabel.pad`, :rcraw:`toplabel.pad`, :rcraw:`rightlabel.pad`, and :rcraw:`bottomlabel.pad` leftlabels_kw, toplabels_kw, rightlabels_kw, bottomlabels_kw : dict-like, optional Additional settings used to update the labels with ``text.update()``. figtitle Alias for `suptitle`. suptitle : str, optional The figure "super" title, centered between the left edge of the lefmost column of subplots and the right edge of the rightmost column of subplots, and automatically offset above figure titles. This is an improvement on matplotlib's "super" title, which just centers the text between figure edges. suptitlepad : float, optional The padding between the super title and the axes content in arbitrary units (default is points). Default is :rcraw:`suptitle.pad`. suptitle_kw : optional Additional settings used to update the super title with ``text.update()``. includepanels : bool, optional Whether to include panels when aligning figure "super titles" along the top of the subplot grid and when aligning the `spanx` *x* axis labels and `spany` *y* axis labels along the sides of the subplot grid. Default is ``False``. mathtext_fallback : bool or str, optional Apply this :rc:`mathtext.fallback` value when drawing the figure. If ``True`` or string, unavailable glyphs are replaced with a glyph from a fallback font (Computer Modern by default). Otherwise, they are replaced with the "¤" dummy character. For details see this `mathtext tutorial \ <https://matplotlib.org/stable/tutorials/text/mathtext.html#custom-fonts>`__. """ _rc_init_docstring = """ Remaining keyword arguments are passed to `matplotlib.axes.Axes`. 
""" _rc_format_docstring = """ rc_mode : int, optional The context mode passed to `~proplot.config.Configurator.context`. rc_kw : dict-like, optional An alternative to passing extra keyword arguments. See below. **kwargs {}Keyword arguments that match the name of an `~proplot.config.rc` setting are passed to `proplot.config.Configurator.context` and used to update the axes. If the setting name has "dots" you can simply omit the dots. For example, ``abc='A.'`` modifies the :rcraw:`abc` setting, ``titleloc='left'`` modifies the :rcraw:`title.loc` setting, ``gridminor=True`` modifies the :rcraw:`gridminor` setting, and ``gridbelow=True`` modifies the :rcraw:`grid.below` setting. Many of the keyword arguments documented above are internally applied by retrieving settings passed to `~proplot.config.Configurator.context`. """ docstring._snippet_manager['rc.init'] = _rc_format_docstring.format(_rc_init_docstring.strip()) docstring._snippet_manager['rc.format'] = _rc_format_docstring.format('') docstring._snippet_manager['axes.format'] = _axes_format_docstring docstring._snippet_manager['figure.format'] = _figure_format_docstring _colorbar_args_docstring = """ mappable : mappable, sequence of artist, sequence of color-spec, or colormap-spec There are four options here: 1. A mappable object. Basically, any object with a ``get_cmap`` method, like the objects returned by `~matplotlib.axes.Axes.contourf` and `~matplotlib.axes.Axes.pcolormesh`. 2. A sequence of matplotlib artists. Any object with a ``get_color`` method will do, like `~matplotlib.lines.Line2D` instances. A colormap will be generated from the colors of these objects, and colorbar levels will be selected using `values`. If `values` is ``None``, we try to infer them by converting the handle labels returned by `~matplotlib.artist.Artist.get_label` to `float`. Otherwise, it is set to ``np.linspace(0, 1, len(mappable))``. 3. A sequence of hex strings, color string names, or RGB tuples. A colormap will be generated from these colors, and colorbar levels will be selected using `values`. If `values` is ``None``, it is set to ``np.linspace(0, 1, len(mappable))``. 4. A `~matplotlib.colors.Colormap` instance. In this case, a colorbar will be drawn using this colormap and with levels determined by `values`. If `values` is ``None``, it is set to ``np.linspace(0, 1, cmap.N)``. values : sequence of float or str, optional Ignored if `mappable` is a mappable object. This maps each color or plot handle in the `mappable` list to numeric values, from which a colormap and normalizer are constructed. These can also be strings, in which case the list indices are used for tick locations and the strings are applied as tick labels. """ _colorbar_kwargs_docstring = """ orientation : {None, 'horizontal', 'vertical'}, optional The colorbar orientation. By default this depends on the "side" of the subplot or figure where the colorbar is drawn. Inset colorbars are always horizontal. norm : norm-spec, optional Ignored if `values` is ``None``. The normalizer for converting `values` to colormap colors. Passed to `~proplot.constructor.Norm`. norm_kw : dict-like, optional The normalizer settings. Passed to `~proplot.constructor.Norm`. label, title : str, optional The colorbar label. The `title` keyword is also accepted for consistency with `~matplotlib.axes.Axes.legend`. reverse : bool, optional Whether to reverse the direction of the colorbar. This is done automatically when descending levels are used with `~proplot.colors.DiscreteNorm`. 
rotation : float, optional The tick label rotation. Default is ``0``. grid, edges, drawedges : bool, optional Whether to draw level dividers (i.e., gridlines) between each distinct color. Default is :rc:`colorbar.grid`. extend : {'neither', 'both', 'min', 'max'}, optional Direction for drawing colorbar "extensions" (i.e. color keys for out-of-bounds data on the end of the colorbar). Default behavior is to use the value of `extend` passed to the plotting command or use ``'neither'`` if the value is unknown. extendfrac : float, optional The length of the colorbar "extensions" relative to the length of the colorbar. This is a native matplotlib `~matplotlib.figure.Figure.colorbar` keyword. extendsize : unit-spec, optional The length of the colorbar "extensions" in physical units. Default is :rc:`colorbar.insetextend` for inset colorbars and :rc:`colorbar.extend` for outer colorbars. %(units.em)s extendrect : bool, optional Whether to draw colorbar "extensions" as rectangles. Default is ``False`` (i.e. extensions are drawn as triangles). locator, ticks : locator-spec, optional Used to determine the colorbar tick positions. Passed to the `~proplot.constructor.Locator` constructor function. locator_kw : dict-like, optional Keyword arguments passed to `matplotlib.ticker.Locator` class. minorlocator, minorticks As with `locator`, `ticks` but for the minor ticks. minorlocator_kw As with `locator_kw`, but for the minor ticks. maxn : int, optional The maximum number of `~proplot.colors.DiscreteNorm` levels that should be assigned major ticks. Ignored if `locator` was passed. Default depends on the colorbar and tick label size. The name `maxn` was inspired by `~matplotlib.ticker.MaxNLocator`. maxn_minor As with `maxn` but for minor ticks. Ignored if `minorlocator` was passed. format, formatter, ticklabels : formatter-spec, optional The tick label format. Passed to the `~proplot.constructor.Formatter` constructor function. formatter_kw : dict-like, optional Keyword arguments passed to `matplotlib.ticker.Formatter` class. frame, frameon : bool, optional For inset colorbars only. Indicates whether to draw a "frame", just like `~matplotlib.axes.Axes.legend`. Default is :rc:`colorbar.frameon`. tickminor : bool, optional Whether to add minor ticks using `~matplotlib.colorbar.ColorbarBase.minorticks_on`. tickloc, ticklocation : {'bottom', 'top', 'left', 'right'}, optional Where to draw tick marks on the colorbar. Default is toward the outside of the subplot for outer colorbars and ``'bottom'`` for inset colorbars. tickdir, tickdirection : {'out', 'in', 'inout'}, optional Direction of major and minor colorbar ticks. Default is :rc:`tick.dir`. ticklen : unit-spec, optional Major tick lengths for the colorbar ticks. Default is :rc:`tick.len`. ticklenratio : float, optional Relative scaling of `ticklen` used to determine minor tick lengths. Default is :rc:`tick.lenratio`. tickwidth : unit-spec, optional Major tick widths for the colorbar ticks. Default is `linewidth` or :rc:`tick.width` if `linewidth` was not passed. tickwidthratio : float, optional Relative scaling of `tickwidth` used to determine minor tick widths. Default is :rc:`tick.widthratio`. ticklabelcolor, ticklabelsize, ticklabelweight : optional The font color, size, and weight for colorbar tick labels. Defaults are :rc:`tick.labelcolor`, :rc:`tick.labelsize`, :rc:`tick.labelweight`. labelloc, labellocation : {'bottom', 'top', 'left', 'right'} The colorbar label location. Inherits from `tickloc` by default. 
Default is toward the outside of the subplot for outer colorbars and ``'bottom'`` for inset colorbars. labelcolor, labelsize, labelweight : optional The font color, size, and weight for the colorbar label. Defaults are :rc:`label.color`, :rc:`label.size`, and :rc:`label.weight`. a, alpha, framealpha, fc, facecolor, framecolor, ec, edgecolor, ew, edgewidth : optional For inset colorbars only. Controls the transparency and color of the frame. Defaults are :rc:`colorbar.framealpha` and :rc:`colorbar.framecolor`. lw, linewidth, c, color : optional Controls the line width and edge color for both the colorbar outline and the level dividers. %(axes.edgefix)s rasterize : bool, optional Whether to rasterize the colorbar solids. The matplotlib default is ``True`` but we change this to :rcraw:`colorbar.rasterize` because rasterization can cause misalignment between `edges` and the level patches. **kwargs Passed to `~matplotlib.figure.Figure.colorbar`. """ _edgefix_docstring = """ edgefix : bool or float, optional Whether to fix the common issue where white lines appear between adjacent patches in saved vector graphics (this can slow down figure rendering). See this `stackoverflow post <https://stackoverflow.com/q/27092991/4970632>`__ for a demonstration of the problem. Default is :rc:`edgefix`. If ``True``, a small default linewidth is used to cover up the white lines. If float (e.g. ``edgefix=0.5``), this specific linewidth is used to cover up the white lines. This feature is automatically disabled when the patches have transparency. """ docstring._snippet_manager['axes.edgefix'] = _edgefix_docstring docstring._snippet_manager['axes.colorbar_args'] = _colorbar_args_docstring docstring._snippet_manager['axes.colorbar_kwargs'] = _colorbar_kwargs_docstring _legend_args_docstring = """ handles : list of artist, optional List of matplotlib artists, or a list of lists of artist instances (see the `center` keyword). If ``None``, artists with valid labels are retrieved automatically. If the object is a `~matplotlib.contour.ContourSet`, the ``legend_elements`` method is used to pair the collection or contour set label with the central artist in the list (generally giving the central colormap color if the object is controlled with a colormap). labels : list of str, optional A matching list of string labels or ``None`` placeholders, or a matching list of lists (see the `center` keyword). Wherever ``None`` appears in the list (or if no labels were passed at all), labels are retrieved by calling `~matplotlib.artist.Artist.get_label` on each `~matplotlib.artist.Artist` in the handle list. If a handle consists of a tuple group of artists, labels are inferred from the artists in the tuple. If there are multiple unique labels in the tuple group of artists, the tuple group is expanded into unique legend entries. Otherwise, the tuple group elements are drawn on top of eachother. For details on matplotlib's legend handlers, including tuple groups, see the matplotlib `legend guide \ <https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html>`__. """ _legend_kwargs_docstring = """ frame, frameon : bool, optional Toggles the legend frame. For centered-row legends, a frame independent from matplotlib's built-in legend frame is created. ncol, ncols : int, optional The number of columns. `ncols` is an alias, added for consistency with `~matplotlib.pyplot.subplots`. order : {'C', 'F'}, optional Whether legend handles are drawn in row-major (``'C'``) or column-major (``'F'``) order. 
Analagous to `numpy.array` ordering. Default is ``'F'``. center : bool, optional Whether to center each legend row individually. If ``True``, we draw successive single-row legends stacked on top of each other. If ``None``, we infer this setting from `handles`. By default, `center` is set to ``True`` if `handles` is a list of lists (each sublist is used as a row in the legend). alphabetize : bool, optional Whether to alphabetize the legend entries according to the legend labels. Default is ``False``. title, label : str, optional The legend title. The `label` keyword is also accepted, for consistency with `~matplotlib.figure.Figure.colorbar`. fontsize, fontweight, fontcolor : optional The font size, weight, and color for the legend text. Font size is interpreted by `~proplot.utils.units`. The default font size is :rcraw:`legend.fontsize`. titlefontsize, titlefontweight, titlefontcolor : optional The font size, weight, and color for the legend title. Font size is interpreted by `~proplot.utils.units`. The default size is `fontsize`. borderpad, borderaxespad, handlelength, handleheight, handletextpad, \ labelspacing, columnspacing : unit-spec, optional Various matplotlib `~matplotlib.axes.Axes.legend` spacing arguments. %(units.em)s a, alpha, framealpha, fc, facecolor, framecolor, ec, edgecolor, ew, edgewidth : optional The opacity, face color, edge color, and edge width for the legend frame. Defaults are :rc:`legend.framealpha`, :rc:`legend.facecolor`, :rc:`legend.edgecolor` and :rc:`axes.linewidth`. c, color, lw, linewidth, m, marker, ls, linestyle, dashes, ms, markersize : optional Properties used to override the legend handles. For example, for a legend describing variations in line style ignoring variations in color, you might want to use ``color='black'``. handle_kw : dict-like, optional Additional properties used to override legend handles, e.g. ``handle_kw={'edgecolor': 'black'}``. Only line properties can be passed as keyword arguments. handler_map : dict-like, optional A dictionary mapping instances or types to a legend handler. This `handler_map` updates the default handler map found at `matplotlib.legend.Legend.get_legend_handler_map`. **kwargs Passed to `~matplotlib.axes.Axes.legend`. 
""" docstring._snippet_manager['axes.legend_args'] = _legend_args_docstring docstring._snippet_manager['axes.legend_kwargs'] = _legend_kwargs_docstring class Axes(maxes.Axes): _name = None _name_aliases = () def __repr__(self): ax = self._get_topmost_axes() try: nrows, ncols, num1, num2 = ax.get_subplotspec()._get_subplot_geometry() params = {'index': (num1, num2)} except (IndexError, ValueError, AttributeError): left, bottom, width, height = np.round(self._position.bounds, 2) params = {'left': left, 'bottom': bottom, 'size': (width, height)} if ax.number: params['number'] = ax.number name = type(self).__name__ if self._inset_parent: name = re.sub('Axes(Subplot)?', 'AxesInset', name) params['bounds'] = tuple(np.round(self._inset_bounds, 2)) if self._altx_parent or self._alty_parent: name = re.sub('Axes(Subplot)?', 'AxesTwin', name) params['axis'] = 'x' if self._altx_parent else 'y' if self._panel_side: name = re.sub('Axes(Subplot)?', 'AxesPanel', name) params['side'] = self._panel_side if self._name in ('cartopy', 'basemap'): name = name.replace('_' + self._name.title(), 'Geo') params['backend'] = self._name params = ', '.join(f'{key}={value!r}' for key, value in params.items()) return f'{name}({params})' def __str__(self): return self.__repr__() @docstring._snippet_manager def __init__(self, *args, **kwargs): ss = kwargs.pop('_subplot_spec', None) number = kwargs.pop('number', None) autoshare = kwargs.pop('autoshare', None) autoshare = _not_none(autoshare, True) rc_kw, rc_mode = _pop_rc(kwargs) kw_format = _pop_props(kwargs, 'patch') if 'zorder' in kw_format: kwargs['zorder'] = kw_format.pop('zorder') for cls, sig in self._format_signatures.items(): if isinstance(self, cls): kw_format.update(_pop_params(kwargs, sig)) super().__init__(*args, **kwargs) self._active_cycle = rc['axes.prop_cycle'] self._auto_format = None self._abc_border_kwargs = {} self._abc_loc = None self._abc_title_pad = rc['abc.titlepad'] self._title_above = rc['title.above'] self._title_border_kwargs = {} self._title_loc = None self._title_pad = rc['title.pad'] self._title_pad_current = None self._altx_parent = None self._alty_parent = None self._inset_parent = None self._inset_bounds = None self._inset_zoom = False self._inset_zoom_artists = None self._panel_hidden = False self._panel_parent = None self._panel_share = False self._panel_sharex_group = False self._panel_sharey_group = False self._panel_side = None self._tight_bbox = None self.xaxis.isDefault_minloc = True self.yaxis.isDefault_minloc = True self._legend_dict = {} self._colorbar_dict = {} d = self._panel_dict = {} d['left'] = [] d['right'] = [] d['bottom'] = [] d['top'] = [] d = self._title_dict = {} kw = {'zorder': 3.5, 'transform': self.transAxes} d['abc'] = self.text(0, 0, '', **kw) d['left'] = self._left_title d['center'] = self.title d['right'] = self._right_title d['upper left'] = self.text(0, 0, '', va='top', ha='left', **kw) d['upper center'] = self.text(0, 0.5, '', va='top', ha='center', **kw) d['upper right'] = self.text(0, 1, '', va='top', ha='right', **kw) d['lower left'] = self.text(0, 0, '', va='bottom', ha='left', **kw) d['lower center'] = self.text(0, 0.5, '', va='bottom', ha='center', **kw) d['lower right'] = self.text(0, 1, '', va='bottom', ha='right', **kw) self._number = None if number: self.number = number if ss is not None: self.set_subplotspec(ss) if autoshare: self._auto_share() self.format(rc_kw=rc_kw, rc_mode=1, skip_figure=True, **kw_format) @staticmethod def _axisbelow_to_zorder(axisbelow): if axisbelow is True: zorder = 0.5 
elif axisbelow is False: zorder = 2.5 elif axisbelow in ('line', 'lines'): zorder = 1.5 else: raise ValueError(f'Unexpected axisbelow value {axisbelow!r}.') return zorder def _get_share_axes(self, x, panels=False): if not isinstance(self, maxes.SubplotBase): return [self] y = 'y' if x == 'x' else 'x' idx = 0 if x == 'x' else 1 argfunc = np.argmax if x == 'x' else np.argmin irange = self._range_subplotspec(x) axs = self.figure._iter_axes(hidden=False, children=False, panels=panels) axs = [ax for ax in axs if ax._range_subplotspec(x) == irange] axs = list({self, *axs}) pax = axs.pop(argfunc([ax._range_subplotspec(y)[idx] for ax in axs])) return [pax, *axs] def _get_span_axes(self, side, panels=False): if side not in ('left', 'right', 'bottom', 'top'): raise ValueError(f'Invalid side {side!r}.') if not isinstance(self, maxes.SubplotBase): return [self] x, y = 'xy' if side in ('left', 'right') else 'yx' idx = 0 if side in ('left', 'top') else 1 coord = self._range_subplotspec(x)[idx] axs = self.figure._iter_axes(hidden=False, children=False, panels=panels) axs = [ax for ax in axs if ax._range_subplotspec(x)[idx] == coord] or [self] out = [] for ax in axs: other = getattr(ax, '_share' + y) if other and other._panel_parent: ax = other out.append(ax) return out def _get_topmost_axes(self): for _ in range(5): self = self._axes or self self = self._altx_parent or self self = self._inset_parent or self self = self._panel_parent or self return self @staticmethod
MIT License
mega-data-lab/spectrallda
rand_svd.py
rand_svd
python
def rand_svd(docs, alpha0, k, docs_m1=None, n_iter=1, n_partitions=1): n_docs, vocab_size = docs.shape assert n_docs >= 1 and vocab_size >= 1 if docs_m1 is not None: assert docs_m1.ndim == 1 and vocab_size == docs_m1.shape[0] assert alpha0 > 0 assert k >= 1 assert n_iter >= 0 assert n_partitions >= 1 k_aug = np.min([k + 5, vocab_size]) test_x = np.random.randn(vocab_size, k_aug) if docs_m1 is None: docs_m1 = moment1(docs, n_partitions=n_partitions) for _ in range(2 * n_iter + 1): prod_test = prod_m2_x(docs, test_x, alpha0, docs_m1=docs_m1, n_partitions=n_partitions) test_x, _ = scipy.linalg.qr(prod_test, mode='economic') prod_test = prod_m2_x(docs, test_x, alpha0, n_partitions=n_partitions) prod_test *= alpha0 * (alpha0 + 1) svd_q, svd_s, _ = scipy.linalg.svd(prod_test.T.dot(prod_test)) return np.sqrt(svd_s)[:k], test_x.dot(svd_q)[:, :k]
Randomised SVD in local mode Perform Randomised SVD on scaled M2. PARAMETERS ----------- docs : n_docs-by-vocab_size array or csr_matrix Entire collection of word count vectors. alpha0 : float Sum of Dirichlet prior parameter. k : int Rank for the truncated SVD, >= 1. docs_m1: length-vocab_size array, optional M1 of the entire collection of word count vectors. n_iter: int, optional Number of iterations for the Krylov method, >= 0, 1 by default. n_partitions: int, optional Number of partitions, >= 1, 1 by default. RETURNS ----------- eigval : length-k array Top k eigenvalues of scaled M2. eigvec : vocab_size-by-k array Top k eigenvectors of scaled M2.
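A hedged sketch with a synthetic corpus; the counts are random and only meant to exercise the documented shapes, and the import assumes the script is run from the repository root.

import numpy as np
from rand_svd import rand_svd

rng = np.random.RandomState(0)
docs = rng.randint(0, 5, size=(200, 50))   # 200 documents over a 50-word vocabulary
eigval, eigvec = rand_svd(docs, alpha0=1.0, k=10, n_iter=2)
assert eigval.shape == (10,) and eigvec.shape == (50, 10)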
https://github.com/mega-data-lab/spectrallda/blob/3f1dfd6130718ca31fcb9247cd636d11e9143791/rand_svd.py#L8-L67
import numpy as np import scipy.linalg from cumulants import moment1, prod_m2_x
Apache License 2.0
msu-mlsys-lab/arch2vec
search_methods/reinforce_darts.py
Env.step
python
def step(self, action): dist = torch.norm(self.features - action.cpu(), dim=1) knn = (-1 * dist).topk(dist.shape[0]) min_dist, min_idx = knn.values, knn.indices count = 0 while True: if len(self.visited) == dist.shape[0]: print("CANNOT FIND IN THE DATASET!") exit() if min_idx[count].item() not in self.visited: self.visited[min_idx[count].item()] = True break count += 1 return self.features[min_idx[count].item()], self.genotype[min_idx[count].item()]
action: a 1 x dim embedding tensor proposed by the policy; self.features: an N x dim tensor of dataset embeddings
https://github.com/msu-mlsys-lab/arch2vec/blob/ea01b0cf1295305596ee3c05fa1b6eb14e303512/search_methods/reinforce_darts.py#L78-L96
import os import sys sys.path.insert(0, os.getcwd()) import argparse import json import random import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from models.pretraining_nasbench101 import configs from utils.utils import load_json, preprocessing, one_hot_darts from preprocessing.gen_isomorphism_graphs import process from models.model import Model from torch.distributions import MultivariateNormal from darts.cnn.train_search import Train class Env(object): def __init__(self, name, seed, cfg, data_path=None, save=False): self.name = name self.seed = seed self.model = Model(input_dim=args.input_dim, hidden_dim=args.hidden_dim, latent_dim=args.dim, num_hops=args.hops, num_mlp_layers=args.mlps, dropout=args.dropout, **cfg['GAE']).cuda() self.dir_name = 'pretrained/dim-{}'.format(args.dim) if not os.path.exists(os.path.join(self.dir_name, 'model-darts.pt')): exit() self.model.load_state_dict(torch.load(os.path.join(self.dir_name, 'model-darts.pt').format(args.dim))['model_state']) self.visited = {} self.features = [] self.genotype = [] self.embedding = {} self._reset(data_path, save) def _reset(self, data_path, save): if not save: print("extract arch2vec on DARTS search space ...") dataset = load_json(data_path) print("length of the dataset: {}".format(len(dataset))) self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt') if os.path.exists(self.f_path): print('{} is already saved'.format(self.f_path)) exit() print('save to {}'.format(self.f_path)) counter = 0 self.model.eval() for k, v in dataset.items(): adj = torch.Tensor(v[0]).unsqueeze(0).cuda() ops = torch.Tensor(one_hot_darts(v[1])).unsqueeze(0).cuda() adj, ops, prep_reverse = preprocessing(adj, ops, **cfg['prep']) with torch.no_grad(): x, _ = self.model._encoder(ops, adj) self.embedding[counter] = {'feature': x.squeeze(0).mean(dim=0).cpu(), 'genotype': process(v[2])} print("{}/{}".format(counter, len(dataset))) counter += 1 torch.save(self.embedding, self.f_path) print("finished arch2vec extraction") exit() else: self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt') print("load arch2vec from: {}".format(self.f_path)) self.embedding = torch.load(self.f_path) for ind in range(len(self.embedding)): self.features.append(self.embedding[ind]['feature']) self.genotype.append(self.embedding[ind]['genotype']) self.features = torch.stack(self.features, dim=0) print('loading finished. pretrained embeddings shape: {}'.format(self.features.shape)) def get_init_state(self): rand_indices = random.randint(0, self.features.shape[0]) self.visited[rand_indices] = True return self.features[rand_indices], self.genotype[rand_indices]
Apache License 2.0
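As a minimal illustration of the nearest-unvisited-neighbour lookup that `Env.step` performs above, the sketch below uses plain PyTorch; the `features` matrix, `visited` dict and query vector are invented placeholders, not data from the repository.

# Minimal sketch (assumed data): pick the closest not-yet-visited row of a
# feature matrix to a query vector, mirroring the distance/top-k logic above.
import torch

features = torch.randn(10, 4)   # hypothetical N x dim embedding table
visited = {3: True}             # indices already returned earlier

query = torch.randn(4)
dist = torch.norm(features - query, dim=1)
order = torch.argsort(dist)     # ascending distance, same effect as (-dist).topk(N)
for idx in order.tolist():
    if idx not in visited:
        visited[idx] = True
        nearest = features[idx]
        break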
mosen/salt-osx
_states/bluetooth.py
__virtual__
python
def __virtual__():
    return 'bluetooth' if salt.utils.platform.is_darwin() else False
Only load on OSX
https://github.com/mosen/salt-osx/blob/818d4ae89bb2853b28999a8ddb883c0fe1b1a657/_states/bluetooth.py#L10-L12
import salt.utils
MIT License
identitypython/pysaml2
src/saml2/assertion.py
Assertion.apply_policy
python
def apply_policy(self, sp_entity_id, policy):
    policy.acs = self.acs
    ava = policy.restrict(self, sp_entity_id)
    for key, val in list(self.items()):
        if key in ava:
            self[key] = ava[key]
        else:
            del self[key]
    return ava
Apply policy to the assertion I'm representing :param sp_entity_id: The SP entity ID :param policy: The policy :return: The resulting AVA after the policy is applied
https://github.com/identitypython/pysaml2/blob/f12ade09aa89211c42b7dc6ed94728f8aa69cffb/src/saml2/assertion.py#L845-L862
import copy import importlib import logging import re import six from warnings import warn as _warn from saml2 import saml from saml2 import xmlenc from saml2.attribute_converter import from_local, ac_factory from saml2.attribute_converter import get_local_name from saml2.s_utils import assertion_factory from saml2.s_utils import factory from saml2.s_utils import sid from saml2.s_utils import MissingValue from saml2.saml import NAME_FORMAT_URI from saml2.time_util import instant from saml2.time_util import in_a_while logger = logging.getLogger(__name__) def _filter_values(vals, vlist=None, must=False): if not vlist: return vals if vals is None: return vals if isinstance(vlist, six.string_types): vlist = [vlist] res = [] for val in vlist: if val in vals: res.append(val) if must: if res: return res else: raise MissingValue("Required attribute value missing") else: return res def _match(attr, ava): if attr in ava: return attr _la = attr.lower() if _la in ava: return _la for _at in ava.keys(): if _at.lower() == _la: return _at return None def filter_on_attributes(ava, required=None, optional=None, acs=None, fail_on_unfulfilled_requirements=True): def _match_attr_name(attr, ava): local_name = None for a in ['name_format', 'friendly_name']: _val = attr.get(a) if _val: if a == 'name_format': local_name = get_local_name(acs, attr['name'], _val) else: local_name = _val break if local_name: _fn = _match(local_name, ava) else: _fn = None if not _fn: _fn = _match(attr["name"], ava) return _fn def _apply_attr_value_restrictions(attr, res, must=False): try: values = [av["text"] for av in attr["attribute_value"]] except KeyError: values = [] try: res[_fn].extend(_filter_values(ava[_fn], values)) except KeyError: val = _filter_values(ava[_fn], values) res[_fn] = val if val is not None else [] return _filter_values(ava[_fn], values, must) res = {} if required is None: required = [] for attr in required: _fn = _match_attr_name(attr, ava) if _fn: _apply_attr_value_restrictions(attr, res, True) elif fail_on_unfulfilled_requirements: desc = "Required attribute missing: '%s'" % (attr["name"]) raise MissingValue(desc) if optional is None: optional = [] for attr in optional: _fn = _match_attr_name(attr, ava) if _fn: _apply_attr_value_restrictions(attr, res, False) return res def filter_on_demands(ava, required=None, optional=None): if required is None: required = {} lava = dict([(k.lower(), k) for k in ava.keys()]) for attr, vals in required.items(): attr = attr.lower() if attr in lava: if vals: for val in vals: if val not in ava[lava[attr]]: raise MissingValue( "Required attribute value missing: %s,%s" % (attr, val)) else: raise MissingValue("Required attribute missing: %s" % (attr,)) if optional is None: optional = {} oka = [k.lower() for k in required.keys()] oka.extend([k.lower() for k in optional.keys()]) for attr in lava.keys(): if attr not in oka: del ava[lava[attr]] return ava def filter_on_wire_representation(ava, acs, required=None, optional=None): acsdic = dict([(ac.name_format, ac) for ac in acs]) if required is None: required = [] if optional is None: optional = [] res = {} for attr, val in ava.items(): done = False for req in required: try: _name = acsdic[req.name_format]._to[attr] if _name == req.name: res[attr] = val done = True except KeyError: pass if done: continue for opt in optional: try: _name = acsdic[opt.name_format]._to[attr] if _name == opt.name: res[attr] = val break except KeyError: pass return res def filter_attribute_value_assertions(ava, attribute_restrictions=None): if not 
attribute_restrictions: return ava for attr, vals in list(ava.items()): _attr = attr.lower() try: _rests = attribute_restrictions[_attr] except KeyError: del ava[attr] else: if _rests is None: continue if isinstance(vals, six.string_types): vals = [vals] rvals = [] for restr in _rests: for val in vals: if restr.match(val): rvals.append(val) if rvals: ava[attr] = list(set(rvals)) else: del ava[attr] return ava def restriction_from_attribute_spec(attributes): restr = {} for attribute in attributes: restr[attribute.name] = {} for val in attribute.attribute_value: if not val.text: restr[attribute.name] = None break else: restr[attribute.name] = re.compile(val.text) return restr def compile(restrictions): for who, spec in restrictions.items(): spec = spec or {} entity_categories = spec.get("entity_categories", []) ecs = [] for cat in entity_categories: try: _mod = importlib.import_module(cat) except ImportError: _mod = importlib.import_module("saml2.entity_category.%s" % cat) _ec = {} for key, items in _mod.RELEASE.items(): alist = [k.lower() for k in items] _only_required = getattr(_mod, "ONLY_REQUIRED", {}).get(key, False) _ec[key] = (alist, _only_required) ecs.append(_ec) spec["entity_categories"] = ecs or None attribute_restrictions = spec.get("attribute_restrictions") or {} _attribute_restrictions = {} for key, values in attribute_restrictions.items(): lkey = key.lower() values = [] if not values else values _attribute_restrictions[lkey] = ( [re.compile(value) for value in values] or None ) spec["attribute_restrictions"] = _attribute_restrictions or None return restrictions class Policy(object): def __init__(self, restrictions=None, mds=None): self.metadata_store = mds self._restrictions = self.setup_restrictions(restrictions) logger.debug("policy restrictions: %s", self._restrictions) self.acs = [] def setup_restrictions(self, restrictions=None): if restrictions is None: return None restrictions = copy.deepcopy(restrictions) restrictions = compile(restrictions) return restrictions def get(self, attribute, sp_entity_id, default=None): if not self._restrictions: return default ra_info = ( self.metadata_store.registration_info(sp_entity_id) or {} if self.metadata_store is not None else {} ) ra_entity_id = ra_info.get("registration_authority") sp_restrictions = self._restrictions.get(sp_entity_id) ra_restrictions = self._restrictions.get(ra_entity_id) default_restrictions = ( self._restrictions.get("default") or self._restrictions.get("") ) restrictions = ( sp_restrictions if sp_restrictions is not None else ra_restrictions if ra_restrictions is not None else default_restrictions if default_restrictions is not None else {} ) attribute_restriction = restrictions.get(attribute) restriction = ( attribute_restriction if attribute_restriction is not None else default ) return restriction def get_nameid_format(self, sp_entity_id): return self.get("nameid_format", sp_entity_id, saml.NAMEID_FORMAT_TRANSIENT) def get_name_form(self, sp_entity_id): return self.get("name_form", sp_entity_id, default=NAME_FORMAT_URI) def get_lifetime(self, sp_entity_id): return self.get("lifetime", sp_entity_id, {"hours": 1}) def get_attribute_restrictions(self, sp_entity_id): return self.get("attribute_restrictions", sp_entity_id) def get_fail_on_missing_requested(self, sp_entity_id): return self.get("fail_on_missing_requested", sp_entity_id, default=True) def get_sign(self, sp_entity_id): return self.get("sign", sp_entity_id, default=[]) def get_entity_categories(self, sp_entity_id, mds=None, required=None): if mds is 
not None: warn_msg = ( "The mds parameter for saml2.assertion.Policy.get_entity_categories " "is deprecated; " "instead, initialize the Policy object setting the mds param." ) logger.warning(warn_msg) _warn(warn_msg, DeprecationWarning) def post_entity_categories(maps, sp_entity_id=None, mds=None, required=None): restrictions = {} required_friendly_names = [ d.get('friendly_name') or get_local_name( acs=self.acs, attr=d['name'], name_format=d['name_format'] ) for d in (required or []) ] required = [ friendly_name.lower() for friendly_name in required_friendly_names ] if mds: ecs = mds.entity_categories(sp_entity_id) for ec_map in maps: for key, (atlist, only_required) in ec_map.items(): if key == "": attrs = atlist elif isinstance(key, tuple): if only_required: attrs = [a for a in atlist if a in required] else: attrs = atlist for _key in key: if _key not in ecs: attrs = [] break elif key in ecs: if only_required: attrs = [a for a in atlist if a in required] else: attrs = atlist else: attrs = [] for attr in attrs: restrictions[attr] = None else: restrictions[''] = None return restrictions sentinel = object() result1 = self.get("entity_categories", sp_entity_id, default=sentinel) if result1 is sentinel: return {} result2 = post_entity_categories( result1, sp_entity_id=sp_entity_id, mds=(mds or self.metadata_store), required=required, ) return result2 def not_on_or_after(self, sp_entity_id): return in_a_while(**self.get_lifetime(sp_entity_id)) def filter(self, ava, sp_entity_id, mdstore=None, required=None, optional=None): if mdstore is not None: warn_msg = ( "The mdstore parameter for saml2.assertion.Policy.filter " "is deprecated; " "instead, initialize the Policy object setting the mds param." ) logger.warning(warn_msg) _warn(warn_msg, DeprecationWarning) if not self.acs: self.acs = ac_factory() subject_ava = ava.copy() _ent_rest = self.get_entity_categories(sp_entity_id, mds=mdstore, required=required) if _ent_rest: subject_ava = filter_attribute_value_assertions(subject_ava, _ent_rest) elif required or optional: logger.debug("required: %s, optional: %s", required, optional) subject_ava = filter_on_attributes( subject_ava, required, optional, self.acs, self.get_fail_on_missing_requested(sp_entity_id), ) _attr_rest = self.get_attribute_restrictions(sp_entity_id) subject_ava = filter_attribute_value_assertions(subject_ava, _attr_rest) return subject_ava or {} def restrict(self, ava, sp_entity_id, metadata=None): if metadata is not None: warn_msg = ( "The metadata parameter for saml2.assertion.Policy.restrict " "is deprecated and ignored; " "instead, initialize the Policy object setting the mds param." 
) logger.warning(warn_msg) _warn(warn_msg, DeprecationWarning) metadata_store = metadata or self.metadata_store spec = ( metadata_store.attribute_requirement(sp_entity_id) or {} if metadata_store else {} ) return self.filter( ava, sp_entity_id, required=spec.get("required"), optional=spec.get("optional"), ) def conditions(self, sp_entity_id): return factory( saml.Conditions, not_before=instant(), not_on_or_after=self.not_on_or_after(sp_entity_id), audience_restriction=[ factory( saml.AudienceRestriction, audience=[factory(saml.Audience, text=sp_entity_id)], ), ], ) class EntityCategories(object): pass def _authn_context_class_ref(authn_class, authn_auth=None): cntx_class = factory(saml.AuthnContextClassRef, text=authn_class) if authn_auth: return factory(saml.AuthnContext, authn_context_class_ref=cntx_class, authenticating_authority=factory( saml.AuthenticatingAuthority, text=authn_auth)) else: return factory(saml.AuthnContext, authn_context_class_ref=cntx_class) def _authn_context_decl(decl, authn_auth=None): return factory(saml.AuthnContext, authn_context_decl=decl, authenticating_authority=factory( saml.AuthenticatingAuthority, text=authn_auth)) def _authn_context_decl_ref(decl_ref, authn_auth=None): return factory(saml.AuthnContext, authn_context_decl_ref=decl_ref, authenticating_authority=factory( saml.AuthenticatingAuthority, text=authn_auth)) def authn_statement(authn_class=None, authn_auth=None, authn_decl=None, authn_decl_ref=None, authn_instant="", subject_locality="", session_not_on_or_after=None): if authn_instant: _instant = instant(time_stamp=authn_instant) else: _instant = instant() if authn_class: res = factory( saml.AuthnStatement, authn_instant=_instant, session_index=sid(), session_not_on_or_after=session_not_on_or_after, authn_context=_authn_context_class_ref( authn_class, authn_auth)) elif authn_decl: res = factory( saml.AuthnStatement, authn_instant=_instant, session_index=sid(), session_not_on_or_after=session_not_on_or_after, authn_context=_authn_context_decl(authn_decl, authn_auth)) elif authn_decl_ref: res = factory( saml.AuthnStatement, authn_instant=_instant, session_index=sid(), session_not_on_or_after=session_not_on_or_after, authn_context=_authn_context_decl_ref(authn_decl_ref, authn_auth)) else: res = factory( saml.AuthnStatement, authn_instant=_instant, session_index=sid(), session_not_on_or_after=session_not_on_or_after) if subject_locality: res.subject_locality = saml.SubjectLocality(text=subject_locality) return res def do_subject_confirmation(not_on_or_after, key_info=None, **treeargs): _sc = factory(saml.SubjectConfirmation, **treeargs) _scd = _sc.subject_confirmation_data _scd.not_on_or_after = not_on_or_after if _sc.method == saml.SCM_HOLDER_OF_KEY: _scd.add_extension_element(key_info) return _sc def do_subject(not_on_or_after, name_id, **farg): specs = farg['subject_confirmation'] if isinstance(specs, list): res = [do_subject_confirmation(not_on_or_after, **s) for s in specs] else: res = [do_subject_confirmation(not_on_or_after, **specs)] return factory(saml.Subject, name_id=name_id, subject_confirmation=res) class Assertion(dict): def __init__(self, dic=None): dict.__init__(self, dic) self.acs = [] def construct(self, sp_entity_id, attrconvs, policy, issuer, farg, authn_class=None, authn_auth=None, authn_decl=None, encrypt=None, sec_context=None, authn_decl_ref=None, authn_instant="", subject_locality="", authn_statem=None, name_id=None, session_not_on_or_after=None): _name_format = policy.get_name_form(sp_entity_id) attr_statement = 
saml.AttributeStatement( attribute=from_local(attrconvs, self, _name_format) ) if encrypt == "attributes": for attr in attr_statement.attribute: enc = sec_context.encrypt(text="%s" % attr) encd = xmlenc.encrypted_data_from_string(enc) encattr = saml.EncryptedAttribute(encrypted_data=encd) attr_statement.encrypted_attribute.append(encattr) attr_statement.attribute = [] conds = policy.conditions(sp_entity_id) if authn_statem: _authn_statement = authn_statem elif authn_auth or authn_class or authn_decl or authn_decl_ref: _authn_statement = authn_statement(authn_class, authn_auth, authn_decl, authn_decl_ref, authn_instant, subject_locality, session_not_on_or_after=session_not_on_or_after) else: _authn_statement = None subject = do_subject( policy.not_on_or_after(sp_entity_id), name_id, **farg['subject'] ) _ass = assertion_factory(issuer=issuer, conditions=conds, subject=subject) if _authn_statement: _ass.authn_statement = [_authn_statement] if not attr_statement.empty(): _ass.attribute_statement = [attr_statement] return _ass
Apache License 2.0
humio/python-humio
src/humiolib/HumioClient.py
HumioClient._get_users
python
def _get_users(self):
    endpoint = "users"
    return self.webcaller.call_rest("get", endpoint, headers=self._default_user_headers)
Gets users registered to the Humio instance. :return: Response to the web request as a JSON string :rtype: str
https://github.com/humio/python-humio/blob/30adce199322c17724ca0100fffac8a29cf0ced9/src/humiolib/HumioClient.py#L371-L379
import requests import json from humiolib.WebCaller import WebCaller, WebStreamer from humiolib.QueryJob import StaticQueryJob, LiveQueryJob from humiolib.HumioExceptions import HumioConnectionException class BaseHumioClient(): def __init__(self, base_url): self.base_url = base_url self.webcaller = WebCaller(self.base_url) @classmethod def _from_saved_state(cls, state_dump): data = json.loads(state_dump) instance = cls(**data) return instance @staticmethod def _create_unstructured_data_object(messages, parser=None, fields=None, tags=None): return dict( (k, v) for k, v in [ ("messages", messages), ("type", parser), ("fields", fields), ("tags", tags), ] if v is not None ) class HumioClient(BaseHumioClient): def __init__( self, repository, user_token, base_url="http://localhost:3000", ): super().__init__(base_url) self.repository = repository self.user_token = user_token @property def _default_user_headers(self): return { "Content-Type": "application/json", "Authorization": "Bearer {}".format(self.user_token), } @property def _state(self): return json.dumps( { "user_token": self.user_token, "repository": self.repository, "base_url": self.base_url, } ) def _streaming_query( self, query_string, start=None, end=None, is_live=None, timezone_offset_minutes=None, arguments=None, raw_data=None, media_type="application/x-ndjson", **kwargs ): if raw_data is None: raw_data = {} endpoint = "dataspaces/{}/query".format(self.repository) headers = self._default_user_headers headers["Accept"] = media_type headers.update(kwargs.pop("headers", {})) data = dict( (k, v) for k, v in [ ("queryString", query_string), ("start", start), ("end", end), ("isLive", is_live), ("timeZoneOffsetMinutes", timezone_offset_minutes), ("arguments", arguments), ] if v is not None ) data.update(raw_data) connection = self.webcaller.call_rest( "post", endpoint, data=json.dumps(data), headers=headers, stream=True, **kwargs ) return WebStreamer(connection) def streaming_query( self, query_string, start=None, end=None, is_live=None, timezone_offset_minutes=None, arguments=None, raw_data=None, **kwargs ): media_type = "application/x-ndjson" encoding = "utf-8" res = self._streaming_query( query_string=query_string, start=start, end=end, is_live=is_live, timezone_offset_minutes=timezone_offset_minutes, arguments=arguments, media_type=media_type, raw_data=raw_data, **kwargs ) for event in res: yield json.loads(event.decode(encoding)) def create_queryjob( self, query_string, start=None, end=None, is_live=None, timezone_offset_minutes=None, arguments=None, raw_data=None, **kwargs ): endpoint = "dataspaces/{}/queryjobs".format(self.repository) headers = self._default_user_headers headers.update(kwargs.pop("headers", {})) data = dict( (k, v) for k, v in [ ("queryString", query_string), ("start", start), ("end", end), ("isLive", is_live), ("timeZoneOffsetMinutes", timezone_offset_minutes), ("arguments", arguments), ] if v is not None ) if raw_data is not None: data.update(raw_data) query_id = self.webcaller.call_rest( "post", endpoint, data=json.dumps(data), headers=headers, **kwargs ).json()['id'] if is_live: return LiveQueryJob(query_id, self.base_url, self.repository, self.user_token) else: return StaticQueryJob(query_id, self.base_url, self.repository, self.user_token) def _ingest_json_data(self, json_elements=None, **kwargs): if json_elements is None: json_elements = [] headers = self._default_user_headers headers.update(kwargs.pop("headers", {})) endpoint = "dataspaces/{}/ingest".format(self.repository) return self.webcaller.call_rest( 
"post", endpoint, data=json.dumps(json_elements), headers=headers, **kwargs ) ingest_json_data = WebCaller.response_as_json(_ingest_json_data) def _ingest_messages( self, messages=None, parser=None, fields=None, tags=None, **kwargs ): if messages is None: messages = [] headers = self._default_user_headers headers.update(kwargs.pop("headers", {})) endpoint = "dataspaces/{}/ingest-messages".format(self.repository) obj = self._create_unstructured_data_object( messages, parser=parser, fields=fields, tags=tags ) return self.webcaller.call_rest( "post", endpoint, data=json.dumps([obj]), headers=headers, **kwargs ) ingest_messages = WebCaller.response_as_json(_ingest_messages) def _get_status(self, **kwargs): endpoint = "status" return self.webcaller.call_rest("get", endpoint, **kwargs) get_status = WebCaller.response_as_json(_get_status)
Apache License 2.0
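A hedged usage sketch for the `_get_users` method above, following the constructor shown in the record's context; the repository name and token below are placeholders, not real credentials.

# Hypothetical usage sketch; values are placeholders.
from humiolib.HumioClient import HumioClient

client = HumioClient(
    repository="sandbox",            # assumed repository name
    user_token="secret-user-token",  # assumed token, not a real credential
    base_url="http://localhost:3000",
)
users = client._get_users()          # GET on the "users" REST endpoint shown above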
pycon/pycon
pycon/finaid/utils.py
email_address
python
def email_address():
    return getattr(settings, "FINANCIAL_AID_EMAIL", DEFAULT_EMAIL_ADDRESS)
Return the email address that financial aid emails should come from, applications should send emails to with questions, etc. Default is ``pycon-aid@python.org``. Override by setting FINANCIAL_AID_EMAIL.
https://github.com/pycon/pycon/blob/666c1444f1b550d539cf6e087c83e749eb2ebf0a/pycon/finaid/utils.py#L56-L64
from django.conf import settings from django.core.mail.message import EmailMessage from django.template import Context, Template from django.template.loader import get_template from pycon.finaid.models import FinancialAidApplication, FinancialAidApplicationPeriod, STATUS_WITHDRAWN from . import models DEFAULT_EMAIL_ADDRESS = "pycon-aid@python.org" def applications_open(): return FinancialAidApplicationPeriod.open() def is_reviewer(user): return user.has_perm("finaid.review_financial_aid") def has_application(user): if not hasattr(user, "_has_finaid_application"): try: getattr(user, 'financial_aid') except (FinancialAidApplication.DoesNotExist, AttributeError): user._has_finaid_application = False else: user._has_finaid_application = True return user._has_finaid_application def offer_accepted(user): return has_application(user) and user.financial_aid.status == models.STATUS_ACCEPTED def has_withdrawn_application(user): if not has_application(user): return False return user.financial_aid.status == STATUS_WITHDRAWN
BSD 3-Clause New or Revised License
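To illustrate the settings-based override described in the docstring above, here is a small sketch using Django's test utility; it assumes a configured Django project and the address used is illustrative.

# Sketch: override the sender address via settings (inside a configured Django project).
from django.test import override_settings
from pycon.finaid.utils import email_address

with override_settings(FINANCIAL_AID_EMAIL="aid@example.org"):
    assert email_address() == "aid@example.org"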
levlaz/circleci.py
circleci/api.py
Api._download
python
def _download(self, url, destdir=None, filename=None):
    if not filename:
        filename = url.split('/')[-1]
    if not destdir:
        destdir = os.getcwd()
    endpoint = "{0}?circle-token={1}".format(url, self.token)
    resp = requests.get(endpoint, stream=True)
    path = "{0}/{1}".format(destdir, filename)
    with open(path, 'wb') as f:
        for chunk in resp.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    return path
File download helper. :param url: The URL to the artifact. :param destdir: The optional destination directory. Defaults to None (current working directory). :param filename: Optional file name. Defaults to the name of the artifact file.
https://github.com/levlaz/circleci.py/blob/95063ae69d2f19dcca8162c038f45e2d536e45ea/circleci/api.py#L678-L703
import os import requests from requests.auth import HTTPBasicAuth from circleci.error import BadKeyError, BadVerbError, InvalidFilterError class Api(): def __init__(self, token, url='https://circleci.com/api/v1.1'): self.token = token self.url = url def get_user_info(self): resp = self._request('GET', 'me') return resp def get_projects(self): resp = self._request('GET', 'projects') return resp def follow_project(self, username, project, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/follow'.format( vcs_type, username, project ) resp = self._request('POST', endpoint) return resp def get_project_build_summary( self, username, project, limit=30, offset=0, status_filter=None, branch=None, vcs_type='github'): valid_filters = [None, 'completed', 'successful', 'failed', 'running'] if status_filter not in valid_filters: raise InvalidFilterError(status_filter, 'status') if branch: endpoint = 'project/{0}/{1}/{2}/tree/{3}?limit={4}&offset={5}&filter={6}'.format( vcs_type, username, project, branch, limit, offset, status_filter ) else: endpoint = 'project/{0}/{1}/{2}?limit={3}&offset={4}&filter={5}'.format( vcs_type, username, project, limit, offset, status_filter ) resp = self._request('GET', endpoint) return resp def get_recent_builds(self, limit=30, offset=0): endpoint = 'recent-builds?limit={0}&offset={1}'.format(limit, offset) resp = self._request('GET', endpoint) return resp def get_build_info(self, username, project, build_num, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/{3}'.format( vcs_type, username, project, build_num ) resp = self._request('GET', endpoint) return resp def get_artifacts(self, username, project, build_num, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/{3}/artifacts'.format( vcs_type, username, project, build_num ) resp = self._request('GET', endpoint) return resp def get_latest_artifact( self, username, project, branch=None, status_filter='completed', vcs_type='github'): valid_filters = ['completed', 'successful', 'failed'] if status_filter not in valid_filters: raise InvalidFilterError(status_filter, 'artifacts') if branch: endpoint = 'project/{0}/{1}/{2}/latest/artifacts?branch={3}&filter={4}'.format( vcs_type, username, project, branch, status_filter ) else: endpoint = 'project/{0}/{1}/{2}/latest/artifacts?filter={3}'.format( vcs_type, username, project, status_filter ) resp = self._request('GET', endpoint) return resp def download_artifact(self, url, destdir=None, filename=None): resp = self._download(url, destdir, filename) return resp def retry_build(self, username, project, build_num, ssh=False, vcs_type='github'): if ssh: endpoint = 'project/{0}/{1}/{2}/{3}/ssh'.format( vcs_type, username, project, build_num ) else: endpoint = 'project/{0}/{1}/{2}/{3}/retry'.format( vcs_type, username, project, build_num ) resp = self._request('POST', endpoint) return resp def cancel_build(self, username, project, build_num, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/{3}/cancel'.format( vcs_type, username, project, build_num ) resp = self._request('POST', endpoint) return resp def add_ssh_user(self, username, project, build_num, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/{3}/ssh-users'.format( vcs_type, username, project, build_num ) resp = self._request('POST', endpoint) return resp def trigger_build( self, username, project, branch='master', revision=None, tag=None, parallel=None, params=None, vcs_type='github'): data = { 'revision': revision, 'tag': tag, 'parallel': parallel, } if params: data.update(params) endpoint = 
'project/{0}/{1}/{2}/tree/{3}'.format( vcs_type, username, project, branch ) resp = self._request('POST', endpoint, data=data) return resp def add_ssh_key( self, username, project, ssh_key, vcs_type='github', hostname=None): endpoint = 'project/{0}/{1}/{2}/ssh-key'.format( vcs_type, username, project ) params = { "hostname": hostname, "private_key": ssh_key } resp = self._request('POST', endpoint, data=params) return resp def list_checkout_keys(self, username, project, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/checkout-key'.format( vcs_type, username, project ) resp = self._request('GET', endpoint) return resp def create_checkout_key(self, username, project, key_type, vcs_type='github'): valid_types = ['deploy-key', 'github-user-key'] if key_type not in valid_types: raise BadKeyError(key_type) params = { "type": key_type } endpoint = 'project/{0}/{1}/{2}/checkout-key'.format( vcs_type, username, project ) resp = self._request('POST', endpoint, data=params) return resp def get_checkout_key(self, username, project, fingerprint, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/checkout-key/{3}'.format( vcs_type, username, project, fingerprint ) resp = self._request('GET', endpoint) return resp def delete_checkout_key(self, username, project, fingerprint, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/checkout-key/{3}'.format( vcs_type, username, project, fingerprint ) resp = self._request('DELETE', endpoint) return resp def get_test_metadata(self, username, project, build_num, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/{3}/tests'.format( vcs_type, username, project, build_num ) resp = self._request('GET', endpoint) return resp def list_envvars(self, username, project, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/envvar'.format( vcs_type, username, project ) resp = self._request('GET', endpoint) return resp def add_envvar(self, username, project, name, value, vcs_type='github'): params = { "name": name, "value": value } endpoint = 'project/{0}/{1}/{2}/envvar'.format( vcs_type, username, project ) resp = self._request('POST', endpoint, data=params) return resp def get_envvar(self, username, project, name, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/envvar/{3}'.format( vcs_type, username, project, name ) resp = self._request('GET', endpoint) return resp def delete_envvar(self, username, project, name, vcs_type='github'): endpoint = 'project/{0}/{1}/{2}/envvar/{3}'.format( vcs_type, username, project, name ) resp = self._request('DELETE', endpoint) return resp def _request(self, verb, endpoint, data=None): headers = { 'Accept': 'application/json', } auth = HTTPBasicAuth(self.token, '') resp = None request_url = "{0}/{1}".format(self.url, endpoint) if verb == 'GET': resp = requests.get(request_url, auth=auth, headers=headers) elif verb == 'POST': resp = requests.post(request_url, auth=auth, headers=headers, json=data) elif verb == 'DELETE': resp = requests.delete(request_url, auth=auth, headers=headers) else: raise BadVerbError(verb) resp.raise_for_status() return resp.json()
MIT License
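A hedged usage sketch of the public `download_artifact` wrapper around `_download` above; the token and artifact URL are placeholders, and in practice the URL would come from `get_artifacts()`.

# Hypothetical usage sketch; the token and URL are placeholders.
from circleci.api import Api

api = Api("my-circle-token")
artifact_url = "https://example.com/artifacts/output.txt"   # would come from get_artifacts()
saved_path = api.download_artifact(artifact_url, destdir="/tmp", filename="output.txt")
print(saved_path)   # /tmp/output.txt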
mrknow/filmkodi
plugin.video.mrknow/mylib/_pydev_imps/_pydev_SimpleXMLRPCServer.py
SimpleXMLRPCDispatcher.system_methodHelp
python
def system_methodHelp(self, method_name):
    method = None
    if self.funcs.has_key(method_name):
        method = self.funcs[method_name]
    elif self.instance is not None:
        if hasattr(self.instance, '_methodHelp'):
            return self.instance._methodHelp(method_name)
        elif not hasattr(self.instance, '_dispatch'):
            try:
                method = resolve_dotted_attribute(
                    self.instance,
                    method_name,
                    self.allow_dotted_names
                )
            except AttributeError:
                pass
    if method is None:
        return ""
    else:
        try:
            import pydoc
        except ImportError:
            return ""
        else:
            return pydoc.getdoc(method)
system.methodHelp('add') => "Adds two integers together" Returns a string containing documentation for the specified method.
https://github.com/mrknow/filmkodi/blob/0162cde9ae25ddbf4a69330948714833ff2f78c9/plugin.video.mrknow/mylib/_pydev_imps/_pydev_SimpleXMLRPCServer.py#L320-L354
try: True False except: import __builtin__ setattr(__builtin__, 'True', 1) setattr(__builtin__, 'False', 0) from _pydev_imps import _pydev_xmlrpclib as xmlrpclib from _pydev_imps._pydev_xmlrpclib import Fault from _pydev_imps import _pydev_SocketServer as SocketServer from _pydev_imps import _pydev_BaseHTTPServer as BaseHTTPServer import sys import os try: import fcntl except ImportError: fcntl = None def resolve_dotted_attribute(obj, attr, allow_dotted_names=True): if allow_dotted_names: attrs = attr.split('.') else: attrs = [attr] for i in attrs: if i.startswith('_'): raise AttributeError( 'attempt to access private attribute "%s"' % i ) else: obj = getattr(obj, i) return obj def list_public_methods(obj): return [member for member in dir(obj) if not member.startswith('_') and callable(getattr(obj, member))] def remove_duplicates(lst): u = {} for x in lst: u[x] = 1 return u.keys() class SimpleXMLRPCDispatcher: def __init__(self, allow_none, encoding): self.funcs = {} self.instance = None self.allow_none = allow_none self.encoding = encoding def register_instance(self, instance, allow_dotted_names=False): self.instance = instance self.allow_dotted_names = allow_dotted_names def register_function(self, function, name=None): if name is None: name = function.__name__ self.funcs[name] = function def register_introspection_functions(self): self.funcs.update({'system.listMethods' : self.system_listMethods, 'system.methodSignature' : self.system_methodSignature, 'system.methodHelp' : self.system_methodHelp}) def register_multicall_functions(self): self.funcs.update({'system.multicall' : self.system_multicall}) def _marshaled_dispatch(self, data, dispatch_method=None): try: params, method = xmlrpclib.loads(data) if dispatch_method is not None: response = dispatch_method(method, params) else: response = self._dispatch(method, params) response = (response,) response = xmlrpclib.dumps(response, methodresponse=1, allow_none=self.allow_none, encoding=self.encoding) except Fault, fault: response = xmlrpclib.dumps(fault, allow_none=self.allow_none, encoding=self.encoding) except: response = xmlrpclib.dumps( xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)), encoding=self.encoding, allow_none=self.allow_none, ) return response def system_listMethods(self): methods = self.funcs.keys() if self.instance is not None: if hasattr(self.instance, '_listMethods'): methods = remove_duplicates( methods + self.instance._listMethods() ) elif not hasattr(self.instance, '_dispatch'): methods = remove_duplicates( methods + list_public_methods(self.instance) ) methods.sort() return methods def system_methodSignature(self, method_name): return 'signatures not supported'
Apache License 2.0
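The record above is a Python 2 vendored copy of the standard-library dispatcher; the sketch below uses the Python 3 standard-library equivalent only to show what `system.methodHelp` returns for a registered function.

# Illustrative sketch with the stdlib equivalent (Python 3 names).
from xmlrpc.server import SimpleXMLRPCServer

def add(a, b):
    """Adds two integers together"""
    return a + b

server = SimpleXMLRPCServer(("localhost", 0), allow_none=True)   # port 0: pick any free port
server.register_function(add)
server.register_introspection_functions()
print(server.system_methodHelp("add"))   # -> "Adds two integers together"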
jeeftor/alfredtoday
src/GoogleInterface.py
GoogleInterface._get_credentials
python
def _get_credentials(self):
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    self.credential_path = os.path.join(credential_dir, 'calendar-alfred-today.json')
    self.store = oauth2client.file.Storage(self.credential_path)
    self.credentials = self.store.get()
    return self.credentials
Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential.
https://github.com/jeeftor/alfredtoday/blob/f6e2c2228caa71015e654e1fdbf552e2ca4f90ad/src/GoogleInterface.py#L78-L99
import sys, os sys.path = [os.path.join(os.path.dirname(__file__), 'lib')] + sys.path from workflow import Workflow3 import oauth2client import googleapiclient from apiclient import discovery from oauth2client import client from oauth2client import tools from lib.apiclient import discovery from lib.oauth2client import client, tools import httplib2 from settings import get_http_kw_args class AuthorizationNeededException(Exception): pass class NoCalendarException(Exception): pass class GoogleInterface(object): def __init__(self, wf): self.HTTP_INSTANCE = httplib2.Http(**get_http_kw_args(wf)) self.log = wf.logger self.wf = wf self.CLIENT_SECRET_FILE = 'client_secret.json' self.APPLICATION_NAME = 'Alfred Today' self.SCOPES = 'https://www.googleapis.com/auth/calendar.readonly' credentials = self._get_credentials() if not credentials or credentials.invalid: self._authorize_google() http = credentials.authorize(self.HTTP_INSTANCE) self.service = discovery.build('calendar', 'v3', http=http) def __check_auth_status(self): if not self.credentials or self.credentials.invalid: raise AuthorizationNeededException() def _authorize_google(self): try: self.__check_auth_status() except AuthorizationNeededException: flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES) flow.user_agent = self.APPLICATION_NAME flags = None self.credentials = tools.run_flow_wf(self.wf, flow, self.store, flags, http=self.HTTP_INSTANCE) self.wf.logger.info("Storing credentials to [%s]", self.credential_path)
MIT License
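A minimal sketch of the oauth2client storage pattern that `_get_credentials` relies on above; the directory and file name here are illustrative, not the workflow's real paths.

# Minimal sketch of the oauth2client Storage pattern (illustrative paths).
import os
import oauth2client.file

credential_dir = os.path.join(os.path.expanduser('~'), '.credentials')
if not os.path.exists(credential_dir):
    os.makedirs(credential_dir)
store = oauth2client.file.Storage(os.path.join(credential_dir, 'example.json'))
credentials = store.get()            # None or invalid => an OAuth2 flow is needed
needs_auth = credentials is None or credentials.invalid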
philipbergen/zero
py/zero/__init__.py
Zero.sock
python
def sock(self):
    if not hasattr(self, '_sock'):
        self._sock = self.setup.ctx.socket(self.setup.method)
        if self.setup.linger:
            self._sock.setsockopt(zmq.LINGER, self.setup.linger)
        for subsc in self.setup.subscriptions:
            self._sock.setsockopt(zmq.SUBSCRIBE, subsc)
        if self.setup.bind:
            self._sock.bind(self.setup.point)
        else:
            self._sock.connect(self.setup.point)
        self.setup.debug('Created ZMQ socket %r', self)
    return self._sock
Returns the zmq.Socket, lazy initialization.
https://github.com/philipbergen/zero/blob/83c1487ae64d1a95e5529f875f02bf88c425c09f/py/zero/__init__.py#L388-L401
import sys import zmq import json from itertools import izip __all__ = ('ZeroSetup', 'Zero') class UnsupportedZmqMethod(Exception): class ZeroSetup(object): def __init__(self, method, point): self._method = method.lower() self.bind = self.method not in (zmq.SUB, zmq.PUSH, zmq.REQ) self.debugging(False) self._point = point self.linger = 1000 self.block = True self.output = sys.stderr @staticmethod def argv(argv=sys.argv[1:]): from docopt import docopt from itertools import count args = docopt(__doc__, argv) method = [meth for meth in ('push', 'req', 'rep', 'pub', 'pull', 'sub') if args[meth]] if not method: raise UnsupportedZmqMethod('Unsupported ZMQ method', '?', args) method = method[0] setup = ZeroSetup(method, args['<socket>']).debugging(args['--dbg']) if args['--bind']: setup.binding(True) if args['--connect']: setup.binding(False) if args['<subscription>']: setup.subscribing(args['<subscription>']) setup.args = args setup.debug('%r', setup) msgloop = None if setup.transmits: if args['-']: msgloop = ZeroSetup.iter_stdin() else: msgloop = args['<message>'] elif args['-n'] == 'inf': msgloop = count() else: msgloop = xrange(int(args['-n'])) return setup, msgloop @staticmethod def iter_stdin(): from json import loads def liner(): res = sys.stdin.readline() if not res: return None return loads(res.rstrip()) return iter(liner, None) def __repr__(self): res = ['ZeroSetup(%r, %r)' % (self._method, self._point)] res.append('.binding(%s)' % self.bind) if self.debug == self._debug_on: res.append('.debugging()') if not self.block: res.append('.nonblocking()') if self.subscriptions: res.append('.subscribing(%r)' % self.subscriptions) return ''.join(res) __str__ = __repr__ def _print(self, pre, col, s, *args, **kwarg): from textwrap import wrap if args: s = s % args if kwarg: s = s % kwarg for i in wrap(s, 95): self.output.write(pre + i + '\n') self.output.flush() def _debug_off(self, s, *args, **kwarg): pass def _debug_on(self, s, *args, **kwarg): from ansicolor import blu self._print('> ', blu, s, *args, **kwarg) def warn(self, s, *args, **kwarg): from ansicolor import yel self._print('>> ', yel, s, *args, **kwarg) def err(self, s, *args, **kwarg): from ansicolor import red, bld self._print('>>> ', lambda x: bld(red(x)), s, *args, **kwarg) def binding(self, val=True): self.bind = val return self def subscribing(self, heads): if self.method == zmq.SUB: self._filters = list(iter(heads)) else: raise ValueError('Only zmq.SUB accepts subscriptions (%r)' % self) return self def debugging(self, val=True): self.debug = self._debug_on if val else self._debug_off return self def nonblocking(self, val=True): self.block = not val return self def opposite(self): res = eval(repr(self)) if res._method == 'pub': res._method = 'sub' elif res._method == 'sub': res._method = 'pub' elif res._method == 'pull': res._method = 'push' elif res._method == 'push': res._method = 'pull' elif res._method == 'rep': res._method = 'req' elif res._method == 'req': res._method = 'rep' return res.binding(not res.bind).debugging(False) @property def subscriptions(self): if self.method == zmq.SUB: return getattr(self, '_filters', ['']) return [] @property def method(self): try: return getattr(zmq, self._method.upper()) except AttributeError: raise UnsupportedZmqMethod('Unsupported ZMQ method', self._method, {}) @property def point(self): if str(self._point)[:1] == ':': self._point = self._point[1:] try: int(self._point) if self.bind: return 'tcp://*:%s' % self._point return 'tcp://localhost:%s' % self._point except ValueError: return 
self._point @property def transmits(self): return self.method in (zmq.PUSH, zmq.PUB, zmq.REQ, zmq.REP) @property def replies(self): return self.method == zmq.REP @property def yields(self): return self.method in (zmq.PULL, zmq.SUB, zmq.REQ, zmq.REP) class Zero(object): def __init__(self, setup): self.setup = setup self.marshals() self.naptime = 0.5 if not hasattr(setup, 'ctx'): setup.ctx = zmq.Context() def __del__(self): self.close() def close(self): if hasattr(self, '_sock'): self._sock.close() del self._sock def marshals(self, encode=json.dumps, decode=json.loads): self._encode = encode self._decode = decode return self def activated(self, zerorpc): if not self.setup.yields: raise ValueError('Only setups that yield can be activated', self) if not callable(zerorpc): raise ValueError('Objects used for activation must be callable', self, zerorpc) self.rpc = zerorpc self.rpc.zero = self return self def __repr__(self): res = ['Zero(%r)' % self.setup] if self._encode != json.dumps or self._decode != json.loads: res.append('.marshals(%r, %r)' % (self._encode, self._decode)) if hasattr(self, 'rpc'): res.append('.activated(%r)' % self.rpc) return ''.join(res) __str__ = __repr__ @property
MIT License
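A hedged usage sketch of the lazily initialized socket property above, based on the `ZeroSetup`/`Zero` classes shown in the record's context; it assumes the package's `py/` directory is importable and the port number is arbitrary.

# Hedged usage sketch; port and setup are illustrative.
from zero import ZeroSetup, Zero

setup = ZeroSetup('push', 8000)      # PUSH sockets connect rather than bind here
sender = Zero(setup)
sock = sender.sock                   # first access lazily creates and connects the zmq socket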
bihealth/sodar_core
projectroles/management/commands/batchupdateroles.py
Command._invite_user
python
def _invite_user(self, email, project, role):
    logger.info(
        'Creating and sending invite to {} for role {}..'.format(
            email, role.name
        )
    )
    invite = ProjectInvite.objects.filter(
        email=email,
        project=project,
        active=True,
        date_expire__gte=timezone.now(),
    ).first()
    if invite:
        logger.info('Invite already exists for user in project')
        return
    invite = ProjectInvite.objects.create(
        email=email,
        project=project,
        role=role,
        issuer=self.issuer,
        date_expire=get_expiry_date(),
        secret=build_secret(),
    )
    self.handle_invite(invite, self.request, add_message=False)
    self.invite_count += 1
Create and send an invite for a user not yet in the system
https://github.com/bihealth/sodar_core/blob/4176f762b77fae4dfdf24d51328938b94d3a64ce/projectroles/management/commands/batchupdateroles.py#L102-L127
from email.utils import parseaddr import logging import sys from django.conf import settings from django.contrib import auth from django.core.management.base import BaseCommand from django.http import HttpRequest from django.utils import timezone from projectroles.models import ( Project, Role, RoleAssignment, ProjectInvite, SODAR_CONSTANTS, ) from projectroles.views import RoleAssignmentModifyMixin, ProjectInviteMixin from projectroles.utils import get_expiry_date, build_secret User = auth.get_user_model() logger = logging.getLogger(__name__) SITE_MODE_TARGET = SODAR_CONSTANTS['SITE_MODE_TARGET'] class MockRequest(HttpRequest): scheme = 'http' def mock_scheme(self, host): self.scheme = host.scheme def scheme(self): return self.scheme class Command(RoleAssignmentModifyMixin, ProjectInviteMixin, BaseCommand): help = 'Batch updates project roles and sends invites' roles = None owner_role = None del_role = None issuer = None update_count = 0 invite_count = 0 request = None sodar_url = None def __init__( self, stdout=None, stderr=None, no_color=False, sodar_url=None ): self.sodar_url = sodar_url super().__init__(stdout, stderr, no_color) def _make_request(self): host = settings.SODAR_API_DEFAULT_HOST request = MockRequest() request.mock_scheme(host) request.META['HTTP_HOST'] = host.hostname if host.port: request.META['HTTP_HOST'] += ':' + str(host.port) request.user = self.issuer return request def _update_role(self, project, user, role): logger.info( 'Updating role of user {} to {}..'.format(user.username, role.name) ) if user in [a.user for a in project.get_owners(inherited_only=True)]: logger.info('Skipping as user has inherited ownership') return role_as = RoleAssignment.objects.filter( project=project, user=user ).first() if role_as and role_as.role == role: logger.info('Skipping as role already exists for user') return elif role_as and role_as.role == self.owner_role: logger.warning( 'Skipping as ownership transfer is not permitted here' ) return self.modify_assignment( data={'user': user, 'role': role}, request=self.request, project=project, instance=role_as, sodar_url=self.sodar_url, ) self.update_count += 1
MIT License
forcebru/pyvm
VM/fetchLoop.py
FetchLoopMixin.run
python
def run(self: CPU32) -> int:
    pref_segments = {
        0x2E: SegmentRegs.CS,
        0x36: SegmentRegs.SS,
        0x3E: SegmentRegs.DS,
        0x26: SegmentRegs.ES,
        0x64: SegmentRegs.FS,
        0x65: SegmentRegs.GS
    }
    pref_op_size_override = {0x66, 0x67}
    pref_lock = {0xf0}
    rep = {0xf3}
    prefixes = set(pref_segments) | pref_op_size_override | pref_lock | rep
    self.running = True
    while self.running and self.eip + 1 < self.mem.size:
        overrides = []
        self.opcode = self.mem.get(self.eip, 1)
        while self.opcode in prefixes:
            overrides.append(self.opcode)
            self.eip += 1
            self.opcode = self.mem.get(self.eip, 1)
        size_override_active = False
        for ov in overrides:
            if ov == 0x66:
                if not size_override_active:
                    self.current_mode = not self.current_mode
                    size_override_active = True
                old_operand_size = self.operand_size
                self.operand_size = self.sizes[self.current_mode]
                logger.debug('Operand size override: %d -> %d', old_operand_size, self.operand_size)
            elif ov == 0x67:
                if not size_override_active:
                    self.current_mode = not self.current_mode
                    size_override_active = True
                old_address_size = self.address_size
                self.address_size = self.sizes[self.current_mode]
                logger.debug('Address size override: %d -> %d', old_address_size, self.address_size)
            elif ov in pref_segments:
                is_special = ov >> 6
                if is_special:
                    sreg_number = 4 + (ov & 1)
                else:
                    sreg_number = (ov >> 3) & 0b11
                self.mem.segment_override = sreg_number
                logger.debug('Segment override: %s', self.mem.segment_override)
            elif ov == 0xf0:
                logger.debug('LOCK prefix')
            elif ov == 0xf3:
                self.opcode = ov
                self.eip -= 1
        self.execute_opcode()
        for ov in overrides:
            if ov == 0x66:
                self.current_mode = self.default_mode
                self.operand_size = self.sizes[self.current_mode]
            elif ov == 0x67:
                self.current_mode = self.default_mode
                self.address_size = self.sizes[self.current_mode]
            elif ov in pref_segments:
                self.mem.segment_override = SegmentRegs.DS
    return self.reg.eax
Implements the basic CPU instruction cycle (https://en.wikipedia.org/wiki/Instruction_cycle). :param self: passed implicitly :return: the final value of the EAX register when the loop stops
https://github.com/forcebru/pyvm/blob/cf1838ac4d94419ce10919b4bf83eeb484ad73fe/VM/fetchLoop.py#L56-L139
import enum from .ELF import ELF32, enums from .util import SegmentRegs, MissingOpcodeError from .CPU import CPU32 import logging logger = logging.getLogger(__name__) class FetchLoopMixin: _attrs_ = 'eip', 'mem', 'reg.ebx', 'fmt', 'instr', 'sizes', 'default_mode' def execute_opcode(self: CPU32) -> None: self.eip += 1 off = 1 if self.opcode == 0x0F: op = self.mem.get_eip(self.eip, 1) self.eip += 1 self.opcode = (self.opcode << 8) | op off += 1 if __debug__: logger.debug(self.fmt, self.eip - off, self.opcode) try: impls = self.instr[self.opcode] except KeyError: ... else: for impl in impls: if impl(): return op = self.mem.get_eip(self.eip, 1) self.eip += 1 self.opcode = (self.opcode << 8) | op try: impls = self.instr[self.opcode] except KeyError: raise MissingOpcodeError(f'Opcode {self.opcode:x} is not recognized yet (at 0x{self.eip - off - 1:08x})') else: for impl in impls: if impl(): return raise NotImplementedError(f'No suitable implementation found for opcode {self.opcode:x} (@0x{self.eip - off - 1:02x})')
MIT License
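As a self-contained sketch of the segment-override decoding used inside `run()` above: prefixes 0x26/0x2E/0x36/0x3E encode ES/CS/SS/DS in bits 3-4, while 0x64/0x65 select FS/GS. The helper name below is invented, and it assumes the standard x86 segment-register numbering (ES=0 .. GS=5) that the arithmetic in the record implies.

# Hypothetical helper mirroring the prefix-to-segment-register arithmetic above.
def sreg_from_prefix(ov: int) -> int:
    if ov >> 6:                      # 0x64 / 0x65 are the "special" FS/GS prefixes
        return 4 + (ov & 1)          # FS = 4, GS = 5
    return (ov >> 3) & 0b11          # ES = 0, CS = 1, SS = 2, DS = 3

assert sreg_from_prefix(0x2E) == 1   # CS
assert sreg_from_prefix(0x65) == 5   # GS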
google/pyctr
overloads/testing/reverse_conditional_logic.py
if_stmt
python
def if_stmt(cond, body, orelse, *_):
    if cond():
        orelse()
    else:
        body()
Reverses body and orelse.
https://github.com/google/pyctr/blob/056a85cd338d2977f69483d0c735bb695666e2aa/overloads/testing/reverse_conditional_logic.py#L18-L23
Apache License 2.0
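A short usage sketch, assuming `if_stmt` from the record above is in scope: because this testing overload swaps the branches, a true condition runs the orelse callable.

# Usage sketch for the branch-reversing overload.
def body():
    print("body")

def orelse():
    print("orelse")

if_stmt(lambda: True, body, orelse)    # prints "orelse"
if_stmt(lambda: False, body, orelse)   # prints "body"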
datamllab/tods
tods/detection_algorithm/core/AutoRegOD.py
AutoRegOD.fit
python
def fit(self, X: np.array) -> object:
    X = check_array(X).astype(np.float)
    sub_matrices, self.left_inds_, self.right_inds_ = get_sub_matrices(
        X,
        window_size=self.window_size,
        step=self.step_size,
        return_numpy=True,
        flatten=True)
    sub_matrices = sub_matrices[:-1, :]
    self.left_inds_ = self.left_inds_[:-1]
    self.right_inds_ = self.right_inds_[:-1]
    self.valid_len_ = sub_matrices.shape[0]
    y_buf = np.zeros([self.valid_len_, 1])
    for i in range(self.valid_len_):
        y_buf[i] = X[i * self.step_size + self.window_size]
    self.lr_ = LinearRegression(fit_intercept=True)
    self.lr_.fit(sub_matrices, y_buf)
    self.decision_scores_ = np.absolute(
        y_buf.ravel() - self.lr_.predict(sub_matrices).ravel())
    self._process_decision_scores()
    return self
Fit detector. y is ignored in unsupervised methods. Parameters ---------- X : numpy array of shape (n_samples, n_features) The input samples. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Fitted estimator.
https://github.com/datamllab/tods/blob/f77825bc6ee865db18e3930ce93de672969282d7/tods/detection_algorithm/core/AutoRegOD.py#L59-L104
import numpy as np from sklearn.utils import check_array from sklearn.utils.validation import check_is_fitted from sklearn.linear_model import LinearRegression from .CollectiveBase import CollectiveBaseDetector from .utility import get_sub_matrices class AutoRegOD(CollectiveBaseDetector): def __init__(self, window_size, step_size=1, contamination=0.1): super(AutoRegOD, self).__init__(contamination=contamination) self.window_size = window_size self.step_size = step_size
Apache License 2.0
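To make the windowed-autoregression idea behind `AutoRegOD.fit` concrete, here is a self-contained sketch with synthetic data: each window of length w predicts the next point, and large residuals mark anomalies. The data, window length and outlier position are all invented for illustration.

# Self-contained sketch of windowed autoregression as an anomaly score.
import numpy as np
from sklearn.linear_model import LinearRegression

x = np.sin(np.linspace(0, 20, 200))
x[120] += 5.0                       # injected outlier
w = 10
windows = np.array([x[i:i + w] for i in range(len(x) - w)])
targets = x[w:]
lr = LinearRegression(fit_intercept=True).fit(windows, targets)
scores = np.abs(targets - lr.predict(windows))
print(int(np.argmax(scores)) + w)   # index near the injected outlier (~120)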
flutterwave/rave-python
rave_python/rave_card.py
Card._handleChargeResponse
python
def _handleChargeResponse(self, response, txRef, request=None):
    res = self._preliminaryResponseChecks(response, CardChargeError, txRef=txRef)
    responseJson = res["json"]
    flwRef = res["flwRef"]
    if responseJson["data"].get("authurl", "N/A") == "N/A":
        authUrl = None
    else:
        authUrl = responseJson["data"]["authurl"]
    if not (responseJson["data"].get("chargeResponseCode", None) == "00"):
        suggestedAuth = responseJson["data"].get("suggested_auth", None)
        return {"error": False, "validationRequired": True, "txRef": txRef, "flwRef": flwRef, "suggestedAuth": suggestedAuth, "authUrl": authUrl}
    else:
        return {"error": False, "status": responseJson["status"], "validationRequired": False, "txRef": txRef, "flwRef": flwRef, "suggestedAuth": None, "authUrl": authUrl}
This handles charge responses
https://github.com/flutterwave/rave-python/blob/15b6e7032de15a58265d504983850877b871959f/rave_python/rave_card.py#L16-L35
from rave_python.rave_exceptions import RaveError, IncompletePaymentDetailsError, CardChargeError, TransactionVerificationError, ServerError from rave_python.rave_payment import Payment from rave_python.rave_misc import generateTransactionReference class Card(Payment): def __init__(self, publicKey, secretKey, production, usingEnv): super(Card, self).__init__(publicKey, secretKey, production, usingEnv)
MIT License
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/image_processing/opencv.py
_get_default_classifier
python
def _get_default_classifier(dest_path):
    _LOGGER.info("Downloading default classifier")
    req = requests.get(CASCADE_URL, stream=True)
    with open(dest_path, 'wb') as fil:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                fil.write(chunk)
Download the default OpenCV classifier.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/image_processing/opencv.py#L73-L80
from datetime import timedelta import logging import requests import voluptuous as vol from homeassistant.components.image_processing import ( CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE, PLATFORM_SCHEMA, ImageProcessingEntity) from homeassistant.core import split_entity_id import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['numpy==1.14.3'] _LOGGER = logging.getLogger(__name__) ATTR_MATCHES = 'matches' ATTR_TOTAL_MATCHES = 'total_matches' CASCADE_URL = 'https://raw.githubusercontent.com/opencv/opencv/master/data/' + 'lbpcascades/lbpcascade_frontalface.xml' CONF_CLASSIFIER = 'classifier' CONF_FILE = 'file' CONF_MIN_SIZE = 'min_size' CONF_NEIGHBORS = 'neighbors' CONF_SCALE = 'scale' DEFAULT_CLASSIFIER_PATH = 'lbp_frontalface.xml' DEFAULT_MIN_SIZE = (30, 30) DEFAULT_NEIGHBORS = 4 DEFAULT_SCALE = 1.1 DEFAULT_TIMEOUT = 10 SCAN_INTERVAL = timedelta(seconds=2) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_CLASSIFIER): { cv.string: vol.Any( cv.isfile, vol.Schema({ vol.Required(CONF_FILE): cv.isfile, vol.Optional(CONF_SCALE, DEFAULT_SCALE): float, vol.Optional(CONF_NEIGHBORS, DEFAULT_NEIGHBORS): cv.positive_int, vol.Optional(CONF_MIN_SIZE, DEFAULT_MIN_SIZE): vol.Schema((int, int)) }) ) } }) def _create_processor_from_config(hass, camera_entity, config): classifier_config = config.get(CONF_CLASSIFIER) name = '{} {}'.format( config[CONF_NAME], split_entity_id(camera_entity)[1].replace('_', ' ')) processor = OpenCVImageProcessor( hass, camera_entity, name, classifier_config) return processor
MIT License
huggingface/accelerate
src/accelerate/utils.py
set_seed
python
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if is_tpu_available():
        xm.set_rng_state(seed)
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``. Args: seed (:obj:`int`): The seed to set.
https://github.com/huggingface/accelerate/blob/34a4e4ea15931bdd36543d1f446caec56f848db0/src/accelerate/utils.py#L58-L71
import importlib import os import random from collections.abc import Mapping from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import numpy as np import torch from .state import AcceleratorState, DistributedType, is_deepspeed_available, is_tpu_available if is_tpu_available(): import torch_xla.core.xla_model as xm def is_boto3_available(): return importlib.util.find_spec("boto3") is not None def is_sagemaker_available(): return importlib.util.find_spec("sagemaker") is not None if is_deepspeed_available(): from deepspeed import DeepSpeedEngine class RNGType(Enum): TORCH = "torch" CUDA = "cuda" XLA = "xla" GENERATOR = "generator" @dataclass class TensorInformation: shape: torch.Size dtype: torch.dtype
Apache License 2.0
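A brief usage sketch of `set_seed` above; it assumes the function is importable as `accelerate.utils.set_seed`, matching the file path in the record, and uses PyTorch only to show that re-seeding reproduces the same draws.

# Usage sketch: seeding before any random operation makes runs reproducible.
import torch
from accelerate.utils import set_seed

set_seed(42)
a = torch.rand(3)
set_seed(42)
b = torch.rand(3)
assert torch.equal(a, b)    # identical draws after re-seeding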
codait/graph_def_editor
graph_def_editor/util.py
copy_directory
python
def copy_directory(oldpath, newpath, overwrite=False):
    assert tf.gfile.IsDirectory(oldpath)
    items = tf.gfile.Walk(oldpath)
    for dirname, subdirs, filenames in items:
        for subdir in subdirs:
            tf.gfile.MakeDirs(os.path.join(dirname, subdir))
            full_subdir = os.path.join(dirname, subdir)
            remote_dir_path = os.path.join(newpath, full_subdir[1 + len(oldpath):])
            tf.gfile.MakeDirs(remote_dir_path)
        for filename in filenames:
            full_filename = os.path.join(dirname, filename)
            remote_file_path = os.path.join(newpath, full_filename[1 + len(oldpath):])
            tf.gfile.Copy(full_filename, remote_file_path, overwrite=overwrite)
Recursively copy a directory of files to GCS. Args: oldpath: string, bytes, or os.PathLike; a pathname of a directory. newpath: string, bytes, or os.PathLike; a pathname to which the directory will be copied. overwrite: boolean; if false, it is an error for newpath to be occupied by an existing file.
https://github.com/codait/graph_def_editor/blob/862fbe725fda2e174918b670316ce80ec8a41af6/graph_def_editor/util.py#L836-L858
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import json import re import sys if sys.version >= '3': from typing import Any, List import numpy as np from six import iteritems, string_types import tensorflow.compat.v1 as tf from graph_def_editor import base_graph, node, tensor __all__ = [ "make_list_of_op", "make_list_of_t", "get_generating_ops", "get_consuming_ops", "ControlOutputs", "placeholder_name", "make_placeholder_from_tensor", "make_placeholder_from_dtype_and_shape", "load_variables_to_tf_graph", "make_const", "make_placeholder", "make_simple_binary_op" ] _DEFAULT_PLACEHOLDER_PREFIX = "geph" def concatenate_unique(la, lb): la_set = set(la) for l in lb: if l not in la_set: la.append(l) la_set.add(l) return la class ListView(object): def __init__(self, list_): if not isinstance(list_, list): raise TypeError("Expected a list, got: {}.".format(type(list_))) self._list = list_ def __iter__(self): return iter(self._list) def __len__(self): return len(self._list) def __bool__(self): return bool(self._list) __nonzero__ = __bool__ def __getitem__(self, i): return self._list[i] def __add__(self, other): if not isinstance(other, list): other = list(other) return list(self) + other def __str__(self): return "ListView[{}]".format(self._list) def is_iterable(obj): if isinstance(obj, node.Node): return False try: _ = iter(obj) except Exception: return False return True def flatten_tree(tree, leaves=None): if leaves is None: leaves = [] if isinstance(tree, dict): for _, child in iteritems(tree): flatten_tree(child, leaves) elif is_iterable(tree): for child in tree: flatten_tree(child, leaves) else: leaves.append(tree) return leaves def transform_tree(tree, fn, iterable_type=tuple): if is_iterable(tree): if isinstance(tree, dict): res = tree.__new__(type(tree)) res.__init__( (k, transform_tree(child, fn)) for k, child in iteritems(tree)) return res elif isinstance(tree, tuple): if hasattr(tree, "_asdict"): res = tree.__new__(type(tree), **transform_tree(tree._asdict(), fn)) else: res = tree.__new__(type(tree), (transform_tree(child, fn) for child in tree)) return res elif isinstance(tree, collections.Sequence): res = tree.__new__(type(tree)) res.__init__(transform_tree(child, fn) for child in tree) return res else: return iterable_type(transform_tree(child, fn) for child in tree) else: return fn(tree) def check_graphs(*args): g = None for i, sgv in enumerate(args): if g is None and sgv.graph is not None: g = sgv.graph elif sgv.graph is not None and sgv.graph is not g: raise ValueError("Argument[{}]: Wrong graph!".format(i)) def get_unique_graph(tops, check_types=None, none_if_empty=False): if isinstance(tops, base_graph.BaseGraph): return tops if not is_iterable(tops): raise TypeError("{} is not iterable".format(type(tops))) if check_types is None: check_types = (node.Node, tensor.Tensor,) elif not is_iterable(check_types): check_types = (check_types,) g = None for op in tops: if not isinstance(op, check_types): raise TypeError("Expected a type in ({}), got: {}".format(", ".join([str( t) for t in check_types]), type(op))) if g is None: g = op.graph elif g is not op.graph: raise ValueError("Operation {} does not belong to given graph".format(op)) if g is None and not none_if_empty: raise ValueError("Can't find the unique graph of an empty list") return g def make_list_of_op(ops, check_graph=True, allow_graph=True, ignore_ts=False): if isinstance(ops, base_graph.BaseGraph): if allow_graph: return ops.nodes 
else: raise TypeError("allow_graph is False: cannot convert a gde.Graph.") else: if not is_iterable(ops): ops = [ops] if not ops: return [] if check_graph: check_types = None if ignore_ts else node.Node get_unique_graph(ops, check_types=check_types) return [op for op in ops if isinstance(op, node.Node)] def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False): if isinstance(ts, base_graph.BaseGraph): if allow_graph: return ts.tensors else: raise TypeError("allow_graph is False: cannot convert a gde.Graph.") else: if not is_iterable(ts): ts = [ts] if not ts: return [] if check_graph: check_types = None if ignore_ops else (tensor.Tensor,) get_unique_graph(ts, check_types=check_types) return [t for t in ts if isinstance(t, tensor.Tensor)] def get_generating_ops(ts): ts = make_list_of_t(ts, allow_graph=False) return [t.node for t in ts] def get_consuming_ops(ts): ts = make_list_of_t(ts, allow_graph=False) ops = [] for t in ts: for op in t.consumers(): if op not in ops: ops.append(op) return ops class ControlOutputs(object): def __init__(self, g ): if not isinstance(g, base_graph.BaseGraph): raise TypeError("Expected a gde.Graph, got: {}".format(type(g))) self._control_outputs = {} self._graph = g self._version = None self._build() def update(self): if self._version != self._graph.version: self._build() return self def _build(self): self._control_outputs.clear() for n in self._graph.nodes: for control_input in n.control_inputs: if control_input not in self._control_outputs: self._control_outputs[control_input] = [] if n not in self._control_outputs[control_input]: self._control_outputs[control_input].append(n) self._version = self._graph.version def get_all(self): return self._control_outputs def get(self, op): if op in self._control_outputs: return self._control_outputs[op] else: return () @property def graph(self): return self._graph def scope_finalize(scope): if scope and scope[-1] != "/": scope += "/" return scope def scope_dirname(scope): slash = scope.rfind("/") if slash == -1: return "" return scope[:slash + 1] def scope_basename(scope): slash = scope.rfind("/") if slash == -1: return scope return scope[slash + 1:] def placeholder_name(t=None, scope=None, prefix=_DEFAULT_PLACEHOLDER_PREFIX): if scope is not None: scope = scope_finalize(scope) if t is not None: if not isinstance(t, tensor.Tensor): raise TypeError("Expected a gde.Tensor, got: {}".format(type(t))) op_dirname = scope_dirname(t.node.name) op_basename = scope_basename(t.node.name) if scope is None: scope = op_dirname if op_basename.startswith("{}__".format(prefix)): ph_name = op_basename else: ph_name = "{}__{}_{}".format(prefix, op_basename, t.value_index) return scope + ph_name else: if scope is None: scope = "" return "{}{}".format(scope, prefix) def make_placeholder_from_tensor( g, t, scope=None, prefix=_DEFAULT_PLACEHOLDER_PREFIX ): return make_placeholder(g, dtype=t.dtype, shape=t.shape, name=placeholder_name(t, scope=scope, prefix=prefix)) def make_placeholder_from_dtype_and_shape(g, dtype, shape=None, scope=None, prefix=_DEFAULT_PLACEHOLDER_PREFIX): return make_placeholder(g, dtype=dtype, shape=shape, name=placeholder_name(scope=scope, prefix=prefix)) _INTERNAL_VARIABLE_RE = re.compile(r"^__\w+__$") def get_predefined_collection_names(): return [getattr(tf.GraphKeys, key) for key in dir(tf.GraphKeys) if not _INTERNAL_VARIABLE_RE.match(key)] def find_corresponding_elem(target, dst_graph, dst_scope="", src_scope=""): src_name = target.name if src_scope: src_scope = scope_finalize(src_scope) if not 
src_name.startswidth(src_scope): raise ValueError("{} does not start with {}".format(src_name, src_scope)) src_name = src_name[len(src_scope):] dst_name = src_name if dst_scope: dst_scope = scope_finalize(dst_scope) dst_name = dst_scope + dst_name if isinstance(target, tensor.Tensor): return dst_graph.get_tensor_by_name(dst_name) if isinstance(target, node.Node): return dst_graph[dst_name] raise TypeError("Expected gde.Tensor or gde.Node, got: {}", type(target)) def find_corresponding(targets, dst_graph, dst_scope="", src_scope=""): def func(top): return find_corresponding_elem(top, dst_graph, dst_scope, src_scope) return transform_tree(targets, func) def _python_type_to_attr_list_elem( list_value, elem, attr_name ): if isinstance(elem, string_types): list_value.s.append(tf.compat.as_bytes(elem)) elif isinstance(elem, bool): list_value.b.append(elem) elif isinstance(elem, int): list_value.i.append(elem) elif isinstance(elem, float): list_value.f.append(elem) elif isinstance(elem, tf.DType): list_value.type.append(elem.as_datatype_enum) elif isinstance(elem, tf.TensorShape): list_value.shape.add().CopyFrom(elem.as_proto()) elif isinstance(elem, np.ndarray) or isinstance(elem, list): list_value.tensor.add().CopyFrom(tf.make_tensor_proto(values=elem)) else: raise ValueError("Don't know how to convert a {} to " "tf.AttrValue.ListValue for attribute {}".format(type(elem), attr_name)) def python_type_to_attr_value(value, attr_name ): if isinstance(value, list) or isinstance(value, tuple): if 0 == len(value): return tf.AttrValue(list=tf.AttrValue.ListValue()) else: list_value = tf.AttrValue.ListValue() for elem in value: _python_type_to_attr_list_elem(list_value, elem, attr_name) return tf.AttrValue(list=list_value) elif isinstance(value, tf.AttrValue): return value elif isinstance(value, string_types): return tf.AttrValue(s=tf.compat.as_bytes(value)) elif isinstance(value, bool): return tf.AttrValue(b=value) elif (isinstance(value, int) or isinstance(value, np.int32) or isinstance(value, np.int64)): return tf.AttrValue(i=value) elif isinstance(value, float) or isinstance(value, np.float): return tf.AttrValue(f=value) elif isinstance(value, tf.DType): return tf.AttrValue(type=value.as_datatype_enum) elif isinstance(value, np.dtype): return tf.AttrValue(type=tf.as_dtype(value).as_datatype_enum) elif isinstance(value, tf.TensorShape): return tf.AttrValue(shape=value.as_proto()) elif isinstance(value, np.ndarray): return tf.AttrValue(tensor=tf.make_tensor_proto(values=value)) else: raise ValueError("Don't know how to convert a {} to " "tf.AttrValue for attribute {}".format( type(value), attr_name)) def attr_value_to_python_type( attr_value, attr_name ): if attr_value.HasField("s"): return tf.compat.as_str(attr_value.s) elif attr_value.HasField("i"): return attr_value.i elif attr_value.HasField("f"): return attr_value.f elif attr_value.HasField("b"): return attr_value.b elif attr_value.HasField("type"): return tf.DType(attr_value.type) elif attr_value.HasField("shape"): return tf.TensorShape(attr_value.shape) elif attr_value.HasField("tensor"): return tf.make_ndarray(attr_value.tensor) elif attr_value.HasField("list"): return attr_value.list elif attr_value.HasField("func"): return attr_value.func else: raise ValueError("Don't know how to convert AttrValue {} to " "a Python object for attribute {}".format(attr_value, attr_name)) def load_variables_to_tf_graph(g ): for var_name in g.variable_names: var = g.get_variable_by_name(var_name) tf_var = tf.Variable.from_proto(var.to_proto()) 
tf.add_to_collections(var.collection_names, tf_var) def make_const(g, name, value, uniquify_name=False ): dtype = tf.as_dtype(value.dtype) ret = g.add_node(name, "Const", uniquify_name=uniquify_name) ret.add_attr("dtype", dtype) ret.add_attr("value", value) ret.set_outputs_from_pairs([(dtype, tf.TensorShape(value.shape))]) return ret def make_placeholder(g, name, dtype, shape, uniquify_name=False ): ret = g.add_node(name, "Placeholder", uniquify_name=uniquify_name) ret.add_attr("dtype", dtype) ret.set_outputs_from_pairs([(dtype, shape)]) return ret def make_identity(g, name, input, uniquify_name=False ): ret = g.add_node(name, "Identity", uniquify_name=uniquify_name) ret.set_inputs([input]) ret.set_outputs_from_pairs([(input.dtype, input.shape)]) ret.add_attr("T", input.dtype) return ret def make_simple_binary_op(g, name, op_name, input_1, input_2, dtype=None, uniquify_name=False ): if dtype is None: dtype = input_1.dtype ret = g.add_node(name, op_name, uniquify_name=uniquify_name) ret.add_attr("T", dtype) ret.set_inputs([input_1, input_2]) ret.infer_outputs() return ret
Apache License 2.0
stencila/hub
manager/projects/models/projects.py
Project.event
python
def event(self, data: dict, source=None):
    ProjectEvent.objects.create(project=self, data=data, source=source)
Handle an event notification. Records the event and evaluates each project trigger.
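A minimal usage sketch (an illustration, not code from the repository); the primary key and the payload keys are hypothetical:

# Hypothetical illustration: the pk and payload keys are made up.
project = Project.objects.get(pk=1)
project.event({"type": "repo.push", "ref": "refs/heads/main"})
# Records a ProjectEvent row linked to the project; per the docstring, trigger
# evaluation also happens inside this method (not shown in the excerpt above).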
https://github.com/stencila/hub/blob/e696c39213156bb43a098f81286197e919379cdf/manager/projects/models/projects.py#L399-L409
import datetime import os from typing import Dict, List, Optional from urllib.parse import urlencode import shortuuid from django.conf import settings from django.core.files.base import ContentFile from django.db import models from django.db.models import Q from django.db.models.signals import post_save from django.http import HttpRequest from django.shortcuts import reverse from django.utils import timezone from meta.views import Meta from accounts.models import Account, AccountTeam from jobs.models import Job, JobMethod from manager.helpers import EnumChoice from manager.storage import ( StorageUsageMixin, media_storage, snapshots_storage, working_storage, ) from users.models import User class ProjectLiveness(EnumChoice): LIVE = "live" LATEST = "latest" PINNED = "pinned" @staticmethod def as_choices(): return ( ("live", "Use working directory"), ("latest", "Use latest snapshot"), ("pinned", "Pinned to snapshot"), ) def generate_project_key(): return shortuuid.ShortUUID().random(length=32) class Project(StorageUsageMixin, models.Model): account = models.ForeignKey( Account, on_delete=models.CASCADE, related_name="projects", null=False, blank=False, help_text="Account that the project belongs to.", ) creator = models.ForeignKey( User, null=True, blank=True, on_delete=models.SET_NULL, related_name="projects_created", help_text="The user who created the project.", ) created = models.DateTimeField( auto_now_add=True, help_text="The time the project was created." ) name = models.SlugField( null=False, blank=False, help_text="Name of the project. Lowercase only and unique for the account. " "Will be used in URLS e.g. https://hub.stenci.la/awesome-org/great-project.", ) title = models.CharField( null=True, blank=True, max_length=256, help_text="Title of the project to display in its profile.", ) temporary = models.BooleanField( default=False, help_text="Is the project temporary?" ) public = models.BooleanField( default=True, help_text="Is the project publicly visible?" ) featured = models.BooleanField( default=False, help_text="Is the project to be featured in listings?" ) key = models.CharField( default=generate_project_key, max_length=64, help_text="A unique, and very difficult to guess, key to access this project if it is not public.", ) description = models.TextField( null=True, blank=True, help_text="Brief description of the project." ) image_file = models.ImageField( null=True, blank=True, storage=media_storage(), upload_to="projects/images", help_text="The image used for this project in project listings and HTML meta data.", ) image_path = models.CharField( null=True, blank=True, max_length=1024, help_text="Path of file in the project's working directory to use as this project's image. " "Allows the project's image to update as it is re-executed.", ) image_updated = models.DateTimeField( null=True, blank=True, help_text="When the image file was last updated (e.g. from image_path).", ) theme = models.TextField( null=True, blank=True, help_text="The name of the theme to use as the default when generating content for this project." 
) extra_head = models.TextField( null=True, blank=True, help_text="Content to inject into the <head> element of HTML served for this project.", ) extra_top = models.TextField( null=True, blank=True, help_text="Content to inject at the top of the <body> element of HTML served for this project.", ) extra_bottom = models.TextField( null=True, blank=True, help_text="Content to inject at the bottom of the <body> element of HTML served for this project.", ) container_image = models.TextField( null=True, blank=True, help_text="The container image to use as the execution environment for this project.", ) session_timeout = models.PositiveIntegerField( null=True, blank=True, help_text="The amount of time of inactivity after which a session will end (s).", ) session_timelimit = models.PositiveIntegerField( null=True, blank=True, help_text="The maximum duration of a session (s)." ) session_memory = models.PositiveIntegerField( null=True, blank=True, help_text="The amount of memory allocated (request and limit) for a session (MiB).", ) main = models.TextField( null=True, blank=True, help_text="Path of the main file of the project", ) liveness = models.CharField( max_length=16, choices=ProjectLiveness.as_choices(), default=ProjectLiveness.LATEST.value, help_text="Where to serve the content for this project from.", ) pinned = models.ForeignKey( "Snapshot", null=True, blank=True, on_delete=models.SET_NULL, related_name="project_pinned", help_text="If pinned, the snapshot to pin to, when serving content.", ) class Meta: constraints = [ models.UniqueConstraint( fields=["account", "name"], name="%(class)s_unique_account_name" ) ] TEMPORARY_PROJECT_LIFESPANS = { "temp": datetime.timedelta(days=1), "default": datetime.timedelta(days=7), } TEMPORARY_PROJECT_WARNING = datetime.timedelta(days=2) STORAGE = working_storage() def __str__(self): return self.name def get_meta(self) -> Meta: return Meta( object_type="article", title=self.title or self.name, description=self.description, image=self.image_file.url if self.image_file else None, ) def set_image_from_file(self, file): if isinstance(file, str): try: file = self.files.filter(current=True, path=file)[0] except IndexError: return content = file.get_content() format = file.get_format() ext = format.default_extension if format else "" file = ContentFile(content) file.name = f"{self.id}-{shortuuid.uuid()}{ext}" self.image_file = file self.image_updated = timezone.now() self.save() def update_image(self): modified_since = ( dict(modified__gt=self.image_updated) if self.image_updated else {} ) if self.image_path and self.image_path != "__uploaded__": images = self.files.filter( current=True, path=self.image_path, **modified_since ).order_by("-modified") if len(images) > 0: self.set_image_from_file(images[0]) else: images = self.files.filter( current=True, mimetype__startswith="image/", **modified_since, ).order_by("-modified") if len(images) > 0: self.set_image_from_file(images[0]) def update_image_all_projects(self): projects = Project.objects.all(temporary=False) for project in projects: project.update_image() @property def scheduled_deletion_time(self) -> Optional[datetime.datetime]: if not self.temporary: return None delta = Project.TEMPORARY_PROJECT_LIFESPANS.get( self.account.name, Project.TEMPORARY_PROJECT_LIFESPANS.get("default") ) return self.created + delta @property def scheduled_deletion_warning(self) -> Optional[datetime.datetime]: time = self.scheduled_deletion_time return time - Project.TEMPORARY_PROJECT_WARNING if time else None def get_main(self): 
if self.main: try: return self.files.filter(path=self.main, current=True).order_by( "-created" )[0] except IndexError: pass candidates = self.files.filter( Q(path__startswith="main.") | Q(path__startswith="README."), current=True ).order_by("-modified") if len(candidates): return candidates[0] return None def get_theme(self) -> str: return self.theme or self.account.theme def content_url(self, snapshot=None, path=None, live=False) -> str: params: Dict = {} if settings.CONFIGURATION.endswith("Dev"): url = ( reverse("ui-accounts-content", kwargs=dict(project_name=self.name)) + "/" ) params.update(account=self.account.name) else: url = "https://{account}.{domain}/{project}/".format( account=self.account.name, domain=settings.ACCOUNTS_DOMAIN, project=self.name, ) if live: url += "live/" elif snapshot: url += "v{0}/".format(snapshot.number) if not self.public: url += "~{0}/".format(self.key) if path: url += path if params: url += "?" + urlencode(params) return url def file_location(self, file: str) -> str: return os.path.join(str(self.id), file)
Apache License 2.0
picoctf/picoctf
picoCTF-web/tests/integration/common.py
ensure_before_competition
python
def ensure_before_competition():
    db = get_conn()
    db.settings.update_one(
        {},
        {
            "$set": {
                "start_time": datetime.datetime.utcnow() + datetime.timedelta(11),
                "end_time": datetime.datetime.utcnow() + datetime.timedelta(10),
            }
        },
    )
Adjust the competition times so that @block_before_competition fails.
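A sketch of how a test might combine this helper with the fixtures above; the endpoint path and the assertion are assumptions, since the routes guarded by @block_before_competition are not shown here:

# Hypothetical test sketch: the route below is an assumption.
def test_blocked_before_competition(client):
    clear_db()
    register_test_accounts()
    ensure_before_competition()  # competition start is now in the future
    res = client.get("/api/v1/problems")  # any view guarded by @block_before_competition
    assert res.status_code != 200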
https://github.com/picoctf/picoctf/blob/280dcf21ac024067cf436894947ffffd328be048/picoCTF-web/tests/integration/common.py#L728-L739
import datetime import json import re import pymongo import pytest import api RATE_LIMIT_BYPASS_KEY = "test_bypass" TESTING_DB_NAME = "ctf_test" db = None def decode_response(res): decoded_dict = json.loads(res.data.decode("utf-8")) return (decoded_dict["status"], decoded_dict["message"], decoded_dict["data"]) def get_csrf_token(res): for header in res.headers: m = re.search("token=(.+?);", header[1]) if m: return m.group(1) raise RuntimeError( "Could not find CSRF token in response headers: " + str(res.headers) ) def get_conn(): global db if db is None: client = pymongo.MongoClient(host="127.0.0.1", port=27018) db = client[TESTING_DB_NAME] return db def clear_db(): db = get_conn() db.command("dropDatabase") @pytest.fixture def client(): app = api.create_app( { "TESTING": True, "MONGO_DB_NAME": TESTING_DB_NAME, "MONGO_PORT": 27018, "RATE_LIMIT_BYPASS_KEY": RATE_LIMIT_BYPASS_KEY, } ) return app.test_client() def app(): app = api.create_app( {"TESTING": True, "MONGO_DB_NAME": TESTING_DB_NAME, "MONGO_PORT": 27018} ) return app def cache(f, *args, **kwargs): result = f(reset_cache=True, *args, **kwargs) return result def update_all_scoreboards(): api.stats.get_all_team_scores() for scoreboard in api.scoreboards.get_all_scoreboards(): api.stats.get_all_team_scores(scoreboard_id=scoreboard["sid"]) for group in api.group.get_all_groups(): api.stats.get_group_scores(gid=group["gid"]) ADMIN_DEMOGRAPHICS = { "username": "adminuser", "password": "adminpw", "firstname": "Admin", "lastname": "User", "email": "admin@example.com", "country": "US", "affiliation": "Admin School", "usertype": "other", "demo": {"parentemail": "admin@example.com", "age": "18+"}, "gid": None, "rid": None, } TEACHER_DEMOGRAPHICS = { "username": "teacheruser", "password": "teacherpw", "firstname": "Teacher", "lastname": "User", "email": "teacher@example.com", "country": "US", "affiliation": "Sample School", "usertype": "teacher", "demo": {"parentemail": "teacher@example.com", "age": "18+"}, "gid": None, "rid": None, } STUDENT_DEMOGRAPHICS = { "username": "studentuser", "password": "studentpw", "firstname": "Student", "lastname": "User", "email": "student@example.com", "country": "US", "affiliation": "Sample School", "usertype": "student", "demo": {"parentemail": "student@example.com", "age": "13-17"}, "gid": None, "rid": None, } STUDENT_2_DEMOGRAPHICS = { "username": "studentuser2", "password": "studentpw2", "firstname": "Student", "lastname": "Usertwo", "email": "student2@example.com", "country": "US", "affiliation": "Sample School", "usertype": "student", "demo": {"parentemail": "student2@example.com", "age": "18+"}, "gid": None, "rid": None, } OTHER_USER_DEMOGRAPHICS = { "username": "otheruser", "password": "otherpw", "firstname": "Other", "lastname": "User", "email": "other@example.com", "country": "US", "affiliation": "Sample Organization", "usertype": "other", "demo": {"age": "18+"}, "gid": None, "rid": None, } def register_test_accounts(): with app().app_context(): api.user.add_user(ADMIN_DEMOGRAPHICS) api.user.add_user(TEACHER_DEMOGRAPHICS) api.user.add_user(STUDENT_DEMOGRAPHICS) api.user.add_user(STUDENT_2_DEMOGRAPHICS) api.user.add_user(OTHER_USER_DEMOGRAPHICS) sample_shellserver_publish_output = r""" { "problems": [ { "name": "ECB 1", "category": "Cryptography", "description": "There is a crypto service running at {{server}}:{{port}}. 
We were able to recover the source code, which you can download at {{url_for(\"ecb.py\")}}.", "walkthrough": "Let me google that for you.", "score": 70, "author": "Tim Becker", "organization": "ForAllSecure", "event": "Sample", "pip_requirements": [ "pycrypto" ], "pip_python_version": "3", "unique_name": "ecb-1-b06174a", "instances": [ { "user": "ecb-1_0", "deployment_directory": "/problems/ecb-1_0_73a0108a98d2862a86f4b71534aaf7c3", "service": "ecb-1_0", "socket": null, "server": "192.168.2.3", "description": "There is a crypto service running at 192.168.2.3:46981. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/fd59acc6b8d2359d48bd939a08ecb8ab/ecb.py'>ecb.py</a>.", "hints": [], "flag": "49e56ea9bf2e2b60ba9af034b5b2a5fd", "flag_sha1": "77cec418714d6eb0dc48afa6d6f38200402a83c0", "instance_number": 0, "should_symlink": false, "files": [ { "path": "flag", "permissions": 288, "user": null, "group": null }, { "path": "key", "permissions": 288, "user": null, "group": null }, { "path": "ecb.py", "permissions": 1517, "user": null, "group": null }, { "path": "xinet_startup.sh", "permissions": 1517, "user": null, "group": null } ], "port": 46981 }, { "user": "ecb-1_1", "deployment_directory": "/problems/ecb-1_1_83b2ed9a1806c86219347bc4982a66de", "service": "ecb-1_1", "socket": null, "server": "192.168.2.3", "description": "There is a crypto service running at 192.168.2.3:21953. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/beb9874a05a1810fa8c9d79152ace1b3/ecb.py'>ecb.py</a>.", "hints": [], "flag": "85a32ccd05fa30e0efd8da555c1a101a", "flag_sha1": "f28581a86561c885152f7622200057585787c063", "instance_number": 1, "should_symlink": false, "files": [ { "path": "flag", "permissions": 288, "user": null, "group": null }, { "path": "key", "permissions": 288, "user": null, "group": null }, { "path": "ecb.py", "permissions": 1517, "user": null, "group": null }, { "path": "xinet_startup.sh", "permissions": 1517, "user": null, "group": null } ], "port": 21953 }, { "user": "ecb-1_2", "deployment_directory": "/problems/ecb-1_2_1998c2cc0f0d17ae54170200f5478b7f", "service": "ecb-1_2", "socket": null, "server": "192.168.2.3", "description": "There is a crypto service running at 192.168.2.3:17648. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/19e863cba0bf14ad676e4b4799eacc72/ecb.py'>ecb.py</a>.", "hints": [], "flag": "f76d2f6b885255450ed2f7307d96e28e", "flag_sha1": "43cf6f1dab026cf2100e2f663509512416112219", "instance_number": 2, "should_symlink": false, "files": [ { "path": "flag", "permissions": 288, "user": null, "group": null }, { "path": "key", "permissions": 288, "user": null, "group": null }, { "path": "ecb.py", "permissions": 1517, "user": null, "group": null }, { "path": "xinet_startup.sh", "permissions": 1517, "user": null, "group": null } ], "port": 17648 } ], "sanitized_name": "ecb-1" }, { "name": "SQL Injection 1", "category": "Web Exploitation", "pkg_dependencies": [ "php7.2-sqlite3" ], "description": "There is a website running at http://{{server}}:{{port}}. 
Try to see if you can login!", "score": 40, "author": "Tim Becker", "organization": "ForAllSecure", "event": "Sample", "unique_name": "sql-injection-1-0c436d0", "instances": [ { "user": "sql-injection-1_0", "deployment_directory": "/problems/sql-injection-1_0_9e114b246c48eb158b16525f71ae2a00", "service": "sql-injection-1_0", "socket": null, "server": "192.168.2.3", "description": "There is a website running at http://192.168.2.3:46984. Try to see if you can login!", "hints": [], "flag": "9ac0a74de6bced3cdce8e7fd466f32d0", "flag_sha1": "958416d52940e4948eca8d9fb1eca21e4cf7eda1", "instance_number": 0, "should_symlink": false, "files": [ { "path": "webroot/index.html", "permissions": 436, "user": null, "group": null }, { "path": "webroot/login.php", "permissions": 436, "user": null, "group": null }, { "path": "webroot/login.phps", "permissions": 436, "user": null, "group": null }, { "path": "webroot/config.php", "permissions": 436, "user": null, "group": null }, { "path": "users.db", "permissions": 288, "user": null, "group": null }, { "path": "xinet_startup.sh", "permissions": 1517, "user": null, "group": null } ], "port": 46984 }, { "user": "sql-injection-1_1", "deployment_directory": "/problems/sql-injection-1_1_10a4b1cdfd3a0f78d0d8b9759e6d69c5", "service": "sql-injection-1_1", "socket": null, "server": "192.168.2.3", "description": "There is a website running at http://192.168.2.3:21955. Try to see if you can login!", "hints": [], "flag": "28054fef0f362256c78025f82e6572c3", "flag_sha1": "f57fa5d3861c22a657eecafe30a43bd4ad7a4a2a", "instance_number": 1, "should_symlink": false, "files": [ { "path": "webroot/index.html", "permissions": 436, "user": null, "group": null }, { "path": "webroot/login.php", "permissions": 436, "user": null, "group": null }, { "path": "webroot/login.phps", "permissions": 436, "user": null, "group": null }, { "path": "webroot/config.php", "permissions": 436, "user": null, "group": null }, { "path": "users.db", "permissions": 288, "user": null, "group": null }, { "path": "xinet_startup.sh", "permissions": 1517, "user": null, "group": null }, { "path": "xinet_startup.sh", "permissions": 1517, "user": null, "group": null } ], "port": 21955 }, { "user": "sql-injection-1_2", "deployment_directory": "/problems/sql-injection-1_2_57a103ad26a005f69b4332e62d611372", "service": "sql-injection-1_2", "socket": null, "server": "192.168.2.3", "description": "There is a website running at http://192.168.2.3:17649. 
Try to see if you can login!", "hints": [], "flag": "6ed19af4c4540d444ae08735aa5664af", "flag_sha1": "19bbc88ca231ddfde8063acdda75a92b1e6fd993", "instance_number": 2, "should_symlink": false, "files": [ { "path": "webroot/index.html", "permissions": 436, "user": null, "group": null }, { "path": "webroot/login.php", "permissions": 436, "user": null, "group": null }, { "path": "webroot/login.phps", "permissions": 436, "user": null, "group": null }, { "path": "webroot/config.php", "permissions": 436, "user": null, "group": null }, { "path": "users.db", "permissions": 288, "user": null, "group": null }, { "path": "xinet_startup.sh", "permissions": 1517, "user": null, "group": null }, { "path": "xinet_startup.sh", "permissions": 1517, "user": null, "group": null }, { "path": "xinet_startup.sh", "permissions": 1517, "user": null, "group": null } ], "port": 17649 } ], "sanitized_name": "sql-injection-1" }, { "name": "Buffer Overflow 1", "category": "Binary Exploitation", "description": "Exploit the {{url_for(\"vuln\", display=\"Buffer Overflow\")}} found here: {{directory}}.", "score": 50, "walkthrough": "PROTIP: Find the correct answer to get the points.", "author": "Tim Becker", "organization": "ForAllSecure", "event": "Sample", "unique_name": "buffer-overflow-1-35e6d9d", "instances": [ { "user": "buffer-overflow-1_0", "deployment_directory": "/problems/buffer-overflow-1_0_bab40cd8ebd7845e1c4c2951c6f82e1f", "service": null, "socket": null, "server": "192.168.2.3", "description": "Exploit the <a href='//192.168.2.3/static/bd08ee41f495f8bff378c13157d0f511/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_0_bab40cd8ebd7845e1c4c2951c6f82e1f.", "hints": [ "This is a classic buffer overflow with no modern protections." ], "flag": "638608c79eca2165e7b241ff365df05b", "flag_sha1": "4b97abef055a11ec19c14622eb31eb1168d98aca", "instance_number": 0, "should_symlink": true, "files": [ { "path": "flag.txt", "permissions": 288, "user": null, "group": null }, { "path": "vuln", "permissions": 1517, "user": null, "group": null } ] }, { "user": "buffer-overflow-1_1", "deployment_directory": "/problems/buffer-overflow-1_1_f49b6bd5da29513569bd87f98a934fa6", "service": null, "socket": null, "server": "192.168.2.3", "description": "Exploit the <a href='//192.168.2.3/static/c95410042007bb17f49b891a2a87afb2/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_1_f49b6bd5da29513569bd87f98a934fa6.", "hints": [ "This is a classic buffer overflow with no modern protections." ], "flag": "35013564b97b80d4fd3f2be45e5836ff", "flag_sha1": "5675d2d5819084d4203c1ef314239527074938a9", "instance_number": 1, "should_symlink": true, "files": [ { "path": "flag.txt", "permissions": 288, "user": null, "group": null }, { "path": "vuln", "permissions": 1517, "user": null, "group": null } ] }, { "user": "buffer-overflow-1_2", "deployment_directory": "/problems/buffer-overflow-1_2_6c4daed04928f80dd29290060827be61", "service": null, "socket": null, "server": "192.168.2.3", "description": "Exploit the <a href='//192.168.2.3/static/dbeb4d34945e752ea988dcdb4454f57d/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_2_6c4daed04928f80dd29290060827be61.", "hints": [ "This is a classic buffer overflow with no modern protections." 
], "flag": "8dfabcb5c4a18d03ad5ecea19eef27a6", "flag_sha1": "aef4789685665a1bf4994d62ef10941dbce5647a", "instance_number": 2, "should_symlink": true, "files": [ { "path": "flag.txt", "permissions": 288, "user": null, "group": null }, { "path": "vuln", "permissions": 1517, "user": null, "group": null } ] } ], "sanitized_name": "buffer-overflow-1" } ], "bundles": [ { "name": "Challenge Sampler", "author": "Christopher Ganas", "description": "Dependency weightmap for the example challenges provided in the picoCTF-Problems repository.", "dependencies": { "ecb-1-b06174a": { "threshold": 1, "weightmap": { "buffer-overflow-1-35e6d9d": 1 } }, "sql-injection-1-0c436d0": { "threshold": 1, "weightmap": { "buffer-overflow-1-35e6d9d": 1, "ecb-1-b06174a": 1 } } } } ], "sid": "728f36885f7c4686805593b9e4988c30" } """ problems_endpoint_response = [ { "name": "SQL Injection 1", "category": "Web Exploitation", "description": "There is a website running at http://192.168.2.3:17648. Try to see if you can login!", "score": 40, "hints": [], "author": "Tim Becker", "organization": "ForAllSecure", "sanitized_name": "sql-injection-1", "disabled": False, "pid": "4508167aa0b219fd9d131551d10aa58e", "solves": 0, "socket": None, "server": "192.168.2.3", "port": 17648, "server_number": 1, "solved": False, "unlocked": True, }, { "name": "Buffer Overflow 1", "category": "Binary Exploitation", "description": "Exploit the <a href='//192.168.2.3/static/bd08ee41f495f8bff378c13157d0f511/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_0_bab40cd8ebd7845e1c4c2951c6f82e1f.", "score": 50, "hints": ["This is a classic buffer overflow with no modern protections."], "author": "Tim Becker", "organization": "ForAllSecure", "sanitized_name": "buffer-overflow-1", "disabled": False, "pid": "1bef644c399e10a3f35fecdbf590bd0c", "solves": 0, "socket": None, "server": "192.168.2.3", "server_number": 1, "solved": False, "unlocked": True, }, { "name": "ECB 1", "category": "Cryptography", "description": "There is a crypto service running at 192.168.2.3:21953. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/beb9874a05a1810fa8c9d79152ace1b3/ecb.py'>ecb.py</a>.", "hints": [], "score": 70, "author": "Tim Becker", "organization": "ForAllSecure", "sanitized_name": "ecb-1", "disabled": False, "pid": "7afda419da96e8471b49df9c2009e2ef", "solves": 0, "socket": None, "server": "192.168.2.3", "port": 21953, "server_number": 1, "solved": False, "unlocked": True, }, ] def load_sample_problems(): with app().app_context(): db = get_conn() db.shell_servers.insert_one( { "sid": "728f36885f7c4686805593b9e4988c30", "name": "Test shell server", "host": "testing.picoctf.com", "port": "22", "username": "username", "password": "password", "protocol": "HTTPS", "server_number": 1, } ) api.problem.load_published(json.loads(sample_shellserver_publish_output)) def enable_sample_problems(): db = get_conn() db.problems.update_many({}, {"$set": {"disabled": False}}) def ensure_within_competition(): db = get_conn() db.settings.update_one( {}, { "$set": { "start_time": datetime.datetime.utcnow() - datetime.timedelta(1), "end_time": datetime.datetime.utcnow() + datetime.timedelta(1), } }, )
MIT License
wdm0006/sklearn-extensions
sklearn_extensions/kernel_sgd/kernel_sgd.py
SquaredLoss.get_update
python
def get_update(self, p, y):
    return -p + y
:param p: model prediction
:param y: target value
:return: the update term y - p, i.e. the negative gradient of the squared loss 0.5 * (p - y)**2 with respect to p
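A small worked example of how the update might be applied in a gradient-style step; the learning rate is illustrative:

loss = SquaredLoss()
p, y = 3.0, 5.0                 # prediction and target
update = loss.get_update(p, y)  # y - p = 2.0, the negative gradient of 0.5 * (p - y)**2
eta = 0.1                       # illustrative learning rate
p_new = p + eta * update        # 3.2: the prediction moves toward the target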
https://github.com/wdm0006/sklearn-extensions/blob/329f3efdb8c3c3a367b264f7c76c76411a784530/sklearn_extensions/kernel_sgd/kernel_sgd.py#L120-L128
import numpy as np from sklearn.base import BaseEstimator def euclidean_distances(X, Y=None, squared=False): XX = np.sum(X * X, axis=1)[:, np.newaxis] YY = np.sum(Y ** 2, axis=1)[np.newaxis, :] distances = np.dot(X, Y.T) distances *= -2 distances += XX distances += YY np.maximum(distances, 0, distances) if X is Y: distances.flat[::distances.shape[0] + 1] = 0.0 return distances if squared else np.sqrt(distances) class GaussianKernel(object): def __init__(self, gamma=1.0): self.gamma = gamma def compute(self, X, Y): K = euclidean_distances(X, Y, squared=True) K *= -self.gamma np.exp(K, K) return K class HingeLoss(object): def __init__(self, threshold=1): self.threshold = threshold def get_update(self, p, y): z = p * y if z <= self.threshold: return y return 0.0 class LogLoss(object): def get_update(self, p, y): z = p * y if z > 18.0: return np.exp(-z) * y if z < -18.0: return y return y / (np.exp(z) + 1.0) class SquaredLoss(object):
BSD 3-Clause New or Revised License
pyccel/pyccel
pyccel/parser/semantic.py
SemanticParser.get_import
python
def get_import(self, name):
    imp = None
    container = self.namespace
    while container:
        if name in container.imports['imports']:
            imp = container.imports['imports'][name]
            break
        container = container.parent_scope
    return imp
Search for an import with the given name in the current namespace. Return None if not found.
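A standalone sketch of the same parent-scope lookup pattern, using a toy stand-in for pyccel's Scope (ToyScope and its contents are assumptions for illustration only):

class ToyScope:  # stand-in: just an imports dict plus a parent link
    def __init__(self, imports=None, parent_scope=None):
        self.imports = {'imports': imports or {}}
        self.parent_scope = parent_scope

outer = ToyScope(imports={'numpy': 'numpy-import-object'})
inner = ToyScope(parent_scope=outer)

def lookup(namespace, name):
    container = namespace
    while container:
        if name in container.imports['imports']:
            return container.imports['imports'][name]
        container = container.parent_scope
    return None

print(lookup(inner, 'numpy'))  # 'numpy-import-object', found by walking up to the parent scope
print(lookup(inner, 'scipy'))  # None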
https://github.com/pyccel/pyccel/blob/2a5bd75c33d270cdd675ad46b8ce718113b70498/pyccel/parser/semantic.py#L470-L487
from collections import OrderedDict from itertools import chain from sympy.utilities.iterables import iterable as sympy_iterable from sympy import Sum as Summation from sympy import Symbol as sp_Symbol from sympy import Integer as sp_Integer from sympy import ceiling from sympy.core import cache from pyccel.ast.basic import Basic, PyccelAstNode from pyccel.ast.builtins import PythonPrint from pyccel.ast.builtins import PythonInt, PythonBool, PythonFloat, PythonComplex from pyccel.ast.builtins import python_builtin_datatype from pyccel.ast.builtins import PythonList from pyccel.ast.builtins import (PythonRange, PythonZip, PythonEnumerate, PythonMap, PythonTuple, Lambda) from pyccel.ast.core import Comment, CommentBlock, Pass from pyccel.ast.core import If, IfSection from pyccel.ast.core import Allocate, Deallocate from pyccel.ast.core import Assign, AliasAssign, SymbolicAssign from pyccel.ast.core import AugAssign, CodeBlock from pyccel.ast.core import Return, FunctionDefArgument from pyccel.ast.core import ConstructorCall from pyccel.ast.core import FunctionDef, Interface, FunctionAddress, FunctionCall, FunctionCallArgument from pyccel.ast.core import DottedFunctionCall from pyccel.ast.core import ClassDef from pyccel.ast.core import For from pyccel.ast.core import Module from pyccel.ast.core import While from pyccel.ast.core import SymbolicPrint from pyccel.ast.core import Del from pyccel.ast.core import Program from pyccel.ast.core import EmptyNode from pyccel.ast.core import Concatenate from pyccel.ast.core import Import from pyccel.ast.core import AsName from pyccel.ast.core import With from pyccel.ast.core import Duplicate from pyccel.ast.core import StarredArguments from pyccel.ast.core import Iterable from pyccel.ast.core import InProgram from pyccel.ast.class_defs import NumpyArrayClass, TupleClass, get_cls_base from pyccel.ast.datatypes import NativeRange, str_dtype from pyccel.ast.datatypes import NativeSymbol from pyccel.ast.datatypes import DataTypeFactory from pyccel.ast.datatypes import (NativeInteger, NativeBool, NativeReal, NativeString, NativeGeneric, NativeComplex) from pyccel.ast.functionalexpr import FunctionalSum, FunctionalMax, FunctionalMin, GeneratorComprehension, FunctionalFor from pyccel.ast.headers import FunctionHeader, ClassHeader, MethodHeader, Header from pyccel.ast.headers import MacroFunction, MacroVariable from pyccel.ast.internals import Slice, PyccelSymbol from pyccel.ast.itertoolsext import Product from pyccel.ast.literals import LiteralTrue, LiteralFalse from pyccel.ast.literals import LiteralInteger, LiteralFloat from pyccel.ast.literals import Nil, LiteralString from pyccel.ast.mathext import math_constants from pyccel.ast.numpyext import NumpyZeros, NumpyMatmul from pyccel.ast.numpyext import NumpyBool from pyccel.ast.numpyext import NumpyInt, NumpyInt8, NumpyInt16, NumpyInt32, NumpyInt64 from pyccel.ast.numpyext import NumpyFloat, NumpyFloat32, NumpyFloat64 from pyccel.ast.numpyext import NumpyComplex, NumpyComplex64, NumpyComplex128 from pyccel.ast.numpyext import NumpyTranspose from pyccel.ast.numpyext import NumpyNewArray from pyccel.ast.omp import (OMP_For_Loop, OMP_Simd_Construct, OMP_Distribute_Construct, OMP_TaskLoop_Construct, OMP_Sections_Construct, Omp_End_Clause, OMP_Single_Construct, OMP_Parallel_Construct) from pyccel.ast.operators import PyccelIs, PyccelIsNot, IfTernaryOperator, PyccelUnarySub from pyccel.ast.operators import PyccelNot, PyccelEq from pyccel.ast.sympy_helper import sympy_to_pyccel, pyccel_to_sympy from 
pyccel.ast.utilities import builtin_function as pyccel_builtin_function from pyccel.ast.utilities import python_builtin_libs from pyccel.ast.utilities import builtin_import as pyccel_builtin_import from pyccel.ast.utilities import builtin_import_registery as pyccel_builtin_import_registery from pyccel.ast.utilities import split_positional_keyword_arguments from pyccel.ast.variable import Constant from pyccel.ast.variable import Variable from pyccel.ast.variable import TupleVariable, HomogeneousTupleVariable, InhomogeneousTupleVariable from pyccel.ast.variable import IndexedElement from pyccel.ast.variable import DottedName, DottedVariable from pyccel.errors.errors import Errors from pyccel.errors.errors import PyccelSemanticError from pyccel.errors.messages import * from pyccel.parser.base import BasicParser, Scope from pyccel.parser.base import get_filename_from_import from pyccel.parser.syntactic import SyntaxParser import pyccel.decorators as def_decorators errors = Errors() def _get_name(var): if isinstance(var, str): return var if isinstance(var, (PyccelSymbol, DottedName)): return str(var) if isinstance(var, (IndexedElement)): return str(var.base) if isinstance(var, FunctionCall): return var.funcdef if isinstance(var, AsName): return var.target msg = 'Name of Object : {} cannot be determined'.format(type(var).__name__) errors.report(PYCCEL_RESTRICTION_TODO+'\n'+msg, symbol=var, severity='fatal') class SemanticParser(BasicParser): def __init__(self, inputs, **kwargs): self._parents = kwargs.pop('parents', []) self._d_parsers = kwargs.pop('d_parsers', OrderedDict()) if not isinstance(inputs, SyntaxParser): raise TypeError('> Expecting a syntactic parser as input') parser = inputs BasicParser.__init__(self, **kwargs) self._fst = parser._fst self._ast = parser._ast self._filename = parser._filename self._metavars = parser._metavars self._namespace = parser._namespace self._namespace.imports['imports'] = OrderedDict() self._program_namespace = Scope() self._module_namespace = self._namespace self._used_names = parser.used_names self._dummy_counter = parser._dummy_counter self._allocs = [] self._additional_exprs = [] self._code = parser._code settings = {} self.annotate() @property def parents(self): return self._parents @property def d_parsers(self): return self._d_parsers @property def program_namespace(self): return self._program_namespace def annotate(self, **settings): if self.semantic_done: print ('> semantic analysis already done') return self.ast errors = Errors() if self.filename: errors.set_target(self.filename, 'file') errors.set_parser_stage('semantic') ast = self.ast self._allocs.append([]) PyccelAstNode.stage = 'semantic' ast = self._visit(ast, **settings) self._ast = ast if self.is_header_file: target = [] for parent in self.parents: for (key, item) in parent.imports.items(): if get_filename_from_import(key) == self.filename: target += item target = set(target) target_headers = target.intersection(self.namespace.headers.keys()) for name in list(target_headers): v = self.namespace.headers[name][0] if isinstance(v, FunctionHeader) and not isinstance(v, MethodHeader): F = self.get_function(name) if F is None: interfaces = v.create_definition() for F in interfaces: self.insert_function(F) else: errors.report(IMPORTING_EXISTING_IDENTIFIED, symbol=name, blocker=True, severity='fatal') self._semantic_done = True return ast def change_to_program_scope(self): self._allocs.append([]) self._module_namespace = self._namespace self._namespace = self._program_namespace def 
change_to_module_scope(self): self._program_namespace = self._namespace self._namespace = self._module_namespace def get_variable_from_scope(self, name): container = self.namespace while container.is_loop: container = container.parent_scope var = self._get_variable_from_scope(name, container) return var def _get_variable_from_scope(self, name, container): if name in container.variables: return container.variables[name] if name in container.imports['variables']: return container.imports['variables'][name] for container in container.loops: var = self._get_variable_from_scope(name, container) if var: return var return None def check_for_variable(self, name): if self.current_class: for i in self._current_class.attributes: if i.name == name: var = i return var container = self.namespace while container.is_loop: container = container.parent_scope while container: var = self._get_variable_from_scope(name, container) if var is not None: return var container = container.parent_scope return None def get_variable(self, name): var = self.check_for_variable(name) if var is None: errors.report(UNDEFINED_VARIABLE, symbol=name, bounding_box=(self._current_fst_node.lineno, self._current_fst_node.col_offset), severity='fatal', blocker=True) else: return var def get_variables(self, container): variables = [] variables.extend(container.variables.values()) for container in container.loops: variables.extend(self.get_variables(container)) return variables def get_parent_functions(self): container = self.namespace funcs = container.functions.copy() container = container.parent_scope while container: for i in container.functions: if not i in funcs: funcs[i] = container.functions[i] container = container.parent_scope return funcs def get_class(self, name): container = self.namespace while container: if name in container.classes: return container.classes[name] elif name in container.imports['classes']: return container.imports['classes'][name] container = container.parent_scope return None def insert_variable(self, var, name=None): if not isinstance(var, Variable): raise TypeError('variable must be of type Variable') if name is None: name = var.name self.namespace.variables[name] = var def insert_class(self, cls, parent=False): if isinstance(cls, ClassDef): name = cls.name container = self.namespace if parent: container = container.parent_scope container.classes[name] = cls else: raise TypeError('Expected A class definition ') def insert_template(self, expr): self.namespace.templates[expr.name] = expr def insert_header(self, expr): if isinstance(expr, (FunctionHeader, MethodHeader)): if expr.name in self.namespace.headers: self.namespace.headers[expr.name].append(expr) else: self.namespace.headers[expr.name] = [expr] elif isinstance(expr, ClassHeader): self.namespace.headers[expr.name] = expr iterable = 'iterable' in expr.options with_construct = 'with' in expr.options dtype = DataTypeFactory(expr.name, '_name', is_iterable=iterable, is_with_construct=with_construct) self.set_class_construct(expr.name, dtype) else: msg = 'header of type{0} is not supported' msg = msg.format(str(type(expr))) raise TypeError(msg) def get_function(self, name): func = None container = self.namespace while container: if name in container.functions: func = container.functions[name] break if name in container.imports['functions']: func = container.imports['functions'][name] break container = container.parent_scope return func
MIT License
awslabs/dgl-ke
python/dglke/models/general_models.py
KEModel.save_emb
python
def save_emb(self, path, dataset):
    self.entity_emb.save(path, dataset+'_'+self.model_name+'_entity')
    if self.strict_rel_part or self.soft_rel_part:
        self.global_relation_emb.save(path, dataset+'_'+self.model_name+'_relation')
    else:
        self.relation_emb.save(path, dataset+'_'+self.model_name+'_relation')
    self.score_func.save(path, dataset+'_'+self.model_name)
Save the model.

Parameters
----------
path : str
    Directory to save the model.
dataset : str
    Dataset name as prefix to the saved embeddings.
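A usage sketch; the checkpoint directory and dataset name are made-up values, and the exact on-disk format depends on ExternalEmbedding.save:

# Illustrative only: 'ckpts/' and 'FB15k' are assumptions.
model.save_emb('ckpts/', 'FB15k')
# With model_name == 'TransE_l2' the embeddings are saved under prefixes such as
#   FB15k_TransE_l2_entity     (entity embeddings)
#   FB15k_TransE_l2_relation   (relation embeddings, or the global copy when rel_part is used)
# plus anything the score function itself persists (e.g. projection embeddings for TransR).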
https://github.com/awslabs/dgl-ke/blob/30558e069c42038cded08bddd26ac75f153aae75/python/dglke/models/general_models.py#L290-L306
import os import numpy as np import math import dgl.backend as F backend = os.environ.get('DGLBACKEND', 'pytorch') if backend.lower() == 'mxnet': from .mxnet.tensor_models import masked_select from .mxnet.tensor_models import logsigmoid from .mxnet.tensor_models import abs from .mxnet.tensor_models import get_device, get_dev from .mxnet.tensor_models import norm from .mxnet.tensor_models import get_scalar from .mxnet.tensor_models import reshape from .mxnet.tensor_models import cuda from .mxnet.tensor_models import ExternalEmbedding from .mxnet.tensor_models import InferEmbedding from .mxnet.score_fun import * DEFAULT_INFER_BATCHSIZE = 1024 else: from .pytorch.tensor_models import logsigmoid from .pytorch.tensor_models import abs from .pytorch.tensor_models import masked_select from .pytorch.tensor_models import get_device, get_dev from .pytorch.tensor_models import norm from .pytorch.tensor_models import get_scalar from .pytorch.tensor_models import reshape from .pytorch.tensor_models import cuda from .pytorch.tensor_models import ExternalEmbedding from .pytorch.tensor_models import InferEmbedding from .pytorch.score_fun import * from .pytorch.loss import LossGenerator DEFAULT_INFER_BATCHSIZE = 2048 EMB_INIT_EPS = 2.0 class InferModel(object): def __init__(self, device, model_name, hidden_dim, double_entity_emb=False, double_relation_emb=False, gamma=0., batch_size=DEFAULT_INFER_BATCHSIZE): super(InferModel, self).__init__() self.device = device self.model_name = model_name entity_dim = 2 * hidden_dim if double_entity_emb else hidden_dim relation_dim = 2 * hidden_dim if double_relation_emb else hidden_dim self.entity_emb = InferEmbedding(device) self.relation_emb = InferEmbedding(device) self.batch_size = batch_size if model_name == 'TransE' or model_name == 'TransE_l2': self.score_func = TransEScore(gamma, 'l2') elif model_name == 'TransE_l1': self.score_func = TransEScore(gamma, 'l1') elif model_name == 'TransR': assert False, 'Do not support inference of TransR model now.' 
elif model_name == 'DistMult': self.score_func = DistMultScore() elif model_name == 'ComplEx': self.score_func = ComplExScore() elif model_name == 'RESCAL': self.score_func = RESCALScore(relation_dim, entity_dim) elif model_name == 'RotatE': emb_init = (gamma + EMB_INIT_EPS) / hidden_dim self.score_func = RotatEScore(gamma, emb_init) elif model_name == 'SimplE': self.score_func = SimplEScore() def load_emb(self, path, dataset): self.entity_emb.load(path, dataset+'_'+self.model_name+'_entity') self.relation_emb.load(path, dataset+'_'+self.model_name+'_relation') self.score_func.load(path, dataset+'_'+self.model_name) def score(self, head, rel, tail, triplet_wise=False): head_emb = self.entity_emb(head) rel_emb = self.relation_emb(rel) tail_emb = self.entity_emb(tail) num_head = F.shape(head)[0] num_rel = F.shape(rel)[0] num_tail = F.shape(tail)[0] batch_size = self.batch_size score = [] if triplet_wise: class FakeEdge(object): def __init__(self, head_emb, rel_emb, tail_emb): self._hobj = {} self._robj = {} self._tobj = {} self._hobj['emb'] = head_emb self._robj['emb'] = rel_emb self._tobj['emb'] = tail_emb @property def src(self): return self._hobj @property def dst(self): return self._tobj @property def data(self): return self._robj for i in range((num_head + batch_size - 1) // batch_size): sh_emb = head_emb[i * batch_size : (i + 1) * batch_size if (i + 1) * batch_size < num_head else num_head] sr_emb = rel_emb[i * batch_size : (i + 1) * batch_size if (i + 1) * batch_size < num_head else num_head] st_emb = tail_emb[i * batch_size : (i + 1) * batch_size if (i + 1) * batch_size < num_head else num_head] edata = FakeEdge(sh_emb, sr_emb, st_emb) score.append(F.copy_to(self.score_func.edge_func(edata)['score'], F.cpu())) score = F.cat(score, dim=0) return score else: for i in range((num_head + batch_size - 1) // batch_size): sh_emb = head_emb[i * batch_size : (i + 1) * batch_size if (i + 1) * batch_size < num_head else num_head] s_score = [] for j in range((num_tail + batch_size - 1) // batch_size): st_emb = tail_emb[j * batch_size : (j + 1) * batch_size if (j + 1) * batch_size < num_tail else num_tail] s_score.append(F.copy_to(self.score_func.infer(sh_emb, rel_emb, st_emb), F.cpu())) score.append(F.cat(s_score, dim=2)) score = F.cat(score, dim=0) return F.reshape(score, (num_head * num_rel * num_tail,)) @property def num_entity(self): return self.entity_emb.emb.shape[0] @property def num_rel(self): return self.relation_emb.emb.shape[0] class KEModel(object): def __init__(self, args, model_name, n_entities, n_relations, hidden_dim, gamma, double_entity_emb=False, double_relation_emb=False): super(KEModel, self).__init__() self.args = args self.has_edge_importance = args.has_edge_importance self.n_entities = n_entities self.n_relations = n_relations self.model_name = model_name self.hidden_dim = hidden_dim self.eps = EMB_INIT_EPS self.emb_init = (gamma + self.eps) / hidden_dim entity_dim = 2 * hidden_dim if double_entity_emb else hidden_dim relation_dim = 2 * hidden_dim if double_relation_emb else hidden_dim device = get_device(args) self.loss_gen = LossGenerator(args, args.loss_genre if hasattr(args, 'loss_genre') else 'Logsigmoid', args.neg_adversarial_sampling if hasattr(args, 'neg_adversarial_sampling') else False, args.adversarial_temperature if hasattr(args, 'adversarial_temperature') else 1.0, args.pairwise if hasattr(args, 'pairwise') else False) self.entity_emb = ExternalEmbedding(args, n_entities, entity_dim, F.cpu() if args.mix_cpu_gpu else device) if model_name == 'RESCAL': rel_dim = 
relation_dim * entity_dim else: rel_dim = relation_dim self.rel_dim = rel_dim self.entity_dim = entity_dim self.strict_rel_part = args.strict_rel_part self.soft_rel_part = args.soft_rel_part if not self.strict_rel_part and not self.soft_rel_part: self.relation_emb = ExternalEmbedding(args, n_relations, rel_dim, F.cpu() if args.mix_cpu_gpu else device) else: self.global_relation_emb = ExternalEmbedding(args, n_relations, rel_dim, F.cpu()) if model_name == 'TransE' or model_name == 'TransE_l2': self.score_func = TransEScore(gamma, 'l2') elif model_name == 'TransE_l1': self.score_func = TransEScore(gamma, 'l1') elif model_name == 'TransR': projection_emb = ExternalEmbedding(args, n_relations, entity_dim * relation_dim, F.cpu() if args.mix_cpu_gpu else device) self.score_func = TransRScore(gamma, projection_emb, relation_dim, entity_dim) elif model_name == 'DistMult': self.score_func = DistMultScore() elif model_name == 'ComplEx': self.score_func = ComplExScore() elif model_name == 'RESCAL': self.score_func = RESCALScore(relation_dim, entity_dim) elif model_name == 'RotatE': self.score_func = RotatEScore(gamma, self.emb_init) elif model_name == 'SimplE': self.score_func = SimplEScore() self.model_name = model_name self.head_neg_score = self.score_func.create_neg(True) self.tail_neg_score = self.score_func.create_neg(False) self.head_neg_prepare = self.score_func.create_neg_prepare(True) self.tail_neg_prepare = self.score_func.create_neg_prepare(False) self.reset_parameters() def share_memory(self): self.entity_emb.share_memory() if self.strict_rel_part or self.soft_rel_part: self.global_relation_emb.share_memory() else: self.relation_emb.share_memory() if self.model_name == 'TransR': self.score_func.share_memory()
Apache License 2.0
microprediction/timemachines
timemachines/skatertools/data/real.py
hospital_with_exog
python
def hospital_with_exog(k: int, n: int, offset: bool = True) -> ([[float]], [[float]]):
    import math
    y0 = hospital()
    y0_ahead_1 = [h for h in y0[1:]] + [y0[-1]]
    y0_ahead_2 = [h for h in y0[1:]] + [y0[-1], y0[-1]]
    z1 = [ya1 + 5 * np.random.randn() for ya1 in y0_ahead_1]
    z2 = [ya2 + 10 * np.random.randn() for ya2 in y0_ahead_2]
    a0 = [math.cos(2 * i * math.pi / 24) for i in range(len(y0))]
    y0_bumped = [y0i + 5 * a0i for y0i, a0i in zip(y0, a0)]
    y = [[y0bi, z1i, z2i] for y0bi, z1i, z2i in zip(y0_bumped, z1, z2)]
    a = [[a0i] for a0i in a0]
    assert len(y) == len(a), 'not same len'
    y_trunc = y[-(n + k):]
    a_trunc = a[-(n + k):]
    if offset:
        a_ahead = a_trunc[k:]
        return y_trunc[:len(a_ahead)], a_ahead
    else:
        return y_trunc, a_trunc
Returns real hospital data augmented with fake known-in-advance and exogenous variables.

:param k: steps to look ahead
:param n: length of time series
:param offset: if True, the known-in-advance data a is offset by k so it can be fed to a skater directly
:returns: y, a
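A usage sketch showing the shapes the helper returns (the values are random, so only the shapes matter):

y, a = hospital_with_exog(k=3, n=100)  # offset=True by default
print(len(y), len(a))         # 100 100: targets and known-in-advance data are aligned
print(len(y[0]), len(a[0]))   # 3 1: each y entry is [target, exog1, exog2]; each a entry holds the cosine term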
https://github.com/microprediction/timemachines/blob/4332cd2a97b1bb2d1ea0a4437e73f3255f2a762e/timemachines/skatertools/data/real.py#L77-L101
import csv import numpy as np def hospital(n:int=2000): return [146.0, 126.0, 126.0, 124.0, 124.0, 157.0, 157.0, 155.0, 155.0, 230.0, 230.0, 265.0, 265.0, 221.0, 221.0, 194.0, 194.0, 181.0, 248.0, 248.0, 248.0, 214.0, 214.0, 172.0, 172.0, 201.0, 201.0, 216.0, 216.0, 179.0, 179.0, 232.0, 232.0, 202.0, 202.0, 195.0, 195.0, 190.0, 190.0, 185.0, 185.0, 201.0, 201.0, 221.0, 221.0, 214.0, 214.0, 213.0, 213.0, 215.0, 215.0, 199.0, 199.0, 183.0, 183.0, 201.0, 201.0, 197.0, 197.0, 181.0, 181.0, 177.0, 177.0, 193.0, 193.0, 179.0, 179.0, 192.0, 192.0, 177.0, 177.0, 187.0, 187.0, 177.0, 177.0, 201.0, 201.0, 209.0, 209.0, 202.0, 202.0, 188.0, 188.0, 145.0, 145.0, 139.0, 139.0, 150.0, 150.0, 136.0, 136.0, 114.0, 114.0, 96.0, 96.0, 78.0, 78.0, 56.0, 56.0, 53.0, 53.0, 50.0, 50.0, 43.0, 43.0, 37.0, 37.0, 43.0, 43.0, 53.0, 53.0, 58.0, 58.0, 60.0, 60.0, 50.0, 50.0, 42.0, 42.0, 40.0, 40.0, 36.0, 36.0, 42.0, 42.0, 39.0, 39.0, 30.0, 30.0, 14.0, 14.0, 23.0, 23.0, 23.0, 23.0, 10.0, 10.0, 16.0, 16.0, 15.0, 15.0, 5.0, 5.0, 49.0, 49.0, 72.0, 72.0, 97.0, 97.0, 107.0, 107.0, 81.0, 81.0, 82.0, 82.0, 109.0, 109.0, 123.0, 123.0, 99.0, 99.0, 102.0, 102.0, 132.0, 132.0, 154.0, 154.0, 172.0, 172.0, 153.0, 153.0, 149.0, 149.0, 150.0, 150.0, 136.0, 136.0, 130.0, 130.0, 118.0, 118.0, 92.0, 92.0, 92.0, 92.0, 105.0, 105.0, 82.0, 82.0, 78.0, 78.0, 72.0, 72.0, 58.0, 58.0, 53.0, 53.0, 60.0, 60.0, 50.0, 50.0, 43.0, 43.0, 38.0, 38.0, 32.0, 32.0, 30.0, 30.0, 32.0, 32.0, 26.0, 26.0, 36.0, 36.0, 37.0, 37.0, 42.0, 42.0, 51.0, 51.0, 51.0, 51.0, 54.0, 54.0, 32.0, 32.0, 33.0, 33.0, 35.0, 35.0, 32.0, 32.0, 51.0, 51.0, 67.0, 67.0, 56.0, 56.0, 51.0, 51.0, 41.0, 41.0, 28.0, 28.0, 31.0, 31.0, 38.0, 38.0, 45.0, 45.0, 53.0, 53.0, 51.0, 51.0, 65.0, 65.0, 64.0, 64.0, 52.0, 52.0, 39.0, 39.0, 52.0, 52.0, 54.0, 54.0, 82.0, 82.0, 107.0, 107.0, 97.0, 97.0, 90.0, 90.0, 79.0, 79.0, 80.0, 80.0, 75.0, 75.0, 85.0, 85.0, 62.0, 62.0, 58.0, 58.0, 55.0, 55.0, 50.0, 50.0, 56.0, 56.0, 62.0, 62.0, 74.0, 74.0, 77.0, 77.0, 66.0, 66.0, 80.0, 80.0, 88.0, 88.0, 81.0, 81.0, 92.0, 92.0, 151.0, 151.0, 172.0, 172.0, 173.0, 173.0, 233.0, 233.0, 262.0, 262.0, 262.0, 262.0, 212.0, 212.0, 224.0, 224.0, 277.0, 277.0, 248.0, 248.0, 234.0, 234.0, 236.0, 236.0, 218.0, 218.0, 169.0, 169.0, 174.0, 174.0, 248.0, 248.0, 231.0, 231.0, 231.0, 231.0, 233.0, 233.0, 187.0, 187.0, 174.0, 174.0, 242.0, 242.0, 176.0, 176.0, 158.0, 158.0, 159.0, 159.0, 168.0, 168.0, 183.0, 183.0, 172.0, 172.0, 167.0, 167.0, 184.0, 184.0, 173.0, 173.0, 166.0, 166.0, 166.0, 166.0, 167.0, 167.0, 164.0, 164.0, 157.0, 157.0, 147.0, 147.0, 146.0, 146.0, 143.0, 143.0, 113.0, 113.0, 94.0, 94.0, 80.0, 80.0, 77.0, 77.0, 65.0, 65.0, 73.0, 73.0, 74.0, 74.0, 60.0, 60.0, 45.0, 45.0, 36.0, 36.0, 91.0, 91.0, 91.0, 91.0, 163.0, 163.0, 163.0, 163.0, 222.0, 222.0, 192.0, 192.0, 194.0, 194.0, 218.0, 218.0, 250.0, 250.0, 236.0, 236.0, 241.0, 241.0, 216.0, 216.0, 191.0, 191.0, 194.0, 194.0, 189.0, 189.0, 164.0, 164.0, 157.0, 157.0, 174.0, 174.0, 219.0, 219.0, 192.0, 192.0, 152.0, 152.0, 153.0, 153.0, 141.0, 141.0, 133.0, 133.0, 145.0, 145.0, 170.0, 170.0, 153.0, 153.0, 150.0, 150.0, 173.0, 173.0, 171.0, 171.0, 162.0, 162.0, 166.0, 166.0, 148.0, 148.0, 141.0, 141.0, 151.0, 151.0, 131.0, 115.0, 115.0, 103.0, 103.0, 103.0, 75.0, 75.0, 64.0, 64.0, 50.0, 50.0, 48.0, 48.0, 36.0, 36.0, 23.0, 23.0, 30.0, 30.0, 38.0, 38.0, 24.0, 24.0, 49.0, 49.0, 44.0, 44.0, 44.0, 44.0, 50.0, 50.0, 48.0, 48.0, 61.0, 61.0, 65.0, 65.0, 77.0, 77.0, 66.0, 66.0, 51.0, 51.0, 52.0, 52.0, 54.0, 54.0, 62.0, 62.0, 68.0, 68.0, 66.0, 66.0, 58.0, 58.0, 82.0, 82.0, 98.0, 
98.0, 95.0, 95.0, 102.0, 102.0, 94.0, 94.0, 78.0, 78.0, 68.0, 68.0, 61.0, 61.0, 56.0, 56.0, 63.0, 63.0, 83.0, 83.0, 87.0, 87.0, 80.0, 80.0, 77.0, 77.0, 74.0, 74.0, 68.0, 68.0, 63.0, 63.0, 52.0, 52.0, 61.0, 61.0, 70.0, 70.0, 72.0, 72.0, 65.0, 65.0, 63.0, 63.0, 61.0, 61.0, 44.0, 44.0, 37.0, 37.0, 46.0, 46.0, 43.0, 43.0, 55.0, 55.0, 68.0, 68.0, 67.0, 67.0, 59.0, 59.0, 55.0, 55.0, 66.0, 66.0, 69.0, 69.0, 96.0, 96.0, 156.0, 156.0, 190.0, 190.0, 162.0, 162.0, 187.0, 187.0, 193.0, 193.0, 191.0, 191.0, 238.0, 238.0, 256.0, 256.0, 295.0, 295.0, 264.0, 264.0, 261.0, 261.0, 265.0, 265.0, 244.0, 244.0, 262.0, 262.0, 271.0, 271.0, 233.0, 233.0, 217.0, 217.0, 213.0, 213.0, 205.0, 205.0, 162.0, 162.0, 160.0, 160.0, 188.0, 188.0, 170.0, 170.0, 143.0, 143.0, 140.0, 140.0, 146.0, 146.0, 121.0, 121.0, 94.0, 94.0, 78.0, 78.0, 75.0, 75.0, 64.0, 64.0, 72.0, 72.0, 80.0, 80.0, 71.0, 71.0, 62.0, 62.0, 56.0, 56.0, 48.0, 48.0, 49.0, 49.0, 54.0, 54.0, 58.0, 58.0, 59.0, 59.0, 72.0, 72.0, 81.0, 81.0, 117.0, 117.0, 113.0, 113.0, 153.0, 153.0, 173.0, 173.0, 128.0, 128.0, 118.0, 118.0, 125.0, 125.0, 102.0, 102.0, 120.0, 120.0, 139.0, 139.0, 123.0, 123.0, 196.0, 196.0, 241.0, 241.0, 198.0, 198.0, 169.0, 169.0, 182.0, 182.0, 161.0, 161.0, 119.0, 119.0, 198.0, 198.0, 192.0, 192.0, 198.0, 198.0, 228.0, 228.0, 196.0, 196.0, 187.0, 187.0, 201.0, 201.0, 187.0, 187.0, 169.0, 169.0, 136.0, 136.0, 145.0, 145.0, 146.0, 146.0, 136.0, 136.0, 137.0, 137.0, 124.0, 124.0, 130.0, 130.0, 139.0, 139.0, 118.0, 118.0, 102.0, 102.0, 87.0, 87.0, 79.0, 79.0, 67.0, 67.0, 60.0, 60.0, 56.0, 56.0, 52.0, 52.0, 52.0, 52.0, 54.0, 54.0, 77.0, 77.0, 85.0, 85.0, 85.0, 85.0, 134.0, 134.0, 128.0, 128.0, 129.0, 129.0, 198.0, 198.0, 252.0, 252.0, 122.0, 122.0, 365.0, 365.0, 365.0, 365.0, 234.0, 234.0, 214.0, 214.0, 229.0, 229.0, 152.0, 152.0, 256.0, 256.0, 256.0, 256.0, 311.0, 311.0, 325.0, 325.0, 305.0, 305.0, 258.0, 258.0, 196.0, 196.0, 243.0, 243.0, 292.0, 292.0, 203.0, 203.0, 185.0, 185.0, 175.0, 175.0, 213.0, 213.0, 220.0, 220.0, 220.0, 220.0, 213.0, 213.0, 193.0, 193.0, 192.0, 192.0, 156.0, 156.0, 173.0, 173.0, 154.0, 154.0, 147.0, 147.0, 151.0, 151.0, 112.0, 112.0, 90.0, 90.0, 90.0, 90.0, 89.0, 89.0, 82.0, 82.0, 70.0, 70.0, 64.0, 64.0, 56.0, 56.0, 46.0, 46.0, 34.0, 34.0, 89.0, 89.0, 86.0, 86.0, 90.0, 90.0, 130.0, 130.0, 116.0, 116.0, 128.0, 128.0, 318.0, 318.0, 346.0, 346.0, 289.0, 289.0, 254.0, 254.0, 269.0, 269.0, 231.0, 231.0, 198.0, 198.0, 199.0, 199.0, 199.0, 199.0, 144.0, 144.0, 117.0, 117.0, 89.0, 89.0, 75.0, 75.0, 73.0, 73.0, 92.0, 92.0, 96.0, 96.0, 98.0, 98.0, 110.0, 110.0, 102.0, 102.0, 92.0, 92.0, 76.0, 76.0, 83.0, 83.0, 98.0, 98.0, 125.0, 125.0, 131.0, 131.0, 134.0, 134.0, 142.0, 112.0, 112.0, 112.0, 91.0, 94.0, 94.0, 94.0, 91.0, 91.0, 71.0, 71.0, 58.0, 58.0, 81.0, 81.0, 102.0, 102.0, 112.0, 112.0, 117.0, 117.0, 114.0, 114.0, 112.0, 112.0, 89.0, 89.0, 76.0, 76.0, 52.0, 52.0, 39.0, 39.0, 33.0, 33.0, 32.0, 32.0, 30.0, 30.0, 23.0, 23.0, 64.0, 64.0, 56.0, 56.0, 66.0, 66.0, 91.0, 91.0, 30.0, 30.0, 28.0, 28.0, 86.0, 86.0, 99.0, 99.0, 97.0, 97.0][:n]
MIT License
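A minimal usage sketch for the `hospital` entry above: the function simply slices a hardcoded series, so it works as a quick univariate test fixture. The import path is inferred from the file path in the record; the sanity-check prints are illustrative only.

from timemachines.skatertools.data.real import hospital

ys = hospital(n=500)              # first 500 observations of the bundled series
print(len(ys), min(ys), max(ys))  # quick sanity check before using it as a test fixture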
camelot-dev/camelot
camelot/parsers/stream.py
Stream._group_rows
python
def _group_rows(text, row_tol=2):
    row_y = 0
    rows = []
    temp = []
    for t in text:
        if t.get_text().strip():
            if not np.isclose(row_y, t.y0, atol=row_tol):
                rows.append(sorted(temp, key=lambda t: t.x0))
                temp = []
                row_y = t.y0
            temp.append(t)
    rows.append(sorted(temp, key=lambda t: t.x0))
    if len(rows) > 1:
        __ = rows.pop(0)
    return rows
Groups PDFMiner text objects into rows vertically within a tolerance.

Parameters
----------
text : list
    List of PDFMiner text objects.
row_tol : int, optional (default: 2)
    Tolerance used when grouping text objects vertically into the same row.

Returns
-------
rows : list
    Two-dimensional list of text objects grouped into rows.
https://github.com/camelot-dev/camelot/blob/644bbe7c6d57b95aefa2f049a9aacdbc061cc04f/camelot/parsers/stream.py#L105-L139
import os
import logging
import warnings

import numpy as np
import pandas as pd

from .base import BaseParser
from ..core import TextEdges, Table
from ..utils import text_in_bbox, get_table_index, compute_accuracy, compute_whitespace


logger = logging.getLogger("camelot")


class Stream(BaseParser):
    def __init__(
        self,
        table_regions=None,
        table_areas=None,
        columns=None,
        split_text=False,
        flag_size=False,
        strip_text="",
        edge_tol=50,
        row_tol=2,
        column_tol=0,
        **kwargs,
    ):
        self.table_regions = table_regions
        self.table_areas = table_areas
        self.columns = columns
        self._validate_columns()
        self.split_text = split_text
        self.flag_size = flag_size
        self.strip_text = strip_text
        self.edge_tol = edge_tol
        self.row_tol = row_tol
        self.column_tol = column_tol

    @staticmethod
    def _text_bbox(t_bbox):
        xmin = min([t.x0 for direction in t_bbox for t in t_bbox[direction]])
        ymin = min([t.y0 for direction in t_bbox for t in t_bbox[direction]])
        xmax = max([t.x1 for direction in t_bbox for t in t_bbox[direction]])
        ymax = max([t.y1 for direction in t_bbox for t in t_bbox[direction]])
        text_bbox = (xmin, ymin, xmax, ymax)
        return text_bbox

    @staticmethod
MIT License
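A hedged sketch of the `_group_rows` helper above, called directly on stand-in objects that mimic the pdfminer attributes it reads (`get_text()`, `x0`, `y0`). The `FakeText` class, the coordinates, and the direct call to a private method are purely illustrative and not part of camelot's public API.

from camelot.parsers.stream import Stream

class FakeText:
    # minimal stand-in for a pdfminer LTTextLine: text plus bottom-left coordinates
    def __init__(self, text, x0, y0):
        self._text, self.x0, self.y0 = text, x0, y0

    def get_text(self):
        return self._text

# two table rows, deliberately slightly jittered in y
text = [
    FakeText("Name", x0=50, y0=700), FakeText("Age", x0=150, y0=700.5),
    FakeText("Alice", x0=50, y0=680), FakeText("30", x0=150, y0=680.2),
]
rows = Stream._group_rows(text, row_tol=2)
print([[t.get_text() for t in row] for row in rows])
# [['Name', 'Age'], ['Alice', '30']] -- objects within row_tol of the same y0 share a row, sorted by x0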
jzahedieh/django-tennis-ladder
ladder/models.py
Season.get_leader_stats
python
def get_leader_stats(self, user):
    current_leaders = {}
    for ladder in self.ladder_set.all():
        current_leaders[ladder.id] = ladder.get_leader(user=user)

    return {
        'current_leaders': current_leaders,
    }
Generates the list of leaders for the current season
https://github.com/jzahedieh/django-tennis-ladder/blob/03a9fc9ec6d0830ac1d6648428eca11755eabb00/ladder/models.py#L42-L53
from datetime import date
import operator

from django.db import models
from django.db.models import Avg
from django.contrib.auth.models import User


class Season(models.Model):
    name = models.CharField(max_length=150)
    start_date = models.DateField('Start date')
    end_date = models.DateField('End date')
    season_round = models.IntegerField()

    class Meta:
        ordering = ['-start_date',]

    def __str__(self):
        return str(self.start_date.year) + ' Round ' + str(self.season_round)

    def get_stats(self):
        player_count = 0
        results_count = 0
        total_games_count = 0.0

        for ladder in self.ladder_set.all():
            player_count += ladder.league_set.count()
            results_count += ladder.result_set.count() / 2
            total_games_count += (ladder.league_set.count() * (ladder.league_set.count() - 1)) / 2

        percentage_played = (results_count / total_games_count) * 100

        return {
            'divisions': self.ladder_set.count(),
            'percentage_played': "{0:.2f}".format(percentage_played),
            'total_games_count': total_games_count,
            'results_count': results_count,
            'player_count': player_count
        }
MIT License
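A hypothetical view-level sketch of how `get_leader_stats` above might be consumed; the queryset choice and template name are assumptions for illustration.

from django.shortcuts import render
from ladder.models import Season

def season_overview(request):
    # Meta.ordering is ['-start_date'], so first() returns the most recent season
    season = Season.objects.first()
    stats = season.get_leader_stats(user=request.user)
    # stats['current_leaders'] maps each ladder id to that ladder's current leader
    return render(request, 'ladder/overview.html', {'leaders': stats['current_leaders']})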
exhuma/puresnmp
puresnmp/api/pythonic.py
walk
python
def walk(
    ip, community, oid, port=161, timeout=DEFAULT_TIMEOUT, version=Version.V2C
):
    raw_result = raw.walk(ip, community, oid, port, timeout, version=version)

    for raw_oid, raw_value in raw_result:
        yield VarBind(raw_oid, raw_value.pythonize())
Delegates to :py:func:`~puresnmp.api.raw.walk` but returns simple Python types.

See the "raw" equivalent for detailed documentation & examples.
https://github.com/exhuma/puresnmp/blob/a755b5365acdc8958e530234f6916a90923f5b96/puresnmp/api/pythonic.py#L183-L196
import logging from collections import OrderedDict from datetime import timedelta from typing import TYPE_CHECKING, Generator, List, TypeVar from warnings import warn from x690.types import ObjectIdentifier, Type from ..const import DEFAULT_TIMEOUT, Version from ..pdu import Trap from ..snmp import VarBind from ..util import BulkResult from . import raw if TYPE_CHECKING: from typing import Any, Callable, Dict, Tuple, Union from puresnmp.typevars import PyType T = TypeVar("T", bound=PyType) _set = set LOG = logging.getLogger(__name__) OID = ObjectIdentifier.from_string TWalkResponse = Generator[VarBind, None, None] class TrapInfo: raw_trap: Trap def __init__(self, raw_trap: Trap) -> None: self.raw_trap = raw_trap def __repr__(self): return "<TrapInfo from %s on %s with %d values>" % ( self.origin, self.oid, len(self.values), ) @property def origin(self) -> str: if self.raw_trap is None or self.raw_trap.source is None: return "" return self.raw_trap.source.address @property def uptime(self) -> timedelta: return self.raw_trap.varbinds[0].value.pythonize() @property def oid(self) -> str: return self.raw_trap.varbinds[1].value.pythonize() @property def values(self): output = {} for oid_raw, value_raw in self.raw_trap.varbinds[2:]: oid = oid_raw.pythonize() value = value_raw.pythonize() output[oid] = value return output def get(ip, community, oid, port=161, timeout=2, version=Version.V2C): raw_value = raw.get( ip, community, oid, port, timeout=timeout, version=version ) return raw_value.pythonize() def multiget( ip, community, oids, port=161, timeout=DEFAULT_TIMEOUT, version=Version.V2C ): raw_output = raw.multiget( ip, community, oids, port, timeout, version=version ) pythonized = [value.pythonize() for value in raw_output] return pythonized def getnext( ip, community, oid, port=161, timeout=DEFAULT_TIMEOUT, version=Version.V2C ): result = multigetnext( ip, community, [oid], port, timeout=timeout, version=version ) return result[0] def multigetnext( ip, community, oids, port=161, timeout=DEFAULT_TIMEOUT, version=Version.V2C ): raw_output = raw.multigetnext( ip, community, oids, port, timeout, version=version ) pythonized = [ VarBind(oid, value.pythonize()) for oid, value in raw_output ] return pythonized
MIT License
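A short usage sketch for the pythonic `walk` above; the target address, community string, and OID (ifDescr from IF-MIB) are placeholders.

from puresnmp.api.pythonic import walk

# walk() is a generator; each item is a VarBind(oid, value) with the value already pythonized
for oid, value in walk('192.0.2.1', 'public', '1.3.6.1.2.1.2.2.1.2'):
    print(oid, '=', value)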
ageitgey/medium_to_ghost
medium_to_ghost/medium_to_ghost.py
create_export_file
python
def create_export_file(converted_posts):
    return {
        "db": [
            {
                "meta": {
                    "exported_on": int(time.time()),
                    "version": "2.18.3"
                },
                "data": {
                    "posts": converted_posts
                }
            }
        ]
    }
Create a Ghost import json from a list of Ghost post documents.

:param converted_posts: Ghost formatted python docs.
:return: A Dict representation of a ghost export file you can dump to json.
https://github.com/ageitgey/medium_to_ghost/blob/0403e8e7b005e1b5af78b4ae58199d3b513416cc/medium_to_ghost/medium_to_ghost.py#L23-L41
import click
from pathlib import Path
from medium_to_ghost.medium_post_parser import convert_medium_post_to_ghost_json
import time
import json
from zipfile import ZipFile
import logging
import sys
import shutil

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('medium_to_ghost')


def create_ghost_import_zip():
    shutil.make_archive("medium_export_for_ghost", "zip", "exported_content", logger=logger)
MIT License
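A brief sketch of how `create_export_file` above is typically used; the sample post dict is a heavily trimmed stand-in, not a complete Ghost post document.

import json
from medium_to_ghost.medium_to_ghost import create_export_file

posts = [{"title": "Hello Medium", "slug": "hello-medium", "status": "published"}]

export = create_export_file(posts)
with open("ghost-import.json", "w") as f:
    json.dump(export, f, indent=2)  # the resulting file can be imported from Ghost's Labs screen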
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/alarm_control_panel/envisalink.py
EnvisalinkAlarm.async_alarm_disarm
python
def async_alarm_disarm(self, code=None):
    if code:
        self.hass.data[DATA_EVL].disarm_partition(
            str(code), self._partition_number)
    else:
        self.hass.data[DATA_EVL].disarm_partition(
            str(self._code), self._partition_number)
Send disarm command.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/alarm_control_panel/envisalink.py#L131-L138
import asyncio import logging import voluptuous as vol from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect import homeassistant.components.alarm_control_panel as alarm import homeassistant.helpers.config_validation as cv from homeassistant.components.envisalink import ( DATA_EVL, EnvisalinkDevice, PARTITION_SCHEMA, CONF_CODE, CONF_PANIC, CONF_PARTITIONNAME, SIGNAL_KEYPAD_UPDATE, SIGNAL_PARTITION_UPDATE) from homeassistant.const import ( STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_UNKNOWN, STATE_ALARM_TRIGGERED, STATE_ALARM_PENDING, ATTR_ENTITY_ID) _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['envisalink'] SERVICE_ALARM_KEYPRESS = 'envisalink_alarm_keypress' ATTR_KEYPRESS = 'keypress' ALARM_KEYPRESS_SCHEMA = vol.Schema({ vol.Required(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_KEYPRESS): cv.string }) @asyncio.coroutine def async_setup_platform(hass, config, async_add_devices, discovery_info=None): configured_partitions = discovery_info['partitions'] code = discovery_info[CONF_CODE] panic_type = discovery_info[CONF_PANIC] devices = [] for part_num in configured_partitions: device_config_data = PARTITION_SCHEMA(configured_partitions[part_num]) device = EnvisalinkAlarm( hass, part_num, device_config_data[CONF_PARTITIONNAME], code, panic_type, hass.data[DATA_EVL].alarm_state['partition'][part_num], hass.data[DATA_EVL] ) devices.append(device) async_add_devices(devices) @callback def alarm_keypress_handler(service): entity_ids = service.data.get(ATTR_ENTITY_ID) keypress = service.data.get(ATTR_KEYPRESS) target_devices = [device for device in devices if device.entity_id in entity_ids] for device in target_devices: device.async_alarm_keypress(keypress) hass.services.async_register( alarm.DOMAIN, SERVICE_ALARM_KEYPRESS, alarm_keypress_handler, schema=ALARM_KEYPRESS_SCHEMA) return True class EnvisalinkAlarm(EnvisalinkDevice, alarm.AlarmControlPanel): def __init__(self, hass, partition_number, alarm_name, code, panic_type, info, controller): self._partition_number = partition_number self._code = code self._panic_type = panic_type _LOGGER.debug("Setting up alarm: %s", alarm_name) super().__init__(alarm_name, info, controller) @asyncio.coroutine def async_added_to_hass(self): async_dispatcher_connect( self.hass, SIGNAL_KEYPAD_UPDATE, self._update_callback) async_dispatcher_connect( self.hass, SIGNAL_PARTITION_UPDATE, self._update_callback) @callback def _update_callback(self, partition): if partition is None or int(partition) == self._partition_number: self.async_schedule_update_ha_state() @property def code_format(self): if self._code: return None return '^\\d{4,6}$' @property def state(self): state = STATE_UNKNOWN if self._info['status']['alarm']: state = STATE_ALARM_TRIGGERED elif self._info['status']['armed_away']: state = STATE_ALARM_ARMED_AWAY elif self._info['status']['armed_stay']: state = STATE_ALARM_ARMED_HOME elif self._info['status']['exit_delay']: state = STATE_ALARM_PENDING elif self._info['status']['entry_delay']: state = STATE_ALARM_PENDING elif self._info['status']['alpha']: state = STATE_ALARM_DISARMED return state @asyncio.coroutine
MIT License
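For the Envisalink entry above, the coroutine is normally reached through Home Assistant's generic alarm service rather than called directly; the entity id and code below are placeholders, and the snippet assumes it runs inside an async context that already holds a `hass` reference.

async def disarm_house(hass):
    # routed by Home Assistant to EnvisalinkAlarm.async_alarm_disarm, which forwards
    # the supplied code (or the configured default) to the Envisalink partition
    await hass.services.async_call(
        'alarm_control_panel', 'alarm_disarm',
        {'entity_id': 'alarm_control_panel.house', 'code': '1234'},
        blocking=True,
    )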
orbingol/nurbs-python
geomdl/linalg.py
vector_is_zero
python
def vector_is_zero(vector_in, tol=10e-8):
    if not isinstance(vector_in, (list, tuple)):
        raise TypeError("Input vector must be a list or a tuple")

    res = [False for _ in range(len(vector_in))]
    for idx in range(len(vector_in)):
        if abs(vector_in[idx]) < tol:
            res[idx] = True
    return all(res)
Checks if the input vector is a zero vector.

:param vector_in: input vector
:type vector_in: list, tuple
:param tol: tolerance value
:type tol: float
:return: True if the input vector is zero, False otherwise
:rtype: bool
https://github.com/orbingol/nurbs-python/blob/8ae8b127eb0b130a25a6c81e98e90f319733bca0/geomdl/linalg.py#L260-L277
import os import math from copy import deepcopy from functools import reduce from .exceptions import GeomdlException from . import _linalg try: from functools import lru_cache except ImportError: from .functools_lru_cache import lru_cache def vector_cross(vector1, vector2): try: if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0: raise ValueError("Input vectors cannot be empty") except TypeError as e: print("An error occurred: {}".format(e.args[-1])) raise TypeError("Input must be a list or tuple") except Exception: raise if not 1 < len(vector1) <= 3 or not 1 < len(vector2) <= 3: raise ValueError("The input vectors should contain 2 or 3 elements") if len(vector1) == 2: v1 = [float(v) for v in vector1] + [0.0] else: v1 = vector1 if len(vector2) == 2: v2 = [float(v) for v in vector2] + [0.0] else: v2 = vector2 vector_out = [(v1[1] * v2[2]) - (v1[2] * v2[1]), (v1[2] * v2[0]) - (v1[0] * v2[2]), (v1[0] * v2[1]) - (v1[1] * v2[0])] return vector_out def vector_dot(vector1, vector2): try: if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0: raise ValueError("Input vectors cannot be empty") except TypeError as e: print("An error occurred: {}".format(e.args[-1])) raise TypeError("Input must be a list or tuple") except Exception: raise prod = 0.0 for v1, v2 in zip(vector1, vector2): prod += v1 * v2 return prod def vector_multiply(vector_in, scalar): scaled_vector = [v * scalar for v in vector_in] return scaled_vector def vector_sum(vector1, vector2, coeff=1.0): summed_vector = [v1 + (coeff * v2) for v1, v2 in zip(vector1, vector2)] return summed_vector def vector_normalize(vector_in, decimals=18): try: if vector_in is None or len(vector_in) == 0: raise ValueError("Input vector cannot be empty") except TypeError as e: print("An error occurred: {}".format(e.args[-1])) raise TypeError("Input must be a list or tuple") except Exception: raise magnitude = vector_magnitude(vector_in) if magnitude > 0: vector_out = [] for vin in vector_in: vector_out.append(vin / magnitude) return [float(("{:." + str(decimals) + "f}").format(vout)) for vout in vector_out] else: raise ValueError("The magnitude of the vector is zero") def vector_generate(start_pt, end_pt, normalize=False): try: if start_pt is None or len(start_pt) == 0 or end_pt is None or len(end_pt) == 0: raise ValueError("Input points cannot be empty") except TypeError as e: print("An error occurred: {}".format(e.args[-1])) raise TypeError("Input must be a list or tuple") except Exception: raise ret_vec = [] for sp, ep in zip(start_pt, end_pt): ret_vec.append(ep - sp) if normalize: ret_vec = vector_normalize(ret_vec) return ret_vec def vector_mean(*args): sz = len(args) mean_vector = [0.0 for _ in range(len(args[0]))] for input_vector in args: mean_vector = [a+b for a, b in zip(mean_vector, input_vector)] mean_vector = [a / sz for a in mean_vector] return mean_vector def vector_magnitude(vector_in): sq_sum = 0.0 for vin in vector_in: sq_sum += vin**2 return math.sqrt(sq_sum) def vector_angle_between(vector1, vector2, **kwargs): degrees = kwargs.get('degrees', True) magn1 = vector_magnitude(vector1) magn2 = vector_magnitude(vector2) acos_val = vector_dot(vector1, vector2) / (magn1 * magn2) angle_radians = math.acos(acos_val) if degrees: return math.degrees(angle_radians) else: return angle_radians
MIT License
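Quick self-contained checks for `vector_is_zero` above, importing it via the module path shown in the record.

from geomdl import linalg

print(linalg.vector_is_zero([0.0, 0.0, 0.0]))        # True
print(linalg.vector_is_zero([1e-9, -1e-9, 0.0]))     # True: every component is below the default 10e-8 tolerance
print(linalg.vector_is_zero([1e-3, 0.0, 0.0]))       # False
print(linalg.vector_is_zero((0.0, 0.0), tol=1e-12))  # tuples are accepted as well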
pydoit/doit
doit/reporter.py
ConsoleReporter.execute_task
python
def execute_task(self, task):
    if task.actions and (task.name[0] != '_'):
        self.write('. %s\n' % task.title())
Called when execution of a task starts.
https://github.com/pydoit/doit/blob/5f71d3b113a03292b35280479ffe5203b8f81156/doit/reporter.py#L38-L43
import sys
import time
import datetime
import json
from io import StringIO


class ConsoleReporter(object):
    desc = 'console output'

    def __init__(self, outstream, options):
        self.failures = []
        self.runtime_errors = []
        self.failure_verbosity = options.get('failure_verbosity', 0)
        self.outstream = outstream

    def write(self, text):
        self.outstream.write(text)

    def initialize(self, tasks, selected_tasks):
        pass

    def get_status(self, task):
        pass
MIT License
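A minimal sketch exercising `execute_task` above with a stand-in task object; `FakeTask` only mimics the attributes the method touches (`name`, `actions`, `title()`) and is not part of doit.

import sys
from doit.reporter import ConsoleReporter

class FakeTask:
    def __init__(self, name, actions):
        self.name, self.actions = name, actions

    def title(self):
        return self.name

reporter = ConsoleReporter(sys.stdout, {})
reporter.execute_task(FakeTask('build', actions=['echo hi']))    # prints ". build"
reporter.execute_task(FakeTask('_hidden', actions=['echo hi']))  # silent: name starts with an underscore
reporter.execute_task(FakeTask('group', actions=[]))             # silent: no actions defined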